// Sink address computing for memory operands into the block.
MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
}
+ } else if (GetResultInst *GRI = dyn_cast<GetResultInst>(I)) {
+ // Ensure that all getresult instructions live in the same basic block
+ // as their associated struct-value instructions. Codegen requires
+ // this, as lowering only works on one basic block at a time.
+ if (Instruction *Agg = dyn_cast<Instruction>(GRI->getAggregateValue())) {
+ BasicBlock *AggBB = Agg->getParent();
+ if (AggBB != GRI->getParent())
+ GRI->moveBefore(AggBB->getTerminator());
+ }
}
}
--- /dev/null
+; RUN: llvm-as < %s | llc -march=x86
+; Make sure codegen handles a getresult instruction that lives in a
+; different basic block than the call producing its aggregate operand.
+
+declare {x86_fp80, x86_fp80} @test()
+
+define void @call2(x86_fp80* %P1, x86_fp80* %P2) {
+ %a = call {x86_fp80,x86_fp80} @test()
+ %b = getresult {x86_fp80,x86_fp80} %a, 1
+ store x86_fp80 %b, x86_fp80* %P1
+  br label %L
+
+L:
+ %c = getresult {x86_fp80,x86_fp80} %a, 0
+ store x86_fp80 %c, x86_fp80* %P2
+ ret void
+}