// Remember the operand index of the group flags.
SmallVector<unsigned, 8> GroupIdx;
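// An INLINEASM machine instruction is laid out as the asm string, an
// extra-info immediate, and then one group per asm operand: a flag word
// followed by that group's value operands. GroupIdx[K] records the MI
// operand index of group K's flag word, so tied uses can locate their
// def group below.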
- unsigned PrevDefGroup = 0;
// Add all of the operand registers to the instruction.
for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
if (InlineAsm::getKind(Flags) == InlineAsm::Kind_RegUse) {
unsigned DefGroup = 0;
if (InlineAsm::isUseOperandTiedToDef(Flags, DefGroup)) {
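        // The flag word of a tied use (a "0"/"1"-style matching constraint)
        // encodes the group number of its matching def;
        // isUseOperandTiedToDef() decodes it into DefGroup.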
- // Check that the def groups are monotonically increasing.
- // Otherwise, the tied uses and defs won't line up, and
- // MI::findTiedOperandIdx() will find the wrong operand. This
- // should be automatically enforced by the front ends when
- // translating "+" constraints into tied def+use pairs.
- assert(DefGroup >= PrevDefGroup &&
- "Tied inline asm operands must be in increasing order.");
- PrevDefGroup = DefGroup;
-
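        // GroupIdx entries point at flag words; +1 skips to the first value
        // operand of the def group and of the current (last) use group.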
unsigned DefIdx = GroupIdx[DefGroup] + 1;
unsigned UseIdx = GroupIdx.back() + 1;
for (unsigned j = 0; j != NumVals; ++j) {
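          // Tie each def register to the corresponding use register,
          // e.g. via MI->tieOperands(DefIdx + j, UseIdx + j).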
%1 = load i64* %retval ; <i64> [#uses=1]
ret i64 %1
}
+
+; The tied operands are not necessarily in the same order as the defs.
+; PR13742
+define i64 @swapped(i64 %x, i64 %y) nounwind {
+entry:
+ %x0 = call { i64, i64 } asm "foo", "=r,=r,1,0,~{dirflag},~{fpsr},~{flags}"(i64 %x, i64 %y) nounwind
+ %x1 = extractvalue { i64, i64 } %x0, 0
+ ret i64 %x1
+}
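+; In the constraint string "=r,=r,1,0", the first input (%x) is tied to
+; output 1 and the second input (%y) to output 0, so the tied use groups
+; reference their defs in decreasing order, which is exactly the case the
+; removed assertion used to reject.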