if (StackAlignment)
stackAlignment = StackAlignment;
}
-
-bool X86Subtarget::enablePostRAScheduler(
- CodeGenOpt::Level OptLevel,
- TargetSubtarget::AntiDepBreakMode& Mode,
- RegClassVector& CriticalPathRCs) const {
- Mode = TargetSubtarget::ANTIDEP_CRITICAL;
- CriticalPathRCs.clear();
- return OptLevel >= CodeGenOpt::Aggressive;
-}
/// indicating the number of scheduling cycles of backscheduling that
/// should be attempted.
unsigned getSpecialAddressLatency() const;
-
- /// enablePostRAScheduler - X86 target is enabling post-alloc scheduling
- /// at 'More' optimization level.
- bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
- TargetSubtarget::AntiDepBreakMode& Mode,
- RegClassVector& CriticalPathRCs) const;
};
} // End llvm namespace
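For readers without the full files: the two hunks above delete X86's implementation and declaration of the enablePostRAScheduler hook. Below is a minimal standalone sketch of what the deleted hook did, using hypothetical stand-in types (OptLevel, AntiDepBreakMode, X86SubtargetSketch) that only mirror the old TargetSubtarget interface; it does not build against LLVM itself.

#include <iostream>
#include <vector>

// Hypothetical stand-ins mirroring the old interface; illustration only.
enum class OptLevel { None, Less, Default, Aggressive };
enum class AntiDepBreakMode { None, Critical, All };

struct X86SubtargetSketch {
  // Mirrors the deleted implementation: request critical-path
  // anti-dependency breaking, no extra critical-path register classes,
  // and enable post-RA scheduling only at the highest optimization level.
  bool enablePostRAScheduler(OptLevel Opt, AntiDepBreakMode &Mode,
                             std::vector<unsigned> &CriticalPathRCs) const {
    Mode = AntiDepBreakMode::Critical;
    CriticalPathRCs.clear();
    return Opt >= OptLevel::Aggressive;
  }
};

int main() {
  X86SubtargetSketch ST;
  AntiDepBreakMode Mode = AntiDepBreakMode::None;
  std::vector<unsigned> RCs;
  std::cout << "post-RA scheduling enabled at -O3: " << std::boolalpha
            << ST.enablePostRAScheduler(OptLevel::Aggressive, Mode, RCs)
            << '\n';
}

With the override gone, X86 falls back to the generic default and no longer requests post-RA scheduling, which is consistent with the reordered and relaxed CHECK lines in the test hunks that follow.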
%tmp14 = fadd float %tmp12, %tmp7
ret float %tmp14
-; CHECK: mulss LCPI1_3(%rip)
-; CHECK-NEXT: mulss LCPI1_0(%rip)
-; CHECK-NEXT: mulss LCPI1_1(%rip)
-; CHECK-NEXT: mulss LCPI1_2(%rip)
-; CHECK-NEXT: addss
-; CHECK-NEXT: addss
-; CHECK-NEXT: addss
-; CHECK-NEXT: ret
+; CHECK: mulss LCPI1_0(%rip)
+; CHECK: mulss LCPI1_1(%rip)
+; CHECK: addss
+; CHECK: mulss LCPI1_2(%rip)
+; CHECK: addss
+; CHECK: mulss LCPI1_3(%rip)
+; CHECK: addss
+; CHECK: ret
}
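A plausible C++ shape for the function these checks cover, reconstructed from the assembly alone (the constants behind the LCPI1_* pool entries and the real signature are not shown in this excerpt, so the name scaled_sum and the literal values are invented):

// Hypothetical reconstruction: four multiplies by constant-pool values
// (mulss LCPI1_0..LCPI1_3) folded into a left-to-right chain of three
// addss, matching the relaxed check order above.
float scaled_sum(float a, float b, float c, float d) {
  return a * 1.5f + b * 2.5f + c * 3.5f + d * 4.5f;
}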
; CHECK: full_me_0:
; CHECK: movsd (%rsi), %xmm0
-; CHECK: addq $8, %rsi
; CHECK: mulsd (%rdx), %xmm0
-; CHECK: addq $8, %rdx
; CHECK: movsd %xmm0, (%rdi)
+; CHECK: addq $8, %rsi
+; CHECK: addq $8, %rdx
; CHECK: addq $8, %rdi
; CHECK: decq %rcx
; CHECK: jne
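The hunk above only sinks the three pointer increments below their uses; the loop being compiled is unchanged. A plausible source shape for full_me_0, reconstructed from the checked assembly (%rdi = destination, %rsi and %rdx = inputs, %rcx = trip count; the parameter names are invented):

// Hypothetical reconstruction of the loop under test: an element-wise
// multiply with all three pointers advanced by one double per iteration.
void full_me_0(double *dst, const double *a, const double *b, long n) {
  for (long i = 0; i != n; ++i)
    dst[i] = a[i] * b[i];
}

The following hunks apply the same increment-sinking to divide variants of this loop, where LSR has folded constant byte displacements (-2048 and -4096) into the addressing modes.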
; CHECK: mulsd -2048(%rdx), %xmm0
; CHECK: movsd %xmm0, -2048(%rdi)
; CHECK: movsd (%rsi), %xmm0
-; CHECK: addq $8, %rsi
; CHECK: divsd (%rdx), %xmm0
-; CHECK: addq $8, %rdx
; CHECK: movsd %xmm0, (%rdi)
+; CHECK: addq $8, %rsi
+; CHECK: addq $8, %rdx
; CHECK: addq $8, %rdi
; CHECK: decq %rcx
; CHECK: jne
; CHECK: mulsd (%rdx), %xmm0
; CHECK: movsd %xmm0, (%rdi)
; CHECK: movsd -2048(%rsi), %xmm0
-; CHECK: addq $8, %rsi
; CHECK: divsd -2048(%rdx), %xmm0
-; CHECK: addq $8, %rdx
; CHECK: movsd %xmm0, -2048(%rdi)
+; CHECK: addq $8, %rsi
+; CHECK: addq $8, %rdx
; CHECK: addq $8, %rdi
; CHECK: decq %rcx
; CHECK: jne
; CHECK: mulsd (%rdx), %xmm0
; CHECK: movsd %xmm0, (%rdi)
; CHECK: movsd -4096(%rsi), %xmm0
-; CHECK: addq $8, %rsi
; CHECK: divsd -4096(%rdx), %xmm0
-; CHECK: addq $8, %rdx
; CHECK: movsd %xmm0, -4096(%rdi)
+; CHECK: addq $8, %rsi
+; CHECK: addq $8, %rdx
; CHECK: addq $8, %rdi
; CHECK: decq %rcx
; CHECK: jne
; CHECK: addsd (%rsi), %xmm0
; CHECK: movsd %xmm0, (%rdx)
; CHECK: movsd 40(%rdi), %xmm0
-; CHECK: addq $8, %rdi
; CHECK: subsd 40(%rsi), %xmm0
-; CHECK: addq $8, %rsi
; CHECK: movsd %xmm0, 40(%rdx)
+; CHECK: addq $8, %rdi
+; CHECK: addq $8, %rsi
; CHECK: addq $8, %rdx
; CHECK: decq %rcx
; CHECK: jne
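A plausible shape for the loop checked above, reconstructed from the assembly (names invented): one store at the induction variable and a second group of accesses at a constant offset of five doubles, which is where the 40(%reg) displacements come from (5 * sizeof(double) == 40).

// Hypothetical reconstruction: the add feeds c[i], the sub feeds c[i + 5];
// all three pointers step by 8 bytes per iteration, now after the stores.
void sum_and_diff(const double *a, const double *b, double *c, long n) {
  for (long i = 0; i != n; ++i) {
    c[i] = a[i] + b[i];
    c[i + 5] = a[i + 5] - b[i + 5];
  }
}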
; CHECK: t1:
; CHECK: movl 8(%esp), %eax
-; CHECK-NEXT: movl 4(%esp), %ecx
; CHECK-NEXT: movapd (%eax), %xmm0
; CHECK-NEXT: movlpd 12(%esp), %xmm0
-; CHECK-NEXT: movapd %xmm0, (%ecx)
+; CHECK-NEXT: movl 4(%esp), %eax
+; CHECK-NEXT: movapd %xmm0, (%eax)
; CHECK-NEXT: ret
}
; CHECK: t2:
; CHECK: movl 8(%esp), %eax
-; CHECK-NEXT: movl 4(%esp), %ecx
; CHECK-NEXT: movapd (%eax), %xmm0
; CHECK-NEXT: movhpd 12(%esp), %xmm0
-; CHECK-NEXT: movapd %xmm0, (%ecx)
+; CHECK-NEXT: movl 4(%esp), %eax
+; CHECK-NEXT: movapd %xmm0, (%eax)
; CHECK-NEXT: ret
}
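In both t1 and t2 the change is the same: instead of holding the destination pointer in %ecx across the vector operations, the compiler reloads 4(%esp) into %eax afterwards, freeing a register. A plausible intrinsics shape for the two functions, reconstructed from the checks (function and parameter names invented; _mm_load_pd, _mm_loadl_pd, _mm_loadh_pd, and _mm_store_pd are the real SSE2 intrinsics behind movapd/movlpd/movhpd):

#include <emmintrin.h>

// Hypothetical reconstruction of t1: replace the low double of a loaded
// vector from memory (movlpd), then store the result.
void t1_like(__m128d *r, const double *v, const double *d) {
  __m128d x = _mm_load_pd(v);  // movapd (%eax), %xmm0
  x = _mm_loadl_pd(x, d);      // movlpd 12(%esp), %xmm0
  _mm_store_pd(reinterpret_cast<double *>(r), x);
}

// Hypothetical reconstruction of t2: same, but replacing the high double
// (movhpd) instead of the low one.
void t2_like(__m128d *r, const double *v, const double *d) {
  __m128d x = _mm_load_pd(v);
  x = _mm_loadh_pd(x, d);      // movhpd 12(%esp), %xmm0
  _mm_store_pd(reinterpret_cast<double *>(r), x);
}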
; X64: t0:
; X64: movddup (%rsi), %xmm0
-; X64: xorl %eax, %eax
; X64: pshuflw $0, %xmm0, %xmm0
+; X64: xorl %eax, %eax
; X64: pinsrw $0, %eax, %xmm0
; X64: movaps %xmm0, (%rdi)
; X64: ret
ret void
; X64: t10:
; X64: pextrw $4, %xmm0, %eax
-; X64: pextrw $6, %xmm0, %edx
; X64: movlhps %xmm1, %xmm1
; X64: pshuflw $8, %xmm1, %xmm1
; X64: pinsrw $2, %eax, %xmm1
-; X64: pinsrw $3, %edx, %xmm1
+; X64: pextrw $6, %xmm0, %eax
+; X64: pinsrw $3, %eax, %xmm1
}
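The t10 change is a register-reuse reordering: extracting element 6 is delayed until after the first pinsrw, so both extracted words can pass through the same scratch register (%eax) instead of also tying up %edx. A plausible intrinsics shape for the extract/insert traffic in t10 (names invented; the movlhps/pshuflw shuffle of %xmm1 is elided):

#include <emmintrin.h>

// Hypothetical reconstruction of t10's scalar traffic: two words pulled out
// of one vector (pextrw) and inserted into another (pinsrw); the compiler
// is free to interleave them so a single GPR suffices.
__m128i t10_like(__m128i a, __m128i b) {
  int lo = _mm_extract_epi16(a, 4); // pextrw $4, %xmm0, %eax
  int hi = _mm_extract_epi16(a, 6); // pextrw $6, %xmm0, %eax (reused)
  b = _mm_insert_epi16(b, lo, 2);   // pinsrw $2, %eax, %xmm1
  b = _mm_insert_epi16(b, hi, 3);   // pinsrw $3, %eax, %xmm1
  return b;
}

The t11 and t12 hunks below are the same kind of reordering: the movlhps shuffle swaps places with an independent movd or pextrw, and the checks are updated to match the new schedule.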
ret <8 x i16> %tmp7
; X64: t11:
-; X64: movlhps %xmm0, %xmm0
; X64: movd %xmm1, %eax
+; X64: movlhps %xmm0, %xmm0
; X64: pshuflw $1, %xmm0, %xmm0
; X64: pinsrw $1, %eax, %xmm0
; X64: ret
ret <8 x i16> %tmp9
; X64: t12:
-; X64: movlhps %xmm0, %xmm0
; X64: pextrw $3, %xmm1, %eax
+; X64: movlhps %xmm0, %xmm0
; X64: pshufhw $3, %xmm0, %xmm0
; X64: pinsrw $5, %eax, %xmm0
; X64: ret