1 //===---------------------------------------------------------------------===//
2 // Random ideas for the X86 backend: SSE-specific stuff.
3 //===---------------------------------------------------------------------===//
5 - Consider eliminating the unaligned SSE load intrinsics, replacing them with
6 unaligned LLVM load instructions.
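A rough sketch of what this buys us (the function name below is illustrative,
not from any testcase): today the intrinsic call survives as
llvm.x86.sse.loadu.ps, which most load optimizations don't understand; a plain
LLVM load of <4 x float> with align 1 would be subject to all the normal load
transformations.

#include <xmmintrin.h>

__m128 load_unaligned(const float *p) {
  /* Currently lowered via the loadu intrinsic; the proposal is to emit an
     ordinary vector load with alignment 1 instead, and pattern-match that
     back to movups. */
  return _mm_loadu_ps(p);
}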
8 //===---------------------------------------------------------------------===//
10 Expand libm rounding functions inline: Significant speedups possible.
11 http://gcc.gnu.org/ml/gcc-patches/2006-10/msg00909.html
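For example, a sketch assuming SSE4.1 is available (which gives a single
rounding instruction; the pre-SSE4.1 expansion in the GCC patch above is more
involved):

#include <smmintrin.h>

static double inline_floor(double x) {
  __m128d v = _mm_set_sd(x);
  /* roundsd with the round-down immediate replaces the libm call */
  return _mm_cvtsd_f64(_mm_floor_sd(v, v));
}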
13 //===---------------------------------------------------------------------===//
When compiled with unsafemath enabled, "main" should enable SSE DAZ mode and
other fast SSE modes.
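A sketch of what that prologue could do (the bit values are the standard MXCSR
FTZ/DAZ bits; DAZ requires a CPU that supports it):

#include <xmmintrin.h>

static void enable_fast_sse_modes(void) {
  /* Set flush-to-zero (bit 15) and denormals-are-zero (bit 6) in MXCSR. */
  _mm_setcsr(_mm_getcsr() | 0x8040);
}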
18 //===---------------------------------------------------------------------===//
20 Think about doing i64 math in SSE regs on x86-32.
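For instance, a 64-bit add can be one paddq instead of an add/adc pair through
two GPR pairs (sketch only; whether this wins depends on where the values
already live):

#include <emmintrin.h>
#include <stdint.h>

static int64_t add_i64_sse(int64_t a, int64_t b) {
  __m128i va = _mm_loadl_epi64((const __m128i *)&a);  /* movq */
  __m128i vb = _mm_loadl_epi64((const __m128i *)&b);  /* movq */
  __m128i vs = _mm_add_epi64(va, vb);                 /* paddq */
  int64_t r;
  _mm_storel_epi64((__m128i *)&r, vs);
  return r;
}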
22 //===---------------------------------------------------------------------===//
This testcase should have no SSE instructions in it, and only one load from
a constant pool:

double %test3(bool %B) {
        %C = select bool %B, double 123.412, double 523.01123123
        ret double %C
}
32 Currently, the select is being lowered, which prevents the dag combiner from
33 turning 'select (load CPI1), (load CPI2)' -> 'load (select CPI1, CPI2)'
35 The pattern isel got this one right.
37 //===---------------------------------------------------------------------===//
39 SSE doesn't have [mem] op= reg instructions. If we have an SSE instruction
44 and the register allocator decides to spill X, it is cheaper to emit this as:
...and this uses one fewer register (so this should be done at load folding
56 time, not at spiller time). *Note* however that this can only be done
57 if Y is dead. Here's a testcase:
59 @.str_3 = external global [15 x i8]
60 declare void @printf(i32, ...)
65 no_exit.i7: ; preds = %no_exit.i7, %build_tree.exit
66 %tmp.0.1.0.i9 = phi double [ 0.000000e+00, %build_tree.exit ],
67 [ %tmp.34.i18, %no_exit.i7 ]
68 %tmp.0.0.0.i10 = phi double [ 0.000000e+00, %build_tree.exit ],
69 [ %tmp.28.i16, %no_exit.i7 ]
70 %tmp.28.i16 = add double %tmp.0.0.0.i10, 0.000000e+00
71 %tmp.34.i18 = add double %tmp.0.1.0.i9, 0.000000e+00
72 br i1 false, label %Compute_Tree.exit23, label %no_exit.i7
74 Compute_Tree.exit23: ; preds = %no_exit.i7
75 tail call void (i32, ...)* @printf( i32 0 )
76 store double %tmp.34.i18, double* null
85 *** movsd %XMM2, QWORD PTR [%ESP + 8]
86 *** addsd %XMM2, %XMM1
87 *** movsd QWORD PTR [%ESP + 8], %XMM2
88 jmp .BBmain_1 # no_exit.i7
This is a bugpoint-reduced testcase, which is why it doesn't make
much sense (e.g. it's an infinite loop). :)
93 //===---------------------------------------------------------------------===//
95 SSE should implement 'select_cc' using 'emulated conditional moves' that use
96 pcmp/pand/pandn/por to do a selection instead of a conditional branch:
98 double %X(double %Y, double %Z, double %A, double %B) {
99 %C = setlt double %A, %B
100 %z = add double %Z, 0.0 ;; select operand is not a load
101 %D = select bool %C, double %Y, double %z
110 addsd 24(%esp), %xmm0
111 movsd 32(%esp), %xmm1
112 movsd 16(%esp), %xmm2
113 ucomisd 40(%esp), %xmm1
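For reference, the branch-free idiom written with intrinsics (a sketch of the
cmp/and/andnot/or selection, not of the lowering itself):

#include <emmintrin.h>

static __m128d select_lt(__m128d A, __m128d B, __m128d Y, __m128d Z) {
  __m128d mask = _mm_cmplt_pd(A, B);            /* all-ones where A < B */
  return _mm_or_pd(_mm_and_pd(mask, Y),         /* take Y where the mask is set */
                   _mm_andnot_pd(mask, Z));     /* take Z elsewhere */
}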
123 //===---------------------------------------------------------------------===//
125 It's not clear whether we should use pxor or xorps / xorpd to clear XMM
126 registers. The choice may depend on subtarget information. We should do some
127 more experiments on different x86 machines.
129 //===---------------------------------------------------------------------===//
Lower memcpy / memset to a series of SSE 128 bit move instructions when it's
feasible.
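Roughly what the lowering would produce, written as C (assumes the size is a
multiple of 16 and both pointers are 16-byte aligned; a real lowering has to
handle tails and unaligned cases):

#include <emmintrin.h>
#include <stddef.h>

static void copy16(char *dst, const char *src, size_t bytes) {
  for (size_t i = 0; i + 16 <= bytes; i += 16) {
    __m128i v = _mm_load_si128((const __m128i *)(src + i));  /* movaps/movdqa */
    _mm_store_si128((__m128i *)(dst + i), v);
  }
}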
134 //===---------------------------------------------------------------------===//
137 if (copysign(1.0, x) == copysign(1.0, y))
142 //===---------------------------------------------------------------------===//
144 Use movhps to update upper 64-bits of a v4sf value. Also movlps on lower half
147 //===---------------------------------------------------------------------===//
Better codegen for vector_shuffles like this { x, 0, 0, 0 } or { x, 0, x, 0 }.
Perhaps use pxor / xorp* to clear an XMM register first?
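In intrinsics, the suggested sequence for { x, 0, 0, 0 } is just a clear plus a
scalar move (sketch; x is assumed to already be in an XMM register):

#include <xmmintrin.h>

static __m128 low_elt_zero_rest(__m128 x) {
  return _mm_move_ss(_mm_setzero_ps(), x);   /* xorps + movss */
}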
152 //===---------------------------------------------------------------------===//
How to decide when to use the "floating point version" of logical ops? Here are
some code fragments:
157 movaps LCPI5_5, %xmm2
160 mulps 8656(%ecx), %xmm3
161 addps 8672(%ecx), %xmm3
167 movaps LCPI5_5, %xmm1
170 mulps 8656(%ecx), %xmm3
171 addps 8672(%ecx), %xmm3
175 movaps %xmm3, 112(%esp)
Due to some minor source change, the latter case ended up using orps and movaps
179 instead of por and movdqa. Does it matter?
181 //===---------------------------------------------------------------------===//
X86RegisterInfo::copyRegToReg() returns X86::MOVAPSrr for VR128. Is it possible
to choose between movaps, movapd, and movdqa based on the types of the source
and destination?

How about andps, andpd, and pand? Do we really care about the type of the packed
elements? If not, why not always use the "ps" variants, which are likely to be
shorter?
191 //===---------------------------------------------------------------------===//
193 External test Nurbs exposed some problems. Look for
__ZN15Nurbs_SSE_Cubic17TessellateSurfaceE, bb cond_next140. This is what icc
generates:
197 movaps (%edx), %xmm2 #59.21
198 movaps (%edx), %xmm5 #60.21
199 movaps (%edx), %xmm4 #61.21
200 movaps (%edx), %xmm3 #62.21
201 movl 40(%ecx), %ebp #69.49
202 shufps $0, %xmm2, %xmm5 #60.21
203 movl 100(%esp), %ebx #69.20
204 movl (%ebx), %edi #69.20
205 imull %ebp, %edi #69.49
206 addl (%eax), %edi #70.33
207 shufps $85, %xmm2, %xmm4 #61.21
208 shufps $170, %xmm2, %xmm3 #62.21
209 shufps $255, %xmm2, %xmm2 #63.21
210 lea (%ebp,%ebp,2), %ebx #69.49
212 lea -3(%edi,%ebx), %ebx #70.33
214 addl 32(%ecx), %ebx #68.37
215 testb $15, %bl #91.13
216 jne L_B1.24 # Prob 5% #91.13
218 This is the llvm code after instruction scheduling:
220 cond_next140 (0xa910740, LLVM BB @0xa90beb0):
221 %reg1078 = MOV32ri -3
222 %reg1079 = ADD32rm %reg1078, %reg1068, 1, %NOREG, 0
223 %reg1037 = MOV32rm %reg1024, 1, %NOREG, 40
224 %reg1080 = IMUL32rr %reg1079, %reg1037
225 %reg1081 = MOV32rm %reg1058, 1, %NOREG, 0
226 %reg1038 = LEA32r %reg1081, 1, %reg1080, -3
227 %reg1036 = MOV32rm %reg1024, 1, %NOREG, 32
228 %reg1082 = SHL32ri %reg1038, 4
229 %reg1039 = ADD32rr %reg1036, %reg1082
230 %reg1083 = MOVAPSrm %reg1059, 1, %NOREG, 0
231 %reg1034 = SHUFPSrr %reg1083, %reg1083, 170
232 %reg1032 = SHUFPSrr %reg1083, %reg1083, 0
233 %reg1035 = SHUFPSrr %reg1083, %reg1083, 255
234 %reg1033 = SHUFPSrr %reg1083, %reg1083, 85
235 %reg1040 = MOV32rr %reg1039
236 %reg1084 = AND32ri8 %reg1039, 15
238 JE mbb<cond_next204,0xa914d30>
240 Still ok. After register allocation:
242 cond_next140 (0xa910740, LLVM BB @0xa90beb0):
244 %EDX = MOV32rm <fi#3>, 1, %NOREG, 0
245 ADD32rm %EAX<def&use>, %EDX, 1, %NOREG, 0
246 %EDX = MOV32rm <fi#7>, 1, %NOREG, 0
247 %EDX = MOV32rm %EDX, 1, %NOREG, 40
248 IMUL32rr %EAX<def&use>, %EDX
249 %ESI = MOV32rm <fi#5>, 1, %NOREG, 0
250 %ESI = MOV32rm %ESI, 1, %NOREG, 0
251 MOV32mr <fi#4>, 1, %NOREG, 0, %ESI
252 %EAX = LEA32r %ESI, 1, %EAX, -3
253 %ESI = MOV32rm <fi#7>, 1, %NOREG, 0
254 %ESI = MOV32rm %ESI, 1, %NOREG, 32
256 SHL32ri %EDI<def&use>, 4
257 ADD32rr %EDI<def&use>, %ESI
258 %XMM0 = MOVAPSrm %ECX, 1, %NOREG, 0
259 %XMM1 = MOVAPSrr %XMM0
260 SHUFPSrr %XMM1<def&use>, %XMM1, 170
261 %XMM2 = MOVAPSrr %XMM0
262 SHUFPSrr %XMM2<def&use>, %XMM2, 0
263 %XMM3 = MOVAPSrr %XMM0
264 SHUFPSrr %XMM3<def&use>, %XMM3, 255
265 SHUFPSrr %XMM0<def&use>, %XMM0, 85
267 AND32ri8 %EBX<def&use>, 15
269 JE mbb<cond_next204,0xa914d30>
This looks really bad. The problem is that shufps is a destructive opcode: since
the same source register appears as operand two of more than one shufps, a
number of copies are needed. Note that icc suffers from the same problem. Either
the instruction selector should select pshufd, or the register allocator should
perform the two-address to three-address transformation.
277 It also exposes some other problems. See MOV32ri -3 and the spills.
279 //===---------------------------------------------------------------------===//
281 http://gcc.gnu.org/bugzilla/show_bug.cgi?id=25500
283 LLVM is producing bad code.
285 LBB_main_4: # cond_true44
296 jne LBB_main_4 # cond_true44
There are two problems: 1) There is no need for two loop induction variables; we
can compare against 262144 * 16. 2) A known register coalescer issue; we should
be able to eliminate one of the movaps:
302 addps %xmm2, %xmm1 <=== Commute!
305 movaps %xmm1, %xmm1 <=== Eliminate!
312 jne LBB_main_4 # cond_true44
314 //===---------------------------------------------------------------------===//
318 __m128 test(float a) {
319 return _mm_set_ps(0.0, 0.0, 0.0, a*a);
330 Because mulss doesn't modify the top 3 elements, the top elements of
331 xmm1 are already zero'd. We could compile this to:
337 //===---------------------------------------------------------------------===//
339 Here's a sick and twisted idea. Consider code like this:
341 __m128 test(__m128 a) {
  float b = *(float*)&a;
344 return _mm_set_ps(0.0, 0.0, 0.0, b);
347 This might compile to this code:
349 movaps c(%esp), %xmm1
354 Now consider if the ... code caused xmm1 to get spilled. This might produce
357 movaps c(%esp), %xmm1
358 movaps %xmm1, c2(%esp)
362 movaps c2(%esp), %xmm1
366 However, since the reload is only used by these instructions, we could
367 "fold" it into the uses, producing something like this:
369 movaps c(%esp), %xmm1
370 movaps %xmm1, c2(%esp)
373 movss c2(%esp), %xmm0
376 ... saving two instructions.
The basic idea is that a reload from a spill slot can, if only one 4-byte
chunk is used, bring in three zeros plus the one needed element instead of all
four elements.
380 This can be used to simplify a variety of shuffle operations, where the
381 elements are fixed zeros.
383 //===---------------------------------------------------------------------===//
385 __m128d test1( __m128d A, __m128d B) {
386 return _mm_shuffle_pd(A, B, 0x3);
391 shufpd $3, %xmm1, %xmm0
393 Perhaps it's better to use unpckhpd instead?
395 unpckhpd %xmm1, %xmm0
397 Don't know if unpckhpd is faster. But it is shorter.
399 //===---------------------------------------------------------------------===//
401 This code generates ugly code, probably due to costs being off or something:
403 define void @test(float* %P, <4 x float>* %P2 ) {
404 %xFloat0.688 = load float* %P
405 %tmp = load <4 x float>* %P2
406 %inFloat3.713 = insertelement <4 x float> %tmp, float 0.0, i32 3
407 store <4 x float> %inFloat3.713, <4 x float>* %P2
418 shufps $50, %xmm1, %xmm2
419 shufps $132, %xmm2, %xmm0
423 Would it be better to generate:
429 pinsrw $6, %eax, %xmm0
430 pinsrw $7, %eax, %xmm0
436 //===---------------------------------------------------------------------===//
438 Some useful information in the Apple Altivec / SSE Migration Guide:
440 http://developer.apple.com/documentation/Performance/Conceptual/
441 Accelerate_sse_migration/index.html
443 e.g. SSE select using and, andnot, or. Various SSE compare translations.
445 //===---------------------------------------------------------------------===//
447 Add hooks to commute some CMPP operations.
449 //===---------------------------------------------------------------------===//
Apply the same transformation that merged four float loads into a single 128-bit
load to loads from the constant pool.
454 //===---------------------------------------------------------------------===//
456 Floating point max / min are commutable when -enable-unsafe-fp-path is
457 specified. We should turn int_x86_sse_max_ss and X86ISD::FMIN etc. into other
458 nodes which are selected to max / min instructions that are marked commutable.
460 //===---------------------------------------------------------------------===//
462 We should materialize vector constants like "all ones" and "signbit" with
465 cmpeqps xmm1, xmm1 ; xmm1 = all-ones
cmpeqps xmm1, xmm1 ; xmm1 = all-ones
pslld xmm1, 31 ; xmm1 = all 100000000000... (sign bit in each dword)

instead of using a load from the constant pool. The latter is important for
ABS/NEG/copysign etc.
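The second sequence, written with intrinsics (a sketch of the idiom, not of the
materialization code itself):

#include <emmintrin.h>

static __m128 signbit_mask(void) {
  __m128i ones = _mm_cmpeq_epi32(_mm_setzero_si128(),
                                 _mm_setzero_si128());   /* pcmpeqd: all-ones */
  return _mm_castsi128_ps(_mm_slli_epi32(ones, 31));     /* 0x80000000 per lane */
}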
474 //===---------------------------------------------------------------------===//
478 #include <xmmintrin.h>
__m128i a;
void x(unsigned short n) {
481 a = _mm_slli_epi32 (a, n);
}
void y(unsigned n) {
  a = _mm_slli_epi32 (a, n);
}
487 compile to ( -O3 -static -fomit-frame-pointer):
502 "y" looks good, but "x" does silly movzwl stuff around into a GPR. It seems
503 like movd would be sufficient in both cases as the value is already zero
extended in the 32-bit stack slot IIRC. For signed short, it should also be
safe, as a genuinely negative value would produce an undefined shift count for
pslld anyway.
508 //===---------------------------------------------------------------------===//
511 int t1(double d) { return signbit(d); }
513 This currently compiles to:
515 movsd 16(%esp), %xmm0
522 We should use movmskp{s|d} instead.
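With intrinsics, the suggested lowering looks like this (sketch; the helper
name is made up):

#include <emmintrin.h>

static int signbit_via_movmsk(double d) {
  /* movmskpd extracts the sign bits of both lanes; bit 0 is the low lane. */
  return _mm_movemask_pd(_mm_set_sd(d)) & 1;
}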
524 //===---------------------------------------------------------------------===//
526 CodeGen/X86/vec_align.ll tests whether we can turn 4 scalar loads into a single
527 (aligned) vector load. This functionality has a couple of problems.
529 1. The code to infer alignment from loads of globals is in the X86 backend,
530 not the dag combiner. This is because dagcombine2 needs to be able to see
531 through the X86ISD::Wrapper node, which DAGCombine can't really do.
532 2. The code for turning 4 x load into a single vector load is target
533 independent and should be moved to the dag combiner.
534 3. The code for turning 4 x load into a vector load can only handle a direct
535 load from a global or a direct load from the stack. It should be generalized
536 to handle any load from P, P+4, P+8, P+12, where P can be anything.
537 4. The alignment inference code cannot handle loads from globals in non-static
538 mode because it doesn't look through the extra dyld stub load. If you try
539 vec_align.ll without -relocation-model=static, you'll see what I mean.
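The kind of pattern item 3 wants generalized, as a standalone sketch
(hypothetical helper, not the vec_align.ll testcase): four adjacent scalar
loads from P, P+4, P+8, P+12 that ought to become one 16-byte vector load when
P is sufficiently aligned.

#include <xmmintrin.h>

static __m128 load4(const float *P) {
  return _mm_set_ps(P[3], P[2], P[1], P[0]);   /* ideally a single movaps */
}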
541 //===---------------------------------------------------------------------===//
543 We should lower store(fneg(load p), q) into an integer load+xor+store, which
544 eliminates a constant pool load. For example, consider:
546 define i64 @ccosf(float %z.0, float %z.1) nounwind readonly {
548 %tmp6 = sub float -0.000000e+00, %z.1 ; <float> [#uses=1]
549 %tmp20 = tail call i64 @ccoshf( float %tmp6, float %z.0 ) nounwind readonly
553 This currently compiles to:
555 LCPI1_0: # <4 x float>
556 .long 2147483648 # float -0
557 .long 2147483648 # float -0
558 .long 2147483648 # float -0
559 .long 2147483648 # float -0
562 movss 16(%esp), %xmm0
564 movss 20(%esp), %xmm0
571 Note the load into xmm0, then xor (to negate), then store. In PIC mode,
572 this code computes the pic base and does two loads to do the constant pool
573 load, so the improvement is much bigger.
575 The tricky part about this xform is that the argument load/store isn't exposed
576 until post-legalize, and at that point, the fneg has been custom expanded into
577 an X86 fxor. This means that we need to handle this case in the x86 backend
578 instead of in target independent code.
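The transform itself, sketched at the C level (the constant is the usual
IEEE-754 single-precision sign mask):

#include <stdint.h>
#include <string.h>

static void store_fneg(const float *p, float *q) {
  uint32_t bits;
  memcpy(&bits, p, sizeof bits);   /* integer load      */
  bits ^= 0x80000000u;             /* flip the sign bit */
  memcpy(q, &bits, sizeof bits);   /* integer store     */
}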
580 //===---------------------------------------------------------------------===//
582 Non-SSE4 insert into 16 x i8 is atrociously bad.
584 //===---------------------------------------------------------------------===//
586 <2 x i64> extract is substantially worse than <2 x f64>, even if the destination
589 //===---------------------------------------------------------------------===//
591 SSE4 extract-to-mem ops aren't being pattern matched because of the AssertZext
592 sitting between the truncate and the extract.
594 //===---------------------------------------------------------------------===//
596 INSERTPS can match any insert (extract, imm1), imm2 for 4 x float, and insert
any number of 0.0 simultaneously. Currently we only use it for simple
insertions.
600 See comments in LowerINSERT_VECTOR_ELT_SSE4.
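An example of the flexibility (sketch using the SSE4.1 intrinsic; the field
choices are arbitrary): one insertps can pull element 1 of b, drop it into
element 2 of a, and zero element 3, all from one immediate.

#include <smmintrin.h>

static __m128 insert_and_zero(__m128 a, __m128 b) {
  return _mm_insert_ps(a, b, _MM_MK_INSERTPS_NDX(1, 2, 0x8));
}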
602 //===---------------------------------------------------------------------===//
604 On a random note, SSE2 should declare insert/extract of 2 x f64 as legal, not
Custom. All combinations of insert/extract reg-reg, reg-mem, and mem-reg are
legal; it'll just take a few extra patterns written in the .td file.
608 Note: this is not a code quality issue; the custom lowered code happens to be
609 right, but we shouldn't have to custom lower anything. This is probably related
610 to <2 x i64> ops being so bad.
612 //===---------------------------------------------------------------------===//
614 'select' on vectors and scalars could be a whole lot better. We currently
615 lower them to conditional branches. On x86-64 for example, we compile this:
617 double test(double a, double b, double c, double d) { return a<b ? c : d; }
For unpredictable branches, the latter is much more efficient. This should
just be a matter of having scalar sse map to SELECT_CC and custom expanding
or iseling it.
643 //===---------------------------------------------------------------------===//
LLVM currently generates stack realignment code when it is not actually needed.
The problem is that we need to know about stack alignment too early, before RA
runs. At that point we don't know whether there will be any vector spills or not.
650 Stack realignment logic is overly conservative here, but otherwise we can
651 produce unaligned loads/stores.
653 Fixing this will require some huge RA changes.
656 #include <emmintrin.h>
658 typedef short vSInt16 __attribute__ ((__vector_size__ (16)));
static const vSInt16 a = {- 22725, - 12873, - 22725, - 12873, - 22725, - 12873,
- 22725, - 12873};
663 vSInt16 madd(vSInt16 b)
665 return _mm_madd_epi16(a, b);
668 Generated code (x86-32, linux):
673 movaps .LCPI1_0, %xmm1
679 //===---------------------------------------------------------------------===//
682 #include <emmintrin.h>
683 __m128 foo2 (float x) {
684 return _mm_set_ps (0, 0, x, 0);
687 In x86-32 mode, we generate this spiffy code:
691 pshufd $81, %xmm0, %xmm0
694 in x86-64 mode, we generate this code, which could be better:
699 pshufd $81, %xmm1, %xmm0
702 In sse4 mode, we could use insertps to make both better.
704 Here's another testcase that could use insertps [mem]:
706 #include <xmmintrin.h>
extern float x2, x3;
__m128 foo1 (float x1, float x4) {
709 return _mm_set_ps (x2, x1, x3, x4);
712 gcc mainline compiles it to:
715 insertps $0x10, x2(%rip), %xmm0
716 insertps $0x10, x3(%rip), %xmm1
722 //===---------------------------------------------------------------------===//
724 We compile vector multiply-by-constant into poor code:
726 define <4 x i32> @f(<4 x i32> %i) nounwind {
727 %A = mul <4 x i32> %i, < i32 10, i32 10, i32 10, i32 10 >
731 On targets without SSE4.1, this compiles into:
733 LCPI1_0: ## <4 x i32>
742 pshufd $3, %xmm0, %xmm1
744 imull LCPI1_0+12, %eax
746 pshufd $1, %xmm0, %xmm2
748 imull LCPI1_0+4, %eax
750 punpckldq %xmm1, %xmm2
756 imull LCPI1_0+8, %eax
758 punpckldq %xmm0, %xmm1
760 punpckldq %xmm2, %xmm0
763 It would be better to synthesize integer vector multiplication by constants
764 using shifts and adds, pslld and paddd here. And even on targets with SSE4.1,
765 simple cases such as multiplication by powers of two would be better as
766 vector shifts than as multiplications.
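For the multiply-by-10 case above, the shift/add expansion is just (sketch):

#include <emmintrin.h>

static __m128i mul_by_10(__m128i x) {
  /* x*10 == (x<<3) + (x<<1): two pslld and one paddd */
  return _mm_add_epi32(_mm_slli_epi32(x, 3), _mm_slli_epi32(x, 1));
}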
768 //===---------------------------------------------------------------------===//
775 return _mm_set_epi8 (1, 0, 0, 0, 0, 0, 0, 0, 0, x, 0, 1, 0, 0, 0, 0);
781 pinsrw $2, %eax, %xmm0
783 pinsrw $3, %eax, %xmm0
785 pinsrw $7, %eax, %xmm0
791 movzbl 16(%esp), %eax
793 pinsrw $3, %eax, %xmm0
808 With SSE4, it should be
809 movdqa .LC0(%rip), %xmm0
810 pinsrb $6, %edi, %xmm0
812 //===---------------------------------------------------------------------===//
We should transform a shuffle of two vectors of constants into a single vector
of constants. Similarly, insertelement of a constant into a vector of constants
should result in a vector of constants; e.g. 2008-06-25-VecISelBug.ll.
818 We compiled it to something horrible:
822 .long 1065353216 ## float 1
826 LCPI1_0: ## <4 x float>
828 .long 1065353216 ## float 1
830 .long 1065353216 ## float 1
836 movhps LCPI1_0, %xmm0
839 shufps $2, %xmm1, %xmm2
840 shufps $132, %xmm2, %xmm0
843 //===---------------------------------------------------------------------===//
846 llvm-gcc-4.2 does the following for uint32_t -> float conversions on i386:
853 movl %eax, -8(%ebp) // write x to the stack
movl $0x43300000, -4(%ebp) // 2^52 + x as a double at -8(%ebp)
855 movsd -8(%ebp), %xmm0
856 subsd [2^52 double], %xmm0 // subtract 2^52 -- this is exact
857 cvtsd2ss %xmm0, %xmm0 // convert to single -- rounding happens here
On merom/yonah, this takes a substantial stall. The following is a much
better sequence:
862 movd %eax, %xmm0 // load x into low word of xmm0
863 movsd [2^52 double], %xmm1 // load 2^52 into xmm1
864 orpd %xmm1, %xmm0 // 2^52 + x in double precision
865 subsd %xmm1, %xmm0 // x in double precision
866 cvtsd2ss %xmm0, %xmm0 // x rounded to single precision
If we don't already need PIC, then the following is faster still, at a
small cost to code size:
movl $0x43300000, %ecx // conjure high word of 2^52
movd %ecx, %xmm1 // move it into the low dword of xmm1
movd %eax, %xmm0 // load x into low word of xmm0
psllq $32, %xmm1 // 2^52
875 orpd %xmm1, %xmm0 // 2^52 + x in double precision
876 subsd %xmm1, %xmm0 // x in double precision
877 cvtsd2ss %xmm0, %xmm0 // x in single precision
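The same trick at the C level, for reference (relies on the usual union-based
bit punning; 0x4330000000000000 is the bit pattern of 2^52):

#include <stdint.h>

static float u32_to_float(uint32_t x) {
  union { uint64_t i; double d; } u;
  u.i = 0x4330000000000000ull | x;    /* 2^52 + x, exactly representable */
  return (float)(u.d - 0x1p52);       /* exact subtract, one rounding in the cast */
}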
879 //===---------------------------------------------------------------------===//
float foo(unsigned char x) {
  return x;
}
888 compiles to (x86-32):
890 define float @foo(i8 zeroext %x) nounwind {
891 %tmp12 = uitofp i8 %x to float ; <float> [#uses=1]
906 We should be able to use:
cvtsi2ss 8(%esp), %xmm0
908 since we know the stack slot is already zext'd.
910 //===---------------------------------------------------------------------===//
912 Consider using movlps instead of movsd to implement (scalar_to_vector (loadf64))
when code size is critical. movlps is slower than movsd on core2 but it's one
byte shorter.
916 //===---------------------------------------------------------------------===//
918 We should use a dynamic programming based approach to tell when using FPStack
919 operations is cheaper than SSE. SciMark montecarlo contains code like this
922 double MonteCarlo_num_flops(int Num_samples) {
923 return ((double) Num_samples)* 4.0;
926 In fpstack mode, this compiles into:
929 .long 1082130432 ## float 4.000000e+00
930 _MonteCarlo_num_flops:
939 in SSE mode, it compiles into significantly slower code:
941 _MonteCarlo_num_flops:
943 cvtsi2sd 16(%esp), %xmm0
There are also other cases in scimark where using fpstack is better: it is
cheaper to do fld1 than to load from a constant pool, for example, so
"load, add 1.0, store" is better done on the fp stack, etc.
954 //===---------------------------------------------------------------------===//