//===---------------------------------------------------------------------===//
// Random ideas for the X86 backend: SSE-specific stuff.
//===---------------------------------------------------------------------===//

//===---------------------------------------------------------------------===//

SSE Variable shift can be custom lowered to something like this, which uses a
small table + unaligned load + shuffle instead of going through memory.

___m128i_shift_right:
        .byte    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15
        .byte   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1

__m128i shift_right(__m128i value, unsigned long offset) {
  return _mm_shuffle_epi8(value,
              _mm_loadu_si128((__m128i *) (___m128i_shift_right + offset)));
}

//===---------------------------------------------------------------------===//

SSE has instructions for doing operations on complex numbers; we should pattern
match them.  For example, this should turn into a horizontal add:

typedef float __attribute__((vector_size(16))) v4f32;

float f32(v4f32 A) {
  return A[0]+A[1]+A[2]+A[3];
}

Instead, we currently generate something like:

        pshufd  $1, %xmm0, %xmm1  ## xmm1 = xmm0[1,0,0,0]
        pshufd  $3, %xmm0, %xmm2  ## xmm2 = xmm0[3,0,0,0]
        movhlps %xmm0, %xmm0      ## xmm0 = xmm0[1,1]

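With SSE3, this maps naturally onto haddps; a hedged intrinsics sketch of the
horizontal sum (the helper name is made up for illustration):

#include <pmmintrin.h>

float hsum_sse3(__m128 a) {
  a = _mm_hadd_ps(a, a);   // (a0+a1, a2+a3, a0+a1, a2+a3)
  a = _mm_hadd_ps(a, a);   // full sum broadcast into every lane
  return _mm_cvtss_f32(a); // extract lane 0
}
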
Also, there are cases where some simple local SLP would improve codegen a bit.
Compiling this:

_Complex float f32(_Complex float A, _Complex float B) {
  return A+B;
}

into:

        pshufd  $1, %xmm1, %xmm1  ## xmm1 = xmm1[1,0,0,0]
        pshufd  $1, %xmm0, %xmm3  ## xmm3 = xmm0[1,0,0,0]
        unpcklps %xmm3, %xmm0     ## xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]

seems silly when it could just be one addps.

//===---------------------------------------------------------------------===//

Expand libm rounding functions inline: significant speedups possible.
http://gcc.gnu.org/ml/gcc-patches/2006-10/msg00909.html

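As a rough illustration of the kind of inline expansion meant here (an
assumption on our part, not the patch linked above): floor() can be done with a
truncating convert plus a fixup, provided the input fits in an i64 and NaN/Inf
are not a concern:

#include <emmintrin.h>

// Sketch only: assumes x86-64 and |x| < 2^63; no NaN/Inf/edge-case handling.
static inline double floor_inline(double x) {
  double t = (double)_mm_cvttsd_si64(_mm_set_sd(x)); // cvttsd2si truncates toward zero
  return t > x ? t - 1.0 : t;                        // adjust negative non-integers down
}
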
//===---------------------------------------------------------------------===//

When compiled with unsafemath enabled, "main" should enable SSE DAZ mode and
other fast SSE modes.

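A sketch of what that startup code could do (illustrative; 0x8040 is simply the
standard MXCSR FTZ | DAZ bit pair, not something this note specifies):

#include <xmmintrin.h>

// Set DAZ (bit 6) and FTZ (bit 15) in MXCSR so denormals are treated as zero.
static void enable_fast_sse_modes(void) {
  _mm_setcsr(_mm_getcsr() | 0x8040);
}
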
//===---------------------------------------------------------------------===//

Think about doing i64 math in SSE regs on x86-32.

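For illustration only (function name invented), paddq does the 64-bit add
entirely in an XMM register, avoiding the add/adc pair on x86-32:

#include <emmintrin.h>

long long add_i64(const long long *a, const long long *b) {
  __m128i va = _mm_loadl_epi64((const __m128i *)a); // movq load
  __m128i vb = _mm_loadl_epi64((const __m128i *)b);
  __m128i s  = _mm_add_epi64(va, vb);               // paddq
  long long r;
  _mm_storel_epi64((__m128i *)&r, s);               // movq store
  return r;
}
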
//===---------------------------------------------------------------------===//

This testcase should have no SSE instructions in it, and only one load from
a constant pool:

double %test3(bool %B) {
        %C = select bool %B, double 123.412, double 523.01123123
        ret double %C
}

Currently, the select is being lowered, which prevents the dag combiner from
turning 'select (load CPI1), (load CPI2)' -> 'load (select CPI1, CPI2)'.

The pattern isel got this one right.

//===---------------------------------------------------------------------===//

SSE should implement 'select_cc' using 'emulated conditional moves' that use
pcmp/pand/pandn/por to do a selection instead of a conditional branch:

double %X(double %Y, double %Z, double %A, double %B) {
        %C = setlt double %A, %B
        %z = fadd double %Z, 0.0    ;; select operand is not a load
        %D = select bool %C, double %Y, double %z
        ret double %D
}

We currently generate:

        addsd   24(%esp), %xmm0
        movsd   32(%esp), %xmm1
        movsd   16(%esp), %xmm2
        ucomisd 40(%esp), %xmm1

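A minimal sketch of the branchless idiom in SSE2 intrinsics (an illustration of
the idea with a made-up helper, not the proposed lowering itself):

#include <emmintrin.h>

double select_lt(double a, double b, double y, double z) {
  __m128d A = _mm_set_sd(a), B = _mm_set_sd(b);
  __m128d Y = _mm_set_sd(y), Z = _mm_set_sd(z);
  __m128d m = _mm_cmplt_sd(A, B);             // all-ones if a < b, else zero
  __m128d r = _mm_or_pd(_mm_and_pd(m, Y),     // take Y where the mask is set
                        _mm_andnot_pd(m, Z)); // take Z elsewhere
  return _mm_cvtsd_f64(r);
}
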
//===---------------------------------------------------------------------===//

Lower memcpy / memset to a series of SSE 128 bit move instructions when it's
feasible.

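For example, a 16-byte-aligned memset of a size that is a multiple of 16 could
become a short run of movdqa/movaps stores; a hedged sketch (helper name and
constraints are assumptions):

#include <emmintrin.h>
#include <stddef.h>

// Assumes p is 16-byte aligned and n is a multiple of 16.
void memset128(void *p, char v, size_t n) {
  __m128i fill = _mm_set1_epi8(v);
  for (size_t i = 0; i != n; i += 16)
    _mm_store_si128((__m128i *)((char *)p + i), fill);
}
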
//===---------------------------------------------------------------------===//

Codegen:
  if (copysign(1.0, x) == copysign(1.0, y))
into:
  if (x^y & mask)
when using SSE.

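In scalar C terms the integer form of that test just compares sign bits
(illustrative, for doubles):

#include <stdint.h>
#include <string.h>

int same_sign(double x, double y) {
  uint64_t xb, yb;
  memcpy(&xb, &x, sizeof xb);
  memcpy(&yb, &y, sizeof yb);
  return ((xb ^ yb) & 0x8000000000000000ULL) == 0;  // sign bits equal?
}
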
//===---------------------------------------------------------------------===//

Use movhps to update the upper 64 bits of a v4sf value. Also use movlps to
update the lower half.

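For reference, the intrinsics that map to these instructions (illustrative
usage; helper names invented):

#include <xmmintrin.h>

__m128 set_high(__m128 v, const float *p) {  // movhps: replace upper 64 bits
  return _mm_loadh_pi(v, (const __m64 *)p);
}
__m128 set_low(__m128 v, const float *p) {   // movlps: replace lower 64 bits
  return _mm_loadl_pi(v, (const __m64 *)p);
}
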
//===---------------------------------------------------------------------===//

Better codegen for vector_shuffles like this { x, 0, 0, 0 } or { x, 0, x, 0 }.
Perhaps use pxor / xorp* to clear an XMM register first?

//===---------------------------------------------------------------------===//

External test Nurbs exposed some problems. Look for
__ZN15Nurbs_SSE_Cubic17TessellateSurfaceE, bb cond_next140. This is what icc
is generating:

        movaps (%edx), %xmm2            #59.21
        movaps (%edx), %xmm5            #60.21
        movaps (%edx), %xmm4            #61.21
        movaps (%edx), %xmm3            #62.21
        movl 40(%ecx), %ebp             #69.49
        shufps $0, %xmm2, %xmm5         #60.21
        movl 100(%esp), %ebx            #69.20
        movl (%ebx), %edi               #69.20
        imull %ebp, %edi                #69.49
        addl (%eax), %edi               #70.33
        shufps $85, %xmm2, %xmm4        #61.21
        shufps $170, %xmm2, %xmm3       #62.21
        shufps $255, %xmm2, %xmm2       #63.21
        lea (%ebp,%ebp,2), %ebx         #69.49
        lea -3(%edi,%ebx), %ebx         #70.33
        addl 32(%ecx), %ebx             #68.37
        testb $15, %bl                  #91.13
        jne L_B1.24     # Prob 5%       #91.13

This is the llvm code after instruction scheduling:

cond_next140 (0xa910740, LLVM BB @0xa90beb0):
        %reg1078 = MOV32ri -3
        %reg1079 = ADD32rm %reg1078, %reg1068, 1, %NOREG, 0
        %reg1037 = MOV32rm %reg1024, 1, %NOREG, 40
        %reg1080 = IMUL32rr %reg1079, %reg1037
        %reg1081 = MOV32rm %reg1058, 1, %NOREG, 0
        %reg1038 = LEA32r %reg1081, 1, %reg1080, -3
        %reg1036 = MOV32rm %reg1024, 1, %NOREG, 32
        %reg1082 = SHL32ri %reg1038, 4
        %reg1039 = ADD32rr %reg1036, %reg1082
        %reg1083 = MOVAPSrm %reg1059, 1, %NOREG, 0
        %reg1034 = SHUFPSrr %reg1083, %reg1083, 170
        %reg1032 = SHUFPSrr %reg1083, %reg1083, 0
        %reg1035 = SHUFPSrr %reg1083, %reg1083, 255
        %reg1033 = SHUFPSrr %reg1083, %reg1083, 85
        %reg1040 = MOV32rr %reg1039
        %reg1084 = AND32ri8 %reg1039, 15
        JE mbb<cond_next204,0xa914d30>

Still ok. After register allocation:

cond_next140 (0xa910740, LLVM BB @0xa90beb0):
        %EDX = MOV32rm <fi#3>, 1, %NOREG, 0
        ADD32rm %EAX<def&use>, %EDX, 1, %NOREG, 0
        %EDX = MOV32rm <fi#7>, 1, %NOREG, 0
        %EDX = MOV32rm %EDX, 1, %NOREG, 40
        IMUL32rr %EAX<def&use>, %EDX
        %ESI = MOV32rm <fi#5>, 1, %NOREG, 0
        %ESI = MOV32rm %ESI, 1, %NOREG, 0
        MOV32mr <fi#4>, 1, %NOREG, 0, %ESI
        %EAX = LEA32r %ESI, 1, %EAX, -3
        %ESI = MOV32rm <fi#7>, 1, %NOREG, 0
        %ESI = MOV32rm %ESI, 1, %NOREG, 32
        SHL32ri %EDI<def&use>, 4
        ADD32rr %EDI<def&use>, %ESI
        %XMM0 = MOVAPSrm %ECX, 1, %NOREG, 0
        %XMM1 = MOVAPSrr %XMM0
        SHUFPSrr %XMM1<def&use>, %XMM1, 170
        %XMM2 = MOVAPSrr %XMM0
        SHUFPSrr %XMM2<def&use>, %XMM2, 0
        %XMM3 = MOVAPSrr %XMM0
        SHUFPSrr %XMM3<def&use>, %XMM3, 255
        SHUFPSrr %XMM0<def&use>, %XMM0, 85
        AND32ri8 %EBX<def&use>, 15
        JE mbb<cond_next204,0xa914d30>

This looks really bad. The problem is that shufps is a destructive opcode: since
the same value appears as operand two of more than one shufps, a number of
copies are required. Note that icc suffers from the same problem. Either the
instruction selector should select pshufd, or the register allocator should
perform the two-address to three-address transformation.

It also exposes some other problems. See MOV32ri -3 and the spills.

//===---------------------------------------------------------------------===//

Compile this:

__m128 test(float a) {
  return _mm_set_ps(0.0, 0.0, 0.0, a*a);
}

Currently we get:

_test:
        movss 4(%esp), %xmm0
        mulss %xmm0, %xmm0
        xorps %xmm1, %xmm1
        movss %xmm0, %xmm1
        movaps %xmm1, %xmm0
        ret

Because mulss doesn't modify the top 3 elements, the top elements of
xmm1 are already zero'd. We could compile this to:

_test:
        movss 4(%esp), %xmm0
        mulss %xmm0, %xmm0
        ret

//===---------------------------------------------------------------------===//

Here's a sick and twisted idea. Consider code like this:

__m128 test(__m128 a) {
  float b = *(float*)&a;
  ...
  return _mm_set_ps(0.0, 0.0, 0.0, b);
}

This might compile to this code:

        movaps c(%esp), %xmm1
        ...

Now consider if the ... code caused xmm1 to get spilled. This might produce
this code:

        movaps c(%esp), %xmm1
        movaps %xmm1, c2(%esp)
        ...

        movaps c2(%esp), %xmm1
        ...

However, since the reload is only used by these instructions, we could
"fold" it into the uses, producing something like this:

        movaps c(%esp), %xmm1
        movaps %xmm1, c2(%esp)
        ...

        movss c2(%esp), %xmm0
        ...

... saving two instructions.

The basic idea is that a reload from a spill slot, if only one 4-byte chunk
is used, can bring in 3 zeros and the one needed element instead of all 4
elements. This can be used to simplify a variety of shuffle operations, where
the elements are fixed zeros.

//===---------------------------------------------------------------------===//

This code generates ugly code, probably due to costs being off or something:

define void @test(float* %P, <4 x float>* %P2 ) {
        %xFloat0.688 = load float* %P
        %tmp = load <4 x float>* %P2
        %inFloat3.713 = insertelement <4 x float> %tmp, float 0.0, i32 3
        store <4 x float> %inFloat3.713, <4 x float>* %P2
        ret void
}

This compiles to:

        ...
        shufps $50, %xmm1, %xmm2
        shufps $132, %xmm2, %xmm0
        ...

Would it be better to generate:

        ...
        pinsrw $6, %eax, %xmm0
        pinsrw $7, %eax, %xmm0
        ...

//===---------------------------------------------------------------------===//

Some useful information in the Apple Altivec / SSE Migration Guide:

http://developer.apple.com/documentation/Performance/Conceptual/
Accelerate_sse_migration/index.html

e.g. SSE select using and, andnot, or. Various SSE compare translations.

//===---------------------------------------------------------------------===//

Add hooks to commute some CMPP operations.

//===---------------------------------------------------------------------===//

Apply the same transformation that merged four float loads into a single 128-bit
load to loads from the constant pool.

//===---------------------------------------------------------------------===//

Floating point max / min are commutable when -enable-unsafe-fp-path is
specified. We should turn int_x86_sse_max_ss and X86ISD::FMIN etc. into other
nodes which are selected to max / min instructions that are marked commutable.

//===---------------------------------------------------------------------===//

We should materialize vector constants like "all ones" and "signbit" with
code like:

        cmpeqps xmm1, xmm1   ; xmm1 = all-ones

and:

        cmpeqps xmm1, xmm1   ; xmm1 = all-ones
        pslld   xmm1, 31     ; xmm1 = all 100000000000...

instead of using a load from the constant pool. The latter is important for
ABS/NEG/copysign etc.

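The same trick in intrinsics form, as a sketch (compilers already materialize
_mm_set1_epi32(-1) as pcmpeqd, so no memory access is needed):

#include <emmintrin.h>

static inline __m128 signbit_mask_ps(void) {
  __m128i ones = _mm_set1_epi32(-1);                 // pcmpeqd x, x
  return _mm_castsi128_ps(_mm_slli_epi32(ones, 31)); // 0x80000000 in each lane
}
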
//===---------------------------------------------------------------------===//

These functions:

#include <xmmintrin.h>

__m128i a;

void x(unsigned short n) {
  a = _mm_slli_epi32 (a, n);
}
void y(unsigned n) {
  a = _mm_slli_epi32 (a, n);
}

When compiled (-O3 -static -fomit-frame-pointer), "y" looks good, but "x" does
silly movzwl stuff around into a GPR. It seems like movd would be sufficient in
both cases as the value is already zero extended in the 32-bit stack slot IIRC.
For signed short, it should also be safe, as a really-signed value would be
undefined for pslld.

//===---------------------------------------------------------------------===//

#include <math.h>
int t1(double d) { return signbit(d); }

This currently compiles to:

        movsd 16(%esp), %xmm0
        ...

We should use movmskp{s|d} instead.

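i.e. roughly this, expressed with SSE2 intrinsics (an illustration of the
movmskpd approach, not necessarily the exact code we would emit):

#include <emmintrin.h>

int signbit_double(double d) {
  return _mm_movemask_pd(_mm_set_sd(d)) & 1; // movmskpd extracts the sign bit
}
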
//===---------------------------------------------------------------------===//

CodeGen/X86/vec_align.ll tests whether we can turn 4 scalar loads into a single
(aligned) vector load. This functionality has a couple of problems.

1. The code to infer alignment from loads of globals is in the X86 backend,
   not the dag combiner. This is because dagcombine2 needs to be able to see
   through the X86ISD::Wrapper node, which DAGCombine can't really do.
2. The code for turning 4 x load into a single vector load is target
   independent and should be moved to the dag combiner.
3. The code for turning 4 x load into a vector load can only handle a direct
   load from a global or a direct load from the stack. It should be generalized
   to handle any load from P, P+4, P+8, P+12, where P can be anything.
4. The alignment inference code cannot handle loads from globals in non-static
   mode because it doesn't look through the extra dyld stub load. If you try
   vec_align.ll without -relocation-model=static, you'll see what I mean.

//===---------------------------------------------------------------------===//

We should lower store(fneg(load p), q) into an integer load+xor+store, which
eliminates a constant pool load. For example, consider:

define i64 @ccosf(float %z.0, float %z.1) nounwind readonly {
entry:
  %tmp6 = fsub float -0.000000e+00, %z.1          ; <float> [#uses=1]
  %tmp20 = tail call i64 @ccoshf( float %tmp6, float %z.0 ) nounwind readonly
  ret i64 %tmp20
}

declare i64 @ccoshf(float %z.0, float %z.1) nounwind readonly

This currently compiles to:

LCPI1_0:                                # <4 x float>
        .long 2147483648        # float -0
        .long 2147483648        # float -0
        .long 2147483648        # float -0
        .long 2147483648        # float -0
        ...
        movss 16(%esp), %xmm0
        ...
        movss 20(%esp), %xmm0
        xorps LCPI1_0, %xmm0
        ...

Note the load into xmm0, then xor (to negate), then store. In PIC mode,
this code computes the pic base and does two loads to do the constant pool
load, so the improvement is much bigger.

The tricky part about this xform is that the argument load/store isn't exposed
until post-legalize, and at that point, the fneg has been custom expanded into
an X86 fxor. This means that we need to handle this case in the x86 backend
instead of in target independent code.

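At the C level the transformation amounts to the following (illustrative only;
as noted above, the real work has to happen post-legalize in the backend):

#include <stdint.h>
#include <string.h>

void store_negated(float *q, const float *p) {
  uint32_t bits;
  memcpy(&bits, p, sizeof bits); // integer load
  bits ^= 0x80000000u;           // flip the sign bit; no constant-pool float needed
  memcpy(q, &bits, sizeof bits); // integer store
}
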
//===---------------------------------------------------------------------===//

Non-SSE4 insert into 16 x i8 is atrociously bad.

//===---------------------------------------------------------------------===//

<2 x i64> extract is substantially worse than <2 x f64>, even if the destination
is a GPR.

//===---------------------------------------------------------------------===//

SSE4 extract-to-mem ops aren't being pattern matched because of the AssertZext
sitting between the truncate and the extract.

//===---------------------------------------------------------------------===//

INSERTPS can match any insert (extract, imm1), imm2 for 4 x float, and insert
any number of 0.0 elements simultaneously. Currently we only use it for simple
insertions.

See comments in LowerINSERT_VECTOR_ELT_SSE4.

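For reference, the SSE4.1 intrinsic exposes both halves of the immediate; e.g.
this single insertps copies b[1] into a[2] and zeroes a[0] (illustrative
encoding, made-up function name):

#include <smmintrin.h>

__m128 insert_and_zero(__m128 a, __m128 b) {
  // imm8 = (source element << 6) | (dest element << 4) | zero mask
  return _mm_insert_ps(a, b, (1 << 6) | (2 << 4) | 0x1);
}
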
//===---------------------------------------------------------------------===//

On a random note, SSE2 should declare insert/extract of 2 x f64 as legal, not
Custom. All combinations of insert/extract reg-reg, reg-mem, and mem-reg are
legal, it'll just take a few extra patterns written in the .td file.

Note: this is not a code quality issue; the custom lowered code happens to be
right, but we shouldn't have to custom lower anything. This is probably related
to <2 x i64> ops being so bad.

//===---------------------------------------------------------------------===//

'select' on vectors and scalars could be a whole lot better. We currently
lower them to conditional branches. On x86-64, for example, we compile this:

double test(double a, double b, double c, double d) { return a<b ? c : d; }

For unpredictable branches, a branchless (cmp/and/andnot/or or cmov-style)
sequence is much more efficient. This should just be a matter of having scalar
sse map to SELECT_CC and custom expanding or iseling it.

//===---------------------------------------------------------------------===//

LLVM currently generates stack realignment code when it is not actually needed.
The problem is that we need to know about stack alignment too early, before
the register allocator runs.

At that point we don't know whether there will be vector spills or not.
Stack realignment logic is overly conservative here, but otherwise we could
end up producing unaligned loads/stores.

Fixing this will require some huge RA changes.

Testcase:

#include <emmintrin.h>

typedef short vSInt16 __attribute__ ((__vector_size__ (16)));

static const vSInt16 a = {- 22725, - 12873, - 22725, - 12873, - 22725, - 12873,
- 22725, - 12873};

vSInt16 madd(vSInt16 b)
{
    return _mm_madd_epi16(a, b);
}

Generated code (x86-32, linux):

        ...
        movaps .LCPI1_0, %xmm1
        ...

//===---------------------------------------------------------------------===//

#include <emmintrin.h>

__m128 foo2 (float x) {
  return _mm_set_ps (0, 0, x, 0);
}

In x86-32 mode, we generate this spiffy code:

        pshufd $81, %xmm0, %xmm0

In x86-64 mode, we generate this code, which could be better:

        pshufd $81, %xmm1, %xmm0

In sse4 mode, we could use insertps to make both better.

Here's another testcase that could use insertps [mem]:

#include <xmmintrin.h>

extern float x2, x3;

__m128 foo1 (float x1, float x4) {
  return _mm_set_ps (x2, x1, x3, x4);
}

gcc mainline compiles it to:

        insertps $0x10, x2(%rip), %xmm0
        insertps $0x10, x3(%rip), %xmm1

//===---------------------------------------------------------------------===//

We compile vector multiply-by-constant into poor code:

define <4 x i32> @f(<4 x i32> %i) nounwind {
  %A = mul <4 x i32> %i, < i32 10, i32 10, i32 10, i32 10 >
  ret <4 x i32> %A
}

On targets without SSE4.1, this compiles into:

LCPI1_0:                                ## <4 x i32>
        ...
        pshufd $3, %xmm0, %xmm1
        imull LCPI1_0+12, %eax
        pshufd $1, %xmm0, %xmm2
        imull LCPI1_0+4, %eax
        punpckldq %xmm1, %xmm2
        imull LCPI1_0+8, %eax
        punpckldq %xmm0, %xmm1
        punpckldq %xmm2, %xmm0
        ...

It would be better to synthesize integer vector multiplication by constants
using shifts and adds, pslld and paddd here. And even on targets with SSE4.1,
simple cases such as multiplication by powers of two would be better as
vector shifts than as multiplications.

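For the constant 10 above, the shift/add form would be something like this
sketch (hypothetical helper; 10*x == 8*x + 2*x):

#include <emmintrin.h>

__m128i mul_by_10(__m128i x) {
  // pslld + pslld + paddd instead of four scalar imulls.
  return _mm_add_epi32(_mm_slli_epi32(x, 3), _mm_slli_epi32(x, 1));
}
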
//===---------------------------------------------------------------------===//

We compile this:

#include <emmintrin.h>

__m128i foo2 (char x)
{
  return _mm_set_epi8 (1, 0, 0, 0, 0, 0, 0, 0, 0, x, 0, 1, 0, 0, 0, 0);
}

This currently compiles to:

        ...
        pinsrw $2, %eax, %xmm0
        ...
        pinsrw $3, %eax, %xmm0
        ...
        pinsrw $7, %eax, %xmm0
        ...
        movzbl 16(%esp), %eax
        ...
        pinsrw $3, %eax, %xmm0
        ...

With SSE4, it should be:

        movdqa .LC0(%rip), %xmm0
        pinsrb $6, %edi, %xmm0

//===---------------------------------------------------------------------===//

We should transform a shuffle of two vectors of constants into a single vector
of constants. Also, insertelement of a constant into a vector of constants
should result in a vector of constants. e.g. 2008-06-25-VecISelBug.ll.

We compiled it to something horrible:

        .long 1065353216        ## float 1
        ...
LCPI1_0:                                ## <4 x float>
        .long 1065353216        ## float 1
        .long 1065353216        ## float 1
        ...
        movhps LCPI1_0, %xmm0
        ...
        shufps $2, %xmm1, %xmm2
        shufps $132, %xmm2, %xmm0
        ...

//===---------------------------------------------------------------------===//

This:

float foo(unsigned char x) {
  return x;
}

compiles to (x86-32):

define float @foo(i8 zeroext %x) nounwind {
  %tmp12 = uitofp i8 %x to float          ; <float> [#uses=1]
  ret float %tmp12
}

_foo:
        ...
        movzbl 8(%esp), %eax
        movl %eax, (%esp)
        cvtsi2ss (%esp), %xmm0
        ...

We should be able to use:
        cvtsi2ss 8(%esp), %xmm0
since we know the stack slot is already zext'd.

//===---------------------------------------------------------------------===//

Consider using movlps instead of movsd to implement (scalar_to_vector (loadf64))
when code size is critical. movlps is slower than movsd on core2 but it's one
byte shorter.

//===---------------------------------------------------------------------===//

We should use a dynamic programming based approach to tell when using FPStack
operations is cheaper than SSE. SciMark montecarlo contains code like this:

double MonteCarlo_num_flops(int Num_samples) {
    return ((double) Num_samples)* 4.0;
}

In fpstack mode, this compiles into:

        .long 1082130432        ## float 4.000000e+00
_MonteCarlo_num_flops:
        ...

In SSE mode, it compiles into significantly slower code:

_MonteCarlo_num_flops:
        ...
        cvtsi2sd 16(%esp), %xmm0
        ...

There are also other cases in scimark where using fpstack is better: it is
cheaper to do fld1 than load from a constant pool, for example, so
"load, add 1.0, store" is better done in the fp stack, etc.

//===---------------------------------------------------------------------===//

The X86 backend should be able to if-convert SSE comparisons like "ucomisd" to
"cmpsd". For example, this code:

double d1(double x) { return x == x ? x : x + x; }

Also, the 'ret's should be shared. This is PR6032.

//===---------------------------------------------------------------------===//

These should compile into the same code (PR6214): Perhaps instcombine should
canonicalize the former into the latter?

define float @foo(float %x) nounwind {
  %t = bitcast float %x to i32
  %s = and i32 %t, 2147483647
  %d = bitcast i32 %s to float
  ret float %d
}

declare float @fabsf(float %n)
define float @bar(float %x) nounwind {
  %d = call float @fabsf(float %x)
  ret float %d
}

//===---------------------------------------------------------------------===//

This IR (from PR6194):

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-darwin10.0.0"

%0 = type { double, double }
%struct.float3 = type { float, float, float }

define void @test(%0, %struct.float3* nocapture %res) nounwind noinline ssp {
entry:
  %tmp18 = extractvalue %0 %0, 0                  ; <double> [#uses=1]
  %tmp19 = bitcast double %tmp18 to i64           ; <i64> [#uses=1]
  %tmp20 = zext i64 %tmp19 to i128                ; <i128> [#uses=1]
  %tmp10 = lshr i128 %tmp20, 32                   ; <i128> [#uses=1]
  %tmp11 = trunc i128 %tmp10 to i32               ; <i32> [#uses=1]
  %tmp12 = bitcast i32 %tmp11 to float            ; <float> [#uses=1]
  %tmp5 = getelementptr inbounds %struct.float3* %res, i64 0, i32 1 ; <float*> [#uses=1]
  store float %tmp12, float* %tmp5
  ret void
}

This would be better kept in the SSE unit by treating XMM0 as a 4xfloat and
doing a shuffle from v[1] to v[0] then a float store.

//===---------------------------------------------------------------------===//

On SSE4 machines, we compile this code:

define <2 x float> @test2(<2 x float> %Q, <2 x float> %R,
                          <2 x float> *%P) nounwind {
  %Z = fadd <2 x float> %Q, %R
  store <2 x float> %Z, <2 x float> *%P
  ret <2 x float> %Z
}

into:

        insertps $0, %xmm2, %xmm2
        insertps $16, %xmm3, %xmm2
        insertps $0, %xmm0, %xmm3
        insertps $16, %xmm1, %xmm3
        ...
        pshufd $1, %xmm3, %xmm1
        ## kill: XMM1<def> XMM1<kill>

The insertps's of $0 are pointless complex copies.

//===---------------------------------------------------------------------===//