//===---------------------------------------------------------------------===//
// Random ideas for the X86 backend: SSE-specific stuff.
//===---------------------------------------------------------------------===//

//===---------------------------------------------------------------------===//

Expand libm rounding functions inline: Significant speedups possible.
http://gcc.gnu.org/ml/gcc-patches/2006-10/msg00909.html
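
As an illustration of the idea (not taken from the GCC thread above), floor()
on values that fit in a 64-bit integer can be expanded to a truncating
cvttsd2si/cvtsi2sd round trip plus a fixup; this sketch ignores NaNs,
infinities and out-of-range inputs, and the helper name is made up:

#include <stdint.h>

/* Hypothetical inline expansion of floor(); assumes |x| < 2^63 and x is not NaN. */
static double floor_inline(double x) {
  double t = (double)(int64_t)x;   /* truncates toward zero (cvttsd2si + cvtsi2sd) */
  return (t > x) ? t - 1.0 : t;    /* fix up negative non-integral inputs */
}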

//===---------------------------------------------------------------------===//

When compiled with unsafemath enabled, "main" should enable SSE DAZ mode and
other fast SSE modes.
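
For reference, a minimal sketch of what such prologue code could do with the
MXCSR control register, using the standard xmmintrin/pmmintrin macros (the
helper name is made up):

#include <xmmintrin.h>   /* _MM_SET_FLUSH_ZERO_MODE */
#include <pmmintrin.h>   /* _MM_SET_DENORMALS_ZERO_MODE */

/* Treat denormal inputs as zero (DAZ) and flush denormal results to zero (FTZ). */
static void enable_fast_sse_modes(void) {
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
}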

//===---------------------------------------------------------------------===//

Think about doing i64 math in SSE regs.
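
For example, SSE2 can add two i64 values with a single paddq in an XMM register
instead of an add/adc pair on GPRs. A small illustration with intrinsics (not
from an existing testcase):

#include <emmintrin.h>

/* i64 addition done entirely in an XMM register via paddq. */
static long long add_i64_sse(const long long *a, const long long *b) {
  __m128i va = _mm_loadl_epi64((const __m128i *)a);   /* movq [mem] -> low 64 bits */
  __m128i vb = _mm_loadl_epi64((const __m128i *)b);
  long long r;
  _mm_storel_epi64((__m128i *)&r, _mm_add_epi64(va, vb));
  return r;
}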

//===---------------------------------------------------------------------===//

Bitcast to<->from SSE registers should use movd/movq instead of going through
the stack. Testcase here: CodeGen/X86/bitcast.ll
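
A trivial C version of the kind of bitcast in question (hypothetical function,
just for illustration); ideally it lowers to a single movd rather than a
store/reload through the stack:

#include <string.h>

/* float -> int bitcast; should become one movd, not a stack round trip. */
static int float_bits(float f) {
  int i;
  memcpy(&i, &f, sizeof i);
  return i;
}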

//===---------------------------------------------------------------------===//

This testcase should have no SSE instructions in it, and only one load from
a constant pool:

double %test3(bool %B) {
  %C = select bool %B, double 123.412, double 523.01123123
  ret double %C
}

Currently, the select is being lowered, which prevents the dag combiner from
turning 'select (load CPI1), (load CPI2)' -> 'load (select CPI1, CPI2)'

The pattern isel got this one right.

//===---------------------------------------------------------------------===//

SSE doesn't have [mem] op= reg instructions. If we have an SSE instruction
and the register allocator decides to spill X, it is cheaper to emit this as:

..and this uses one fewer register (so this should be done at load folding
time, not at spiller time). *Note* however that this can only be done
if Y is dead. Here's a testcase:

%.str_3 = external global [15 x sbyte] ; <[15 x sbyte]*> [#uses=0]
implementation ; Functions:
declare void %printf(int, ...)

no_exit.i7:             ; preds = %no_exit.i7, %build_tree.exit
  %tmp.0.1.0.i9 = phi double [ 0.000000e+00, %build_tree.exit ], [ %tmp.34.i18, %no_exit.i7 ] ; <double> [#uses=1]
  %tmp.0.0.0.i10 = phi double [ 0.000000e+00, %build_tree.exit ], [ %tmp.28.i16, %no_exit.i7 ] ; <double> [#uses=1]
  %tmp.28.i16 = add double %tmp.0.0.0.i10, 0.000000e+00
  %tmp.34.i18 = add double %tmp.0.1.0.i9, 0.000000e+00
  br bool false, label %Compute_Tree.exit23, label %no_exit.i7

Compute_Tree.exit23:            ; preds = %no_exit.i7
  tail call void (int, ...)* %printf( int 0 )
  store double %tmp.34.i18, double* null

*** movsd %XMM2, QWORD PTR [%ESP + 8]
*** addsd %XMM2, %XMM1
*** movsd QWORD PTR [%ESP + 8], %XMM2
  jmp .BBmain_1 # no_exit.i7

This is a bugpoint-reduced testcase, which is why the testcase doesn't make
much sense (e.g. it's an infinite loop). :)

//===---------------------------------------------------------------------===//

SSE should implement 'select_cc' using 'emulated conditional moves' that use
pcmp/pand/pandn/por to do a selection instead of a conditional branch:

double %X(double %Y, double %Z, double %A, double %B) {
  %C = setlt double %A, %B
  %z = add double %Z, 0.0    ;; select operand is not a load
  %D = select bool %C, double %Y, double %z
  ret double %D
}

  addsd 24(%esp), %xmm0
  movsd 32(%esp), %xmm1
  movsd 16(%esp), %xmm2
  ucomisd 40(%esp), %xmm1
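
A sketch of the branch-free selection idiom with SSE2 intrinsics (this just
illustrates the cmp/and/andnot/or pattern; it is not generated code):

#include <emmintrin.h>

/* Branch-free select: returns (A < B) ? Y : Z using a compare mask. */
static __m128d select_lt(__m128d A, __m128d B, __m128d Y, __m128d Z) {
  __m128d mask = _mm_cmplt_pd(A, B);          /* all-ones where A < B */
  return _mm_or_pd(_mm_and_pd(mask, Y),       /* Y where the mask is set */
                   _mm_andnot_pd(mask, Z));   /* Z where the mask is clear */
}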

//===---------------------------------------------------------------------===//

It's not clear whether we should use pxor or xorps / xorpd to clear XMM
registers. The choice may depend on subtarget information. We should do some
more experiments on different x86 machines.

//===---------------------------------------------------------------------===//

Currently the x86 codegen isn't very good at mixing SSE and FPStack code:

unsigned int foo(double x) { return x; }

  movsd 24(%esp), %xmm0

This will be solved when we go to a dynamic-programming-based isel.

//===---------------------------------------------------------------------===//

Lower memcpy / memset to a series of SSE 128-bit move instructions when it's
feasible.
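
For illustration, a 16-byte-at-a-time memset expansion with SSE2 stores
(assumes the destination is 16-byte aligned and the length is a multiple of
16; a sketch of the expansion, not proposed codegen):

#include <emmintrin.h>
#include <stddef.h>

/* memset via 128-bit movdqa stores; dst must be 16-byte aligned, n % 16 == 0. */
static void memset_sse(void *dst, unsigned char value, size_t n) {
  __m128i v = _mm_set1_epi8((char)value);
  char *p = (char *)dst;
  for (size_t i = 0; i < n; i += 16)
    _mm_store_si128((__m128i *)(p + i), v);
}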

//===---------------------------------------------------------------------===//

Teach the coalescer to commute 2-addr instructions, allowing us to eliminate
the reg-reg copy in this example:

float foo(int *x, float *y, unsigned c) {
  for (i = 0; i < c; i++) {
    float xx = (float)x[i];

  cvtsi2ss %XMM0, DWORD PTR [%EDX + 4*%ESI]
  mulss %XMM0, DWORD PTR [%EAX + 4*%ESI]
**** movaps %XMM1, %XMM0
  jb LBB_foo_3 # no_exit

//===---------------------------------------------------------------------===//

  if (copysign(1.0, x) == copysign(1.0, y))
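
This comparison only depends on the sign bits of x and y, so it could be
lowered to an xor of the two values' bit patterns followed by a sign-bit test;
a scalar C sketch (the helper name is made up):

#include <stdint.h>
#include <string.h>

/* copysign(1.0, x) == copysign(1.0, y) holds exactly when the sign bits match. */
static int same_sign(double x, double y) {
  uint64_t bx, by;
  memcpy(&bx, &x, sizeof bx);
  memcpy(&by, &y, sizeof by);
  return ((bx ^ by) >> 63) == 0;   /* xor, then test the sign bit */
}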

//===---------------------------------------------------------------------===//

Use movhps to update upper 64-bits of a v4sf value. Also movlps on lower half
of a v4sf value.
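
At the intrinsics level this corresponds to _mm_loadh_pi / _mm_loadl_pi, e.g.
(an illustrative snippet, not from a testcase):

#include <xmmintrin.h>

/* Replace the upper 64 bits (elements 2,3) of v from memory: movhps. */
static __m128 update_high(__m128 v, const float hi[2]) {
  return _mm_loadh_pi(v, (const __m64 *)hi);
}

/* Replace the lower 64 bits (elements 0,1) of v from memory: movlps. */
static __m128 update_low(__m128 v, const float lo[2]) {
  return _mm_loadl_pi(v, (const __m64 *)lo);
}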

//===---------------------------------------------------------------------===//

Better codegen for vector_shuffles like this { x, 0, 0, 0 } or { x, 0, x, 0 }.
Perhaps use pxor / xorp* to clear an XMM register first?

//===---------------------------------------------------------------------===//

How to decide when to use the "floating point version" of logical ops? Here are
some code fragments:

  movaps LCPI5_5, %xmm2
  mulps 8656(%ecx), %xmm3
  addps 8672(%ecx), %xmm3

  movaps LCPI5_5, %xmm1
  mulps 8656(%ecx), %xmm3
  addps 8672(%ecx), %xmm3
  movaps %xmm3, 112(%esp)

Due to some minor source change, the latter case ended up using orps and movaps
instead of por and movdqa. Does it matter?

//===---------------------------------------------------------------------===//

X86RegisterInfo::copyRegToReg() returns X86::MOVAPSrr for VR128. Is it possible
to choose between movaps, movapd, and movdqa based on types of source and
destination?

How about andps, andpd, and pand? Do we really care about the type of the packed
elements? If not, why not always use the "ps" variants, which are likely to be
shorter?

//===---------------------------------------------------------------------===//

External test Nurbs exposed some problems. Look for
__ZN15Nurbs_SSE_Cubic17TessellateSurfaceE, bb cond_next140. This is what icc
generates:

  movaps (%edx), %xmm2 #59.21
  movaps (%edx), %xmm5 #60.21
  movaps (%edx), %xmm4 #61.21
  movaps (%edx), %xmm3 #62.21
  movl 40(%ecx), %ebp #69.49
  shufps $0, %xmm2, %xmm5 #60.21
  movl 100(%esp), %ebx #69.20
  movl (%ebx), %edi #69.20
  imull %ebp, %edi #69.49
  addl (%eax), %edi #70.33
  shufps $85, %xmm2, %xmm4 #61.21
  shufps $170, %xmm2, %xmm3 #62.21
  shufps $255, %xmm2, %xmm2 #63.21
  lea (%ebp,%ebp,2), %ebx #69.49
  lea -3(%edi,%ebx), %ebx #70.33
  addl 32(%ecx), %ebx #68.37
  testb $15, %bl #91.13
  jne L_B1.24 # Prob 5% #91.13

This is the llvm code after instruction scheduling:

cond_next140 (0xa910740, LLVM BB @0xa90beb0):
  %reg1078 = MOV32ri -3
  %reg1079 = ADD32rm %reg1078, %reg1068, 1, %NOREG, 0
  %reg1037 = MOV32rm %reg1024, 1, %NOREG, 40
  %reg1080 = IMUL32rr %reg1079, %reg1037
  %reg1081 = MOV32rm %reg1058, 1, %NOREG, 0
  %reg1038 = LEA32r %reg1081, 1, %reg1080, -3
  %reg1036 = MOV32rm %reg1024, 1, %NOREG, 32
  %reg1082 = SHL32ri %reg1038, 4
  %reg1039 = ADD32rr %reg1036, %reg1082
  %reg1083 = MOVAPSrm %reg1059, 1, %NOREG, 0
  %reg1034 = SHUFPSrr %reg1083, %reg1083, 170
  %reg1032 = SHUFPSrr %reg1083, %reg1083, 0
  %reg1035 = SHUFPSrr %reg1083, %reg1083, 255
  %reg1033 = SHUFPSrr %reg1083, %reg1083, 85
  %reg1040 = MOV32rr %reg1039
  %reg1084 = AND32ri8 %reg1039, 15
  JE mbb<cond_next204,0xa914d30>

Still ok. After register allocation:

cond_next140 (0xa910740, LLVM BB @0xa90beb0):
  %EDX = MOV32rm <fi#3>, 1, %NOREG, 0
  ADD32rm %EAX<def&use>, %EDX, 1, %NOREG, 0
  %EDX = MOV32rm <fi#7>, 1, %NOREG, 0
  %EDX = MOV32rm %EDX, 1, %NOREG, 40
  IMUL32rr %EAX<def&use>, %EDX
  %ESI = MOV32rm <fi#5>, 1, %NOREG, 0
  %ESI = MOV32rm %ESI, 1, %NOREG, 0
  MOV32mr <fi#4>, 1, %NOREG, 0, %ESI
  %EAX = LEA32r %ESI, 1, %EAX, -3
  %ESI = MOV32rm <fi#7>, 1, %NOREG, 0
  %ESI = MOV32rm %ESI, 1, %NOREG, 32
  SHL32ri %EDI<def&use>, 4
  ADD32rr %EDI<def&use>, %ESI
  %XMM0 = MOVAPSrm %ECX, 1, %NOREG, 0
  %XMM1 = MOVAPSrr %XMM0
  SHUFPSrr %XMM1<def&use>, %XMM1, 170
  %XMM2 = MOVAPSrr %XMM0
  SHUFPSrr %XMM2<def&use>, %XMM2, 0
  %XMM3 = MOVAPSrr %XMM0
  SHUFPSrr %XMM3<def&use>, %XMM3, 255
  SHUFPSrr %XMM0<def&use>, %XMM0, 85
  AND32ri8 %EBX<def&use>, 15
  JE mbb<cond_next204,0xa914d30>

This looks really bad. The problem is that shufps is a destructive opcode:
since the same value appears as operand two of more than one shufps, a number
of copies are needed. Note that icc also suffers from the same problem. Either
the instruction selector should select pshufd, or the register allocator should
be able to do the two-address to three-address transformation.

It also exposes some other problems. See MOV32ri -3 and the spills.

//===---------------------------------------------------------------------===//

http://gcc.gnu.org/bugzilla/show_bug.cgi?id=25500

LLVM is producing bad code.

LBB_main_4:     # cond_true44
  jne LBB_main_4        # cond_true44

There are two problems. 1) There is no need for two loop induction variables;
we can compare against 262144 * 16. 2) A known register coalescer issue: we
should be able to eliminate one of the movaps:

  addps %xmm2, %xmm1    <=== Commute!
  movaps %xmm1, %xmm1   <=== Eliminate!
  jne LBB_main_4        # cond_true44

//===---------------------------------------------------------------------===//

__m128 test(float a) {
  return _mm_set_ps(0.0, 0.0, 0.0, a*a);
}

Because mulss doesn't modify the top 3 elements, the top elements of
xmm1 are already zeroed. We could compile this to:

//===---------------------------------------------------------------------===//

Here's a sick and twisted idea. Consider code like this:

__m128 test(__m128 a) {
  float b = *(float*)&a;
  ...
  return _mm_set_ps(0.0, 0.0, 0.0, b);
}

This might compile to this code:

  movaps c(%esp), %xmm1

Now consider if the ... code caused xmm1 to get spilled. This might produce
this code:

  movaps c(%esp), %xmm1
  movaps %xmm1, c2(%esp)

  movaps c2(%esp), %xmm1

However, since the reload is only used by these instructions, we could
"fold" it into the uses, producing something like this:

  movaps c(%esp), %xmm1
  movaps %xmm1, c2(%esp)

  movss c2(%esp), %xmm0

... saving two instructions.

The basic idea is that a reload from a spill slot can, if only one 4-byte
chunk is used, bring in 3 zeros plus the one element instead of 4 elements.
This can be used to simplify a variety of shuffle operations, where the
elements are fixed zeros.

//===---------------------------------------------------------------------===//

#include <emmintrin.h>
void test(__m128d *r, __m128d *A, double B) {
  *r = _mm_loadl_pd(*A, &B);
}

  movsd 24(%esp), %xmm0

  movl 4(%esp), %edx #3.6
  movl 8(%esp), %eax #3.6
  movapd (%eax), %xmm0 #4.22
  movlpd 12(%esp), %xmm0 #4.8
  movapd %xmm0, (%edx) #4.3

So icc is smart enough to know that B is in memory, so it doesn't load it and
store it back to the stack.

//===---------------------------------------------------------------------===//

__m128d test1( __m128d A, __m128d B) {
  return _mm_shuffle_pd(A, B, 0x3);
}

  shufpd $3, %xmm1, %xmm0

Perhaps it's better to use unpckhpd instead?

  unpckhpd %xmm1, %xmm0

We don't know if unpckhpd is faster, but it is shorter.

//===---------------------------------------------------------------------===//

This code generates ugly code, probably due to costs being off or something:

void %test(float* %P, <4 x float>* %P2 ) {
  %xFloat0.688 = load float* %P
  %loadVector37.712 = load <4 x float>* %P2
  %inFloat3.713 = insertelement <4 x float> %loadVector37.712, float 0.000000e+00, uint 3
  store <4 x float> %inFloat3.713, <4 x float>* %P2
  ret void
}

  movd %xmm0, %eax ;; EAX = 0!
  pinsrw $6, %eax, %xmm0
  shrl $16, %eax ;; EAX = 0 again!
  pinsrw $7, %eax, %xmm0

It would be better to generate:

  pinsrw $6, %eax, %xmm0
  pinsrw $7, %eax, %xmm0

or use pxor (to make a zero vector) and shuffle (to insert it).

//===---------------------------------------------------------------------===//

Some useful information in the Apple Altivec / SSE Migration Guide:

http://developer.apple.com/documentation/Performance/Conceptual/
Accelerate_sse_migration/index.html

e.g. SSE select using and, andnot, or. Various SSE compare translations.

//===---------------------------------------------------------------------===//

Add hooks to commute some CMPP operations.

//===---------------------------------------------------------------------===//

Apply the same transformation that merged four float loads into a single
128-bit load to loads from the constant pool.

//===---------------------------------------------------------------------===//

Floating point max / min are commutable when -enable-unsafe-fp-path is
specified. We should turn int_x86_sse_max_ss and X86ISD::FMIN etc. into other
nodes which are selected to max / min instructions that are marked commutable.

//===---------------------------------------------------------------------===//

Add MOVDI2SSrr and MOVDSS2DIrr to X86RegisterInfo::foldMemoryOperand() once the
recent X86 JIT regressions have been identified and fixed.