//===- README.txt - Notes for improving PowerPC-specific code gen ---------===//

TODO:
* lmw/stmw pass a la arm load store optimizer for prolog/epilog

===-------------------------------------------------------------------------===

This code:

unsigned add32carry(unsigned sum, unsigned x) {
  unsigned z = sum + x;
  if (sum + x < x)
    z++;
  return z;
}

Should compile to something like:

        addc r3,r3,r4
        addze r3,r3

instead we get:

        add   r3, r4, r3
        cmplw cr7, r3, r4
        mfcr  r4 ; 1
        rlwinm r4, r4, 29, 31, 31
        add   r3, r3, r4

Ick.
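
The carry can also be folded in without a branch at the source level; a sketch
(hypothetical function name, same semantics) that makes the carry-out explicit,
which is exactly what the addc/addze pair computes:

unsigned add32carry_nobranch(unsigned sum, unsigned x) {
  unsigned z = sum + x;      /* low 32 bits of the sum */
  unsigned carry = z < x;    /* 1 iff the addition wrapped around */
  return z + carry;          /* addc produces z and CA; addze adds CA back in */
}
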
===-------------------------------------------------------------------------===

We compile the hottest inner loop of viterbi to:

        li r6, 0
        b LBB1_84       ;bb432.i
LBB1_83:        ;bb420.i
        lbzx r8, r5, r7
        addi r6, r7, 1
        stbx r8, r4, r7
LBB1_84:        ;bb432.i
        mr r7, r6
        cmplwi cr0, r7, 143
        bne cr0, LBB1_83        ;bb420.i

The CBE manages to produce:

        li r0, 143
        mtctr r0
loop:
        lbzx r2, r2, r11
        stbx r0, r2, r9
        addi r2, r2, 1
        bdz later
        b loop

This could be much better (bdnz instead of bdz) but it still beats us.  If we
produced this with bdnz, the loop would be a single dispatch group.

===-------------------------------------------------------------------------===

Here's another example (the sgn function):

double testf(double a) {
  return a == 0.0 ? 0.0 : (a > 0.0 ? 1.0 : -1.0);
}

it produces a BB like this:

LBB1_1: ; cond_true
        lis r2, ha16(LCPI1_0)
        lfs f0, lo16(LCPI1_0)(r2)
        lis r2, ha16(LCPI1_1)
        lis r3, ha16(LCPI1_2)
        lfs f2, lo16(LCPI1_2)(r3)
        lfs f3, lo16(LCPI1_1)(r2)
        fsub f0, f0, f1
        fsel f1, f0, f2, f3
        blr

===-------------------------------------------------------------------------===

PIC Code Gen IPO optimization:

Squish small scalar globals together into a single global struct, allowing the
address of the struct to be CSE'd, avoiding PIC accesses (also reduces the size
of the GOT on targets with one).

Note that this is discussed here for GCC:
http://gcc.gnu.org/ml/gcc-patches/2006-02/msg00133.html

===-------------------------------------------------------------------------===

Darwin Stub removal:

We still generate calls to foo$stub, and stubs, on Darwin.  This is not
necessary when building with the Leopard (10.5) or later linker, as stubs are
generated by ld when necessary.  Parameterizing this based on the deployment
target (-mmacosx-version-min) is probably enough.  x86-32 does this right, see
its logic.

===-------------------------------------------------------------------------===

Darwin Stub LICM optimization:

Loops like this:

  for (...)  bar();

have to go through an indirect stub if bar is external or linkonce.  It would
be better to compile it as:

  fp = &bar;
  for (...)  fp();

which only computes the address of bar once (instead of each time through the
stub).  This is Darwin specific and would have to be done in the code generator.
Probably not a win on x86.
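
A C-level sketch of that rewrite (bar stands in for any external or linkonce
function; this only illustrates the shape of the transformation):

extern void bar(void);

void hoisted_loop(int n) {
  void (*fp)(void) = &bar;   /* address of bar (or its stub) computed once */
  int i;
  for (i = 0; i < n; i++)
    fp();                    /* indirect call; no stub walk per iteration */
}
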
===-------------------------------------------------------------------------===

Simple IPO for argument passing, change:
  void foo(int X, double Y, int Z) -> void foo(int X, int Z, double Y)

the Darwin ABI specifies that any integer arguments in the first 32 bytes worth
of arguments get assigned to r3 through r10. That is, if you have a function
foo(int, double, int) you get r3, f1, r6, since the 64 bit double ate up the
argument bytes for r4 and r5. The trick then would be to shuffle the argument
order for functions we can internalize so that the maximum number of
integers/pointers get passed in regs before you see any of the fp arguments.

Instead of implementing this, it would actually probably be easier to just
implement a PPC fastcc, where we could do whatever we wanted to the CC,
including having this work sanely.

===-------------------------------------------------------------------------===

Fix Darwin FP-In-Integer Registers ABI

Darwin passes doubles in structures in integer registers, which is very very
bad.  Add something like a BITCAST to LLVM, then do an i-p transformation that
percolates these things out of functions.

Check out how horrible this is:
http://gcc.gnu.org/ml/gcc/2005-10/msg01036.html

This is an extension of "interprocedural CC unmunging" that can't be done with
just fastcc.

===-------------------------------------------------------------------------===

Fold add and sub with constant into non-extern, non-weak addresses so this:

static int a;
void bar(int b) { a = b; }
void foo(unsigned char *c) {
  *c = a;
}

So that

_foo:
        lis r2, ha16(_a)
        la r2, lo16(_a)(r2)
        lbz r2, 3(r2)
        stb r2, 0(r3)
        blr

Becomes

_foo:
        lis r2, ha16(_a+3)
        lbz r2, lo16(_a+3)(r2)
        stb r2, 0(r3)
        blr

===-------------------------------------------------------------------------===

We should compile these two functions to the same thing:

#include <stdlib.h>
void f(int a, int b, int *P) {
  *P = (a-b)>=0?(a-b):(b-a);
}
void g(int a, int b, int *P) {
  *P = abs(a-b);
}

Further, they should compile to something better than:

_g:
        subf r2, r4, r3
        subfic r3, r2, 0
        cmpwi cr0, r2, -1
        bgt cr0, LBB2_2 ; entry
LBB2_1: ; entry
        mr r2, r3
LBB2_2: ; entry
        stw r2, 0(r5)
        blr

GCC produces:

_g:
        subf r4,r4,r3
        srawi r2,r4,31
        xor r0,r2,r4
        subf r0,r2,r0
        stw r0,0(r5)
        blr

... which is much nicer.

This theoretically may help improve twolf slightly (used in dimbox.c:142?).
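
GCC's output corresponds to the classic branch-free abs idiom; a sketch
(assuming 32-bit int and an arithmetic right shift of negative values, which
is what srawi provides):

void g2(int a, int b, int *P) {
  int d = a - b;             /* subf */
  int m = d >> 31;           /* srawi: 0 if d >= 0, -1 if d < 0 */
  *P = (d ^ m) - m;          /* xor + subf: |d| */
}
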
===-------------------------------------------------------------------------===

PR5945: This:

define i32 @clamp0g(i32 %a) {
entry:
        %cmp = icmp slt i32 %a, 0
        %sel = select i1 %cmp, i32 0, i32 %a
        ret i32 %sel
}

Is compiled to this with the PowerPC (32-bit) backend:

_clamp0g:
        cmpwi cr0, r3, 0
        li r2, 0
        blt cr0, LBB1_2
; BB#1:                         ; %entry
        mr r2, r3
LBB1_2:                         ; %entry
        mr r3, r2
        blr

This could be reduced to the much simpler:

_clamp0g:
        srawi r2, r3, 31
        andc r3, r3, r2
        blr
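
The srawi/andc version is the branch-free max(a, 0) idiom; at the source level
it is (a sketch, same assumption about arithmetic right shift of negatives):

int clamp0g_nobranch(int a) {
  int m = a >> 31;           /* srawi: all ones iff a is negative */
  return a & ~m;             /* andc: 0 if a < 0, else a */
}
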
===-------------------------------------------------------------------------===

int foo(int N, int ***W, int **TK, int X) {
  int t, i;

  for (t = 0; t < N; ++t)
    for (i = 0; i < 4; ++i)
      W[t / X][i][t % X] = TK[i][t];

  return 5;
}

We generate relatively atrocious code for this loop compared to gcc.

We could also strength reduce the rem and the div:
http://www.lcs.mit.edu/pubs/pdf/MIT-LCS-TM-600.pdf
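
A sketch of what that strength reduction looks like at the source level,
carrying t / X and t % X in induction variables instead of recomputing them
(assumes X > 0):

int foo_sr(int N, int ***W, int **TK, int X) {
  int t, i, q = 0, r = 0;    /* invariants: q == t / X, r == t % X */

  for (t = 0; t < N; ++t) {
    for (i = 0; i < 4; ++i)
      W[q][i][r] = TK[i][t];
    if (++r == X) {          /* step the remainder; carry into the quotient */
      r = 0;
      ++q;
    }
  }
  return 5;
}
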
===-------------------------------------------------------------------------===

We generate ugly code for this:

void func(unsigned int *ret, float dx, float dy, float dz, float dw) {
  unsigned code = 0;
  if(dx < -dw) code |= 1;
  if(dx > dw)  code |= 2;
  if(dy < -dw) code |= 4;
  if(dy > dw)  code |= 8;
  if(dz < -dw) code |= 16;
  if(dz > dw)  code |= 32;
  *ret = code;
}

===-------------------------------------------------------------------------===

%struct.B = type { i8, [3 x i8] }

define void @bar(%struct.B* %b) {
entry:
        %tmp = bitcast %struct.B* %b to i32*            ; [#uses=1]
        %tmp = load i32* %tmp                           ; [#uses=1]
        %tmp3 = bitcast %struct.B* %b to i32*           ; [#uses=1]
        %tmp4 = load i32* %tmp3                         ; [#uses=1]
        %tmp8 = bitcast %struct.B* %b to i32*           ; [#uses=2]
        %tmp9 = load i32* %tmp8                         ; [#uses=1]
        %tmp4.mask17 = shl i32 %tmp4, i8 1              ; [#uses=1]
        %tmp1415 = and i32 %tmp4.mask17, 2147483648     ; [#uses=1]
        %tmp.masked = and i32 %tmp, 2147483648          ; [#uses=1]
        %tmp11 = or i32 %tmp1415, %tmp.masked           ; [#uses=1]
        %tmp12 = and i32 %tmp9, 2147483647              ; [#uses=1]
        %tmp13 = or i32 %tmp12, %tmp11                  ; [#uses=1]
        store i32 %tmp13, i32* %tmp8
        ret void
}

We emit:

_bar:
        lwz r2, 0(r3)
        slwi r4, r2, 1
        or r4, r4, r2
        rlwimi r2, r4, 0, 0, 0
        stw r2, 0(r3)
        blr

We could collapse a bunch of those ORs and ANDs and generate the following
equivalent code:

_bar:
        lwz r2, 0(r3)
        rlwinm r4, r2, 1, 0, 0
        or r2, r2, r4
        stw r2, 0(r3)
        blr

===-------------------------------------------------------------------------===

Consider a function like this:

float foo(float X) { return X + 1234.4123f; }

The FP constant ends up in the constant pool, so we need to get the LR
register.  This ends up producing code like this:

_foo:
.LBB_foo_0:     ; entry
        mflr r11
***     stw r11, 8(r1)
        bl "L00000$pb"
"L00000$pb":
        mflr r2
        addis r2, r2, ha16(.CPI_foo_0-"L00000$pb")
        lfs f0, lo16(.CPI_foo_0-"L00000$pb")(r2)
        fadds f1, f1, f0
***     lwz r11, 8(r1)
        mtlr r11
        blr

This is functional, but there is no reason to spill the LR register all the way
to the stack (the two marked instrs): spilling it to a GPR is quite enough.

Implementing this will require some codegen improvements.  Nate writes:

"So basically what we need to support the "no stack frame save and restore" is a
generalization of the LR optimization to "callee-save regs".

Currently, we have LR marked as a callee-save reg.  The register allocator sees
that it's callee save, and spills it directly to the stack.

Ideally, something like this would happen:

LR would be in a separate register class from the GPRs.  The class of LR would
be marked "unspillable".  When the register allocator came across an unspillable
reg, it would ask "what is the best class to copy this into that I *can* spill".
If it gets a class back, which it will in this case (the gprs), it grabs a free
register of that class.  If it is then later necessary to spill that reg, so be
it."

===-------------------------------------------------------------------------===

We compile this:

int test(_Bool X) {
  return X ? 524288 : 0;
}

to:

_test:
        cmplwi cr0, r3, 0
        lis r2, 8
        li r3, 0
        beq cr0, LBB1_2 ;entry
LBB1_1: ;entry
        mr r3, r2
LBB1_2: ;entry
        blr

instead of:

_test:
        addic r2,r3,-1
        subfe r0,r2,r3
        slwi r3,r0,19
        blr

This sort of thing occurs a lot due to globalopt.
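
Since _Bool is already 0 or 1 and 524288 is 1 << 19, the better sequence is
just a shift at the source level; a sketch:

int test_shifted(_Bool X) {
  return (int)X << 19;       /* X ? 524288 : 0, with no branch or select */
}
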
===-------------------------------------------------------------------------===

We compile:

define i32 @bar(i32 %x) nounwind readnone ssp {
entry:
  %0 = icmp eq i32 %x, 0                          ; [#uses=1]
  %neg = sext i1 %0 to i32                        ; [#uses=1]
  ret i32 %neg
}

to:

_bar:
        cntlzw r2, r3
        slwi r2, r2, 26
        srawi r3, r2, 31
        blr

it would be better to produce:

_bar:
        addic r3,r3,-1
        subfe r3,r3,r3
        blr
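
In C terms the function is a negated compare; a sketch (the addic/subfe pair
materializes the 0/-1 result directly from the carry bit):

int bar_nobranch(int x) {
  return -(x == 0);          /* x == 0 -> -1 (all ones); otherwise 0 */
}
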
===-------------------------------------------------------------------------===

We generate horrible ppc code for this:

#define N  2000000
double   a[N],c[N];
void simpleloop() {
  int j;
  for (j=0; j<N; j++)
    c[j] = a[j];
}

LBB1_1: ;bb
        lfdx f0, r3, r4
        addi r5, r5, 1                 ;; Extra IV for the exit value compare.
        stfdx f0, r2, r4
        addi r4, r4, 8

        xoris r6, r5, 30               ;; This is due to a large immediate.
        cmplwi cr0, r6, 33920
        bne cr0, LBB1_1

//===---------------------------------------------------------------------===//

This:
        #include <algorithm>
        inline std::pair<unsigned, bool> full_add(unsigned a, unsigned b)
        { return std::make_pair(a + b, a + b < a); }
        bool no_overflow(unsigned a, unsigned b)
        { return !full_add(a, b).second; }

Should compile to:

__Z11no_overflowjj:
        add r4,r3,r4
        subfc r3,r3,r4
        li r3,0
        adde r3,r3,r3
        blr

(or better) not:

__Z11no_overflowjj:
        add r2, r4, r3
        cmplw cr7, r2, r3
        mfcr r2
        rlwinm r2, r2, 29, 31, 31
        xori r3, r2, 1
        blr

//===---------------------------------------------------------------------===//

We compile some FP comparisons into an mfcr with two rlwinms and an or.  For
example:

#include <math.h>
int test(double x, double y) { return islessequal(x, y); }
int test2(double x, double y) { return islessgreater(x, y); }
int test3(double x, double y) { return !islessequal(x, y); }

Compiles into (all three are similar, but the bits differ):

_test:
        fcmpu cr7, f1, f2
        mfcr r2
        rlwinm r3, r2, 29, 31, 31
        rlwinm r2, r2, 31, 31, 31
        or r3, r2, r3
        blr

GCC compiles this into:

_test:
        fcmpu cr7,f1,f2
        cror 30,28,30
        mfcr r3
        rlwinm r3,r3,31,1
        blr

which is more efficient and can use mfocrf.  See PR642 for some more context.

//===---------------------------------------------------------------------===//

void foo(float *data, float d) {
  long i;
  for (i = 0; i < 8000; i++)
    data[i] = d;
}
void foo2(float *data, float d) {
  long i;
  data--;
  for (i = 0; i < 8000; i++) {
    data[1] = d;
    data++;
  }
}

These compile to:

_foo:
        li r2, 0
LBB1_1: ; bb
        addi r4, r2, 4
        stfsx f1, r3, r2
        cmplwi cr0, r4, 32000
        mr r2, r4
        bne cr0, LBB1_1 ; bb
        blr
_foo2:
        li r2, 0
LBB2_1: ; bb
        addi r4, r2, 4
        stfsx f1, r3, r2
        cmplwi cr0, r4, 32000
        mr r2, r4
        bne cr0, LBB2_1 ; bb
        blr

The 'mr' could be eliminated by folding the add into the cmp.

//===---------------------------------------------------------------------===//

Codegen for the following (low-probability) case deteriorated considerably
when the correctness fixes for unordered comparisons went in (PR 642, 58871).
It should be possible to recover the code quality described in the comments.

; RUN: llvm-as < %s | llc -march=ppc32 | grep or | count 3
; This should produce one 'or' or 'cror' instruction per function.

; RUN: llvm-as < %s | llc -march=ppc32 | grep mfcr | count 3
; PR2964

define i32 @test(double %x, double %y) nounwind {
entry:
        %tmp3 = fcmp ole double %x, %y          ; [#uses=1]
        %tmp345 = zext i1 %tmp3 to i32          ; [#uses=1]
        ret i32 %tmp345
}

define i32 @test2(double %x, double %y) nounwind {
entry:
        %tmp3 = fcmp one double %x, %y          ; [#uses=1]
        %tmp345 = zext i1 %tmp3 to i32          ; [#uses=1]
        ret i32 %tmp345
}

define i32 @test3(double %x, double %y) nounwind {
entry:
        %tmp3 = fcmp ugt double %x, %y          ; [#uses=1]
        %tmp34 = zext i1 %tmp3 to i32           ; [#uses=1]
        ret i32 %tmp34
}

//===----------------------------------------------------------------------===//

; RUN: llvm-as < %s | llc -march=ppc32 | not grep fneg

; This could generate FSEL with appropriate flags (FSEL is not IEEE-safe, and
; should not be generated except with -enable-finite-only-fp-math or the like).
; With the correctness fixes for PR642 (58871) LowerSELECT_CC would need to
; recognize a more elaborate tree than a simple SETxx.

define double @test_FNEG_sel(double %A, double %B, double %C) {
        %D = fsub double -0.000000e+00, %A              ; [#uses=1]
        %Cond = fcmp ugt double %D, -0.000000e+00       ; [#uses=1]
        %E = select i1 %Cond, double %B, double %C      ; [#uses=1]
        ret double %E
}

//===----------------------------------------------------------------------===//

The save/restore sequence for CR in prolog/epilog is terrible:
- Each CR subreg is saved individually, rather than doing one save as a unit.
- On Darwin, the save is done after the decrement of SP, which means the offset
  from SP of the save slot can be too big for a store instruction, which means
  we need an additional register (currently hacked in 96015+96020; the solution
  there is correct, but poor).
- On SVR4 the same thing can happen, and I don't think saving before the SP
  decrement is safe on that target, as there is no red zone.  This is currently
  broken AFAIK, although it's not a target I can exercise.

The following demonstrates the problem:

extern void bar(char *p);
void foo() {
  char x[100000];
  bar(x);
  __asm__("" ::: "cr2");
}

//===----------------------------------------------------------------------===//

Instruction fusion was introduced in ISA 2.06 and more opportunities added in
ISA 2.07.  LLVM needs to add infrastructure to recognize fusion opportunities
and force instruction pairs to be scheduled together.