+ %tmp3 = call i32 @foo( i32* %x ) ; <i32> [#uses=1]
+ ret i32 %tmp3
+}
+
+//===---------------------------------------------------------------------===//
+
+"basicaa" should know how to look through "or" instructions that act like add
+instructions. For example in this code, the x*4+1 is turned into x*4 | 1, and
+basicaa can't analyze the array subscript, leading to duplicated loads in the
+generated code:
+
+void test(int X, int Y, int a[]) {
+int i;
+ for (i=2; i<1000; i+=4) {
+ a[i+0] = a[i-1+0]*a[i-2+0];
+ a[i+1] = a[i-1+1]*a[i-2+1];
+ a[i+2] = a[i-1+2]*a[i-2+2];
+ a[i+3] = a[i-1+3]*a[i-2+3];
+ }
+}
+
+BasicAA also doesn't do this for add. It needs to know that &A[i+1] != &A[i].
+
+//===---------------------------------------------------------------------===//
+
+We should investigate an instruction sinking pass. Consider this silly
+example in pic mode:
+
+#include <assert.h>
+void foo(int x) {
+ assert(x);
+ //...
+}
+
+we compile this to:
+_foo:
+ subl $28, %esp
+ call "L1$pb"
+"L1$pb":
+ popl %eax
+ cmpl $0, 32(%esp)
+ je LBB1_2 # cond_true
+LBB1_1: # return
+ # ...
+ addl $28, %esp
+ ret
+LBB1_2: # cond_true
+...
+
+The PIC base computation (call+popl) is only used on one path through the
+code, but is currently always computed in the entry block. It would be
+better to sink the picbase computation down into the block for the
+assertion, as it is the only one that uses it. This happens for a lot of
+code with early outs.
+
+Another example is loads of arguments, which are usually emitted into the
+entry block on targets like x86. If not used in all paths through a
+function, they should be sunk into the ones that do.
+
+In this case, whole-function-isel would also handle this.
+
+//===---------------------------------------------------------------------===//
+
+Investigate lowering of sparse switch statements into perfect hash tables:
+http://burtleburtle.net/bob/hash/perfect.html
+
+//===---------------------------------------------------------------------===//
+
+We should turn things like "load+fabs+store" and "load+fneg+store" into the
+corresponding integer operations. On a yonah, this loop:
+
+double a[256];
+void foo() {
+ int i, b;
+ for (b = 0; b < 10000000; b++)
+ for (i = 0; i < 256; i++)
+ a[i] = -a[i];
+}
+
+is twice as slow as this loop:
+
+long long a[256];
+void foo() {
+ int i, b;
+ for (b = 0; b < 10000000; b++)
+ for (i = 0; i < 256; i++)
+ a[i] ^= (1ULL << 63);
+}
+
+and I suspect other processors are similar. On X86 in particular this is a
+big win because doing this with integers allows the use of read/modify/write
+instructions.
+
+//===---------------------------------------------------------------------===//
+
+DAG Combiner should try to combine small loads into larger loads when
+profitable. For example, we compile this C++ example:
+
+struct THotKey { short Key; bool Control; bool Shift; bool Alt; };
+extern THotKey m_HotKey;
+THotKey GetHotKey () { return m_HotKey; }
+
+into (-O3 -fno-exceptions -static -fomit-frame-pointer):
+
+__Z9GetHotKeyv:
+ pushl %esi
+ movl 8(%esp), %eax
+ movb _m_HotKey+3, %cl
+ movb _m_HotKey+4, %dl
+ movb _m_HotKey+2, %ch
+ movw _m_HotKey, %si
+ movw %si, (%eax)
+ movb %ch, 2(%eax)
+ movb %cl, 3(%eax)
+ movb %dl, 4(%eax)
+ popl %esi
+ ret $4
+
+GCC produces:
+
+__Z9GetHotKeyv:
+ movl _m_HotKey, %edx
+ movl 4(%esp), %eax
+ movl %edx, (%eax)
+ movzwl _m_HotKey+4, %edx
+ movw %dx, 4(%eax)
+ ret $4
+
+The LLVM IR contains the needed alignment info, so we should be able to
+merge the loads and stores into 4-byte loads:
+
+ %struct.THotKey = type { i16, i8, i8, i8 }
+define void @_Z9GetHotKeyv(%struct.THotKey* sret %agg.result) nounwind {
+...
+ %tmp2 = load i16* getelementptr (@m_HotKey, i32 0, i32 0), align 8
+ %tmp5 = load i8* getelementptr (@m_HotKey, i32 0, i32 1), align 2
+ %tmp8 = load i8* getelementptr (@m_HotKey, i32 0, i32 2), align 1
+ %tmp11 = load i8* getelementptr (@m_HotKey, i32 0, i32 3), align 2
+
+Alternatively, we should use a small amount of base-offset alias analysis
+to make it so the scheduler doesn't need to hold all the loads in regs at
+once.
+
+//===---------------------------------------------------------------------===//
+
+We should extend parameter attributes to capture more information about
+pointer parameters for alias analysis. Some ideas:
+
+1. Add a "nocapture" attribute, which indicates that the callee does not store
+ the address of the parameter into a global or any other memory location
+   visible to the caller. This can be used to make basicaa and other analyses
+ more powerful. It is true for things like memcpy, strcat, and many other
+ things, including structs passed by value, most C++ references, etc.
+2. Generalize readonly to be set on parameters. This is important mod/ref
+ info for the function, which is important for basicaa and others. It can
+ also be used by the inliner to avoid inserting a memcpy for byval
+ arguments when the function is inlined.
+
+These functions can be inferred by various analysis passes such as the
+globalsmodrefaa pass. Note that getting #2 right is actually really tricky.
+Consider this code:
+
+struct S; S G;
+void caller(S byvalarg) { G.field = 1; ... }
+void callee() { caller(G); }
+
+The fact that the caller does not modify byval arg is not enough, we need
+to know that it doesn't modify G either. This is very tricky.
+
+//===---------------------------------------------------------------------===//
+
+We should add an FRINT node to the DAG to model targets that have legal
+implementations of ceil/floor/rint.
+
+//===---------------------------------------------------------------------===//
+
+This GCC bug: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=34043
+contains a testcase that compiles down to:
+
+ %struct.XMM128 = type { <4 x float> }
+..
+ %src = alloca %struct.XMM128
+..
+ %tmp6263 = bitcast %struct.XMM128* %src to <2 x i64>*
+ %tmp65 = getelementptr %struct.XMM128* %src, i32 0, i32 0
+ store <2 x i64> %tmp5899, <2 x i64>* %tmp6263, align 16
+ %tmp66 = load <4 x float>* %tmp65, align 16
+ %tmp71 = add <4 x float> %tmp66, %tmp66
+
+If the mid-level optimizer turned the bitcast of pointer + store of tmp5899
+into a bitcast of the vector value and a store to the pointer, then the
+store->load could be easily removed.
+
+//===---------------------------------------------------------------------===//
+
+Consider:
+
+int test() {
+ long long input[8] = {1,1,1,1,1,1,1,1};
+ foo(input);
+}
+
+We currently compile this into a memcpy from a global array since the
+initializer is fairly large and not memset'able. This is good, but the memcpy
+gets lowered to load/stores in the code generator. This is also ok, except
+that the codegen lowering for memcpy doesn't handle the case when the source
+is a constant global. This gives us atrocious code like this:
+
+ call "L1$pb"
+"L1$pb":
+ popl %eax
+ movl _C.0.1444-"L1$pb"+32(%eax), %ecx
+ movl %ecx, 40(%esp)
+ movl _C.0.1444-"L1$pb"+20(%eax), %ecx
+ movl %ecx, 28(%esp)
+ movl _C.0.1444-"L1$pb"+36(%eax), %ecx
+ movl %ecx, 44(%esp)
+ movl _C.0.1444-"L1$pb"+44(%eax), %ecx
+ movl %ecx, 52(%esp)
+ movl _C.0.1444-"L1$pb"+40(%eax), %ecx
+ movl %ecx, 48(%esp)
+ movl _C.0.1444-"L1$pb"+12(%eax), %ecx
+ movl %ecx, 20(%esp)
+ movl _C.0.1444-"L1$pb"+4(%eax), %ecx
+...
+
+instead of:
+ movl $1, 16(%esp)
+ movl $0, 20(%esp)
+ movl $1, 24(%esp)
+ movl $0, 28(%esp)
+ movl $1, 32(%esp)
+ movl $0, 36(%esp)
+ ...
+
+//===---------------------------------------------------------------------===//
+
+http://llvm.org/PR717:
+
+The following code should compile into "ret int undef". Instead, LLVM
+produces "ret int 0":
+
+int f() {
+ int x = 4;
+ int y;
+ if (x == 3) y = 0;
+ return y;
+}
+
+//===---------------------------------------------------------------------===//
+
+The loop unroller should partially unroll loops (instead of peeling them)
+when code growth isn't too bad and when an unroll count allows simplification
+of some code within the loop. One trivial example is:
+
+#include <stdio.h>
+int main() {
+ int nRet = 17;
+ int nLoop;
+ for ( nLoop = 0; nLoop < 1000; nLoop++ ) {
+ if ( nLoop & 1 )
+ nRet += 2;
+ else
+ nRet -= 1;
+ }
+ return nRet;
+}
+
+Unrolling by 2 would eliminate the '&1' in both copies, leading to a net
+reduction in code size. The resultant code would then also be suitable for
+exit value computation.
+
+//===---------------------------------------------------------------------===//
+
+We miss a bunch of rotate opportunities on various targets, including ppc, x86,
+etc. On X86, we miss a bunch of 'rotate by variable' cases because the rotate
+matching code in dag combine doesn't look through truncates aggressively
+enough.  Here are some testcases reduced from GCC PR17886:
+
+unsigned long long f(unsigned long long x, int y) {
+ return (x << y) | (x >> 64-y);
+}
+unsigned f2(unsigned x, int y){
+ return (x << y) | (x >> 32-y);
+}
+unsigned long long f3(unsigned long long x){
+ int y = 9;
+ return (x << y) | (x >> 64-y);
+}
+unsigned f4(unsigned x){
+ int y = 10;
+ return (x << y) | (x >> 32-y);
+}
+unsigned long long f5(unsigned long long x, unsigned long long y) {
+ return (x << 8) | ((y >> 48) & 0xffull);
+}
+unsigned long long f6(unsigned long long x, unsigned long long y, int z) {
+ switch(z) {
+ case 1:
+ return (x << 8) | ((y >> 48) & 0xffull);
+ case 2:
+ return (x << 16) | ((y >> 40) & 0xffffull);
+ case 3:
+ return (x << 24) | ((y >> 32) & 0xffffffull);
+ case 4:
+ return (x << 32) | ((y >> 24) & 0xffffffffull);
+ default:
+ return (x << 40) | ((y >> 16) & 0xffffffffffull);
+ }
+}
+
+On X86-64, we only handle f2/f3/f4 right. On x86-32, a few of these
+generate truly horrible code, instead of using shld and friends. On
+ARM, we end up with calls to L___lshrdi3/L___ashldi3 in f, which is
+badness. PPC64 misses f, f5 and f6. CellSPU aborts in isel.
+
+//===---------------------------------------------------------------------===//
+
+We do a number of simplifications in simplify libcalls to strength reduce
+standard library functions, but we don't currently merge them together. For
+example, it is useful to merge memcpy(a,b,strlen(b)) -> strcpy. This can only
+be done safely if "b" isn't modified between the strlen and memcpy of course.
+
+//===---------------------------------------------------------------------===//
+
+We should be able to evaluate this loop:
+
+int test(int x_offs) {
+ while (x_offs > 4)
+ x_offs -= 4;
+ return x_offs;
+}
+
+//===---------------------------------------------------------------------===//
+
+Reassociate should turn things like:
+
+int factorial(int X) {
+ return X*X*X*X*X*X*X*X;
+}
+
+into llvm.powi calls, allowing the code generator to produce balanced
+multiplication trees.
+
+//===---------------------------------------------------------------------===//
+
+We generate a horrible libcall for llvm.powi. For example, we compile:
+
+#include <cmath>
+double f(double a) { return std::pow(a, 4); }
+
+into:
+
+__Z1fd:
+ subl $12, %esp
+ movsd 16(%esp), %xmm0
+ movsd %xmm0, (%esp)
+ movl $4, 8(%esp)
+ call L___powidf2$stub
+ addl $12, %esp
+ ret
+
+GCC produces:
+
+__Z1fd:
+ subl $12, %esp
+ movsd 16(%esp), %xmm0
+ mulsd %xmm0, %xmm0
+ mulsd %xmm0, %xmm0
+ movsd %xmm0, (%esp)
+ fldl (%esp)
+ addl $12, %esp
+ ret
+
+//===---------------------------------------------------------------------===//
+
+We compile this program: (from GCC PR11680)
+http://gcc.gnu.org/bugzilla/attachment.cgi?id=4487
+
+Into code that runs the same speed in fast/slow modes, but both modes run 2x
+slower than when compiled with GCC (either 4.0 or 4.2):
+
+$ llvm-g++ perf.cpp -O3 -fno-exceptions
+$ time ./a.out fast
+1.821u 0.003s 0:01.82 100.0% 0+0k 0+0io 0pf+0w
+
+$ g++ perf.cpp -O3 -fno-exceptions
+$ time ./a.out fast
+0.821u 0.001s 0:00.82 100.0% 0+0k 0+0io 0pf+0w
+
+It looks like we are making the same inlining decisions, so this may be raw
+codegen badness or something else (haven't investigated).
+
+//===---------------------------------------------------------------------===//
+
+We miss some instcombines for stuff like this:
+void bar (void);
+void foo (unsigned int a) {
+ /* This one is equivalent to a >= (3 << 2). */
+ if ((a >> 2) >= 3)
+ bar ();
+}
+
+A few other related ones are in GCC PR14753.
+
+//===---------------------------------------------------------------------===//
+
+Divisibility by constant can be simplified (according to GCC PR12849) from
+being a mulhi to being a mul lo (cheaper). Testcase:
+
+void bar(unsigned n) {
+ if (n % 3 == 0)
+ true();
+}
+
+I think this basically amounts to a dag combine to simplify comparisons against
+multiply hi's into a comparison against the mullo.
+
+//===---------------------------------------------------------------------===//
+
+SROA is not promoting the union on the stack in this example, we should end
+up with no allocas.
+
+union vec2d {
+ double e[2];
+ double v __attribute__((vector_size(16)));
+};
+typedef union vec2d vec2d;
+
+static vec2d a={{1,2}}, b={{3,4}};
+
+vec2d foo () {
+ return (vec2d){ .v = a.v + b.v * (vec2d){{5,5}}.v };
+}
+
+//===---------------------------------------------------------------------===//
+
+Better mod/ref analysis for scanf would allow us to eliminate the vtable and a
+bunch of other stuff from this example (see PR1604):
+
+#include <cstdio>
+struct test {
+ int val;
+ virtual ~test() {}
+};
+
+int main() {
+ test t;
+ std::scanf("%d", &t.val);
+ std::printf("%d\n", t.val);
+}
+
+//===---------------------------------------------------------------------===//
+
+Instcombine will merge comparisons like (x >= 10) && (x < 20) by producing (x -
+10) u< 10, but only when the comparisons have matching sign.
+
+This could be converted with a similar technique. (PR1941)
+
+define i1 @test(i8 %x) {
+ %A = icmp uge i8 %x, 5
+ %B = icmp slt i8 %x, 20
+ %C = and i1 %A, %B
+ ret i1 %C
+}
+
+//===---------------------------------------------------------------------===//
+
+These functions perform the same computation, but produce different assembly.
+
+define i8 @select(i8 %x) readnone nounwind {
+ %A = icmp ult i8 %x, 250
+ %B = select i1 %A, i8 0, i8 1
+ ret i8 %B
+}
+
+define i8 @addshr(i8 %x) readnone nounwind {
+ %A = zext i8 %x to i9
+ %B = add i9 %A, 6 ;; 256 - 250 == 6
+ %C = lshr i9 %B, 8
+ %D = trunc i9 %C to i8
+ ret i8 %D
+}
+
+//===---------------------------------------------------------------------===//
+
+From gcc bug 24696:
+int
+f (unsigned long a, unsigned long b, unsigned long c)
+{
+ return ((a & (c - 1)) != 0) || ((b & (c - 1)) != 0);
+}
+int
+f (unsigned long a, unsigned long b, unsigned long c)
+{
+ return ((a & (c - 1)) != 0) | ((b & (c - 1)) != 0);
+}
+Both should combine to ((a|b) & (c-1)) != 0. Currently not optimized with
+"clang -emit-llvm-bc | opt -std-compile-opts".
+
+//===---------------------------------------------------------------------===//
+
+From GCC Bug 20192:
+#define PMD_MASK (~((1UL << 23) - 1))
+void clear_pmd_range(unsigned long start, unsigned long end)
+{
+ if (!(start & ~PMD_MASK) && !(end & ~PMD_MASK))
+ f();
+}
+The expression should optimize to something like
+"!((start|end)&~PMD_MASK)". Currently not optimized with "clang
+-emit-llvm-bc | opt -std-compile-opts".
+
+//===---------------------------------------------------------------------===//
+
+From GCC Bug 15241:
+unsigned int
+foo (unsigned int a, unsigned int b)
+{
+ if (a <= 7 && b <= 7)
+ baz ();
+}
+Should combine to "(a|b) <= 7". Currently not optimized with "clang
+-emit-llvm-bc | opt -std-compile-opts".
+
+//===---------------------------------------------------------------------===//
+
+From GCC Bug 3756:
+int
+pn (int n)
+{
+ return (n >= 0 ? 1 : -1);
+}
+Should combine to (n >> 31) | 1. Currently not optimized with "clang
+-emit-llvm-bc | opt -std-compile-opts | llc".
+
+//===---------------------------------------------------------------------===//
+
+From GCC Bug 28685:
+int test(int a, int b)
+{
+ int lt = a < b;
+ int eq = a == b;
+
+ return (lt || eq);
+}
+Should combine to "a <= b". Currently not optimized with "clang
+-emit-llvm-bc | opt -std-compile-opts | llc".
+
+//===---------------------------------------------------------------------===//
+
+void a(int variable)
+{
+ if (variable == 4 || variable == 6)
+ bar();
+}
+This should optimize to "if ((variable | 2) == 6)". Currently not
+optimized with "clang -emit-llvm-bc | opt -std-compile-opts | llc".
+
+//===---------------------------------------------------------------------===//
+
+unsigned int f(unsigned int i, unsigned int n) {++i; if (i == n) ++i; return
+i;}
+unsigned int f2(unsigned int i, unsigned int n) {++i; i += i == n; return i;}
+These should combine to the same thing. Currently, the first function
+produces better code on X86.
+
+//===---------------------------------------------------------------------===//
+
+From GCC Bug 15784:
+#define abs(x) x>0?x:-x
+int f(int x, int y)
+{
+ return (abs(x)) >= 0;
+}
+This should optimize to x == INT_MIN. (With -fwrapv.) Currently not
+optimized with "clang -emit-llvm-bc | opt -std-compile-opts".
+
+//===---------------------------------------------------------------------===//
+
+From GCC Bug 14753:
+void
+rotate_cst (unsigned int a)
+{
+ a = (a << 10) | (a >> 22);
+ if (a == 123)
+ bar ();
+}
+void
+minus_cst (unsigned int a)
+{
+ unsigned int tem;
+
+ tem = 20 - a;
+ if (tem == 5)
+ bar ();