1 ; RUN: opt -basicaa -load-combine -instcombine -S < %s | FileCheck %s
2 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
3 target triple = "x86_64-unknown-linux-gnu"
; llvm.assume: intrinsic telling the optimizer its i1 operand is true at this
; point. The tests below call it between two adjacent i32 loads to check how
; an intervening assume affects combining them into a single i64 load.
declare void @llvm.assume(i1) nounwind
7 ; 'load' before the 'call' gets optimized:
8 define i64 @test1(i32* nocapture readonly %a, i1 %b) {
11 ; CHECK-DAG: load i64, i64* %1, align 4
12 ; CHECK-DAG: tail call void @llvm.assume(i1 %b)
15 %load1 = load i32, i32* %a, align 4
16 %conv = zext i32 %load1 to i64
17 %arrayidx1 = getelementptr inbounds i32, i32* %a, i64 1
18 %load2 = load i32, i32* %arrayidx1, align 4
19 tail call void @llvm.assume(i1 %b)
20 %conv2 = zext i32 %load2 to i64
21 %shl = shl nuw i64 %conv2, 32
22 %add = or i64 %shl, %conv
26 ; 'call' before the 'load' doesn't get optimized:
27 define i64 @test2(i32* nocapture readonly %a, i1 %b) {
30 ; CHECK-DAG: load i64, i64* %1, align 4
31 ; CHECK-DAG: tail call void @llvm.assume(i1 %b)
34 %load1 = load i32, i32* %a, align 4
35 %conv = zext i32 %load1 to i64
36 %arrayidx1 = getelementptr inbounds i32, i32* %a, i64 1
37 tail call void @llvm.assume(i1 %b)
38 %load2 = load i32, i32* %arrayidx1, align 4
39 %conv2 = zext i32 %load2 to i64
40 %shl = shl nuw i64 %conv2, 32
41 %add = or i64 %shl, %conv