1 ; RUN: opt < %s -default-data-layout="e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-n8:16:32" -basicaa -gvn -S -die | FileCheck %s
2 ; RUN: opt < %s -default-data-layout="E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-n32" -basicaa -gvn -S -die | FileCheck %s
5 define i32 @test0(i32 %V, i32* %P) {
15 ;;===----------------------------------------------------------------------===;;
17 ;;===----------------------------------------------------------------------===;;
20 define i8 @crash0({i32, i32} %A, {i32, i32}* %P) {
21 store {i32, i32} %A, {i32, i32}* %P
22 %X = bitcast {i32, i32}* %P to i8*
27 ;; No PR filed, crashed in CaptureTracker.
28 declare void @helper()
29 define void @crash1() {
30 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* undef, i8* undef, i64 undef, i32 1, i1 false) nounwind
31 %tmp = load i8* bitcast (void ()* @helper to i8*)
32 %x = icmp eq i8 %tmp, 15
37 ;;===----------------------------------------------------------------------===;;
38 ;; Store -> Load and Load -> Load forwarding where src and dst are different
39 ;; types, but where the base pointer is a must alias.
40 ;;===----------------------------------------------------------------------===;;
42 ;; i32 -> f32 forwarding.
43 define float @coerce_mustalias1(i32 %V, i32* %P) {
46 %P2 = bitcast i32* %P to float*
50 ; CHECK: @coerce_mustalias1
55 ;; i32* -> float forwarding.
56 define float @coerce_mustalias2(i32* %V, i32** %P) {
57 store i32* %V, i32** %P
59 %P2 = bitcast i32** %P to float*
63 ; CHECK: @coerce_mustalias2
68 ;; float -> i32* forwarding.
69 define i32* @coerce_mustalias3(float %V, float* %P) {
70 store float %V, float* %P
72 %P2 = bitcast float* %P to i32**
76 ; CHECK: @coerce_mustalias3
81 ;; i32 -> f32 load forwarding.
82 define float @coerce_mustalias4(i32* %P, i1 %cond) {
85 %P2 = bitcast i32* %P to float*
87 br i1 %cond, label %T, label %F
92 %X = bitcast i32 %A to float
95 ; CHECK: @coerce_mustalias4
96 ; CHECK: %A = load i32* %P
102 ;; i32 -> i8 forwarding
103 define i8 @coerce_mustalias5(i32 %V, i32* %P) {
104 store i32 %V, i32* %P
106 %P2 = bitcast i32* %P to i8*
110 ; CHECK: @coerce_mustalias5
115 ;; i64 -> float forwarding
116 define float @coerce_mustalias6(i64 %V, i64* %P) {
117 store i64 %V, i64* %P
119 %P2 = bitcast i64* %P to float*
123 ; CHECK: @coerce_mustalias6
128 ;; i64 -> i8* (32-bit) forwarding
129 define i8* @coerce_mustalias7(i64 %V, i64* %P) {
130 store i64 %V, i64* %P
132 %P2 = bitcast i64* %P to i8**
136 ; CHECK: @coerce_mustalias7
141 ; memset -> i16 forwarding.
142 define signext i16 @memset_to_i16_local(i16* %A) nounwind ssp {
144 %conv = bitcast i16* %A to i8*
145 tail call void @llvm.memset.p0i8.i64(i8* %conv, i8 1, i64 200, i32 1, i1 false)
146 %arrayidx = getelementptr inbounds i16* %A, i64 42
147 %tmp2 = load i16* %arrayidx
149 ; CHECK: @memset_to_i16_local
154 ; memset -> float forwarding.
155 define float @memset_to_float_local(float* %A, i8 %Val) nounwind ssp {
157 %conv = bitcast float* %A to i8* ; <i8*> [#uses=1]
158 tail call void @llvm.memset.p0i8.i64(i8* %conv, i8 %Val, i64 400, i32 1, i1 false)
159 %arrayidx = getelementptr inbounds float* %A, i64 42 ; <float*> [#uses=1]
160 %tmp2 = load float* %arrayidx ; <float> [#uses=1]
162 ; CHECK: @memset_to_float_local
169 ; CHECK-NEXT: bitcast
170 ; CHECK-NEXT: ret float
173 ;; non-local memset -> i16 load forwarding.
174 define i16 @memset_to_i16_nonlocal0(i16* %P, i1 %cond) {
175 %P3 = bitcast i16* %P to i8*
176 br i1 %cond, label %T, label %F
178 tail call void @llvm.memset.p0i8.i64(i8* %P3, i8 1, i64 400, i32 1, i1 false)
182 tail call void @llvm.memset.p0i8.i64(i8* %P3, i8 2, i64 400, i32 1, i1 false)
186 %P2 = getelementptr i16* %P, i32 4
190 ; CHECK: @memset_to_i16_nonlocal0
192 ; CHECK-NEXT: %A = phi i16 [ 514, %F ], [ 257, %T ]
197 @GCst = constant {i32, float, i32 } { i32 42, float 14., i32 97 }
199 ; memcpy (from constant global) -> float forwarding.
200 define float @memcpy_to_float_local(float* %A) nounwind ssp {
202 %conv = bitcast float* %A to i8* ; <i8*> [#uses=1]
203 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %conv, i8* bitcast ({i32, float, i32 }* @GCst to i8*), i64 12, i32 1, i1 false)
204 %arrayidx = getelementptr inbounds float* %A, i64 1 ; <float*> [#uses=1]
205 %tmp2 = load float* %arrayidx ; <float> [#uses=1]
207 ; CHECK: @memcpy_to_float_local
209 ; CHECK: ret float 1.400000e+01
214 ;; non-local i32/float -> i8 load forwarding.
215 define i8 @coerce_mustalias_nonlocal0(i32* %P, i1 %cond) {
216 %P2 = bitcast i32* %P to float*
217 %P3 = bitcast i32* %P to i8*
218 br i1 %cond, label %T, label %F
220 store i32 42, i32* %P
224 store float 1.0, float* %P2
231 ; CHECK: @coerce_mustalias_nonlocal0
233 ; CHECK: %A = phi i8 [
239 ;; non-local i32/float -> i8 load forwarding. This also tests that the "P3"
240 ;; bitcast equivalence can be properly phi translated.
241 define i8 @coerce_mustalias_nonlocal1(i32* %P, i1 %cond) {
242 %P2 = bitcast i32* %P to float*
243 br i1 %cond, label %T, label %F
245 store i32 42, i32* %P
249 store float 1.0, float* %P2
253 %P3 = bitcast i32* %P to i8*
257 ;; FIXME: This is disabled because this caused a miscompile in the llvm-gcc
258 ;; bootstrap, see r82411
260 ; HECK: @coerce_mustalias_nonlocal1
262 ; HECK: %A = phi i8 [
268 ;; non-local i32 -> i8 partial redundancy load forwarding.
269 define i8 @coerce_mustalias_pre0(i32* %P, i1 %cond) {
270 %P3 = bitcast i32* %P to i8*
271 br i1 %cond, label %T, label %F
273 store i32 42, i32* %P
283 ; CHECK: @coerce_mustalias_pre0
285 ; CHECK: load i8* %P3
287 ; CHECK: %A = phi i8 [
292 ;;===----------------------------------------------------------------------===;;
293 ;; Store -> Load and Load -> Load forwarding where src and dst are different
294 ;; types, and the reload is an offset from the store pointer.
295 ;;===----------------------------------------------------------------------===;;
297 ;; i32 -> i8 forwarding.
299 define i8 @coerce_offset0(i32 %V, i32* %P) {
300 store i32 %V, i32* %P
302 %P2 = bitcast i32* %P to i8*
303 %P3 = getelementptr i8* %P2, i32 2
307 ; CHECK: @coerce_offset0
312 ;; non-local i32/float -> i8 load forwarding.
313 define i8 @coerce_offset_nonlocal0(i32* %P, i1 %cond) {
314 %P2 = bitcast i32* %P to float*
315 %P3 = bitcast i32* %P to i8*
316 %P4 = getelementptr i8* %P3, i32 2
317 br i1 %cond, label %T, label %F
319 store i32 57005, i32* %P
323 store float 1.0, float* %P2
330 ; CHECK: @coerce_offset_nonlocal0
332 ; CHECK: %A = phi i8 [
338 ;; non-local i32 -> i8 partial redundancy load forwarding.
339 define i8 @coerce_offset_pre0(i32* %P, i1 %cond) {
340 %P3 = bitcast i32* %P to i8*
341 %P4 = getelementptr i8* %P3, i32 2
342 br i1 %cond, label %T, label %F
344 store i32 42, i32* %P
354 ; CHECK: @coerce_offset_pre0
356 ; CHECK: load i8* %P4
358 ; CHECK: %A = phi i8 [
363 define i32 @chained_load(i32** %p) {
368 store i32* %z, i32** %A
369 br i1 true, label %block2, label %block3
384 ; CHECK: @chained_load
385 ; CHECK: %z = load i32** %p
387 ; CHECK: %d = load i32* %z
388 ; CHECK-NEXT: ret i32 %d
392 declare i1 @cond() readonly
393 declare i1 @cond2() readonly
395 define i32 @phi_trans2() {
398 %P = alloca i32, i32 400
402 %A = phi i32 [1, %entry], [2, %F]
403 %cond2 = call i1 @cond()
404 br i1 %cond2, label %T1, label %TY
407 %P2 = getelementptr i32* %P, i32 %A
409 %cond = call i1 @cond2()
410 br i1 %cond, label %TX, label %F
413 %P3 = getelementptr i32* %P, i32 2
414 store i32 17, i32* %P3
416 store i32 42, i32* %P2 ; Provides "P[A]".
420 ; This load should not be compiled to 'ret i32 42'. An overly clever
421 ; implementation of GVN would see that we're returning 17 if the loop
422 ; executes once or 42 if it executes more than that, but we'd have to do
423 ; loop restructuring to expose this, and GVN shouldn't do this sort of CFG
433 define i32 @phi_trans3(i32* %p) {
436 br i1 true, label %block2, label %block3
439 store i32 87, i32* %p
443 %p2 = getelementptr i32* %p, i32 43
444 store i32 97, i32* %p2
448 %A = phi i32 [-1, %block2], [42, %block3]
449 br i1 true, label %block5, label %exit
452 ; CHECK-NEXT: %D = phi i32 [ 87, %block2 ], [ 97, %block3 ]
457 br i1 true, label %block6, label %exit
460 %C = getelementptr i32* %p, i32 %B
461 br i1 true, label %block7, label %exit
468 ; CHECK-NEXT: ret i32 %D
474 define i8 @phi_trans4(i8* %p) {
477 %X3 = getelementptr i8* %p, i32 192
478 store i8 192, i8* %X3
480 %X = getelementptr i8* %p, i32 4
485 %i = phi i32 [4, %entry], [192, %loop]
486 %X2 = getelementptr i8* %p, i32 %i
490 ; CHECK-NEXT: %Y2 = phi i8 [ %Y, %entry ], [ 0, %loop ]
493 %cond = call i1 @cond2()
495 %Z = bitcast i8 *%X3 to i32*
497 br i1 %cond, label %loop, label %out
504 define i8 @phi_trans5(i8* %p) {
508 %X4 = getelementptr i8* %p, i32 2
511 %X = getelementptr i8* %p, i32 4
516 %i = phi i32 [4, %entry], [3, %cont]
517 %X2 = getelementptr i8* %p, i32 %i
518 %Y2 = load i8* %X2 ; Ensure this load is not being incorrectly replaced.
519 %cond = call i1 @cond2()
520 br i1 %cond, label %cont, label %out
523 %Z = getelementptr i8* %X2, i32 -1
524 %Z2 = bitcast i8 *%Z to i32*
525 store i32 50462976, i32* %Z2 ;; (1 << 8) | (2 << 16) | (3 << 24)
529 ; CHECK-NEXT: getelementptr i8* %p, i32 3
530 ; CHECK-NEXT: load i8*
540 define i32 @memset_to_load() nounwind readnone {
542 %x = alloca [256 x i32], align 4 ; <[256 x i32]*> [#uses=2]
543 %tmp = bitcast [256 x i32]* %x to i8* ; <i8*> [#uses=1]
544 call void @llvm.memset.p0i8.i64(i8* %tmp, i8 0, i64 1024, i32 4, i1 false)
545 %arraydecay = getelementptr inbounds [256 x i32]* %x, i32 0, i32 0 ; <i32*>
546 %tmp1 = load i32* %arraydecay ; <i32> [#uses=1]
548 ; CHECK: @memset_to_load
553 ;;===----------------------------------------------------------------------===;;
554 ;; Load -> Load forwarding in partial alias case.
555 ;;===----------------------------------------------------------------------===;;
557 define i32 @load_load_partial_alias(i8* %P) nounwind ssp {
559 %0 = bitcast i8* %P to i32*
561 %add.ptr = getelementptr inbounds i8* %P, i64 1
562 %tmp5 = load i8* %add.ptr
563 %conv = zext i8 %tmp5 to i32
564 %add = add nsw i32 %tmp2, %conv
567 ; TEMPORARILYDISABLED: @load_load_partial_alias
568 ; TEMPORARILYDISABLED: load i32*
569 ; TEMPORARILYDISABLED-NOT: load
570 ; TEMPORARILYDISABLED: lshr i32 {{.*}}, 8
571 ; TEMPORARILYDISABLED-NOT: load
572 ; TEMPORARILYDISABLED: trunc i32 {{.*}} to i8
573 ; TEMPORARILYDISABLED-NOT: load
574 ; TEMPORARILYDISABLED: ret i32
578 ; Cross block partial alias case.
579 define i32 @load_load_partial_alias_cross_block(i8* %P) nounwind ssp {
581 %xx = bitcast i8* %P to i32*
582 %x1 = load i32* %xx, align 4
583 %cmp = icmp eq i32 %x1, 127
584 br i1 %cmp, label %land.lhs.true, label %if.end
586 land.lhs.true: ; preds = %entry
587 %arrayidx4 = getelementptr inbounds i8* %P, i64 1
588 %tmp5 = load i8* %arrayidx4, align 1
589 %conv6 = zext i8 %tmp5 to i32
594 ; TEMPORARILY_DISABLED: @load_load_partial_alias_cross_block
595 ; TEMPORARILY_DISABLED: land.lhs.true:
596 ; TEMPORARILY_DISABLED-NOT: load i8
597 ; TEMPORARILY_DISABLED: ret i32 %conv6
601 ;;===----------------------------------------------------------------------===;;
603 ;;===----------------------------------------------------------------------===;;
605 %widening1 = type { i32, i8, i8, i8, i8 }
607 @f = global %widening1 zeroinitializer, align 4
609 define i32 @test_widening1(i8* %P) nounwind ssp noredzone {
611 %tmp = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 1), align 4
612 %conv = zext i8 %tmp to i32
613 %tmp1 = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 2), align 1
614 %conv2 = zext i8 %tmp1 to i32
615 %add = add nsw i32 %conv, %conv2
617 ; CHECK: @test_widening1
624 define i32 @test_widening2() nounwind ssp noredzone {
626 %tmp = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 1), align 4
627 %conv = zext i8 %tmp to i32
628 %tmp1 = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 2), align 1
629 %conv2 = zext i8 %tmp1 to i32
630 %add = add nsw i32 %conv, %conv2
632 %tmp2 = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 3), align 2
633 %conv3 = zext i8 %tmp2 to i32
634 %add2 = add nsw i32 %add, %conv3
636 %tmp3 = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 4), align 1
637 %conv4 = zext i8 %tmp3 to i32
638 %add3 = add nsw i32 %add2, %conv3
641 ; CHECK: @test_widening2
648 declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
650 declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
652 ;;===----------------------------------------------------------------------===;;
653 ;; Load -> Store dependency which isn't interfered with by a call that happens
654 ;; before the pointer was captured.
655 ;;===----------------------------------------------------------------------===;;
657 %class.X = type { [8 x i8] }
659 @_ZTV1X = weak_odr constant [5 x i8*] zeroinitializer
660 @_ZTV1Y = weak_odr constant [5 x i8*] zeroinitializer
663 declare void @use3(i8***, i8**)
666 define void @test_escape1() nounwind {
667 %x = alloca i8**, align 8
668 store i8** getelementptr inbounds ([5 x i8*]* @_ZTV1X, i64 0, i64 2), i8*** %x, align 8
669 call void @use() nounwind
670 %DEAD = load i8*** %x, align 8
671 call void @use3(i8*** %x, i8** %DEAD) nounwind
673 ; CHECK: test_escape1