; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2,sse-unaligned-mem | FileCheck %s --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=AVX

; Although we have the ability to fold an unaligned load with AVX
; and under special conditions with some SSE implementations, we
; cannot fold the load under any circumstances in these test
; cases because they are not 16-byte loads. The load must be
; executed as a scalar ('movs*') with a zero extension to
; 128-bits and then used in the packed logical ('andp*') op.
; Note that the IR below contains no explicit load: the scalar load
; under test is the constant-pool load of 1.0 that codegen materializes
; when lowering the sitofp of the zero-extended compare result.
; PR22371 - http://llvm.org/bugs/show_bug.cgi?id=22371
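;
; For contrast: when the load is a full 16-byte access, it can fold directly
; into the packed logical op. A minimal illustrative sketch (not checked by
; the RUN lines above; the exact instruction selection is an assumption):
;
;   define <2 x double> @load_v2f64_can_fold(<2 x double> %m, <2 x double>* %p) {
;     %v = load <2 x double>, <2 x double>* %p, align 16
;     %xm = bitcast <2 x double> %m to <2 x i64>
;     %xv = bitcast <2 x double> %v to <2 x i64>
;     %and = and <2 x i64> %xm, %xv
;     %r = bitcast <2 x i64> %and to <2 x double>
;     ret <2 x double> %r
;   }
;
; With SSE2, the aligned 16-byte load can fold into a single memory-operand
; instruction such as 'andpd (%rdi), %xmm0'.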

define double @load_double_no_fold(double %x, double %y) {
; SSE2-LABEL: load_double_no_fold:
; SSE2:       # %bb.0:
; SSE2-NEXT:    cmplesd %xmm0, %xmm1
; SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT:    andpd %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: load_double_no_fold:
; AVX:       # %bb.0:
; AVX-NEXT:    vcmplesd %xmm0, %xmm1, %xmm0
; AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT:    vandpd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %cmp = fcmp oge double %x, %y
  %zext = zext i1 %cmp to i32
  %conv = sitofp i32 %zext to double
  ret double %conv
}

define float @load_float_no_fold(float %x, float %y) {
; SSE2-LABEL: load_float_no_fold:
; SSE2:       # %bb.0:
; SSE2-NEXT:    cmpless %xmm0, %xmm1
; SSE2-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT:    andps %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: load_float_no_fold:
; AVX:       # %bb.0:
; AVX-NEXT:    vcmpless %xmm0, %xmm1, %xmm0
; AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %cmp = fcmp oge float %x, %y
  %zext = zext i1 %cmp to i32
  %conv = sitofp i32 %zext to float
  ret float %conv
}
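
; Similarly with AVX, even an unaligned 16-byte load can fold, since VEX
; encodings accept unaligned memory operands. Illustrative sketch only (not
; exercised by the RUN lines above; the folded form is an assumption):
;
;   define <4 x float> @load_v4f32_can_fold_avx(<4 x float> %m, <4 x float>* %p) {
;     %v = load <4 x float>, <4 x float>* %p, align 1
;     %xm = bitcast <4 x float> %m to <4 x i32>
;     %xv = bitcast <4 x float> %v to <4 x i32>
;     %and = and <4 x i32> %xm, %xv
;     %r = bitcast <4 x i32> %and to <4 x float>
;     ret <4 x float> %r
;   }
;
; Under -mattr=avx this can become 'vandps (%rdi), %xmm0, %xmm0'.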