From 18128bd3768e7995deef1a90358f2d1f2b8ef2ed Mon Sep 17 00:00:00 2001
From: Ahmed Bougacha
Date: Tue, 31 Mar 2015 03:16:51 +0000
Subject: [PATCH] [X86] Generate MOVNT for all vector types.

We used to miss non-Q YMM integer vectors and non-Q/D XMM integer vectors.

While there, change the v4i32 patterns to prefer MOVNTDQ.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@233668 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86InstrSSE.td     |  21 ++-
 test/CodeGen/X86/nontemporal-2.ll | 286 +++++++++++++++++++++++++++++-
 2 files changed, 298 insertions(+), 9 deletions(-)

diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index ccdbf0e28df..9b134d7a690 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -3678,13 +3678,30 @@ def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                    PS, Requires<[HasSSE2]>;
 } // SchedRW = [WriteStore]
 
+let Predicates = [HasAVX2, NoVLX] in {
+  def : Pat<(alignednontemporalstore (v8i32 VR256:$src), addr:$dst),
+            (VMOVNTDQYmr addr:$dst, VR256:$src)>;
+  def : Pat<(alignednontemporalstore (v16i16 VR256:$src), addr:$dst),
+            (VMOVNTDQYmr addr:$dst, VR256:$src)>;
+  def : Pat<(alignednontemporalstore (v32i8 VR256:$src), addr:$dst),
+            (VMOVNTDQYmr addr:$dst, VR256:$src)>;
+}
+
 let Predicates = [HasAVX, NoVLX] in {
   def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
-            (VMOVNTPSmr addr:$dst, VR128:$src)>;
+            (VMOVNTDQmr addr:$dst, VR128:$src)>;
+  def : Pat<(alignednontemporalstore (v8i16 VR128:$src), addr:$dst),
+            (VMOVNTDQmr addr:$dst, VR128:$src)>;
+  def : Pat<(alignednontemporalstore (v16i8 VR128:$src), addr:$dst),
+            (VMOVNTDQmr addr:$dst, VR128:$src)>;
 }
 
 def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
-          (MOVNTPSmr addr:$dst, VR128:$src)>;
+          (MOVNTDQmr addr:$dst, VR128:$src)>;
+def : Pat<(alignednontemporalstore (v8i16 VR128:$src), addr:$dst),
+          (MOVNTDQmr addr:$dst, VR128:$src)>;
+def : Pat<(alignednontemporalstore (v16i8 VR128:$src), addr:$dst),
+          (MOVNTDQmr addr:$dst, VR128:$src)>;
 
 } // AddedComplexity
 
diff --git a/test/CodeGen/X86/nontemporal-2.ll b/test/CodeGen/X86/nontemporal-2.ll
index f62f3725d7d..8c08b3c163c 100644
--- a/test/CodeGen/X86/nontemporal-2.ll
+++ b/test/CodeGen/X86/nontemporal-2.ll
@@ -1,31 +1,303 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7 | FileCheck %s -check-prefix=CHECK -check-prefix=SSE
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx | FileCheck %s -check-prefix=CHECK -check-prefix=AVX
-
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=core-avx2 | FileCheck %s -check-prefix=CHECK -check-prefix=AVX2
 ; Make sure that we generate non-temporal stores for the test cases below.
+; We use xorps for zeroing, so domain information isn't available anymore.
 
-define void @test1(<4 x float>* %dst) {
-; CHECK-LABEL: test1:
+define void @test_zero_v4f32(<4 x float>* %dst) {
+; CHECK-LABEL: test_zero_v4f32:
 ; SSE: movntps
 ; AVX: vmovntps
   store <4 x float> zeroinitializer, <4 x float>* %dst, align 16, !nontemporal !1
   ret void
 }
 
-define void @test2(<4 x i32>* %dst) {
-; CHECK-LABEL: test2:
+define void @test_zero_v4i32(<4 x i32>* %dst) {
+; CHECK-LABEL: test_zero_v4i32:
 ; SSE: movntps
 ; AVX: vmovntps
   store <4 x i32> zeroinitializer, <4 x i32>* %dst, align 16, !nontemporal !1
   ret void
 }
 
-define void @test3(<2 x double>* %dst) {
-; CHECK-LABEL: test3:
+define void @test_zero_v2f64(<2 x double>* %dst) {
+; CHECK-LABEL: test_zero_v2f64:
 ; SSE: movntps
 ; AVX: vmovntps
   store <2 x double> zeroinitializer, <2 x double>* %dst, align 16, !nontemporal !1
   ret void
 }
 
+define void @test_zero_v2i64(<2 x i64>* %dst) {
+; CHECK-LABEL: test_zero_v2i64:
+; SSE: movntps
+; AVX: vmovntps
+  store <2 x i64> zeroinitializer, <2 x i64>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+define void @test_zero_v8i16(<8 x i16>* %dst) {
+; CHECK-LABEL: test_zero_v8i16:
+; SSE: movntps
+; AVX: vmovntps
+  store <8 x i16> zeroinitializer, <8 x i16>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+define void @test_zero_v16i8(<16 x i8>* %dst) {
+; CHECK-LABEL: test_zero_v16i8:
+; SSE: movntps
+; AVX: vmovntps
+  store <16 x i8> zeroinitializer, <16 x i8>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+; And now YMM versions.
+
+define void @test_zero_v8f32(<8 x float>* %dst) {
+; CHECK-LABEL: test_zero_v8f32:
+; AVX: vmovntps %ymm
+  store <8 x float> zeroinitializer, <8 x float>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_zero_v8i32(<8 x i32>* %dst) {
+; CHECK-LABEL: test_zero_v8i32:
+; AVX2: vmovntps %ymm
+  store <8 x i32> zeroinitializer, <8 x i32>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_zero_v4f64(<4 x double>* %dst) {
+; CHECK-LABEL: test_zero_v4f64:
+; AVX: vmovntps %ymm
+  store <4 x double> zeroinitializer, <4 x double>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_zero_v4i64(<4 x i64>* %dst) {
+; CHECK-LABEL: test_zero_v4i64:
+; AVX2: vmovntps %ymm
+  store <4 x i64> zeroinitializer, <4 x i64>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_zero_v16i16(<16 x i16>* %dst) {
+; CHECK-LABEL: test_zero_v16i16:
+; AVX2: vmovntps %ymm
+  store <16 x i16> zeroinitializer, <16 x i16>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_zero_v32i8(<32 x i8>* %dst) {
+; CHECK-LABEL: test_zero_v32i8:
+; AVX2: vmovntps %ymm
+  store <32 x i8> zeroinitializer, <32 x i8>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+
+; Check that we also handle arguments. Here the type survives longer.
+
+define void @test_arg_v4f32(<4 x float> %arg, <4 x float>* %dst) {
+; CHECK-LABEL: test_arg_v4f32:
+; SSE: movntps
+; AVX: vmovntps
+  store <4 x float> %arg, <4 x float>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+define void @test_arg_v4i32(<4 x i32> %arg, <4 x i32>* %dst) {
+; CHECK-LABEL: test_arg_v4i32:
+; SSE: movntps
+; AVX: vmovntps
+  store <4 x i32> %arg, <4 x i32>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+define void @test_arg_v2f64(<2 x double> %arg, <2 x double>* %dst) {
+; CHECK-LABEL: test_arg_v2f64:
+; SSE: movntps
+; AVX: vmovntps
+  store <2 x double> %arg, <2 x double>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+define void @test_arg_v2i64(<2 x i64> %arg, <2 x i64>* %dst) {
+; CHECK-LABEL: test_arg_v2i64:
+; SSE: movntps
+; AVX: vmovntps
+  store <2 x i64> %arg, <2 x i64>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+define void @test_arg_v8i16(<8 x i16> %arg, <8 x i16>* %dst) {
+; CHECK-LABEL: test_arg_v8i16:
+; SSE: movntps
+; AVX: vmovntps
+  store <8 x i16> %arg, <8 x i16>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+define void @test_arg_v16i8(<16 x i8> %arg, <16 x i8>* %dst) {
+; CHECK-LABEL: test_arg_v16i8:
+; SSE: movntps
+; AVX: vmovntps
+  store <16 x i8> %arg, <16 x i8>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+; And now YMM versions.
+
+define void @test_arg_v8f32(<8 x float> %arg, <8 x float>* %dst) {
+; CHECK-LABEL: test_arg_v8f32:
+; AVX: vmovntps %ymm
+  store <8 x float> %arg, <8 x float>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_arg_v8i32(<8 x i32> %arg, <8 x i32>* %dst) {
+; CHECK-LABEL: test_arg_v8i32:
+; AVX2: vmovntps %ymm
+  store <8 x i32> %arg, <8 x i32>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_arg_v4f64(<4 x double> %arg, <4 x double>* %dst) {
+; CHECK-LABEL: test_arg_v4f64:
+; AVX: vmovntps %ymm
+  store <4 x double> %arg, <4 x double>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_arg_v4i64(<4 x i64> %arg, <4 x i64>* %dst) {
+; CHECK-LABEL: test_arg_v4i64:
+; AVX2: vmovntps %ymm
+  store <4 x i64> %arg, <4 x i64>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_arg_v16i16(<16 x i16> %arg, <16 x i16>* %dst) {
+; CHECK-LABEL: test_arg_v16i16:
+; AVX2: vmovntps %ymm
+  store <16 x i16> %arg, <16 x i16>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_arg_v32i8(<32 x i8> %arg, <32 x i8>* %dst) {
+; CHECK-LABEL: test_arg_v32i8:
+; AVX2: vmovntps %ymm
+  store <32 x i8> %arg, <32 x i8>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+
+; Now check that if the execution domain is trivially visible, we use it.
+; We use an add to make the type survive all the way to the MOVNT.
+
+define void @test_op_v4f32(<4 x float> %a, <4 x float> %b, <4 x float>* %dst) {
+; CHECK-LABEL: test_op_v4f32:
+; SSE: movntps
+; AVX: vmovntps
+  %r = fadd <4 x float> %a, %b
+  store <4 x float> %r, <4 x float>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+define void @test_op_v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32>* %dst) {
+; CHECK-LABEL: test_op_v4i32:
+; SSE: movntdq
+; AVX: vmovntdq
+  %r = add <4 x i32> %a, %b
+  store <4 x i32> %r, <4 x i32>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+define void @test_op_v2f64(<2 x double> %a, <2 x double> %b, <2 x double>* %dst) {
+; CHECK-LABEL: test_op_v2f64:
+; SSE: movntpd
+; AVX: vmovntpd
+  %r = fadd <2 x double> %a, %b
+  store <2 x double> %r, <2 x double>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+define void @test_op_v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64>* %dst) {
+; CHECK-LABEL: test_op_v2i64:
+; SSE: movntdq
+; AVX: vmovntdq
+  %r = add <2 x i64> %a, %b
+  store <2 x i64> %r, <2 x i64>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+define void @test_op_v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16>* %dst) {
+; CHECK-LABEL: test_op_v8i16:
+; SSE: movntdq
+; AVX: vmovntdq
+  %r = add <8 x i16> %a, %b
+  store <8 x i16> %r, <8 x i16>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+define void @test_op_v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8>* %dst) {
+; CHECK-LABEL: test_op_v16i8:
+; SSE: movntdq
+; AVX: vmovntdq
+  %r = add <16 x i8> %a, %b
+  store <16 x i8> %r, <16 x i8>* %dst, align 16, !nontemporal !1
+  ret void
+}
+
+; And now YMM versions.
+
+define void @test_op_v8f32(<8 x float> %a, <8 x float> %b, <8 x float>* %dst) {
+; CHECK-LABEL: test_op_v8f32:
+; AVX: vmovntps %ymm
+  %r = fadd <8 x float> %a, %b
+  store <8 x float> %r, <8 x float>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_op_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %dst) {
+; CHECK-LABEL: test_op_v8i32:
+; AVX2: vmovntdq %ymm
+  %r = add <8 x i32> %a, %b
+  store <8 x i32> %r, <8 x i32>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_op_v4f64(<4 x double> %a, <4 x double> %b, <4 x double>* %dst) {
+; CHECK-LABEL: test_op_v4f64:
+; AVX: vmovntpd %ymm
+  %r = fadd <4 x double> %a, %b
+  store <4 x double> %r, <4 x double>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_op_v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %dst) {
+; CHECK-LABEL: test_op_v4i64:
+; AVX2: vmovntdq %ymm
+  %r = add <4 x i64> %a, %b
+  store <4 x i64> %r, <4 x i64>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_op_v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %dst) {
+; CHECK-LABEL: test_op_v16i16:
+; AVX2: vmovntdq %ymm
+  %r = add <16 x i16> %a, %b
+  store <16 x i16> %r, <16 x i16>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
+define void @test_op_v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %dst) {
+; CHECK-LABEL: test_op_v32i8:
+; AVX2: vmovntdq %ymm
+  %r = add <32 x i8> %a, %b
+  store <32 x i8> %r, <32 x i8>* %dst, align 32, !nontemporal !1
+  ret void
+}
+
 !1 = !{i32 1}
-- 
2.34.1
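For illustration only, and not part of the patch: a minimal C sketch of the kind of source that produces the !nontemporal vector stores exercised by the tests above. It assumes clang's __builtin_nontemporal_store builtin; the file name, function name, and exact flags are made up for the example. Built with optimization and AVX2 enabled, the integer store below should now select vmovntdq under the new patterns rather than a regular vector store.

/* stream_example.c -- illustrative sketch, not part of this commit.
 * Assumes clang's __builtin_nontemporal_store builtin, which emits a
 * store carrying !nontemporal metadata (the same IR form the tests
 * above exercise).  Compile with something like:
 *   clang -O2 -mavx2 -S stream_example.c
 */
typedef int v8i32 __attribute__((vector_size(32)));  /* 256-bit integer vector */

void stream_add(v8i32 *dst, v8i32 a, v8i32 b) {
  v8i32 sum = a + b;                      /* integer add keeps the DQ domain visible */
  __builtin_nontemporal_store(sum, dst);  /* expected: vmovntdq with the new patterns */
}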