From 1efa1696e3b7c170f3abc704e2dc4b8052849563 Mon Sep 17 00:00:00 2001
From: Chris Lattner
Date: Wed, 26 Sep 2007 06:29:31 +0000
Subject: [PATCH] move PR1160 here.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@42347 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/README.txt | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/lib/Target/X86/README.txt b/lib/Target/X86/README.txt
index 40cbd864354..aee90a23154 100644
--- a/lib/Target/X86/README.txt
+++ b/lib/Target/X86/README.txt
@@ -1208,3 +1208,34 @@ __Z11no_overflowjj:
 
 Re-materialize MOV32r0 etc. with xor instead of changing them to moves if the
 condition register is dead. xor reg reg is shorter than mov reg, #0.
+
+//===---------------------------------------------------------------------===//
+
+We aren't matching RMW instructions aggressively
+enough. Here's a reduced testcase (more in PR1160):
+
+define void @test(i32* %huge_ptr, i32* %target_ptr) {
+        %A = load i32* %huge_ptr                ; <i32> [#uses=1]
+        %B = load i32* %target_ptr              ; <i32> [#uses=1]
+        %C = or i32 %A, %B              ; <i32> [#uses=1]
+        store i32 %C, i32* %target_ptr
+        ret void
+}
+
+$ llvm-as < t.ll | llc -march=x86-64
+
+_test:
+        movl    (%rdi), %eax
+        orl     (%rsi), %eax
+        movl    %eax, (%rsi)
+        ret
+
+That should be something like:
+
+_test:
+        movl    (%rdi), %eax
+        orl     %eax, (%rsi)
+        ret
+
+//===---------------------------------------------------------------------===//
+
-- 
2.34.1
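
For context, a minimal C reduction of the same pattern (an assumption about the
kind of source that produces this IR; it is not taken from the patch or from
PR1160) would be:

/* The or-into-memory below should ideally select the single read-modify-write
   form "orl %reg, (%mem)" rather than a separate load / or / store sequence. */
void test(int *huge_ptr, int *target_ptr) {
  *target_ptr |= *huge_ptr;   /* load, or, store back to the same address */
}

Compiling this to .ll and running it through llc -march=x86-64 as above should
show whether the memory-operand form is being selected.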