case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
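+ // The VALU form of S_NOT_B32 is V_NOT_B32 in its VOP1 (_e32) encoding.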
+ case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
}
}
def S_CMOV_B64 : SOP1_64 <0x00000006, "S_CMOV_B64", []>;
} // End isMoveImm = 1
-def S_NOT_B32 : SOP1_32 <0x00000007, "S_NOT_B32", []>;
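+// 'not' is a PatFrag for (xor x, -1), so an i32 bitwise inversion selects
+// directly to S_NOT_B32.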
+def S_NOT_B32 : SOP1_32 <0x00000007, "S_NOT_B32",
+ [(set i32:$dst, (not i32:$src0))]
+>;
+
def S_NOT_B64 : SOP1_64 <0x00000008, "S_NOT_B64", []>;
def S_WQM_B32 : SOP1_32 <0x00000009, "S_WQM_B32", []>;
def S_WQM_B64 : SOP1_64 <0x0000000a, "S_WQM_B64", []>;
store i32 %result, i32 addrspace(1)* %out
ret void
}
+
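+; The kernel argument is uniform and stays in an SGPR, so the new pattern
+; selects the scalar S_NOT_B32.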
+; SI-CHECK-LABEL: @scalar_not_i32
+; SI-CHECK: S_NOT_B32
+define void @scalar_not_i32(i32 addrspace(1)* %out, i32 %a) {
+ %result = xor i32 %a, -1
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
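+; The loaded value lives in a VGPR, so the S_NOT_B32 produced by the new
+; pattern is rewritten to V_NOT_B32 when the instruction moves to the VALU.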
+; SI-CHECK-LABEL: @vector_not_i32
+; SI-CHECK: V_NOT_B32
+define void @vector_not_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0) {
+ %a = load i32 addrspace(1)* %in0
+ %result = xor i32 %a, -1
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}