// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <limits>

#include "src/common/globals.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/objects/objects-inl.h"
#include "test/unittests/compiler/backend/instruction-selector-unittest.h"

#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/simd-shuffle.h"
#endif  // V8_ENABLE_WEBASSEMBLY

namespace v8 {
namespace internal {
namespace compiler {

// -----------------------------------------------------------------------------
// Conversions.

TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter) {
  StreamBuilder m(this, MachineType::Float64(), MachineType::Float32());
  m.Return(m.ChangeFloat32ToFloat64(m.Parameter(0)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kSSEFloat32ToFloat64, s[0]->arch_opcode());
  EXPECT_EQ(1U, s[0]->InputCount());
  EXPECT_EQ(1U, s[0]->OutputCount());
}

TEST_F(InstructionSelectorTest, ChangeInt32ToInt64WithParameter) {
  StreamBuilder m(this, MachineType::Int64(), MachineType::Int32());
  m.Return(m.ChangeInt32ToInt64(m.Parameter(0)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kX64Movsxlq, s[0]->arch_opcode());
}

TEST_F(InstructionSelectorTest, ChangeUint32ToFloat64WithParameter) {
  StreamBuilder m(this, MachineType::Float64(), MachineType::Uint32());
  m.Return(m.ChangeUint32ToFloat64(m.Parameter(0)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kSSEUint32ToFloat64, s[0]->arch_opcode());
}

TEST_F(InstructionSelectorTest, ChangeUint32ToUint64WithParameter) {
  StreamBuilder m(this, MachineType::Uint64(), MachineType::Uint32());
  m.Return(m.ChangeUint32ToUint64(m.Parameter(0)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
}

TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
  StreamBuilder m(this, MachineType::Float32(), MachineType::Float64());
  m.Return(m.TruncateFloat64ToFloat32(m.Parameter(0)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kSSEFloat64ToFloat32, s[0]->arch_opcode());
  EXPECT_EQ(1U, s[0]->InputCount());
  EXPECT_EQ(1U, s[0]->OutputCount());
}

TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithParameter) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int64());
  m.Return(m.TruncateInt64ToInt32(m.Parameter(0)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
}

TEST_F(InstructionSelectorTest, SelectWord32) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  Node* cond = m.Int32Constant(1);
  m.Return(m.Word32Select(cond, m.Parameter(0), m.Parameter(1)));
  Stream s = m.Build();
  EXPECT_EQ(kX64Cmp32, s[0]->arch_opcode());
  EXPECT_EQ(4U, s[0]->InputCount());
  EXPECT_EQ(1U, s[0]->OutputCount());
  EXPECT_EQ(kFlags_select, s[0]->flags_mode());
  EXPECT_EQ(kNotEqual, s[0]->flags_condition());
  EXPECT_TRUE(s.IsSameAsInput(s[0]->Output(), 2));
}

TEST_F(InstructionSelectorTest, SelectWord64) {
  StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
                  MachineType::Int64());
  Node* cond = m.Int32Constant(1);
  m.Return(m.Word64Select(cond, m.Parameter(0), m.Parameter(1)));
  Stream s = m.Build();
  EXPECT_EQ(kX64Cmp32, s[0]->arch_opcode());
  EXPECT_EQ(4U, s[0]->InputCount());
  EXPECT_EQ(1U, s[0]->OutputCount());
  EXPECT_EQ(kFlags_select, s[0]->flags_mode());
  EXPECT_EQ(kNotEqual, s[0]->flags_condition());
  EXPECT_TRUE(s.IsSameAsInput(s[0]->Output(), 2));
}

namespace {
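// Note (summary of the table below): each entry pairs a loaded representation
// with the extending 64-bit load that ChangeInt32ToInt64(Load(...)) is
// expected to fold into: sign-extending loads for the signed types and
// zero-extending loads for the unsigned ones.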
struct LoadWithToInt64Extension {
  MachineType type;
  ArchOpcode expected_opcode;
};

std::ostream& operator<<(std::ostream& os,
                         const LoadWithToInt64Extension& i32toi64) {
  return os << i32toi64.type;
}

static const LoadWithToInt64Extension kLoadWithToInt64Extensions[] = {
    {MachineType::Int8(), kX64Movsxbq},
    {MachineType::Uint8(), kX64Movzxbq},
    {MachineType::Int16(), kX64Movsxwq},
    {MachineType::Uint16(), kX64Movzxwq},
    {MachineType::Int32(), kX64Movsxlq}};

// The parameterized tests that use the following type are intentionally part
// of the anonymous namespace. The issue here is that the type parameter is
// using a type that is in the anonymous namespace, but the class generated by
// TEST_P is not. This will cause GCC to generate a -Wsubobject-linkage
// warning.
//
// In this case there will only be a single translation unit and the warning
// about subobject-linkage can be avoided by placing the class generated
// by TEST_P in the anonymous namespace as well.
using InstructionSelectorChangeInt32ToInt64Test =
    InstructionSelectorTestWithParam<LoadWithToInt64Extension>;

TEST_P(InstructionSelectorChangeInt32ToInt64Test, ChangeInt32ToInt64WithLoad) {
  const LoadWithToInt64Extension extension = GetParam();
  StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer());
  m.Return(m.ChangeInt32ToInt64(m.Load(extension.type, m.Parameter(0))));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(extension.expected_opcode, s[0]->arch_opcode());
}

}  // namespace

INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
                         InstructionSelectorChangeInt32ToInt64Test,
                         ::testing::ValuesIn(kLoadWithToInt64Extensions));

// -----------------------------------------------------------------------------
// Loads and stores

namespace {

struct MemoryAccess {
  MachineType type;
  ArchOpcode load_opcode;
  ArchOpcode store_opcode;
};

std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
  return os << memacc.type;
}

static const MemoryAccess kMemoryAccesses[] = {
    {MachineType::Int8(), kX64Movsxbl, kX64Movb},
    {MachineType::Uint8(), kX64Movzxbl, kX64Movb},
    {MachineType::Int16(), kX64Movsxwl, kX64Movw},
    {MachineType::Uint16(), kX64Movzxwl, kX64Movw},
    {MachineType::Int32(), kX64Movl, kX64Movl},
    {MachineType::Uint32(), kX64Movl, kX64Movl},
    {MachineType::Int64(), kX64Movq, kX64Movq},
    {MachineType::Uint64(), kX64Movq, kX64Movq},
    {MachineType::Float32(), kX64Movss, kX64Movss},
    {MachineType::Float64(), kX64Movsd, kX64Movsd}};

// The parameterized tests that use the following type are intentionally part
// of the anonymous namespace. The issue here is that the type parameter is
// using a type that is in the anonymous namespace, but the class generated by
// TEST_P is not. This will cause GCC to generate a -Wsubobject-linkage
// warning.
//
// In this case there will only be a single translation unit and the warning
// about subobject-linkage can be avoided by placing the class generated
// by TEST_P in the anonymous namespace as well.
using InstructionSelectorMemoryAccessTest =
    InstructionSelectorTestWithParam<MemoryAccess>;

TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
  const MemoryAccess memacc = GetParam();
  StreamBuilder m(this, memacc.type, MachineType::Pointer(),
                  MachineType::Int32());
  m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
  EXPECT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(1U, s[0]->OutputCount());
}

TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
  const MemoryAccess memacc = GetParam();
  StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
                  MachineType::Int32(), memacc.type);
  m.Store(memacc.type.representation(), m.Parameter(0), m.Parameter(1),
          m.Parameter(2), kNoWriteBarrier);
  m.Return(m.Int32Constant(0));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
  EXPECT_EQ(3U, s[0]->InputCount());
  EXPECT_EQ(0U, s[0]->OutputCount());
}

}  // namespace

INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
                         InstructionSelectorMemoryAccessTest,
                         ::testing::ValuesIn(kMemoryAccesses));

// -----------------------------------------------------------------------------
// ChangeUint32ToUint64.

namespace {

using Constructor = Node* (RawMachineAssembler::*)(Node*, Node*);

struct BinaryOperation {
  Constructor constructor;
  const char* constructor_name;
};

std::ostream& operator<<(std::ostream& os, const BinaryOperation& bop) {
  return os << bop.constructor_name;
}

const BinaryOperation kWord32BinaryOperations[] = {
    {&RawMachineAssembler::Word32And, "Word32And"},
    {&RawMachineAssembler::Word32Or, "Word32Or"},
    {&RawMachineAssembler::Word32Xor, "Word32Xor"},
    {&RawMachineAssembler::Word32Shl, "Word32Shl"},
    {&RawMachineAssembler::Word32Shr, "Word32Shr"},
    {&RawMachineAssembler::Word32Sar, "Word32Sar"},
    {&RawMachineAssembler::Word32Ror, "Word32Ror"},
    {&RawMachineAssembler::Word32Equal, "Word32Equal"},
    {&RawMachineAssembler::Int32Add, "Int32Add"},
    {&RawMachineAssembler::Int32Sub, "Int32Sub"},
    {&RawMachineAssembler::Int32Mul, "Int32Mul"},
    {&RawMachineAssembler::Int32MulHigh, "Int32MulHigh"},
    {&RawMachineAssembler::Int32Div, "Int32Div"},
    {&RawMachineAssembler::Int32LessThan, "Int32LessThan"},
    {&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual"},
    {&RawMachineAssembler::Int32Mod, "Int32Mod"},
    {&RawMachineAssembler::Uint32Div, "Uint32Div"},
    {&RawMachineAssembler::Uint32LessThan, "Uint32LessThan"},
    {&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual"},
    {&RawMachineAssembler::Uint32Mod, "Uint32Mod"}};

// The parameterized tests that use the following type are intentionally part
// of the anonymous namespace. The issue here is that the type parameter is
// using a type that is in the anonymous namespace, but the class generated by
// TEST_P is not. This will cause GCC to generate a -Wsubobject-linkage
// warning.
//
// In this case there will only be a single translation unit and the warning
// about subobject-linkage can be avoided by placing the class generated
// by TEST_P in the anonymous namespace as well.
using InstructionSelectorChangeUint32ToUint64Test =
    InstructionSelectorTestWithParam<BinaryOperation>;

TEST_P(InstructionSelectorChangeUint32ToUint64Test, ChangeUint32ToUint64) {
  const BinaryOperation& bop = GetParam();
  StreamBuilder m(this, MachineType::Uint64(), MachineType::Int32(),
                  MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const p1 = m.Parameter(1);
  m.Return(m.ChangeUint32ToUint64((m.*bop.constructor)(p0, p1)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
}

}  // namespace

INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
                         InstructionSelectorChangeUint32ToUint64Test,
                         ::testing::ValuesIn(kWord32BinaryOperations));

// -----------------------------------------------------------------------------
// CanElideChangeUint32ToUint64

namespace {

template <typename T>
struct MachInst {
  T constructor;
  const char* constructor_name;
  ArchOpcode arch_opcode;
  MachineType machine_type;
};

using MachInst2 = MachInst<Node* (RawMachineAssembler::*)(Node*, Node*)>;

// X64 instructions that clear the top 32 bits of the destination.
const MachInst2 kCanElideChangeUint32ToUint64[] = {
    {&RawMachineAssembler::Word32And, "Word32And", kX64And32,
     MachineType::Uint32()},
    {&RawMachineAssembler::Word32Or, "Word32Or", kX64Or32,
     MachineType::Uint32()},
    {&RawMachineAssembler::Word32Xor, "Word32Xor", kX64Xor32,
     MachineType::Uint32()},
    {&RawMachineAssembler::Word32Shl, "Word32Shl", kX64Shl32,
     MachineType::Uint32()},
    {&RawMachineAssembler::Word32Shr, "Word32Shr", kX64Shr32,
     MachineType::Uint32()},
    {&RawMachineAssembler::Word32Sar, "Word32Sar", kX64Sar32,
     MachineType::Uint32()},
    {&RawMachineAssembler::Word32Ror, "Word32Ror", kX64Ror32,
     MachineType::Uint32()},
    {&RawMachineAssembler::Word32Equal, "Word32Equal", kX64Cmp32,
     MachineType::Uint32()},
    {&RawMachineAssembler::Int32Add, "Int32Add", kX64Lea32,
     MachineType::Int32()},
    {&RawMachineAssembler::Int32Sub, "Int32Sub", kX64Sub32,
     MachineType::Int32()},
    {&RawMachineAssembler::Int32Mul, "Int32Mul", kX64Imul32,
     MachineType::Int32()},
    {&RawMachineAssembler::Int32MulHigh, "Int32MulHigh", kX64ImulHigh32,
     MachineType::Int32()},
    {&RawMachineAssembler::Int32Div, "Int32Div", kX64Idiv32,
     MachineType::Int32()},
    {&RawMachineAssembler::Int32LessThan, "Int32LessThan", kX64Cmp32,
     MachineType::Int32()},
    {&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
     kX64Cmp32, MachineType::Int32()},
    {&RawMachineAssembler::Int32Mod, "Int32Mod", kX64Idiv32,
     MachineType::Int32()},
    {&RawMachineAssembler::Uint32Div, "Uint32Div", kX64Udiv32,
     MachineType::Uint32()},
    {&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kX64Cmp32,
     MachineType::Uint32()},
    {&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
     kX64Cmp32, MachineType::Uint32()},
    {&RawMachineAssembler::Uint32Mod, "Uint32Mod", kX64Udiv32,
     MachineType::Uint32()},
};

// The parameterized tests that use the following type are intentionally part
// of the anonymous namespace. The issue here is that the type parameter is
// using a type that is in the anonymous namespace, but the class generated by
// TEST_P is not. This will cause GCC to generate a -Wsubobject-linkage
// warning.
//
// In this case there will only be a single translation unit and the warning
// about subobject-linkage can be avoided by placing the class generated
// by TEST_P in the anonymous namespace as well.
using InstructionSelectorElidedChangeUint32ToUint64Test =
    InstructionSelectorTestWithParam<MachInst2>;

TEST_P(InstructionSelectorElidedChangeUint32ToUint64Test, Parameter) {
  const MachInst2 binop = GetParam();
  StreamBuilder m(this, MachineType::Uint64(), binop.machine_type,
                  binop.machine_type);
  m.Return(m.ChangeUint32ToUint64(
      (m.*binop.constructor)(m.Parameter(0), m.Parameter(1))));
  Stream s = m.Build();
  // Make sure the `ChangeUint32ToUint64` node turned into a no-op.
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(binop.arch_opcode, s[0]->arch_opcode());
  EXPECT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(1U, s[0]->OutputCount());
}

}  // namespace

INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
                         InstructionSelectorElidedChangeUint32ToUint64Test,
                         ::testing::ValuesIn(kCanElideChangeUint32ToUint64));

// ChangeUint32ToUint64AfterLoad
TEST_F(InstructionSelectorTest, ChangeUint32ToUint64AfterLoad) {
  // For each case, make sure the `ChangeUint32ToUint64` node turned into a
  // no-op.

  // movzxbl
  {
    StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer(),
                    MachineType::Int32());
    m.Return(m.ChangeUint32ToUint64(
        m.Load(MachineType::Uint8(), m.Parameter(0), m.Parameter(1))));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kX64Movzxbl, s[0]->arch_opcode());
    EXPECT_EQ(kMode_MR1, s[0]->addressing_mode());
    EXPECT_EQ(2U, s[0]->InputCount());
    EXPECT_EQ(1U, s[0]->OutputCount());
  }
  // movsxbl
  {
    StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer(),
                    MachineType::Int32());
    m.Return(m.ChangeUint32ToUint64(
        m.Load(MachineType::Int8(), m.Parameter(0), m.Parameter(1))));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kX64Movsxbl, s[0]->arch_opcode());
    EXPECT_EQ(kMode_MR1, s[0]->addressing_mode());
    EXPECT_EQ(2U, s[0]->InputCount());
    EXPECT_EQ(1U, s[0]->OutputCount());
  }
  // movzxwl
  {
    StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer(),
                    MachineType::Int32());
    m.Return(m.ChangeUint32ToUint64(
        m.Load(MachineType::Uint16(), m.Parameter(0), m.Parameter(1))));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kX64Movzxwl, s[0]->arch_opcode());
    EXPECT_EQ(kMode_MR1, s[0]->addressing_mode());
    EXPECT_EQ(2U, s[0]->InputCount());
    EXPECT_EQ(1U, s[0]->OutputCount());
  }
  // movsxwl
  {
    StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer(),
                    MachineType::Int32());
    m.Return(m.ChangeUint32ToUint64(
        m.Load(MachineType::Int16(), m.Parameter(0), m.Parameter(1))));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kX64Movsxwl, s[0]->arch_opcode());
    EXPECT_EQ(kMode_MR1, s[0]->addressing_mode());
    EXPECT_EQ(2U, s[0]->InputCount());
    EXPECT_EQ(1U, s[0]->OutputCount());
  }
}

// -----------------------------------------------------------------------------
// TruncateInt64ToInt32.
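// The two tests below check that extracting the upper 32 bits of a 64-bit
// value, i.e. TruncateInt64ToInt32(Word64Sar/Word64Shr(x, 32)), is selected
// as a single 64-bit shift by 32 whose low half is then used directly
// (roughly `shrq $32, reg`), rather than a shift followed by a separate
// 32-bit move. The exact registers are left to the register allocator.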
TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Sar) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int64());
  Node* const p = m.Parameter(0);
  Node* const t = m.TruncateInt64ToInt32(m.Word64Sar(p, m.Int64Constant(32)));
  m.Return(t);
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kX64Shr, s[0]->arch_opcode());
  ASSERT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_EQ(32, s.ToInt32(s[0]->InputAt(1)));
  ASSERT_EQ(1U, s[0]->OutputCount());
  EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
  EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
}

TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shr) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int64());
  Node* const p = m.Parameter(0);
  Node* const t = m.TruncateInt64ToInt32(m.Word64Shr(p, m.Int64Constant(32)));
  m.Return(t);
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kX64Shr, s[0]->arch_opcode());
  ASSERT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_EQ(32, s.ToInt32(s[0]->InputAt(1)));
  ASSERT_EQ(1U, s[0]->OutputCount());
  EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
  EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
}

// -----------------------------------------------------------------------------
// Addition.

TEST_F(InstructionSelectorTest, Int32AddWithInt32ParametersLea) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const p1 = m.Parameter(1);
  Node* const a0 = m.Int32Add(p0, p1);
  // Additional uses of input to add chooses lea
  Node* const a1 = m.Int32Div(p0, p1);
  m.Return(m.Int32Div(a0, a1));
  Stream s = m.Build();
  ASSERT_EQ(3U, s.size());
  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
  ASSERT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
}

TEST_F(InstructionSelectorTest, Int32AddConstantAsLeaSingle) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const c0 = m.Int32Constant(15);
  // If one of the add's operands is only used once, use an "leal", even though
  // an "addl" could be used. The "leal" has proven faster--our best guess is
  // that it gives the register allocation more freedom and it doesn't set
  // flags, reducing pressure in the CPU's pipeline. If we're lucky with
  // register allocation, then code generation will select an "addl" later for
  // the cases that have been measured to be faster.
  Node* const v0 = m.Int32Add(p0, c0);
  m.Return(v0);
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
  EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
  ASSERT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
}

TEST_F(InstructionSelectorTest, Int32AddConstantAsAdd) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const c0 = m.Int32Constant(1);
  // If there is only a single use of an add's input and the immediate constant
  // for the add is 1, don't use an inc. It is much slower on modern Intel
  // architectures.
  m.Return(m.Int32Add(p0, c0));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
  EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
  ASSERT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
}

TEST_F(InstructionSelectorTest, Int32AddConstantAsLeaDouble) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const c0 = m.Int32Constant(15);
  // A second use of an add's input uses lea
  Node* const a0 = m.Int32Add(p0, c0);
  m.Return(m.Int32Div(a0, p0));
  Stream s = m.Build();
  ASSERT_EQ(2U, s.size());
  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
  EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
  ASSERT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
}

TEST_F(InstructionSelectorTest, Int32AddCommutedConstantAsLeaSingle) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const c0 = m.Int32Constant(15);
  // If one of the add's operands is only used once, use an "leal", even though
  // an "addl" could be used. The "leal" has proven faster--our best guess is
  // that it gives the register allocation more freedom and it doesn't set
  // flags, reducing pressure in the CPU's pipeline. If we're lucky with
  // register allocation, then code generation will select an "addl" later for
  // the cases that have been measured to be faster.
  m.Return(m.Int32Add(c0, p0));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
  EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
  ASSERT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
}

TEST_F(InstructionSelectorTest, Int32AddCommutedConstantAsLeaDouble) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const c0 = m.Int32Constant(15);
  // A second use of an add's input uses lea
  Node* const a0 = m.Int32Add(c0, p0);
  USE(a0);
  m.Return(m.Int32Div(a0, p0));
  Stream s = m.Build();
  ASSERT_EQ(2U, s.size());
  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
  EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
  ASSERT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
}

TEST_F(InstructionSelectorTest, Int32AddSimpleAsAdd) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const p1 = m.Parameter(1);
  // If one of the add's operands is only used once, use an "leal", even though
  // an "addl" could be used. The "leal" has proven faster--our best guess is
  // that it gives the register allocation more freedom and it doesn't set
  // flags, reducing pressure in the CPU's pipeline. If we're lucky with
  // register allocation, then code generation will select an "addl" later for
  // the cases that have been measured to be faster.
  m.Return(m.Int32Add(p0, p1));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
  EXPECT_EQ(kMode_MR1, s[0]->addressing_mode());
  ASSERT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
}

TEST_F(InstructionSelectorTest, Int32AddSimpleAsLea) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const p1 = m.Parameter(1);
  // If all of the add's operands are used multiple times, use an "leal".
  Node* const v1 = m.Int32Add(p0, p1);
  m.Return(m.Int32Add(m.Int32Add(v1, p1), p0));
  Stream s = m.Build();
  ASSERT_EQ(3U, s.size());
  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
  EXPECT_EQ(kMode_MR1, s[0]->addressing_mode());
  ASSERT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
}

TEST_F(InstructionSelectorTest, Int32AddScaled2Mul) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const p1 = m.Parameter(1);
  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
  m.Return(m.Int32Add(p0, s0));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
  EXPECT_EQ(kMode_MR2, s[0]->addressing_mode());
  ASSERT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
}

TEST_F(InstructionSelectorTest, Int32AddCommutedScaled2Mul) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const p1 = m.Parameter(1);
  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
  m.Return(m.Int32Add(s0, p0));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
  EXPECT_EQ(kMode_MR2, s[0]->addressing_mode());
  ASSERT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
}

TEST_F(InstructionSelectorTest, Int32AddScaled2Shl) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const p1 = m.Parameter(1);
  Node* const s0 = m.Word32Shl(p1, m.Int32Constant(1));
  m.Return(m.Int32Add(p0, s0));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
  EXPECT_EQ(kMode_MR2, s[0]->addressing_mode());
  ASSERT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
}

TEST_F(InstructionSelectorTest, Int32AddCommutedScaled2Shl) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const p1 = m.Parameter(1);
  Node* const s0 = m.Word32Shl(p1, m.Int32Constant(1));
  m.Return(m.Int32Add(s0, p0));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
  EXPECT_EQ(kMode_MR2, s[0]->addressing_mode());
  ASSERT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
}

TEST_F(InstructionSelectorTest, Int32AddScaled4Mul) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const p1 = m.Parameter(1);
  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(4));
  m.Return(m.Int32Add(p0,
s0)); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); EXPECT_EQ(kMode_MR4, s[0]->addressing_mode()); ASSERT_EQ(2U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); } TEST_F(InstructionSelectorTest, Int32AddScaled4Shl) { StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(), MachineType::Int32()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); Node* const s0 = m.Word32Shl(p1, m.Int32Constant(2)); m.Return(m.Int32Add(p0, s0)); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); EXPECT_EQ(kMode_MR4, s[0]->addressing_mode()); ASSERT_EQ(2U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); } TEST_F(InstructionSelectorTest, Int32AddScaled8Mul) { StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(), MachineType::Int32()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); Node* const s0 = m.Int32Mul(p1, m.Int32Constant(8)); m.Return(m.Int32Add(p0, s0)); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); EXPECT_EQ(kMode_MR8, s[0]->addressing_mode()); ASSERT_EQ(2U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); } TEST_F(InstructionSelectorTest, Int32AddScaled8Shl) { StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(), MachineType::Int32()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); Node* const s0 = m.Word32Shl(p1, m.Int32Constant(3)); m.Return(m.Int32Add(p0, s0)); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); EXPECT_EQ(kMode_MR8, s[0]->addressing_mode()); ASSERT_EQ(2U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); } TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstant) { StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(), MachineType::Int32()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2)); Node* const c0 = m.Int32Constant(15); m.Return(m.Int32Add(c0, m.Int32Add(p0, s0))); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode()); ASSERT_EQ(3U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate()); } TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle1) { StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(), MachineType::Int32()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2)); Node* const c0 = m.Int32Constant(15); m.Return(m.Int32Add(p0, m.Int32Add(s0, c0))); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode()); ASSERT_EQ(3U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate()); } TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle2) { StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(), MachineType::Int32()); Node* 
const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2)); Node* const c0 = m.Int32Constant(15); m.Return(m.Int32Add(s0, m.Int32Add(c0, p0))); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode()); ASSERT_EQ(3U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate()); } TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle3) { StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(), MachineType::Int32()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2)); Node* const c0 = m.Int32Constant(15); m.Return(m.Int32Add(m.Int32Add(s0, c0), p0)); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode()); ASSERT_EQ(3U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate()); } TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle4) { StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(), MachineType::Int32()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2)); Node* const c0 = m.Int32Constant(15); m.Return(m.Int32Add(m.Int32Add(c0, p0), s0)); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode()); ASSERT_EQ(3U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate()); } TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle5) { StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(), MachineType::Int32()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2)); Node* const c0 = m.Int32Constant(15); m.Return(m.Int32Add(m.Int32Add(p0, s0), c0)); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode()); ASSERT_EQ(3U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate()); } TEST_F(InstructionSelectorTest, Int32AddScaled2ShlWithConstant) { StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(), MachineType::Int32()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); Node* const s0 = m.Word32Shl(p1, m.Int32Constant(1)); Node* const c0 = m.Int32Constant(15); m.Return(m.Int32Add(c0, m.Int32Add(p0, s0))); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode()); ASSERT_EQ(3U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate()); } TEST_F(InstructionSelectorTest, Int32AddScaled4MulWithConstant) { StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(), MachineType::Int32()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); Node* const s0 = m.Int32Mul(p1, 
      m.Int32Constant(4));
  Node* const c0 = m.Int32Constant(15);
  m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
  EXPECT_EQ(kMode_MR4I, s[0]->addressing_mode());
  ASSERT_EQ(3U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
}

TEST_F(InstructionSelectorTest, Int32AddScaled4ShlWithConstant) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const p1 = m.Parameter(1);
  Node* const s0 = m.Word32Shl(p1, m.Int32Constant(2));
  Node* const c0 = m.Int32Constant(15);
  m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
  EXPECT_EQ(kMode_MR4I, s[0]->addressing_mode());
  ASSERT_EQ(3U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
}

TEST_F(InstructionSelectorTest, Int32AddScaled8MulWithConstant) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const p1 = m.Parameter(1);
  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(8));
  Node* const c0 = m.Int32Constant(15);
  m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
  EXPECT_EQ(kMode_MR8I, s[0]->addressing_mode());
  ASSERT_EQ(3U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
}

TEST_F(InstructionSelectorTest, Int32AddScaled8ShlWithConstant) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const p1 = m.Parameter(1);
  Node* const s0 = m.Word32Shl(p1, m.Int32Constant(3));
  Node* const c0 = m.Int32Constant(15);
  m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
  EXPECT_EQ(kMode_MR8I, s[0]->addressing_mode());
  ASSERT_EQ(3U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
}

TEST_F(InstructionSelectorTest, Int32SubConstantAsSub) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const c0 = m.Int32Constant(-1);
  // If there is only a single use of one of the sub's non-constant inputs,
  // use a "subl" instruction.
  m.Return(m.Int32Sub(p0, c0));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
  EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
  ASSERT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
}

TEST_F(InstructionSelectorTest, Int32SubConstantAsLea) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const c0 = m.Int32Constant(-1);
  // If there are multiple uses of one of the sub's non-constant inputs, use a
  // "leal" instruction.
  Node* const v0 = m.Int32Sub(p0, c0);
  m.Return(m.Int32Div(p0, v0));
  Stream s = m.Build();
  ASSERT_EQ(2U, s.size());
  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
  EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
  ASSERT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
}

TEST_F(InstructionSelectorTest, Int32AddScaled2Other) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32(), MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const p1 = m.Parameter(1);
  Node* const p2 = m.Parameter(2);
  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
  Node* const a0 = m.Int32Add(s0, p2);
  Node* const a1 = m.Int32Add(p0, a0);
  m.Return(a1);
  Stream s = m.Build();
  ASSERT_EQ(2U, s.size());
  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
  EXPECT_EQ(kMode_MR2, s[0]->addressing_mode());
  ASSERT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
  EXPECT_EQ(s.ToVreg(a0), s.ToVreg(s[0]->OutputAt(0)));
  ASSERT_EQ(2U, s[1]->InputCount());
  EXPECT_EQ(kX64Lea32, s[1]->arch_opcode());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[1]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(a0), s.ToVreg(s[1]->InputAt(1)));
  EXPECT_EQ(s.ToVreg(a1), s.ToVreg(s[1]->OutputAt(0)));
}

TEST_F(InstructionSelectorTest, Int32AddMinNegativeDisplacement) {
  // This test case is simplified from a Wasm fuzz test in
  // https://crbug.com/1091892. The key here is that we match on a
  // sequence like: Int32Add(Int32Sub(-524288, -2147483648), -26048), which
  // matches on an EmitLea, with -2147483648 as the displacement. Since we
  // have an Int32Sub node, it sets kNegativeDisplacement, and later we try to
  // negate -2147483648, which overflows.
  StreamBuilder m(this, MachineType::Int32());
  Node* const c0 = m.Int32Constant(-524288);
  Node* const c1 = m.Int32Constant(std::numeric_limits<int32_t>::min());
  Node* const c2 = m.Int32Constant(-26048);
  Node* const a0 = m.Int32Sub(c0, c1);
  Node* const a1 = m.Int32Add(a0, c2);
  m.Return(a1);
  Stream s = m.Build();
  ASSERT_EQ(2U, s.size());
  EXPECT_EQ(kX64Sub32, s[0]->arch_opcode());
  ASSERT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(kMode_None, s[0]->addressing_mode());
  EXPECT_EQ(s.ToVreg(c0), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(c1), s.ToVreg(s[0]->InputAt(1)));
  EXPECT_EQ(s.ToVreg(a0), s.ToVreg(s[0]->OutputAt(0)));
  EXPECT_EQ(kX64Add32, s[1]->arch_opcode());
  ASSERT_EQ(2U, s[1]->InputCount());
  EXPECT_EQ(kMode_None, s[1]->addressing_mode());
  EXPECT_EQ(s.ToVreg(a0), s.ToVreg(s[1]->InputAt(0)));
  EXPECT_TRUE(s[1]->InputAt(1)->IsImmediate());
  EXPECT_EQ(s.ToVreg(a1), s.ToVreg(s[1]->OutputAt(0)));
}

// -----------------------------------------------------------------------------
// Multiplication.
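// Rough sketch of what the Int32Mul*BecomesLea tests below expect: small
// constant multipliers are matched into "leal" forms that use the SIB scale,
// e.g. x * 3 becomes something like `leal (%rdi,%rdi,2), %eax` (kMode_MR2)
// and x * 4 becomes `leal (,%rdi,4), %eax` (kMode_M4). The registers here are
// illustrative only; the tests check just the opcode and addressing mode.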
TEST_F(InstructionSelectorTest, Int32MulWithInt32MulWithParameters) { StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(), MachineType::Int32()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); Node* const m0 = m.Int32Mul(p0, p1); m.Return(m.Int32Mul(m0, p0)); Stream s = m.Build(); ASSERT_EQ(2U, s.size()); EXPECT_EQ(kX64Imul32, s[0]->arch_opcode()); ASSERT_EQ(2U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1))); ASSERT_EQ(1U, s[0]->OutputCount()); EXPECT_EQ(s.ToVreg(m0), s.ToVreg(s[0]->OutputAt(0))); EXPECT_EQ(kX64Imul32, s[1]->arch_opcode()); ASSERT_EQ(2U, s[1]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[1]->InputAt(0))); EXPECT_EQ(s.ToVreg(m0), s.ToVreg(s[1]->InputAt(1))); } TEST_F(InstructionSelectorTest, Int32MulHigh) { StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(), MachineType::Int32()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); Node* const n = m.Int32MulHigh(p0, p1); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64ImulHigh32, s[0]->arch_opcode()); ASSERT_EQ(2U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_TRUE(s.IsFixed(s[0]->InputAt(0), rax)); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); EXPECT_TRUE(!s.IsUsedAtStart(s[0]->InputAt(1))); ASSERT_LE(1U, s[0]->OutputCount()); EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); EXPECT_TRUE(s.IsFixed(s[0]->OutputAt(0), rdx)); } TEST_F(InstructionSelectorTest, Uint32MulHigh) { StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); Node* const n = m.Uint32MulHigh(p0, p1); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64UmulHigh32, s[0]->arch_opcode()); ASSERT_EQ(2U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_TRUE(s.IsFixed(s[0]->InputAt(0), rax)); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); EXPECT_TRUE(!s.IsUsedAtStart(s[0]->InputAt(1))); ASSERT_LE(1U, s[0]->OutputCount()); EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); EXPECT_TRUE(s.IsFixed(s[0]->OutputAt(0), rdx)); } TEST_F(InstructionSelectorTest, Int32Mul2BecomesLea) { StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32()); Node* const p0 = m.Parameter(0); Node* const c1 = m.Int32Constant(2); Node* const n = m.Int32Mul(p0, c1); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); EXPECT_EQ(kMode_MR1, s[0]->addressing_mode()); ASSERT_EQ(2U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1))); } TEST_F(InstructionSelectorTest, Int32Mul3BecomesLea) { StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32()); Node* const p0 = m.Parameter(0); Node* const c1 = m.Int32Constant(3); Node* const n = m.Int32Mul(p0, c1); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); EXPECT_EQ(kMode_MR2, s[0]->addressing_mode()); ASSERT_EQ(2U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1))); } TEST_F(InstructionSelectorTest, Int32Mul4BecomesLea) { StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32()); Node* const p0 = m.Parameter(0); Node* const c1 = 
m.Int32Constant(4); Node* const n = m.Int32Mul(p0, c1); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); EXPECT_EQ(kMode_M4, s[0]->addressing_mode()); ASSERT_EQ(1U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); } TEST_F(InstructionSelectorTest, Int32Mul5BecomesLea) { StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32()); Node* const p0 = m.Parameter(0); Node* const c1 = m.Int32Constant(5); Node* const n = m.Int32Mul(p0, c1); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); EXPECT_EQ(kMode_MR4, s[0]->addressing_mode()); ASSERT_EQ(2U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1))); } TEST_F(InstructionSelectorTest, Int32Mul8BecomesLea) { StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32()); Node* const p0 = m.Parameter(0); Node* const c1 = m.Int32Constant(8); Node* const n = m.Int32Mul(p0, c1); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); EXPECT_EQ(kMode_M8, s[0]->addressing_mode()); ASSERT_EQ(1U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); } TEST_F(InstructionSelectorTest, Int32Mul9BecomesLea) { StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32()); Node* const p0 = m.Parameter(0); Node* const c1 = m.Int32Constant(9); Node* const n = m.Int32Mul(p0, c1); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); EXPECT_EQ(kMode_MR8, s[0]->addressing_mode()); ASSERT_EQ(2U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1))); } // ----------------------------------------------------------------------------- // Word32Shl. 
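// Similarly for the shift tests below: Word32Shl by 1, 2 or 3 is expected to
// become an "leal" computing x + x, x * 4 or x * 8 (kMode_MR1, kMode_M4 and
// kMode_M8 respectively), provided the shift amount is a small constant.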
TEST_F(InstructionSelectorTest, Int32Shl1BecomesLea) { StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32()); Node* const p0 = m.Parameter(0); Node* const c1 = m.Int32Constant(1); Node* const n = m.Word32Shl(p0, c1); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); EXPECT_EQ(kMode_MR1, s[0]->addressing_mode()); ASSERT_EQ(2U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1))); } TEST_F(InstructionSelectorTest, Int32Shl2BecomesLea) { StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32()); Node* const p0 = m.Parameter(0); Node* const c1 = m.Int32Constant(2); Node* const n = m.Word32Shl(p0, c1); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); EXPECT_EQ(kMode_M4, s[0]->addressing_mode()); ASSERT_EQ(1U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); } TEST_F(InstructionSelectorTest, Int32Shl4BecomesLea) { StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32()); Node* const p0 = m.Parameter(0); Node* const c1 = m.Int32Constant(3); Node* const n = m.Word32Shl(p0, c1); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); EXPECT_EQ(kMode_M8, s[0]->addressing_mode()); ASSERT_EQ(1U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); } // ----------------------------------------------------------------------------- // Binops with a memory operand. TEST_F(InstructionSelectorTest, LoadCmp32) { { // Word32Equal(Load[Int8](p0, p1), Int32Constant(0)) -> cmpb [p0,p1], 0 StreamBuilder m(this, MachineType::Int32(), MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); m.Return( m.Word32Equal(m.Load(MachineType::Int8(), p0, p1), m.Int32Constant(0))); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Cmp8, s[0]->arch_opcode()); EXPECT_EQ(kMode_MR1, s[0]->addressing_mode()); ASSERT_EQ(3U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate()); } { // Word32Equal(LoadImmutable[Int8](p0, p1), Int32Constant(0)) -> // cmpb [p0,p1], 0 StreamBuilder m(this, MachineType::Int32(), MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); m.Return(m.Word32Equal(m.LoadImmutable(MachineType::Int8(), p0, p1), m.Int32Constant(0))); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Cmp8, s[0]->arch_opcode()); EXPECT_EQ(kMode_MR1, s[0]->addressing_mode()); ASSERT_EQ(3U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate()); } { // Word32Equal(Load[Uint8](p0, p1), Int32Constant(0)) -> cmpb [p0,p1], 0 StreamBuilder m(this, MachineType::Int32(), MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); m.Return(m.Word32Equal(m.Load(MachineType::Uint8(), p0, p1), m.Int32Constant(0))); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Cmp8, s[0]->arch_opcode()); EXPECT_EQ(kMode_MR1, s[0]->addressing_mode()); ASSERT_EQ(3U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), 
s.ToVreg(s[0]->InputAt(1))); EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate()); } { // Word32Equal(Load[Int16](p0, p1), Int32Constant(0)) -> cmpw [p0,p1], 0 StreamBuilder m(this, MachineType::Int32(), MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); m.Return(m.Word32Equal(m.Load(MachineType::Int16(), p0, p1), m.Int32Constant(0))); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Cmp16, s[0]->arch_opcode()); EXPECT_EQ(kMode_MR1, s[0]->addressing_mode()); ASSERT_EQ(3U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate()); } { // Word32Equal(Load[Uint16](p0, p1), Int32Constant(0)) -> cmpw [p0,p1], 0 StreamBuilder m(this, MachineType::Int32(), MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); m.Return(m.Word32Equal(m.Load(MachineType::Uint16(), p0, p1), m.Int32Constant(0))); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Cmp16, s[0]->arch_opcode()); EXPECT_EQ(kMode_MR1, s[0]->addressing_mode()); ASSERT_EQ(3U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate()); } { // Word32Equal(Load[Int32](p0, p1), Int32Constant(0)) -> cmpl [p0,p1], 0 StreamBuilder m(this, MachineType::Int32(), MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); m.Return(m.Word32Equal(m.Load(MachineType::Int32(), p0, p1), m.Int32Constant(0))); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Cmp32, s[0]->arch_opcode()); EXPECT_EQ(kMode_MR1, s[0]->addressing_mode()); ASSERT_EQ(3U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate()); } { // Word32Equal(Load[Uint32](p0, p1), Int32Constant(0)) -> cmpl [p0,p1], 0 StreamBuilder m(this, MachineType::Int32(), MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); m.Return(m.Word32Equal(m.Load(MachineType::Uint32(), p0, p1), m.Int32Constant(0))); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Cmp32, s[0]->arch_opcode()); EXPECT_EQ(kMode_MR1, s[0]->addressing_mode()); ASSERT_EQ(3U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate()); } } TEST_F(InstructionSelectorTest, LoadAnd32) { StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(), MachineType::Int32()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); m.Return( m.Word32And(p0, m.Load(MachineType::Int32(), p1, m.Int32Constant(127)))); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64And32, s[0]->arch_opcode()); ASSERT_EQ(3U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); } TEST_F(InstructionSelectorTest, LoadOr32) { StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(), MachineType::Int32()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); m.Return( m.Word32Or(p0, m.Load(MachineType::Int32(), p1, m.Int32Constant(127)))); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Or32, s[0]->arch_opcode()); ASSERT_EQ(3U, 
s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); } TEST_F(InstructionSelectorTest, LoadXor32) { StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(), MachineType::Int32()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); m.Return( m.Word32Xor(p0, m.Load(MachineType::Int32(), p1, m.Int32Constant(127)))); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Xor32, s[0]->arch_opcode()); ASSERT_EQ(3U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); } TEST_F(InstructionSelectorTest, LoadAdd32) { StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(), MachineType::Int32()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); m.Return( m.Int32Add(p0, m.Load(MachineType::Int32(), p1, m.Int32Constant(127)))); Stream s = m.Build(); // Use lea instead of add, so memory operand is invalid. ASSERT_EQ(2U, s.size()); EXPECT_EQ(kX64Movl, s[0]->arch_opcode()); EXPECT_EQ(kX64Lea32, s[1]->arch_opcode()); } TEST_F(InstructionSelectorTest, LoadSub32) { StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(), MachineType::Int32()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); m.Return( m.Int32Sub(p0, m.Load(MachineType::Int32(), p1, m.Int32Constant(127)))); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Sub32, s[0]->arch_opcode()); ASSERT_EQ(3U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); } TEST_F(InstructionSelectorTest, LoadAnd64) { StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); m.Return( m.Word64And(p0, m.Load(MachineType::Int64(), p1, m.Int32Constant(127)))); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64And, s[0]->arch_opcode()); ASSERT_EQ(3U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); } TEST_F(InstructionSelectorTest, LoadOr64) { StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); m.Return( m.Word64Or(p0, m.Load(MachineType::Int64(), p1, m.Int32Constant(127)))); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Or, s[0]->arch_opcode()); ASSERT_EQ(3U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); } TEST_F(InstructionSelectorTest, LoadXor64) { StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); m.Return( m.Word64Xor(p0, m.Load(MachineType::Int64(), p1, m.Int32Constant(127)))); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Xor, s[0]->arch_opcode()); ASSERT_EQ(3U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); } TEST_F(InstructionSelectorTest, LoadAdd64) { StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); m.Return( m.Int64Add(p0, m.Load(MachineType::Int64(), p1, m.Int32Constant(127)))); Stream s = m.Build(); // Use lea instead of add, so memory operand is invalid. 
ASSERT_EQ(2U, s.size()); EXPECT_EQ(kX64Movq, s[0]->arch_opcode()); EXPECT_EQ(kX64Lea, s[1]->arch_opcode()); } TEST_F(InstructionSelectorTest, LoadSub64) { StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); m.Return( m.Int64Sub(p0, m.Load(MachineType::Int64(), p1, m.Int32Constant(127)))); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Sub, s[0]->arch_opcode()); ASSERT_EQ(3U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); } // ----------------------------------------------------------------------------- // Floating point operations. TEST_F(InstructionSelectorTest, Float32Abs) { { StreamBuilder m(this, MachineType::Float32(), MachineType::Float32()); Node* const p0 = m.Parameter(0); Node* const n = m.Float32Abs(p0); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Float32Abs, s[0]->arch_opcode()); ASSERT_EQ(1U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); ASSERT_EQ(1U, s[0]->OutputCount()); EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output())); EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); EXPECT_EQ(kFlags_none, s[0]->flags_mode()); } { StreamBuilder m(this, MachineType::Float32(), MachineType::Float32()); Node* const p0 = m.Parameter(0); Node* const n = m.Float32Abs(p0); m.Return(n); Stream s = m.Build(AVX); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Float32Abs, s[0]->arch_opcode()); ASSERT_EQ(1U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); ASSERT_EQ(1U, s[0]->OutputCount()); EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); EXPECT_EQ(kFlags_none, s[0]->flags_mode()); } } TEST_F(InstructionSelectorTest, Float64Abs) { { StreamBuilder m(this, MachineType::Float64(), MachineType::Float64()); Node* const p0 = m.Parameter(0); Node* const n = m.Float64Abs(p0); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Float64Abs, s[0]->arch_opcode()); ASSERT_EQ(1U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); ASSERT_EQ(1U, s[0]->OutputCount()); EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output())); EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); EXPECT_EQ(kFlags_none, s[0]->flags_mode()); } { StreamBuilder m(this, MachineType::Float64(), MachineType::Float64()); Node* const p0 = m.Parameter(0); Node* const n = m.Float64Abs(p0); m.Return(n); Stream s = m.Build(AVX); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Float64Abs, s[0]->arch_opcode()); ASSERT_EQ(1U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); ASSERT_EQ(1U, s[0]->OutputCount()); EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); EXPECT_EQ(kFlags_none, s[0]->flags_mode()); } } TEST_F(InstructionSelectorTest, Float64BinopArithmetic) { { StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(), MachineType::Float64()); Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1)); Node* mul = m.Float64Mul(add, m.Parameter(1)); Node* sub = m.Float64Sub(mul, add); Node* ret = m.Float64Div(mul, sub); m.Return(ret); Stream s = m.Build(AVX); ASSERT_EQ(4U, s.size()); EXPECT_EQ(kAVXFloat64Add, s[0]->arch_opcode()); EXPECT_EQ(kAVXFloat64Mul, s[1]->arch_opcode()); EXPECT_EQ(kAVXFloat64Sub, s[2]->arch_opcode()); EXPECT_EQ(kAVXFloat64Div, s[3]->arch_opcode()); } { StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(), MachineType::Float64()); Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1)); Node* 
mul = m.Float64Mul(add, m.Parameter(1)); Node* sub = m.Float64Sub(mul, add); Node* ret = m.Float64Div(mul, sub); m.Return(ret); Stream s = m.Build(); ASSERT_EQ(4U, s.size()); EXPECT_EQ(kSSEFloat64Add, s[0]->arch_opcode()); EXPECT_EQ(kSSEFloat64Mul, s[1]->arch_opcode()); EXPECT_EQ(kSSEFloat64Sub, s[2]->arch_opcode()); EXPECT_EQ(kSSEFloat64Div, s[3]->arch_opcode()); } } TEST_F(InstructionSelectorTest, Float32BinopArithmeticWithLoad) { { StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(), MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); Node* const p2 = m.Parameter(2); Node* add = m.Float32Add( p0, m.Load(MachineType::Float32(), p1, m.Int32Constant(127))); Node* sub = m.Float32Sub( add, m.Load(MachineType::Float32(), p1, m.Int32Constant(127))); Node* ret = m.Float32Mul( m.Load(MachineType::Float32(), p2, m.Int32Constant(127)), sub); m.Return(ret); Stream s = m.Build(AVX); ASSERT_EQ(3U, s.size()); EXPECT_EQ(kAVXFloat32Add, s[0]->arch_opcode()); ASSERT_EQ(3U, s[0]->InputCount()); EXPECT_EQ(kAVXFloat32Sub, s[1]->arch_opcode()); ASSERT_EQ(3U, s[1]->InputCount()); EXPECT_EQ(kAVXFloat32Mul, s[2]->arch_opcode()); ASSERT_EQ(3U, s[2]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[2]->InputAt(1))); } { StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(), MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); Node* const p2 = m.Parameter(2); Node* add = m.Float32Add( p0, m.Load(MachineType::Float32(), p1, m.Int32Constant(127))); Node* sub = m.Float32Sub( add, m.Load(MachineType::Float32(), p1, m.Int32Constant(127))); Node* ret = m.Float32Mul( m.Load(MachineType::Float32(), p2, m.Int32Constant(127)), sub); m.Return(ret); Stream s = m.Build(); ASSERT_EQ(3U, s.size()); EXPECT_EQ(kSSEFloat32Add, s[0]->arch_opcode()); ASSERT_EQ(3U, s[0]->InputCount()); EXPECT_EQ(kSSEFloat32Sub, s[1]->arch_opcode()); ASSERT_EQ(3U, s[1]->InputCount()); EXPECT_EQ(kSSEFloat32Mul, s[2]->arch_opcode()); ASSERT_EQ(3U, s[2]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[2]->InputAt(1))); } } TEST_F(InstructionSelectorTest, Float64BinopArithmeticWithLoad) { { StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(), MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const p1 = m.Parameter(1); Node* const p2 = m.Parameter(2); Node* add = m.Float64Add( p0, m.Load(MachineType::Float64(), p1, m.Int32Constant(127))); Node* sub = m.Float64Sub( add, m.Load(MachineType::Float64(), p1, m.Int32Constant(127))); Node* ret = m.Float64Mul( m.Load(MachineType::Float64(), p2, m.Int32Constant(127)), sub); m.Return(ret); Stream s = m.Build(AVX); ASSERT_EQ(3U, s.size()); EXPECT_EQ(kAVXFloat64Add, s[0]->arch_opcode()); ASSERT_EQ(3U, s[0]->InputCount()); EXPECT_EQ(kAVXFloat64Sub, s[1]->arch_opcode()); ASSERT_EQ(3U, s[1]->InputCount()); EXPECT_EQ(kAVXFloat64Mul, s[2]->arch_opcode()); ASSERT_EQ(3U, s[2]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[2]->InputAt(1))); } { StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(), MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const p1 = 
m.Parameter(1); Node* const p2 = m.Parameter(2); Node* add = m.Float64Add( p0, m.Load(MachineType::Float64(), p1, m.Int32Constant(127))); Node* sub = m.Float64Sub( add, m.Load(MachineType::Float64(), p1, m.Int32Constant(127))); Node* ret = m.Float64Mul( m.Load(MachineType::Float64(), p2, m.Int32Constant(127)), sub); m.Return(ret); Stream s = m.Build(); ASSERT_EQ(3U, s.size()); EXPECT_EQ(kSSEFloat64Add, s[0]->arch_opcode()); ASSERT_EQ(3U, s[0]->InputCount()); EXPECT_EQ(kSSEFloat64Sub, s[1]->arch_opcode()); ASSERT_EQ(3U, s[1]->InputCount()); EXPECT_EQ(kSSEFloat64Mul, s[2]->arch_opcode()); ASSERT_EQ(3U, s[2]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[2]->InputAt(1))); } } // ----------------------------------------------------------------------------- // Miscellaneous. TEST_F(InstructionSelectorTest, Word64ShlWithChangeInt32ToInt64) { TRACED_FORRANGE(int64_t, x, 32, 63) { StreamBuilder m(this, MachineType::Int64(), MachineType::Int32()); Node* const p0 = m.Parameter(0); Node* const n = m.Word64Shl(m.ChangeInt32ToInt64(p0), m.Int64Constant(x)); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Shl, s[0]->arch_opcode()); ASSERT_EQ(2U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(x, s.ToInt32(s[0]->InputAt(1))); ASSERT_EQ(1U, s[0]->OutputCount()); EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output())); EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); } } TEST_F(InstructionSelectorTest, Word64ShlWithChangeUint32ToUint64) { TRACED_FORRANGE(int64_t, x, 32, 63) { StreamBuilder m(this, MachineType::Int64(), MachineType::Uint32()); Node* const p0 = m.Parameter(0); Node* const n = m.Word64Shl(m.ChangeUint32ToUint64(p0), m.Int64Constant(x)); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Shl, s[0]->arch_opcode()); ASSERT_EQ(2U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(x, s.ToInt32(s[0]->InputAt(1))); ASSERT_EQ(1U, s[0]->OutputCount()); EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output())); EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); } } TEST_F(InstructionSelectorTest, Word32AndWith0xFF) { { StreamBuilder m(this, MachineType::Int32(), MachineType::Int32()); Node* const p0 = m.Parameter(0); Node* const n = m.Word32And(p0, m.Int32Constant(0xFF)); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Movzxbl, s[0]->arch_opcode()); ASSERT_EQ(1U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); ASSERT_EQ(1U, s[0]->OutputCount()); EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); } { StreamBuilder m(this, MachineType::Int32(), MachineType::Int32()); Node* const p0 = m.Parameter(0); Node* const n = m.Word32And(m.Int32Constant(0xFF), p0); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Movzxbl, s[0]->arch_opcode()); ASSERT_EQ(1U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); ASSERT_EQ(1U, s[0]->OutputCount()); EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); } } TEST_F(InstructionSelectorTest, Word64AndWith0xFFFFFFFF) { { StreamBuilder m(this, MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const n = m.Word64And(p0, m.Int32Constant(0xFFFFFFFF)); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Movl, s[0]->arch_opcode()); ASSERT_EQ(1U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); ASSERT_EQ(1U, 
s[0]->OutputCount()); EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); } { StreamBuilder m(this, MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const n = m.Word64And(m.Int32Constant(0xFFFFFFFF), p0); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Movl, s[0]->arch_opcode()); ASSERT_EQ(1U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); ASSERT_EQ(1U, s[0]->OutputCount()); EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); } } TEST_F(InstructionSelectorTest, Word64AndWith0xFFFF) { { StreamBuilder m(this, MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const n = m.Word64And(p0, m.Int32Constant(0xFFFF)); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Movzxwq, s[0]->arch_opcode()); ASSERT_EQ(1U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); ASSERT_EQ(1U, s[0]->OutputCount()); EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); } { StreamBuilder m(this, MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const n = m.Word64And(m.Int32Constant(0xFFFF), p0); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Movzxwq, s[0]->arch_opcode()); ASSERT_EQ(1U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); ASSERT_EQ(1U, s[0]->OutputCount()); EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); } } TEST_F(InstructionSelectorTest, Word64AndWith0xFF) { { StreamBuilder m(this, MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const n = m.Word64And(p0, m.Int32Constant(0xFF)); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Movzxbq, s[0]->arch_opcode()); ASSERT_EQ(1U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); ASSERT_EQ(1U, s[0]->OutputCount()); EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); } { StreamBuilder m(this, MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const n = m.Word64And(m.Int32Constant(0xFF), p0); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Movzxbq, s[0]->arch_opcode()); ASSERT_EQ(1U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); ASSERT_EQ(1U, s[0]->OutputCount()); EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); } } TEST_F(InstructionSelectorTest, Word64AndWithInt64FitsUint32) { { StreamBuilder m(this, MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const n = m.Word64And(p0, m.Int64Constant(15)); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64And32, s[0]->arch_opcode()); ASSERT_EQ(2U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); ASSERT_EQ(1U, s[0]->OutputCount()); EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); } { StreamBuilder m(this, MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const n = m.Word64And(m.Int64Constant(15), p0); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64And32, s[0]->arch_opcode()); ASSERT_EQ(2U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); ASSERT_EQ(1U, s[0]->OutputCount()); EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); } } TEST_F(InstructionSelectorTest, Word64AndWithInt64DontFitsUint32) { { StreamBuilder m(this, MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const n = m.Word64And(p0, 
m.Int64Constant(0x100000000)); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64And, s[0]->arch_opcode()); ASSERT_EQ(2U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1))); ASSERT_EQ(1U, s[0]->OutputCount()); EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); } { StreamBuilder m(this, MachineType::Int64(), MachineType::Int64()); Node* const p0 = m.Parameter(0); Node* const n = m.Word64And(m.Int64Constant(0x100000000), p0); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64And, s[0]->arch_opcode()); ASSERT_EQ(2U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1))); ASSERT_EQ(1U, s[0]->OutputCount()); EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); } } TEST_F(InstructionSelectorTest, Word32AndWith0xFFFF) { { StreamBuilder m(this, MachineType::Int32(), MachineType::Int32()); Node* const p0 = m.Parameter(0); Node* const n = m.Word32And(p0, m.Int32Constant(0xFFFF)); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Movzxwl, s[0]->arch_opcode()); ASSERT_EQ(1U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); ASSERT_EQ(1U, s[0]->OutputCount()); EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); } { StreamBuilder m(this, MachineType::Int32(), MachineType::Int32()); Node* const p0 = m.Parameter(0); Node* const n = m.Word32And(m.Int32Constant(0xFFFF), p0); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Movzxwl, s[0]->arch_opcode()); ASSERT_EQ(1U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); ASSERT_EQ(1U, s[0]->OutputCount()); EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); } } TEST_F(InstructionSelectorTest, Word32Clz) { StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32()); Node* const p0 = m.Parameter(0); Node* const n = m.Word32Clz(p0); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Lzcnt32, s[0]->arch_opcode()); ASSERT_EQ(1U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); ASSERT_EQ(1U, s[0]->OutputCount()); EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); } TEST_F(InstructionSelectorTest, LoadAndWord64ShiftRight32) { { StreamBuilder m(this, MachineType::Uint64(), MachineType::Uint32()); Node* const p0 = m.Parameter(0); Node* const load = m.Load(MachineType::Uint64(), p0); Node* const shift = m.Word64Shr(load, m.Int32Constant(32)); m.Return(shift); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Movl, s[0]->arch_opcode()); ASSERT_EQ(2U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(4, s.ToInt32(s[0]->InputAt(1))); ASSERT_EQ(1U, s[0]->OutputCount()); EXPECT_EQ(s.ToVreg(shift), s.ToVreg(s[0]->Output())); } { StreamBuilder m(this, MachineType::Int64(), MachineType::Int32()); Node* const p0 = m.Parameter(0); Node* const load = m.Load(MachineType::Int64(), p0); Node* const shift = m.Word64Sar(load, m.Int32Constant(32)); m.Return(shift); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Movsxlq, s[0]->arch_opcode()); ASSERT_EQ(2U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(4, s.ToInt32(s[0]->InputAt(1))); ASSERT_EQ(1U, s[0]->OutputCount()); EXPECT_EQ(s.ToVreg(shift), s.ToVreg(s[0]->Output())); } { StreamBuilder m(this, MachineType::Int64(), MachineType::Int32()); Node* const p0 = m.Parameter(0); Node* const load = m.Load(MachineType::Int64(), p0); Node* const shift = m.Word64Sar(load, m.Int32Constant(32)); Node* const truncate = 
m.TruncateInt64ToInt32(shift); m.Return(truncate); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64Movl, s[0]->arch_opcode()); ASSERT_EQ(2U, s[0]->InputCount()); EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); EXPECT_EQ(4, s.ToInt32(s[0]->InputAt(1))); ASSERT_EQ(1U, s[0]->OutputCount()); EXPECT_EQ(s.ToVreg(shift), s.ToVreg(s[0]->Output())); } } // ----------------------------------------------------------------------------- // SIMD. TEST_F(InstructionSelectorTest, SIMDSplatZero) { // Test optimization for splat of constant 0. // {i8x16,i16x8,i32x4,i64x2}.splat(const(0)) -> v128.zero(). // Optimizations for f32x4.splat and f64x2.splat are not implemented since they // don't improve the codegen as much (same number of instructions). { StreamBuilder m(this, MachineType::Simd128()); Node* const splat = m.I64x2Splat(m.Int64Constant(0)); m.Return(splat); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64SZero, s[0]->arch_opcode()); ASSERT_EQ(0U, s[0]->InputCount()); EXPECT_EQ(1U, s[0]->OutputCount()); } { StreamBuilder m(this, MachineType::Simd128()); Node* const splat = m.I32x4Splat(m.Int32Constant(0)); m.Return(splat); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64SZero, s[0]->arch_opcode()); ASSERT_EQ(0U, s[0]->InputCount()); EXPECT_EQ(1U, s[0]->OutputCount()); } { StreamBuilder m(this, MachineType::Simd128()); Node* const splat = m.I16x8Splat(m.Int32Constant(0)); m.Return(splat); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64SZero, s[0]->arch_opcode()); ASSERT_EQ(0U, s[0]->InputCount()); EXPECT_EQ(1U, s[0]->OutputCount()); } { StreamBuilder m(this, MachineType::Simd128()); Node* const splat = m.I8x16Splat(m.Int32Constant(0)); m.Return(splat); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(kX64SZero, s[0]->arch_opcode()); ASSERT_EQ(0U, s[0]->InputCount()); EXPECT_EQ(1U, s[0]->OutputCount()); } } #if V8_ENABLE_WEBASSEMBLY struct ArchShuffle { uint8_t shuffle[kSimd128Size]; ArchOpcode arch_opcode; size_t input_count; }; static constexpr ArchShuffle kArchShuffles[] = { // These are architecture-specific shuffles defined in // instruction-selector-x64.cc arch_shuffles.
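// Note: the masks below follow the Wasm i8x16.shuffle convention: lane
// indices 0-15 select bytes from the first input operand and indices 16-31
// select bytes from the second input operand.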
{ {0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23}, kX64S64x2UnpackLow, 2, }, { {8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31}, kX64S64x2UnpackHigh, 2, }, { {0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23}, kX64S32x4UnpackLow, 2, }, { {8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31}, kX64S32x4UnpackHigh, 2, }, { {0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23}, kX64S16x8UnpackLow, 2, }, { {8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31}, kX64S16x8UnpackHigh, 2, }, { {0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}, kX64S8x16UnpackLow, 2, }, { {8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31}, kX64S8x16UnpackHigh, 2, }, { {0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29}, kX64S16x8UnzipLow, 2, }, { {2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31}, kX64S16x8UnzipHigh, 2, }, { {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30}, kX64S8x16UnzipLow, 2, }, { {1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31}, kX64S8x16UnzipHigh, 2, }, { {0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30}, kX64S8x16TransposeLow, 2, }, { {1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31}, kX64S8x16TransposeHigh, 2, }, { {7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kX64S8x8Reverse, 1, }, { {3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kX64S8x4Reverse, 1, }, { {1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}, kX64S8x2Reverse, 1, }, // These are matched by TryMatchConcat && TryMatch32x4Rotate. { {4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3}, kX64S32x4Rotate, 2, }, { {8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7}, kX64S32x4Rotate, 2, }, { {12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, kX64S32x4Rotate, 2, }, // These are matched by TryMatchConcat && !TryMatch32x4Rotate. { {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2}, kX64S8x16Alignr, 3, }, { {2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1}, kX64S8x16Alignr, 3, }, { {2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17}, kX64S8x16Alignr, 3, }, // These are matched by TryMatch32x4Shuffle && is_swizzle. { {0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15}, kX64S32x4Swizzle, 2, }, { {0, 1, 2, 3, 4, 5, 6, 7, 12, 13, 14, 15, 8, 9, 10, 11}, kX64S32x4Swizzle, 2, }, // These are matched by TryMatch32x4Shuffle && !is_swizzle && TryMatchBlend. { {0, 1, 2, 3, 20, 21, 22, 23, 8, 9, 10, 11, 28, 29, 30, 31}, kX64S16x8Blend, 3, }, { {16, 17, 18, 19, 4, 5, 6, 7, 24, 25, 26, 27, 12, 13, 14, 15}, kX64S16x8Blend, 3, }, // These are matched by TryMatch32x4Shuffle && !is_swizzle && // TryMatchShufps. { {0, 1, 2, 3, 8, 9, 10, 11, 28, 29, 30, 31, 28, 29, 30, 31}, kX64Shufps, 3, }, { {8, 9, 10, 11, 0, 1, 2, 3, 28, 29, 30, 31, 28, 29, 30, 31}, kX64Shufps, 3, }, // These are matched by TryMatch32x4Shuffle && !is_swizzle. { {28, 29, 30, 31, 0, 1, 2, 3, 28, 29, 30, 31, 28, 29, 30, 31}, kX64S32x4Shuffle, 4, }, // These are matched by TryMatch16x8Shuffle && TryMatchBlend. { {16, 17, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 12, 13, 14, 15}, kX64S16x8Blend, 3, }, // These are matched by TryMatch16x8Shuffle && TryMatchSplat<8>. { {2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3}, kX64S16x8Dup, 2, }, // These are matched by TryMatch16x8Shuffle && TryMatch16x8HalfShuffle. { {6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9}, kX64S16x8HalfShuffle1, 3, }, { {6, 7, 4, 5, 2, 3, 0, 1, 30, 31, 28, 29, 26, 27, 24, 25}, kX64S16x8HalfShuffle2, 5, }, // These are matched by TryMatchSplat<16>. 
{ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, kX64S8x16Dup, 2, }, // Generic shuffle that only uses 1 input. { {1, 15, 2, 14, 3, 13, 4, 12, 5, 11, 6, 10, 7, 9, 8}, kX64I8x16Shuffle, 5, }, // Generic shuffle that uses both inputs. { {1, 31, 2, 14, 3, 13, 4, 12, 5, 11, 6, 10, 7, 9, 8}, kX64I8x16Shuffle, 6, }, }; using InstructionSelectorSIMDArchShuffleTest = InstructionSelectorTestWithParam<ArchShuffle>; TEST_P(InstructionSelectorSIMDArchShuffleTest, SIMDArchShuffle) { MachineType type = MachineType::Simd128(); { // Tests various shuffle optimizations StreamBuilder m(this, type, type, type); auto param = GetParam(); auto shuffle = param.shuffle; const Operator* op = m.machine()->I8x16Shuffle(shuffle); Node* n = m.AddNode(op, m.Parameter(0), m.Parameter(1)); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(param.arch_opcode, s[0]->arch_opcode()); ASSERT_EQ(param.input_count, s[0]->InputCount()); EXPECT_EQ(1U, s[0]->OutputCount()); } } INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorSIMDArchShuffleTest, ::testing::ValuesIn(kArchShuffles)); struct ShuffleWithZeroInput { uint8_t shuffle_mask[kSimd128Size]; ArchOpcode arch_opcode; size_t input_count; }; static constexpr ShuffleWithZeroInput kShuffleWithZeroInput[] = { // These are matched by TryMatchByteToDwordZeroExtend. { {16, 1, 2, 3, 17, 4, 5, 6, 18, 7, 8, 9, 19, 10, 11, 12}, kX64I32X4ShiftZeroExtendI8x16, 2, }, // Generic shuffle that uses one zero input. { {16, 1, 2, 3, 17, 4, 5, 6, 18, 7, 8, 9, 19, 20, 21, 22}, kX64I8x16Shuffle, 5, }, }; using InstructionSelectorSIMDShuffleWithZeroInputTest = InstructionSelectorTestWithParam<ShuffleWithZeroInput>; TEST_P(InstructionSelectorSIMDShuffleWithZeroInputTest, SIMDShuffleWithZeroInputTest) { MachineType type = MachineType::Simd128(); { // Tests shuffle to packed zero extend optimization uint8_t zeros[kSimd128Size] = {0}; StreamBuilder m(this, type, type); auto param = GetParam(); const Operator* op = m.machine()->I8x16Shuffle(param.shuffle_mask); Node* const c = m.S128Const(zeros); Node* n = m.AddNode(op, c, m.Parameter(0)); m.Return(n); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); EXPECT_EQ(param.arch_opcode, s[0]->arch_opcode()); ASSERT_EQ(param.input_count, s[0]->InputCount()); EXPECT_EQ(1U, s[0]->OutputCount()); } } INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorSIMDShuffleWithZeroInputTest, ::testing::ValuesIn(kShuffleWithZeroInput)); struct SwizzleConstants { uint8_t shuffle[kSimd128Size]; bool omit_add; }; static constexpr SwizzleConstants kSwizzleConstants[] = { { // all lanes < kSimd128Size {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, true, }, { // lanes that are >= kSimd128Size have top bit set {12, 13, 14, 15, 0x90, 0x91, 0x92, 0x93, 0xA0, 0xA1, 0xA2, 0xA3, 0xFC, 0xFD, 0xFE, 0xFF}, true, }, { {12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}, false, }, }; using InstructionSelectorSIMDSwizzleConstantTest = InstructionSelectorTestWithParam<SwizzleConstants>; TEST_P(InstructionSelectorSIMDSwizzleConstantTest, SimdSwizzleConstant) { // Test optimization of swizzle with constant indices.
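// The x64 swizzle lowering normally biases the index vector with an unsigned
// saturating add so that every out-of-range index ends up with its top bit
// set, which makes pshufb zero that lane. When the indices are constants that
// are already either in range (< 16) or have the top bit set, that add can be
// skipped; the expected choice is captured in omit_add and checked against
// the instruction's misc() field below.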
auto param = GetParam(); StreamBuilder m(this, MachineType::Simd128(), MachineType::Simd128()); Node* const c = m.S128Const(param.shuffle); Node* swizzle = m.AddNode(m.machine()->I8x16Swizzle(), m.Parameter(0), c); m.Return(swizzle); Stream s = m.Build(); ASSERT_EQ(2U, s.size()); ASSERT_EQ(kX64I8x16Swizzle, s[1]->arch_opcode()); ASSERT_EQ(param.omit_add, s[1]->misc()); ASSERT_EQ(1U, s[0]->OutputCount()); } INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorSIMDSwizzleConstantTest, ::testing::ValuesIn(kSwizzleConstants)); TEST_F(InstructionSelectorTest, F64x2PromoteLowF32x4WithS128Load64Zero) { StreamBuilder m(this, MachineType::Simd128(), MachineType::Int32()); Node* const load = m.AddNode(m.machine()->LoadTransform(MemoryAccessKind::kProtected, LoadTransformation::kS128Load64Zero), m.Int32Constant(2), m.Parameter(0)); Node* const promote = m.AddNode(m.machine()->F64x2PromoteLowF32x4(), load); m.Return(promote); Stream s = m.Build(); ASSERT_EQ(1U, s.size()); ASSERT_EQ(kX64F64x2PromoteLowF32x4, s[0]->arch_opcode()); ASSERT_EQ(kMode_MRI, s[0]->addressing_mode()); EXPECT_EQ(2U, s[0]->InputCount()); EXPECT_EQ(1U, s[0]->OutputCount()); } #endif // V8_ENABLE_WEBASSEMBLY } // namespace compiler } // namespace internal } // namespace v8