fix asserts

This commit is contained in:
lizzie 2026-03-30 02:52:11 +00:00
parent c4bc5b8ca3
commit b4516101ad
121 changed files with 1329 additions and 1324 deletions

View file

@ -32,7 +32,6 @@ else()
endif()
option(DYNARMIC_ENABLE_NO_EXECUTE_SUPPORT "Enables support for systems that require W^X" ${REQUIRE_WX})
option(DYNARMIC_IGNORE_ASSERTS "Ignore asserts" ON)
option(DYNARMIC_TESTS_USE_UNICORN "Enable fuzzing tests against unicorn" OFF)
CMAKE_DEPENDENT_OPTION(DYNARMIC_USE_LLVM "Support disassembly of jitted x86_64 code using LLVM" OFF "NOT YUZU_DISABLE_LLVM" OFF)

View file

@ -376,9 +376,6 @@ endif()
if (DYNARMIC_ENABLE_NO_EXECUTE_SUPPORT)
target_compile_definitions(dynarmic PRIVATE DYNARMIC_ENABLE_NO_EXECUTE_SUPPORT=1)
endif()
if (DYNARMIC_IGNORE_ASSERTS)
target_compile_definitions(dynarmic PRIVATE MCL_IGNORE_ASSERTS=1)
endif()
if (CMAKE_SYSTEM_NAME STREQUAL "Windows")
target_compile_definitions(dynarmic PRIVATE FMT_USE_WINDOWS_H=0)
endif()

View file

@ -10,7 +10,7 @@
#include <mutex>
#include <boost/icl/interval_set.hpp>
#include "common/assert.h"
#include <cassert>
#include "common/common_types.h"
#include "dynarmic/backend/arm64/a32_address_space.h"
@ -31,7 +31,7 @@ struct Jit::Impl final {
, core(conf) {}
HaltReason Run() {
ASSERT(!jit_interface->is_executing);
assert(!jit_interface->is_executing);
PerformRequestedCacheInvalidation(static_cast<HaltReason>(Atomic::Load(&halt_reason)));
jit_interface->is_executing = true;
@ -42,7 +42,7 @@ struct Jit::Impl final {
}
HaltReason Step() {
ASSERT(!jit_interface->is_executing);
assert(!jit_interface->is_executing);
PerformRequestedCacheInvalidation(static_cast<HaltReason>(Atomic::Load(&halt_reason)));
jit_interface->is_executing = true;

View file

@ -10,7 +10,7 @@
#include <mutex>
#include <boost/icl/interval_set.hpp>
#include "common/assert.h"
#include <cassert>
#include "common/common_types.h"
#include "dynarmic/backend/arm64/a64_address_space.h"
@ -31,7 +31,7 @@ struct Jit::Impl final {
, core(conf) {}
HaltReason Run() {
ASSERT(!is_executing);
assert(!is_executing);
PerformRequestedCacheInvalidation(static_cast<HaltReason>(Atomic::Load(&halt_reason)));
is_executing = true;
HaltReason hr = core.Run(current_address_space, current_state, &halt_reason);
@ -41,7 +41,7 @@ struct Jit::Impl final {
}
HaltReason Step() {
ASSERT(!is_executing);
assert(!is_executing);
PerformRequestedCacheInvalidation(static_cast<HaltReason>(Atomic::Load(&halt_reason)));
is_executing = true;
HaltReason hr = core.Step(current_address_space, current_state, &halt_reason);

View file

@ -11,7 +11,7 @@
#include <initializer_list>
#include "common/common_types.h"
#include "common/assert.h"
#include <cassert>
#include <oaknut/oaknut.hpp>
@ -57,7 +57,7 @@ constexpr RegisterList ToRegList(oaknut::Reg reg) {
if (reg.is_vector()) {
return RegisterList{1} << (reg.index() + 32);
}
ASSERT(reg.index() != 31 && "ZR not allowed in reg list");
assert(reg.index() != 31 && "ZR not allowed in reg list");
if (reg.index() == -1) {
return RegisterList{1} << 31;
}

View file

@ -31,7 +31,7 @@ AddressSpace::AddressSpace(size_t code_cache_size)
, code(mem.ptr(), mem.ptr())
, fastmem_manager(exception_handler)
{
ASSERT(code_cache_size <= 128 * 1024 * 1024 && "code_cache_size > 128 MiB not currently supported");
assert(code_cache_size <= 128 * 1024 * 1024 && "code_cache_size > 128 MiB not currently supported");
exception_handler.Register(mem, code_cache_size);
exception_handler.SetFastmemCallback([this](u64 host_pc) {
@ -115,9 +115,9 @@ EmittedBlockInfo AddressSpace::Emit(IR::Block block) {
EmittedBlockInfo block_info = EmitArm64(code, std::move(block), GetEmitConfig(), fastmem_manager);
ASSERT(block_entries.insert({block.Location(), block_info.entry_point}).second);
ASSERT(reverse_block_entries.insert({block_info.entry_point, block.Location()}).second);
ASSERT(block_infos.insert({block_info.entry_point, block_info}).second);
assert(block_entries.insert({block.Location(), block_info.entry_point}).second);
assert(reverse_block_entries.insert({block_info.entry_point, block.Location()}).second);
assert(block_infos.insert({block_info.entry_point, block_info}).second);
Link(block_info);
RelinkForDescriptor(block.Location(), block_info.entry_point);

View file

@ -54,7 +54,7 @@ void EmitIR<IR::Opcode::PushRSB>(oaknut::CodeGenerator& code, EmitContext& ctx,
}
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(args[0].IsImmediate());
assert(args[0].IsImmediate());
const IR::LocationDescriptor target{args[0].GetImmediateU64()};
code.LDR(Wscratch2, SP, offsetof(StackLayout, rsb_ptr));
@ -71,19 +71,19 @@ void EmitIR<IR::Opcode::PushRSB>(oaknut::CodeGenerator& code, EmitContext& ctx,
template<>
void EmitIR<IR::Opcode::GetCarryFromOp>(oaknut::CodeGenerator&, EmitContext& ctx, IR::Inst* inst) {
[[maybe_unused]] auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(ctx.reg_alloc.WasValueDefined(inst));
assert(ctx.reg_alloc.WasValueDefined(inst));
}
template<>
void EmitIR<IR::Opcode::GetOverflowFromOp>(oaknut::CodeGenerator&, EmitContext& ctx, IR::Inst* inst) {
[[maybe_unused]] auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(ctx.reg_alloc.WasValueDefined(inst));
assert(ctx.reg_alloc.WasValueDefined(inst));
}
template<>
void EmitIR<IR::Opcode::GetGEFromOp>(oaknut::CodeGenerator&, EmitContext& ctx, IR::Inst* inst) {
[[maybe_unused]] auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(ctx.reg_alloc.WasValueDefined(inst));
assert(ctx.reg_alloc.WasValueDefined(inst));
}
template<>
@ -149,13 +149,13 @@ void EmitIR<IR::Opcode::GetNZFromOp>(oaknut::CodeGenerator& code, EmitContext& c
template<>
void EmitIR<IR::Opcode::GetUpperFromOp>(oaknut::CodeGenerator&, EmitContext& ctx, IR::Inst* inst) {
[[maybe_unused]] auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(ctx.reg_alloc.WasValueDefined(inst));
assert(ctx.reg_alloc.WasValueDefined(inst));
}
template<>
void EmitIR<IR::Opcode::GetLowerFromOp>(oaknut::CodeGenerator&, EmitContext& ctx, IR::Inst* inst) {
[[maybe_unused]] auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(ctx.reg_alloc.WasValueDefined(inst));
assert(ctx.reg_alloc.WasValueDefined(inst));
}
template<>
@ -206,9 +206,9 @@ EmittedBlockInfo EmitArm64(oaknut::CodeGenerator& code, IR::Block block, const E
ebi.entry_point = code.xptr<CodePtr>();
if (ctx.block.GetCondition() == IR::Cond::AL) {
ASSERT(!ctx.block.HasConditionFailedLocation());
assert(!ctx.block.HasConditionFailedLocation());
} else {
ASSERT(ctx.block.HasConditionFailedLocation());
assert(ctx.block.HasConditionFailedLocation());
oaknut::Label pass;
pass = conf.emit_cond(code, ctx, ctx.block.GetCondition());

View file

@ -211,7 +211,7 @@ void EmitIR<IR::Opcode::A32GetRegister>(oaknut::CodeGenerator& code, EmitContext
template<>
void EmitIR<IR::Opcode::A32GetExtendedRegister32>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
const A32::ExtReg reg = inst->GetArg(0).GetA32ExtRegRef();
ASSERT(A32::IsSingleExtReg(reg));
assert(A32::IsSingleExtReg(reg));
const size_t index = static_cast<size_t>(reg) - static_cast<size_t>(A32::ExtReg::S0);
auto Sresult = ctx.reg_alloc.WriteS(inst);
@ -225,7 +225,7 @@ void EmitIR<IR::Opcode::A32GetExtendedRegister32>(oaknut::CodeGenerator& code, E
template<>
void EmitIR<IR::Opcode::A32GetVector>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
const A32::ExtReg reg = inst->GetArg(0).GetA32ExtRegRef();
ASSERT(A32::IsDoubleExtReg(reg) || A32::IsQuadExtReg(reg));
assert(A32::IsDoubleExtReg(reg) || A32::IsQuadExtReg(reg));
if (A32::IsDoubleExtReg(reg)) {
const size_t index = static_cast<size_t>(reg) - static_cast<size_t>(A32::ExtReg::D0);
@ -243,7 +243,7 @@ void EmitIR<IR::Opcode::A32GetVector>(oaknut::CodeGenerator& code, EmitContext&
template<>
void EmitIR<IR::Opcode::A32GetExtendedRegister64>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
const A32::ExtReg reg = inst->GetArg(0).GetA32ExtRegRef();
ASSERT(A32::IsDoubleExtReg(reg));
assert(A32::IsDoubleExtReg(reg));
const size_t index = static_cast<size_t>(reg) - static_cast<size_t>(A32::ExtReg::D0);
auto Dresult = ctx.reg_alloc.WriteD(inst);
@ -271,7 +271,7 @@ void EmitIR<IR::Opcode::A32SetRegister>(oaknut::CodeGenerator& code, EmitContext
template<>
void EmitIR<IR::Opcode::A32SetExtendedRegister32>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
const A32::ExtReg reg = inst->GetArg(0).GetA32ExtRegRef();
ASSERT(A32::IsSingleExtReg(reg));
assert(A32::IsSingleExtReg(reg));
const size_t index = static_cast<size_t>(reg) - static_cast<size_t>(A32::ExtReg::S0);
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
@ -286,7 +286,7 @@ void EmitIR<IR::Opcode::A32SetExtendedRegister32>(oaknut::CodeGenerator& code, E
template<>
void EmitIR<IR::Opcode::A32SetExtendedRegister64>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
const A32::ExtReg reg = inst->GetArg(0).GetA32ExtRegRef();
ASSERT(A32::IsDoubleExtReg(reg));
assert(A32::IsDoubleExtReg(reg));
const size_t index = static_cast<size_t>(reg) - static_cast<size_t>(A32::ExtReg::D0);
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
@ -301,7 +301,7 @@ void EmitIR<IR::Opcode::A32SetExtendedRegister64>(oaknut::CodeGenerator& code, E
template<>
void EmitIR<IR::Opcode::A32SetVector>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
const A32::ExtReg reg = inst->GetArg(0).GetA32ExtRegRef();
ASSERT(A32::IsDoubleExtReg(reg) || A32::IsQuadExtReg(reg));
assert(A32::IsDoubleExtReg(reg) || A32::IsQuadExtReg(reg));
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
if (A32::IsDoubleExtReg(reg)) {

View file

@ -194,8 +194,8 @@ void EmitIR<IR::Opcode::TestBit>(oaknut::CodeGenerator& code, EmitContext& ctx,
auto Xresult = ctx.reg_alloc.WriteX(inst);
auto Xoperand = ctx.reg_alloc.ReadX(args[0]);
RegAlloc::Realize(Xresult, Xoperand);
ASSERT(args[1].IsImmediate());
ASSERT(args[1].GetImmediateU8() < 64);
assert(args[1].IsImmediate());
assert(args[1].GetImmediateU8() < 64);
code.UBFX(Xresult, Xoperand, args[1].GetImmediateU8(), 1);
}
@ -893,9 +893,9 @@ static void EmitAddSub(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst*
if (overflow_inst) {
// There is a limited set of circumstances where this is required, so assert for this.
ASSERT(!sub);
ASSERT(!nzcv_inst);
ASSERT(args[2].IsImmediate() && args[2].GetImmediateU1() == false);
assert(!sub);
assert(!nzcv_inst);
assert(args[2].IsImmediate() && args[2].GetImmediateU1() == false);
auto Rb = ctx.reg_alloc.ReadReg<bitsize>(args[1]);
auto Woverflow = ctx.reg_alloc.WriteW(overflow_inst);
@ -1134,7 +1134,7 @@ static void EmitBitOp(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* i
if constexpr (!std::is_same_v<EmitFn2, std::nullptr_t>) {
const auto nz_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetNZFromOp);
const auto nzcv_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetNZCVFromOp);
ASSERT(!(nz_inst && nzcv_inst));
assert(!(nz_inst && nzcv_inst));
const auto flag_inst = nz_inst ? nz_inst : nzcv_inst;
if (flag_inst) {
@ -1171,7 +1171,7 @@ template<size_t bitsize>
static void EmitAndNot(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
const auto nz_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetNZFromOp);
const auto nzcv_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetNZCVFromOp);
ASSERT(!(nz_inst && nzcv_inst));
assert(!(nz_inst && nzcv_inst));
const auto flag_inst = nz_inst ? nz_inst : nzcv_inst;
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
@ -1402,7 +1402,7 @@ void EmitIR<IR::Opcode::CountLeadingZeros64>(oaknut::CodeGenerator& code, EmitCo
template<>
void EmitIR<IR::Opcode::ExtractRegister32>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(args[2].IsImmediate());
assert(args[2].IsImmediate());
auto Wresult = ctx.reg_alloc.WriteW(inst);
auto Wop1 = ctx.reg_alloc.ReadW(args[0]);
@ -1416,7 +1416,7 @@ void EmitIR<IR::Opcode::ExtractRegister32>(oaknut::CodeGenerator& code, EmitCont
template<>
void EmitIR<IR::Opcode::ExtractRegister64>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(args[2].IsImmediate());
assert(args[2].IsImmediate());
auto Xresult = ctx.reg_alloc.WriteX(inst);
auto Xop1 = ctx.reg_alloc.ReadX(args[0]);
@ -1430,7 +1430,7 @@ void EmitIR<IR::Opcode::ExtractRegister64>(oaknut::CodeGenerator& code, EmitCont
template<>
void EmitIR<IR::Opcode::ReplicateBit32>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(args[1].IsImmediate());
assert(args[1].IsImmediate());
auto Wresult = ctx.reg_alloc.WriteW(inst);
auto Wvalue = ctx.reg_alloc.ReadW(args[0]);
@ -1444,7 +1444,7 @@ void EmitIR<IR::Opcode::ReplicateBit32>(oaknut::CodeGenerator& code, EmitContext
template<>
void EmitIR<IR::Opcode::ReplicateBit64>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(args[1].IsImmediate());
assert(args[1].IsImmediate());
auto Xresult = ctx.reg_alloc.WriteX(inst);
auto Xvalue = ctx.reg_alloc.ReadX(args[0]);

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
/* This file is part of the dynarmic project.
@ -68,7 +68,7 @@ static void EmitConvert(oaknut::CodeGenerator&, EmitContext& ctx, IR::Inst* inst
RegAlloc::Realize(Vto, Vfrom);
ctx.fpsr.Load();
ASSERT(rounding_mode == ctx.FPCR().RMode());
assert(rounding_mode == ctx.FPCR().RMode());
emit(Vto, Vfrom);
}
@ -106,8 +106,8 @@ static void EmitToFixed(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst*
}
}
} else {
ASSERT(fbits == 0);
ASSERT(bitsize_to != 16);
assert(fbits == 0);
assert(bitsize_to != 16);
if constexpr (is_signed) {
switch (rounding_mode) {
case FP::RoundingMode::ToNearest_TieEven:
@ -449,7 +449,7 @@ void EmitIR<IR::Opcode::FPRoundInt32>(oaknut::CodeGenerator& code, EmitContext&
ctx.fpsr.Load();
if (exact) {
ASSERT(ctx.FPCR().RMode() == rounding_mode);
assert(ctx.FPCR().RMode() == rounding_mode);
code.FRINTX(Sresult, Soperand);
} else {
switch (rounding_mode) {
@ -486,7 +486,7 @@ void EmitIR<IR::Opcode::FPRoundInt64>(oaknut::CodeGenerator& code, EmitContext&
ctx.fpsr.Load();
if (exact) {
ASSERT(ctx.FPCR().RMode() == rounding_mode);
assert(ctx.FPCR().RMode() == rounding_mode);
code.FRINTX(Dresult, Doperand);
} else {
switch (rounding_mode) {

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
/* This file is part of the dynarmic project.
* Copyright (c) 2022 MerryMage
* SPDX-License-Identifier: 0BSD
@ -244,7 +247,7 @@ static void EmitPackedAddSub(oaknut::CodeGenerator& code, EmitContext& ctx, IR::
}
if (ge_inst) {
ASSERT(!is_halving);
assert(!is_halving);
auto Vge = ctx.reg_alloc.WriteD(ge_inst);
RegAlloc::Realize(Vge);

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
/* This file is part of the dynarmic project.
@ -24,7 +24,7 @@ using namespace oaknut::util;
template<>
void EmitIR<IR::Opcode::SignedSaturatedAddWithFlag32>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
ASSERT(overflow_inst);
assert(overflow_inst);
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
auto Wresult = ctx.reg_alloc.WriteW(inst);
@ -44,7 +44,7 @@ void EmitIR<IR::Opcode::SignedSaturatedAddWithFlag32>(oaknut::CodeGenerator& cod
template<>
void EmitIR<IR::Opcode::SignedSaturatedSubWithFlag32>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
ASSERT(overflow_inst);
assert(overflow_inst);
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
auto Wresult = ctx.reg_alloc.WriteW(inst);
@ -67,7 +67,7 @@ void EmitIR<IR::Opcode::SignedSaturation>(oaknut::CodeGenerator& code, EmitConte
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const size_t N = args[1].GetImmediateU8();
ASSERT(N >= 1 && N <= 32);
assert(N >= 1 && N <= 32);
if (N == 32) {
ctx.reg_alloc.DefineAsExisting(inst, args[0]);
@ -113,7 +113,7 @@ void EmitIR<IR::Opcode::UnsignedSaturation>(oaknut::CodeGenerator& code, EmitCon
ctx.reg_alloc.SpillFlags();
const size_t N = args[1].GetImmediateU8();
ASSERT(N <= 31);
assert(N <= 31);
const u32 saturated_value = (1u << N) - 1;
code.MOV(Wscratch0, saturated_value);

View file

@ -275,7 +275,7 @@ static void EmitReduce(oaknut::CodeGenerator&, EmitContext& ctx, IR::Inst* inst,
template<size_t size, typename EmitFn>
static void EmitGetElement(oaknut::CodeGenerator&, EmitContext& ctx, IR::Inst* inst, EmitFn emit) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(args[1].IsImmediate());
assert(args[1].IsImmediate());
const u8 index = args[1].GetImmediateU8();
auto Rresult = ctx.reg_alloc.WriteReg<std::max<size_t>(32, size)>(inst);
@ -310,7 +310,7 @@ void EmitIR<IR::Opcode::VectorGetElement64>(oaknut::CodeGenerator& code, EmitCon
template<size_t size, typename EmitFn>
static void EmitSetElement(oaknut::CodeGenerator&, EmitContext& ctx, IR::Inst* inst, EmitFn emit) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(args[1].IsImmediate());
assert(args[1].IsImmediate());
const u8 index = args[1].GetImmediateU8();
auto Qvector = ctx.reg_alloc.ReadWriteQ(args[0], inst);
@ -650,7 +650,7 @@ void EmitIR<IR::Opcode::VectorExtract>(oaknut::CodeGenerator& code, EmitContext&
auto Qa = ctx.reg_alloc.ReadQ(args[0]);
auto Qb = ctx.reg_alloc.ReadQ(args[1]);
const u8 position = args[2].GetImmediateU8();
ASSERT(position % 8 == 0);
assert(position % 8 == 0);
RegAlloc::Realize(Qresult, Qa, Qb);
code.EXT(Qresult->B16(), Qa->B16(), Qb->B16(), position / 8);
@ -663,7 +663,7 @@ void EmitIR<IR::Opcode::VectorExtractLower>(oaknut::CodeGenerator& code, EmitCon
auto Da = ctx.reg_alloc.ReadD(args[0]);
auto Db = ctx.reg_alloc.ReadD(args[1]);
const u8 position = args[2].GetImmediateU8();
ASSERT(position % 8 == 0);
assert(position % 8 == 0);
RegAlloc::Realize(Dresult, Da, Db);
code.EXT(Dresult->B8(), Da->B8(), Db->B8(), position / 8);
@ -958,7 +958,7 @@ void EmitIR<IR::Opcode::VectorMultiply32>(oaknut::CodeGenerator& code, EmitConte
template<>
void EmitIR<IR::Opcode::VectorMultiply64>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
ASSERT(ctx.conf.very_verbose_debugging_output && "VectorMultiply64 is for debugging only");
assert(ctx.conf.very_verbose_debugging_output && "VectorMultiply64 is for debugging only");
EmitThreeOp(code, ctx, inst, [&](auto& Qresult, auto& Qa, auto& Qb) {
code.FMOV(Xscratch0, Qa->toD());
code.FMOV(Xscratch1, Qb->toD());
@ -1289,7 +1289,7 @@ void EmitIR<IR::Opcode::VectorReduceAdd64>(oaknut::CodeGenerator& code, EmitCont
template<>
void EmitIR<IR::Opcode::VectorRotateWholeVectorRight>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
EmitImmShift<8>(code, ctx, inst, [&](auto Vresult, auto Voperand, u8 shift_amount) {
ASSERT(shift_amount % 8 == 0);
assert(shift_amount % 8 == 0);
const u8 ext_imm = (shift_amount % 128) / 8;
code.EXT(Vresult, Voperand, Voperand, ext_imm);
});
@ -1602,12 +1602,12 @@ void EmitIR<IR::Opcode::VectorSub64>(oaknut::CodeGenerator& code, EmitContext& c
template<>
void EmitIR<IR::Opcode::VectorTable>(oaknut::CodeGenerator&, EmitContext&, IR::Inst* inst) {
// Do nothing. We *want* to hold on to the refcount for our arguments, so VectorTableLookup can use our arguments.
ASSERT(inst->UseCount() == 1 && "Table cannot be used multiple times");
assert(inst->UseCount() == 1 && "Table cannot be used multiple times");
}
template<>
void EmitIR<IR::Opcode::VectorTableLookup64>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
ASSERT(inst->GetArg(1).GetInst()->GetOpcode() == IR::Opcode::VectorTable);
assert(inst->GetArg(1).GetInst()->GetOpcode() == IR::Opcode::VectorTable);
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
auto table = ctx.reg_alloc.GetArgumentInfo(inst->GetArg(1).GetInst());
@ -1674,7 +1674,7 @@ void EmitIR<IR::Opcode::VectorTableLookup64>(oaknut::CodeGenerator& code, EmitCo
template<>
void EmitIR<IR::Opcode::VectorTableLookup128>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
ASSERT(inst->GetArg(1).GetInst()->GetOpcode() == IR::Opcode::VectorTable);
assert(inst->GetArg(1).GetInst()->GetOpcode() == IR::Opcode::VectorTable);
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
auto table = ctx.reg_alloc.GetArgumentInfo(inst->GetArg(1).GetInst());

View file

@ -139,7 +139,7 @@ static void EmitFromFixed(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Ins
const u8 fbits = args[1].GetImmediateU8();
const FP::RoundingMode rounding_mode = static_cast<FP::RoundingMode>(args[2].GetImmediateU8());
const bool fpcr_controlled = args[3].GetImmediateU1();
ASSERT(rounding_mode == ctx.FPCR(fpcr_controlled).RMode());
assert(rounding_mode == ctx.FPCR(fpcr_controlled).RMode());
RegAlloc::Realize(Qto, Qfrom);
MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&] {
@ -199,7 +199,7 @@ void EmitToFixed(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst)
}
}
} else {
ASSERT(fbits == 0);
assert(fbits == 0);
if constexpr (is_signed) {
switch (rounding_mode) {
case FP::RoundingMode::ToNearest_TieEven:
@ -346,7 +346,7 @@ template<>
void EmitIR<IR::Opcode::FPVectorFromHalf32>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const auto rounding_mode = static_cast<FP::RoundingMode>(args[1].GetImmediateU8());
ASSERT(rounding_mode == FP::RoundingMode::ToNearest_TieEven);
assert(rounding_mode == FP::RoundingMode::ToNearest_TieEven);
const bool fpcr_controlled = args[2].GetImmediateU1();
auto Qresult = ctx.reg_alloc.WriteQ(inst);
@ -617,7 +617,7 @@ void EmitIR<IR::Opcode::FPVectorRoundInt32>(oaknut::CodeGenerator& code, EmitCon
MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&] {
if (exact) {
ASSERT(ctx.FPCR(fpcr_controlled).RMode() == rounding_mode);
assert(ctx.FPCR(fpcr_controlled).RMode() == rounding_mode);
code.FRINTX(Qresult->S4(), Qoperand->S4());
} else {
switch (rounding_mode) {
@ -657,7 +657,7 @@ void EmitIR<IR::Opcode::FPVectorRoundInt64>(oaknut::CodeGenerator& code, EmitCon
MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&] {
if (exact) {
ASSERT(ctx.FPCR(fpcr_controlled).RMode() == rounding_mode);
assert(ctx.FPCR(fpcr_controlled).RMode() == rounding_mode);
code.FRINTX(Qresult->D2(), Qoperand->D2());
} else {
switch (rounding_mode) {
@ -743,7 +743,7 @@ template<>
void EmitIR<IR::Opcode::FPVectorToHalf32>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const auto rounding_mode = static_cast<FP::RoundingMode>(args[1].GetImmediateU8());
ASSERT(rounding_mode == FP::RoundingMode::ToNearest_TieEven);
assert(rounding_mode == FP::RoundingMode::ToNearest_TieEven);
const bool fpcr_controlled = args[2].GetImmediateU1();
auto Dresult = ctx.reg_alloc.WriteD(inst);

View file

@ -10,7 +10,7 @@
#include <algorithm>
#include "common/assert.h"
#include <cassert>
namespace Dynarmic {

View file

@ -12,7 +12,7 @@
#include <array>
#include <iterator>
#include "common/assert.h"
#include <cassert>
#include "dynarmic/mcl/bit.hpp"
#include <bit>
#include "common/common_types.h"
@ -53,19 +53,19 @@ bool Argument::GetImmediateU1() const {
u8 Argument::GetImmediateU8() const {
const u64 imm = value.GetImmediateAsU64();
ASSERT(imm < 0x100);
assert(imm < 0x100);
return u8(imm);
}
u16 Argument::GetImmediateU16() const {
const u64 imm = value.GetImmediateAsU64();
ASSERT(imm < 0x10000);
assert(imm < 0x10000);
return u16(imm);
}
u32 Argument::GetImmediateU32() const {
const u64 imm = value.GetImmediateAsU64();
ASSERT(imm < 0x100000000);
assert(imm < 0x100000000);
return u32(imm);
}
@ -74,12 +74,12 @@ u64 Argument::GetImmediateU64() const {
}
IR::Cond Argument::GetImmediateCond() const {
ASSERT(IsImmediate() && GetType() == IR::Type::Cond);
assert(IsImmediate() && GetType() == IR::Type::Cond);
return value.GetCond();
}
IR::AccType Argument::GetImmediateAccType() const {
ASSERT(IsImmediate() && GetType() == IR::Type::AccType);
assert(IsImmediate() && GetType() == IR::Type::AccType);
return value.GetAccType();
}
@ -92,12 +92,12 @@ bool HostLocInfo::Contains(const IR::Inst* value) const {
}
void HostLocInfo::SetupScratchLocation() {
ASSERT(IsCompletelyEmpty());
assert(IsCompletelyEmpty());
realized = true;
}
void HostLocInfo::SetupLocation(const IR::Inst* value) {
ASSERT(IsCompletelyEmpty());
assert(IsCompletelyEmpty());
values.clear();
values.push_back(value);
realized = true;
@ -135,7 +135,7 @@ RegAlloc::ArgumentInfo RegAlloc::GetArgumentInfo(IR::Inst* inst) {
const IR::Value arg = inst->GetArg(i);
ret[i].value = arg;
if (!arg.IsImmediate() && !IsValuelessType(arg.GetType())) {
ASSERT(ValueLocation(arg.GetInst()) && "argument must already been defined");
assert(ValueLocation(arg.GetInst()) && "argument must already been defined");
ValueInfo(arg.GetInst()).uses_this_inst++;
}
}
@ -174,11 +174,11 @@ void RegAlloc::PrepareForCall(std::optional<Argument::copyable_reference> arg0,
for (int i = 0; i < 4; i++) {
if (args[i]) {
if (args[i]->get().GetType() == IR::Type::U128) {
ASSERT(fprs[nsrn].IsCompletelyEmpty());
assert(fprs[nsrn].IsCompletelyEmpty());
LoadCopyInto(args[i]->get().value, oaknut::QReg{nsrn});
nsrn++;
} else {
ASSERT(gprs[ngrn].IsCompletelyEmpty());
assert(gprs[ngrn].IsCompletelyEmpty());
LoadCopyInto(args[i]->get().value, oaknut::XReg{ngrn});
ngrn++;
}
@ -192,7 +192,7 @@ void RegAlloc::PrepareForCall(std::optional<Argument::copyable_reference> arg0,
void RegAlloc::DefineAsExisting(IR::Inst* inst, Argument& arg) {
defined_insts.insert(inst);
ASSERT(!ValueLocation(inst));
assert(!ValueLocation(inst));
if (arg.value.IsImmediate()) {
inst->ReplaceUsesWith(arg.value);
@ -206,9 +206,9 @@ void RegAlloc::DefineAsExisting(IR::Inst* inst, Argument& arg) {
void RegAlloc::DefineAsRegister(IR::Inst* inst, oaknut::Reg reg) {
defined_insts.insert(inst);
ASSERT(!ValueLocation(inst));
assert(!ValueLocation(inst));
auto& info = reg.is_vector() ? fprs[reg.index()] : gprs[reg.index()];
ASSERT(info.IsCompletelyEmpty());
assert(info.IsCompletelyEmpty());
info.values.push_back(inst);
info.expected_uses += inst->UseCount();
}
@ -228,18 +228,18 @@ void RegAlloc::UpdateAllUses() {
void RegAlloc::AssertAllUnlocked() const {
const auto is_unlocked = [](const auto& i) { return !i.locked && !i.realized; };
ASSERT(std::all_of(gprs.begin(), gprs.end(), is_unlocked));
ASSERT(std::all_of(fprs.begin(), fprs.end(), is_unlocked));
ASSERT(is_unlocked(flags));
ASSERT(std::all_of(spills.begin(), spills.end(), is_unlocked));
assert(std::all_of(gprs.begin(), gprs.end(), is_unlocked));
assert(std::all_of(fprs.begin(), fprs.end(), is_unlocked));
assert(is_unlocked(flags));
assert(std::all_of(spills.begin(), spills.end(), is_unlocked));
}
void RegAlloc::AssertNoMoreUses() const {
const auto is_empty = [](const auto& i) { return i.IsCompletelyEmpty(); };
ASSERT(std::all_of(gprs.begin(), gprs.end(), is_empty));
ASSERT(std::all_of(fprs.begin(), fprs.end(), is_empty));
ASSERT(is_empty(flags));
ASSERT(std::all_of(spills.begin(), spills.end(), is_empty));
assert(std::all_of(gprs.begin(), gprs.end(), is_empty));
assert(std::all_of(fprs.begin(), fprs.end(), is_empty));
assert(is_empty(flags));
assert(std::all_of(spills.begin(), spills.end(), is_empty));
}
void RegAlloc::EmitVerboseDebuggingOutput() {
@ -271,7 +271,7 @@ void RegAlloc::EmitVerboseDebuggingOutput() {
template<HostLoc::Kind kind>
int RegAlloc::GenerateImmediate(const IR::Value& value) {
ASSERT(value.GetType() != IR::Type::U1);
assert(value.GetType() != IR::Type::U1);
if constexpr (kind == HostLoc::Kind::Gpr) {
const int new_location_index = AllocateRegister(gprs, gpr_order);
SpillGpr(new_location_index);
@ -309,15 +309,15 @@ int RegAlloc::RealizeReadImpl(const IR::Value& value) {
}
const auto current_location = ValueLocation(value.GetInst());
ASSERT(current_location);
assert(current_location);
if (current_location->kind == required_kind) {
ValueInfo(*current_location).realized = true;
return current_location->index;
}
ASSERT(!ValueInfo(*current_location).realized);
ASSERT(ValueInfo(*current_location).locked);
assert(!ValueInfo(*current_location).realized);
assert(ValueInfo(*current_location).locked);
if constexpr (required_kind == HostLoc::Kind::Gpr) {
const int new_location_index = AllocateRegister(gprs, gpr_order);
@ -328,7 +328,7 @@ int RegAlloc::RealizeReadImpl(const IR::Value& value) {
UNREACHABLE(); //logic error
case HostLoc::Kind::Fpr:
code.FMOV(oaknut::XReg{new_location_index}, oaknut::DReg{current_location->index});
// ASSERT size fits
// assert size fits
break;
case HostLoc::Kind::Spill:
code.LDR(oaknut::XReg{new_location_index}, SP, spill_offset + current_location->index * spill_slot_size);
@ -355,7 +355,7 @@ int RegAlloc::RealizeReadImpl(const IR::Value& value) {
code.LDR(oaknut::QReg{new_location_index}, SP, spill_offset + current_location->index * spill_slot_size);
break;
case HostLoc::Kind::Flags:
ASSERT(false && "Moving from flags into fprs is not currently supported");
assert(false && "Moving from flags into fprs is not currently supported");
break;
}
@ -372,7 +372,7 @@ int RegAlloc::RealizeReadImpl(const IR::Value& value) {
template<HostLoc::Kind kind>
int RegAlloc::RealizeWriteImpl(const IR::Inst* value) {
defined_insts.insert(value);
ASSERT(!ValueLocation(value));
assert(!ValueLocation(value));
if constexpr (kind == HostLoc::Kind::Gpr) {
const int new_location_index = AllocateRegister(gprs, gpr_order);
@ -407,7 +407,7 @@ int RegAlloc::RealizeReadWriteImpl(const IR::Value& read_value, const IR::Inst*
LoadCopyInto(read_value, oaknut::QReg{write_loc});
return write_loc;
} else if constexpr (kind == HostLoc::Kind::Flags) {
ASSERT(false && "Incorrect function for ReadWrite of flags");
assert(false && "Incorrect function for ReadWrite of flags");
} else {
UNREACHABLE();
}
@ -439,7 +439,7 @@ int RegAlloc::AllocateRegister(const std::array<HostLocInfo, 32>& regs, const st
}
void RegAlloc::SpillGpr(int index) {
ASSERT(!gprs[index].locked && !gprs[index].realized);
assert(!gprs[index].locked && !gprs[index].realized);
if (gprs[index].values.empty()) {
return;
}
@ -449,7 +449,7 @@ void RegAlloc::SpillGpr(int index) {
}
void RegAlloc::SpillFpr(int index) {
ASSERT(!fprs[index].locked && !fprs[index].realized);
assert(!fprs[index].locked && !fprs[index].realized);
if (fprs[index].values.empty()) {
return;
}
@ -461,7 +461,7 @@ void RegAlloc::SpillFpr(int index) {
void RegAlloc::ReadWriteFlags(Argument& read, IR::Inst* write) {
defined_insts.insert(write);
const auto current_location = ValueLocation(read.value.GetInst());
ASSERT(current_location);
assert(current_location);
if (current_location->kind == HostLoc::Kind::Flags) {
if (!flags.IsOneRemainingUse()) {
@ -479,7 +479,7 @@ void RegAlloc::ReadWriteFlags(Argument& read, IR::Inst* write) {
code.LDR(Wscratch0, SP, spill_offset + current_location->index * spill_slot_size);
code.MSR(oaknut::SystemReg::NZCV, Xscratch0);
} else {
UNREACHABLE(); //ASSERT(false && "Invalid current location for flags");
UNREACHABLE(); //assert(false && "Invalid current location for flags");
}
if (write) {
@ -489,7 +489,7 @@ void RegAlloc::ReadWriteFlags(Argument& read, IR::Inst* write) {
}
void RegAlloc::SpillFlags() {
ASSERT(!flags.locked && !flags.realized);
assert(!flags.locked && !flags.realized);
if (flags.values.empty()) {
return;
}
@ -501,7 +501,7 @@ void RegAlloc::SpillFlags() {
int RegAlloc::FindFreeSpill() const {
const auto iter = std::find_if(spills.begin(), spills.end(), [](const HostLocInfo& info) { return info.values.empty(); });
ASSERT(iter != spills.end() && "All spill locations are full");
assert(iter != spills.end() && "All spill locations are full");
return static_cast<int>(iter - spills.begin());
}
@ -512,14 +512,14 @@ void RegAlloc::LoadCopyInto(const IR::Value& value, oaknut::XReg reg) {
}
const auto current_location = ValueLocation(value.GetInst());
ASSERT(current_location);
assert(current_location);
switch (current_location->kind) {
case HostLoc::Kind::Gpr:
code.MOV(reg, oaknut::XReg{current_location->index});
break;
case HostLoc::Kind::Fpr:
code.FMOV(reg, oaknut::DReg{current_location->index});
// ASSERT size fits
// assert size fits
break;
case HostLoc::Kind::Spill:
code.LDR(reg, SP, spill_offset + current_location->index * spill_slot_size);
@ -538,7 +538,7 @@ void RegAlloc::LoadCopyInto(const IR::Value& value, oaknut::QReg reg) {
}
const auto current_location = ValueLocation(value.GetInst());
ASSERT(current_location);
assert(current_location);
switch (current_location->kind) {
case HostLoc::Kind::Gpr:
code.FMOV(reg.toD(), oaknut::XReg{current_location->index});
@ -551,7 +551,7 @@ void RegAlloc::LoadCopyInto(const IR::Value& value, oaknut::QReg reg) {
code.LDR(reg, SP, spill_offset + current_location->index * spill_slot_size);
break;
case HostLoc::Kind::Flags:
UNREACHABLE(); //ASSERT(false && "Moving from flags into fprs is not currently supported");
UNREACHABLE(); //assert(false && "Moving from flags into fprs is not currently supported");
}
}

View file

@ -14,7 +14,7 @@
#include <utility>
#include <vector>
#include "common/assert.h"
#include <cassert>
#include "common/common_types.h"
#include "dynarmic/mcl/is_instance_of_template.hpp"
#include <oaknut/oaknut.hpp>

View file

@ -19,7 +19,7 @@
#include <bit>
#include <fmt/format.h>
#include "common/assert.h"
#include <cassert>
#include "common/common_types.h"
#include "dynarmic/backend/exception_handler.h"
@ -87,7 +87,7 @@ private:
};
MachHandler::MachHandler() {
#define KCHECK(x) ASSERT((x) == KERN_SUCCESS && "init failure at " #x)
#define KCHECK(x) assert((x) == KERN_SUCCESS && "init failure at " #x)
KCHECK(mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &server_port));
KCHECK(mach_port_insert_right(mach_task_self(), server_port, server_port, MACH_MSG_TYPE_MAKE_SEND));
KCHECK(task_set_exception_ports(mach_task_self(), EXC_MASK_BAD_ACCESS, server_port, EXCEPTION_STATE | MACH_EXCEPTION_CODES, THREAD_STATE));

View file

@ -17,7 +17,7 @@
#include <fmt/format.h>
#include <ankerl/unordered_dense.h>
#include "dynarmic/backend/exception_handler.h"
#include "common/assert.h"
#include <cassert>
#include "dynarmic/common/context.h"
#include "common/common_types.h"
#if defined(ARCHITECTURE_x86_64)
@ -116,7 +116,7 @@ void RegisterHandler() {
}
void SigHandler::SigAction(int sig, siginfo_t* info, void* raw_context) {
DEBUG_ASSERT(sig == SIGSEGV || sig == SIGBUS);
assert(sig == SIGSEGV || sig == SIGBUS);
CTX_DECLARE(raw_context);
#if defined(ARCHITECTURE_x86_64)
{

View file

@ -8,7 +8,7 @@
#include "dynarmic/backend/riscv64/a32_address_space.h"
#include "common/assert.h"
#include <cassert>
#include "dynarmic/backend/riscv64/abi.h"
#include "dynarmic/backend/riscv64/emit_riscv64.h"
@ -94,7 +94,7 @@ void A32AddressSpace::EmitPrelude() {
void A32AddressSpace::SetCursorPtr(CodePtr ptr) {
ptrdiff_t offset = ptr - GetMemPtr<CodePtr>();
ASSERT(offset >= 0);
assert(offset >= 0);
as.RewindBuffer(offset);
}

View file

@ -10,7 +10,7 @@
#include <mutex>
#include <boost/icl/interval_set.hpp>
#include "common/assert.h"
#include <cassert>
#include "common/common_types.h"
#include "dynarmic/backend/riscv64/a32_address_space.h"
@ -31,7 +31,7 @@ struct Jit::Impl final {
, core(conf) {}
HaltReason Run() {
ASSERT(!jit_interface->is_executing);
assert(!jit_interface->is_executing);
jit_interface->is_executing = true;
HaltReason hr = core.Run(current_address_space, current_state, &halt_reason);
RequestCacheInvalidation();
@ -40,9 +40,9 @@ struct Jit::Impl final {
}
HaltReason Step() {
ASSERT(!jit_interface->is_executing);
assert(!jit_interface->is_executing);
jit_interface->is_executing = true;
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
RequestCacheInvalidation();
jit_interface->is_executing = false;
return HaltReason{};

View file

@ -5,7 +5,7 @@
#include <mutex>
#include <boost/icl/interval_set.hpp>
#include "common/assert.h"
#include <cassert>
#include "common/common_types.h"
#include "dynarmic/frontend/A64/a64_location_descriptor.h"
@ -28,12 +28,12 @@ struct Jit::Impl final {
, jit_interface(jit_interface) {}
HaltReason Run() {
ASSERT(false);
assert(false);
return HaltReason{};
}
HaltReason Step() {
ASSERT(false);
assert(false);
return HaltReason{};
}
@ -51,7 +51,7 @@ struct Jit::Impl final {
}
void Reset() {
ASSERT(!is_executing);
assert(!is_executing);
//jit_state = {};
}

View file

@ -13,7 +13,7 @@
#include <sys/mman.h>
#include "common/assert.h"
#include <cassert>
#include "common/common_types.h"
namespace Dynarmic::Backend::RV64 {
@ -22,7 +22,7 @@ class CodeBlock {
public:
explicit CodeBlock(std::size_t size) noexcept : memsize(size) {
mem = (u8*)mmap(nullptr, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANON | MAP_PRIVATE, -1, 0);
ASSERT(mem != nullptr);
assert(mem != nullptr);
}
~CodeBlock() noexcept {

View file

@ -35,39 +35,39 @@ void EmitIR<IR::Opcode::Identity>(biscuit::Assembler&, EmitContext& ctx, IR::Ins
template<>
void EmitIR<IR::Opcode::Breakpoint>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::CallHostFunction>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::PushRSB>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::GetCarryFromOp>(biscuit::Assembler&, EmitContext& ctx, IR::Inst* inst) {
[[maybe_unused]] auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(ctx.reg_alloc.IsValueLive(inst));
assert(ctx.reg_alloc.IsValueLive(inst));
}
template<>
void EmitIR<IR::Opcode::GetOverflowFromOp>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::GetGEFromOp>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::GetNZCVFromOp>(biscuit::Assembler&, EmitContext& ctx, IR::Inst* inst) {
[[maybe_unused]] auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(ctx.reg_alloc.IsValueLive(inst));
assert(ctx.reg_alloc.IsValueLive(inst));
}
template<>
@ -87,12 +87,12 @@ void EmitIR<IR::Opcode::GetNZFromOp>(biscuit::Assembler& as, EmitContext& ctx, I
template<>
void EmitIR<IR::Opcode::GetUpperFromOp>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::GetLowerFromOp>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
@ -109,7 +109,7 @@ void EmitIR<IR::Opcode::GetCFlagFromNZCV>(biscuit::Assembler& as, EmitContext& c
template<>
void EmitIR<IR::Opcode::NZCVFromPackedFlags>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
EmittedBlockInfo EmitRV64(biscuit::Assembler& as, IR::Block block, const EmitConfig& emit_conf) {

View file

@ -205,7 +205,7 @@ void EmitA32Terminal(biscuit::Assembler& as, EmitContext& ctx) {
template<>
void EmitIR<IR::Opcode::A32SetCheckBit>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
@ -220,17 +220,17 @@ void EmitIR<IR::Opcode::A32GetRegister>(biscuit::Assembler& as, EmitContext& ctx
template<>
void EmitIR<IR::Opcode::A32GetExtendedRegister32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32GetExtendedRegister64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32GetVector>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
@ -249,27 +249,27 @@ void EmitIR<IR::Opcode::A32SetRegister>(biscuit::Assembler& as, EmitContext& ctx
template<>
void EmitIR<IR::Opcode::A32SetExtendedRegister32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32SetExtendedRegister64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32SetVector>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32GetCpsr>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32SetCpsr>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
@ -284,17 +284,17 @@ void EmitIR<IR::Opcode::A32SetCpsrNZCV>(biscuit::Assembler& as, EmitContext& ctx
template<>
void EmitIR<IR::Opcode::A32SetCpsrNZCVRaw>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32SetCpsrNZCVQ>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32SetCpsrNZ>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
@ -302,7 +302,7 @@ void EmitIR<IR::Opcode::A32SetCpsrNZC>(biscuit::Assembler& as, EmitContext& ctx,
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
// TODO: Add full implementation
ASSERT(!args[0].IsImmediate() && !args[1].IsImmediate());
assert(!args[0].IsImmediate() && !args[1].IsImmediate());
auto Xnz = ctx.reg_alloc.ReadX(args[0]);
auto Xc = ctx.reg_alloc.ReadX(args[1]);
@ -318,82 +318,82 @@ void EmitIR<IR::Opcode::A32SetCpsrNZC>(biscuit::Assembler& as, EmitContext& ctx,
template<>
void EmitIR<IR::Opcode::A32GetCFlag>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32OrQFlag>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32GetGEFlags>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32SetGEFlags>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32SetGEFlagsCompressed>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32BXWritePC>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32UpdateUpperLocationDescriptor>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32CallSupervisor>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32ExceptionRaised>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32DataSynchronizationBarrier>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32DataMemoryBarrier>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32InstructionSynchronizationBarrier>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32GetFpscr>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32SetFpscr>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32GetFpscrNZCV>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32SetFpscrNZCV>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
} // namespace Dynarmic::Backend::RV64

View file

@ -22,37 +22,37 @@ namespace Dynarmic::Backend::RV64 {
template<>
void EmitIR<IR::Opcode::A32CoprocInternalOperation>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32CoprocSendOneWord>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32CoprocSendTwoWords>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32CoprocGetOneWord>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32CoprocGetTwoWords>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32CoprocLoadWords>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32CoprocStoreWords>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
} // namespace Dynarmic::Backend::RV64

View file

@ -22,87 +22,87 @@ namespace Dynarmic::Backend::RV64 {
template<>
void EmitIR<IR::Opcode::A32ClearExclusive>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32ReadMemory8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32ReadMemory16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32ReadMemory32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32ReadMemory64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32ExclusiveReadMemory8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32ExclusiveReadMemory16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32ExclusiveReadMemory32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32ExclusiveReadMemory64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32WriteMemory8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32WriteMemory16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32WriteMemory32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32WriteMemory64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32ExclusiveWriteMemory8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32ExclusiveWriteMemory16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32ExclusiveWriteMemory32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A32ExclusiveWriteMemory64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
} // namespace Dynarmic::Backend::RV64

View file

@ -22,182 +22,182 @@ namespace Dynarmic::Backend::RV64 {
template<>
void EmitIR<IR::Opcode::A64SetCheckBit>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64GetCFlag>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64GetNZCVRaw>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64SetNZCVRaw>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64SetNZCV>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64GetW>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64GetX>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64GetS>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64GetD>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64GetQ>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64GetSP>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64GetFPCR>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64GetFPSR>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64SetW>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64SetX>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64SetS>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64SetD>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64SetQ>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64SetSP>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64SetFPCR>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64SetFPSR>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64SetPC>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64CallSupervisor>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64ExceptionRaised>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64DataCacheOperationRaised>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64InstructionCacheOperationRaised>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64DataSynchronizationBarrier>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64DataMemoryBarrier>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64InstructionSynchronizationBarrier>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64GetCNTFRQ>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64GetCNTPCT>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64GetCTR>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64GetDCZID>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64GetTPIDR>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64GetTPIDRRO>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64SetTPIDR>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
} // namespace Dynarmic::Backend::RV64

View file

@ -22,107 +22,107 @@ namespace Dynarmic::Backend::RV64 {
template<>
void EmitIR<IR::Opcode::A64ClearExclusive>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64ReadMemory8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64ReadMemory16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64ReadMemory32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64ReadMemory64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64ReadMemory128>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64ExclusiveReadMemory8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64ExclusiveReadMemory16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64ExclusiveReadMemory32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64ExclusiveReadMemory64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64ExclusiveReadMemory128>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64WriteMemory8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64WriteMemory16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64WriteMemory32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64WriteMemory64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64WriteMemory128>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64ExclusiveWriteMemory8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64ExclusiveWriteMemory16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64ExclusiveWriteMemory32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64ExclusiveWriteMemory64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::A64ExclusiveWriteMemory128>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
} // namespace Dynarmic::Backend::RV64

View file

@ -22,82 +22,82 @@ namespace Dynarmic::Backend::RV64 {
template<>
void EmitIR<IR::Opcode::CRC32Castagnoli8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::CRC32Castagnoli16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::CRC32Castagnoli32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::CRC32Castagnoli64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::CRC32ISO8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::CRC32ISO16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::CRC32ISO32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::CRC32ISO64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::AESDecryptSingleRound>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::AESEncryptSingleRound>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::AESInverseMixColumns>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::AESMixColumns>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::SM4AccessSubstitutionBox>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::SHA256Hash>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::SHA256MessageSchedule0>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::SHA256MessageSchedule1>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
} // namespace Dynarmic::Backend::RV64

View file

@ -22,67 +22,67 @@ namespace Dynarmic::Backend::RV64 {
template<>
void EmitIR<IR::Opcode::Pack2x32To1x64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::Pack2x64To1x128>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::LeastSignificantWord>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::LeastSignificantHalf>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::LeastSignificantByte>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::MostSignificantWord>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::MostSignificantBit>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::IsZero32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::IsZero64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::TestBit>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::ConditionalSelect32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::ConditionalSelect64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::ConditionalSelectNZCV>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
@ -95,8 +95,8 @@ void EmitIR<IR::Opcode::LogicalShiftLeft32>(biscuit::Assembler& as, EmitContext&
auto& carry_arg = args[2];
// TODO: Add full implementation
ASSERT(carry_inst != nullptr);
ASSERT(shift_arg.IsImmediate());
assert(carry_inst != nullptr);
assert(shift_arg.IsImmediate());
auto Xresult = ctx.reg_alloc.WriteX(inst);
auto Xcarry_out = ctx.reg_alloc.WriteX(carry_inst);
@ -124,7 +124,7 @@ void EmitIR<IR::Opcode::LogicalShiftLeft32>(biscuit::Assembler& as, EmitContext&
template<>
void EmitIR<IR::Opcode::LogicalShiftLeft64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
@ -136,8 +136,8 @@ void EmitIR<IR::Opcode::LogicalShiftRight32>(biscuit::Assembler& as, EmitContext
auto& shift_arg = args[1];
// TODO: Add full implementation
ASSERT(carry_inst == nullptr);
ASSERT(shift_arg.IsImmediate());
assert(carry_inst == nullptr);
assert(shift_arg.IsImmediate());
const u8 shift = shift_arg.GetImmediateU8();
auto Xresult = ctx.reg_alloc.WriteX(inst);
@ -153,72 +153,72 @@ void EmitIR<IR::Opcode::LogicalShiftRight32>(biscuit::Assembler& as, EmitContext
template<>
void EmitIR<IR::Opcode::LogicalShiftRight64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::ArithmeticShiftRight32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::ArithmeticShiftRight64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::BitRotateRight32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::BitRotateRight64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::RotateRightExtended>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::LogicalShiftLeftMasked32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::LogicalShiftLeftMasked64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::LogicalShiftRightMasked32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::LogicalShiftRightMasked64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::ArithmeticShiftRightMasked32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::ArithmeticShiftRightMasked64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::RotateRightMasked32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::RotateRightMasked64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<size_t bitsize>
@ -264,7 +264,7 @@ static void AddImmWithFlags(biscuit::Assembler& as, biscuit::GPR rd, biscuit::GP
as.SLLI(Xscratch1, Xscratch1, 28);
as.OR(flags, flags, Xscratch1);
} else {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
}
@ -279,7 +279,7 @@ static void EmitAddSub(biscuit::Assembler& as, EmitContext& ctx, IR::Inst* inst)
auto Xa = ctx.reg_alloc.ReadX(args[0]);
if (overflow_inst) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
} else if (nzcv_inst) {
if (args[1].IsImmediate()) {
const u64 imm = args[1].GetImmediateU64();
@ -294,17 +294,17 @@ static void EmitAddSub(biscuit::Assembler& as, EmitContext& ctx, IR::Inst* inst)
AddImmWithFlags<bitsize>(as, *Xresult, *Xa, sub ? -imm : imm, *Xflags);
}
} else {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
} else {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
} else {
if (args[1].IsImmediate()) {
const u64 imm = args[1].GetImmediateU64();
if (args[2].IsImmediate()) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
} else {
auto Xnzcv = ctx.reg_alloc.ReadX(args[2]);
RegAlloc::Realize(Xresult, Xa, Xnzcv);
@ -317,7 +317,7 @@ static void EmitAddSub(biscuit::Assembler& as, EmitContext& ctx, IR::Inst* inst)
as.ADDW(Xresult, Xa, Xscratch0);
}
} else {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
}
}
@ -329,7 +329,7 @@ void EmitIR<IR::Opcode::Add32>(biscuit::Assembler& as, EmitContext& ctx, IR::Ins
template<>
void EmitIR<IR::Opcode::Add64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
@ -339,237 +339,237 @@ void EmitIR<IR::Opcode::Sub32>(biscuit::Assembler& as, EmitContext& ctx, IR::Ins
template<>
void EmitIR<IR::Opcode::Sub64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::Mul32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::Mul64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::SignedMultiplyHigh64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::UnsignedMultiplyHigh64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::UnsignedDiv32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::UnsignedDiv64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::SignedDiv32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::SignedDiv64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::And32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::And64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::AndNot32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::AndNot64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::Eor32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::Eor64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::Or32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::Or64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::Not32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::Not64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::SignExtendByteToWord>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::SignExtendHalfToWord>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::SignExtendByteToLong>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::SignExtendHalfToLong>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::SignExtendWordToLong>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::ZeroExtendByteToWord>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::ZeroExtendHalfToWord>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::ZeroExtendByteToLong>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::ZeroExtendHalfToLong>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::ZeroExtendWordToLong>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::ZeroExtendLongToQuad>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::ByteReverseWord>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::ByteReverseHalf>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::ByteReverseDual>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::CountLeadingZeros32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::CountLeadingZeros64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::ExtractRegister32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::ExtractRegister64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::ReplicateBit32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::ReplicateBit64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::MaxSigned32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::MaxSigned64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::MaxUnsigned32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::MaxUnsigned64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::MinSigned32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::MinSigned64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::MinUnsigned32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::MinUnsigned64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
} // namespace Dynarmic::Backend::RV64

View file

@ -22,442 +22,442 @@ namespace Dynarmic::Backend::RV64 {
template<>
void EmitIR<IR::Opcode::FPAbs16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPAbs32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPAbs64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPAdd32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPAdd64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPCompare32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPCompare64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPDiv32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPDiv64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPMax32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPMax64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPMaxNumeric32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPMaxNumeric64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPMin32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPMin64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPMinNumeric32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPMinNumeric64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPMul32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPMul64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPMulAdd16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPMulAdd32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPMulAdd64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPMulSub16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPMulSub32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPMulSub64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPMulX32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPMulX64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPNeg16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPNeg32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPNeg64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPRecipEstimate16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPRecipEstimate32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPRecipEstimate64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPRecipExponent16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPRecipExponent32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPRecipExponent64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPRecipStepFused16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPRecipStepFused32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPRecipStepFused64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPRoundInt16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPRoundInt32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPRoundInt64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPRSqrtEstimate16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPRSqrtEstimate32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPRSqrtEstimate64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPRSqrtStepFused16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPRSqrtStepFused32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPRSqrtStepFused64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPSqrt32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPSqrt64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPSub32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPSub64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPHalfToDouble>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPHalfToSingle>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPSingleToDouble>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPSingleToHalf>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPDoubleToHalf>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPDoubleToSingle>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPDoubleToFixedS16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPDoubleToFixedS32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPDoubleToFixedS64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPDoubleToFixedU16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPDoubleToFixedU32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPDoubleToFixedU64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPHalfToFixedS16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPHalfToFixedS32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPHalfToFixedS64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPHalfToFixedU16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPHalfToFixedU32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPHalfToFixedU64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPSingleToFixedS16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPSingleToFixedS32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPSingleToFixedS64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPSingleToFixedU16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
template<>
void EmitIR<IR::Opcode::FPSingleToFixedU32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
}
// Fixed-point <-> floating-point conversion opcodes not yet implemented for
// the RISC-V backend. Each stub raises an assert in debug builds (with a
// readable message) and unconditionally terminates, so a release build can
// never fall through into the caller after hitting an unimplemented opcode.
template<>
void EmitIR<IR::Opcode::FPSingleToFixedU64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPFixedU16ToSingle>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPFixedS16ToSingle>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPFixedU16ToDouble>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPFixedS16ToDouble>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPFixedU32ToSingle>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPFixedS32ToSingle>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPFixedU32ToDouble>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPFixedS32ToDouble>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPFixedU64ToDouble>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPFixedU64ToSingle>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPFixedS64ToDouble>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPFixedS64ToSingle>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
} // namespace Dynarmic::Backend::RV64

View file

@ -22,172 +22,172 @@ namespace Dynarmic::Backend::RV64 {
// Packed (parallel SIMD-within-a-register) integer opcodes not yet
// implemented for the RISC-V backend. Each stub asserts with a message in
// debug builds and terminates unconditionally so unimplemented opcodes can
// never silently produce wrong code.
template<>
void EmitIR<IR::Opcode::PackedAddU8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedAddS8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedSubU8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedSubS8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedAddU16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedAddS16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedSubU16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedSubS16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedAddSubU16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedAddSubS16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedSubAddU16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedSubAddS16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedHalvingAddU8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedHalvingAddS8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedHalvingSubU8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedHalvingSubS8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedHalvingAddU16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedHalvingAddS16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedHalvingSubU16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedHalvingSubS16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedHalvingAddSubU16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedHalvingAddSubS16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedHalvingSubAddU16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedHalvingSubAddS16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedSaturatedAddU8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedSaturatedAddS8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedSaturatedSubU8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedSaturatedSubS8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedSaturatedAddU16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedSaturatedAddS16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedSaturatedSubU16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedSaturatedSubS16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedAbsDiffSumU8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::PackedSelect>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
} // namespace Dynarmic::Backend::RV64

View file

@ -22,112 +22,112 @@ namespace Dynarmic::Backend::RV64 {
// Scalar saturating-arithmetic opcodes not yet implemented for the RISC-V
// backend. Each stub asserts with a message in debug builds and terminates
// unconditionally so unimplemented opcodes can never silently produce wrong
// code.
template<>
void EmitIR<IR::Opcode::SignedSaturatedAddWithFlag32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::SignedSaturatedSubWithFlag32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::SignedSaturation>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::UnsignedSaturation>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::SignedSaturatedAdd8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::SignedSaturatedAdd16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::SignedSaturatedAdd32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::SignedSaturatedAdd64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::SignedSaturatedDoublingMultiplyReturnHigh16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::SignedSaturatedDoublingMultiplyReturnHigh32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::SignedSaturatedSub8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::SignedSaturatedSub16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::SignedSaturatedSub32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::SignedSaturatedSub64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::UnsignedSaturatedAdd8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::UnsignedSaturatedAdd16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::UnsignedSaturatedAdd32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::UnsignedSaturatedAdd64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::UnsignedSaturatedSub8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::UnsignedSaturatedSub16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::UnsignedSaturatedSub32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::UnsignedSaturatedSub64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
} // namespace Dynarmic::Backend::RV64

View file

@ -22,337 +22,337 @@ namespace Dynarmic::Backend::RV64 {
// Floating-point vector opcodes not yet implemented for the RISC-V backend.
// Each stub asserts with a message in debug builds and terminates
// unconditionally so unimplemented opcodes can never silently produce wrong
// code.
template<>
void EmitIR<IR::Opcode::FPVectorAbs16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorAbs32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorAbs64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorAdd32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorAdd64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorDiv32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorDiv64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorEqual16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorEqual32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorEqual64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorFromHalf32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorFromSignedFixed32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorFromSignedFixed64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorFromUnsignedFixed32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorFromUnsignedFixed64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorGreater32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorGreater64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorGreaterEqual32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorGreaterEqual64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorMax32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorMax64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorMaxNumeric32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorMaxNumeric64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorMin32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorMin64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorMinNumeric32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorMinNumeric64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorMul32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorMul64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorMulAdd16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorMulAdd32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorMulAdd64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorMulX32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorMulX64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorNeg16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorNeg32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorNeg64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorPairedAdd32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorPairedAdd64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorPairedAddLower32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorPairedAddLower64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorRecipEstimate16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorRecipEstimate32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorRecipEstimate64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorRecipStepFused16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorRecipStepFused32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorRecipStepFused64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorRoundInt16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorRoundInt32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorRoundInt64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorRSqrtEstimate16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorRSqrtEstimate32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorRSqrtEstimate64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorRSqrtStepFused16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorRSqrtStepFused32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorRSqrtStepFused64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorSqrt32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorSqrt64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorSub32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorSub64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorToHalf32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorToSignedFixed16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorToSignedFixed32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorToSignedFixed64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorToUnsignedFixed16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorToUnsignedFixed32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::FPVectorToUnsignedFixed64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
} // namespace Dynarmic::Backend::RV64

View file

@ -22,82 +22,82 @@ namespace Dynarmic::Backend::RV64 {
// Vector saturating add/sub opcodes not yet implemented for the RISC-V
// backend. Each stub asserts with a message in debug builds and terminates
// unconditionally so unimplemented opcodes can never silently produce wrong
// code.
template<>
void EmitIR<IR::Opcode::VectorSignedSaturatedAdd8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::VectorSignedSaturatedAdd16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::VectorSignedSaturatedAdd32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::VectorSignedSaturatedAdd64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::VectorSignedSaturatedSub8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::VectorSignedSaturatedSub16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::VectorSignedSaturatedSub32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::VectorSignedSaturatedSub64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::VectorUnsignedSaturatedAdd8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::VectorUnsignedSaturatedAdd16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::VectorUnsignedSaturatedAdd32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::VectorUnsignedSaturatedAdd64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::VectorUnsignedSaturatedSub8>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::VectorUnsignedSaturatedSub16>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::VectorUnsignedSaturatedSub32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
template<>
void EmitIR<IR::Opcode::VectorUnsignedSaturatedSub64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
    assert(false && "Unimplemented instruction");
    std::terminate();  // unimplemented
}
} // namespace Dynarmic::Backend::RV64

View file

@ -11,7 +11,7 @@
#include <algorithm>
#include <array>
#include "common/assert.h"
#include <cassert>
#include "common/common_types.h"
#include "dynarmic/common/always_false.h"
@ -44,19 +44,19 @@ bool Argument::GetImmediateU1() const {
// Returns this argument's immediate value narrowed to u8.
// Debug-asserts that the value actually fits in 8 bits before truncating.
u8 Argument::GetImmediateU8() const {
    const u64 imm = value.GetImmediateAsU64();
    assert(imm < 0x100);
    return u8(imm);
}
// Returns this argument's immediate value narrowed to u16.
// Debug-asserts that the value actually fits in 16 bits before truncating.
u16 Argument::GetImmediateU16() const {
    const u64 imm = value.GetImmediateAsU64();
    assert(imm < 0x10000);
    return u16(imm);
}
// Returns this argument's immediate value narrowed to u32.
// Debug-asserts that the value actually fits in 32 bits before truncating.
u32 Argument::GetImmediateU32() const {
    const u64 imm = value.GetImmediateAsU64();
    assert(imm < 0x100000000);
    return u32(imm);
}
@ -65,12 +65,12 @@ u64 Argument::GetImmediateU64() const {
}
// Returns this argument's immediate condition code.
// Debug-asserts the argument is an immediate of type Cond before decoding.
IR::Cond Argument::GetImmediateCond() const {
    assert(IsImmediate() && GetType() == IR::Type::Cond);
    return value.GetCond();
}
// Returns this argument's immediate access type.
// Debug-asserts the argument is an immediate of type AccType before decoding.
IR::AccType Argument::GetImmediateAccType() const {
    assert(IsImmediate() && GetType() == IR::Type::AccType);
    return value.GetAccType();
}
@ -79,7 +79,7 @@ bool HostLocInfo::Contains(const IR::Inst* value) const {
}
// Marks this host location as a realized scratch register.
// Debug-asserts the location holds no values before repurposing it.
void HostLocInfo::SetupScratchLocation() {
    assert(IsCompletelyEmpty());
    realized = true;
}
@ -104,7 +104,7 @@ RegAlloc::ArgumentInfo RegAlloc::GetArgumentInfo(IR::Inst* inst) {
const IR::Value arg = inst->GetArg(i);
ret[i].value = arg;
if (!arg.IsImmediate() && !IsValuelessType(arg.GetType())) {
ASSERT(ValueLocation(arg.GetInst()) && "argument must already been defined");
assert(ValueLocation(arg.GetInst()) && "argument must already been defined");
ValueInfo(arg.GetInst()).uses_this_inst++;
}
}
@ -128,7 +128,7 @@ void RegAlloc::UpdateAllUses() {
}
void RegAlloc::DefineAsExisting(IR::Inst* inst, Argument& arg) {
ASSERT(!ValueLocation(inst));
assert(!ValueLocation(inst));
if (arg.value.IsImmediate()) {
inst->ReplaceUsesWith(arg.value);
@ -142,15 +142,15 @@ void RegAlloc::DefineAsExisting(IR::Inst* inst, Argument& arg) {
// Debug-time sanity check run at end of block: every GPR, FPR and spill slot
// must be completely empty (no values left allocated or pending uses).
void RegAlloc::AssertNoMoreUses() const {
    const auto is_empty = [](const auto& i) { return i.IsCompletelyEmpty(); };
    assert(std::all_of(gprs.begin(), gprs.end(), is_empty));
    assert(std::all_of(fprs.begin(), fprs.end(), is_empty));
    assert(std::all_of(spills.begin(), spills.end(), is_empty));
}
template<HostLoc::Kind kind>
u32 RegAlloc::GenerateImmediate(const IR::Value& value) {
// TODO
// ASSERT(value.GetType() != IR::Type::U1);
// assert(value.GetType() != IR::Type::U1);
if constexpr (kind == HostLoc::Kind::Gpr) {
const u32 new_location_index = AllocateRegister(gprs, gpr_order);
@ -161,7 +161,7 @@ u32 RegAlloc::GenerateImmediate(const IR::Value& value) {
return new_location_index;
} else if constexpr (kind == HostLoc::Kind::Fpr) {
ASSERT(false && "Unimplemented instruction");
std::terminate(); //unimplemented
} else {
UNREACHABLE();
}
@ -175,15 +175,15 @@ u32 RegAlloc::RealizeReadImpl(const IR::Value& value) {
}
const auto current_location = ValueLocation(value.GetInst());
ASSERT(current_location);
assert(current_location);
if (current_location->kind == required_kind) {
ValueInfo(*current_location).realized = true;
return current_location->index;
}
ASSERT(!ValueInfo(*current_location).realized);
ASSERT(!ValueInfo(*current_location).locked);
assert(!ValueInfo(*current_location).realized);
assert(!ValueInfo(*current_location).locked);
if constexpr (required_kind == HostLoc::Kind::Gpr) {
const u32 new_location_index = AllocateRegister(gprs, gpr_order);
@ -194,7 +194,7 @@ u32 RegAlloc::RealizeReadImpl(const IR::Value& value) {
UNREACHABLE(); //logic error
case HostLoc::Kind::Fpr:
as.FMV_X_D(biscuit::GPR(new_location_index), biscuit::FPR{current_location->index});
// ASSERT size fits
// assert size fits
break;
case HostLoc::Kind::Spill:
as.LD(biscuit::GPR{new_location_index}, spill_offset + current_location->index * spill_slot_size, biscuit::sp);
@ -229,7 +229,7 @@ u32 RegAlloc::RealizeReadImpl(const IR::Value& value) {
template<HostLoc::Kind required_kind>
u32 RegAlloc::RealizeWriteImpl(const IR::Inst* value) {
ASSERT(!ValueLocation(value));
assert(!ValueLocation(value));
const auto setup_location = [&](HostLocInfo& info) {
info = {};
@ -274,7 +274,7 @@ u32 RegAlloc::AllocateRegister(const std::array<HostLocInfo, 32>& regs, const st
}
void RegAlloc::SpillGpr(u32 index) {
ASSERT(!gprs[index].locked && !gprs[index].realized);
assert(!gprs[index].locked && !gprs[index].realized);
if (gprs[index].values.empty()) {
return;
}
@ -284,7 +284,7 @@ void RegAlloc::SpillGpr(u32 index) {
}
void RegAlloc::SpillFpr(u32 index) {
ASSERT(!fprs[index].locked && !fprs[index].realized);
assert(!fprs[index].locked && !fprs[index].realized);
if (fprs[index].values.empty()) {
return;
}
@ -295,7 +295,7 @@ void RegAlloc::SpillFpr(u32 index) {
// Returns the index of the first spill slot that currently holds no values.
// Debug-asserts that a free slot exists; spill exhaustion is a backend bug.
u32 RegAlloc::FindFreeSpill() const {
    const auto iter = std::find_if(spills.begin(), spills.end(), [](const HostLocInfo& info) { return info.values.empty(); });
    assert(iter != spills.end() && "All spill locations are full");
    return static_cast<u32>(iter - spills.begin());
}

View file

@ -16,7 +16,7 @@
#include <biscuit/assembler.hpp>
#include <biscuit/registers.hpp>
#include "common/assert.h"
#include <cassert>
#include "common/common_types.h"
#include "dynarmic/mcl/is_instance_of_template.hpp"
#include <ankerl/unordered_dense.h>

View file

@ -14,7 +14,7 @@
#include <fmt/format.h>
#include <fmt/ostream.h>
#include "common/assert.h"
#include <cassert>
#include "dynarmic/mcl/bit.hpp"
#include "common/common_types.h"
#include <boost/container/static_vector.hpp>
@ -183,11 +183,11 @@ void A32EmitX64::InvalidateCacheRanges(const boost::icl::interval_set<u32>& rang
void A32EmitX64::EmitCondPrelude(const A32EmitContext& ctx) {
if (ctx.block.GetCondition() == IR::Cond::AL) {
ASSERT(!ctx.block.HasConditionFailedLocation());
assert(!ctx.block.HasConditionFailedLocation());
return;
}
ASSERT(ctx.block.HasConditionFailedLocation());
assert(ctx.block.HasConditionFailedLocation());
Xbyak::Label pass = EmitCond(ctx.block.GetCondition());
if (conf.enable_cycle_counting) {
@ -285,7 +285,7 @@ void A32EmitX64::EmitA32GetRegister(A32EmitContext& ctx, IR::Inst* inst) {
void A32EmitX64::EmitA32GetExtendedRegister32(A32EmitContext& ctx, IR::Inst* inst) {
const A32::ExtReg reg = inst->GetArg(0).GetA32ExtRegRef();
ASSERT(A32::IsSingleExtReg(reg));
assert(A32::IsSingleExtReg(reg));
const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm(code);
code.movss(result, MJitStateExtReg(reg));
@ -294,7 +294,7 @@ void A32EmitX64::EmitA32GetExtendedRegister32(A32EmitContext& ctx, IR::Inst* ins
void A32EmitX64::EmitA32GetExtendedRegister64(A32EmitContext& ctx, IR::Inst* inst) {
const A32::ExtReg reg = inst->GetArg(0).GetA32ExtRegRef();
ASSERT(A32::IsDoubleExtReg(reg));
assert(A32::IsDoubleExtReg(reg));
const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm(code);
code.movsd(result, MJitStateExtReg(reg));
@ -303,7 +303,7 @@ void A32EmitX64::EmitA32GetExtendedRegister64(A32EmitContext& ctx, IR::Inst* ins
void A32EmitX64::EmitA32GetVector(A32EmitContext& ctx, IR::Inst* inst) {
const A32::ExtReg reg = inst->GetArg(0).GetA32ExtRegRef();
ASSERT(A32::IsDoubleExtReg(reg) || A32::IsQuadExtReg(reg));
assert(A32::IsDoubleExtReg(reg) || A32::IsQuadExtReg(reg));
const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm(code);
if (A32::IsDoubleExtReg(reg)) {
@ -332,7 +332,7 @@ void A32EmitX64::EmitA32SetRegister(A32EmitContext& ctx, IR::Inst* inst) {
void A32EmitX64::EmitA32SetExtendedRegister32(A32EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const A32::ExtReg reg = inst->GetArg(0).GetA32ExtRegRef();
ASSERT(A32::IsSingleExtReg(reg));
assert(A32::IsSingleExtReg(reg));
if (args[1].IsInXmm(ctx.reg_alloc)) {
Xbyak::Xmm to_store = ctx.reg_alloc.UseXmm(code, args[1]);
@ -346,7 +346,7 @@ void A32EmitX64::EmitA32SetExtendedRegister32(A32EmitContext& ctx, IR::Inst* ins
void A32EmitX64::EmitA32SetExtendedRegister64(A32EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const A32::ExtReg reg = inst->GetArg(0).GetA32ExtRegRef();
ASSERT(A32::IsDoubleExtReg(reg));
assert(A32::IsDoubleExtReg(reg));
if (args[1].IsInXmm(ctx.reg_alloc)) {
const Xbyak::Xmm to_store = ctx.reg_alloc.UseXmm(code, args[1]);
@ -360,7 +360,7 @@ void A32EmitX64::EmitA32SetExtendedRegister64(A32EmitContext& ctx, IR::Inst* ins
void A32EmitX64::EmitA32SetVector(A32EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const A32::ExtReg reg = inst->GetArg(0).GetA32ExtRegRef();
ASSERT(A32::IsDoubleExtReg(reg) || A32::IsQuadExtReg(reg));
assert(A32::IsDoubleExtReg(reg) || A32::IsQuadExtReg(reg));
const Xbyak::Xmm to_store = ctx.reg_alloc.UseXmm(code, args[1]);
if (A32::IsDoubleExtReg(reg)) {
@ -621,7 +621,7 @@ void A32EmitX64::EmitA32GetGEFlags(A32EmitContext& ctx, IR::Inst* inst) {
void A32EmitX64::EmitA32SetGEFlags(A32EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(!args[0].IsImmediate());
assert(!args[0].IsImmediate());
if (args[0].IsInXmm(ctx.reg_alloc)) {
const Xbyak::Xmm to_store = ctx.reg_alloc.UseXmm(code, args[0]);
@ -762,7 +762,7 @@ void A32EmitX64::EmitA32ExceptionRaised(A32EmitContext& ctx, IR::Inst* inst) {
ctx.reg_alloc.EndOfAllocScope();
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(args[0].IsImmediate() && args[1].IsImmediate());
assert(args[0].IsImmediate() && args[1].IsImmediate());
const u32 pc = args[0].GetImmediateU32();
const u64 exception = args[1].GetImmediateU64();
Devirtualize<&A32::UserCallbacks::ExceptionRaised>(conf.callbacks).EmitCall(code, [&](RegList param) {

View file

@ -12,7 +12,7 @@
#include <boost/icl/interval_set.hpp>
#include <fmt/format.h>
#include "common/assert.h"
#include <cassert>
#include <bit>
#include "common/common_types.h"
#include "dynarmic/common/llvm_disassemble.h"
@ -74,7 +74,7 @@ struct Jit::Impl {
~Impl() = default;
HaltReason Run() {
ASSERT(!jit_interface->is_executing);
assert(!jit_interface->is_executing);
PerformRequestedCacheInvalidation(static_cast<HaltReason>(Atomic::Load(&jit_state.halt_reason)));
jit_interface->is_executing = true;
const CodePtr current_codeptr = [this] {
@ -94,7 +94,7 @@ struct Jit::Impl {
}
HaltReason Step() {
ASSERT(!jit_interface->is_executing);
assert(!jit_interface->is_executing);
PerformRequestedCacheInvalidation(static_cast<HaltReason>(Atomic::Load(&jit_state.halt_reason)));
jit_interface->is_executing = true;
const HaltReason hr = block_of_code.StepCode(&jit_state, GetCurrentSingleStep());
@ -116,7 +116,7 @@ struct Jit::Impl {
}
void Reset() {
ASSERT(!jit_interface->is_executing);
assert(!jit_interface->is_executing);
jit_state = {};
}

View file

@ -8,7 +8,7 @@
#include "dynarmic/backend/x64/a32_jitstate.h"
#include "common/assert.h"
#include <cassert>
#include "dynarmic/mcl/bit.hpp"
#include "common/common_types.h"
@ -51,8 +51,8 @@ namespace Dynarmic::Backend::X64 {
*/
u32 A32JitState::Cpsr() const {
DEBUG_ASSERT((cpsr_q & ~1) == 0);
DEBUG_ASSERT((cpsr_jaifm & ~0x010001DF) == 0);
assert((cpsr_q & ~1) == 0);
assert((cpsr_jaifm & ~0x010001DF) == 0);
u32 cpsr = 0;
@ -167,7 +167,7 @@ constexpr u32 FPSCR_MODE_MASK = A32::LocationDescriptor::FPSCR_MODE_MASK;
constexpr u32 FPSCR_NZCV_MASK = 0xF0000000;
u32 A32JitState::Fpscr() const {
DEBUG_ASSERT((fpsr_nzcv & ~FPSCR_NZCV_MASK) == 0);
assert((fpsr_nzcv & ~FPSCR_NZCV_MASK) == 0);
const u32 fpcr_mode = static_cast<u32>(upper_location_descriptor) & FPSCR_MODE_MASK;
const u32 mxcsr = guest_MXCSR | asimd_MXCSR;

View file

@ -10,7 +10,7 @@
#include <fmt/format.h>
#include <fmt/ostream.h>
#include "common/assert.h"
#include <cassert>
#include "common/common_types.h"
#include "dynarmic/mcl/integer_of_size.hpp"
#include <boost/container/static_vector.hpp>
@ -89,7 +89,7 @@ A64EmitX64::BlockDescriptor A64EmitX64::Emit(IR::Block& block) noexcept {
code.align();
const auto* const entrypoint = code.getCurr();
DEBUG_ASSERT(block.GetCondition() == IR::Cond::AL);
assert(block.GetCondition() == IR::Cond::AL);
typedef void (EmitX64::*EmitHandlerFn)(EmitContext& context, IR::Inst* inst);
constexpr EmitHandlerFn opcode_handlers[] = {
#define OPCODE(name, type, ...) &EmitX64::Emit##name,
@ -497,7 +497,7 @@ void A64EmitX64::EmitA64SetPC(A64EmitContext& ctx, IR::Inst* inst) {
void A64EmitX64::EmitA64CallSupervisor(A64EmitContext& ctx, IR::Inst* inst) {
ctx.reg_alloc.HostCall(code, nullptr);
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(args[0].IsImmediate());
assert(args[0].IsImmediate());
const u32 imm = args[0].GetImmediateU32();
Devirtualize<&A64::UserCallbacks::CallSVC>(conf.callbacks).EmitCall(code, [&](RegList param) {
code.mov(param[0], imm);
@ -509,7 +509,7 @@ void A64EmitX64::EmitA64CallSupervisor(A64EmitContext& ctx, IR::Inst* inst) {
void A64EmitX64::EmitA64ExceptionRaised(A64EmitContext& ctx, IR::Inst* inst) {
ctx.reg_alloc.HostCall(code, nullptr);
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(args[0].IsImmediate() && args[1].IsImmediate());
assert(args[0].IsImmediate() && args[1].IsImmediate());
const u64 pc = args[0].GetImmediateU64();
const u64 exception = args[1].GetImmediateU64();
Devirtualize<&A64::UserCallbacks::ExceptionRaised>(conf.callbacks).EmitCall(code, [&](RegList param) {

View file

@ -11,7 +11,7 @@
#include <mutex>
#include <boost/icl/interval_set.hpp>
#include "common/assert.h"
#include <cassert>
#include "dynarmic/common/fp/fpcr.h"
#include "dynarmic/common/llvm_disassemble.h"
#include <bit>
@ -66,13 +66,13 @@ public:
, emitter(block_of_code, conf, jit)
, polyfill_options(GenPolyfillOptions(block_of_code))
{
ASSERT(conf.page_table_address_space_bits >= 12 && conf.page_table_address_space_bits <= 64);
assert(conf.page_table_address_space_bits >= 12 && conf.page_table_address_space_bits <= 64);
}
~Impl() = default;
HaltReason Run() {
ASSERT(!is_executing);
assert(!is_executing);
PerformRequestedCacheInvalidation(static_cast<HaltReason>(Atomic::Load(&jit_state.halt_reason)));
is_executing = true;
// TODO: Check code alignment
@ -92,7 +92,7 @@ public:
}
HaltReason Step() {
ASSERT(!is_executing);
assert(!is_executing);
PerformRequestedCacheInvalidation(static_cast<HaltReason>(Atomic::Load(&jit_state.halt_reason)));
is_executing = true;
const HaltReason hr = block_of_code.StepCode(&jit_state, GetCurrentSingleStep());
@ -116,7 +116,7 @@ public:
}
void Reset() {
ASSERT(!is_executing);
assert(!is_executing);
jit_state = {};
}

View file

@ -24,7 +24,7 @@
#include <array>
#include <cstring>
#include "common/assert.h"
#include <cassert>
#include "dynarmic/mcl/bit.hpp"
#include "dynarmic/backend/x64/xbyak.h"
@ -278,12 +278,12 @@ void BlockOfCode::DisableWriting() {
}
void BlockOfCode::ClearCache() {
ASSERT(prelude_complete);
assert(prelude_complete);
SetCodePtr(code_begin);
}
size_t BlockOfCode::SpaceRemaining() const {
ASSERT(prelude_complete);
assert(prelude_complete);
const u8* current_ptr = getCurr<const u8*>();
if (current_ptr >= &top_[maxSize_])
return 0;
@ -553,7 +553,7 @@ void BlockOfCode::SetCodePtr(CodePtr code_ptr) {
void BlockOfCode::EnsurePatchLocationSize(CodePtr begin, size_t size) {
size_t current_size = getCurr<const u8*>() - reinterpret_cast<const u8*>(begin);
ASSERT(current_size <= size);
assert(current_size <= size);
nop(size - current_size);
}

View file

@ -10,7 +10,7 @@
#include <cstring>
#include "common/assert.h"
#include <cassert>
#include "dynarmic/backend/x64/block_of_code.h"
@ -29,7 +29,7 @@ Xbyak::Address ConstantPool::GetConstant(const Xbyak::AddressFrame& frame, u64 l
const auto constant = ConstantT(lower, upper);
auto iter = constant_info.find(constant);
if (iter == constant_info.end()) {
ASSERT(insertion_point < pool.size());
assert(insertion_point < pool.size());
ConstantT& target_constant = pool[insertion_point];
target_constant = constant;
iter = constant_info.insert({constant, &target_constant}).first;

View file

@ -10,7 +10,7 @@
#include <iterator>
#include "common/assert.h"
#include <cassert>
#include <boost/variant/detail/apply_visitor_binary.hpp>
#include "dynarmic/mcl/bit.hpp"
#include "common/common_types.h"
@ -103,7 +103,7 @@ void EmitX64::PushRSBHelper(Xbyak::Reg64 loc_desc_reg, Xbyak::Reg64 index_reg, I
code.mov(qword[code.ABI_JIT_PTR + index_reg * 8 + code.GetJitStateInfo().offsetof_rsb_location_descriptors], loc_desc_reg);
code.mov(qword[code.ABI_JIT_PTR + index_reg * 8 + code.GetJitStateInfo().offsetof_rsb_codeptrs], rcx);
// Byte size hack
DEBUG_ASSERT(code.GetJitStateInfo().rsb_ptr_mask <= 0xFF);
assert(code.GetJitStateInfo().rsb_ptr_mask <= 0xFF);
code.add(index_reg.cvt32(), 1); //flags trashed, 1 single byte, haswell doesn't care
code.and_(index_reg.cvt32(), u32(code.GetJitStateInfo().rsb_ptr_mask)); //trashes flags
// Results ready and sort by least needed: give OOO some break
@ -144,7 +144,7 @@ void EmitX64::EmitVerboseDebuggingOutput(RegAlloc& reg_alloc) {
void EmitX64::EmitPushRSB(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(args[0].IsImmediate());
assert(args[0].IsImmediate());
const u64 unique_hash_of_target = args[0].GetImmediateU64();
ctx.reg_alloc.ScratchGpr(code, HostLoc::RCX);
@ -284,7 +284,7 @@ void EmitX64::EmitNZCVFromPackedFlags(EmitContext& ctx, IR::Inst* inst) {
}
void EmitX64::EmitAddCycles(size_t cycles) {
ASSERT(cycles < (std::numeric_limits<s32>::max)());
assert(cycles < (std::numeric_limits<s32>::max)());
code.sub(qword[rsp + ABI_SHADOW_SPACE + offsetof(StackLayout, cycles_remaining)], static_cast<u32>(cycles));
}

View file

@ -9,7 +9,7 @@
#include <cstddef>
#include <type_traits>
#include "common/assert.h"
#include <cassert>
#include "common/common_types.h"
#include "dynarmic/backend/x64/block_of_code.h"
@ -129,7 +129,7 @@ void EmitX64::EmitIsZero64(EmitContext& ctx, IR::Inst* inst) {
void EmitX64::EmitTestBit(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(code, args[0]);
ASSERT(args[1].IsImmediate());
assert(args[1].IsImmediate());
// TODO: Flag optimization
code.bt(result, args[1].GetImmediateU8());
code.setc(result.cvt8());

View file

@ -10,7 +10,7 @@
#include <type_traits>
#include <utility>
#include "common/assert.h"
#include <cassert>
#include "common/common_types.h"
#include "dynarmic/mcl/integer_of_size.hpp"
#include "dynarmic/backend/x64/xbyak.h"
@ -1842,7 +1842,7 @@ void EmitX64::EmitFPFixedS32ToSingle(EmitContext& ctx, IR::Inst* inst) {
if (rounding_mode == ctx.FPCR().RMode() || ctx.HasOptimization(OptimizationFlag::Unsafe_IgnoreStandardFPCRValue)) {
code.cvtsi2ss(result, from);
} else {
ASSERT(rounding_mode == FP::RoundingMode::ToNearest_TieEven);
assert(rounding_mode == FP::RoundingMode::ToNearest_TieEven);
code.EnterStandardASIMD();
code.cvtsi2ss(result, from);
code.LeaveStandardASIMD();
@ -1878,7 +1878,7 @@ void EmitX64::EmitFPFixedU32ToSingle(EmitContext& ctx, IR::Inst* inst) {
if (rounding_mode == ctx.FPCR().RMode() || ctx.HasOptimization(OptimizationFlag::Unsafe_IgnoreStandardFPCRValue)) {
op();
} else {
ASSERT(rounding_mode == FP::RoundingMode::ToNearest_TieEven);
assert(rounding_mode == FP::RoundingMode::ToNearest_TieEven);
code.EnterStandardASIMD();
op();
code.LeaveStandardASIMD();
@ -1984,7 +1984,7 @@ void EmitX64::EmitFPFixedS64ToDouble(EmitContext& ctx, IR::Inst* inst) {
const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm(code);
const size_t fbits = args[1].GetImmediateU8();
const FP::RoundingMode rounding_mode = static_cast<FP::RoundingMode>(args[2].GetImmediateU8());
ASSERT(rounding_mode == ctx.FPCR().RMode());
assert(rounding_mode == ctx.FPCR().RMode());
code.cvtsi2sd(result, from);
@ -2003,7 +2003,7 @@ void EmitX64::EmitFPFixedS64ToSingle(EmitContext& ctx, IR::Inst* inst) {
const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm(code);
const size_t fbits = args[1].GetImmediateU8();
const FP::RoundingMode rounding_mode = static_cast<FP::RoundingMode>(args[2].GetImmediateU8());
ASSERT(rounding_mode == ctx.FPCR().RMode());
assert(rounding_mode == ctx.FPCR().RMode());
code.cvtsi2ss(result, from);
@ -2022,7 +2022,7 @@ void EmitX64::EmitFPFixedU64ToDouble(EmitContext& ctx, IR::Inst* inst) {
const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm(code);
const size_t fbits = args[1].GetImmediateU8();
const FP::RoundingMode rounding_mode = static_cast<FP::RoundingMode>(args[2].GetImmediateU8());
ASSERT(rounding_mode == ctx.FPCR().RMode());
assert(rounding_mode == ctx.FPCR().RMode());
if (code.HasHostFeature(HostFeature::AVX512F)) {
code.vcvtusi2sd(result, result, from);
@ -2053,7 +2053,7 @@ void EmitX64::EmitFPFixedU64ToSingle(EmitContext& ctx, IR::Inst* inst) {
const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm(code);
const size_t fbits = args[1].GetImmediateU8();
const FP::RoundingMode rounding_mode = static_cast<FP::RoundingMode>(args[2].GetImmediateU8());
ASSERT(rounding_mode == ctx.FPCR().RMode());
assert(rounding_mode == ctx.FPCR().RMode());
if (code.HasHostFeature(HostFeature::AVX512F)) {
const Xbyak::Reg64 from = ctx.reg_alloc.UseGpr(code, args[0]);

View file

@ -113,7 +113,7 @@ void AxxEmitX64::EmitMemoryRead(AxxEmitContext& ctx, IR::Inst* inst) {
});
} else {
// Use page table
ASSERT(conf.page_table);
assert(conf.page_table);
const auto src_ptr = EmitVAddrLookup(code, ctx, bitsize, *abort, vaddr);
EmitReadMemoryMov<bitsize>(code, value_idx, src_ptr, ordered);
@ -200,7 +200,7 @@ void AxxEmitX64::EmitMemoryWrite(AxxEmitContext& ctx, IR::Inst* inst) {
});
} else {
// Use page table
ASSERT(conf.page_table);
assert(conf.page_table);
const auto dest_ptr = EmitVAddrLookup(code, ctx, bitsize, *abort, vaddr);
EmitWriteMemoryMov<bitsize>(code, dest_ptr, value_idx, ordered);
@ -216,7 +216,7 @@ void AxxEmitX64::EmitMemoryWrite(AxxEmitContext& ctx, IR::Inst* inst) {
template<std::size_t bitsize, auto callback>
void AxxEmitX64::EmitExclusiveReadMemory(AxxEmitContext& ctx, IR::Inst* inst) {
ASSERT(conf.global_monitor != nullptr);
assert(conf.global_monitor != nullptr);
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const bool ordered = IsOrdered(args[2].GetImmediateAccType());
@ -267,7 +267,7 @@ void AxxEmitX64::EmitExclusiveReadMemory(AxxEmitContext& ctx, IR::Inst* inst) {
template<std::size_t bitsize, auto callback>
void AxxEmitX64::EmitExclusiveWriteMemory(AxxEmitContext& ctx, IR::Inst* inst) {
ASSERT(conf.global_monitor != nullptr);
assert(conf.global_monitor != nullptr);
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const bool ordered = IsOrdered(args[3].GetImmediateAccType());
@ -320,7 +320,7 @@ void AxxEmitX64::EmitExclusiveWriteMemory(AxxEmitContext& ctx, IR::Inst* inst) {
template<std::size_t bitsize, auto callback>
void AxxEmitX64::EmitExclusiveReadMemoryInline(AxxEmitContext& ctx, IR::Inst* inst) {
ASSERT(conf.global_monitor && conf.fastmem_pointer);
assert(conf.global_monitor && conf.fastmem_pointer);
if (!exception_handler.SupportsFastmem()) {
EmitExclusiveReadMemory<bitsize, callback>(ctx, inst);
return;
@ -397,7 +397,7 @@ void AxxEmitX64::EmitExclusiveReadMemoryInline(AxxEmitContext& ctx, IR::Inst* in
template<std::size_t bitsize, auto callback>
void AxxEmitX64::EmitExclusiveWriteMemoryInline(AxxEmitContext& ctx, IR::Inst* inst) {
ASSERT(conf.global_monitor && conf.fastmem_pointer);
assert(conf.global_monitor && conf.fastmem_pointer);
if (!exception_handler.SupportsFastmem()) {
EmitExclusiveWriteMemory<bitsize, callback>(ctx, inst);
return;

View file

@ -134,7 +134,7 @@ template<>
code.and_(tmp, u32((1 << valid_page_index_bits) - 1));
}
} else {
ASSERT(valid_page_index_bits < 32);
assert(valid_page_index_bits < 32);
code.mov(tmp, vaddr);
code.shr(tmp, int(page_table_const_bits));
code.test(tmp, u32(-(1 << valid_page_index_bits)));

View file

@ -8,7 +8,7 @@
#include <limits>
#include "common/assert.h"
#include <cassert>
#include "dynarmic/mcl/bit.hpp"
#include "common/common_types.h"
#include "dynarmic/mcl/integer_of_size.hpp"
@ -118,7 +118,7 @@ void EmitX64::EmitSignedSaturation(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const size_t N = args[1].GetImmediateU8();
ASSERT(N >= 1 && N <= 32);
assert(N >= 1 && N <= 32);
if (N == 32) {
if (overflow_inst) {
@ -167,7 +167,7 @@ void EmitX64::EmitUnsignedSaturation(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const size_t N = args[1].GetImmediateU8();
ASSERT(N <= 31);
assert(N <= 31);
const u32 saturated_value = (1u << N) - 1;

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
/* This file is part of the dynarmic project.
@ -18,7 +18,7 @@ void EmitX64::EmitSHA256Hash(EmitContext& ctx, IR::Inst* inst) {
const bool part1 = args[3].GetImmediateU1();
ASSERT(code.HasHostFeature(HostFeature::SHA));
assert(code.HasHostFeature(HostFeature::SHA));
// 3 2 1 0
// x = d c b a
@ -54,7 +54,7 @@ void EmitX64::EmitSHA256Hash(EmitContext& ctx, IR::Inst* inst) {
void EmitX64::EmitSHA256MessageSchedule0(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(code.HasHostFeature(HostFeature::SHA));
assert(code.HasHostFeature(HostFeature::SHA));
const Xbyak::Xmm x = ctx.reg_alloc.UseScratchXmm(code, args[0]);
const Xbyak::Xmm y = ctx.reg_alloc.UseXmm(code, args[1]);
@ -67,7 +67,7 @@ void EmitX64::EmitSHA256MessageSchedule0(EmitContext& ctx, IR::Inst* inst) {
void EmitX64::EmitSHA256MessageSchedule1(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(code.HasHostFeature(HostFeature::SHA));
assert(code.HasHostFeature(HostFeature::SHA));
const Xbyak::Xmm x = ctx.reg_alloc.UseScratchXmm(code, args[0]);
const Xbyak::Xmm y = ctx.reg_alloc.UseXmm(code, args[1]);

View file

@ -12,7 +12,7 @@
#include <cstdlib>
#include <type_traits>
#include "common/assert.h"
#include <cassert>
#include "dynarmic/mcl/bit.hpp"
#include "common/common_types.h"
#include "dynarmic/mcl/function_info.hpp"
@ -189,7 +189,7 @@ static void EmitTwoArgumentFallback(BlockOfCode& code, EmitContext& ctx, IR::Ins
void EmitX64::EmitVectorGetElement8(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(args[1].IsImmediate());
assert(args[1].IsImmediate());
const u8 index = args[1].GetImmediateU8();
// TODO: DefineValue directly on Argument for index == 0
@ -213,7 +213,7 @@ void EmitX64::EmitVectorGetElement8(EmitContext& ctx, IR::Inst* inst) {
void EmitX64::EmitVectorGetElement16(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(args[1].IsImmediate());
assert(args[1].IsImmediate());
const u8 index = args[1].GetImmediateU8();
// TODO: DefineValue directly on Argument for index == 0
@ -226,7 +226,7 @@ void EmitX64::EmitVectorGetElement16(EmitContext& ctx, IR::Inst* inst) {
void EmitX64::EmitVectorGetElement32(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(args[1].IsImmediate());
assert(args[1].IsImmediate());
const u8 index = args[1].GetImmediateU8();
// TODO: DefineValue directly on Argument for index == 0
@ -247,7 +247,7 @@ void EmitX64::EmitVectorGetElement32(EmitContext& ctx, IR::Inst* inst) {
void EmitX64::EmitVectorGetElement64(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(args[1].IsImmediate());
assert(args[1].IsImmediate());
const u8 index = args[1].GetImmediateU8();
if (index == 0) {
@ -275,7 +275,7 @@ void EmitX64::EmitVectorGetElement64(EmitContext& ctx, IR::Inst* inst) {
void EmitX64::EmitVectorSetElement8(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(args[1].IsImmediate());
assert(args[1].IsImmediate());
const u8 index = args[1].GetImmediateU8();
const Xbyak::Xmm source_vector = ctx.reg_alloc.UseScratchXmm(code, args[0]);
@ -307,7 +307,7 @@ void EmitX64::EmitVectorSetElement8(EmitContext& ctx, IR::Inst* inst) {
void EmitX64::EmitVectorSetElement16(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(args[1].IsImmediate());
assert(args[1].IsImmediate());
const u8 index = args[1].GetImmediateU8();
const Xbyak::Xmm source_vector = ctx.reg_alloc.UseScratchXmm(code, args[0]);
@ -320,7 +320,7 @@ void EmitX64::EmitVectorSetElement16(EmitContext& ctx, IR::Inst* inst) {
void EmitX64::EmitVectorSetElement32(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(args[1].IsImmediate());
assert(args[1].IsImmediate());
const u8 index = args[1].GetImmediateU8();
const Xbyak::Xmm source_vector = ctx.reg_alloc.UseScratchXmm(code, args[0]);
@ -343,7 +343,7 @@ void EmitX64::EmitVectorSetElement32(EmitContext& ctx, IR::Inst* inst) {
void EmitX64::EmitVectorSetElement64(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(args[1].IsImmediate());
assert(args[1].IsImmediate());
const u8 index = args[1].GetImmediateU8();
const Xbyak::Xmm source_vector = ctx.reg_alloc.UseScratchXmm(code, args[0]);
@ -748,9 +748,9 @@ void EmitX64::EmitVectorBroadcast64(EmitContext& ctx, IR::Inst* inst) {
void EmitX64::EmitVectorBroadcastElementLower8(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(code, args[0]);
ASSERT(args[1].IsImmediate());
assert(args[1].IsImmediate());
const u8 index = args[1].GetImmediateU8();
ASSERT(index < 16);
assert(index < 16);
if (index > 0) {
code.psrldq(a, index);
}
@ -772,9 +772,9 @@ void EmitX64::EmitVectorBroadcastElementLower8(EmitContext& ctx, IR::Inst* inst)
void EmitX64::EmitVectorBroadcastElementLower16(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(code, args[0]);
ASSERT(args[1].IsImmediate());
assert(args[1].IsImmediate());
const u8 index = args[1].GetImmediateU8();
ASSERT(index < 8);
assert(index < 8);
if (index > 0) {
code.psrldq(a, u8(index * 2));
}
@ -785,9 +785,9 @@ void EmitX64::EmitVectorBroadcastElementLower16(EmitContext& ctx, IR::Inst* inst
void EmitX64::EmitVectorBroadcastElementLower32(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(code, args[0]);
ASSERT(args[1].IsImmediate());
assert(args[1].IsImmediate());
const u8 index = args[1].GetImmediateU8();
ASSERT(index < 4);
assert(index < 4);
if (index > 0) {
code.psrldq(a, u8(index * 4));
@ -801,9 +801,9 @@ void EmitX64::EmitVectorBroadcastElementLower32(EmitContext& ctx, IR::Inst* inst
void EmitX64::EmitVectorBroadcastElement8(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(code, args[0]);
ASSERT(args[1].IsImmediate());
assert(args[1].IsImmediate());
const u8 index = args[1].GetImmediateU8();
ASSERT(index < 16);
assert(index < 16);
if (index > 0) {
code.psrldq(a, index);
}
@ -825,9 +825,9 @@ void EmitX64::EmitVectorBroadcastElement8(EmitContext& ctx, IR::Inst* inst) {
void EmitX64::EmitVectorBroadcastElement16(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(code, args[0]);
ASSERT(args[1].IsImmediate());
assert(args[1].IsImmediate());
const u8 index = args[1].GetImmediateU8();
ASSERT(index < 8);
assert(index < 8);
if (index == 0 && code.HasHostFeature(HostFeature::AVX2)) {
code.vpbroadcastw(a, a);
} else {
@ -845,9 +845,9 @@ void EmitX64::EmitVectorBroadcastElement16(EmitContext& ctx, IR::Inst* inst) {
void EmitX64::EmitVectorBroadcastElement32(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(code, args[0]);
ASSERT(args[1].IsImmediate());
assert(args[1].IsImmediate());
const u8 index = args[1].GetImmediateU8();
ASSERT(index < 4);
assert(index < 4);
code.pshufd(a, a, mcl::bit::replicate_element<2, u8>(index));
@ -857,9 +857,9 @@ void EmitX64::EmitVectorBroadcastElement32(EmitContext& ctx, IR::Inst* inst) {
void EmitX64::EmitVectorBroadcastElement64(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(code, args[0]);
ASSERT(args[1].IsImmediate());
assert(args[1].IsImmediate());
const u8 index = args[1].GetImmediateU8();
ASSERT(index < 2);
assert(index < 2);
if (code.HasHostFeature(HostFeature::AVX)) {
code.vpermilpd(a, a, mcl::bit::replicate_element<1, u8>(index));
@ -1345,7 +1345,7 @@ void EmitX64::EmitVectorExtract(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const u8 position = args[2].GetImmediateU8();
ASSERT(position % 8 == 0);
assert(position % 8 == 0);
if (position == 0) {
ctx.reg_alloc.DefineValue(code, inst, args[0]);
@ -1377,7 +1377,7 @@ void EmitX64::EmitVectorExtractLower(EmitContext& ctx, IR::Inst* inst) {
const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(code, args[0]);
const u8 position = args[2].GetImmediateU8();
ASSERT(position % 8 == 0);
assert(position % 8 == 0);
if (position != 0) {
const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(code, args[1]);
@ -3527,7 +3527,7 @@ void EmitX64::EmitVectorRotateWholeVectorRight(EmitContext& ctx, IR::Inst* inst)
const Xbyak::Xmm operand = ctx.reg_alloc.UseXmm(code, args[0]);
const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm(code);
const u8 shift_amount = args[1].GetImmediateU8();
ASSERT(shift_amount % 32 == 0);
assert(shift_amount % 32 == 0);
const u8 shuffle_imm = std::rotr<u8>(0b11100100, shift_amount / 32 * 2);
code.pshufd(result, operand, shuffle_imm);
@ -4591,7 +4591,7 @@ static void EmitVectorSignedSaturatedNarrowToUnsigned(size_t original_esize, Blo
code.punpcklbw(reconstructed, xmm0);
break;
case 32:
ASSERT(code.HasHostFeature(HostFeature::SSE41));
assert(code.HasHostFeature(HostFeature::SSE41));
code.packusdw(dest, xmm0); // SSE4.1
code.movdqa(reconstructed, dest);
code.punpcklwd(reconstructed, xmm0);
@ -4874,11 +4874,11 @@ void EmitX64::EmitVectorSub64(EmitContext& ctx, IR::Inst* inst) {
void EmitX64::EmitVectorTable(EmitContext&, IR::Inst* inst) {
// Do nothing. We *want* to hold on to the refcount for our arguments, so VectorTableLookup can use our arguments.
ASSERT(inst->UseCount() == 1 && "Table cannot be used multiple times");
assert(inst->UseCount() == 1 && "Table cannot be used multiple times");
}
void EmitX64::EmitVectorTableLookup64(EmitContext& ctx, IR::Inst* inst) {
ASSERT(inst->GetArg(1).GetInst()->GetOpcode() == IR::Opcode::VectorTable);
assert(inst->GetArg(1).GetInst()->GetOpcode() == IR::Opcode::VectorTable);
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
auto table = ctx.reg_alloc.GetArgumentInfo(inst->GetArg(1).GetInst());
@ -5036,7 +5036,7 @@ void EmitX64::EmitVectorTableLookup64(EmitContext& ctx, IR::Inst* inst) {
code.pxor(xmm0, xmm0);
code.punpcklqdq(xmm_table1, xmm0);
} else {
ASSERT(table_size == 4);
assert(table_size == 4);
const Xbyak::Xmm xmm_table1_upper = ctx.reg_alloc.UseXmm(code, table[3]);
code.punpcklqdq(xmm_table1, xmm_table1_upper);
ctx.reg_alloc.Release(xmm_table1_upper);
@ -5133,7 +5133,7 @@ void EmitX64::EmitVectorTableLookup64(EmitContext& ctx, IR::Inst* inst) {
}
void EmitX64::EmitVectorTableLookup128(EmitContext& ctx, IR::Inst* inst) {
ASSERT(inst->GetArg(1).GetInst()->GetOpcode() == IR::Opcode::VectorTable);
assert(inst->GetArg(1).GetInst()->GetOpcode() == IR::Opcode::VectorTable);
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
auto table = ctx.reg_alloc.GetArgumentInfo(inst->GetArg(1).GetInst());

View file

@ -12,7 +12,7 @@
#include <type_traits>
#include <utility>
#include "common/assert.h"
#include <cassert>
#include "dynarmic/mcl/function_info.hpp"
#include "dynarmic/mcl/integer_of_size.hpp"
#include "dynarmic/backend/x64/xbyak.h"
@ -692,7 +692,7 @@ void EmitX64::EmitFPVectorFromSignedFixed32(EmitContext& ctx, IR::Inst* inst) {
const int fbits = args[1].GetImmediateU8();
const FP::RoundingMode rounding_mode = static_cast<FP::RoundingMode>(args[2].GetImmediateU8());
const bool fpcr_controlled = args[3].GetImmediateU1();
ASSERT(rounding_mode == ctx.FPCR(fpcr_controlled).RMode());
assert(rounding_mode == ctx.FPCR(fpcr_controlled).RMode());
MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&] {
code.cvtdq2ps(xmm, xmm);
@ -710,7 +710,7 @@ void EmitX64::EmitFPVectorFromSignedFixed64(EmitContext& ctx, IR::Inst* inst) {
const int fbits = args[1].GetImmediateU8();
const FP::RoundingMode rounding_mode = static_cast<FP::RoundingMode>(args[2].GetImmediateU8());
const bool fpcr_controlled = args[3].GetImmediateU1();
ASSERT(rounding_mode == ctx.FPCR(fpcr_controlled).RMode());
assert(rounding_mode == ctx.FPCR(fpcr_controlled).RMode());
MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&] {
if (code.HasHostFeature(HostFeature::AVX512_OrthoFloat)) {
@ -761,7 +761,7 @@ void EmitX64::EmitFPVectorFromUnsignedFixed32(EmitContext& ctx, IR::Inst* inst)
const int fbits = args[1].GetImmediateU8();
const FP::RoundingMode rounding_mode = static_cast<FP::RoundingMode>(args[2].GetImmediateU8());
const bool fpcr_controlled = args[3].GetImmediateU1();
ASSERT(rounding_mode == ctx.FPCR(fpcr_controlled).RMode());
assert(rounding_mode == ctx.FPCR(fpcr_controlled).RMode());
MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&] {
if (code.HasHostFeature(HostFeature::AVX512_Ortho)) {
@ -811,7 +811,7 @@ void EmitX64::EmitFPVectorFromUnsignedFixed64(EmitContext& ctx, IR::Inst* inst)
const int fbits = args[1].GetImmediateU8();
const FP::RoundingMode rounding_mode = static_cast<FP::RoundingMode>(args[2].GetImmediateU8());
const bool fpcr_controlled = args[3].GetImmediateU1();
ASSERT(rounding_mode == ctx.FPCR(fpcr_controlled).RMode());
assert(rounding_mode == ctx.FPCR(fpcr_controlled).RMode());
MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&] {
if (code.HasHostFeature(HostFeature::AVX512_OrthoFloat)) {

View file

@ -12,7 +12,7 @@
#include <cstring>
#include <vector>
#include "common/assert.h"
#include <cassert>
#include <bit>
#include "common/common_types.h"
@ -104,7 +104,7 @@ static PrologueInformation GetPrologueInformation() {
entry.code.OpInfo = reg;
};
const auto alloc_large = [&](u8 offset, size_t size) {
ASSERT(size % 8 == 0);
assert(size % 8 == 0);
size /= 8;
auto& entry = next_entry();
@ -123,7 +123,7 @@ static PrologueInformation GetPrologueInformation() {
}
};
const auto save_xmm128 = [&](u8 offset, u8 reg, size_t frame_offset) {
ASSERT(frame_offset % 16 == 0);
assert(frame_offset % 16 == 0);
auto& entry = next_entry();
entry.code.CodeOffset = offset;
@ -165,7 +165,7 @@ static PrologueInformation GetPrologueInformation() {
auto& last_entry = next_entry();
last_entry.FrameOffset = 0;
}
ASSERT(ret.unwind_code.size() % 2 == 0);
assert(ret.unwind_code.size() % 2 == 0);
return ret;
}

View file

@ -10,7 +10,7 @@
#include <algorithm>
#include "common/assert.h"
#include <cassert>
namespace Dynarmic {

View file

@ -10,7 +10,7 @@
#include <bitset>
#include <xbyak/xbyak.h>
#include "common/assert.h"
#include <cassert>
#include "common/common_types.h"
#include "dynarmic/backend/x64/xbyak.h"
@ -80,12 +80,12 @@ constexpr bool HostLocIsFlag(HostLoc reg) {
}
constexpr HostLoc HostLocRegIdx(int idx) {
ASSERT(idx >= 0 && idx <= 15);
assert(idx >= 0 && idx <= 15);
return HostLoc(idx);
}
constexpr HostLoc HostLocXmmIdx(int idx) {
ASSERT(idx >= 0 && idx <= 15);
assert(idx >= 0 && idx <= 15);
return HostLoc(size_t(HostLoc::XMM0) + idx);
}
@ -161,12 +161,12 @@ const std::bitset<32> any_xmm = BuildRegSet({
});
inline Xbyak::Reg64 HostLocToReg64(HostLoc loc) noexcept {
ASSERT(HostLocIsGPR(loc));
assert(HostLocIsGPR(loc));
return Xbyak::Reg64(int(loc));
}
inline Xbyak::Xmm HostLocToXmm(HostLoc loc) noexcept {
ASSERT(HostLocIsXMM(loc));
assert(HostLocIsXMM(loc));
return Xbyak::Xmm(int(loc) - int(HostLoc::XMM0));
}

View file

@ -8,7 +8,7 @@
#pragma once
#include "common/assert.h"
#include <cassert>
#include "dynarmic/backend/x64/xbyak.h"
namespace Dynarmic::Backend::X64 {

View file

@ -13,7 +13,7 @@
#include <fmt/ostream.h>
#include "dynarmic/backend/x64/hostloc.h"
#include "common/assert.h"
#include <cassert>
#include <bit>
#include "dynarmic/backend/x64/xbyak.h"
@ -56,11 +56,11 @@ static inline bool IsValuelessType(const IR::Type type) noexcept {
}
void HostLocInfo::ReleaseOne() noexcept {
ASSERT(is_being_used_count > 0);
assert(is_being_used_count > 0);
--is_being_used_count;
is_scratch = false;
if (current_references > 0) {
ASSERT(size_t(accumulated_uses) + 1 < (std::numeric_limits<decltype(accumulated_uses)>::max)());
assert(size_t(accumulated_uses) + 1 < (std::numeric_limits<decltype(accumulated_uses)>::max)());
++accumulated_uses;
--current_references;
if (current_references == 0)
@ -69,7 +69,7 @@ void HostLocInfo::ReleaseOne() noexcept {
}
void HostLocInfo::ReleaseAll() noexcept {
ASSERT(size_t(accumulated_uses) + current_references < (std::numeric_limits<decltype(accumulated_uses)>::max)());
assert(size_t(accumulated_uses) + current_references < (std::numeric_limits<decltype(accumulated_uses)>::max)());
accumulated_uses += current_references;
current_references = 0;
is_set_last_use = false;
@ -91,7 +91,7 @@ void HostLocInfo::AddValue(HostLoc loc, IR::Inst* inst) noexcept {
}
values.push_back(inst);
ASSERT(size_t(total_uses) + inst->UseCount() < (std::numeric_limits<decltype(total_uses)>::max)());
assert(size_t(total_uses) + inst->UseCount() < (std::numeric_limits<decltype(total_uses)>::max)());
total_uses += inst->UseCount();
max_bit_width = std::max<uint8_t>(max_bit_width, std::countr_zero(GetBitWidth(inst->GetType())));
}
@ -129,24 +129,24 @@ bool Argument::GetImmediateU1() const noexcept {
u8 Argument::GetImmediateU8() const noexcept {
const u64 imm = value.GetImmediateAsU64();
ASSERT(imm <= u64(std::numeric_limits<u8>::max()));
assert(imm <= u64(std::numeric_limits<u8>::max()));
return u8(imm);
}
u16 Argument::GetImmediateU16() const noexcept {
const u64 imm = value.GetImmediateAsU64();
ASSERT(imm <= u64(std::numeric_limits<u16>::max()));
assert(imm <= u64(std::numeric_limits<u16>::max()));
return u16(imm);
}
u32 Argument::GetImmediateU32() const noexcept {
const u64 imm = value.GetImmediateAsU64();
ASSERT(imm <= u64(std::numeric_limits<u32>::max()));
assert(imm <= u64(std::numeric_limits<u32>::max()));
return u32(imm);
}
u64 Argument::GetImmediateS32() const noexcept {
ASSERT(FitsInImmediateS32());
assert(FitsInImmediateS32());
return value.GetImmediateAsU64();
}
@ -155,12 +155,12 @@ u64 Argument::GetImmediateU64() const noexcept {
}
IR::Cond Argument::GetImmediateCond() const noexcept {
ASSERT(IsImmediate() && GetType() == IR::Type::Cond);
assert(IsImmediate() && GetType() == IR::Type::Cond);
return value.GetCond();
}
IR::AccType Argument::GetImmediateAccType() const noexcept {
ASSERT(IsImmediate() && GetType() == IR::Type::AccType);
assert(IsImmediate() && GetType() == IR::Type::AccType);
return value.GetAccType();
}
@ -201,7 +201,7 @@ RegAlloc::ArgumentInfo RegAlloc::GetArgumentInfo(const IR::Inst* inst) noexcept
ret[i].value = arg;
if (!arg.IsImmediate() && !IsValuelessType(arg.GetType())) {
auto const loc = ValueLocation(arg.GetInst());
ASSERT(loc && "argument must already been defined");
assert(loc && "argument must already been defined");
LocInfo(*loc).AddArgReference();
}
}
@ -209,7 +209,7 @@ RegAlloc::ArgumentInfo RegAlloc::GetArgumentInfo(const IR::Inst* inst) noexcept
}
void RegAlloc::RegisterPseudoOperation(const IR::Inst* inst) noexcept {
ASSERT(IsValueLive(inst) || !inst->HasUses());
assert(IsValueLive(inst) || !inst->HasUses());
for (size_t i = 0; i < inst->NumArgs(); i++) {
auto const arg = inst->GetArg(i);
if (!arg.IsImmediate() && !IsValuelessType(arg.GetType())) {
@ -222,37 +222,37 @@ void RegAlloc::RegisterPseudoOperation(const IR::Inst* inst) noexcept {
}
Xbyak::Reg64 RegAlloc::UseScratchGpr(BlockOfCode& code, Argument& arg) noexcept {
ASSERT(!arg.allocated);
assert(!arg.allocated);
arg.allocated = true;
return HostLocToReg64(UseScratchImpl(code, arg.value, gpr_order));
}
Xbyak::Xmm RegAlloc::UseScratchXmm(BlockOfCode& code, Argument& arg) noexcept {
ASSERT(!arg.allocated);
assert(!arg.allocated);
arg.allocated = true;
return HostLocToXmm(UseScratchImpl(code, arg.value, xmm_order));
}
void RegAlloc::UseScratch(BlockOfCode& code, Argument& arg, HostLoc host_loc) noexcept {
ASSERT(!arg.allocated);
assert(!arg.allocated);
arg.allocated = true;
UseScratchImpl(code, arg.value, BuildRegSet({host_loc}));
}
void RegAlloc::DefineValue(BlockOfCode& code, IR::Inst* inst, const Xbyak::Reg& reg) noexcept {
ASSERT(reg.getKind() == Xbyak::Operand::XMM || reg.getKind() == Xbyak::Operand::REG);
assert(reg.getKind() == Xbyak::Operand::XMM || reg.getKind() == Xbyak::Operand::REG);
const auto hostloc = static_cast<HostLoc>(reg.getIdx() + static_cast<size_t>(reg.getKind() == Xbyak::Operand::XMM ? HostLoc::XMM0 : HostLoc::RAX));
DefineValueImpl(code, inst, hostloc);
}
void RegAlloc::DefineValue(BlockOfCode& code, IR::Inst* inst, Argument& arg) noexcept {
ASSERT(!arg.allocated);
assert(!arg.allocated);
arg.allocated = true;
DefineValueImpl(code, inst, arg.value);
}
void RegAlloc::Release(const Xbyak::Reg& reg) noexcept {
ASSERT(reg.getKind() == Xbyak::Operand::XMM || reg.getKind() == Xbyak::Operand::REG);
assert(reg.getKind() == Xbyak::Operand::XMM || reg.getKind() == Xbyak::Operand::REG);
const auto hostloc = static_cast<HostLoc>(reg.getIdx() + static_cast<size_t>(reg.getKind() == Xbyak::Operand::XMM ? HostLoc::XMM0 : HostLoc::RAX));
LocInfo(hostloc).ReleaseOne();
}
@ -382,15 +382,15 @@ void RegAlloc::HostCall(
}
void RegAlloc::AllocStackSpace(BlockOfCode& code, const size_t stack_space) noexcept {
ASSERT(stack_space < size_t((std::numeric_limits<s32>::max)()));
ASSERT(reserved_stack_space == 0);
assert(stack_space < size_t((std::numeric_limits<s32>::max)()));
assert(reserved_stack_space == 0);
reserved_stack_space = stack_space;
code.sub(code.rsp, u32(stack_space));
}
void RegAlloc::ReleaseStackSpace(BlockOfCode& code, const size_t stack_space) noexcept {
ASSERT(stack_space < size_t((std::numeric_limits<s32>::max)()));
ASSERT(reserved_stack_space == stack_space);
assert(stack_space < size_t((std::numeric_limits<s32>::max)()));
assert(reserved_stack_space == stack_space);
reserved_stack_space = 0;
code.add(code.rsp, u32(stack_space));
}
@ -410,7 +410,7 @@ HostLoc RegAlloc::SelectARegister(std::bitset<32> desired_locations) const noexc
for (HostLoc i = HostLoc(0); i < HostLoc(desired_locations.size()); i = HostLoc(size_t(i) + 1)) {
if (desired_locations.test(size_t(i))) {
auto const& loc_info = LocInfo(i);
DEBUG_ASSERT(i != ABI_JIT_PTR);
assert(i != ABI_JIT_PTR);
// Abstain from using upper registers unless absolutely nescesary
if (loc_info.IsLocked()) {
// skip, not suitable for allocation
@ -448,7 +448,7 @@ HostLoc RegAlloc::SelectARegister(std::bitset<32> desired_locations) const noexc
auto const it_final = it_empty_candidate != HostLoc::FirstSpill
? it_empty_candidate : it_candidate != HostLoc::FirstSpill
? it_candidate : it_rex_candidate;
ASSERT(it_final != HostLoc::FirstSpill && "All candidate registers have already been allocated");
assert(it_final != HostLoc::FirstSpill && "All candidate registers have already been allocated");
// Evil magic - increment LRU counter (will wrap at 256)
const_cast<RegAlloc*>(this)->LocInfo(HostLoc(it_final)).lru_counter++;
return HostLoc(it_final);
@ -458,26 +458,26 @@ std::optional<HostLoc> RegAlloc::ValueLocation(const IR::Inst* value) const noex
for (size_t i = 0; i < hostloc_info.size(); i++)
if (hostloc_info[i].ContainsValue(value)) {
//for (size_t j = 0; j < hostloc_info.size(); ++j)
// ASSERT((i == j || !hostloc_info[j].ContainsValue(value)) && "duplicate defs");
// assert((i == j || !hostloc_info[j].ContainsValue(value)) && "duplicate defs");
return HostLoc(i);
}
return std::nullopt;
}
void RegAlloc::DefineValueImpl(BlockOfCode& code, IR::Inst* def_inst, HostLoc host_loc) noexcept {
ASSERT(!ValueLocation(def_inst) && "def_inst has already been defined");
assert(!ValueLocation(def_inst) && "def_inst has already been defined");
LocInfo(host_loc).AddValue(host_loc, def_inst);
ASSERT(*ValueLocation(def_inst) == host_loc);
assert(*ValueLocation(def_inst) == host_loc);
}
void RegAlloc::DefineValueImpl(BlockOfCode& code, IR::Inst* def_inst, const IR::Value& use_inst) noexcept {
ASSERT(!ValueLocation(def_inst) && "def_inst has already been defined");
assert(!ValueLocation(def_inst) && "def_inst has already been defined");
if (use_inst.IsImmediate()) {
const HostLoc location = ScratchImpl(code, gpr_order);
DefineValueImpl(code, def_inst, location);
LoadImmediate(code, use_inst, location);
} else {
ASSERT(ValueLocation(use_inst.GetInst()) && "use_inst must already be defined");
assert(ValueLocation(use_inst.GetInst()) && "use_inst must already be defined");
const HostLoc location = *ValueLocation(use_inst.GetInst());
DefineValueImpl(code, def_inst, location);
}
@ -485,22 +485,22 @@ void RegAlloc::DefineValueImpl(BlockOfCode& code, IR::Inst* def_inst, const IR::
void RegAlloc::Move(BlockOfCode& code, HostLoc to, HostLoc from) noexcept {
const size_t bit_width = LocInfo(from).GetMaxBitWidth();
ASSERT(LocInfo(to).IsEmpty() && !LocInfo(from).IsLocked());
ASSERT(bit_width <= HostLocBitWidth(to));
ASSERT(!LocInfo(from).IsEmpty() && "Mov eliminated");
assert(LocInfo(to).IsEmpty() && !LocInfo(from).IsLocked());
assert(bit_width <= HostLocBitWidth(to));
assert(!LocInfo(from).IsEmpty() && "Mov eliminated");
EmitMove(code, bit_width, to, from);
LocInfo(to) = std::exchange(LocInfo(from), {});
}
void RegAlloc::CopyToScratch(BlockOfCode& code, size_t bit_width, HostLoc to, HostLoc from) noexcept {
ASSERT(LocInfo(to).IsEmpty() && !LocInfo(from).IsEmpty());
assert(LocInfo(to).IsEmpty() && !LocInfo(from).IsEmpty());
EmitMove(code, bit_width, to, from);
}
void RegAlloc::Exchange(BlockOfCode& code, HostLoc a, HostLoc b) noexcept {
ASSERT(!LocInfo(a).IsLocked() && !LocInfo(b).IsLocked());
ASSERT(LocInfo(a).GetMaxBitWidth() <= HostLocBitWidth(b));
ASSERT(LocInfo(b).GetMaxBitWidth() <= HostLocBitWidth(a));
assert(!LocInfo(a).IsLocked() && !LocInfo(b).IsLocked());
assert(LocInfo(a).GetMaxBitWidth() <= HostLocBitWidth(b));
assert(LocInfo(b).GetMaxBitWidth() <= HostLocBitWidth(a));
if (LocInfo(a).IsEmpty()) {
Move(code, a, b);
@ -513,16 +513,16 @@ void RegAlloc::Exchange(BlockOfCode& code, HostLoc a, HostLoc b) noexcept {
}
void RegAlloc::MoveOutOfTheWay(BlockOfCode& code, HostLoc reg) noexcept {
ASSERT(!LocInfo(reg).IsLocked());
assert(!LocInfo(reg).IsLocked());
if (!LocInfo(reg).IsEmpty()) {
SpillRegister(code, reg);
}
}
void RegAlloc::SpillRegister(BlockOfCode& code, HostLoc loc) noexcept {
ASSERT(HostLocIsRegister(loc) && "Only registers can be spilled");
ASSERT(!LocInfo(loc).IsEmpty() && "There is no need to spill unoccupied registers");
ASSERT(!LocInfo(loc).IsLocked() && "Registers that have been allocated must not be spilt");
assert(HostLocIsRegister(loc) && "Only registers can be spilled");
assert(!LocInfo(loc).IsEmpty() && "There is no need to spill unoccupied registers");
assert(!LocInfo(loc).IsLocked() && "Registers that have been allocated must not be spilt");
auto const new_loc = FindFreeSpill(HostLocIsXMM(loc));
Move(code, new_loc, loc);
}
@ -558,7 +558,7 @@ HostLoc RegAlloc::FindFreeSpill(bool is_xmm) const noexcept {
}()
HostLoc RegAlloc::LoadImmediate(BlockOfCode& code, IR::Value imm, HostLoc host_loc) noexcept {
ASSERT(imm.IsImmediate() && "imm is not an immediate");
assert(imm.IsImmediate() && "imm is not an immediate");
if (HostLocIsGPR(host_loc)) {
const Xbyak::Reg64 reg = HostLocToReg64(host_loc);
const u64 imm_value = imm.GetImmediateAsU64();
@ -583,9 +583,9 @@ HostLoc RegAlloc::LoadImmediate(BlockOfCode& code, IR::Value imm, HostLoc host_l
void RegAlloc::EmitMove(BlockOfCode& code, const size_t bit_width, const HostLoc to, const HostLoc from) noexcept {
auto const spill_to_op_arg_helper = [&](HostLoc loc, size_t reserved_stack_space) {
ASSERT(HostLocIsSpill(loc));
assert(HostLocIsSpill(loc));
size_t i = size_t(loc) - size_t(HostLoc::FirstSpill);
ASSERT(i < SpillCount && "Spill index greater than number of available spill locations");
assert(i < SpillCount && "Spill index greater than number of available spill locations");
return Xbyak::util::rsp + reserved_stack_space + ABI_SHADOW_SPACE + offsetof(StackLayout, spill) + i * sizeof(StackLayout::spill[0]);
};
auto const spill_xmm_to_op = [&](const HostLoc loc) {
@ -594,21 +594,21 @@ void RegAlloc::EmitMove(BlockOfCode& code, const size_t bit_width, const HostLoc
if (HostLocIsXMM(to) && HostLocIsXMM(from)) {
MAYBE_AVX(movaps, HostLocToXmm(to), HostLocToXmm(from));
} else if (HostLocIsGPR(to) && HostLocIsGPR(from)) {
ASSERT(bit_width != 128);
assert(bit_width != 128);
if (bit_width == 64) {
code.mov(HostLocToReg64(to), HostLocToReg64(from));
} else {
code.mov(HostLocToReg64(to).cvt32(), HostLocToReg64(from).cvt32());
}
} else if (HostLocIsXMM(to) && HostLocIsGPR(from)) {
ASSERT(bit_width != 128);
assert(bit_width != 128);
if (bit_width == 64) {
MAYBE_AVX(movq, HostLocToXmm(to), HostLocToReg64(from));
} else {
MAYBE_AVX(movd, HostLocToXmm(to), HostLocToReg64(from).cvt32());
}
} else if (HostLocIsGPR(to) && HostLocIsXMM(from)) {
ASSERT(bit_width != 128);
assert(bit_width != 128);
if (bit_width == 64) {
MAYBE_AVX(movq, HostLocToReg64(to), HostLocToXmm(from));
} else {
@ -616,7 +616,7 @@ void RegAlloc::EmitMove(BlockOfCode& code, const size_t bit_width, const HostLoc
}
} else if (HostLocIsXMM(to) && HostLocIsSpill(from)) {
const Xbyak::Address spill_addr = spill_xmm_to_op(from);
ASSERT(spill_addr.getBit() >= bit_width);
assert(spill_addr.getBit() >= bit_width);
switch (bit_width) {
case 128:
MAYBE_AVX(movaps, HostLocToXmm(to), spill_addr);
@ -634,7 +634,7 @@ void RegAlloc::EmitMove(BlockOfCode& code, const size_t bit_width, const HostLoc
}
} else if (HostLocIsSpill(to) && HostLocIsXMM(from)) {
const Xbyak::Address spill_addr = spill_xmm_to_op(to);
ASSERT(spill_addr.getBit() >= bit_width);
assert(spill_addr.getBit() >= bit_width);
switch (bit_width) {
case 128:
MAYBE_AVX(movaps, spill_addr, HostLocToXmm(from));
@ -651,14 +651,14 @@ void RegAlloc::EmitMove(BlockOfCode& code, const size_t bit_width, const HostLoc
UNREACHABLE();
}
} else if (HostLocIsGPR(to) && HostLocIsSpill(from)) {
ASSERT(bit_width != 128);
assert(bit_width != 128);
if (bit_width == 64) {
code.mov(HostLocToReg64(to), Xbyak::util::qword[spill_to_op_arg_helper(from, reserved_stack_space)]);
} else {
code.mov(HostLocToReg64(to).cvt32(), Xbyak::util::dword[spill_to_op_arg_helper(from, reserved_stack_space)]);
}
} else if (HostLocIsSpill(to) && HostLocIsGPR(from)) {
ASSERT(bit_width != 128);
assert(bit_width != 128);
if (bit_width == 64) {
code.mov(Xbyak::util::qword[spill_to_op_arg_helper(to, reserved_stack_space)], HostLocToReg64(from));
} else {
@ -671,7 +671,7 @@ void RegAlloc::EmitMove(BlockOfCode& code, const size_t bit_width, const HostLoc
#undef MAYBE_AVX
void RegAlloc::EmitExchange(BlockOfCode& code, const HostLoc a, const HostLoc b) noexcept {
ASSERT(HostLocIsGPR(a) && HostLocIsGPR(b) && "Exchanging XMM registers is uneeded OR invalid emit");
assert(HostLocIsGPR(a) && HostLocIsGPR(b) && "Exchanging XMM registers is uneeded OR invalid emit");
code.xchg(HostLocToReg64(a), HostLocToReg64(b));
}

View file

@ -49,19 +49,19 @@ public:
return is_being_used_count == 0 && current_references == 1 && size_t(accumulated_uses) + 1 == size_t(total_uses);
}
inline void ReadLock() noexcept {
ASSERT(size_t(is_being_used_count) + 1 < (std::numeric_limits<decltype(is_being_used_count)>::max)());
ASSERT(!is_scratch);
assert(size_t(is_being_used_count) + 1 < (std::numeric_limits<decltype(is_being_used_count)>::max)());
assert(!is_scratch);
is_being_used_count++;
}
inline void WriteLock() noexcept {
ASSERT(is_being_used_count == 0);
assert(is_being_used_count == 0);
is_being_used_count++;
is_scratch = true;
}
inline void AddArgReference() noexcept {
ASSERT(size_t(current_references) + 1 < (std::numeric_limits<decltype(current_references)>::max)());
assert(size_t(current_references) + 1 < (std::numeric_limits<decltype(current_references)>::max)());
++current_references;
ASSERT(size_t(accumulated_uses) + current_references <= size_t(total_uses));
assert(size_t(accumulated_uses) + current_references <= size_t(total_uses));
}
void ReleaseOne() noexcept;
void ReleaseAll() noexcept;
@ -147,12 +147,12 @@ public:
return !!ValueLocation(inst);
}
inline Xbyak::Reg64 UseGpr(BlockOfCode& code, Argument& arg) noexcept {
ASSERT(!arg.allocated);
assert(!arg.allocated);
arg.allocated = true;
return HostLocToReg64(UseImpl(code, arg.value, gpr_order));
}
inline Xbyak::Xmm UseXmm(BlockOfCode& code, Argument& arg) noexcept {
ASSERT(!arg.allocated);
assert(!arg.allocated);
arg.allocated = true;
return HostLocToXmm(UseImpl(code, arg.value, xmm_order));
}
@ -160,7 +160,7 @@ public:
return UseGpr(code, arg);
}
inline void Use(BlockOfCode& code, Argument& arg, const HostLoc host_loc) noexcept {
ASSERT(!arg.allocated);
assert(!arg.allocated);
arg.allocated = true;
UseImpl(code, arg.value, BuildRegSet({host_loc}));
}
@ -205,7 +205,7 @@ public:
iter.ReleaseAll();
}
inline void AssertNoMoreUses() noexcept {
ASSERT(std::all_of(hostloc_info.begin(), hostloc_info.end(), [](const auto& i) noexcept { return i.IsEmpty(); }));
assert(std::all_of(hostloc_info.begin(), hostloc_info.end(), [](const auto& i) noexcept { return i.IsEmpty(); }));
}
#ifndef NDEBUG
inline void EmitVerboseDebuggingOutput(BlockOfCode& code) noexcept {
@ -234,11 +234,11 @@ private:
HostLoc FindFreeSpill(bool is_xmm) const noexcept;
inline HostLocInfo& LocInfo(const HostLoc loc) noexcept {
DEBUG_ASSERT(loc != HostLoc::RSP && loc != ABI_JIT_PTR);
assert(loc != HostLoc::RSP && loc != ABI_JIT_PTR);
return hostloc_info[size_t(loc)];
}
inline const HostLocInfo& LocInfo(const HostLoc loc) const noexcept {
DEBUG_ASSERT(loc != HostLoc::RSP && loc != ABI_JIT_PTR);
assert(loc != HostLoc::RSP && loc != ABI_JIT_PTR);
return hostloc_info[size_t(loc)];
}

View file

@ -10,7 +10,7 @@
#include <optional>
#include "common/assert.h"
#include <cassert>
#include "dynarmic/mcl/bit.hpp"
#include "common/common_types.h"
@ -73,7 +73,7 @@ public:
/// Set rounding mode control field.
void RMode(FP::RoundingMode rounding_mode) {
ASSERT(static_cast<u32>(rounding_mode) <= 0b11 && "FPCR: Invalid rounding mode");
assert(static_cast<u32>(rounding_mode) <= 0b11 && "FPCR: Invalid rounding mode");
value = mcl::bit::set_bits<22, 23>(value, static_cast<u32>(rounding_mode));
}
@ -93,7 +93,7 @@ public:
/// Set the stride of a vector when executing AArch32 VFP instructions.
/// This field has no function in AArch64 state.
void Stride(size_t stride) {
ASSERT(stride >= 1 && stride <= 2 && "FPCR: Invalid stride");
assert(stride >= 1 && stride <= 2 && "FPCR: Invalid stride");
value = mcl::bit::set_bits<20, 21>(value, stride == 1 ? 0b00u : 0b11u);
}
@ -116,7 +116,7 @@ public:
/// Sets the length of a vector when executing AArch32 VFP instructions.
/// This field has no function in AArch64 state.
void Len(size_t len) {
ASSERT(len >= 1 && len <= 8 && "FPCR: Invalid len");
assert(len >= 1 && len <= 8 && "FPCR: Invalid len");
value = mcl::bit::set_bits<16, 18>(value, static_cast<u32>(len - 1));
}

View file

@ -10,7 +10,7 @@
#include <tuple>
#include "common/assert.h"
#include <cassert>
#include "common/common_types.h"
#include "dynarmic/common/fp/fpcr.h"

View file

@ -8,7 +8,7 @@
#include "dynarmic/common/fp/op/FPRoundInt.h"
#include "common/assert.h"
#include <cassert>
#include "dynarmic/mcl/bit.hpp"
#include "common/common_types.h"
@ -26,7 +26,7 @@ namespace Dynarmic::FP {
template<typename FPT>
u64 FPRoundInt(FPT op, FPCR fpcr, RoundingMode rounding, bool exact, FPSR& fpsr) {
ASSERT(rounding != RoundingMode::ToOdd);
assert(rounding != RoundingMode::ToOdd);
auto [type, sign, value] = FPUnpack<FPT>(op, fpcr, fpsr);

View file

@ -9,7 +9,7 @@
#include "dynarmic/common/fp/op/FPToFixed.h"
#include <fmt/format.h>
#include "common/assert.h"
#include <cassert>
#include "dynarmic/mcl/bit.hpp"
#include "common/common_types.h"
@ -25,9 +25,9 @@ namespace Dynarmic::FP {
template<typename FPT>
u64 FPToFixed(size_t ibits, FPT op, size_t fbits, bool unsigned_, FPCR fpcr, RoundingMode rounding, FPSR& fpsr) {
ASSERT(rounding != RoundingMode::ToOdd);
ASSERT(ibits <= 64);
ASSERT(fbits <= ibits);
assert(rounding != RoundingMode::ToOdd);
assert(ibits <= 64);
assert(fbits <= ibits);
auto [type, sign, value] = FPUnpack<FPT>(op, fpcr, fpsr);

View file

@ -8,7 +8,7 @@
#include "dynarmic/common/fp/process_exception.h"
#include "common/assert.h"
#include <cassert>
#include "dynarmic/common/fp/fpcr.h"
#include "dynarmic/common/fp/fpsr.h"
@ -18,27 +18,27 @@ namespace Dynarmic::FP {
void FPProcessException(FPExc exception, FPCR fpcr, FPSR& fpsr) {
switch (exception) {
case FPExc::InvalidOp:
ASSERT(!fpcr.IOE() && "Raising floating point exceptions unimplemented");
assert(!fpcr.IOE() && "Raising floating point exceptions unimplemented");
fpsr.IOC(true);
break;
case FPExc::DivideByZero:
ASSERT(!fpcr.DZE() && "Raising floating point exceptions unimplemented");
assert(!fpcr.DZE() && "Raising floating point exceptions unimplemented");
fpsr.DZC(true);
break;
case FPExc::Overflow:
ASSERT(!fpcr.OFE() && "Raising floating point exceptions unimplemented");
assert(!fpcr.OFE() && "Raising floating point exceptions unimplemented");
fpsr.OFC(true);
break;
case FPExc::Underflow:
ASSERT(!fpcr.UFE() && "Raising floating point exceptions unimplemented");
assert(!fpcr.UFE() && "Raising floating point exceptions unimplemented");
fpsr.UFC(true);
break;
case FPExc::Inexact:
ASSERT(!fpcr.IXE() && "Raising floating point exceptions unimplemented");
assert(!fpcr.IXE() && "Raising floating point exceptions unimplemented");
fpsr.IXC(true);
break;
case FPExc::InputDenorm:
ASSERT(!fpcr.IDE() && "Raising floating point exceptions unimplemented");
assert(!fpcr.IDE() && "Raising floating point exceptions unimplemented");
fpsr.IDC(true);
break;
default:

View file

@ -10,7 +10,7 @@
#include <optional>
#include "common/assert.h"
#include <cassert>
#include "dynarmic/mcl/bit.hpp"
#include "dynarmic/common/fp/fpcr.h"
@ -23,7 +23,7 @@ namespace Dynarmic::FP {
template<typename FPT>
FPT FPProcessNaN(FPType type, FPT op, FPCR fpcr, FPSR& fpsr) {
ASSERT(type == FPType::QNaN || type == FPType::SNaN);
assert(type == FPType::QNaN || type == FPType::SNaN);
constexpr size_t topfrac = FPInfo<FPT>::explicit_mantissa_width - 1;

View file

@ -85,8 +85,8 @@ std::tuple<bool, int, u64, ResidualError> Normalize(FPUnpacked op, int extra_rig
template<typename FPT>
FPT FPRoundBase(FPUnpacked op, FPCR fpcr, RoundingMode rounding, FPSR& fpsr) {
ASSERT(op.mantissa != 0);
ASSERT(rounding != RoundingMode::ToNearest_TieAwayFromZero);
assert(op.mantissa != 0);
assert(rounding != RoundingMode::ToNearest_TieAwayFromZero);
constexpr int minimum_exp = FPInfo<FPT>::exponent_min;
constexpr size_t E = FPInfo<FPT>::exponent_width;

View file

@ -15,7 +15,7 @@
# include <llvm-c/Target.h>
#endif
#include "common/assert.h"
#include <cassert>
#include <bit>
#include "common/common_types.h"
@ -37,7 +37,7 @@ std::string DisassembleX64(const void* begin, const void* end) {
while (pos < end) {
char buffer[80];
size_t inst_size = LLVMDisasmInstruction(llvm_ctx, const_cast<u8*>(pos), remaining, reinterpret_cast<u64>(pos), buffer, sizeof(buffer));
ASSERT(inst_size);
assert(inst_size);
for (const u8* i = pos; i < pos + inst_size; i++)
result += fmt::format("{:02x} ", *i);
for (size_t i = inst_size; i < 10; i++)

View file

@ -8,7 +8,7 @@
#include "dynarmic/frontend/A32/a32_ir_emitter.h"
#include "common/assert.h"
#include <cassert>
#include "dynarmic/frontend/A32/a32_types.h"
#include "dynarmic/interface/A32/arch_version.h"
@ -64,12 +64,12 @@ IR::U32U64 IREmitter::GetExtendedRegister(ExtReg reg) {
}
IR::U128 IREmitter::GetVector(ExtReg reg) {
ASSERT(A32::IsDoubleExtReg(reg) || A32::IsQuadExtReg(reg));
assert(A32::IsDoubleExtReg(reg) || A32::IsQuadExtReg(reg));
return Inst<IR::U128>(Opcode::A32GetVector, IR::Value(reg));
}
void IREmitter::SetRegister(const Reg reg, const IR::U32& value) {
ASSERT(reg != A32::Reg::PC);
assert(reg != A32::Reg::PC);
Inst(Opcode::A32SetRegister, IR::Value(reg), value);
}
@ -84,7 +84,7 @@ void IREmitter::SetExtendedRegister(const ExtReg reg, const IR::U32U64& value) {
}
void IREmitter::SetVector(ExtReg reg, const IR::U128& value) {
ASSERT(A32::IsDoubleExtReg(reg) || A32::IsQuadExtReg(reg));
assert(A32::IsDoubleExtReg(reg) || A32::IsQuadExtReg(reg));
Inst(Opcode::A32SetVector, IR::Value(reg), value);
}
@ -361,7 +361,7 @@ IR::U32 IREmitter::ExclusiveWriteMemory64(const IR::U32& vaddr, const IR::U32& v
}
void IREmitter::CoprocInternalOperation(size_t coproc_no, bool two, size_t opc1, CoprocReg CRd, CoprocReg CRn, CoprocReg CRm, size_t opc2) {
ASSERT(coproc_no <= 15);
assert(coproc_no <= 15);
const IR::Value::CoprocessorInfo coproc_info{static_cast<u8>(coproc_no),
static_cast<u8>(two ? 1 : 0),
static_cast<u8>(opc1),
@ -373,7 +373,7 @@ void IREmitter::CoprocInternalOperation(size_t coproc_no, bool two, size_t opc1,
}
void IREmitter::CoprocSendOneWord(size_t coproc_no, bool two, size_t opc1, CoprocReg CRn, CoprocReg CRm, size_t opc2, const IR::U32& word) {
ASSERT(coproc_no <= 15);
assert(coproc_no <= 15);
const IR::Value::CoprocessorInfo coproc_info{static_cast<u8>(coproc_no),
static_cast<u8>(two ? 1 : 0),
static_cast<u8>(opc1),
@ -384,7 +384,7 @@ void IREmitter::CoprocSendOneWord(size_t coproc_no, bool two, size_t opc1, Copro
}
void IREmitter::CoprocSendTwoWords(size_t coproc_no, bool two, size_t opc, CoprocReg CRm, const IR::U32& word1, const IR::U32& word2) {
ASSERT(coproc_no <= 15);
assert(coproc_no <= 15);
const IR::Value::CoprocessorInfo coproc_info{static_cast<u8>(coproc_no),
static_cast<u8>(two ? 1 : 0),
static_cast<u8>(opc),
@ -393,7 +393,7 @@ void IREmitter::CoprocSendTwoWords(size_t coproc_no, bool two, size_t opc, Copro
}
IR::U32 IREmitter::CoprocGetOneWord(size_t coproc_no, bool two, size_t opc1, CoprocReg CRn, CoprocReg CRm, size_t opc2) {
ASSERT(coproc_no <= 15);
assert(coproc_no <= 15);
const IR::Value::CoprocessorInfo coproc_info{static_cast<u8>(coproc_no),
static_cast<u8>(two ? 1 : 0),
static_cast<u8>(opc1),
@ -404,7 +404,7 @@ IR::U32 IREmitter::CoprocGetOneWord(size_t coproc_no, bool two, size_t opc1, Cop
}
IR::U64 IREmitter::CoprocGetTwoWords(size_t coproc_no, bool two, size_t opc, CoprocReg CRm) {
ASSERT(coproc_no <= 15);
assert(coproc_no <= 15);
const IR::Value::CoprocessorInfo coproc_info{static_cast<u8>(coproc_no),
static_cast<u8>(two ? 1 : 0),
static_cast<u8>(opc),
@ -413,7 +413,7 @@ IR::U64 IREmitter::CoprocGetTwoWords(size_t coproc_no, bool two, size_t opc, Cop
}
void IREmitter::CoprocLoadWords(size_t coproc_no, bool two, bool long_transfer, CoprocReg CRd, const IR::U32& address, bool has_option, u8 option) {
ASSERT(coproc_no <= 15);
assert(coproc_no <= 15);
const IR::Value::CoprocessorInfo coproc_info{static_cast<u8>(coproc_no),
static_cast<u8>(two ? 1 : 0),
static_cast<u8>(long_transfer ? 1 : 0),
@ -424,7 +424,7 @@ void IREmitter::CoprocLoadWords(size_t coproc_no, bool two, bool long_transfer,
}
void IREmitter::CoprocStoreWords(size_t coproc_no, bool two, bool long_transfer, CoprocReg CRd, const IR::U32& address, bool has_option, u8 option) {
ASSERT(coproc_no <= 15);
assert(coproc_no <= 15);
const IR::Value::CoprocessorInfo coproc_info{static_cast<u8>(coproc_no),
static_cast<u8>(two ? 1 : 0),
static_cast<u8>(long_transfer ? 1 : 0),

View file

@ -10,7 +10,7 @@
#include <string>
#include <fmt/format.h>
#include "common/assert.h"
#include <cassert>
#include "common/common_types.h"
#include "dynarmic/interface/A32/coprocessor_util.h"
#include "dynarmic/ir/cond.h"
@ -85,7 +85,7 @@ constexpr bool IsQuadExtReg(ExtReg reg) {
}
inline size_t RegNumber(Reg reg) {
ASSERT(reg != Reg::INVALID_REG);
assert(reg != Reg::INVALID_REG);
return size_t(reg);
}
@ -95,13 +95,13 @@ inline size_t RegNumber(ExtReg reg) {
} else if (IsDoubleExtReg(reg)) {
return size_t(reg) - size_t(ExtReg::D0);
}
ASSERT(IsQuadExtReg(reg));
assert(IsQuadExtReg(reg));
return size_t(reg) - size_t(ExtReg::Q0);
}
inline Reg operator+(Reg reg, size_t number) {
const size_t new_reg = RegNumber(reg) + number;
ASSERT(new_reg <= 15);
assert(new_reg <= 15);
return static_cast<Reg>(new_reg);
}
@ -109,7 +109,7 @@ inline Reg operator+(Reg reg, size_t number) {
inline ExtReg operator+(ExtReg reg, size_t number) {
const auto new_reg = static_cast<ExtReg>(static_cast<size_t>(reg) + number);
ASSERT((IsSingleExtReg(reg) && IsSingleExtReg(new_reg))
assert((IsSingleExtReg(reg) && IsSingleExtReg(new_reg))
|| (IsDoubleExtReg(reg) && IsDoubleExtReg(new_reg))
|| (IsQuadExtReg(reg) && IsQuadExtReg(new_reg)));

View file

@ -10,7 +10,7 @@
#include <algorithm>
#include "common/assert.h"
#include <cassert>
#include "common/common_types.h"
#include "dynarmic/frontend/A32/a32_ir_emitter.h"
@ -21,7 +21,7 @@
namespace Dynarmic::A32 {
bool CondCanContinue(const ConditionalState cond_state, const A32::IREmitter& ir) {
ASSERT(cond_state != ConditionalState::Break && "Should never happen.");
assert(cond_state != ConditionalState::Break && "Should never happen.");
if (cond_state == ConditionalState::None)
return true;
@ -32,7 +32,7 @@ bool CondCanContinue(const ConditionalState cond_state, const A32::IREmitter& ir
}
bool IsConditionPassed(TranslatorVisitor& v, IR::Cond cond) {
ASSERT(v.cond_state != ConditionalState::Break && "This should never happen. We requested a break but that wasn't honored.");
assert(v.cond_state != ConditionalState::Break && "This should never happen. We requested a break but that wasn't honored.");
if (cond == IR::Cond::NV) {
// NV conditional is obsolete

View file

@ -8,7 +8,7 @@
#include "dynarmic/frontend/A32/translate/impl/a32_translate_impl.h"
#include "common/assert.h"
#include <cassert>
#include "dynarmic/interface/A32/config.h"
@ -29,7 +29,7 @@ bool TranslatorVisitor::ThumbConditionPassed() {
bool TranslatorVisitor::VFPConditionPassed(Cond cond) {
if (ir.current_location.TFlag()) {
ASSERT(cond == Cond::AL);
assert(cond == Cond::AL);
return true;
}
return ArmConditionPassed(cond);

View file

@ -8,7 +8,7 @@
#pragma once
#include "common/assert.h"
#include <cassert>
#include "dynarmic/mcl/bit.hpp"
#include "dynarmic/frontend/A32/a32_ir_emitter.h"

View file

@ -8,7 +8,7 @@
#include <vector>
#include "common/assert.h"
#include <cassert>
#include "dynarmic/mcl/bit.hpp"
#include "dynarmic/frontend/A32/translate/impl/a32_translate_impl.h"

View file

@ -6,7 +6,7 @@
* SPDX-License-Identifier: 0BSD
*/
#include "common/assert.h"
#include <cassert>
#include "dynarmic/mcl/bit.hpp"
#include "dynarmic/frontend/A32/translate/impl/a32_translate_impl.h"

View file

@ -8,7 +8,7 @@
#include <utility>
#include "common/assert.h"
#include <cassert>
#include "dynarmic/mcl/bit.hpp"
#include "dynarmic/frontend/A32/translate/impl/a32_translate_impl.h"

View file

@ -6,7 +6,7 @@
* SPDX-License-Identifier: 0BSD
*/
#include "common/assert.h"
#include <cassert>
#include "dynarmic/mcl/bit.hpp"
#include "dynarmic/frontend/A32/translate/impl/a32_translate_impl.h"
@ -130,7 +130,7 @@ bool ShiftRightNarrowing(TranslatorVisitor& v, bool D, size_t imm6, size_t Vd, b
}
return v.ir.VectorUnsignedSaturatedNarrow(source_esize, wide_result);
case Narrowing::SaturateToSigned:
ASSERT(signedness == Signedness::Signed);
assert(signedness == Signedness::Signed);
return v.ir.VectorSignedSaturatedNarrowToSigned(source_esize, wide_result);
}
UNREACHABLE();

View file

@ -95,7 +95,7 @@ bool TranslatorVisitor::arm_LDR_imm(Cond cond, bool P, bool U, bool W, Reg n, Re
return UnpredictableInstruction();
}
ASSERT(!(!P && W) && "T form of instruction unimplemented");
assert(!(!P && W) && "T form of instruction unimplemented");
if ((!P || W) && n == t) {
return UnpredictableInstruction();
}
@ -126,7 +126,7 @@ bool TranslatorVisitor::arm_LDR_imm(Cond cond, bool P, bool U, bool W, Reg n, Re
// LDR <Rt>, [<Rn>, #+/-<Rm>]{!}
// LDR <Rt>, [<Rn>], #+/-<Rm>
bool TranslatorVisitor::arm_LDR_reg(Cond cond, bool P, bool U, bool W, Reg n, Reg t, Imm<5> imm5, ShiftType shift, Reg m) {
ASSERT(!(!P && W) && "T form of instruction unimplemented");
assert(!(!P && W) && "T form of instruction unimplemented");
if (m == Reg::PC) {
return UnpredictableInstruction();
}
@ -184,7 +184,7 @@ bool TranslatorVisitor::arm_LDRB_imm(Cond cond, bool P, bool U, bool W, Reg n, R
return UnpredictableInstruction();
}
ASSERT(!(!P && W) && "T form of instruction unimplemented");
assert(!(!P && W) && "T form of instruction unimplemented");
if ((!P || W) && n == t) {
return UnpredictableInstruction();
}
@ -209,7 +209,7 @@ bool TranslatorVisitor::arm_LDRB_imm(Cond cond, bool P, bool U, bool W, Reg n, R
// LDRB <Rt>, [<Rn>, #+/-<Rm>]{!}
// LDRB <Rt>, [<Rn>], #+/-<Rm>
bool TranslatorVisitor::arm_LDRB_reg(Cond cond, bool P, bool U, bool W, Reg n, Reg t, Imm<5> imm5, ShiftType shift, Reg m) {
ASSERT(!(!P && W) && "T form of instruction unimplemented");
assert(!(!P && W) && "T form of instruction unimplemented");
if (t == Reg::PC || m == Reg::PC) {
return UnpredictableInstruction();
}
@ -352,7 +352,7 @@ bool TranslatorVisitor::arm_LDRD_reg(Cond cond, bool P, bool U, bool W, Reg n, R
// LDRH <Rt>, [PC, #-/+<imm>]
bool TranslatorVisitor::arm_LDRH_lit(Cond cond, bool P, bool U, bool W, Reg t, Imm<4> imm8a, Imm<4> imm8b) {
ASSERT(!(!P && W) && "T form of instruction unimplemented");
assert(!(!P && W) && "T form of instruction unimplemented");
if (P == W) {
return UnpredictableInstruction();
}
@ -382,7 +382,7 @@ bool TranslatorVisitor::arm_LDRH_imm(Cond cond, bool P, bool U, bool W, Reg n, R
return UnpredictableInstruction();
}
ASSERT(!(!P && W) && "T form of instruction unimplemented");
assert(!(!P && W) && "T form of instruction unimplemented");
if ((!P || W) && n == t) {
return UnpredictableInstruction();
}
@ -407,7 +407,7 @@ bool TranslatorVisitor::arm_LDRH_imm(Cond cond, bool P, bool U, bool W, Reg n, R
// LDRH <Rt>, [<Rn>, #+/-<Rm>]{!}
// LDRH <Rt>, [<Rn>], #+/-<Rm>
bool TranslatorVisitor::arm_LDRH_reg(Cond cond, bool P, bool U, bool W, Reg n, Reg t, Reg m) {
ASSERT(!(!P && W) && "T form of instruction unimplemented");
assert(!(!P && W) && "T form of instruction unimplemented");
if (t == Reg::PC || m == Reg::PC) {
return UnpredictableInstruction();
}
@ -456,7 +456,7 @@ bool TranslatorVisitor::arm_LDRSB_imm(Cond cond, bool P, bool U, bool W, Reg n,
return UnpredictableInstruction();
}
ASSERT(!(!P && W) && "T form of instruction unimplemented");
assert(!(!P && W) && "T form of instruction unimplemented");
if ((!P || W) && n == t) {
return UnpredictableInstruction();
}
@ -481,7 +481,7 @@ bool TranslatorVisitor::arm_LDRSB_imm(Cond cond, bool P, bool U, bool W, Reg n,
// LDRSB <Rt>, [<Rn>, #+/-<Rm>]{!}
// LDRSB <Rt>, [<Rn>], #+/-<Rm>
bool TranslatorVisitor::arm_LDRSB_reg(Cond cond, bool P, bool U, bool W, Reg n, Reg t, Reg m) {
ASSERT(!(!P && W) && "T form of instruction unimplemented");
assert(!(!P && W) && "T form of instruction unimplemented");
if (t == Reg::PC || m == Reg::PC) {
return UnpredictableInstruction();
}
@ -529,7 +529,7 @@ bool TranslatorVisitor::arm_LDRSH_imm(Cond cond, bool P, bool U, bool W, Reg n,
return UnpredictableInstruction();
}
ASSERT(!(!P && W) && "T form of instruction unimplemented");
assert(!(!P && W) && "T form of instruction unimplemented");
if ((!P || W) && n == t) {
return UnpredictableInstruction();
}
@ -554,7 +554,7 @@ bool TranslatorVisitor::arm_LDRSH_imm(Cond cond, bool P, bool U, bool W, Reg n,
// LDRSH <Rt>, [<Rn>, #+/-<Rm>]{!}
// LDRSH <Rt>, [<Rn>], #+/-<Rm>
bool TranslatorVisitor::arm_LDRSH_reg(Cond cond, bool P, bool U, bool W, Reg n, Reg t, Reg m) {
ASSERT(!(!P && W) && "T form of instruction unimplemented");
assert(!(!P && W) && "T form of instruction unimplemented");
if (t == Reg::PC || m == Reg::PC) {
return UnpredictableInstruction();
}

View file

@ -34,7 +34,7 @@ bool TranslatorVisitor::arm_MRS(Cond cond, Reg d) {
// MSR<c> <spec_reg>, #<const>
bool TranslatorVisitor::arm_MSR_imm(Cond cond, unsigned mask, int rotate, Imm<8> imm8) {
ASSERT(mask != 0 && "Decode error");
assert(mask != 0 && "Decode error");
if (!ArmConditionPassed(cond)) {
return true;

View file

@ -687,7 +687,7 @@ bool TranslatorVisitor::thumb16_NOP() {
// IT{<x>{<y>{<z>}}} <cond>
bool TranslatorVisitor::thumb16_IT(Imm<8> imm8) {
ASSERT((imm8.Bits<0, 3>() != 0b0000) && "Decode Error");
assert((imm8.Bits<0, 3>() != 0b0000) && "Decode Error");
if (imm8.Bits<4, 7>() == 0b1111 || (imm8.Bits<4, 7>() == 0b1110 && mcl::bit::count_ones(imm8.Bits<0, 3>()) != 1)) {
return UnpredictableInstruction();
}

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
/* This file is part of the dynarmic project.
@ -23,7 +23,7 @@ bool TranslatorVisitor::thumb32_TST_imm(Imm<1> i, Reg n, Imm<3> imm3, Imm<8> imm
}
bool TranslatorVisitor::thumb32_AND_imm(Imm<1> i, bool S, Reg n, Imm<3> imm3, Reg d, Imm<8> imm8) {
ASSERT(!(d == Reg::PC && S) && "Decode error");
assert(!(d == Reg::PC && S) && "Decode error");
if ((d == Reg::PC && !S) || n == Reg::PC) {
return UnpredictableInstruction();
}
@ -69,7 +69,7 @@ bool TranslatorVisitor::thumb32_MOV_imm(Imm<1> i, bool S, Imm<3> imm3, Reg d, Im
}
bool TranslatorVisitor::thumb32_ORR_imm(Imm<1> i, bool S, Reg n, Imm<3> imm3, Reg d, Imm<8> imm8) {
ASSERT(n != Reg::PC && "Decode error");
assert(n != Reg::PC && "Decode error");
if (d == Reg::PC) {
return UnpredictableInstruction();
}
@ -100,7 +100,7 @@ bool TranslatorVisitor::thumb32_MVN_imm(Imm<1> i, bool S, Imm<3> imm3, Reg d, Im
}
bool TranslatorVisitor::thumb32_ORN_imm(Imm<1> i, bool S, Reg n, Imm<3> imm3, Reg d, Imm<8> imm8) {
ASSERT(n != Reg::PC && "Decode error");
assert(n != Reg::PC && "Decode error");
if (d == Reg::PC) {
return UnpredictableInstruction();
}
@ -128,7 +128,7 @@ bool TranslatorVisitor::thumb32_TEQ_imm(Imm<1> i, Reg n, Imm<3> imm3, Imm<8> imm
}
bool TranslatorVisitor::thumb32_EOR_imm(Imm<1> i, bool S, Reg n, Imm<3> imm3, Reg d, Imm<8> imm8) {
ASSERT(!(d == Reg::PC && S) && "Decode error");
assert(!(d == Reg::PC && S) && "Decode error");
if ((d == Reg::PC && !S) || n == Reg::PC) {
return UnpredictableInstruction();
}
@ -156,7 +156,7 @@ bool TranslatorVisitor::thumb32_CMN_imm(Imm<1> i, Reg n, Imm<3> imm3, Imm<8> imm
}
bool TranslatorVisitor::thumb32_ADD_imm_1(Imm<1> i, bool S, Reg n, Imm<3> imm3, Reg d, Imm<8> imm8) {
ASSERT(!(d == Reg::PC && S) && "Decode error");
assert(!(d == Reg::PC && S) && "Decode error");
if ((d == Reg::PC && !S) || n == Reg::PC) {
return UnpredictableInstruction();
}
@ -214,7 +214,7 @@ bool TranslatorVisitor::thumb32_CMP_imm(Imm<1> i, Reg n, Imm<3> imm3, Imm<8> imm
}
bool TranslatorVisitor::thumb32_SUB_imm_1(Imm<1> i, bool S, Reg n, Imm<3> imm3, Reg d, Imm<8> imm8) {
ASSERT(!(d == Reg::PC && S) && "Decode error");
assert(!(d == Reg::PC && S) && "Decode error");
if ((d == Reg::PC && !S) || n == Reg::PC) {
return UnpredictableInstruction();
}

View file

@ -6,7 +6,7 @@
* SPDX-License-Identifier: 0BSD
*/
#include "common/assert.h"
#include <cassert>
#include "dynarmic/mcl/bit.hpp"
#include "dynarmic/frontend/A32/translate/impl/a32_translate_impl.h"
@ -17,7 +17,7 @@ namespace Dynarmic::A32 {
using SaturationFunction = IR::ResultAndOverflow<IR::U32> (IREmitter::*)(const IR::U32&, size_t);
static bool Saturation(TranslatorVisitor& v, bool sh, Reg n, Reg d, Imm<5> shift_amount, size_t saturate_to, SaturationFunction sat_fn) {
ASSERT(!(sh && shift_amount == 0) && "Invalid decode");
assert(!(sh && shift_amount == 0) && "Invalid decode");
if (d == Reg::PC || n == Reg::PC) {
return v.UnpredictableInstruction();

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
/* This file is part of the dynarmic project.
@ -23,7 +23,7 @@ bool TranslatorVisitor::thumb32_TST_reg(Reg n, Imm<3> imm3, Imm<2> imm2, ShiftTy
}
bool TranslatorVisitor::thumb32_AND_reg(bool S, Reg n, Imm<3> imm3, Reg d, Imm<2> imm2, ShiftType type, Reg m) {
ASSERT(!(d == Reg::PC && S) && "Decode error");
assert(!(d == Reg::PC && S) && "Decode error");
if ((d == Reg::PC && !S) || n == Reg::PC || m == Reg::PC) {
return UnpredictableInstruction();
@ -67,7 +67,7 @@ bool TranslatorVisitor::thumb32_MOV_reg(bool S, Imm<3> imm3, Reg d, Imm<2> imm2,
}
bool TranslatorVisitor::thumb32_ORR_reg(bool S, Reg n, Imm<3> imm3, Reg d, Imm<2> imm2, ShiftType type, Reg m) {
ASSERT(n != Reg::PC && "Decode error");
assert(n != Reg::PC && "Decode error");
if (d == Reg::PC || m == Reg::PC) {
return UnpredictableInstruction();
@ -97,7 +97,7 @@ bool TranslatorVisitor::thumb32_MVN_reg(bool S, Imm<3> imm3, Reg d, Imm<2> imm2,
}
bool TranslatorVisitor::thumb32_ORN_reg(bool S, Reg n, Imm<3> imm3, Reg d, Imm<2> imm2, ShiftType type, Reg m) {
ASSERT(n != Reg::PC && "Decode error");
assert(n != Reg::PC && "Decode error");
if (d == Reg::PC || m == Reg::PC) {
return UnpredictableInstruction();
@ -125,7 +125,7 @@ bool TranslatorVisitor::thumb32_TEQ_reg(Reg n, Imm<3> imm3, Imm<2> imm2, ShiftTy
}
bool TranslatorVisitor::thumb32_EOR_reg(bool S, Reg n, Imm<3> imm3, Reg d, Imm<2> imm2, ShiftType type, Reg m) {
ASSERT(!(d == Reg::PC && S) && "Decode error");
assert(!(d == Reg::PC && S) && "Decode error");
if ((d == Reg::PC && !S) || n == Reg::PC || m == Reg::PC) {
return UnpredictableInstruction();
@ -168,7 +168,7 @@ bool TranslatorVisitor::thumb32_CMN_reg(Reg n, Imm<3> imm3, Imm<2> imm2, ShiftTy
}
bool TranslatorVisitor::thumb32_ADD_reg(bool S, Reg n, Imm<3> imm3, Reg d, Imm<2> imm2, ShiftType type, Reg m) {
ASSERT(!(d == Reg::PC && S) && "Decode error");
assert(!(d == Reg::PC && S) && "Decode error");
if ((d == Reg::PC && !S) || n == Reg::PC || m == Reg::PC) {
return UnpredictableInstruction();
@ -224,7 +224,7 @@ bool TranslatorVisitor::thumb32_CMP_reg(Reg n, Imm<3> imm3, Imm<2> imm2, ShiftTy
}
bool TranslatorVisitor::thumb32_SUB_reg(bool S, Reg n, Imm<3> imm3, Reg d, Imm<2> imm2, ShiftType type, Reg m) {
ASSERT(!(d == Reg::PC && S) && "Decode error");
assert(!(d == Reg::PC && S) && "Decode error");
if ((d == Reg::PC && !S) || n == Reg::PC || m == Reg::PC) {
return UnpredictableInstruction();

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
/* This file is part of the dynarmic project.
@ -1304,11 +1304,11 @@ bool TranslatorVisitor::vfp_VSTR(Cond cond, bool U, bool D, Reg n, size_t Vd, bo
// VSTM{mode}<c> <Rn>{!}, <list of double registers>
bool TranslatorVisitor::vfp_VSTM_a1(Cond cond, bool p, bool u, bool D, bool w, Reg n, size_t Vd, Imm<8> imm8) {
if (!p && !u && !w) {
ASSERT(false && "Decode error");
assert(false && "Decode error");
}
if (p && !w) {
ASSERT(false && "Decode error");
assert(false && "Decode error");
}
if (p == u && w) {
@ -1356,11 +1356,11 @@ bool TranslatorVisitor::vfp_VSTM_a1(Cond cond, bool p, bool u, bool D, bool w, R
// VSTM{mode}<c> <Rn>{!}, <list of single registers>
bool TranslatorVisitor::vfp_VSTM_a2(Cond cond, bool p, bool u, bool D, bool w, Reg n, size_t Vd, Imm<8> imm8) {
if (!p && !u && !w) {
ASSERT(false && "Decode error");
assert(false && "Decode error");
}
if (p && !w) {
ASSERT(false && "Decode error");
assert(false && "Decode error");
}
if (p == u && w) {
@ -1399,11 +1399,11 @@ bool TranslatorVisitor::vfp_VSTM_a2(Cond cond, bool p, bool u, bool D, bool w, R
// VLDM{mode}<c> <Rn>{!}, <list of double registers>
bool TranslatorVisitor::vfp_VLDM_a1(Cond cond, bool p, bool u, bool D, bool w, Reg n, size_t Vd, Imm<8> imm8) {
if (!p && !u && !w) {
ASSERT(false && "Decode error");
assert(false && "Decode error");
}
if (p && !w) {
ASSERT(false && "Decode error");
assert(false && "Decode error");
}
if (p == u && w) {
@ -1449,11 +1449,11 @@ bool TranslatorVisitor::vfp_VLDM_a1(Cond cond, bool p, bool u, bool D, bool w, R
// VLDM{mode}<c> <Rn>{!}, <list of single registers>
bool TranslatorVisitor::vfp_VLDM_a2(Cond cond, bool p, bool u, bool D, bool w, Reg n, size_t Vd, Imm<8> imm8) {
if (!p && !u && !w) {
ASSERT(false && "Decode error");
assert(false && "Decode error");
}
if (p && !w) {
ASSERT(false && "Decode error");
assert(false && "Decode error");
}
if (p == u && w) {

View file

@ -6,7 +6,7 @@
* SPDX-License-Identifier: 0BSD
*/
#include "common/assert.h"
#include <cassert>
#include "dynarmic/frontend/A32/a32_location_descriptor.h"
#include "dynarmic/frontend/A32/a32_types.h"
@ -73,7 +73,7 @@ void TranslateArm(IR::Block& block, LocationDescriptor descriptor, TranslateCall
}
}
}
ASSERT(block.HasTerminal() && "Terminal has not been set");
assert(block.HasTerminal() && "Terminal has not been set");
block.SetEndLocation(visitor.ir.current_location);
}

View file

@ -8,7 +8,7 @@
#include <tuple>
#include "common/assert.h"
#include <cassert>
#include "dynarmic/mcl/bit.hpp"
#include "dynarmic/frontend/A32/a32_ir_emitter.h"
@ -172,7 +172,7 @@ void TranslateThumb(IR::Block& block, LocationDescriptor descriptor, TranslateCa
}
}
}
ASSERT(block.HasTerminal() && "Terminal has not been set");
assert(block.HasTerminal() && "Terminal has not been set");
block.SetEndLocation(visitor.ir.current_location);
}

View file

@ -11,7 +11,7 @@
#include <optional>
#include "common/common_types.h"
#include "common/assert.h"
#include <cassert>
#include "dynarmic/frontend/A64/a64_location_descriptor.h"
#include "dynarmic/frontend/A64/a64_types.h"

View file

@ -11,7 +11,7 @@
#include <string>
#include <fmt/format.h>
#include "common/assert.h"
#include <cassert>
#include "common/common_types.h"
#include "dynarmic/ir/cond.h"
@ -114,14 +114,14 @@ constexpr size_t VecNumber(Vec vec) {
inline Reg operator+(Reg reg, size_t number) {
const size_t new_reg = RegNumber(reg) + number;
ASSERT(new_reg <= 31);
assert(new_reg <= 31);
return static_cast<Reg>(new_reg);
}
inline Vec operator+(Vec vec, size_t number) {
const size_t new_vec = VecNumber(vec) + number;
ASSERT(new_vec <= 31);
assert(new_vec <= 31);
return static_cast<Vec>(new_vec);
}

View file

@ -40,7 +40,7 @@ void Translate(IR::Block& block, LocationDescriptor descriptor, MemoryReadCodeFu
if (single_step && should_continue) {
visitor.ir.SetTerm(IR::Term::LinkBlock{*visitor.ir.current_location});
}
ASSERT(block.HasTerminal() && "Terminal has not been set");
assert(block.HasTerminal() && "Terminal has not been set");
block.SetEndLocation(*visitor.ir.current_location);
}

View file

@ -170,8 +170,8 @@ void TranslatorVisitor::V_scalar(size_t bitsize, Vec vec, IR::UAnyU128 value) {
}
IR::U128 TranslatorVisitor::Vpart(size_t bitsize, Vec vec, size_t part) {
ASSERT(part == 0 || part == 1);
ASSERT(bitsize == 64);
assert(part == 0 || part == 1);
assert(bitsize == 64);
if (part == 0) {
return V(64, vec);
}
@ -179,33 +179,33 @@ IR::U128 TranslatorVisitor::Vpart(size_t bitsize, Vec vec, size_t part) {
}
void TranslatorVisitor::Vpart(size_t bitsize, Vec vec, size_t part, IR::U128 value) {
ASSERT(part == 0 || part == 1);
assert(part == 0 || part == 1);
if (part == 0) {
ASSERT(bitsize == 64);
assert(bitsize == 64);
V(128, vec, ir.VectorZeroExtend(bitsize, value));
} else {
ASSERT(bitsize == 64);
assert(bitsize == 64);
V(128, vec, ir.VectorInterleaveLower(64, V(128, vec), value));
}
}
IR::UAny TranslatorVisitor::Vpart_scalar(size_t bitsize, Vec vec, size_t part) {
ASSERT(part == 0 || part == 1);
assert(part == 0 || part == 1);
if (part == 0) {
ASSERT(bitsize == 8 || bitsize == 16 || bitsize == 32 || bitsize == 64);
assert(bitsize == 8 || bitsize == 16 || bitsize == 32 || bitsize == 64);
} else {
ASSERT(bitsize == 64);
assert(bitsize == 64);
}
return ir.VectorGetElement(bitsize, V(128, vec), part);
}
void TranslatorVisitor::Vpart_scalar(size_t bitsize, Vec vec, size_t part, IR::UAny value) {
ASSERT(part == 0 || part == 1);
assert(part == 0 || part == 1);
if (part == 0) {
ASSERT(bitsize == 8 || bitsize == 16 || bitsize == 32 || bitsize == 64);
assert(bitsize == 8 || bitsize == 16 || bitsize == 32 || bitsize == 64);
V(128, vec, ir.ZeroExtendToQuad(value));
} else {
ASSERT(bitsize == 64);
assert(bitsize == 64);
V(128, vec, ir.VectorSetElement(64, V(128, vec), 1, value));
}
}
@ -315,8 +315,8 @@ IR::U32U64 TranslatorVisitor::ShiftReg(size_t bitsize, Reg reg, Imm<2> shift, IR
}
IR::U32U64 TranslatorVisitor::ExtendReg(size_t bitsize, Reg reg, Imm<3> option, u8 shift) {
ASSERT(shift <= 4);
ASSERT(bitsize == 32 || bitsize == 64);
assert(shift <= 4);
assert(bitsize == 32 || bitsize == 64);
IR::UAny val = X(bitsize, reg);
size_t len;
IR::U32U64 extended;

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
@ -48,7 +51,7 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, IR::MemOp
default:
return v.UnallocatedEncoding();
}
ASSERT(rpt == 1 || selem == 1);
assert(rpt == 1 || selem == 1);
if ((size == 0b11 && !Q) && selem != 1) {
return v.ReservedValue();

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
@ -18,10 +21,10 @@ static bool LoadStoreRegisterImmediate(TranslatorVisitor& v, bool wback, bool po
signed_ = false;
} else if (size == 0b11) {
memop = IR::MemOp::PREFETCH;
ASSERT(!opc.Bit<0>());
assert(!opc.Bit<0>());
} else {
memop = IR::MemOp::LOAD;
ASSERT(!(size == 0b10 && opc.Bit<0>() == 1));
assert(!(size == 0b10 && opc.Bit<0>() == 1));
regsize = opc.Bit<0>() ? 32 : 64;
signed_ = true;
}

View file

@ -198,7 +198,7 @@ bool ShiftRightNarrowing(TranslatorVisitor& v, Imm<4> immh, Imm<3> immb, Vec Vn,
}
return v.ir.VectorUnsignedSaturatedNarrow(source_esize, wide_result);
case Narrowing::SaturateToSigned:
ASSERT(SignednessSSSBI == SignednessSSSBI::Signed);
assert(SignednessSSSBI == SignednessSSSBI::Signed);
return v.ir.VectorSignedSaturatedNarrowToSigned(source_esize, wide_result);
}
UNREACHABLE();

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
/* This file is part of the dynarmic project.
@ -73,7 +73,7 @@ bool MultiplyByElementHalfPrecision(TranslatorVisitor& v, Imm<1> L, Imm<1> M, Im
// TODO: Currently we don't implement half-precision paths
// for regular multiplication and extended multiplication.
ASSERT(extra_behavior != ExtraBehavior::None
assert(extra_behavior != ExtraBehavior::None
&& extra_behavior != ExtraBehavior::MultiplyExtended);
if (extra_behavior == ExtraBehavior::Subtract) {
operand1 = v.ir.FPNeg(operand1);

View file

@ -127,7 +127,7 @@ bool ShiftRightNarrowingSSBI(TranslatorVisitor& v, bool Q, Imm<4> immh, Imm<3> i
}
return v.ir.VectorUnsignedSaturatedNarrow(source_esize, wide_result);
case NarrowingSSBI::SaturateToSigned:
ASSERT(SignednessSSBI == SignednessSSBI::Signed);
assert(SignednessSSBI == SignednessSSBI::Signed);
return v.ir.VectorSignedSaturatedNarrowToSigned(source_esize, wide_result);
}
UNREACHABLE();

View file

@ -66,7 +66,7 @@ bool TranslatorVisitor::FCMLA_vec(bool Q, Imm<2> size, Vec Vm, Imm<2> rot, Vec V
const size_t esize = 8U << size.ZeroExtend();
// TODO: Currently we don't support half-precision floating point
ASSERT(esize != 16);
assert(esize != 16);
const size_t datasize = Q ? 128 : 64;
const size_t num_elements = datasize / esize;
@ -135,7 +135,7 @@ bool TranslatorVisitor::FCADD_vec(bool Q, Imm<2> size, Vec Vm, Imm<1> rot, Vec V
const size_t esize = 8U << size.ZeroExtend();
// TODO: Currently we don't support half-precision floating point
ASSERT(esize != 16);
assert(esize != 16);
const size_t datasize = Q ? 128 : 64;
const size_t num_elements = datasize / esize;

View file

@ -8,7 +8,7 @@
#include <utility>
#include "common/assert.h"
#include <cassert>
#include "dynarmic/frontend/A64/translate/impl/impl.h"
@ -223,7 +223,7 @@ bool TranslatorVisitor::FCMLA_elt(bool Q, Imm<2> size, Imm<1> L, Imm<1> M, Imm<4
const size_t esize = 8U << size.ZeroExtend();
// TODO: We don't support the half-precision floating point variant yet.
ASSERT(esize != 16);
assert(esize != 16);
const size_t index = [=] {
if (size == 0b01) {

View file

@ -12,7 +12,7 @@
#include <array>
#include <tuple>
#include "common/assert.h"
#include <cassert>
#include "dynarmic/mcl/bit.hpp"
#include "dynarmic/mcl/function_info.hpp"
@ -99,9 +99,9 @@ struct detail {
shifts[arg_index] = bit_position;
}
}
#if !defined(DYNARMIC_IGNORE_ASSERTS) && !defined(__ANDROID__)
#if !defined(DYNARMIC_IGNORE_ASSERTS) && !defined(__ANDROID__)
// Avoids a MSVC ICE, and avoids Android NDK issue.
ASSERT(std::all_of(masks.begin(), masks.end(), [](auto m) { return m != 0; }));
assert(std::all_of(masks.begin(), masks.end(), [](auto m) { return m != 0; }));
#endif
return std::make_tuple(masks, shifts);
}

View file

@ -10,7 +10,7 @@
#include <functional>
#include "common/assert.h"
#include <cassert>
namespace Dynarmic::Decoder {
@ -51,7 +51,7 @@ public:
/// @param v The visitor to use
/// @param instruction The instruction to decode.
inline handler_return_type call(Visitor& v, opcode_type instruction) const noexcept {
ASSERT(Matches(instruction));
assert(Matches(instruction));
return fn(v, instruction);
}

Some files were not shown because too many files have changed in this diff Show more