Mirror of https://github.com/holub/mame (synced 2025-04-09 18:17:44 +03:00)
cpu/drcbearm64.cpp: Added a 64-bit ARMv8 (AArch64) DRC back-end. (#13162)
* cpu/uml.cpp: Removed unused vector type.
* 3rdparty/asmjit: Update asmjit to latest upstream.
* cpu/drcbex64.cpp: Fixed crash with LOG_HASHJMPS enabled (stack needs to be 16-byte aligned before calling debug_log_hashjmp_fail).
This commit is contained in:
parent 4dab319804
commit aa5cd150b3
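The drcbex64.cpp note above refers to the x86-64 ABI rule (Windows x64 and System V alike) that RSP must be 16-byte aligned at every CALL instruction. A minimal, hypothetical AsmJit sketch of that idea follows; it is not MAME's actual generator code, and the helper name and the 40-byte adjustment are illustrative assumptions only.

#include <asmjit/x86.h>

using namespace asmjit;

// Placeholder standing in for a debug helper such as debug_log_hashjmp_fail().
static void log_hashjmp_fail() {}

// Emit a helper call while keeping RSP 16-byte aligned at the CALL site.
static void emit_aligned_helper_call(x86::Assembler& a)
{
  a.sub(x86::rsp, 40);  // 32-byte shadow space + 8 bytes to restore 16-byte alignment
  a.mov(x86::rax, (uint64_t)(uintptr_t)&log_hashjmp_fail);
  a.call(x86::rax);     // RSP is 16-byte aligned here, as the ABI requires
  a.add(x86::rsp, 40);
}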
56 3rdparty/asmjit/CMakeLists.txt (vendored)

@@ -1,38 +1,16 @@
cmake_minimum_required(VERSION 3.8 FATAL_ERROR)
cmake_minimum_required(VERSION 3.19 FATAL_ERROR)

cmake_policy(PUSH)

if (POLICY CMP0063)
cmake_policy(SET CMP0063 NEW) # Honor visibility properties.
endif()

if (POLICY CMP0092)
cmake_policy(SET CMP0092 NEW) # Don't add -W3 warning level by default.
endif()

# Don't create a project if it was already created by another CMakeLists.txt.
# This allows one library to embed another library without making a collision.
# Don't create a project if it was already created by another CMakeLists.txt. This makes
# it possible to support both add_subdirectory() and include() ways of using AsmJit as a
# dependency.
if (NOT CMAKE_PROJECT_NAME OR "${CMAKE_PROJECT_NAME}" STREQUAL "asmjit")
project(asmjit CXX)
endif()

include(CheckCXXCompilerFlag)
INCLUDE(CheckCXXSourceCompiles)
include(CheckCXXSourceCompiles)
include(GNUInstallDirs)

# AsmJit - Deprecated
# ===================

if (DEFINED ASMJIT_BUILD_EMBED)
message(DEPRECATION "ASMJIT_BUILD_EMBED is deprecated, use ASMJIT_EMBED")
set(ASMJIT_EMBED "${ASMJIT_BUILD_EMBED}")
endif()

if (DEFINED ASMJIT_BUILD_STATIC)
message(DEPRECATION "ASMJIT_BUILD_STATIC is deprecated, use ASMJIT_STATIC")
set(ASMJIT_STATIC "${ASMJIT_BUILD_STATIC}")
endif()

# AsmJit - Configuration - Build
# ==============================

@@ -212,18 +190,14 @@ function(asmjit_add_target target target_type)
add_library(${target} ${target_type} ${X_SOURCES})
endif()

set_target_properties(${target} PROPERTIES DEFINE_SYMBOL "")
target_link_libraries(${target} PRIVATE ${X_LIBRARIES})

# target_link_options was added in cmake v3.13, don't use it for now...
foreach(link_flag ${ASMJIT_PRIVATE_LFLAGS})
set_property(TARGET ${target} APPEND_STRING PROPERTY LINK_FLAGS " ${link_flag}")
endforeach()

target_compile_features(${target} PUBLIC cxx_std_11)
set_property(TARGET ${target} PROPERTY CXX_EXTENSIONS NO)
set_property(TARGET ${target} PROPERTY CXX_VISIBILITY_PRESET hidden)
set_target_properties(${target}
PROPERTIES
DEFINE_SYMBOL ""
CXX_VISIBILITY_PRESET hidden)
target_compile_options(${target} PRIVATE ${X_CFLAGS} ${ASMJIT_SANITIZE_CFLAGS} $<$<CONFIG:Debug>:${X_CFLAGS_DBG}> $<$<NOT:$<CONFIG:Debug>>:${X_CFLAGS_REL}>)
target_compile_features(${target} PUBLIC cxx_std_11)
target_link_options(${target} PRIVATE ${ASMJIT_PRIVATE_LFLAGS})
target_link_libraries(${target} PRIVATE ${X_LIBRARIES})

if ("${target_type}" STREQUAL "TEST")
add_test(NAME ${target} COMMAND ${target})

@@ -590,10 +564,8 @@ if (NOT ASMJIT_EMBED)
$<BUILD_INTERFACE:${ASMJIT_INCLUDE_DIRS}>
$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>)

# Add blend2d::blend2d alias.
# Create an asmjit::asmjit alias.
add_library(asmjit::asmjit ALIAS asmjit)
# TODO: [CMAKE] Deprecated alias - we use projectname::libraryname convention now.
add_library(AsmJit::AsmJit ALIAS asmjit)

# Add AsmJit install instructions (library and public headers).
if (NOT ASMJIT_NO_INSTALL)

@@ -713,5 +685,3 @@ if (NOT ASMJIT_EMBED)
endif()
endif()

cmake_policy(POP)
60 3rdparty/asmjit/src/asmjit/arm/a64assembler.cpp (vendored)

@@ -53,6 +53,21 @@ static constexpr uint32_t kWX = InstDB::kWX;
static const uint8_t armShiftOpToLdStOptMap[] = { ASMJIT_LOOKUP_TABLE_16(VALUE, 0) };
#undef VALUE

// a64::Assembler - ExtendOpToRegType
// ==================================

static inline RegType extendOptionToRegType(uint32_t option) noexcept {
uint32_t pred = (uint32_t(RegType::kARM_GpW) << (0x0 * 4)) | // 0b000 - UXTB.
(uint32_t(RegType::kARM_GpW) << (0x1 * 4)) | // 0b001 - UXTH.
(uint32_t(RegType::kARM_GpW) << (0x2 * 4)) | // 0b010 - UXTW.
(uint32_t(RegType::kARM_GpX) << (0x3 * 4)) | // 0b011 - UXTX|LSL.
(uint32_t(RegType::kARM_GpW) << (0x4 * 4)) | // 0b100 - SXTB.
(uint32_t(RegType::kARM_GpW) << (0x5 * 4)) | // 0b101 - SXTH.
(uint32_t(RegType::kARM_GpW) << (0x6 * 4)) | // 0b110 - SXTW.
(uint32_t(RegType::kARM_GpX) << (0x7 * 4)) ; // 0b111 - SXTX.
return RegType((pred >> (option * 4u)) & 0xFu);
}

// asmjit::a64::Assembler - SizeOp
// ===============================

@@ -1228,9 +1243,6 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co
}

if (isign4 == ENC_OPS3(Reg, Reg, Reg) || isign4 == ENC_OPS4(Reg, Reg, Reg, Imm)) {
if (!checkSignature(o1, o2))
goto InvalidInstruction;

uint32_t opSize = x ? 64 : 32;
uint64_t shift = 0;
uint32_t sType = uint32_t(ShiftOp::kLSL);

@@ -1247,11 +1259,17 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co
if (sType <= uint32_t(ShiftOp::kASR)) {
bool hasSP = o0.as<Gp>().isSP() || o1.as<Gp>().isSP();
if (!hasSP) {
if (!checkGpId(o0, o1, kZR))
goto InvalidPhysId;
if (!checkSignature(o1, o2)) {
goto InvalidInstruction;
}

if (shift >= opSize)
if (!checkGpId(o0, o1, kZR)) {
goto InvalidPhysId;
}

if (shift >= opSize) {
goto InvalidImmediate;
}

opcode.reset(uint32_t(opData.shiftedOp) << 21);
opcode.addImm(x, 31);

@@ -1264,8 +1282,10 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co
}

// SP register can only be used with LSL or Extend.
if (sType != uint32_t(ShiftOp::kLSL))
if (sType != uint32_t(ShiftOp::kLSL)) {
goto InvalidImmediate;
}

sType = x ? uint32_t(ShiftOp::kUXTX) : uint32_t(ShiftOp::kUXTW);
}

@@ -1273,8 +1293,9 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co
opcode.reset(uint32_t(opData.extendedOp) << 21);
sType -= uint32_t(ShiftOp::kUXTB);

if (sType > 7 || shift > 4)
if (sType > 7 || shift > 4) {
goto InvalidImmediate;
}

if (!(opcode.get() & B(29))) {
// ADD|SUB (extend) - ZR is not allowed.

@@ -1287,6 +1308,11 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co
goto InvalidPhysId;
}

// Validate whether the register operands match extend option.
if (o2.as<Reg>().type() != extendOptionToRegType(sType) || o1.as<Reg>().type() < o2.as<Reg>().type()) {
goto InvalidInstruction;
}

opcode.addImm(x, 31);
opcode.addReg(o2, 16);
opcode.addImm(sType, 13);

@@ -1412,9 +1438,6 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co
}

if (isign4 == ENC_OPS2(Reg, Reg) || isign4 == ENC_OPS3(Reg, Reg, Imm)) {
if (!checkSignature(o0, o1))
goto InvalidInstruction;

uint32_t opSize = x ? 64 : 32;
uint32_t sType = 0;
uint64_t shift = 0;

@@ -1429,8 +1452,13 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co
// Shift operation - LSL, LSR, ASR.
if (sType <= uint32_t(ShiftOp::kASR)) {
if (!hasSP) {
if (shift >= opSize)
if (!checkSignature(o0, o1)) {
goto InvalidInstruction;
}

if (shift >= opSize) {
goto InvalidImmediate;
}

opcode.reset(uint32_t(opData.shiftedOp) << 21);
opcode.addImm(x, 31);

@@ -1451,8 +1479,14 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co

// Extend operation - UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX.
sType -= uint32_t(ShiftOp::kUXTB);
if (sType > 7 || shift > 4)
if (sType > 7 || shift > 4) {
goto InvalidImmediate;
}

// Validate whether the register operands match extend option.
if (o1.as<Reg>().type() != extendOptionToRegType(sType) || o0.as<Reg>().type() < o1.as<Reg>().type()) {
goto InvalidInstruction;
}

opcode.reset(uint32_t(opData.extendedOp) << 21);
opcode.addImm(x, 31);
11 3rdparty/asmjit/src/asmjit/arm/a64emitter.h (vendored)

@@ -84,6 +84,17 @@ struct EmitterExplicitT {

//! \endcond

//! \name Native Registers
//! \{

//! Returns either 32-bit or 64-bit GP register of the given `id` depending on the emitter's architecture.
inline Gp gpz(uint32_t id) const noexcept { return Gp(_emitter()->_gpSignature, id); }
//! Clones the given `reg` to either 32-bit or 64-bit GP register depending on the emitter's architecture.
inline Gp gpz(const Gp& reg) const noexcept { return Gp(_emitter()->_gpSignature, reg.id()); }

//! \}

//! \name General Purpose Instructions
//! \{
4 3rdparty/asmjit/src/asmjit/arm/a64func.cpp (vendored)

@@ -13,7 +13,7 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64)

namespace FuncInternal {

static inline bool shouldThreatAsCDecl(CallConvId ccId) noexcept {
static inline bool shouldTreatAsCDecl(CallConvId ccId) noexcept {
return ccId == CallConvId::kCDecl ||
ccId == CallConvId::kStdCall ||
ccId == CallConvId::kFastCall ||

@@ -53,7 +53,7 @@ ASMJIT_FAVOR_SIZE Error initCallConv(CallConv& cc, CallConvId ccId, const Enviro
cc.setPassedOrder(RegGroup::kVec, 0, 1, 2, 3, 4, 5, 6, 7);
cc.setNaturalStackAlignment(16);

if (shouldThreatAsCDecl(ccId)) {
if (shouldTreatAsCDecl(ccId)) {
// ARM doesn't have that many calling conventions as we can find in X86 world, treat most conventions as __cdecl.
cc.setId(CallConvId::kCDecl);
cc.setPreservedRegs(RegGroup::kGp, Support::bitMask(Gp::kIdOs, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30));
2 3rdparty/asmjit/src/asmjit/arm/a64globals.h (vendored)

@@ -21,7 +21,7 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! AArch64 instruction.
//!
//! \note Only used to hold ARM-specific enumerations and static functions.
struct Inst {
namespace Inst {
//! Instruction id.
enum Id : uint32_t {
// ${InstId:Begin}

@@ -137,11 +137,13 @@ Error queryRWInfo(const BaseInst& inst, const Operand_* operands, size_t opCount

if (memOp.hasBase()) {
op.addOpFlags(OpRWFlags::kMemBaseRead);
if ((memOp.hasIndex() || memOp.hasOffset()) && memOp.isPreOrPost()) {
op.addOpFlags(OpRWFlags::kMemBaseWrite);
}
}

if (memOp.hasIndex()) {
op.addOpFlags(OpRWFlags::kMemIndexRead);
op.addOpFlags(memOp.isPreOrPost() ? OpRWFlags::kMemIndexWrite : OpRWFlags::kNone);
}
}
}

@@ -191,10 +193,13 @@ Error queryRWInfo(const BaseInst& inst, const Operand_* operands, size_t opCount

if (memOp.hasBase()) {
op.addOpFlags(OpRWFlags::kMemBaseRead);
if ((memOp.hasIndex() || memOp.hasOffset()) && memOp.isPreOrPost()) {
op.addOpFlags(OpRWFlags::kMemBaseWrite);
}
}

if (memOp.hasIndex()) {
op.addOpFlags(memOp.isPreOrPost() ? OpRWFlags::kMemIndexRW : OpRWFlags::kMemIndexRead);
op.addOpFlags(OpRWFlags::kMemIndexRead);
}
}
}
2 3rdparty/asmjit/src/asmjit/arm/a64instdb.cpp (vendored)

@@ -210,7 +210,7 @@ const InstInfo _instInfoTable[] = {
INST(Ldlarb , BaseRM_NoImm , (0b0000100011011111011111, kW , kZR, 0 ) , kRWI_W , 0 , 8 ), // #149
INST(Ldlarh , BaseRM_NoImm , (0b0100100011011111011111, kW , kZR, 0 ) , kRWI_W , 0 , 9 ), // #150
INST(Ldnp , BaseLdpStp , (0b0010100001, 0 , kWX, 31, 2) , kRWI_WW , 0 , 0 ), // #151
INST(Ldp , BaseLdpStp , (0b0010100101, 0b0010100011, kWX, 31, 2) , kRWI_W , 0 , 1 ), // #152
INST(Ldp , BaseLdpStp , (0b0010100101, 0b0010100011, kWX, 31, 2) , kRWI_WW , 0 , 1 ), // #152
INST(Ldpsw , BaseLdpStp , (0b0110100101, 0b0110100011, kX , 0 , 2) , kRWI_WW , 0 , 2 ), // #153
INST(Ldr , BaseLdSt , (0b1011100101, 0b10111000010, 0b10111000011, 0b00011000, kWX, 30, 2, Inst::kIdLdur) , kRWI_W , 0 , 0 ), // #154
INST(Ldraa , BaseRM_SImm10 , (0b1111100000100000000001, kX , kZR, 0, 3) , kRWI_W , 0 , 0 ), // #155
48 3rdparty/asmjit/src/asmjit/arm/a64operand.h (vendored)

@@ -129,17 +129,45 @@ public:
//! Resets vector element type to none.
ASMJIT_INLINE_NODEBUG void resetElementType() noexcept { _signature.setField<kSignatureRegElementTypeMask>(0); }

ASMJIT_INLINE_NODEBUG constexpr bool isVecB8() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecD>::kSignature | kSignatureElementB); }
ASMJIT_INLINE_NODEBUG constexpr bool isVecH4() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecD>::kSignature | kSignatureElementH); }
ASMJIT_INLINE_NODEBUG constexpr bool isVecS2() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecD>::kSignature | kSignatureElementS); }
ASMJIT_INLINE_NODEBUG constexpr bool isVecD1() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecD>::kSignature); }
ASMJIT_INLINE_NODEBUG constexpr bool isVecB8() const noexcept {
return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecD>::kSignature | kSignatureElementB);
}

ASMJIT_INLINE_NODEBUG constexpr bool isVecB16() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementB); }
ASMJIT_INLINE_NODEBUG constexpr bool isVecH8() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementH); }
ASMJIT_INLINE_NODEBUG constexpr bool isVecS4() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementS); }
ASMJIT_INLINE_NODEBUG constexpr bool isVecD2() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementD); }
ASMJIT_INLINE_NODEBUG constexpr bool isVecB4x4() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementB4); }
ASMJIT_INLINE_NODEBUG constexpr bool isVecH2x4() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementH2); }
ASMJIT_INLINE_NODEBUG constexpr bool isVecH4() const noexcept {
return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecD>::kSignature | kSignatureElementH);
}

ASMJIT_INLINE_NODEBUG constexpr bool isVecS2() const noexcept {
return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecD>::kSignature | kSignatureElementS);
}

ASMJIT_INLINE_NODEBUG constexpr bool isVecD1() const noexcept {
return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecD>::kSignature);
}

ASMJIT_INLINE_NODEBUG constexpr bool isVecB16() const noexcept {
return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementB);
}

ASMJIT_INLINE_NODEBUG constexpr bool isVecH8() const noexcept {
return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementH);
}

ASMJIT_INLINE_NODEBUG constexpr bool isVecS4() const noexcept {
return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementS);
}

ASMJIT_INLINE_NODEBUG constexpr bool isVecD2() const noexcept {
return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementD);
}

ASMJIT_INLINE_NODEBUG constexpr bool isVecB4x4() const noexcept {
return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementB4);
}

ASMJIT_INLINE_NODEBUG constexpr bool isVecH2x4() const noexcept {
return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementH2);
}

//! Creates a cloned register with element access.
ASMJIT_INLINE_NODEBUG Vec at(uint32_t elementIndex) const noexcept {
34 3rdparty/asmjit/src/asmjit/core.h (vendored)

@@ -145,8 +145,7 @@ namespace asmjit {
//! ### Supported Backends / Architectures
//!
//! - **X86** and **X86_64** - Both 32-bit and 64-bit backends tested on CI.
//! - **AArch64** - AArch64 backend is currently only partially tested (there is no native AArch64 runner to test
//!   AsmJit Builder/Compiler).
//! - **AArch64** - Tested on CI (Native Apple runners and Linux emulated via QEMU).
//!
//! ### Static Builds and Embedding
//!

@@ -740,15 +739,17 @@ namespace asmjit {
//! JitAllocator allocator;
//!
//! // Allocate an executable virtual memory and handle a possible failure.
//! void* p = allocator.alloc(estimatedSize);
//! if (!p)
//! JitAllocator::Span span;
//! Error err = allocator.alloc(span, estimatedSize);
//!
//! if (err != kErrorOk) // <- NOTE: This must be checked, always!
//!   return 0;
//!
//! // Now relocate the code to the address provided by the memory allocator.
//! // Please note that this DOESN'T COPY anything to `p`. This function will
//! // store the address in CodeHolder and use relocation entries to patch the
//! // existing code in all sections to respect the base address provided.
//! code.relocateToBase((uint64_t)p);
//! // Please note that this DOESN'T COPY anything to it. This function will
//! // store the address in CodeHolder and use relocation entries to patch
//! // the existing code in all sections to respect the base address provided.
//! code.relocateToBase((uint64_t)span.rx());
//!
//! // This is purely optional. There are cases in which the relocation can omit
//! // unneeded data, which would shrink the size of address table. If that

@@ -761,12 +762,17 @@ namespace asmjit {
//! // additional options that can be used to also zero pad sections' virtual
//! // size, etc.
//! //
//! // With some additional features, copyFlattenData() does roughly this:
//! // for (Section* section : code.sections())
//! //   memcpy((uint8_t*)p + section->offset(),
//! //     section->data(),
//! //     section->bufferSize());
//! code.copyFlattenedData(p, codeSize, CopySectionFlags::kPadSectionBuffer);
//! // With some additional features, copyFlattenData() does roughly the following:
//! //
//! // allocator.write([&](JitAllocator::Span& span) {
//! //   for (Section* section : code.sections()) {
//! //     uint8_t* p = (uint8_t*)span.rw() + section->offset();
//! //     memcpy(p, section->data(), section->bufferSize());
//! //   }
//! // }
//! allocator.write([&](JitAllocator::Span& span) {
//!   code.copyFlattenedData(span.rw(), codeSize, CopySectionFlags::kPadSectionBuffer);
//! });
//!
//! // Execute the generated function.
//! int inA[4] = { 4, 3, 2, 1 };
4 3rdparty/asmjit/src/asmjit/core/api-config.h (vendored)

@@ -54,8 +54,6 @@
// Build Options
// =============

#define ASMJIT_STATIC

// NOTE: Doxygen cannot document macros that are not defined, that's why we have to define them and then undefine
// them immediately, so it won't use the macros with its own preprocessor.
#ifdef _DOXYGEN

@@ -234,7 +232,7 @@ namespace asmjit {
#define ASMJIT_ARCH_BITS (ASMJIT_ARCH_X86 | ASMJIT_ARCH_ARM | ASMJIT_ARCH_MIPS | ASMJIT_ARCH_RISCV)
#if ASMJIT_ARCH_BITS == 0
#undef ASMJIT_ARCH_BITS
#if defined (__LP64__) || defined(_LP64)
#if defined(__LP64__) || defined(_LP64)
#define ASMJIT_ARCH_BITS 64
#else
#define ASMJIT_ARCH_BITS 32
21 3rdparty/asmjit/src/asmjit/core/cpuinfo.cpp (vendored)

@@ -16,10 +16,6 @@
#endif
#endif // ASMJIT_ARCH_X86

#if !defined(_WIN32)
#include <unistd.h>
#endif

#if ASMJIT_ARCH_ARM
// Required by various utilities that are required by features detection.
#if !defined(_WIN32)

@@ -53,6 +49,17 @@
#endif
#endif // ASMJIT_ARCH_ARM

#if !defined(_WIN32) && (ASMJIT_ARCH_X86 || ASMJIT_ARCH_ARM)
#include <unistd.h>
#endif

// Unfortunately when compiling in C++11 mode MSVC would warn about unused functions as
// [[maybe_unused]] attribute is not used in that case (it's used only by C++17 mode and later).
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable: 4505) // unreferenced local function has been removed.
#endif // _MSC_VER

ASMJIT_BEGIN_NAMESPACE

// CpuInfo - Detect - Compatibility

@@ -198,7 +205,7 @@ static ASMJIT_FAVOR_SIZE void simplifyCpuBrand(char* s) noexcept {
if (!c)
break;

if (!(c == ' ' && (prev == '@' || s[1] == ' ' || s[1] == '@'))) {
if (!(c == ' ' && (prev == '@' || s[1] == ' ' || s[1] == '@' || s[1] == '\0'))) {
*d++ = c;
prev = c;
}

@@ -1997,4 +2004,8 @@ const CpuInfo& CpuInfo::host() noexcept {
return cpuInfoGlobal;
}

#if defined(_MSC_VER)
#pragma warning(pop)
#endif // _MSC_VER

ASMJIT_END_NAMESPACE
3 3rdparty/asmjit/src/asmjit/core/emitter.h (vendored)

@@ -373,6 +373,9 @@ public:
//! Returns the target architecture's GP register size (4 or 8 bytes).
ASMJIT_INLINE_NODEBUG uint32_t registerSize() const noexcept { return environment().registerSize(); }

//! Returns a signature of a native general purpose register (either 32-bit or 64-bit depending on the architecture).
ASMJIT_INLINE_NODEBUG OperandSignature gpSignature() const noexcept { return _gpSignature; }

//! Returns instruction alignment.
//!
//! The following values are returned based on the target architecture:
44 3rdparty/asmjit/src/asmjit/core/jitallocator.cpp (vendored)

@@ -744,26 +744,28 @@ void JitAllocator::reset(ResetPolicy resetPolicy) noexcept {
JitAllocatorPool& pool = impl->pools[poolId];
JitAllocatorBlock* block = pool.blocks.first();

JitAllocatorBlock* blockToKeep = nullptr;
if (resetPolicy != ResetPolicy::kHard && uint32_t(impl->options & JitAllocatorOptions::kImmediateRelease) == 0) {
blockToKeep = block;
block = block->next();
}

while (block) {
JitAllocatorBlock* next = block->next();
JitAllocatorImpl_deleteBlock(impl, block);
block = next;
}

pool.reset();

if (blockToKeep) {
blockToKeep->_listNodes[0] = nullptr;
blockToKeep->_listNodes[1] = nullptr;
JitAllocatorImpl_wipeOutBlock(impl, blockToKeep);
JitAllocatorImpl_insertBlock(impl, blockToKeep);
pool.emptyBlockCount = 1;
if (block) {
JitAllocatorBlock* blockToKeep = nullptr;
if (resetPolicy != ResetPolicy::kHard && uint32_t(impl->options & JitAllocatorOptions::kImmediateRelease) == 0) {
blockToKeep = block;
block = block->next();
}

while (block) {
JitAllocatorBlock* next = block->next();
JitAllocatorImpl_deleteBlock(impl, block);
block = next;
}

if (blockToKeep) {
blockToKeep->_listNodes[0] = nullptr;
blockToKeep->_listNodes[1] = nullptr;
JitAllocatorImpl_wipeOutBlock(impl, blockToKeep);
JitAllocatorImpl_insertBlock(impl, blockToKeep);
pool.emptyBlockCount = 1;
}
}
}
}

@@ -1387,6 +1389,11 @@ static void BitVectorRangeIterator_testRandom(Random& rnd, size_t count) noexcep
}
}

static void test_jit_allocator_reset_empty() noexcept {
JitAllocator allocator;
allocator.reset(ResetPolicy::kSoft);
}

static void test_jit_allocator_alloc_release() noexcept {
size_t kCount = BrokenAPI::hasArg("--quick") ? 20000 : 100000;

@@ -1553,6 +1560,7 @@ static void test_jit_allocator_query() noexcept {
}

UNIT(jit_allocator) {
test_jit_allocator_reset_empty();
test_jit_allocator_alloc_release();
test_jit_allocator_query();
}
42 3rdparty/asmjit/src/asmjit/core/operand.h (vendored)

@@ -738,26 +738,24 @@ struct Operand_ {

//! Returns a size of a register or an X86 memory operand.
//!
//! At the moment only X86 and X86_64 memory operands have a size - other memory operands can use bits that represent
//! size as an additional payload. This means that memory size is architecture specific and should be accessed via
//! \ref x86::Mem::size(). Sometimes when the user knows that the operand is either a register or memory operand this
//! function can be helpful as it avoids casting.
ASMJIT_INLINE_NODEBUG constexpr uint32_t x86RmSize() const noexcept {
return _signature.size();
}

#if !defined(ASMJIT_NO_DEPRECATED)
ASMJIT_DEPRECATED("hasSize() is no longer portable - use x86RmSize() instead, if your target is X86/X86_64")
ASMJIT_INLINE_NODEBUG constexpr bool hasSize() const noexcept { return x86RmSize() != 0u; }

ASMJIT_DEPRECATED("hasSize() is no longer portable - use x86RmSize() instead, if your target is X86/X86_64")
ASMJIT_INLINE_NODEBUG constexpr bool hasSize(uint32_t s) const noexcept { return x86RmSize() == s; }

ASMJIT_DEPRECATED("size() is no longer portable - use x86RmSize() instead, if your target is X86/X86_64")
ASMJIT_INLINE_NODEBUG constexpr uint32_t size() const noexcept { return _signature.getField<Signature::kSizeMask>(); }
#endif
//! \remarks At the moment only X86 and X86_64 memory operands have a size - other memory operands can use bits
//! that represent size as an additional payload. This means that memory size is architecture specific and should
//! be accessed via \ref x86::Mem::size(). Sometimes when the user knows that the operand is either a register or
//! memory operand this function can be helpful as it avoids casting, but it only works when it targets X86 and X86_64.
ASMJIT_INLINE_NODEBUG constexpr uint32_t x86RmSize() const noexcept { return _signature.size(); }

//! \}

#if !defined(ASMJIT_NO_DEPRECATED)
ASMJIT_DEPRECATED("hasSize() is no longer portable - use x86RmSize() or x86::Mem::hasSize() instead, if your target is X86/X86_64")
ASMJIT_INLINE_NODEBUG constexpr bool hasSize() const noexcept { return x86RmSize() != 0u; }

ASMJIT_DEPRECATED("hasSize() is no longer portable - use x86RmSize() or x86::Mem::hasSize() instead, if your target is X86/X86_64")
ASMJIT_INLINE_NODEBUG constexpr bool hasSize(uint32_t s) const noexcept { return x86RmSize() == s; }

ASMJIT_DEPRECATED("size() is no longer portable - use x86RmSize() or x86::Mem::size() instead, if your target is X86/X86_64")
ASMJIT_INLINE_NODEBUG constexpr uint32_t size() const noexcept { return _signature.getField<Signature::kSizeMask>(); }
#endif
};

//! Base class representing an operand in AsmJit (default constructed version).

@@ -1600,9 +1598,6 @@ public:
//! Resets the memory operand's INDEX register.
ASMJIT_INLINE_NODEBUG void resetIndex() noexcept { _setIndex(RegType::kNone, 0); }

//! Sets the memory operand size (in bytes).
ASMJIT_INLINE_NODEBUG void setSize(uint32_t size) noexcept { _signature.setField<Signature::kSizeMask>(size); }

//! Tests whether the memory operand has a 64-bit offset or absolute address.
//!
//! If this is true then `hasBase()` must always report false.

@@ -1670,6 +1665,11 @@ public:
ASMJIT_INLINE_NODEBUG void resetOffsetLo32() noexcept { setOffsetLo32(0); }

//! \}

#if !defined(ASMJIT_NO_DEPRECATED)
ASMJIT_DEPRECATED("setSize() is no longer portable - use setX86RmSize() or x86::Mem::setSize() instead, if your target is X86/X86_64")
ASMJIT_INLINE_NODEBUG void setSize(uint32_t size) noexcept { _signature.setField<Signature::kSizeMask>(size); }
#endif
};

//! Type of the an immediate value.
10 3rdparty/asmjit/src/asmjit/core/radefs_p.h (vendored)

@@ -559,7 +559,7 @@ public:

ASMJIT_FORCE_INLINE Error nonOverlappingUnionOf(ZoneAllocator* allocator, const RALiveSpans<T>& x, const RALiveSpans<T>& y, const DataType& yData) noexcept {
uint32_t finalSize = x.size() + y.size();
ASMJIT_PROPAGATE(_data.reserve(allocator, finalSize));
ASMJIT_PROPAGATE(_data.growingReserve(allocator, finalSize));

T* dstPtr = _data.data();
const T* xSpan = x.data();

@@ -694,7 +694,7 @@ typedef RALiveSpans<LiveRegSpan> LiveRegSpans;
//! - LEA x{ W|Out}, [x{R|Use} + y{R|Out}] -> {x:R|W|Use|Out y:R|Use}
//!
//! It should be obvious from the example above how these flags get created. Each operand contains READ/WRITE
//! information, which is then merged to RATiedReg's flags. However, we also need to represent the possitility
//! information, which is then merged to RATiedReg's flags. However, we also need to represent the possibility
//! to view the operation as two independent operations - USE and OUT, because the register allocator first
//! allocates USE registers, and then assigns OUT registers independently of USE registers.
enum class RATiedFlags : uint32_t {

@@ -767,6 +767,12 @@ enum class RATiedFlags : uint32_t {
// Instruction Flags (Never used by RATiedReg)
// -------------------------------------------

//! Instruction has been patched to address a memory location instead of a register.
//!
//! This is currently only possible on X86 or X86_64 targets. It informs rewriter to rewrite the instruction if
//! necessary.
kInst_RegToMemPatched = 0x40000000u,

//! Instruction is transformable to another instruction if necessary.
//!
//! This is flag that is only used by \ref RAInst to inform register allocator that the instruction has some
13 3rdparty/asmjit/src/asmjit/core/ralocal.cpp (vendored)

@@ -137,9 +137,6 @@ Error RALocalAllocator::switchToAssignment(PhysToWorkMap* dstPhysToWorkMap, cons
dst.initMaps(dstPhysToWorkMap, _tmpWorkToPhysMap);
dst.assignWorkIdsFromPhysIds();

if (tryMode)
return kErrorOk;

for (RegGroup group : RegGroupVirtValues{}) {
// STEP 1
// ------

@@ -597,10 +594,14 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
if (rmSize <= workReg->virtReg()->virtSize()) {
Operand& op = node->operands()[opIndex];
op = _pass->workRegAsMem(workReg);
op.as<BaseMem>().setSize(rmSize);

// NOTE: We cannot use `x86::Mem::setSize()` from here, so let's manipulate the signature directly.
op._signature.setSize(rmSize);

tiedReg->_useRewriteMask = 0;

tiedReg->markUseDone();
raInst->addFlags(RATiedFlags::kInst_RegToMemPatched);
usePending--;

rmAllocated = true;

@@ -687,7 +688,7 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
// ------
//
// ALLOCATE / SHUFFLE all registers that we marked as `willUse` and weren't allocated yet. This is a bit
// complicated as the allocation is iterative. In some cases we have to wait before allocating a particual
// complicated as the allocation is iterative. In some cases we have to wait before allocating a particular
// physical register as it's still occupied by some other one, which we need to move before we can use it.
// In this case we skip it and allocate another some other instead (making it free for another iteration).
//

@@ -836,7 +837,7 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
// STEP 9
// ------
//
// Vector registers can be cloberred partially by invoke - find if that's the case and clobber when necessary.
// Vector registers can be clobbered partially by invoke - find if that's the case and clobber when necessary.

if (node->isInvoke() && group == RegGroup::kVec) {
const InvokeNode* invokeNode = node->as<InvokeNode>();
2 3rdparty/asmjit/src/asmjit/core/rapass_p.h (vendored)

@@ -335,6 +335,8 @@ public:
//! Clears instruction `flags` from this RAInst.
ASMJIT_INLINE_NODEBUG void clearFlags(RATiedFlags flags) noexcept { _flags &= ~flags; }

//! Tests whether one operand of this instruction has been patched from Reg to Mem.
ASMJIT_INLINE_NODEBUG bool isRegToMemPatched() const noexcept { return hasFlag(RATiedFlags::kInst_RegToMemPatched); }
//! Tests whether this instruction can be transformed to another instruction if necessary.
ASMJIT_INLINE_NODEBUG bool isTransformable() const noexcept { return hasFlag(RATiedFlags::kInst_IsTransformable); }
94 3rdparty/asmjit/src/asmjit/core/string.cpp (vendored)

@@ -14,9 +14,51 @@ ASMJIT_BEGIN_NAMESPACE

static const char String_baseN[] = "0123456789ABCDEF";

constexpr size_t kMinAllocSize = 64;
constexpr size_t kMinAllocSize = 128;
constexpr size_t kMaxAllocSize = SIZE_MAX - Globals::kGrowThreshold;

// Based on ZoneVector_growCapacity().
//
// NOTE: The sizes here include null terminators - that way we can have aligned allocations that are power of 2s
// initially.
static ASMJIT_FORCE_INLINE size_t String_growCapacity(size_t byteSize, size_t minimumByteSize) noexcept {
static constexpr size_t kGrowThreshold = Globals::kGrowThreshold;

ASMJIT_ASSERT(minimumByteSize < kMaxAllocSize);

// This is more than exponential growth at the beginning.
if (byteSize < kMinAllocSize) {
byteSize = kMinAllocSize;
}
else if (byteSize < 512) {
byteSize = 512;
}

if (byteSize < minimumByteSize) {
// Exponential growth before we reach `kGrowThreshold`.
byteSize = Support::alignUpPowerOf2(minimumByteSize);

// Bail to `minimumByteSize` in case of overflow - most likely whatever that is happening afterwards would just fail.
if (byteSize < minimumByteSize) {
return minimumByteSize;
}

// Pretty much chunked growth advancing by `kGrowThreshold` after we exceed it.
if (byteSize > kGrowThreshold) {
// Align to kGrowThreshold.
size_t remainder = minimumByteSize % kGrowThreshold;

byteSize = minimumByteSize + remainder;

// Bail to `minimumByteSize` in case of overflow.
if (byteSize < minimumByteSize)
return minimumByteSize;
}
}

return Support::min<size_t>(byteSize, kMaxAllocSize);
}

// String - Clear & Reset
// ======================

@@ -49,13 +91,13 @@ char* String::prepare(ModifyOp op, size_t size) noexcept {
size_t curCapacity;

if (isLargeOrExternal()) {
curData = this->_large.data;
curSize = this->_large.size;
curCapacity = this->_large.capacity;
curData = _large.data;
curSize = _large.size;
curCapacity = _large.capacity;
}
else {
curData = this->_small.data;
curSize = this->_small.type;
curData = _small.data;
curSize = _small.type;
curCapacity = kSSOCapacity;
}

@@ -90,25 +132,20 @@ char* String::prepare(ModifyOp op, size_t size) noexcept {
}
else {
// Prevent arithmetic overflow.
if (ASMJIT_UNLIKELY(size >= kMaxAllocSize - curSize))
if (ASMJIT_UNLIKELY(size >= kMaxAllocSize - curSize - 1))
return nullptr;

size_t newSize = size + curSize;
size_t newSizePlusOne = newSize + 1;

if (newSizePlusOne > curCapacity) {
size_t newCapacity = Support::max<size_t>(curCapacity + 1, kMinAllocSize);
if (newSize > curCapacity) {
size_t newCapacityPlusOne = String_growCapacity(size + 1u, newSizePlusOne);
ASMJIT_ASSERT(newCapacityPlusOne >= newSizePlusOne);

if (newCapacity < newSizePlusOne && newCapacity < Globals::kGrowThreshold)
newCapacity = Support::alignUpPowerOf2(newCapacity);

if (newCapacity < newSizePlusOne)
newCapacity = Support::alignUp(newSizePlusOne, Globals::kGrowThreshold);

if (ASMJIT_UNLIKELY(newCapacity < newSizePlusOne))
if (ASMJIT_UNLIKELY(newCapacityPlusOne < newSizePlusOne))
return nullptr;

char* newData = static_cast<char*>(::malloc(newCapacity));
char* newData = static_cast<char*>(::malloc(newCapacityPlusOne));
if (ASMJIT_UNLIKELY(!newData))
return nullptr;

@@ -119,7 +156,7 @@ char* String::prepare(ModifyOp op, size_t size) noexcept {

_large.type = kTypeLarge;
_large.size = newSize;
_large.capacity = newCapacity - 1;
_large.capacity = newCapacityPlusOne - 1;
_large.data = newData;

newData[newSize] = '\0';

@@ -488,9 +525,28 @@ bool String::equals(const char* other, size_t size) const noexcept {
// ==============

#if defined(ASMJIT_TEST)
static void test_string_grow() noexcept {
String s;
size_t c = s.capacity();

INFO("Testing string grow strategy (SSO capacity: %zu)", c);
for (size_t i = 0; i < 1000000; i++) {
s.append('x');
if (s.capacity() != c) {
c = s.capacity();
INFO("  String reallocated to new capacity: %zu", c);
}
}

// We don't expect a 1 million character string to occupy 4MiB, for example. So verify that!
EXPECT_LT(c, size_t(4 * 1024 * 1024));
}

UNIT(core_string) {
String s;

INFO("Testing string functionality");

EXPECT_FALSE(s.isLargeOrExternal());
EXPECT_FALSE(s.isExternal());

@@ -553,6 +609,8 @@ UNIT(core_string) {
EXPECT_TRUE(sTmp.isExternal());
EXPECT_EQ(sTmp.appendChars(' ', 1000), kErrorOk);
EXPECT_FALSE(sTmp.isExternal());

test_string_grow();
}
#endif
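The growth policy above (String_growCapacity(), and ZoneVector_growCapacity() further below) follows the same shape: start from a small fixed bucket, grow roughly exponentially until Globals::kGrowThreshold is exceeded, then advance in threshold-sized chunks. The following standalone C++ sketch is a simplified model of that shape, not the library's code; the 1 MiB threshold is an assumed value, and the loop is a deliberately naive stand-in for the align-up arithmetic used in the real functions.

#include <cstddef>
#include <cstdio>
#include <initializer_list>

// Simplified model of the chunked growth strategy described above.
static size_t grow_capacity(size_t current, size_t minimum)
{
  constexpr size_t kMinAlloc = 128;               // matches kMinAllocSize in the diff
  constexpr size_t kGrowThreshold = 1024 * 1024;  // assumed value of Globals::kGrowThreshold

  size_t n = current < kMinAlloc ? kMinAlloc : current;
  while (n < minimum)
    n = (n < kGrowThreshold) ? n * 2 : n + kGrowThreshold;
  return n;
}

int main()
{
  // Capacity stays a power of two while small, then grows by fixed threshold-sized steps.
  for (size_t need : { size_t(200), size_t(70000), size_t(3u << 20) })
    std::printf("need %zu -> capacity %zu\n", need, grow_capacity(0, need));
  return 0;
}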
6 3rdparty/asmjit/src/asmjit/core/support.h (vendored)

@@ -1615,10 +1615,10 @@ public:
ASMJIT_INLINE_NODEBUG bool operator>=(const ArrayReverseIterator& other) const noexcept { return _ptr >= other._ptr; }

ASMJIT_INLINE_NODEBUG ArrayReverseIterator& operator++() noexcept { _ptr--; return *this; }
ASMJIT_INLINE_NODEBUG ArrayReverseIterator& operator++(int) noexcept { ArrayReverseIterator prev(*this); _ptr--; return prev; }

ASMJIT_INLINE_NODEBUG ArrayReverseIterator& operator--() noexcept { _ptr++; return *this; }
ASMJIT_INLINE_NODEBUG ArrayReverseIterator& operator--(int) noexcept { ArrayReverseIterator prev(*this); _ptr++; return prev; }

ASMJIT_INLINE_NODEBUG ArrayReverseIterator operator++(int) noexcept { ArrayReverseIterator prev(*this); _ptr--; return prev; }
ASMJIT_INLINE_NODEBUG ArrayReverseIterator operator--(int) noexcept { ArrayReverseIterator prev(*this); _ptr++; return prev; }

template<typename Diff> ASMJIT_INLINE_NODEBUG ArrayReverseIterator operator+(const Diff& n) noexcept { return ArrayReverseIterator(_ptr -= n); }
template<typename Diff> ASMJIT_INLINE_NODEBUG ArrayReverseIterator operator-(const Diff& n) noexcept { return ArrayReverseIterator(_ptr += n); }
4 3rdparty/asmjit/src/asmjit/core/zonestack.h (vendored)

@@ -62,7 +62,9 @@ public:
ASMJIT_INLINE_NODEBUG void setEnd(T* end) noexcept { _end = (void*)end; }

template<typename T>
ASMJIT_INLINE_NODEBUG T* data() const noexcept { return (T*)((uint8_t*)(this) + sizeof(Block)); }
ASMJIT_INLINE_NODEBUG const T* data() const noexcept { return (const T*)((const uint8_t*)(this) + sizeof(Block)); }
template<typename T>
ASMJIT_INLINE_NODEBUG T* data() noexcept { return (T*)((uint8_t*)(this) + sizeof(Block)); }

template<typename T>
ASMJIT_INLINE_NODEBUG bool canPrepend() const noexcept { return _start > data<void>(); }
118 3rdparty/asmjit/src/asmjit/core/zonevector.cpp (vendored)

@@ -13,8 +13,63 @@ ASMJIT_BEGIN_NAMESPACE
// ZoneVectorBase - Helpers
// ========================

// ZoneVector is used as an array to hold short-lived data structures used during code generation. The growing
// strategy is simple - use small capacity at the beginning (very good for ZoneAllocator) and then grow quicker
// to prevent successive reallocations.
static ASMJIT_FORCE_INLINE uint32_t ZoneVector_growCapacity(uint32_t current, uint32_t growMinimum, uint32_t sizeOfT) noexcept {
static constexpr size_t kGrowThreshold = Globals::kGrowThreshold;

size_t byteSize = size_t(current) * sizeOfT;
size_t minimumByteSize = size_t(growMinimum) * sizeOfT;

// This is more than exponential growth at the beginning.
if (byteSize < 32) {
byteSize = 32;
}
else if (byteSize < 128) {
byteSize = 128;
}
else if (byteSize < 512) {
byteSize = 512;
}

if (byteSize < minimumByteSize) {
// Exponential growth before we reach `kGrowThreshold`.
byteSize = Support::alignUpPowerOf2(minimumByteSize);

// Bail to `growMinimum` in case of overflow - most likely whatever that is happening afterwards would just fail.
if (byteSize < minimumByteSize) {
return growMinimum;
}

// Pretty much chunked growth advancing by `kGrowThreshold` after we exceed it.
// This should not be a common case, so we don't really have to optimize for it.
if (byteSize > kGrowThreshold) {
// Align to kGrowThreshold.
size_t remainder = minimumByteSize % kGrowThreshold;

byteSize = minimumByteSize + remainder;

// Bail to `growMinimum` in case of overflow - should never happen as it's unlikely we would hit this on a 32-bit
// machine (consecutive near 4GiB allocation is impossible, and this should never happen on 64-bit machine as we
// use 32-bit size & capacity, so overflow of 64 bit integer is not possible. Added just as an extreme measure.
if (byteSize < minimumByteSize)
return growMinimum;
}
}

size_t n = byteSize / sizeOfT;
return uint32_t(Support::min<size_t>(n, 0xFFFFFFFFu));
}

static ASMJIT_FORCE_INLINE bool ZoneVector_byteSizeIsSafe(size_t nBytes, uint32_t n) noexcept {
if (sizeof(uint32_t) < sizeof(size_t))
return true; // there is no problem when running on a 64-bit machine.
else
return nBytes >= size_t(n);
};

Error ZoneVectorBase::_grow(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept {
uint32_t threshold = Globals::kGrowThreshold / sizeOfT;
uint32_t capacity = _capacity;
uint32_t after = _size;

@@ -25,29 +80,7 @@ Error ZoneVectorBase::_grow(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t
if (capacity >= after)
return kErrorOk;

// ZoneVector is used as an array to hold short-lived data structures used
// during code generation. The growing strategy is simple - use small capacity
// at the beginning (very good for ZoneAllocator) and then grow quicker to
// prevent successive reallocations.
if (capacity < 4)
capacity = 4;
else if (capacity < 8)
capacity = 8;
else if (capacity < 16)
capacity = 16;
else if (capacity < 64)
capacity = 64;
else if (capacity < 256)
capacity = 256;

while (capacity < after) {
if (capacity < threshold)
capacity *= 2;
else
capacity += threshold;
}

return _reserve(allocator, sizeOfT, capacity);
return _reserve(allocator, sizeOfT, ZoneVector_growCapacity(capacity, after, sizeOfT));
}

Error ZoneVectorBase::_reserve(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept {

@@ -55,8 +88,8 @@ Error ZoneVectorBase::_reserve(ZoneAllocator* allocator, uint32_t sizeOfT, uint3
if (oldCapacity >= n)
return kErrorOk;

uint32_t nBytes = n * sizeOfT;
if (ASMJIT_UNLIKELY(nBytes < n))
size_t nBytes = size_t(n) * sizeOfT;
if (ASMJIT_UNLIKELY(!ZoneVector_byteSizeIsSafe(nBytes, n)))
return DebugUtils::errored(kErrorOutOfMemory);

size_t allocatedBytes;

@@ -65,19 +98,28 @@ Error ZoneVectorBase::_reserve(ZoneAllocator* allocator, uint32_t sizeOfT, uint3
if (ASMJIT_UNLIKELY(!newData))
return DebugUtils::errored(kErrorOutOfMemory);

uint32_t newCapacity = uint32_t(allocatedBytes / sizeOfT);
ASMJIT_ASSERT(newCapacity >= n);

void* oldData = _data;
if (oldData && _size) {
memcpy(newData, oldData, size_t(_size) * sizeOfT);
allocator->release(oldData, size_t(oldCapacity) * sizeOfT);
}

_capacity = uint32_t(allocatedBytes / sizeOfT);
ASMJIT_ASSERT(_capacity >= n);

_data = newData;
_capacity = newCapacity;

return kErrorOk;
}

Error ZoneVectorBase::_growingReserve(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept {
uint32_t capacity = _capacity;
if (capacity >= n)
return kErrorOk;
return _reserve(allocator, sizeOfT, ZoneVector_growCapacity(capacity, n, sizeOfT));
}

Error ZoneVectorBase::_resize(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept {
uint32_t size = _size;

@@ -266,6 +308,8 @@ Error ZoneBitVector::_append(ZoneAllocator* allocator, bool value) noexcept {
#if defined(ASMJIT_TEST)
template<typename T>
static void test_zone_vector(ZoneAllocator* allocator, const char* typeName) {
constexpr uint32_t kMiB = 1024 * 1024;

int i;
int kMax = 100000;

@@ -301,12 +345,22 @@ static void test_zone_vector(ZoneAllocator* allocator, const char* typeName) {
int64_t fsum = 0;
int64_t rsum = 0;

for (const T& item : vec) { fsum += item; }
for (auto it = vec.rbegin(); it != vec.rend(); ++it) { rsum += *it; }
for (const T& item : vec) {
fsum += item;
}

for (auto it = vec.rbegin(); it != vec.rend(); ++it) {
rsum += *it;
}

EXPECT_EQ(fsum, rsum);

vec.release(allocator);

INFO("ZoneBitVector::growingReserve()");
for (uint32_t j = 0; j < 40 / sizeof(T); j += 8) {
EXPECT_EQ(vec.growingReserve(allocator, j * kMiB), kErrorOk);
EXPECT_GE(vec.capacity(), j * kMiB);
}
}

static void test_zone_bitvector(ZoneAllocator* allocator) {
17 3rdparty/asmjit/src/asmjit/core/zonevector.h (vendored)

@@ -58,6 +58,7 @@ protected:
ASMJIT_API Error _grow(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept;
ASMJIT_API Error _resize(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept;
ASMJIT_API Error _reserve(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept;
ASMJIT_API Error _growingReserve(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept;

inline void _swap(ZoneVectorBase& other) noexcept {
std::swap(_data, other._data);

@@ -414,7 +415,21 @@ public:

//! Reallocates the internal array to fit at least `n` items.
inline Error reserve(ZoneAllocator* allocator, uint32_t n) noexcept {
return n > _capacity ? ZoneVectorBase::_reserve(allocator, sizeof(T), n) : Error(kErrorOk);
if (ASMJIT_UNLIKELY(n > _capacity))
return ZoneVectorBase::_reserve(allocator, sizeof(T), n);
else
return Error(kErrorOk);
}

//! Reallocates the internal array to fit at least `n` items with growing semantics.
//!
//! If the vector is smaller than `n` the same growing calculations will be used as if N items were appended
//! to an empty vector, which means reserving additional space for more append operations that could follow.
inline Error growingReserve(ZoneAllocator* allocator, uint32_t n) noexcept {
if (ASMJIT_UNLIKELY(n > _capacity))
return ZoneVectorBase::_growingReserve(allocator, sizeof(T), n);
else
return Error(kErrorOk);
}

inline Error willGrow(ZoneAllocator* allocator, uint32_t n = 1) noexcept {
56 3rdparty/asmjit/src/asmjit/x86/x86assembler.cpp (vendored)

@@ -345,6 +345,10 @@ static ASMJIT_FORCE_INLINE uint32_t x86AltOpcodeOf(const InstDB::InstInfo* info)
return InstDB::_altOpcodeTable[info->_altOpcodeIndex];
}

static ASMJIT_FORCE_INLINE bool x86IsMmxOrXmm(const Reg& reg) noexcept {
return reg.type() == RegType::kX86_Mm || reg.type() == RegType::kX86_Xmm;
}

// x86::Assembler - X86BufferWriter
// ================================

@@ -2572,37 +2576,41 @@ CaseFpuArith_Mem:

case InstDB::kEncodingExtMovd:
CaseExtMovd:
opReg = o0.id();
opcode.add66hIf(Reg::isXmm(o0));
if (x86IsMmxOrXmm(o0.as<Reg>())) {
opReg = o0.id();
opcode.add66hIf(Reg::isXmm(o0));

// MM/XMM <- Gp
if (isign3 == ENC_OPS2(Reg, Reg) && Reg::isGp(o1)) {
rbReg = o1.id();
goto EmitX86R;
}
// MM/XMM <- Gp
if (isign3 == ENC_OPS2(Reg, Reg) && Reg::isGp(o1)) {
rbReg = o1.id();
goto EmitX86R;
}

// MM/XMM <- Mem
if (isign3 == ENC_OPS2(Reg, Mem)) {
rmRel = &o1;
goto EmitX86M;
// MM/XMM <- Mem
if (isign3 == ENC_OPS2(Reg, Mem)) {
rmRel = &o1;
goto EmitX86M;
}
}

// The following instructions use the secondary opcode.
opcode &= Opcode::kW;
opcode |= x86AltOpcodeOf(instInfo);
opReg = o1.id();
opcode.add66hIf(Reg::isXmm(o1));
if (x86IsMmxOrXmm(o1.as<Reg>())) {
opcode &= Opcode::kW;
opcode |= x86AltOpcodeOf(instInfo);
opReg = o1.id();
opcode.add66hIf(Reg::isXmm(o1));

// GP <- MM/XMM
if (isign3 == ENC_OPS2(Reg, Reg) && Reg::isGp(o0)) {
rbReg = o0.id();
goto EmitX86R;
}
// GP <- MM/XMM
if (isign3 == ENC_OPS2(Reg, Reg) && Reg::isGp(o0)) {
rbReg = o0.id();
goto EmitX86R;
}

// Mem <- MM/XMM
if (isign3 == ENC_OPS2(Mem, Reg)) {
rmRel = &o0;
goto EmitX86M;
// Mem <- MM/XMM
if (isign3 == ENC_OPS2(Mem, Reg)) {
rmRel = &o0;
goto EmitX86M;
}
}
break;
4 3rdparty/asmjit/src/asmjit/x86/x86emitter.h (vendored)

@@ -147,8 +147,10 @@ struct EmitterExplicitT {
//! \name Native Registers
//! \{

//! Returns either GPD or GPQ register of the given `id` depending on the emitter's architecture.
//! Returns either 32-bit or 64-bit GP register of the given `id` depending on the emitter's architecture.
inline Gp gpz(uint32_t id) const noexcept { return Gp(_emitter()->_gpSignature, id); }
//! Clones the given `reg` to either 32-bit or 64-bit GP register depending on the emitter's architecture.
inline Gp gpz(const Gp& reg) const noexcept { return Gp(_emitter()->_gpSignature, reg.id()); }

inline Gp zax() const noexcept { return Gp(_emitter()->_gpSignature, Gp::kIdAx); }
inline Gp zcx() const noexcept { return Gp(_emitter()->_gpSignature, Gp::kIdCx); }
4
3rdparty/asmjit/src/asmjit/x86/x86func.cpp
vendored
4
3rdparty/asmjit/src/asmjit/x86/x86func.cpp
vendored
@ -14,7 +14,7 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86)

namespace FuncInternal {

static inline bool shouldThreatAsCDeclIn64BitMode(CallConvId ccId) noexcept {
static inline bool shouldTreatAsCDeclIn64BitMode(CallConvId ccId) noexcept {
  return ccId == CallConvId::kCDecl ||
         ccId == CallConvId::kStdCall ||
         ccId == CallConvId::kThisCall ||

@ -143,7 +143,7 @@ ASMJIT_FAVOR_SIZE Error initCallConv(CallConv& cc, CallConvId ccId, const Enviro
  // Preprocess the calling convention into a common id as many conventions are normally ignored even by C/C++
  // compilers and treated as `__cdecl`.
  if (shouldThreatAsCDeclIn64BitMode(ccId))
  if (shouldTreatAsCDeclIn64BitMode(ccId))
    ccId = winABI ? CallConvId::kX64Windows : CallConvId::kX64SystemV;

  switch (ccId) {
@ -895,8 +895,10 @@ Error queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, siz
    case Inst::kIdVpternlogq: {
      if (opCount == 4 && operands[3].isImm()) {
        uint32_t predicate = operands[3].as<Imm>().valueAs<uint8_t>();

        if ((predicate >> 4) == (predicate & 0xF)) {
          out->_operands[0].clearOpFlags(OpRWFlags::kRead);
          out->_operands[0].setReadByteMask(0);
        }
      }
      break;
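A short illustration of why the nibble comparison above works: VPTERNLOG's immediate is a 3-input truth table indexed by one bit each from dst, src1 and src2, with the dst bit selecting the high or low nibble. If both nibbles are equal, the result never depends on dst, so the destination can be reported as write-only. This helper is a self-contained sketch, not code from the patch.

    #include <cstdint>

    // Evaluate one result bit of vpternlog for input bits (a = dst, b = src1, c = src2)
    // and truth table `imm`.
    inline bool ternlog_bit(bool a, bool b, bool c, uint8_t imm)
    {
        unsigned index = (unsigned(a) << 2) | (unsigned(b) << 1) | unsigned(c);
        return (imm >> index) & 1;
    }

    // True exactly when (imm >> 4) == (imm & 0xF): the table gives the same result
    // whether the dst bit is 0 or 1, so dst is never actually read.
    inline bool independent_of_dst(uint8_t imm)
    {
        for (int b = 0; b < 2; ++b)
            for (int c = 0; c < 2; ++c)
                if (ternlog_bit(false, b, c, imm) != ternlog_bit(true, b, c, imm))
                    return false;
        return true;
    }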
3rdparty/asmjit/src/asmjit/x86/x86instdb.cpp (vendored, 1383 changes; diff not shown because it is too large)
3rdparty/asmjit/src/asmjit/x86/x86operand.h (vendored, 3 changes)

@ -871,6 +871,9 @@ public:
  //! distinguish between 8-bit, 16-bit, 32-bit, and 64-bit increments.
  ASMJIT_INLINE_NODEBUG constexpr uint32_t size() const noexcept { return _signature.getField<Signature::kSizeMask>(); }

  //! Sets the memory operand size (in bytes).
  ASMJIT_INLINE_NODEBUG void setSize(uint32_t size) noexcept { _signature.setField<Signature::kSizeMask>(size); }

  //! \}

  //! \name Address Type
3rdparty/asmjit/src/asmjit/x86/x86rapass.cpp (vendored, 62 changes)

@ -477,6 +477,20 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& cf, RAInstBuilder& i
        }
      }
    }
    else if (opCount == 4 && inst->op(3).isImm()) {
      const Imm& imm = inst->op(3).as<Imm>();

      switch (inst->id()) {
        case Inst::kIdVpternlogd:
        case Inst::kIdVpternlogq: {
          uint32_t predicate = uint32_t(imm.value() & 0xFFu);
          if (predicate == 0x00u || predicate == 0xFFu) {
            ib[0]->makeWriteOnly();
          }
          break;
        }
      }
    }

    switch (sameRegHint) {
      case InstSameRegHint::kNone:
@ -1309,6 +1323,54 @@ ASMJIT_FAVOR_SPEED Error X86RAPass::_rewrite(BaseNode* first, BaseNode* stop) no
        }
      }

      // If one operand was rewritten from Reg to Mem, we have to ensure that we are using the correct instruction.
      if (raInst->isRegToMemPatched()) {
        switch (inst->id()) {
          case Inst::kIdKmovb: {
            if (operands[0].isGp() && operands[1].isMem()) {
              // Transform from [V]MOVD to MOV.
              operands[1].as<Mem>().setSize(1);
              inst->setId(Inst::kIdMovzx);
            }
            break;
          }

          case Inst::kIdVmovw: {
            if (operands[0].isGp() && operands[1].isMem()) {
              // Transform from [V]MOVD to MOV.
              operands[1].as<Mem>().setSize(2);
              inst->setId(Inst::kIdMovzx);
            }
            break;
          }

          case Inst::kIdMovd:
          case Inst::kIdVmovd:
          case Inst::kIdKmovd: {
            if (operands[0].isGp() && operands[1].isMem()) {
              // Transform from [V]MOVD to MOV.
              operands[1].as<Mem>().setSize(4);
              inst->setId(Inst::kIdMov);
            }
            break;
          }

          case Inst::kIdMovq:
          case Inst::kIdVmovq:
          case Inst::kIdKmovq: {
            if (operands[0].isGp() && operands[1].isMem()) {
              // Transform from [V]MOVQ to MOV.
              operands[1].as<Mem>().setSize(8);
              inst->setId(Inst::kIdMov);
            }
            break;
          }

          default:
            break;
        }
      }

      // Transform VEX instruction to EVEX when necessary.
      if (raInst->isTransformable()) {
        if (maxRegId > 15) {
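The explicit setSize() calls matter because, once the vector or mask register operand has been replaced by a spill slot, the rewritten MOV/MOVZX only knows how many bytes to load from the memory operand's size field. A small sketch using the public asmjit API (not from the patch; the function is hypothetical):

    #include <asmjit/x86.h>
    using namespace asmjit;

    void load_spilled(x86::Assembler &a, x86::Mem spill)
    {
        spill.setSize(1);            // byte slot, as for a rewritten KMOVB
        a.movzx(x86::eax, spill);    // movzx eax, byte ptr [slot]

        spill.setSize(4);            // dword slot, as for a rewritten MOVD
        a.mov(x86::eax, spill);      // mov eax, dword ptr [slot]
    }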
@ -42,6 +42,7 @@ static void ASMJIT_NOINLINE testA64AssemblerBase(AssemblerTester<a64::Assembler>
  TEST_INSTRUCTION("E103038B", add(x1, xzr, x3));
  TEST_INSTRUCTION("5F00030B", add(wzr, w2, w3));
  TEST_INSTRUCTION("5F00038B", add(xzr, x2, x3));
  TEST_INSTRUCTION("4140238B", add(x1, x2, w3, uxtw(0)));
  TEST_INSTRUCTION("83004011", add(w3, w4, 0, lsl(12)));
  TEST_INSTRUCTION("83004091", add(x3, x4, 0, lsl(12)));
  TEST_INSTRUCTION("83005011", add(w3, w4, 1024, lsl(12)));

@ -210,7 +211,8 @@ static void ASMJIT_NOINLINE testA64AssemblerBase(AssemblerTester<a64::Assembler>
  TEST_INSTRUCTION("3F00022B", cmn(w1, w2));
  TEST_INSTRUCTION("3F0002AB", cmn(x1, x2));
  TEST_INSTRUCTION("3F08222B", cmn(w1, w2, uxtb(2)));
  TEST_INSTRUCTION("3F0822AB", cmn(x1, x2, uxtb(2)));
  TEST_INSTRUCTION("3F0822AB", cmn(x1, w2, uxtb(2)));
  TEST_INSTRUCTION("5F4023AB", cmn(x2, w3, uxtw(0)));
  TEST_INSTRUCTION("FF43212B", cmn(wsp, w1));
  TEST_INSTRUCTION("FF07212B", cmn(wsp, w1, uxtb(1)));
  TEST_INSTRUCTION("FF6321AB", cmn(sp, x1));

@ -224,7 +226,8 @@ static void ASMJIT_NOINLINE testA64AssemblerBase(AssemblerTester<a64::Assembler>
  TEST_INSTRUCTION("3F00026B", cmp(w1, w2));
  TEST_INSTRUCTION("3F0002EB", cmp(x1, x2));
  TEST_INSTRUCTION("3F08226B", cmp(w1, w2, uxtb(2)));
  TEST_INSTRUCTION("3F0822EB", cmp(x1, x2, uxtb(2)));
  TEST_INSTRUCTION("3F0822EB", cmp(x1, w2, uxtb(2)));
  TEST_INSTRUCTION("5F4023EB", cmp(x2, w3, uxtw(0)));
  TEST_INSTRUCTION("FF43216B", cmp(wsp, w1));
  TEST_INSTRUCTION("FF07216B", cmp(wsp, w1, uxtb(1)));
  TEST_INSTRUCTION("FF6321EB", cmp(sp, x1));
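The expected strings are the little-endian bytes of each 32-bit instruction word. The TEST_INSTRUCTION macro itself is defined elsewhere in the test suite; the following is only a rough sketch, under that assumption, of how the same check can be reproduced with the public asmjit API:

    #include <asmjit/a64.h>
    using namespace asmjit;

    bool encodes_to(uint32_t expected_word)
    {
        Environment env(Arch::kAArch64);
        CodeHolder code;
        code.init(env);

        a64::Assembler a(&code);
        a.add(a64::x1, a64::x2, a64::w3, a64::uxtw(0));   // expect 0x8B234041, i.e. bytes "4140238B"

        const uint8_t *p = code.sectionById(0)->buffer().data();
        uint32_t word = uint32_t(p[0]) | (uint32_t(p[1]) << 8) | (uint32_t(p[2]) << 16) | (uint32_t(p[3]) << 24);
        return word == expected_word;
    }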
3rdparty/asmjit/test/asmjit_test_compiler_x86.cpp (vendored, 1507 changes; diff not shown because it is too large)
@ -151,6 +151,7 @@ static void testX86Arch() {
  Arch arch = Arch::kX64;

  printInfoSimple(arch, Inst::kIdAdd, InstOptions::kNone, eax, ebx);
  printInfoSimple(arch, Inst::kIdXor, InstOptions::kNone, eax, eax);
  printInfoSimple(arch, Inst::kIdLods, InstOptions::kNone, eax, dword_ptr(rsi));

  printInfoSimple(arch, Inst::kIdPshufd, InstOptions::kNone, xmm0, xmm1, imm(0));

@ -167,6 +168,9 @@ static void testX86Arch() {
  printInfoSimple(arch, Inst::kIdVaddpd, InstOptions::kNone, ymm0, ymm30, ymm31);
  printInfoSimple(arch, Inst::kIdVaddpd, InstOptions::kNone, zmm0, zmm1, zmm2);

  printInfoSimple(arch, Inst::kIdVpternlogd, InstOptions::kNone, zmm0, zmm0, zmm0, imm(0xFF));
  printInfoSimple(arch, Inst::kIdVpternlogq, InstOptions::kNone, zmm0, zmm1, zmm2, imm(0x33));

  printInfoExtra(arch, Inst::kIdVaddpd, InstOptions::kNone, k1, zmm0, zmm1, zmm2);
  printInfoExtra(arch, Inst::kIdVaddpd, InstOptions::kX86_ZMask, k1, zmm0, zmm1, zmm2);
#endif // !ASMJIT_NO_X86
@ -177,13 +177,13 @@ static void generateGpSequenceInternal(
  cc.cmn(wA, wB);
  cc.cmn(xA, xB);
  cc.cmn(wA, wB, uxtb(2));
  cc.cmn(xA, xB, uxtb(2));
  cc.cmn(xA, wB, uxtb(2));
  cc.cmp(wA, 33);
  cc.cmp(xA, 33);
  cc.cmp(wA, wB);
  cc.cmp(xA, xB);
  cc.cmp(wA, wB, uxtb(2));
  cc.cmp(xA, xB, uxtb(2));
  cc.cmp(xA, wB, uxtb(2));
  cc.crc32b(wA, wB, wC);
  cc.crc32b(wzr, wB, wC);
  cc.crc32b(wA, wzr, wC);
3rdparty/asmjit/tools/configure-makefiles.sh (vendored, normal file → executable file)
3rdparty/asmjit/tools/configure-ninja.sh (vendored, normal file → executable file)
3rdparty/asmjit/tools/configure-sanitizers.sh (vendored, normal file → executable file)
3rdparty/asmjit/tools/configure-xcode.sh (vendored, normal file → executable file)
3rdparty/asmjit/tools/enumgen.sh (vendored, normal file → executable file)
3rdparty/asmjit/tools/tablegen-a64.sh (vendored, normal file → executable file)
3rdparty/asmjit/tools/tablegen-x86.sh (vendored, normal file → executable file)
3rdparty/asmjit/tools/tablegen.sh (vendored, normal file → executable file)
makefile (9 changes)

@ -390,16 +390,15 @@ endif

ifeq ($(findstring arm,$(UNAME)),arm)
ARCHITECTURE :=
ifneq ($(PLATFORM),arm64)
ifndef FORCE_DRC_C_BACKEND
	FORCE_DRC_C_BACKEND := 1
endif
endif
endif

ifeq ($(findstring aarch64,$(UNAME)),aarch64)
ARCHITECTURE :=
ifndef FORCE_DRC_C_BACKEND
	FORCE_DRC_C_BACKEND := 1
endif
endif

ifeq ($(findstring s390x,$(UNAME)),s390x)
@ -710,16 +710,28 @@ end

	if not _OPTIONS["FORCE_DRC_C_BACKEND"] then
		if _OPTIONS["BIGENDIAN"]~="1" then
			if (_OPTIONS["PLATFORM"]=="arm64") then
				configuration { }
					defines {
						"NATIVE_DRC=drcbe_arm64",
					}
			else
				configuration { "x64" }
					defines {
						"NATIVE_DRC=drcbe_x64",
					}
				configuration { "x32" }
					defines {
						"NATIVE_DRC=drcbe_x86",
					}
				configuration { }
			end
		end

		configuration { }
			defines {
				"ASMJIT_STATIC",
			}
	end

	defines {
@ -1947,6 +1947,9 @@ project "asmjit"
	end

	configuration { }
		defines {
			"ASMJIT_STATIC",
		}

	if _OPTIONS["targetos"]=="macosx" and _OPTIONS["gcc"]~=nil then
		if string.find(_OPTIONS["gcc"], "clang") and (version < 80000) then

@ -1956,14 +1959,16 @@ project "asmjit"
		end
	end

	if (_OPTIONS["PLATFORM"]=="arm64") then
		configuration { }
			defines {
				"ASMJIT_NO_X86",
			}
	end
|
||||
|
||||
files {
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/a64.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/asmjit-scope-begin.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/asmjit-scope-end.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/asmjit.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64archtraits_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64assembler.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64assembler.h",
|
||||
@ -1971,28 +1976,32 @@ project "asmjit"
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64builder.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64compiler.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64compiler.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64emithelper.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64emithelper_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64emithelper.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64emitter.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64formatter.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64formatter_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64func.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64formatter.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64func_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64func.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64globals.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64instapi.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64instapi_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64instapi.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64instdb_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64instdb.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64instdb.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64instdb_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64operand.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64operand.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64rapass.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64rapass_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/armformatter.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/a64rapass.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/armformatter_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/armformatter.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/armglobals.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/armoperand.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/arm/armutils.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/asmjit-scope-begin.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/asmjit-scope-end.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/asmjit.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/api-build_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/api-config.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/archcommons.h",
|
||||
@ -2000,14 +2009,14 @@ project "asmjit"
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/archtraits.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/assembler.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/assembler.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/builder_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/builder.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/builder.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/builder_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/codebuffer.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/codeholder.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/codeholder.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/codewriter.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/codewriter_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/codewriter.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/compiler.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/compiler.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/compilerdefs.h",
|
||||
@ -2015,29 +2024,29 @@ project "asmjit"
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/constpool.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/cpuinfo.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/cpuinfo.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/emithelper.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/emithelper_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/emithelper.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/emitter.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/emitter.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/emitterutils.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/emitterutils_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/emitterutils.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/environment.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/environment.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/errorhandler.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/errorhandler.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/formatter_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/formatter.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/formatter.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/formatter_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/func.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/func.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/funcargscontext.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/funcargscontext_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/funcargscontext.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/globals.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/globals.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/inst.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/inst.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/instdb.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/instdb_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/instdb.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/jitallocator.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/jitallocator.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/jitruntime.cpp",
|
||||
@ -2047,23 +2056,23 @@ project "asmjit"
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/misc_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/operand.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/operand.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/osutils_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/osutils.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/osutils.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/osutils_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/raassignment_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/rabuilders_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/radefs_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/ralocal.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/ralocal_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/rapass.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/ralocal.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/rapass_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/rastack.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/rapass.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/rastack_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/rastack.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/string.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/string.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/support_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/support.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/support.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/support_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/target.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/target.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/type.cpp",
|
||||
@ -2083,6 +2092,7 @@ project "asmjit"
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/zonetree.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/zonevector.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/core/zonevector.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86archtraits_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86assembler.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86assembler.h",
|
||||
@ -2090,23 +2100,23 @@ project "asmjit"
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86builder.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86compiler.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86compiler.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86emithelper.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86emithelper_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86emithelper.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86emitter.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86formatter.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86formatter_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86func.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86formatter.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86func_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86func.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86globals.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86instapi.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86instapi_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86instapi.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86instdb_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86instdb.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86instdb.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86instdb_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86opcode_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86operand.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86operand.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86rapass.cpp",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86rapass_p.h",
|
||||
MAME_DIR .. "3rdparty/asmjit/src/asmjit/x86/x86rapass.cpp",
|
||||
}
|
||||
end
|
||||
|
@ -42,12 +42,19 @@ if (CPU_INCLUDE_DRC) then
		MAME_DIR .. "src/devices/cpu/drcumlsh.h",
	}
	if not _OPTIONS["FORCE_DRC_C_BACKEND"] then
		if (_OPTIONS["PLATFORM"]=="arm64") then
			files {
				MAME_DIR .. "src/devices/cpu/drcbearm64.cpp",
				MAME_DIR .. "src/devices/cpu/drcbearm64.h",
			}
		else
			files {
				MAME_DIR .. "src/devices/cpu/drcbex64.cpp",
				MAME_DIR .. "src/devices/cpu/drcbex64.h",
				MAME_DIR .. "src/devices/cpu/drcbex86.cpp",
				MAME_DIR .. "src/devices/cpu/drcbex86.h",
			}
		end
	end

	if _OPTIONS["targetos"]=="macosx" and _OPTIONS["gcc"]~=nil then
|
||||
|
src/devices/cpu/drcbearm64.cpp (new file, 4278 lines; diff not shown because it is too large)
src/devices/cpu/drcbearm64.h (new file, 287 lines)
@ -0,0 +1,287 @@
|
||||
// license:BSD-3-Clause
|
||||
// copyright-holders:windyfairy
|
||||
#ifndef MAME_CPU_DRCBEARM64_H
|
||||
#define MAME_CPU_DRCBEARM64_H
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "drcuml.h"
|
||||
#include "drcbeut.h"
|
||||
|
||||
#include "asmjit/src/asmjit/asmjit.h"
|
||||
#include "asmjit/src/asmjit/a64.h"
|
||||
|
||||
#include <vector>
|
||||
|
||||
|
||||
namespace drc {
|
||||
|
||||
class drcbe_arm64 : public drcbe_interface
|
||||
{
|
||||
using arm64_entry_point_func = uint32_t (*)(void *entry);
|
||||
|
||||
public:
|
||||
drcbe_arm64(drcuml_state &drcuml, device_t &device, drc_cache &cache, uint32_t flags, int modes, int addrbits, int ignorebits);
|
||||
virtual ~drcbe_arm64();
|
||||
|
||||
virtual void reset() override;
|
||||
virtual int execute(uml::code_handle &entry) override;
|
||||
virtual void generate(drcuml_block &block, const uml::instruction *instlist, uint32_t numinst) override;
|
||||
virtual bool hash_exists(uint32_t mode, uint32_t pc) override;
|
||||
virtual void get_info(drcbe_info &info) override;
|
||||
virtual bool logging() const override { return false; }
|
||||
|
||||
private:
|
||||
class be_parameter
|
||||
{
|
||||
static inline constexpr int REG_MAX = 30;
|
||||
|
||||
public:
|
||||
// parameter types
|
||||
enum be_parameter_type
|
||||
{
|
||||
PTYPE_NONE = 0, // invalid
|
||||
PTYPE_IMMEDIATE, // immediate; value = sign-extended to 64 bits
|
||||
PTYPE_INT_REGISTER, // integer register; value = 0-REG_MAX
|
||||
PTYPE_FLOAT_REGISTER, // floating point register; value = 0-REG_MAX
|
||||
PTYPE_MEMORY, // memory; value = pointer to memory
|
||||
PTYPE_MAX
|
||||
};
|
||||
|
||||
typedef uint64_t be_parameter_value;
|
||||
|
||||
be_parameter() : m_type(PTYPE_NONE), m_value(0) { }
|
||||
be_parameter(uint64_t val) : m_type(PTYPE_IMMEDIATE), m_value(val) { }
|
||||
be_parameter(drcbe_arm64 &drcbe, const uml::parameter ¶m, uint32_t allowed);
|
||||
be_parameter(const be_parameter ¶m) = default;
|
||||
|
||||
static be_parameter make_ireg(int regnum) { assert(regnum >= 0 && regnum < REG_MAX); return be_parameter(PTYPE_INT_REGISTER, regnum); }
|
||||
static be_parameter make_freg(int regnum) { assert(regnum >= 0 && regnum < REG_MAX); return be_parameter(PTYPE_FLOAT_REGISTER, regnum); }
|
||||
static be_parameter make_memory(void *base) { return be_parameter(PTYPE_MEMORY, reinterpret_cast<be_parameter_value>(base)); }
|
||||
static be_parameter make_memory(const void *base) { return be_parameter(PTYPE_MEMORY, reinterpret_cast<be_parameter_value>(const_cast<void *>(base))); }
|
||||
|
||||
bool operator==(const be_parameter &rhs) const { return (m_type == rhs.m_type && m_value == rhs.m_value); }
|
||||
bool operator!=(const be_parameter &rhs) const { return (m_type != rhs.m_type || m_value != rhs.m_value); }
|
||||
|
||||
be_parameter_type type() const { return m_type; }
|
||||
uint64_t immediate() const { assert(m_type == PTYPE_IMMEDIATE); return m_value; }
|
||||
uint32_t ireg() const { assert(m_type == PTYPE_INT_REGISTER); assert(m_value < REG_MAX); return m_value; }
|
||||
uint32_t freg() const { assert(m_type == PTYPE_FLOAT_REGISTER); assert(m_value < REG_MAX); return m_value; }
|
||||
void *memory() const { assert(m_type == PTYPE_MEMORY); return reinterpret_cast<void *>(m_value); }
|
||||
|
||||
bool is_immediate() const { return (m_type == PTYPE_IMMEDIATE); }
|
||||
bool is_int_register() const { return (m_type == PTYPE_INT_REGISTER); }
|
||||
bool is_float_register() const { return (m_type == PTYPE_FLOAT_REGISTER); }
|
||||
bool is_memory() const { return (m_type == PTYPE_MEMORY); }
|
||||
|
||||
bool is_immediate_value(uint64_t value) const { return (m_type == PTYPE_IMMEDIATE && m_value == value); }
|
||||
|
||||
asmjit::a64::Vec get_register_float(uint32_t regsize) const;
|
||||
asmjit::a64::Gp get_register_int(uint32_t regsize) const;
|
||||
asmjit::a64::Vec select_register(asmjit::a64::Vec const ®, uint32_t regsize) const;
|
||||
asmjit::a64::Gp select_register(asmjit::a64::Gp const ®, uint32_t regsize) const;
|
||||
|
||||
private:
|
||||
be_parameter(be_parameter_type type, be_parameter_value value) : m_type(type), m_value(value) { }
|
||||
|
||||
be_parameter_type m_type;
|
||||
be_parameter_value m_value;
|
||||
};
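A short sketch of how an op_* handler is expected to use this class once a UML operand has been normalised. It is illustrative only: the PTYPE_MR/PTYPE_MRI allowed-type masks and the register/handler names below follow the pattern of the existing x86 back-ends and are assumptions, not code from this header.

    // be_parameter dstp(*this, inst.param(0), PTYPE_MR);   // memory or register allowed
    // be_parameter srcp(*this, inst.param(1), PTYPE_MRI);  // memory, register or immediate
    //
    // if (srcp.is_immediate())
    //     mov_r64_imm(a, dstreg, srcp.immediate());
    // else if (srcp.is_int_register())
    //     a.mov(dstreg, srcp.get_register_int(inst.size()));
    // else
    //     emit_ldr_mem(a, dstreg, srcp.memory());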
|
||||
|
||||
void op_handle(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_hash(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_label(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_comment(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_mapvar(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
|
||||
void op_nop(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_break(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_debug(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_exit(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_hashjmp(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_jmp(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_exh(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_callh(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_ret(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_callc(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_recover(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
|
||||
void op_setfmod(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_getfmod(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_getexp(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_getflgs(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_setflgs(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_save(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_restore(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
|
||||
void op_load(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_loads(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_store(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_read(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_readm(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_write(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_writem(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_carry(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_set(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_mov(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_sext(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_roland(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_rolins(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
template <asmjit::a64::Inst::Id Opcode> void op_add(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
template <asmjit::a64::Inst::Id Opcode> void op_sub(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_cmp(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_mulu(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_mululw(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_muls(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_mulslw(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
template <asmjit::a64::Inst::Id Opcode> void op_div(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_and(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_test(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_or(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_xor(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_lzcnt(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_tzcnt(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_bswap(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
template <asmjit::a64::Inst::Id Opcode> void op_shift(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_rol(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_rolc(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_rorc(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
|
||||
void op_fload(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_fstore(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_fread(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_fwrite(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_fmov(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_ftoint(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_ffrint(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_ffrflt(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_frnds(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_fcmp(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_fcopyi(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
void op_icopyf(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
|
||||
template <asmjit::a64::Inst::Id Opcode> void op_float_alu(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
template <asmjit::a64::Inst::Id Opcode> void op_float_alu2(asmjit::a64::Assembler &a, const uml::instruction &inst);
|
||||
|
||||
size_t emit(asmjit::CodeHolder &ch);
|
||||
|
||||
|
||||
// helper functions
|
||||
asmjit::a64::Vec select_register(asmjit::a64::Vec const ®, uint32_t regsize) const;
|
||||
asmjit::a64::Gp select_register(asmjit::a64::Gp const ®, uint32_t regsize) const;
|
||||
|
||||
static bool is_valid_immediate(uint64_t val, size_t bits);
|
||||
static bool is_valid_immediate_signed(int64_t val, size_t bits);
|
||||
static bool is_valid_immediate_mask(uint64_t val, size_t bytes);
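The implementations of these helpers live in drcbearm64.cpp, which is not shown in this diff. A plausible reading of the unsigned and signed range checks is sketched below (assumption only); the mask variant presumably tests whether a value is encodable as an AArch64 logical immediate, which is considerably more involved.

    // Possible shape of the range checks (assumption, not the actual implementation):
    inline bool fits_unsigned(uint64_t val, size_t bits)
    {
        return (bits >= 64) || (val < (uint64_t(1) << bits));
    }

    inline bool fits_signed(int64_t val, size_t bits)
    {
        if (bits >= 64)
            return true;
        int64_t const lo = -(int64_t(1) << (bits - 1));
        int64_t const hi = (int64_t(1) << (bits - 1)) - 1;
        return (val >= lo) && (val <= hi);
    }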
|
||||
|
||||
asmjit::arm::Mem get_mem_absolute(asmjit::a64::Assembler &a, const void *ptr) const;
|
||||
void get_imm_relative(asmjit::a64::Assembler &a, const asmjit::a64::Gp ®, const uint64_t ptr) const;
|
||||
|
||||
bool emit_add_optimized(asmjit::a64::Assembler &a, const asmjit::a64::Gp &dst, const asmjit::a64::Gp &src, int64_t val) const;
|
||||
bool emit_sub_optimized(asmjit::a64::Assembler &a, const asmjit::a64::Gp &dst, const asmjit::a64::Gp &src, int64_t val) const;
|
||||
|
||||
void emit_ldr_str_base_mem(asmjit::a64::Assembler &a, asmjit::a64::Inst::Id opcode, const asmjit::a64::Reg ®, const void *ptr) const;
|
||||
void emit_ldr_mem(asmjit::a64::Assembler &a, const asmjit::a64::Gp ®, const void *ptr) const;
|
||||
void emit_ldrb_mem(asmjit::a64::Assembler &a, const asmjit::a64::Gp ®, const void *ptr) const;
|
||||
void emit_ldrh_mem(asmjit::a64::Assembler &a, const asmjit::a64::Gp ®, const void *ptr) const;
|
||||
void emit_ldrsb_mem(asmjit::a64::Assembler &a, const asmjit::a64::Gp ®, const void *ptr) const;
|
||||
void emit_ldrsh_mem(asmjit::a64::Assembler &a, const asmjit::a64::Gp ®, const void *ptr) const;
|
||||
void emit_ldrsw_mem(asmjit::a64::Assembler &a, const asmjit::a64::Gp ®, const void *ptr) const;
|
||||
void emit_str_mem(asmjit::a64::Assembler &a, const asmjit::a64::Gp ®, const void *ptr) const;
|
||||
void emit_strb_mem(asmjit::a64::Assembler &a, const asmjit::a64::Gp ®, const void *ptr) const;
|
||||
void emit_strh_mem(asmjit::a64::Assembler &a, const asmjit::a64::Gp ®, const void *ptr) const;
|
||||
|
||||
void emit_float_ldr_mem(asmjit::a64::Assembler &a, const asmjit::a64::Vec ®, const void *ptr) const;
|
||||
void emit_float_str_mem(asmjit::a64::Assembler &a, const asmjit::a64::Vec ®, const void *ptr) const;
|
||||
|
||||
void get_carry(asmjit::a64::Assembler &a, const asmjit::a64::Gp ®, bool inverted = false) const;
|
||||
void load_carry(asmjit::a64::Assembler &a, bool inverted = false) const;
|
||||
void store_carry(asmjit::a64::Assembler &a, bool inverted = false) const;
|
||||
void store_carry_reg(asmjit::a64::Assembler &a, const asmjit::a64::Gp ®) const;
|
||||
|
||||
void store_unordered(asmjit::a64::Assembler &a) const;
|
||||
void get_unordered(asmjit::a64::Assembler &a, const asmjit::a64::Gp ®) const;
|
||||
void check_unordered_condition(asmjit::a64::Assembler &a, uml::condition_t cond, asmjit::Label condition_met, bool not_equal) const;
|
||||
|
||||
void get_shifted_bit(asmjit::a64::Assembler &a, const asmjit::a64::Gp &dst, const asmjit::a64::Gp &src, uint32_t bits, uint32_t shift) const;
|
||||
|
||||
void calculate_carry_shift_left(asmjit::a64::Assembler &a, const asmjit::a64::Gp ®, const asmjit::a64::Gp &shift, int maxBits) const;
|
||||
void calculate_carry_shift_left_imm(asmjit::a64::Assembler &a, const asmjit::a64::Gp ®, const int shift, int maxBits) const;
|
||||
|
||||
void calculate_carry_shift_right(asmjit::a64::Assembler &a, const asmjit::a64::Gp ®, const asmjit::a64::Gp &shift) const;
|
||||
void calculate_carry_shift_right_imm(asmjit::a64::Assembler &a, const asmjit::a64::Gp ®, const int shift) const;
|
||||
|
||||
void mov_float_reg_param(asmjit::a64::Assembler &a, uint32_t regsize, asmjit::a64::Vec const &dst, const be_parameter &src) const;
|
||||
void mov_float_param_param(asmjit::a64::Assembler &a, uint32_t regsize, const be_parameter &dst, const be_parameter &src) const;
|
||||
void mov_float_param_reg(asmjit::a64::Assembler &a, uint32_t regsize, const be_parameter &dst, asmjit::a64::Vec const &src) const;
|
||||
void mov_float_param_int_reg(asmjit::a64::Assembler &a, uint32_t regsize, const be_parameter &dst, asmjit::a64::Gp const &src) const;
|
||||
|
||||
void mov_reg_param(asmjit::a64::Assembler &a, uint32_t regsize, const asmjit::a64::Gp &dst, const be_parameter &src) const;
|
||||
void mov_param_reg(asmjit::a64::Assembler &a, uint32_t regsize, const be_parameter &dst, const asmjit::a64::Gp &src) const;
|
||||
void mov_param_imm(asmjit::a64::Assembler &a, uint32_t regsize, const be_parameter &dst, uint64_t src) const;
|
||||
void mov_param_param(asmjit::a64::Assembler &a, uint32_t regsize, const be_parameter &dst, const be_parameter &src) const;
|
||||
void mov_mem_param(asmjit::a64::Assembler &a, uint32_t regsize, void *dst, const be_parameter &src) const;
|
||||
void mov_signed_reg64_param32(asmjit::a64::Assembler &a, const asmjit::a64::Gp &dst, const be_parameter &src) const;
|
||||
void mov_r64_imm(asmjit::a64::Assembler &a, const asmjit::a64::Gp &dst, uint64_t const src) const;
|
||||
|
||||
void call_arm_addr(asmjit::a64::Assembler &a, const void *offs) const;
|
||||
|
||||
drc_hash_table m_hash;
|
||||
drc_map_variables m_map;
|
||||
FILE * m_log_asmjit;
|
||||
|
||||
arm64_entry_point_func m_entry;
|
||||
drccodeptr m_exit;
|
||||
drccodeptr m_nocode;
|
||||
|
||||
uint8_t *m_baseptr;
|
||||
|
||||
struct near_state
|
||||
{
|
||||
void *debug_cpu_instruction_hook;
|
||||
void *drcmap_get_value;
|
||||
void *hashstacksave;
|
||||
|
||||
uint32_t emulated_flags;
|
||||
uint32_t calldepth;
|
||||
};
|
||||
near_state &m_near;
|
||||
|
||||
using opcode_generate_func = void (drcbe_arm64::*)(asmjit::a64::Assembler &, const uml::instruction &);
|
||||
struct opcode_table_entry
|
||||
{
|
||||
uml::opcode_t opcode;
|
||||
opcode_generate_func func;
|
||||
};
|
||||
static const opcode_table_entry s_opcode_table_source[];
|
||||
static opcode_generate_func s_opcode_table[uml::OP_MAX];
|
||||
|
||||
struct resolved_handler { uintptr_t obj = 0; void *func = nullptr; };
|
||||
struct resolved_accessors
|
||||
{
|
||||
|
||||
resolved_handler read_byte;
|
||||
resolved_handler read_word;
|
||||
resolved_handler read_word_masked;
|
||||
resolved_handler read_dword;
|
||||
resolved_handler read_dword_masked;
|
||||
resolved_handler read_qword;
|
||||
resolved_handler read_qword_masked;
|
||||
|
||||
resolved_handler write_byte;
|
||||
resolved_handler write_word;
|
||||
resolved_handler write_word_masked;
|
||||
resolved_handler write_dword;
|
||||
resolved_handler write_dword_masked;
|
||||
resolved_handler write_qword;
|
||||
resolved_handler write_qword_masked;
|
||||
};
|
||||
using resolved_accessors_vector = std::vector<resolved_accessors>;
|
||||
resolved_accessors_vector m_resolved_accessors;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
using drc::drcbe_arm64;
|
||||
|
||||
#endif // MAME_CPU_DRCBEARM64_H
|
@ -1713,12 +1713,14 @@ void drcbe_x64::op_hashjmp(Assembler &a, const instruction &inst)
		}
	}

	// fix stack alignment if "no code" landing returned from abuse of call with misaligned stack
	a.sub(rsp, 8);                                                          // sub   rsp,8

	// in all cases, if there is no code, we return here to generate the exception
	if (LOG_HASHJMPS)
		smart_call_m64(a, &m_near.debug_log_hashjmp_fail);

	mov_mem_param(a, MABS(&m_state.exp, 4), pcp);                           // mov   [exp],param
	a.sub(rsp, 8);                                                          // sub   rsp,8
	a.call(MABS(exp.handle().codeptr_addr()));                              // call  [exp]
}
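Why the extra sub rsp,8 is needed before the debug helper: both the System V and Windows x64 ABIs require RSP to be 16-byte aligned at the point of a CALL. The CALL that lands here has already pushed an 8-byte return address, so on arrival RSP % 16 == 8, and debug_log_hashjmp_fail would otherwise be entered on a misaligned stack. The arithmetic, as a comment-only sketch:

    // before the landing CALL:    rsp % 16 == 0
    // after the CALL (push of 8): rsp % 16 == 8
    // after sub rsp, 8:           rsp % 16 == 0   -> safe to CALL the logging helper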
|
||||
|
||||
|
@ -29,7 +29,7 @@ namespace drc {

class drcbe_x64 : public drcbe_interface
{
	typedef uint32_t (*x86_entry_point_func)(uint8_t *rbpvalue, x86code *entry);
	using x86_entry_point_func = uint32_t (*)(uint8_t *rbpvalue, x86code *entry);

public:
	// construction/destruction

@ -50,7 +50,7 @@ private:
	{
	public:
		// HACK: leftover from x86emit
		static int const REG_MAX = 16;
		static inline constexpr int REG_MAX = 16;

		// parameter types
		enum be_parameter_type

@ -59,7 +59,6 @@ private:
			PTYPE_IMMEDIATE,        // immediate; value = sign-extended to 64 bits
			PTYPE_INT_REGISTER,     // integer register; value = 0-REG_MAX
			PTYPE_FLOAT_REGISTER,   // floating point register; value = 0-REG_MAX
			PTYPE_VECTOR_REGISTER,  // vector register; value = 0-REG_MAX
			PTYPE_MEMORY,           // memory; value = pointer to memory
			PTYPE_MAX
		};

@ -69,15 +68,15 @@ private:

		// construction
		be_parameter() : m_type(PTYPE_NONE), m_value(0) { }
		be_parameter(be_parameter const &param) : m_type(param.m_type), m_value(param.m_value) { }
		be_parameter(uint64_t val) : m_type(PTYPE_IMMEDIATE), m_value(val) { }
		be_parameter(drcbe_x64 &drcbe, const uml::parameter &param, uint32_t allowed);
		be_parameter(const be_parameter &param) = default;

		// creators for types that don't safely default
		static inline be_parameter make_ireg(int regnum) { assert(regnum >= 0 && regnum < REG_MAX); return be_parameter(PTYPE_INT_REGISTER, regnum); }
		static inline be_parameter make_freg(int regnum) { assert(regnum >= 0 && regnum < REG_MAX); return be_parameter(PTYPE_FLOAT_REGISTER, regnum); }
		static inline be_parameter make_memory(void *base) { return be_parameter(PTYPE_MEMORY, reinterpret_cast<be_parameter_value>(base)); }
		static inline be_parameter make_memory(const void *base) { return be_parameter(PTYPE_MEMORY, reinterpret_cast<be_parameter_value>(const_cast<void *>(base))); }
		static be_parameter make_ireg(int regnum) { assert(regnum >= 0 && regnum < REG_MAX); return be_parameter(PTYPE_INT_REGISTER, regnum); }
		static be_parameter make_freg(int regnum) { assert(regnum >= 0 && regnum < REG_MAX); return be_parameter(PTYPE_FLOAT_REGISTER, regnum); }
		static be_parameter make_memory(void *base) { return be_parameter(PTYPE_MEMORY, reinterpret_cast<be_parameter_value>(base)); }
		static be_parameter make_memory(const void *base) { return be_parameter(PTYPE_MEMORY, reinterpret_cast<be_parameter_value>(const_cast<void *>(base))); }

		// operators
		bool operator==(be_parameter const &rhs) const { return (m_type == rhs.m_type && m_value == rhs.m_value); }
|
||||
|
@ -28,7 +28,7 @@ namespace drc {

class drcbe_x86 : public drcbe_interface
{
	typedef uint32_t (*x86_entry_point_func)(x86code *entry);
	using x86_entry_point_func = uint32_t (*)(x86code *entry);

public:
	// construction/destruction

@ -45,7 +45,7 @@ public:

private:
	// HACK: leftover from x86emit
	static int const REG_MAX = 16;
	static inline constexpr int REG_MAX = 16;

	// a be_parameter is similar to a uml::parameter but maps to native registers/memory
	class be_parameter

@ -58,7 +58,6 @@ private:
			PTYPE_IMMEDIATE,        // immediate; value = sign-extended to 64 bits
			PTYPE_INT_REGISTER,     // integer register; value = 0-REG_MAX
			PTYPE_FLOAT_REGISTER,   // floating point register; value = 0-REG_MAX
			PTYPE_VECTOR_REGISTER,  // vector register; value = 0-REG_MAX
			PTYPE_MEMORY,           // memory; value = pointer to memory
			PTYPE_MAX
		};

@ -68,15 +67,15 @@ private:

		// construction
		be_parameter() : m_type(PTYPE_NONE), m_value(0) { }
		be_parameter(be_parameter const &param) : m_type(param.m_type), m_value(param.m_value) { }
		be_parameter(uint64_t val) : m_type(PTYPE_IMMEDIATE), m_value(val) { }
		be_parameter(drcbe_x86 &drcbe, const uml::parameter &param, uint32_t allowed);
		be_parameter(const be_parameter &param) = default;

		// creators for types that don't safely default
		static inline be_parameter make_ireg(int regnum) { assert(regnum >= 0 && regnum < REG_MAX); return be_parameter(PTYPE_INT_REGISTER, regnum); }
		static inline be_parameter make_freg(int regnum) { assert(regnum >= 0 && regnum < REG_MAX); return be_parameter(PTYPE_FLOAT_REGISTER, regnum); }
		static inline be_parameter make_memory(void *base) { return be_parameter(PTYPE_MEMORY, reinterpret_cast<be_parameter_value>(base)); }
		static inline be_parameter make_memory(const void *base) { return be_parameter(PTYPE_MEMORY, reinterpret_cast<be_parameter_value>(const_cast<void *>(base))); }
		static be_parameter make_ireg(int regnum) { assert(regnum >= 0 && regnum < REG_MAX); return be_parameter(PTYPE_INT_REGISTER, regnum); }
		static be_parameter make_freg(int regnum) { assert(regnum >= 0 && regnum < REG_MAX); return be_parameter(PTYPE_FLOAT_REGISTER, regnum); }
		static be_parameter make_memory(void *base) { return be_parameter(PTYPE_MEMORY, reinterpret_cast<be_parameter_value>(base)); }
		static be_parameter make_memory(const void *base) { return be_parameter(PTYPE_MEMORY, reinterpret_cast<be_parameter_value>(const_cast<void *>(base))); }

		// operators
		bool operator==(be_parameter const &rhs) const { return (m_type == rhs.m_type && m_value == rhs.m_value); }
|
||||
|
@ -37,9 +37,12 @@

#include "emuopts.h"
#include "drcbec.h"
#ifdef NATIVE_DRC
#ifndef ASMJIT_NO_X86
#include "drcbex86.h"
#include "drcbex64.h"
#endif
#include "drcbearm64.h"
#endif

#include <fstream>
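NATIVE_DRC expands to drcbe_x64, drcbe_x86 or drcbe_arm64 depending on the build (see the premake change above). A sketch of how such a macro can drive back-end selection is shown below; it is not the surrounding drcuml.cpp code, and the helper name and the drcbe_c constructor arguments (copied from the drcbe_arm64 declaration earlier in this commit) are assumptions.

    #include <memory>

    std::unique_ptr<drcbe_interface> make_backend(
            drcuml_state &drcuml, device_t &device, drc_cache &cache,
            uint32_t flags, int modes, int addrbits, int ignorebits, bool force_c)
    {
    #ifdef NATIVE_DRC
        if (!force_c)
            return std::make_unique<NATIVE_DRC>(drcuml, device, cache, flags, modes, addrbits, ignorebits);
    #endif
        return std::make_unique<drcbe_c>(drcuml, device, cache, flags, modes, addrbits, ignorebits);
    }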
|
||||
|
||||
|
@ -86,7 +86,6 @@ using namespace uml;
#define PTYPES_IMM      (1 << parameter::PTYPE_IMMEDIATE)
#define PTYPES_IREG     (1 << parameter::PTYPE_INT_REGISTER)
#define PTYPES_FREG     (1 << parameter::PTYPE_FLOAT_REGISTER)
#define PTYPES_VREG     (1 << parameter::PTYPE_VECTOR_REGISTER)
#define PTYPES_MVAR     (1 << parameter::PTYPE_MAPVAR)
#define PTYPES_MEM      (1 << parameter::PTYPE_MEMORY)
#define PTYPES_SIZE     (1 << parameter::PTYPE_SIZE)

@ -429,7 +428,6 @@ void uml::instruction::simplify()
			case SIZE_WORD:     convert_to_mov_immediate(s16(m_param[1].immediate())); break;
			case SIZE_DWORD:    convert_to_mov_immediate(s32(m_param[1].immediate())); break;
			case SIZE_QWORD:    convert_to_mov_immediate(s64(m_param[1].immediate())); break;
			case SIZE_DQWORD:   fatalerror("Invalid SEXT target size\n");
		}
		break;
|
||||
|
||||
|
@ -39,12 +39,6 @@ namespace uml
	constexpr int REG_F_COUNT = 10;
	constexpr int REG_F_END = REG_F0 + REG_F_COUNT;

	// vector registers
	constexpr int REG_V0 = 0xc00;
	constexpr int REG_V_COUNT = 10;
	constexpr int REG_V_END = REG_V0 + REG_V_COUNT;

	// map variables
	constexpr int MAPVAR_M0 = 0x1000;
	constexpr int MAPVAR_COUNT = 10;
	constexpr int MAPVAR_END = MAPVAR_M0 + MAPVAR_COUNT;

@ -105,7 +99,6 @@ namespace uml
		SIZE_WORD,                  // 2-byte
		SIZE_DWORD,                 // 4-byte
		SIZE_QWORD,                 // 8-byte
		SIZE_DQWORD,                // 16-byte (vector)
		SIZE_SHORT = SIZE_DWORD,    // 4-byte (float)
		SIZE_DOUBLE = SIZE_QWORD    // 8-byte (float)
	};

@ -284,7 +277,6 @@ namespace uml
		PTYPE_IMMEDIATE,            // immediate; value = sign-extended to 64 bits
		PTYPE_INT_REGISTER,         // integer register; value = REG_I0 - REG_I_END
		PTYPE_FLOAT_REGISTER,       // floating point register; value = REG_F0 - REG_F_END
		PTYPE_VECTOR_REGISTER,      // vector register; value = REG_V0 - REG_V_END
		PTYPE_MAPVAR,               // map variable; value = MAPVAR_M0 - MAPVAR_END
		PTYPE_MEMORY,               // memory; value = pointer to memory
		PTYPE_SIZE,                 // size; value = operand_size

@ -305,19 +297,18 @@ namespace uml
		constexpr parameter() : m_type(PTYPE_NONE), m_value(0) { }
		constexpr parameter(parameter const &param) : m_type(param.m_type), m_value(param.m_value) { }
		constexpr parameter(u64 val) : m_type(PTYPE_IMMEDIATE), m_value(val) { }
		parameter(operand_size size, memory_scale scale) : m_type(PTYPE_SIZE_SCALE), m_value((scale << 4) | size) { assert(size >= SIZE_BYTE && size <= SIZE_DQWORD); assert(scale >= SCALE_x1 && scale <= SCALE_x8); }
		parameter(operand_size size, memory_space space) : m_type(PTYPE_SIZE_SPACE), m_value((space << 4) | size) { assert(size >= SIZE_BYTE && size <= SIZE_DQWORD); assert(space >= SPACE_PROGRAM && space <= SPACE_IO); }
		parameter(operand_size size, memory_scale scale) : m_type(PTYPE_SIZE_SCALE), m_value((scale << 4) | size) { assert(size >= SIZE_BYTE && size <= SIZE_QWORD); assert(scale >= SCALE_x1 && scale <= SCALE_x8); }
		parameter(operand_size size, memory_space space) : m_type(PTYPE_SIZE_SPACE), m_value((space << 4) | size) { assert(size >= SIZE_BYTE && size <= SIZE_QWORD); assert(space >= SPACE_PROGRAM && space <= SPACE_IO); }
		parameter(code_handle &handle) : m_type(PTYPE_CODE_HANDLE), m_value(reinterpret_cast<parameter_value>(&handle)) { }
		constexpr parameter(code_label const &label) : m_type(PTYPE_CODE_LABEL), m_value(label) { }

		// creators for types that don't safely default
		static parameter make_ireg(int regnum) { assert(regnum >= REG_I0 && regnum < REG_I_END); return parameter(PTYPE_INT_REGISTER, regnum); }
		static parameter make_freg(int regnum) { assert(regnum >= REG_F0 && regnum < REG_F_END); return parameter(PTYPE_FLOAT_REGISTER, regnum); }
		static parameter make_vreg(int regnum) { assert(regnum >= REG_V0 && regnum < REG_V_END); return parameter(PTYPE_VECTOR_REGISTER, regnum); }
		static parameter make_mapvar(int mvnum) { assert(mvnum >= MAPVAR_M0 && mvnum < MAPVAR_END); return parameter(PTYPE_MAPVAR, mvnum); }
		static parameter make_memory(void *base) { return parameter(PTYPE_MEMORY, reinterpret_cast<parameter_value>(base)); }
		static parameter make_memory(void const *base) { return parameter(PTYPE_MEMORY, reinterpret_cast<parameter_value>(const_cast<void *>(base))); }
		static parameter make_size(operand_size size) { assert(size >= SIZE_BYTE && size <= SIZE_DQWORD); return parameter(PTYPE_SIZE, size); }
		static parameter make_size(operand_size size) { assert(size >= SIZE_BYTE && size <= SIZE_QWORD); return parameter(PTYPE_SIZE, size); }
		static parameter make_string(char const *string) { return parameter(PTYPE_STRING, reinterpret_cast<parameter_value>(const_cast<char *>(string))); }
		static parameter make_cfunc(c_function func) { return parameter(PTYPE_C_FUNCTION, reinterpret_cast<parameter_value>(func)); }
		static parameter make_rounding(float_rounding_mode mode) { assert(mode >= ROUND_TRUNC && mode <= ROUND_DEFAULT); return parameter(PTYPE_ROUNDING, mode); }

@ -331,7 +322,6 @@ namespace uml
		u64 immediate() const { assert(m_type == PTYPE_IMMEDIATE); return m_value; }
		int ireg() const { assert(m_type == PTYPE_INT_REGISTER); assert(m_value >= REG_I0 && m_value < REG_I_END); return m_value; }
		int freg() const { assert(m_type == PTYPE_FLOAT_REGISTER); assert(m_value >= REG_F0 && m_value < REG_F_END); return m_value; }
		int vreg() const { assert(m_type == PTYPE_VECTOR_REGISTER); assert(m_value >= REG_V0 && m_value < REG_V_END); return m_value; }
		int mapvar() const { assert(m_type == PTYPE_MAPVAR); assert(m_value >= MAPVAR_M0 && m_value < MAPVAR_END); return m_value; }
		void *memory() const { assert(m_type == PTYPE_MEMORY); return reinterpret_cast<void *>(m_value); }
		operand_size size() const { assert(m_type == PTYPE_SIZE || m_type == PTYPE_SIZE_SCALE || m_type == PTYPE_SIZE_SPACE); return operand_size(m_value & 15); }

@ -347,7 +337,6 @@ namespace uml
		constexpr bool is_immediate() const { return m_type == PTYPE_IMMEDIATE; }
		constexpr bool is_int_register() const { return m_type == PTYPE_INT_REGISTER; }
		constexpr bool is_float_register() const { return m_type == PTYPE_FLOAT_REGISTER; }
		constexpr bool is_vector_register() const { return m_type == PTYPE_VECTOR_REGISTER; }
		constexpr bool is_mapvar() const { return m_type == PTYPE_MAPVAR; }
		constexpr bool is_memory() const { return m_type == PTYPE_MEMORY; }
		constexpr bool is_size() const { return m_type == PTYPE_SIZE; }

@ -621,7 +610,6 @@ namespace uml
	// global inline functions to specify a register parameter by index
	inline parameter ireg(int n) { return parameter::make_ireg(REG_I0 + n); }
	inline parameter freg(int n) { return parameter::make_freg(REG_F0 + n); }
	inline parameter vreg(int n) { return parameter::make_vreg(REG_V0 + n); }
	inline parameter mapvar(int n) { return parameter::make_mapvar(MAPVAR_M0 + n); }

	// global inline functions to define memory parameters

@ -650,17 +638,6 @@ namespace uml
	const parameter F8(parameter::make_freg(REG_F0 + 8));
	const parameter F9(parameter::make_freg(REG_F0 + 9));

	const parameter V0(parameter::make_vreg(REG_V0 + 0));
	const parameter V1(parameter::make_vreg(REG_V0 + 1));
	const parameter V2(parameter::make_vreg(REG_V0 + 2));
	const parameter V3(parameter::make_vreg(REG_V0 + 3));
	const parameter V4(parameter::make_vreg(REG_V0 + 4));
	const parameter V5(parameter::make_vreg(REG_V0 + 5));
	const parameter V6(parameter::make_vreg(REG_V0 + 6));
	const parameter V7(parameter::make_vreg(REG_V0 + 7));
	const parameter V8(parameter::make_vreg(REG_V0 + 8));
	const parameter V9(parameter::make_vreg(REG_V0 + 9));

	const parameter M0(parameter::make_mapvar(MAPVAR_M0 + 0));
	const parameter M1(parameter::make_mapvar(MAPVAR_M0 + 1));
	const parameter M2(parameter::make_mapvar(MAPVAR_M0 + 2));
|
||||
|