cpu/drcbex64.cpp: Directly dispatch memory accesses no larger than native width to specific handlers. (#13325)
emu/emumem_aspace.cpp: Got rid of static accessors structure, added a helper for obtaining info for dispatching specific accessors.
parent f92cb92a42
commit b7a80b1704
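
In rough terms: the x64 back-end previously always went through the per-space resolved accessors (read_byte, read_word, ...); after this change, an access no larger than the bus's native width indexes the space's handler dispatch table and calls the handler's member function directly. A self-contained toy model of that fast path (all names hypothetical, not MAME's actual API):

    #include <cstdint>

    // Toy model: each address space keeps a table of handler objects indexed
    // by the high address bits; a "specific" read walks the table and invokes
    // the handler directly instead of an address_space virtual accessor.
    struct handler { virtual std::uint32_t read(std::uint32_t offset, std::uint32_t mem_mask) const = 0; };

    struct specific_info
    {
        handler const *const *dispatch; // dispatch table, 1 << (address_width - low_bits) entries
        unsigned low_bits;              // address bits resolved inside a single handler
        std::uint32_t addr_mask;        // valid address bits, minus the sub-native bits
    };

    std::uint32_t specific_read(specific_info const &info, std::uint32_t addr, std::uint32_t mem_mask)
    {
        std::uint32_t const masked = addr & info.addr_mask;
        return info.dispatch[masked >> info.low_bits]->read(masked, mem_mask); // direct handler call
    }
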
@@ -570,14 +570,6 @@ configuration { "gmake or ninja" }
 dofile ("toolchain.lua")
 
-if _OPTIONS["targetos"]=="windows" then
-	configuration { "x64" }
-		defines {
-			"X64_WINDOWS_ABI",
-		}
-	configuration { }
-end
-
 -- Avoid error when invoking genie --help.
 if (_ACTION == nil) then return false end
 
 
@@ -711,11 +711,14 @@ drcbe_x64::drcbe_x64(drcuml_state &drcuml, device_t &device, drc_cache &cache, u
 	m_drcmap_get_value.set(m_map, &drc_map_variables::get_value);
 	if (!m_drcmap_get_value)
 		throw emu_fatalerror("Error resolving map variable get value function!\n");
-	m_resolved_accessors.resize(m_space.size());
+	m_memory_accessors.resize(m_space.size());
 	for (int space = 0; m_space.size() > space; ++space)
 	{
 		if (m_space[space])
-			m_resolved_accessors[space].set(*m_space[space]);
+		{
+			m_memory_accessors[space].resolved.set(*m_space[space]);
+			m_memory_accessors[space].specific = m_space[space]->specific_accessors();
+		}
 	}
 
 	// build the opcode table (static but it doesn't hurt to regenerate it)
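
A note on the have_specific tests used throughout the op_read/op_readm/op_write/op_writem hunks below: the fast path is only taken when specific_accessors() managed to extract a callable target. Restated as a standalone predicate (field names mirror the specific_access_info::side struct added in emumem.h further down):

    #include <cstdint>

    struct side_info { std::uintptr_t function; bool is_virtual; };

    // the fast path applies only when member-function extraction yielded either
    // a vtable slot (is_virtual) or a non-null direct entry point (function)
    inline bool have_specific(side_info const &s)
    {
        return (s.function != std::uintptr_t(0)) || s.is_virtual;
    }
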
@@ -2525,40 +2528,149 @@ void drcbe_x64::op_read(Assembler &a, const instruction &inst)
 	Gp dstreg = dstp.select_register(eax);
 
 	// set up a call to the read handler
-	auto &resolved = m_resolved_accessors[spacesizep.space()];
-	mov_reg_param(a, Gpd(REG_PARAM2), addrp);                               // mov   param2,addrp
-	if (spacesizep.size() == SIZE_BYTE)
+	auto const &accessors = m_memory_accessors[spacesizep.space()];
+	bool const have_specific = (uintptr_t(nullptr) != accessors.specific.read.function) || accessors.specific.read.is_virtual;
+	auto const addr_mask = make_bitmask<uint32_t>(accessors.specific.address_width) & ~make_bitmask<uint32_t>(accessors.specific.native_mask_bits);
+	mov_reg_param(a, Gpd(REG_PARAM2), addrp);
+	if (have_specific && ((1 << spacesizep.size()) == accessors.specific.native_bytes))
 	{
-		mov_r64_imm(a, Gpq(REG_PARAM1), resolved.read_byte.obj);            // mov   param1,space
-		smart_call_r64(a, resolved.read_byte.func, rax);                    // call  read_byte
-		a.movzx(dstreg, al);                                                // movzx dstreg,al
+		// set default mem_mask
+		if (accessors.specific.native_bytes <= 4)
+			a.mov(Gpd(REG_PARAM3), make_bitmask<uint32_t>(accessors.specific.native_bytes << 3));
+		else
+			a.mov(Gpq(REG_PARAM3), make_bitmask<uint64_t>(accessors.specific.native_bytes << 3));
+
+		a.and_(Gpd(REG_PARAM2), imm(addr_mask));                            // apply address mask
+		mov_r64_imm(a, rax, uintptr_t(accessors.specific.read.dispatch));   // load dispatch table pointer
+		if (accessors.specific.low_bits)
+		{
+			a.mov(r10d, Gpd(REG_PARAM2));                                   // save masked address
+			a.shr(Gpd(REG_PARAM2), accessors.specific.low_bits);            // shift off low bits
+		}
+		a.mov(Gpq(REG_PARAM1), ptr(rax, Gpq(REG_PARAM2), 3));               // load dispatch table entry
+		if (accessors.specific.low_bits)
+			a.mov(Gpd(REG_PARAM2), r10d);                                   // restore masked address
+
+		if (accessors.specific.read.is_virtual)
+			a.mov(rax, ptr(Gpq(REG_PARAM1), accessors.specific.read.displacement)); // load vtable pointer
+		if (accessors.specific.read.displacement)
+			a.add(Gpq(REG_PARAM1), accessors.specific.read.displacement);   // apply this pointer offset
+		if (accessors.specific.read.is_virtual)
+			a.call(ptr(rax, accessors.specific.read.function));             // call virtual member function
+		else
+			smart_call_r64(a, (x86code *)accessors.specific.read.function, rax); // call non-virtual member function
+	}
+	else if (have_specific && ((1 << spacesizep.size()) < accessors.specific.native_bytes))
+	{
+		// if the destination register isn't a non-volatile register, we can't use it to save the shift count
+		bool need_save = (dstreg != ebx) && (dstreg != r12d) && (dstreg != r13d) && (dstreg != r14d) && (dstreg != r15d);
+
+		if (need_save)
+			a.mov(ptr(rsp, 32), Gpq(int_register_map[0]));                  // save I0 register
+		if ((accessors.specific.native_bytes <= 4) || (spacesizep.size() != SIZE_QWORD))
+			a.mov(Gpd(REG_PARAM3), imm(make_bitmask<uint32_t>(8 << spacesizep.size()))); // set default mem_mask
+		else
+			a.mov(Gpq(REG_PARAM3), imm(make_bitmask<uint64_t>(8 << spacesizep.size()))); // set default mem_mask
+		a.mov(ecx, Gpd(REG_PARAM2));                                        // copy address
+		a.and_(Gpd(REG_PARAM2), imm(addr_mask));                            // apply address mask
+
+		int const shift = m_space[spacesizep.space()]->addr_shift() - 3;
+		if (m_space[spacesizep.space()]->endianness() != ENDIANNESS_LITTLE)
+			a.not_(ecx);                                                    // swizzle address for big Endian spaces
+		mov_r64_imm(a, rax, uintptr_t(accessors.specific.read.dispatch));   // load dispatch table pointer
+		if (shift < 0)
+			a.shl(ecx, imm(-shift));                                        // convert address to bits (left shift)
+		else if (shift > 0)
+			a.shr(ecx, imm(shift));                                         // convert address to bits (right shift)
+		if (accessors.specific.low_bits)
+		{
+			a.mov(r10d, Gpd(REG_PARAM2));                                   // copy masked address
+			a.shr(Gpd(REG_PARAM2), accessors.specific.low_bits);            // shift off low bits
+		}
+		a.and_(ecx, imm((accessors.specific.native_bytes - (1 << spacesizep.size())) << 3)); // mask bit address
+		a.mov(rax, ptr(rax, Gpq(REG_PARAM2), 3));                           // load dispatch table entry
+		if (accessors.specific.low_bits)
+			a.mov(Gpd(REG_PARAM2), r10d);                                   // restore masked address
+		if (need_save)
+			a.mov(Gpd(int_register_map[0]), ecx);                           // save masked bit address
+		else
+			a.mov(dstreg.r32(), ecx);                                       // save masked bit address
+		if (accessors.specific.native_bytes <= 4)
+			a.shl(Gpd(REG_PARAM3), cl);                                     // shift mem_mask by masked bit address
+		else
+			a.shl(Gpq(REG_PARAM3), cl);                                     // shift mem_mask by masked bit address
+
+		// need to do this after finished with CL as REG_PARAM1 is C on Windows
+		a.mov(Gpq(REG_PARAM1), rax);
+		if (accessors.specific.read.is_virtual)
+			a.mov(rax, ptr(rax, accessors.specific.read.displacement));     // load vtable pointer
+		if (accessors.specific.read.displacement)
+			a.add(Gpq(REG_PARAM1), accessors.specific.read.displacement);   // apply this pointer offset
+		if (accessors.specific.read.is_virtual)
+			a.call(ptr(rax, accessors.specific.read.function));             // call virtual member function
+		else
+			smart_call_r64(a, (x86code *)accessors.specific.read.function, rax); // call non-virtual member function
+
+		if (need_save)
+		{
+			a.mov(ecx, Gpd(int_register_map[0]));                           // restore masked bit address
+			a.mov(Gpq(int_register_map[0]), ptr(rsp, 32));                  // restore I0 register
+		}
+		else
+		{
+			a.mov(ecx, dstreg.r32());                                       // restore masked bit address
+		}
+		if (accessors.specific.native_bytes <= 4)
+			a.shr(eax, cl);                                                 // shift result by masked bit address
+		else
+			a.shr(rax, cl);                                                 // shift result by masked bit address
+	}
+	else if (spacesizep.size() == SIZE_BYTE)
+	{
+		mov_r64_imm(a, Gpq(REG_PARAM1), accessors.resolved.read_byte.obj);
+		smart_call_r64(a, accessors.resolved.read_byte.func, rax);
 	}
 	else if (spacesizep.size() == SIZE_WORD)
 	{
-		mov_r64_imm(a, Gpq(REG_PARAM1), resolved.read_word.obj);            // mov   param1,space
-		smart_call_r64(a, resolved.read_word.func, rax);                    // call  read_word
-		a.movzx(dstreg, ax);                                                // movzx dstreg,ax
+		mov_r64_imm(a, Gpq(REG_PARAM1), accessors.resolved.read_word.obj);
+		smart_call_r64(a, accessors.resolved.read_word.func, rax);
 	}
 	else if (spacesizep.size() == SIZE_DWORD)
 	{
-		mov_r64_imm(a, Gpq(REG_PARAM1), resolved.read_dword.obj);           // mov   param1,space
-		smart_call_r64(a, resolved.read_dword.func, rax);                   // call  read_dword
-		if (dstreg != eax || inst.size() == 8)
-			a.mov(dstreg, eax);                                             // mov   dstreg,eax
+		mov_r64_imm(a, Gpq(REG_PARAM1), accessors.resolved.read_dword.obj);
+		smart_call_r64(a, accessors.resolved.read_dword.func, rax);
 	}
 	else if (spacesizep.size() == SIZE_QWORD)
 	{
+		mov_r64_imm(a, Gpq(REG_PARAM1), accessors.resolved.read_qword.obj);
+		smart_call_r64(a, accessors.resolved.read_qword.func, rax);
+	}
+
+	// move or zero-extend result if necessary
+	if (spacesizep.size() == SIZE_BYTE)
+	{
+		a.movzx(dstreg, al);
+	}
+	else if (spacesizep.size() == SIZE_WORD)
+	{
+		a.movzx(dstreg, ax);
+	}
+	else if (spacesizep.size() == SIZE_DWORD)
+	{
+		if (dstreg != eax || inst.size() == 8)
+			a.mov(dstreg, eax);
+	}
+	else if (spacesizep.size() == SIZE_QWORD)
+	{
-		mov_r64_imm(a, Gpq(REG_PARAM1), resolved.read_qword.obj);           // mov   param1,space
-		smart_call_r64(a, resolved.read_qword.func, rax);                   // call  read_qword
-		if (dstreg != eax)
-			a.mov(dstreg.r64(), rax);                                       // mov   dstreg,rax
+		a.mov(dstreg.r64(), rax);
 	}
 
 	// store result
 	if (inst.size() == 4)
-		mov_param_reg(a, dstp, dstreg);                                     // mov   dstp,dstreg
+		mov_param_reg(a, dstp, dstreg);
 	else
-		mov_param_reg(a, dstp, dstreg.r64());                               // mov   dstp,dstreg
+		mov_param_reg(a, dstp, dstreg.r64());
 }
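
For reference, the sub-native branch above is the classic narrow-read-on-wide-bus sequence, just emitted as machine code: derive a bit shift from the low address bits, shift the default mem_mask into lane position, call the native-width handler, then shift the result back down. A compilable model under assumed parameters (32-bit little-endian bus, zero address shift; read_native stands in for the dispatched handler call):

    #include <cstdint>

    std::uint32_t read_native(std::uint32_t addr, std::uint32_t mem_mask); // stand-in for the dispatched handler

    std::uint8_t read_byte_via_native(std::uint32_t addr)
    {
        unsigned const bit_shift = (addr & 3) << 3;                  // "mask bit address": (4 - 1) << 3 limits it to 0..24
        std::uint32_t const mem_mask = std::uint32_t(0xff) << bit_shift; // default mem_mask, shifted into lane position
        std::uint32_t const word = read_native(addr & ~3u, mem_mask);    // masked (aligned) address
        return std::uint8_t(word >> bit_shift);                      // the a.shr(eax, cl) epilogue
    }

For big-endian spaces the generated code first complements the address (a.not_(ecx)), which flips the lane order before the same shift math.
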
@@ -2584,44 +2696,143 @@ void drcbe_x64::op_readm(Assembler &a, const instruction &inst)
 	Gp dstreg = dstp.select_register(eax);
 
 	// set up a call to the read handler
-	auto &resolved = m_resolved_accessors[spacesizep.space()];
-	mov_reg_param(a, Gpd(REG_PARAM2), addrp);                               // mov   param2,addrp
+	auto const &accessors = m_memory_accessors[spacesizep.space()];
+	bool const have_specific = (uintptr_t(nullptr) != accessors.specific.read.function) || accessors.specific.read.is_virtual;
+	auto const addr_mask = make_bitmask<uint32_t>(accessors.specific.address_width) & ~make_bitmask<uint32_t>(accessors.specific.native_mask_bits);
+	mov_reg_param(a, Gpd(REG_PARAM2), addrp);
 	if (spacesizep.size() != SIZE_QWORD)
-		mov_reg_param(a, Gpd(REG_PARAM3), maskp);                           // mov   param3,maskp
+		mov_reg_param(a, Gpd(REG_PARAM3), maskp);
 	else
-		mov_reg_param(a, Gpq(REG_PARAM3), maskp);                           // mov   param3,maskp
-	if (spacesizep.size() == SIZE_BYTE)
+		mov_reg_param(a, Gpq(REG_PARAM3), maskp);
+	if (have_specific && ((1 << spacesizep.size()) == accessors.specific.native_bytes))
 	{
-		mov_r64_imm(a, Gpq(REG_PARAM1), resolved.read_byte_masked.obj);     // mov   param1,space
-		smart_call_r64(a, resolved.read_byte_masked.func, rax);             // call  read_byte_masked
-		a.movzx(dstreg, al);                                                // movzx dstreg,al
+		a.and_(Gpd(REG_PARAM2), imm(addr_mask));                            // apply address mask
+		mov_r64_imm(a, rax, uintptr_t(accessors.specific.read.dispatch));   // load dispatch table pointer
+		if (accessors.specific.low_bits)
+		{
+			a.mov(r10d, Gpd(REG_PARAM2));                                   // save masked address
+			a.shr(Gpd(REG_PARAM2), accessors.specific.low_bits);            // shift off low bits
+		}
+		a.mov(Gpq(REG_PARAM1), ptr(rax, Gpq(REG_PARAM2), 3));               // load dispatch table entry
+		if (accessors.specific.low_bits)
+			a.mov(Gpd(REG_PARAM2), r10d);                                   // restore masked address
+
+		if (accessors.specific.read.is_virtual)
+			a.mov(rax, ptr(Gpq(REG_PARAM1), accessors.specific.read.displacement)); // load vtable pointer
+		if (accessors.specific.read.displacement)
+			a.add(Gpq(REG_PARAM1), accessors.specific.read.displacement);   // apply this pointer offset
+		if (accessors.specific.read.is_virtual)
+			a.call(ptr(rax, accessors.specific.read.function));             // call virtual member function
+		else
+			smart_call_r64(a, (x86code *)accessors.specific.read.function, rax); // call non-virtual member function
 	}
+	else if (have_specific && ((1 << spacesizep.size()) < accessors.specific.native_bytes))
+	{
+		// if the destination register isn't a non-volatile register, we can't use it to save the shift count
+		bool need_save = (dstreg != ebx) && (dstreg != r12d) && (dstreg != r13d) && (dstreg != r14d) && (dstreg != r15d);
+
+		if (need_save)
+			a.mov(ptr(rsp, 32), Gpq(int_register_map[0]));                  // save I0 register
+		a.mov(ecx, Gpd(REG_PARAM2));                                        // copy address
+		a.and_(Gpd(REG_PARAM2), imm(addr_mask));                            // apply address mask
+
+		int const shift = m_space[spacesizep.space()]->addr_shift() - 3;
+		if (m_space[spacesizep.space()]->endianness() != ENDIANNESS_LITTLE)
+			a.not_(ecx);                                                    // swizzle address for big Endian spaces
+		mov_r64_imm(a, rax, uintptr_t(accessors.specific.read.dispatch));   // load dispatch table pointer
+		if (shift < 0)
+			a.shl(ecx, imm(-shift));                                        // convert address to bits (left shift)
+		else if (shift > 0)
+			a.shr(ecx, imm(shift));                                         // convert address to bits (right shift)
+		if (accessors.specific.low_bits)
+		{
+			a.mov(r10d, Gpd(REG_PARAM2));                                   // copy masked address
+			a.shr(Gpd(REG_PARAM2), accessors.specific.low_bits);            // shift off low bits
+		}
+		a.mov(rax, ptr(rax, Gpq(REG_PARAM2), 3));                           // load dispatch table entry
+		a.and_(ecx, imm((accessors.specific.native_bytes - (1 << spacesizep.size())) << 3)); // mask bit address
+		if (accessors.specific.low_bits)
+			a.mov(Gpd(REG_PARAM2), r10d);                                   // restore masked address
+		if (need_save)
+			a.mov(Gpd(int_register_map[0]), ecx);                           // save masked bit address
+		else
+			a.mov(dstreg.r32(), ecx);                                       // save masked bit address
+		if (accessors.specific.read.is_virtual)
+			a.mov(r10, ptr(rax, accessors.specific.read.displacement));     // load vtable pointer
+		if (accessors.specific.read.displacement)
+			a.add(rax, accessors.specific.read.displacement);               // apply this pointer offset
+		if (accessors.specific.native_bytes <= 4)
+			a.shl(Gpd(REG_PARAM3), cl);                                     // shift mem_mask by masked bit address
+		else
+			a.shl(Gpq(REG_PARAM3), cl);                                     // shift mem_mask by masked bit address
+
+		// need to do this after finished with CL as REG_PARAM1 is C on Windows
+		a.mov(Gpq(REG_PARAM1), rax);
+		if (accessors.specific.read.is_virtual)
+			a.call(ptr(r10, accessors.specific.read.function));             // call virtual member function
+		else
+			smart_call_r64(a, (x86code *)accessors.specific.read.function, rax); // call non-virtual member function
+
+		if (need_save)
+		{
+			a.mov(ecx, Gpd(int_register_map[0]));                           // restore masked bit address
+			a.mov(Gpq(int_register_map[0]), ptr(rsp, 32));                  // restore I0 register
+		}
+		else
+		{
+			a.mov(ecx, dstreg.r32());                                       // restore masked bit address
+		}
+		if (accessors.specific.native_bytes <= 4)
+			a.shr(eax, cl);                                                 // shift result by masked bit address
+		else
+			a.shr(rax, cl);                                                 // shift result by masked bit address
+	}
+	else if (spacesizep.size() == SIZE_BYTE)
+	{
+		mov_r64_imm(a, Gpq(REG_PARAM1), accessors.resolved.read_byte_masked.obj);
+		smart_call_r64(a, accessors.resolved.read_byte_masked.func, rax);
+	}
 	else if (spacesizep.size() == SIZE_WORD)
 	{
-		mov_r64_imm(a, Gpq(REG_PARAM1), resolved.read_word_masked.obj);     // mov   param1,space
-		smart_call_r64(a, resolved.read_word_masked.func, rax);             // call  read_word_masked
-		a.movzx(dstreg, ax);                                                // movzx dstreg,ax
+		mov_r64_imm(a, Gpq(REG_PARAM1), accessors.resolved.read_word_masked.obj);
+		smart_call_r64(a, accessors.resolved.read_word_masked.func, rax);
 	}
 	else if (spacesizep.size() == SIZE_DWORD)
 	{
-		mov_r64_imm(a, Gpq(REG_PARAM1), resolved.read_dword_masked.obj);    // mov   param1,space
-		smart_call_r64(a, resolved.read_dword_masked.func, rax);            // call  read_dword_masked
-		if (dstreg != eax || inst.size() == 8)
-			a.mov(dstreg, eax);                                             // mov   dstreg,eax
+		mov_r64_imm(a, Gpq(REG_PARAM1), accessors.resolved.read_dword_masked.obj);
+		smart_call_r64(a, accessors.resolved.read_dword_masked.func, rax);
 	}
 	else if (spacesizep.size() == SIZE_QWORD)
 	{
+		mov_r64_imm(a, Gpq(REG_PARAM1), accessors.resolved.read_qword_masked.obj);
+		smart_call_r64(a, accessors.resolved.read_qword_masked.func, rax);
+	}
+
+	// move or zero-extend result if necessary
+	if (spacesizep.size() == SIZE_BYTE)
+	{
+		a.movzx(dstreg, al);
+	}
+	else if (spacesizep.size() == SIZE_WORD)
+	{
+		a.movzx(dstreg, ax);
+	}
+	else if (spacesizep.size() == SIZE_DWORD)
+	{
+		if (dstreg != eax || inst.size() == 8)
+			a.mov(dstreg, eax);
+	}
+	else if (spacesizep.size() == SIZE_QWORD)
+	{
-		mov_r64_imm(a, Gpq(REG_PARAM1), resolved.read_qword_masked.obj);    // mov   param1,space
-		smart_call_r64(a, resolved.read_qword_masked.func, rax);            // call  read_qword_masked
-		if (dstreg != eax)
-			a.mov(dstreg.r64(), rax);                                       // mov   dstreg,rax
+		a.mov(dstreg.r64(), rax);
 	}
 
 	// store result
 	if (inst.size() == 4)
-		mov_param_reg(a, dstp, dstreg);                                     // mov   dstp,dstreg
+		mov_param_reg(a, dstp, dstreg);
 	else
-		mov_param_reg(a, dstp, dstreg.r64());                               // mov   dstp,dstreg
+		mov_param_reg(a, dstp, dstreg.r64());
 }
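
op_readm differs from op_read mainly in where mem_mask comes from: the UML instruction supplies it, and in the sub-native branch the caller's mask is shifted into lane position exactly like the synthesized default one. A compilable restatement under the same assumptions as before (32-bit little-endian bus, zero address shift; read_native stands in for the dispatched handler call):

    #include <cstdint>

    std::uint32_t read_native(std::uint32_t addr, std::uint32_t mem_mask); // stand-in for the dispatched handler

    std::uint16_t read_word_masked_via_native(std::uint32_t addr, std::uint16_t caller_mask)
    {
        unsigned const bit_shift = (addr & 2) << 3;                   // bit address masked with (4 - 2) << 3: 0 or 16
        std::uint32_t const mem_mask = std::uint32_t(caller_mask) << bit_shift; // the a.shl(Gpd(REG_PARAM3), cl) step
        std::uint32_t const word = read_native(addr & ~3u, mem_mask);
        return std::uint16_t(word >> bit_shift);                      // the a.shr(eax, cl) epilogue
    }
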
@@ -2643,31 +2854,113 @@ void drcbe_x64::op_write(Assembler &a, const instruction &inst)
 	assert(spacesizep.is_size_space());
 
 	// set up a call to the write handler
-	auto &resolved = m_resolved_accessors[spacesizep.space()];
-	mov_reg_param(a, Gpd(REG_PARAM2), addrp);                               // mov   param2,addrp
+	auto const &accessors = m_memory_accessors[spacesizep.space()];
+	bool const have_specific = (uintptr_t(nullptr) != accessors.specific.write.function) || accessors.specific.write.is_virtual;
+	auto const addr_mask = make_bitmask<uint32_t>(accessors.specific.address_width) & ~make_bitmask<uint32_t>(accessors.specific.native_mask_bits);
+	mov_reg_param(a, Gpd(REG_PARAM2), addrp);
 	if (spacesizep.size() != SIZE_QWORD)
-		mov_reg_param(a, Gpd(REG_PARAM3), srcp);                            // mov   param3,srcp
+		mov_reg_param(a, Gpd(REG_PARAM3), srcp);
 	else
-		mov_reg_param(a, Gpq(REG_PARAM3), srcp);                            // mov   param3,srcp
-	if (spacesizep.size() == SIZE_BYTE)
+		mov_reg_param(a, Gpq(REG_PARAM3), srcp);
+	if (have_specific && ((1 << spacesizep.size()) == accessors.specific.native_bytes))
 	{
-		mov_r64_imm(a, Gpq(REG_PARAM1), resolved.write_byte.obj);           // mov   param1,space
-		smart_call_r64(a, resolved.write_byte.func, rax);                   // call  write_byte
+		// set default mem_mask
+		if (accessors.specific.native_bytes <= 4)
+			a.mov(Gpd(REG_PARAM4), make_bitmask<uint32_t>(accessors.specific.native_bytes << 3));
+		else
+			a.mov(Gpq(REG_PARAM4), make_bitmask<uint64_t>(accessors.specific.native_bytes << 3));
+
+		a.and_(Gpd(REG_PARAM2), imm(addr_mask));                            // apply address mask
+		mov_r64_imm(a, rax, uintptr_t(accessors.specific.write.dispatch));  // load dispatch table pointer
+		if (accessors.specific.low_bits)
+		{
+			a.mov(r10d, Gpd(REG_PARAM2));                                   // save masked address
+			a.shr(Gpd(REG_PARAM2), accessors.specific.low_bits);            // shift off low bits
+		}
+		a.mov(Gpq(REG_PARAM1), ptr(rax, Gpq(REG_PARAM2), 3));               // load dispatch table entry
+		if (accessors.specific.low_bits)
+			a.mov(Gpd(REG_PARAM2), r10d);                                   // restore masked address
+
+		if (accessors.specific.write.is_virtual)
+			a.mov(rax, ptr(Gpq(REG_PARAM1), accessors.specific.write.displacement)); // load vtable pointer
+		if (accessors.specific.write.displacement)
+			a.add(Gpq(REG_PARAM1), accessors.specific.write.displacement);  // apply this pointer offset
+		if (accessors.specific.write.is_virtual)
+			a.call(ptr(rax, accessors.specific.write.function));            // call virtual member function
+		else
+			smart_call_r64(a, (x86code *)accessors.specific.write.function, rax); // call non-virtual member function
 	}
+	else if (have_specific && ((1 << spacesizep.size()) < accessors.specific.native_bytes))
+	{
+		a.mov(ecx, Gpd(REG_PARAM2));                                        // copy address
+		a.and_(Gpd(REG_PARAM2), imm(addr_mask));                            // apply address mask
+
+		int const shift = m_space[spacesizep.space()]->addr_shift() - 3;
+		if (m_space[spacesizep.space()]->endianness() != ENDIANNESS_LITTLE)
+			a.not_(ecx);                                                    // swizzle address for big Endian spaces
+		mov_r64_imm(a, rax, uintptr_t(accessors.specific.write.dispatch));  // load dispatch table pointer
+		if (shift < 0)
+			a.shl(ecx, imm(-shift));                                        // convert address to bits (left shift)
+		else if (shift > 0)
+			a.shr(ecx, imm(shift));                                         // convert address to bits (right shift)
+		if (accessors.specific.low_bits)
+		{
+			a.mov(r10d, Gpd(REG_PARAM2));                                   // copy masked address
+			a.shr(Gpd(REG_PARAM2), accessors.specific.low_bits);            // shift off low bits
+		}
+		a.mov(rax, ptr(rax, Gpq(REG_PARAM2), 3));                           // load dispatch table entry
+		a.and_(ecx, imm((accessors.specific.native_bytes - (1 << spacesizep.size())) << 3)); // mask bit address
+		if ((accessors.specific.native_bytes <= 4) || (spacesizep.size() != SIZE_QWORD))
+			a.mov(r11d, imm(make_bitmask<uint32_t>(8 << spacesizep.size()))); // set default mem_mask
+		else
+			a.mov(r11, imm(make_bitmask<uint64_t>(8 << spacesizep.size()))); // set default mem_mask
+		if (accessors.specific.low_bits)
+			a.mov(Gpd(REG_PARAM2), r10d);                                   // restore masked address
+		if (accessors.specific.write.is_virtual)
+			a.mov(r10, ptr(rax, accessors.specific.write.displacement));    // load vtable pointer
+		if (accessors.specific.write.displacement)
+			a.add(rax, accessors.specific.write.displacement);              // apply this pointer offset
+		if (accessors.specific.native_bytes <= 4)
+		{
+			a.shl(r11d, cl);                                                // shift mem_mask by masked bit address
+			a.shl(Gpd(REG_PARAM3), cl);                                     // shift data by masked bit address
+		}
+		else
+		{
+			a.shl(r11, cl);                                                 // shift mem_mask by masked bit address
+			a.shl(Gpq(REG_PARAM3), cl);                                     // shift data by masked bit address
+		}
+
+		// need to do this after finished with CL as REG_PARAM1 is C on Windows and REG_PARAM4 is C on SysV
+		a.mov(Gpq(REG_PARAM1), rax);
+		if (accessors.specific.native_bytes <= 4)
+			a.mov(Gpd(REG_PARAM4), r11d);                                   // copy mem_mask to parameter 4 (ECX on SysV)
+		else
+			a.mov(Gpq(REG_PARAM4), r11);                                    // copy mem_mask to parameter 4 (RCX on SysV)
+		if (accessors.specific.write.is_virtual)
+			a.call(ptr(r10, accessors.specific.write.function));            // call virtual member function
+		else
+			smart_call_r64(a, (x86code *)accessors.specific.write.function, rax); // call non-virtual member function
+	}
+	else if (spacesizep.size() == SIZE_BYTE)
+	{
+		mov_r64_imm(a, Gpq(REG_PARAM1), accessors.resolved.write_byte.obj);
+		smart_call_r64(a, accessors.resolved.write_byte.func, rax);
+	}
 	else if (spacesizep.size() == SIZE_WORD)
 	{
-		mov_r64_imm(a, Gpq(REG_PARAM1), resolved.write_word.obj);           // mov   param1,space
-		smart_call_r64(a, resolved.write_word.func, rax);                   // call  write_word
+		mov_r64_imm(a, Gpq(REG_PARAM1), accessors.resolved.write_word.obj);
+		smart_call_r64(a, accessors.resolved.write_word.func, rax);
 	}
 	else if (spacesizep.size() == SIZE_DWORD)
 	{
-		mov_r64_imm(a, Gpq(REG_PARAM1), resolved.write_dword.obj);          // mov   param1,space
-		smart_call_r64(a, resolved.write_dword.func, rax);                  // call  write_dword
+		mov_r64_imm(a, Gpq(REG_PARAM1), accessors.resolved.write_dword.obj);
+		smart_call_r64(a, accessors.resolved.write_dword.func, rax);
 	}
 	else if (spacesizep.size() == SIZE_QWORD)
 	{
-		mov_r64_imm(a, Gpq(REG_PARAM1), resolved.write_qword.obj);          // mov   param1,space
-		smart_call_r64(a, resolved.write_qword.func, rax);                  // call  write_qword
+		mov_r64_imm(a, Gpq(REG_PARAM1), accessors.resolved.write_qword.obj);
+		smart_call_r64(a, accessors.resolved.write_qword.func, rax);
 	}
 }
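
On the write side both the data and mem_mask are shifted into lane position, and the handler is expected to merge only the masked lanes. The extra register choreography (r11 holding the mask, the deferred REG_PARAM1/REG_PARAM4 moves) exists because CL is needed for the shifts while RCX doubles as a parameter register: parameter 1 on Windows, parameter 4 on SysV, as the diff's own comments note. A compilable model under the same assumed parameters as before:

    #include <cstdint>

    void write_native(std::uint32_t addr, std::uint32_t data, std::uint32_t mem_mask); // stand-in for the dispatched handler

    void write_byte_via_native(std::uint32_t addr, std::uint8_t data)
    {
        unsigned const bit_shift = (addr & 3) << 3;      // masked bit address
        write_native(addr & ~3u,
                std::uint32_t(data) << bit_shift,        // data shifted into lane position
                std::uint32_t(0xff) << bit_shift);       // default mem_mask shifted the same way
    }
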
@@ -2691,37 +2984,114 @@ void drcbe_x64::op_writem(Assembler &a, const instruction &inst)
 	assert(spacesizep.is_size_space());
 
 	// set up a call to the write handler
-	auto &resolved = m_resolved_accessors[spacesizep.space()];
-	mov_reg_param(a, Gpd(REG_PARAM2), addrp);                               // mov   param2,addrp
+	auto const &accessors = m_memory_accessors[spacesizep.space()];
+	bool const have_specific = (uintptr_t(nullptr) != accessors.specific.write.function) || accessors.specific.write.is_virtual;
+	auto const addr_mask = make_bitmask<uint32_t>(accessors.specific.address_width) & ~make_bitmask<uint32_t>(accessors.specific.native_mask_bits);
+	mov_reg_param(a, Gpd(REG_PARAM2), addrp);
 	if (spacesizep.size() != SIZE_QWORD)
-	{
-		mov_reg_param(a, Gpd(REG_PARAM3), srcp);                            // mov   param3,srcp
-		mov_reg_param(a, Gpd(REG_PARAM4), maskp);                           // mov   param4,maskp
-	}
+		mov_reg_param(a, Gpd(REG_PARAM3), srcp);
 	else
-	{
-		mov_reg_param(a, Gpq(REG_PARAM3), srcp);                            // mov   param3,srcp
-		mov_reg_param(a, Gpq(REG_PARAM4), maskp);                           // mov   param4,maskp
-	}
-	if (spacesizep.size() == SIZE_BYTE)
+		mov_reg_param(a, Gpq(REG_PARAM3), srcp);
+	if (have_specific && ((1 << spacesizep.size()) == accessors.specific.native_bytes))
 	{
-		mov_r64_imm(a, Gpq(REG_PARAM1), resolved.write_byte_masked.obj);    // mov   param1,space
-		smart_call_r64(a, resolved.write_byte_masked.func, rax);            // call  write_byte_masked
+		if (spacesizep.size() != SIZE_QWORD)
+			mov_reg_param(a, Gpd(REG_PARAM4), maskp);                       // get mem_mask
+		else
+			mov_reg_param(a, Gpq(REG_PARAM4), maskp);                       // get mem_mask
+		a.and_(Gpd(REG_PARAM2), imm(addr_mask));                            // apply address mask
+
+		mov_r64_imm(a, rax, uintptr_t(accessors.specific.write.dispatch));  // load dispatch table pointer
+		if (accessors.specific.low_bits)
+		{
+			a.mov(r10d, Gpd(REG_PARAM2));                                   // save masked address
+			a.shr(Gpd(REG_PARAM2), accessors.specific.low_bits);            // shift off low bits
+		}
+		a.mov(Gpq(REG_PARAM1), ptr(rax, Gpq(REG_PARAM2), 3));               // load dispatch table entry
+		if (accessors.specific.low_bits)
+			a.mov(Gpd(REG_PARAM2), r10d);                                   // restore masked address
+
+		if (accessors.specific.write.is_virtual)
+			a.mov(rax, ptr(Gpq(REG_PARAM1), accessors.specific.write.displacement)); // load vtable pointer
+		if (accessors.specific.write.displacement)
+			a.add(Gpq(REG_PARAM1), accessors.specific.write.displacement);  // apply this pointer offset
+		if (accessors.specific.write.is_virtual)
+			a.call(ptr(rax, accessors.specific.write.function));            // call virtual member function
+		else
+			smart_call_r64(a, (x86code *)accessors.specific.write.function, rax); // call non-virtual member function
 	}
+	else if (have_specific && ((1 << spacesizep.size()) < accessors.specific.native_bytes))
+	{
+		a.mov(ecx, Gpd(REG_PARAM2));                                        // copy address
+		if (spacesizep.size() != SIZE_QWORD)
+			mov_reg_param(a, r11d, maskp);                                  // get mem_mask
+		else
+			mov_reg_param(a, r11, maskp);                                   // get mem_mask
+		a.and_(Gpd(REG_PARAM2), imm(addr_mask));                            // apply address mask
+
+		int const shift = m_space[spacesizep.space()]->addr_shift() - 3;
+		if (m_space[spacesizep.space()]->endianness() != ENDIANNESS_LITTLE)
+			a.not_(ecx);                                                    // swizzle address for big Endian spaces
+		mov_r64_imm(a, rax, uintptr_t(accessors.specific.write.dispatch));  // load dispatch table pointer
+		if (shift < 0)
+			a.shl(ecx, imm(-shift));                                        // convert address to bits (left shift)
+		else if (shift > 0)
+			a.shr(ecx, imm(shift));                                         // convert address to bits (right shift)
+		if (accessors.specific.low_bits)
+		{
+			a.mov(r10d, Gpd(REG_PARAM2));                                   // copy masked address
+			a.shr(Gpd(REG_PARAM2), accessors.specific.low_bits);            // shift off low bits
+		}
+		a.and_(ecx, imm((accessors.specific.native_bytes - (1 << spacesizep.size())) << 3)); // mask bit address
+		a.mov(rax, ptr(rax, Gpq(REG_PARAM2), 3));                           // load dispatch table entry
+		if (accessors.specific.low_bits)
+			a.mov(Gpd(REG_PARAM2), r10d);                                   // restore masked address
+		if (accessors.specific.native_bytes <= 4)
+		{
+			a.shl(r11d, cl);                                                // shift mem_mask by masked bit address
+			a.shl(Gpd(REG_PARAM3), cl);                                     // shift data by masked bit address
+			a.mov(Gpd(REG_PARAM4), r11d);                                   // copy mem_mask to parameter 4 (ECX on SysV)
+		}
+		else
+		{
+			a.shl(r11, cl);                                                 // shift mem_mask by masked bit address
+			a.shl(Gpq(REG_PARAM3), cl);                                     // shift data by masked bit address
+			a.mov(Gpq(REG_PARAM4), r11);                                    // copy mem_mask to parameter 4 (RCX on SysV)
+		}
+
+		// need to do this after finished with CL as REG_PARAM1 is C on Windows
+		a.mov(Gpq(REG_PARAM1), rax);
+		if (accessors.specific.write.is_virtual)
+			a.mov(rax, ptr(rax, accessors.specific.write.displacement));    // load vtable pointer
+		if (accessors.specific.write.displacement)
+			a.add(Gpq(REG_PARAM1), accessors.specific.write.displacement);  // apply this pointer offset
+		if (accessors.specific.write.is_virtual)
+			a.call(ptr(rax, accessors.specific.write.function));            // call virtual member function
+		else
+			smart_call_r64(a, (x86code *)accessors.specific.write.function, rax); // call non-virtual member function
+	}
+	else if (spacesizep.size() == SIZE_BYTE)
+	{
+		mov_reg_param(a, Gpd(REG_PARAM4), maskp);
+		mov_r64_imm(a, Gpq(REG_PARAM1), accessors.resolved.write_byte_masked.obj);
+		smart_call_r64(a, accessors.resolved.write_byte_masked.func, rax);
+	}
 	else if (spacesizep.size() == SIZE_WORD)
 	{
-		mov_r64_imm(a, Gpq(REG_PARAM1), resolved.write_word_masked.obj);    // mov   param1,space
-		smart_call_r64(a, resolved.write_word_masked.func, rax);            // call  write_word_masked
+		mov_reg_param(a, Gpd(REG_PARAM4), maskp);
+		mov_r64_imm(a, Gpq(REG_PARAM1), accessors.resolved.write_word_masked.obj);
+		smart_call_r64(a, accessors.resolved.write_word_masked.func, rax);
 	}
 	else if (spacesizep.size() == SIZE_DWORD)
 	{
-		mov_r64_imm(a, Gpq(REG_PARAM1), resolved.write_dword_masked.obj);   // mov   param1,space
-		smart_call_r64(a, resolved.write_dword_masked.func, rax);           // call  write_dword_masked
+		mov_reg_param(a, Gpd(REG_PARAM4), maskp);
+		mov_r64_imm(a, Gpq(REG_PARAM1), accessors.resolved.write_dword_masked.obj);
+		smart_call_r64(a, accessors.resolved.write_dword_masked.func, rax);
 	}
 	else if (spacesizep.size() == SIZE_QWORD)
 	{
-		mov_r64_imm(a, Gpq(REG_PARAM1), resolved.write_qword_masked.obj);   // mov   param1,space
-		smart_call_r64(a, resolved.write_qword_masked.func, rax);           // call  write_qword_masked
+		mov_reg_param(a, Gpq(REG_PARAM4), maskp);
+		mov_r64_imm(a, Gpq(REG_PARAM1), accessors.resolved.write_qword_masked.obj);
+		smart_call_r64(a, accessors.resolved.write_qword_masked.func, rax);
 	}
 }
@@ -4312,8 +4682,8 @@ void drcbe_x64::op_fread(Assembler &a, const instruction &inst)
 	assert((1 << spacep.size()) == inst.size());
 
 	// set up a call to the read dword/qword handler
-	auto const &accessors = m_resolved_accessors[spacep.space()];
-	auto const &accessor = (inst.size() == 4) ? accessors.write_dword : accessors.write_qword;
+	auto const &accessors = m_memory_accessors[spacep.space()];
+	auto const &accessor = (inst.size() == 4) ? accessors.resolved.read_dword : accessors.resolved.read_qword;
 	mov_reg_param(a, Gpd(REG_PARAM2), addrp);
 	mov_r64_imm(a, Gpq(REG_PARAM1), accessor.obj);
 	smart_call_r64(a, accessor.func, rax);
@@ -4355,8 +4725,8 @@ void drcbe_x64::op_fwrite(Assembler &a, const instruction &inst)
 	assert((1 << spacep.size()) == inst.size());
 
 	// general case
-	auto const &accessors = m_resolved_accessors[spacep.space()];
-	auto const &accessor = (inst.size() == 4) ? accessors.write_dword : accessors.write_qword;
+	auto const &accessors = m_memory_accessors[spacep.space()];
+	auto const &accessor = (inst.size() == 4) ? accessors.resolved.write_dword : accessors.resolved.write_qword;
 	mov_reg_param(a, Gpd(REG_PARAM2), addrp);
 
 	if (inst.size() == 4)
@@ -268,9 +268,14 @@ private:
 	near_state & m_near;
 
 	// resolved memory handler functions
+	struct memory_accessors
+	{
+		resolved_memory_accessors resolved;
+		address_space::specific_access_info specific;
+	};
 	resolved_member_function m_debug_cpu_instruction_hook;
 	resolved_member_function m_drcmap_get_value;
-	resolved_memory_accessors_vector m_resolved_accessors;
+	std::vector<memory_accessors> m_memory_accessors;
 
 	// globals
 	using opcode_generate_func = void (drcbe_x64::*)(asmjit::x86::Assembler &, const uml::instruction &);
src/emu/emumem.h
@@ -71,28 +71,6 @@ using offs_t = u32;
 // address map constructors are delegates that build up an address_map
 using address_map_constructor = named_delegate<void (address_map &)>;
 
-// struct with function pointers for accessors; use is generally discouraged unless necessary
-struct data_accessors
-{
-	u8 (*read_byte)(address_space &space, offs_t address);
-	u8 (*read_byte_masked)(address_space &space, offs_t address, u8 mask);
-	u16 (*read_word)(address_space &space, offs_t address);
-	u16 (*read_word_masked)(address_space &space, offs_t address, u16 mask);
-	u32 (*read_dword)(address_space &space, offs_t address);
-	u32 (*read_dword_masked)(address_space &space, offs_t address, u32 mask);
-	u64 (*read_qword)(address_space &space, offs_t address);
-	u64 (*read_qword_masked)(address_space &space, offs_t address, u64 mask);
-
-	void (*write_byte)(address_space &space, offs_t address, u8 data);
-	void (*write_byte_masked)(address_space &space, offs_t address, u8 data, u8 mask);
-	void (*write_word)(address_space &space, offs_t address, u16 data);
-	void (*write_word_masked)(address_space &space, offs_t address, u16 data, u16 mask);
-	void (*write_dword)(address_space &space, offs_t address, u32 data);
-	void (*write_dword_masked)(address_space &space, offs_t address, u32 data, u32 mask);
-	void (*write_qword)(address_space &space, offs_t address, u64 data);
-	void (*write_qword_masked)(address_space &space, offs_t address, u64 data, u64 mask);
-};
-
 // a line in the memory structure dump
 struct memory_entry_context {
 	memory_view *view;
@@ -1643,85 +1621,85 @@ template<int Width, int AddrShift, endianness_t Endian, int TargetWidth, bool Al
 
 template<int Level, int Width, int AddrShift> emu::detail::handler_entry_size_t<Width> dispatch_read(offs_t mask, offs_t offset, emu::detail::handler_entry_size_t<Width> mem_mask, const handler_entry_read<Width, AddrShift> *const *dispatch)
 {
-	static constexpr u32 LowBits = emu::detail::handler_entry_dispatch_level_to_lowbits(Level, Width, AddrShift);
-	return dispatch[(offset & mask) >> LowBits]->read(offset, mem_mask);
+	constexpr u32 LOW_BITS = emu::detail::handler_entry_dispatch_level_to_lowbits(Level, Width, AddrShift);
+	return dispatch[(offset & mask) >> LOW_BITS]->read(offset, mem_mask);
 }
 
 template<int Level, int Width, int AddrShift> void dispatch_write(offs_t mask, offs_t offset, emu::detail::handler_entry_size_t<Width> data, emu::detail::handler_entry_size_t<Width> mem_mask, const handler_entry_write<Width, AddrShift> *const *dispatch)
 {
-	static constexpr u32 LowBits = emu::detail::handler_entry_dispatch_level_to_lowbits(Level, Width, AddrShift);
-	return dispatch[(offset & mask) >> LowBits]->write(offset, data, mem_mask);
+	constexpr u32 LOW_BITS = emu::detail::handler_entry_dispatch_level_to_lowbits(Level, Width, AddrShift);
+	return dispatch[(offset & mask) >> LOW_BITS]->write(offset, data, mem_mask);
 }
 
 template<int Level, int Width, int AddrShift> std::pair<emu::detail::handler_entry_size_t<Width>, u16> dispatch_read_flags(offs_t mask, offs_t offset, emu::detail::handler_entry_size_t<Width> mem_mask, const handler_entry_read<Width, AddrShift> *const *dispatch)
 {
-	static constexpr u32 LowBits = emu::detail::handler_entry_dispatch_level_to_lowbits(Level, Width, AddrShift);
-	return dispatch[(offset & mask) >> LowBits]->read_flags(offset, mem_mask);
+	constexpr u32 LOW_BITS = emu::detail::handler_entry_dispatch_level_to_lowbits(Level, Width, AddrShift);
+	return dispatch[(offset & mask) >> LOW_BITS]->read_flags(offset, mem_mask);
 }
 
 template<int Level, int Width, int AddrShift> u16 dispatch_write_flags(offs_t mask, offs_t offset, emu::detail::handler_entry_size_t<Width> data, emu::detail::handler_entry_size_t<Width> mem_mask, const handler_entry_write<Width, AddrShift> *const *dispatch)
 {
-	static constexpr u32 LowBits = emu::detail::handler_entry_dispatch_level_to_lowbits(Level, Width, AddrShift);
-	return dispatch[(offset & mask) >> LowBits]->write_flags(offset, data, mem_mask);
+	constexpr u32 LOW_BITS = emu::detail::handler_entry_dispatch_level_to_lowbits(Level, Width, AddrShift);
+	return dispatch[(offset & mask) >> LOW_BITS]->write_flags(offset, data, mem_mask);
 }
 
 template<int Level, int Width, int AddrShift> u16 dispatch_lookup_read_flags(offs_t mask, offs_t offset, emu::detail::handler_entry_size_t<Width> mem_mask, const handler_entry_read<Width, AddrShift> *const *dispatch)
 {
-	static constexpr u32 LowBits = emu::detail::handler_entry_dispatch_level_to_lowbits(Level, Width, AddrShift);
-	return dispatch[(offset & mask) >> LowBits]->lookup_flags(offset, mem_mask);
+	constexpr u32 LOW_BITS = emu::detail::handler_entry_dispatch_level_to_lowbits(Level, Width, AddrShift);
+	return dispatch[(offset & mask) >> LOW_BITS]->lookup_flags(offset, mem_mask);
 }
 
 template<int Level, int Width, int AddrShift> u16 dispatch_lookup_write_flags(offs_t mask, offs_t offset, emu::detail::handler_entry_size_t<Width> mem_mask, const handler_entry_write<Width, AddrShift> *const *dispatch)
 {
-	static constexpr u32 LowBits = emu::detail::handler_entry_dispatch_level_to_lowbits(Level, Width, AddrShift);
-	return dispatch[(offset & mask) >> LowBits]->lookup_flags(offset, mem_mask);
+	constexpr u32 LOW_BITS = emu::detail::handler_entry_dispatch_level_to_lowbits(Level, Width, AddrShift);
+	return dispatch[(offset & mask) >> LOW_BITS]->lookup_flags(offset, mem_mask);
 }
 
 
 template<int Level, int Width, int AddrShift> emu::detail::handler_entry_size_t<Width> dispatch_read_interruptible(offs_t mask, offs_t offset, emu::detail::handler_entry_size_t<Width> mem_mask, const handler_entry_read<Width, AddrShift> *const *dispatch)
 {
-	static constexpr u32 LowBits = emu::detail::handler_entry_dispatch_level_to_lowbits(Level, Width, AddrShift);
-	return dispatch[(offset & mask) >> LowBits]->read_interruptible(offset, mem_mask);
+	constexpr u32 LOW_BITS = emu::detail::handler_entry_dispatch_level_to_lowbits(Level, Width, AddrShift);
+	return dispatch[(offset & mask) >> LOW_BITS]->read_interruptible(offset, mem_mask);
}
 
 template<int Level, int Width, int AddrShift> void dispatch_write_interruptible(offs_t mask, offs_t offset, emu::detail::handler_entry_size_t<Width> data, emu::detail::handler_entry_size_t<Width> mem_mask, const handler_entry_write<Width, AddrShift> *const *dispatch)
 {
-	static constexpr u32 LowBits = emu::detail::handler_entry_dispatch_level_to_lowbits(Level, Width, AddrShift);
-	return dispatch[(offset & mask) >> LowBits]->write_interruptible(offset, data, mem_mask);
+	constexpr u32 LOW_BITS = emu::detail::handler_entry_dispatch_level_to_lowbits(Level, Width, AddrShift);
+	return dispatch[(offset & mask) >> LOW_BITS]->write_interruptible(offset, data, mem_mask);
 }
 
-namespace emu::detail {
-
 // ======================> memory_access_specific
 
 // memory_access_specific does uncached but faster accesses by shortcutting the address_space virtual call
 
+namespace emu::detail {
+
 template<int Level, int Width, int AddrShift, endianness_t Endian> class memory_access_specific
 {
 	friend class ::address_space;
 
 	using NativeType = emu::detail::handler_entry_size_t<Width>;
 	static constexpr u32 NATIVE_BYTES = 1 << Width;
-	static constexpr u32 NATIVE_MASK = Width + AddrShift >= 0 ? (1 << (Width + AddrShift)) - 1 : 0;
+	static constexpr u32 NATIVE_MASK = (Width + AddrShift >= 0) ? ((1 << (Width + AddrShift)) - 1) : 0;
 
 public:
 	// construction/destruction
-	memory_access_specific()
-		: m_space(nullptr),
-		  m_addrmask(0),
-		  m_dispatch_read(nullptr),
-		  m_dispatch_write(nullptr)
+	memory_access_specific() :
+		m_space(nullptr),
+		m_addrmask(0),
+		m_dispatch_read(nullptr),
+		m_dispatch_write(nullptr)
 	{
 	}
 
-	inline address_space &space() const {
+	address_space &space() const {
 		return *m_space;
 	}
 
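
A worked instance of the constants above, for a 32-bit byte-addressable space (Width = 2, AddrShift = 0): NATIVE_BYTES = 4 and NATIVE_MASK = 3, the two sub-word address bits that never reach the dispatch table. As a standalone check:

    #include <cstdint>

    constexpr int width = 2, addr_shift = 0;        // 32-bit bus, byte-addressable
    constexpr std::uint32_t native_bytes = 1 << width;
    constexpr std::uint32_t native_mask = (width + addr_shift >= 0) ? ((1 << (width + addr_shift)) - 1) : 0;
    static_assert(native_bytes == 4 && native_mask == 3, "two sub-word address bits below dispatch granularity");
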
@@ -1824,11 +1802,11 @@ public:
 	u16 lookup_write_qword_unaligned_flags(offs_t address, u64 mask) { return lookup_memory_write_generic_flags<Width, AddrShift, Endian, 3, false>(lwopf(), address, mask); }
 
 	NativeType read_interruptible(offs_t address, NativeType mask = ~NativeType(0)) {
-		return dispatch_read_interruptible<Level, Width, AddrShift>(offs_t(-1), address & m_addrmask, mask, m_dispatch_read);
+		return dispatch_read_interruptible<Level, Width, AddrShift>(~offs_t(0), address & m_addrmask, mask, m_dispatch_read);
 	}
 
 	void write_interruptible(offs_t address, NativeType data, NativeType mask = ~NativeType(0)) {
-		dispatch_write_interruptible<Level, Width, AddrShift>(offs_t(-1), address & m_addrmask, data, mask, m_dispatch_write);
+		dispatch_write_interruptible<Level, Width, AddrShift>(~offs_t(0), address & m_addrmask, data, mask, m_dispatch_write);
 	}
 
 private:
@@ -1840,27 +1818,27 @@ private:
 	const handler_entry_write<Width, AddrShift> *const *m_dispatch_write;
 
 	NativeType read_native(offs_t address, NativeType mask = ~NativeType(0)) {
-		return dispatch_read<Level, Width, AddrShift>(offs_t(-1), address & m_addrmask, mask, m_dispatch_read);
+		return dispatch_read<Level, Width, AddrShift>(~offs_t(0), address & m_addrmask, mask, m_dispatch_read);
 	}
 
 	void write_native(offs_t address, NativeType data, NativeType mask = ~NativeType(0)) {
-		dispatch_write<Level, Width, AddrShift>(offs_t(-1), address & m_addrmask, data, mask, m_dispatch_write);
+		dispatch_write<Level, Width, AddrShift>(~offs_t(0), address & m_addrmask, data, mask, m_dispatch_write);
 	}
 
 	std::pair<NativeType, u16> read_native_flags(offs_t address, NativeType mask = ~NativeType(0)) {
-		return dispatch_read_flags<Level, Width, AddrShift>(offs_t(-1), address & m_addrmask, mask, m_dispatch_read);
+		return dispatch_read_flags<Level, Width, AddrShift>(~offs_t(0), address & m_addrmask, mask, m_dispatch_read);
 	}
 
 	u16 write_native_flags(offs_t address, NativeType data, NativeType mask = ~NativeType(0)) {
-		return dispatch_write_flags<Level, Width, AddrShift>(offs_t(-1), address & m_addrmask, data, mask, m_dispatch_write);
+		return dispatch_write_flags<Level, Width, AddrShift>(~offs_t(0), address & m_addrmask, data, mask, m_dispatch_write);
 	}
 
 	u16 lookup_read_native_flags(offs_t address, NativeType mask = ~NativeType(0)) {
-		return dispatch_lookup_read_flags<Level, Width, AddrShift>(offs_t(-1), address & m_addrmask, mask, m_dispatch_read);
+		return dispatch_lookup_read_flags<Level, Width, AddrShift>(~offs_t(0), address & m_addrmask, mask, m_dispatch_read);
 	}
 
 	u16 lookup_write_native_flags(offs_t address, NativeType mask = ~NativeType(0)) {
-		return dispatch_lookup_write_flags<Level, Width, AddrShift>(offs_t(-1), address & m_addrmask, mask, m_dispatch_write);
+		return dispatch_lookup_write_flags<Level, Width, AddrShift>(~offs_t(0), address & m_addrmask, mask, m_dispatch_write);
 	}
 
 	void set(address_space *space, std::pair<const void *, const void *> rw);
@@ -2327,13 +2305,13 @@ protected:
 	void check_optimize_mirror(const char *function, offs_t addrstart, offs_t addrend, offs_t addrmirror, offs_t &nstart, offs_t &nend, offs_t &nmask, offs_t &nmirror);
 	void check_address(const char *function, offs_t addrstart, offs_t addrend);
 
-	address_space_installer(const address_space_config &config, memory_manager &manager)
-		: m_config(config),
-		  m_manager(manager),
-		  m_addrmask(make_bitmask<offs_t>(m_config.addr_width())),
-		  m_logaddrmask(make_bitmask<offs_t>(m_config.logaddr_width())),
-		  m_addrchars((m_config.addr_width() + 3) / 4),
-		  m_logaddrchars((m_config.logaddr_width() + 3) / 4)
+	address_space_installer(const address_space_config &config, memory_manager &manager) :
+		m_config(config),
+		m_manager(manager),
+		m_addrmask(make_bitmask<offs_t>(m_config.addr_width())),
+		m_logaddrmask(make_bitmask<offs_t>(m_config.logaddr_width())),
+		m_addrchars((m_config.addr_width() + 3) / 4),
+		m_logaddrchars((m_config.logaddr_width() + 3) / 4)
 	{}
 
 	const address_space_config &m_config;  // configuration of this space
@@ -2357,6 +2335,24 @@ protected:
 	address_space(memory_manager &manager, device_memory_interface &memory, int spacenum);
 
 public:
+	struct specific_access_info
+	{
+		struct side
+		{
+			void const *const *dispatch;
+			uintptr_t function;
+			ptrdiff_t displacement;
+			bool is_virtual;
+		};
+
+		unsigned native_bytes;
+		unsigned native_mask_bits;
+		unsigned address_width;
+		unsigned low_bits;
+		side read;
+		side write;
+	};
+
 	virtual ~address_space();
 
 	// getters
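
How a consumer interprets one side of specific_access_info, written as plain C++ instead of emitted machine code. This is a hedged sketch only: it assumes the Itanium-style convention the x64 back-end relies on (this in the first argument register; for virtual targets, function holds a byte offset into the vtable), and calling a member function through a cast like this is undefined behavior as real C++; it is shown purely to document the field meanings:

    #include <cstdint>

    struct side { void const *const *dispatch; std::uintptr_t function; std::ptrdiff_t displacement; bool is_virtual; };

    using read_fn = std::uint32_t (*)(void *obj, std::uint32_t offset, std::uint32_t mem_mask);

    std::uint32_t call_read(side const &s, unsigned low_bits, std::uint32_t masked_addr, std::uint32_t mem_mask)
    {
        auto *obj = const_cast<void *>(s.dispatch[masked_addr >> low_bits]); // load dispatch table entry
        obj = static_cast<char *>(obj) + s.displacement;                     // apply this-pointer offset
        std::uintptr_t target = s.function;                                  // non-virtual: direct entry point
        if (s.is_virtual)                                                    // virtual: function is a vtable offset
            target = *reinterpret_cast<std::uintptr_t const *>(*reinterpret_cast<std::uintptr_t const *>(obj) + s.function);
        return reinterpret_cast<read_fn>(target)(obj, masked_addr, mem_mask);
    }
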
@@ -2420,7 +2416,7 @@ public:
 	void set_log_unmap(bool log) { m_log_unmap = log; }
 
 	// general accessors
-	virtual void accessors(data_accessors &accessors) const = 0;
+	virtual specific_access_info specific_accessors() const = 0;
 	virtual void *get_read_ptr(offs_t address) const = 0;
 	virtual void *get_write_ptr(offs_t address) const = 0;
 
@@ -310,25 +310,62 @@ public:
 		m_root_write->detach(handlers);
 	}
 
-	// generate accessor table
-	virtual void accessors(data_accessors &accessors) const override
+	// generate specific accessor info
+	virtual specific_access_info specific_accessors() const override
 	{
-		accessors.read_byte = reinterpret_cast<u8 (*)(address_space &, offs_t)>(&read_byte_static);
-		accessors.read_byte_masked = reinterpret_cast<u8 (*)(address_space &, offs_t, u8)>(&read_byte_masked_static);
-		accessors.read_word = reinterpret_cast<u16 (*)(address_space &, offs_t)>(&read_word_static);
-		accessors.read_word_masked = reinterpret_cast<u16 (*)(address_space &, offs_t, u16)>(&read_word_masked_static);
-		accessors.read_dword = reinterpret_cast<u32 (*)(address_space &, offs_t)>(&read_dword_static);
-		accessors.read_dword_masked = reinterpret_cast<u32 (*)(address_space &, offs_t, u32)>(&read_dword_masked_static);
-		accessors.read_qword = reinterpret_cast<u64 (*)(address_space &, offs_t)>(&read_qword_static);
-		accessors.read_qword_masked = reinterpret_cast<u64 (*)(address_space &, offs_t, u64)>(&read_qword_masked_static);
-		accessors.write_byte = reinterpret_cast<void (*)(address_space &, offs_t, u8)>(&write_byte_static);
-		accessors.write_byte_masked = reinterpret_cast<void (*)(address_space &, offs_t, u8, u8)>(&write_byte_masked_static);
-		accessors.write_word = reinterpret_cast<void (*)(address_space &, offs_t, u16)>(&write_word_static);
-		accessors.write_word_masked = reinterpret_cast<void (*)(address_space &, offs_t, u16, u16)>(&write_word_masked_static);
-		accessors.write_dword = reinterpret_cast<void (*)(address_space &, offs_t, u32)>(&write_dword_static);
-		accessors.write_dword_masked = reinterpret_cast<void (*)(address_space &, offs_t, u32, u32)>(&write_dword_masked_static);
-		accessors.write_qword = reinterpret_cast<void (*)(address_space &, offs_t, u64)>(&write_qword_static);
-		accessors.write_qword_masked = reinterpret_cast<void (*)(address_space &, offs_t, u64, u64)>(&write_qword_masked_static);
+		specific_access_info accessors;
+
+		accessors.native_bytes = 1 << Width;
+		accessors.native_mask_bits = ((Width + AddrShift) >= 0) ? (Width + AddrShift) : 0;
+		accessors.address_width = addr_width();
+		accessors.low_bits = emu::detail::handler_entry_dispatch_level_to_lowbits(Level, Width, AddrShift);
+		accessors.read.dispatch = reinterpret_cast<void const *const *>(m_dispatch_read);
+		accessors.write.dispatch = reinterpret_cast<void const *const *>(m_dispatch_write);
+		accessors.read.function = accessors.write.function = uintptr_t(nullptr);
+		accessors.read.displacement = accessors.write.displacement = 0;
+		accessors.read.is_virtual = accessors.write.is_virtual = false;
+
+		auto readfunc = &handler_entry_read<Width, AddrShift>::read;
+		auto writefunc = &handler_entry_write<Width, AddrShift>::write;
+		if (MAME_ABI_CXX_TYPE == MAME_ABI_CXX_ITANIUM) {
+			struct { std::uintptr_t ptr; std::ptrdiff_t adj; } equiv;
+			constexpr uintptr_t funcmask = ~uintptr_t((MAME_ABI_CXX_ITANIUM_MFP_TYPE == MAME_ABI_CXX_ITANIUM_MFP_ARM) ? 0 : 1);
+			constexpr unsigned deltashift = (MAME_ABI_CXX_ITANIUM_MFP_TYPE == MAME_ABI_CXX_ITANIUM_MFP_ARM) ? 1 : 0;
+
+			assert(sizeof(readfunc) == sizeof(equiv));
+			*reinterpret_cast<decltype(readfunc) *>(&equiv) = readfunc;
+			accessors.read.function = equiv.ptr & funcmask;
+			accessors.read.displacement = equiv.adj >> deltashift;
+			accessors.read.is_virtual = BIT((MAME_ABI_CXX_ITANIUM_MFP_TYPE == MAME_ABI_CXX_ITANIUM_MFP_ARM) ? equiv.adj : equiv.ptr, 0);
+
+			assert(sizeof(writefunc) == sizeof(equiv));
+			*reinterpret_cast<decltype(writefunc) *>(&equiv) = writefunc;
+			accessors.write.function = equiv.ptr & funcmask;
+			accessors.write.displacement = equiv.adj >> deltashift;
+			accessors.write.is_virtual = BIT((MAME_ABI_CXX_ITANIUM_MFP_TYPE == MAME_ABI_CXX_ITANIUM_MFP_ARM) ? equiv.adj : equiv.ptr, 0);
+		} else if (MAME_ABI_CXX_TYPE == MAME_ABI_CXX_MSVC) {
+			struct single { std::uintptr_t entrypoint; };
+			struct multi { std::uintptr_t entrypoint; int this_delta; };
+			struct { std::uintptr_t entrypoint; int this_delta; int vptr_offs; int vt_index; } const *unknown;
+
+			assert(sizeof(*unknown) >= sizeof(readfunc));
+			unknown = reinterpret_cast<decltype(unknown)>(&readfunc);
+			if ((sizeof(*unknown) > sizeof(readfunc)) || !unknown->vt_index) {
+				accessors.read.function = unknown->entrypoint;
+				accessors.read.displacement = (sizeof(single) < sizeof(readfunc)) ? unknown->this_delta : 0;
+				accessors.read.is_virtual = false;
+			}
+
+			assert(sizeof(*unknown) >= sizeof(writefunc));
+			unknown = reinterpret_cast<decltype(unknown)>(&writefunc);
+			if ((sizeof(*unknown) > sizeof(writefunc)) || !unknown->vt_index) {
+				accessors.write.function = unknown->entrypoint;
+				accessors.write.displacement = (sizeof(single) < sizeof(writefunc)) ? unknown->this_delta : 0;
+				accessors.write.is_virtual = false;
+			}
+		}
+
+		return accessors;
 	}
 
 	// return a pointer to the read bank, or nullptr if none
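
Background on the extraction above: the Itanium C++ ABI represents a pointer-to-member-function as a { ptr, adj } pair, where ptr is either the entry point or (vtable offset + 1) for virtual functions, with bit 0 flagging virtual-ness; in the ARM variant bit 0 of adj carries the flag instead and adj is shifted left by one. A self-contained decoding sketch under the standard (non-ARM) layout, matching the code above with deltashift = 0:

    #include <cstdint>
    #include <cstring>

    struct itanium_mfp { std::uintptr_t ptr; std::ptrdiff_t adj; };

    template <typename MFP>
    itanium_mfp decode(MFP mfp, bool &is_virtual)
    {
        static_assert(sizeof(MFP) == sizeof(itanium_mfp), "unexpected member-function-pointer size");
        itanium_mfp out;
        std::memcpy(&out, &mfp, sizeof(out));   // same bit-level reinterpretation as the code above
        is_virtual = out.ptr & 1;               // BIT(equiv.ptr, 0)
        out.ptr &= ~std::uintptr_t(1);          // funcmask strips the flag bit
        return out;
    }

The MSVC branch instead probes the documented-by-observation pointer-to-member layouts (single, multi, unknown inheritance) and only fills in the fields when the target is resolvable without a vtordisp thunk; otherwise function stays null and the back-end keeps using the resolved fallback accessors.
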
@@ -401,24 +438,6 @@ public:
 	void write_qword_unaligned(offs_t address, u64 data) override { memory_write_generic<Width, AddrShift, Endian, 3, false>(wop(), address, data, 0xffffffffffffffffU); }
 	void write_qword_unaligned(offs_t address, u64 data, u64 mask) override { memory_write_generic<Width, AddrShift, Endian, 3, false>(wop(), address, data, mask); }
 
-	// static access to these functions
-	static u8 read_byte_static(this_type &space, offs_t address) { return Width == 0 ? space.read_native(address & ~NATIVE_MASK) : memory_read_generic<Width, AddrShift, Endian, 0, true>([&space](offs_t offset, NativeType mask) -> NativeType { return space.read_native(offset, mask); }, address, 0xff); }
-	static u8 read_byte_masked_static(this_type &space, offs_t address, u8 mask) { return memory_read_generic<Width, AddrShift, Endian, 0, true>([&space](offs_t offset, NativeType mask) -> NativeType { return space.read_native(offset, mask); }, address, mask); }
-	static u16 read_word_static(this_type &space, offs_t address) { return Width == 1 ? space.read_native(address & ~NATIVE_MASK) : memory_read_generic<Width, AddrShift, Endian, 1, true>([&space](offs_t offset, NativeType mask) -> NativeType { return space.read_native(offset, mask); }, address, 0xffff); }
-	static u16 read_word_masked_static(this_type &space, offs_t address, u16 mask) { return memory_read_generic<Width, AddrShift, Endian, 1, true>([&space](offs_t offset, NativeType mask) -> NativeType { return space.read_native(offset, mask); }, address, mask); }
-	static u32 read_dword_static(this_type &space, offs_t address) { return Width == 2 ? space.read_native(address & ~NATIVE_MASK) : memory_read_generic<Width, AddrShift, Endian, 2, true>([&space](offs_t offset, NativeType mask) -> NativeType { return space.read_native(offset, mask); }, address, 0xffffffff); }
-	static u32 read_dword_masked_static(this_type &space, offs_t address, u32 mask) { return memory_read_generic<Width, AddrShift, Endian, 2, true>([&space](offs_t offset, NativeType mask) -> NativeType { return space.read_native(offset, mask); }, address, mask); }
-	static u64 read_qword_static(this_type &space, offs_t address) { return Width == 3 ? space.read_native(address & ~NATIVE_MASK) : memory_read_generic<Width, AddrShift, Endian, 3, true>([&space](offs_t offset, NativeType mask) -> NativeType { return space.read_native(offset, mask); }, address, 0xffffffffffffffffU); }
-	static u64 read_qword_masked_static(this_type &space, offs_t address, u64 mask) { return memory_read_generic<Width, AddrShift, Endian, 3, true>([&space](offs_t offset, NativeType mask) -> NativeType { return space.read_native(offset, mask); }, address, mask); }
-	static void write_byte_static(this_type &space, offs_t address, u8 data) { if (Width == 0) space.write_native(address & ~NATIVE_MASK, data); else memory_write_generic<Width, AddrShift, Endian, 0, true>([&space](offs_t offset, NativeType data, NativeType mask) { space.write_native(offset, data, mask); }, address, data, 0xff); }
-	static void write_byte_masked_static(this_type &space, offs_t address, u8 data, u8 mask) { memory_write_generic<Width, AddrShift, Endian, 0, true>([&space](offs_t offset, NativeType data, NativeType mask) { space.write_native(offset, data, mask); }, address, data, mask); }
-	static void write_word_static(this_type &space, offs_t address, u16 data) { if (Width == 1) space.write_native(address & ~NATIVE_MASK, data); else memory_write_generic<Width, AddrShift, Endian, 1, true>([&space](offs_t offset, NativeType data, NativeType mask) { space.write_native(offset, data, mask); }, address, data, 0xffff); }
-	static void write_word_masked_static(this_type &space, offs_t address, u16 data, u16 mask) { memory_write_generic<Width, AddrShift, Endian, 1, true>([&space](offs_t offset, NativeType data, NativeType mask) { space.write_native(offset, data, mask); }, address, data, mask); }
-	static void write_dword_static(this_type &space, offs_t address, u32 data) { if (Width == 2) space.write_native(address & ~NATIVE_MASK, data); else memory_write_generic<Width, AddrShift, Endian, 2, true>([&space](offs_t offset, NativeType data, NativeType mask) { space.write_native(offset, data, mask); }, address, data, 0xffffffff); }
-	static void write_dword_masked_static(this_type &space, offs_t address, u32 data, u32 mask) { memory_write_generic<Width, AddrShift, Endian, 2, true>([&space](offs_t offset, NativeType data, NativeType mask) { space.write_native(offset, data, mask); }, address, data, mask); }
-	static void write_qword_static(this_type &space, offs_t address, u64 data) { if (Width == 3) space.write_native(address & ~NATIVE_MASK, data); else memory_write_generic<Width, AddrShift, Endian, 3, false>([&space](offs_t offset, NativeType data, NativeType mask) { space.write_native(offset, data, mask); }, address, data, 0xffffffffffffffffU); }
-	static void write_qword_masked_static(this_type &space, offs_t address, u64 data, u64 mask) { memory_write_generic<Width, AddrShift, Endian, 3, false>([&space](offs_t offset, NativeType data, NativeType mask) { space.write_native(offset, data, mask); }, address, data, mask); }
-
 	handler_entry_read <Width, AddrShift> *m_root_read;
 	handler_entry_write<Width, AddrShift> *m_root_write;
 