From 6fff18773b6d23fecf177dd9bea13e5242e982d1 Mon Sep 17 00:00:00 2001 From: AJR Date: Sun, 19 May 2019 17:41:44 -0400 Subject: [PATCH] i386: Improve splitting of unaligned accesses (excluding program fetches) (nw) This entails a major code reorganization just to keep the scale of it all halfway sane. --- scripts/src/cpu.lua | 3 + src/devices/cpu/i386/athlon.cpp | 954 +++++++ src/devices/cpu/i386/athlon.h | 75 + src/devices/cpu/i386/cpuidmsrs.hxx | 311 --- src/devices/cpu/i386/cycles.h | 333 --- src/devices/cpu/i386/i386.cpp | 3786 +++++++--------------------- src/devices/cpu/i386/i386.h | 121 +- src/devices/cpu/i386/i386ops.h | 21 - src/devices/cpu/i386/i386priv.h | 1066 +++----- src/devices/cpu/i386/i386segs.hxx | 2464 ++++++++++++++++++ src/mame/drivers/nforcepc.cpp | 2 +- 11 files changed, 4864 insertions(+), 4272 deletions(-) create mode 100644 src/devices/cpu/i386/athlon.cpp create mode 100644 src/devices/cpu/i386/athlon.h create mode 100644 src/devices/cpu/i386/i386segs.hxx diff --git a/scripts/src/cpu.lua b/scripts/src/cpu.lua index 536bcf974bf..1fb7e3262cd 100644 --- a/scripts/src/cpu.lua +++ b/scripts/src/cpu.lua @@ -1087,6 +1087,8 @@ if (CPUS["I386"]~=null) then files { MAME_DIR .. "src/devices/cpu/i386/i386.cpp", MAME_DIR .. "src/devices/cpu/i386/i386.h", + MAME_DIR .. "src/devices/cpu/i386/athlon.cpp", + MAME_DIR .. "src/devices/cpu/i386/athlon.h", MAME_DIR .. "src/devices/cpu/i386/cache.h", MAME_DIR .. "src/devices/cpu/i386/cycles.h", MAME_DIR .. "src/devices/cpu/i386/i386op16.hxx", @@ -1094,6 +1096,7 @@ if (CPUS["I386"]~=null) then MAME_DIR .. "src/devices/cpu/i386/i386ops.h", MAME_DIR .. "src/devices/cpu/i386/i386ops.hxx", MAME_DIR .. "src/devices/cpu/i386/i386priv.h", + MAME_DIR .. "src/devices/cpu/i386/i386segs.hxx", MAME_DIR .. "src/devices/cpu/i386/i486ops.hxx", MAME_DIR .. "src/devices/cpu/i386/pentops.hxx", MAME_DIR .. 
"src/devices/cpu/i386/x87ops.hxx", diff --git a/src/devices/cpu/i386/athlon.cpp b/src/devices/cpu/i386/athlon.cpp new file mode 100644 index 00000000000..d315e5a3428 --- /dev/null +++ b/src/devices/cpu/i386/athlon.cpp @@ -0,0 +1,954 @@ +// license:BSD-3-Clause +// copyright-holders:Ville Linde, Barry Rodewald, Carl, Philip Bennett + +#include "emu.h" +#include "athlon.h" +#include "i386priv.h" + +DEFINE_DEVICE_TYPE(ATHLONXP, athlonxp_device, "athlonxp", "Amd Athlon XP") + +athlonxp_device::athlonxp_device(const machine_config &mconfig, const char *tag, device_t *owner, uint32_t clock) + : pentium_device(mconfig, ATHLONXP, tag, owner, clock) + , m_data_config("mmio", ENDIANNESS_LITTLE, 32, 32, 0, 32, 12) + , m_opcodes_config("debugger", ENDIANNESS_LITTLE, 32, 32, 0, 32, 12) +{ + // TODO: put correct value + set_vtlb_dynamic_entries(256); +} + +/*****************************************************************************/ +/* AMD Athlon XP + Model: Athlon XP 2400+ + Part number: AXDA2400DKV3C + Stepping code: AIUCP + Date code: 0240MPMW +*/ + +void athlonxp_device::device_start() +{ + i386_common_init(); + register_state_i386_x87_xmm(); + m_data = &space(AS_DATA); + m_opcodes = &space(AS_OPCODES); + mmacache32 = m_data->cache<2, 0, ENDIANNESS_LITTLE>(); + m_opcodes->install_read_handler(0, 0xffffffff, read32_delegate(FUNC(athlonxp_device::debug_read_memory), this)); + + build_x87_opcode_table(); + build_opcode_table(OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_PPRO | OP_MMX | OP_SSE); + m_cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM].get(); // TODO: generate own cycle tables + m_cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM].get(); // TODO: generate own cycle tables + + // put savestate calls here + save_item(NAME(m_processor_name_string)); + save_item(NAME(m_msr_top_mem)); + save_item(NAME(m_msr_sys_cfg)); + save_item(NAME(m_msr_smm_base)); + save_item(NAME(m_msr_smm_mask)); + save_item(NAME(m_msr_mtrrfix)); + save_item(NAME(m_memory_ranges_1m)); +} + 
+void athlonxp_device::device_reset() +{ + zero_state(); + + m_sreg[CS].selector = 0xf000; + m_sreg[CS].base = 0xffff0000; + m_sreg[CS].limit = 0xffff; + m_sreg[CS].flags = 0x0093; + + m_sreg[DS].base = m_sreg[ES].base = m_sreg[FS].base = m_sreg[GS].base = m_sreg[SS].base = 0x00000000; + m_sreg[DS].limit = m_sreg[ES].limit = m_sreg[FS].limit = m_sreg[GS].limit = m_sreg[SS].limit = 0xffff; + m_sreg[DS].flags = m_sreg[ES].flags = m_sreg[FS].flags = m_sreg[GS].flags = m_sreg[SS].flags = 0x0093; + + m_idtr.base = 0; + m_idtr.limit = 0x3ff; + + m_a20_mask = ~0; + + m_cr[0] = 0x60000010; + m_eflags = 0x00200000; + m_eflags_mask = 0x00277fd7; /* TODO: is this correct? */ + m_eip = 0xfff0; + m_mxcsr = 0x1f80; + m_smm = false; + m_smi_latched = false; + m_smbase = 0x30000; + m_nmi_masked = false; + m_nmi_latched = false; + + x87_reset(); + + // [11:8] Family + // [ 7:4] Model + // [ 3:0] Stepping ID + // Family 6, Model 8, Stepping 1 + REG32(EAX) = 0; + REG32(EDX) = (6 << 8) | (8 << 4) | (1); + + m_cpuid_id0 = ('h' << 24) | ('t' << 16) | ('u' << 8) | 'A'; // Auth + m_cpuid_id1 = ('i' << 24) | ('t' << 16) | ('n' << 8) | 'e'; // enti + m_cpuid_id2 = ('D' << 24) | ('M' << 16) | ('A' << 8) | 'c'; // cAMD + memset(m_processor_name_string, 0, 48); + strcpy((char *)m_processor_name_string, "AMD Athlon(tm) Processor"); + for (int n = 0; n < 11; n++) + m_msr_mtrrfix[n] = 0; + for (int n = 0; n < (1024 / 4); n++) + m_memory_ranges_1m[n] = 0; + m_msr_top_mem = 1024 * 1024; + m_msr_sys_cfg = 0; + m_msr_smm_base = m_smbase; + m_msr_smm_mask = 0; + + m_cpuid_max_input_value_eax = 0x01; + m_cpu_version = REG32(EDX); + + // see FEATURE_FLAGS enum for bit names + m_feature_flags = 0x0383fbff; + + CHANGE_PC(m_eip); +} + +device_memory_interface::space_config_vector athlonxp_device::memory_space_config() const +{ + return space_config_vector{ + std::make_pair(AS_PROGRAM, &m_program_config), + std::make_pair(AS_IO, &m_io_config), + std::make_pair(AS_DATA, &m_data_config), + 
std::make_pair(AS_OPCODES, &m_opcodes_config) + }; +} + +void athlonxp_device::enter_smm() +{ + u64 data; + + if (m_msr_smm_mask & 1) + data = 0x1818181818181818; // when smm is active + else + data = m_msr_mtrrfix[2]; + parse_mtrrfix(data, 0xa0000, 16); + i386_device::enter_smm(); +} + +void athlonxp_device::leave_smm() +{ + u64 data; + + i386_device::leave_smm(); + if (m_msr_smm_mask & 1) + data = 0; // when smm is not active + else + data = m_msr_mtrrfix[2]; + parse_mtrrfix(data, 0xa0000, 16); +} + +void athlonxp_device::parse_mtrrfix(u64 mtrr, offs_t base, int kblock) +{ + int nb = kblock / 4; + int range = (int)(base >> 12); // base must never be higher than 1 megabyte + + for (int n = 0; n < 8; n++) + { + uint8_t type = mtrr & 0xff; + + for (int b = 0; b < nb; b++) + { + m_memory_ranges_1m[range] = type; + range++; + } + mtrr = mtrr >> 8; + } +} + +int athlonxp_device::check_cacheable(offs_t address) +{ + offs_t block; + int disabled; + + disabled = 0; + if (m_cr[0] & (1 << 30)) + disabled = 128; + if (address >= 0x100000) + return disabled; + block = address >> 12; + return m_memory_ranges_1m[block] | disabled; +} + +template +int athlonxp_device::address_mode(offs_t address) +{ + if (address >= m_msr_top_mem) + return 1; + if (address >= 1 * 1024 * 1024) + return 0; + if ((m_memory_ranges_1m[address >> 12] & (1 << (3 + wr))) != 0) + return 0; + return 1; +} + +READ32_MEMBER(athlonxp_device::debug_read_memory) +{ + offs_t address = offset << 2; + int mode = check_cacheable(address); + bool nocache = false; + address_space *m = m_program; + u8 *data; + + if ((mode & 7) == 0) + nocache = true; + if (mode & 1) + nocache = true; + if (nocache == false) + { + int offset = (address & 63); + data = cache.search(address); + if (data) + return *(u32 *)(data + offset); + } + if (address_mode<1>(address)) + m = m_data; + return m->read_dword(address); +} + +template +dt athlonxp_device::opcode_read_cache(offs_t address) +{ + int mode = check_cacheable(address); + bool 
nocache = false; + memory_access_cache<2, 0, ENDIANNESS_LITTLE> *m = macache32; + u8 *data; + + if ((mode & 7) == 0) + nocache = true; + if (mode & 1) + nocache = true; + if (nocache == false) + { + int offset = (address & 63) ^ xorle; + data = cache.search(address); + if (data) + return *(dt *)(data + offset); + if (!(mode & 128)) + { + bool dirty = cache.allocate(address, &data); + address = cache.base(address); + if (dirty) + { + offs_t old_address = cache.old(); + + for (int w = 0; w < 64; w += 4) + m->write_dword(old_address + w, *(u32 *)(data + w)); + } + for (int r = 0; r < 64; r += 4) + *(u32 *)(data + r) = m->read_dword(address + r); + return *(dt *)(data + offset); + } + } + if (address_mode<1>(address)) + m = mmacache32; + if (sizeof(dt) == 1) + return m->read_byte(address); + else if (sizeof(dt) == 2) + return m->read_word(address); + else + return m->read_dword(address); +} + +uint32_t athlonxp_device::program_read_cache(offs_t address, uint32_t mask) +{ + int mode = check_cacheable(address); + bool nocache = false; + address_space *m = m_program; + u8 *data; + + if ((mode & 7) == 0) + nocache = true; + if (mode & 1) + nocache = true; + if (nocache == false) + { + int offset = address & 63; + data = cache.search(address); + if (data) + return *(u32 *)(data + offset) & mask; + if (!(mode & 128)) + { + bool dirty = cache.allocate(address, &data); + address = cache.base(address); + if (dirty) + { + offs_t old_address = cache.old(); + + for (int w = 0; w < 64; w += 4) + m->write_dword(old_address + w, *(u32 *)(data + w)); + } + for (int r = 0; r < 64; r += 4) + *(u32 *)(data + r) = m->read_dword(address + r); + return *(u32 *)(data + offset) & mask; + } + } + if (address_mode<1>(address)) + m = m_data; + return m->read_dword(address, mask) & mask; +} + +void athlonxp_device::program_write_cache(offs_t address, uint32_t data, uint32_t mask) +{ + int mode = check_cacheable(address); + bool nocache = false; + address_space *m = m_program; + u8 *dataw; + + if 
((mode & 7) == 0) + nocache = true; + if (mode & 1) + nocache = true; + if (nocache == false) + { + int offset = address & 63; + dataw = cache.search(address); + if (dataw) + { + *(u32 *)(dataw + offset) = (*(u32 *)(dataw + offset) & ~mask) | (data & mask); + return; + } + if (!(mode & 128)) + { + bool dirty = cache.allocate(address, &dataw); + address = cache.base(address); + if (dirty) + { + offs_t old_address = cache.old(); + + for (int w = 0; w < 64; w += 4) + m->write_dword(old_address + w, *(u32 *)(dataw + w)); + } + for (int r = 0; r < 64; r += 4) + *(u32 *)(dataw + r) = m->read_dword(address + r); + *(u32 *)(dataw + offset) = (*(u32 *)(dataw + offset) & ~mask) | (data & mask); + return; + } + } + if (address_mode<0>(address)) + m = m_data; + m->write_dword(address, data, mask); +} + +void athlonxp_device::cache_writeback() +{ + // dirty cachelines are written back to memory + address_space *m = m_program; + u32 base; + u8 *data; + + data = cache.first_dirty(base, false); + while (data != nullptr) + { + for (int w = 0; w < 64; w += 4) + m->write_dword(base + w, *(u32 *)(data + w)); + data = cache.next_dirty(base, false); + } +} + +void athlonxp_device::cache_invalidate() +{ + // dirty cachelines are not written back to memory + cache.reset(); +} + +void athlonxp_device::cache_clean() +{ + // dirty cachelines are marked as clean but not written back to memory + u32 base; + u8 *data; + + data = cache.first_dirty(base, true); + while (data != nullptr) + data = cache.next_dirty(base, true); +} + +uint8_t athlonxp_device::READ8PL(uint32_t ea, uint8_t privilege) +{ + uint32_t address = ea, error; + + if(!translate_address(privilege,TRANSLATE_READ,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + + uint8_t shift = 8 * (ea & 3); + return program_read_cache(address - (ea & 3), uint32_t(0xff) << shift) >> shift; +} + +uint16_t athlonxp_device::READ16PL(uint32_t ea, uint8_t privilege) +{ + uint16_t value; + uint32_t address = ea, error; + + switch (ea & 
3) + { + case 0: + if(!translate_address(privilege,TRANSLATE_READ,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + value = program_read_cache(address, 0x0000ffff) & 0xffff; + break; + + case 1: + if(!translate_address(privilege,TRANSLATE_READ,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + value = (program_read_cache(address - 1, 0x00ffff00) >> 8) & 0xffff; + break; + + case 2: + if(!translate_address(privilege,TRANSLATE_READ,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + value = (program_read_cache(address - 2, 0xffff0000) >> 16) & 0xffff; + break; + + case 3: + value = READ8PL(ea, privilege); + value |= READ8PL(ea + 1, privilege) << 8; + break; + } + + return value; +} + +uint32_t athlonxp_device::READ32PL(uint32_t ea, uint8_t privilege) +{ + uint32_t value; + uint32_t address = ea, error; + + switch (ea & 3) + { + case 0: + if(!translate_address(privilege,TRANSLATE_READ,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + value = program_read_cache(address, 0xffffffff); + break; + + case 1: + if(!translate_address(privilege,TRANSLATE_READ,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + value = program_read_cache(address - 1, 0xffffff00) >> 8; + value |= READ8PL(ea + 3, privilege) << 24; + break; + + case 2: + value = READ16PL(ea, privilege); + value |= READ16PL(ea + 2, privilege) << 16; + break; + + case 3: + value = READ8PL(ea, privilege); + + address = ea + 1; + if(!translate_address(privilege,TRANSLATE_READ,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + value |= program_read_cache(address, 0x00ffffff) << 8; + break; + } + + return value; +} + +uint64_t athlonxp_device::READ64PL(uint32_t ea, uint8_t privilege) +{ + uint64_t value; + uint32_t address = ea, error; + + switch (ea & 3) + { + case 0: + value = READ32PL(ea, privilege); + value |= uint64_t(READ32PL(ea + 2, privilege)) << 32; + break; + + case 1: + 
if(!translate_address(privilege,TRANSLATE_READ,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + value = program_read_cache(address - 1, 0xffffff00) >> 8; + value |= uint64_t(READ32PL(ea + 3, privilege)) << 24; + value |= uint64_t(READ8PL(ea + 7, privilege)) << 56; + break; + + case 2: + value = READ16PL(ea, privilege); + value |= uint64_t(READ32PL(ea + 2, privilege)) << 16; + value |= uint64_t(READ16PL(ea + 6, privilege)) << 48; + break; + + case 3: + value = READ8PL(ea, privilege); + value |= uint64_t(READ32PL(ea + 1, privilege)) << 8; + + address = ea + 5; + if(!translate_address(privilege,TRANSLATE_READ,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + value |= uint64_t(program_read_cache(address, 0x00ffffff)) << 40; + break; + } + + return value; +} + +void athlonxp_device::WRITE8PL(uint32_t ea, uint8_t privilege, uint8_t value) +{ + uint32_t address = ea, error; + if(!translate_address(privilege,TRANSLATE_WRITE,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + + uint8_t shift = 8 * (ea & 3); + program_write_cache(address - (ea & 3), value << shift, uint32_t(0xff) << shift); +} + +void athlonxp_device::WRITE16PL(uint32_t ea, uint8_t privilege, uint16_t value) +{ + uint32_t address = ea, error; + + switch(ea & 3) + { + case 0: + if(!translate_address(privilege,TRANSLATE_WRITE,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + program_write_cache(address, value, 0x0000ffff); + break; + + case 1: + if(!translate_address(privilege,TRANSLATE_WRITE,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + program_write_cache(address - 1, value << 8, 0x00ffff00); + break; + + case 2: + if(!translate_address(privilege,TRANSLATE_WRITE,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + program_write_cache(address - 2, value << 16, 0xffff0000); + break; + + case 3: + WRITE8PL(ea, privilege, value & 0xff); + WRITE8PL(ea + 1, privilege, (value >> 8) & 0xff); + break; + } +} + +void 
athlonxp_device::WRITE32PL(uint32_t ea, uint8_t privilege, uint32_t value) +{ + uint32_t address = ea, error; + + switch(ea & 3) + { + case 0: + if(!translate_address(privilege,TRANSLATE_WRITE,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + program_write_cache(address, value, 0xffffffff); + break; + + case 1: + if(!translate_address(privilege,TRANSLATE_WRITE,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + program_write_cache(address - 1, (value << 8) & 0xffffff00, 0xffffff00); + WRITE8PL(ea + 3, privilege, (value >> 24) & 0xff); + break; + + case 2: + WRITE16PL(ea, privilege, value & 0xffff); + WRITE16PL(ea + 2, privilege, (value >> 16) & 0xffff); + break; + + case 3: + WRITE8PL(ea, privilege, value & 0xff); + + address = ea + 1; + if(!translate_address(privilege,TRANSLATE_WRITE,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + program_write_cache(address, value >> 8, 0x00ffffff); + break; + } +} + +void athlonxp_device::WRITE64PL(uint32_t ea, uint8_t privilege, uint64_t value) +{ + uint32_t address = ea, error; + + switch(ea & 3) + { + case 0: + WRITE32PL(ea, privilege, value & 0xffffffff); + WRITE32PL(ea + 2, privilege, (value >> 32) & 0xffffffff); + break; + + case 1: + if(!translate_address(privilege,TRANSLATE_WRITE,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + program_write_cache(address - 1, value << 8, 0xffffff00); + WRITE32PL(ea + 3, privilege, (value >> 24) & 0xffffffff); + WRITE8PL(ea + 7, privilege, (value >> 56) & 0xff ); + break; + + case 2: + WRITE16PL(ea, privilege, value & 0xffff); + WRITE32PL(ea + 2, privilege, (value >> 16) & 0xffffffff); + WRITE16PL(ea + 6, privilege, (value >> 48) & 0xffff); + break; + + case 3: + WRITE8PL(ea, privilege, value & 0xff); + WRITE32PL(ea + 1, privilege, (value >> 8) & 0xffffffff); + + address = ea + 5; + if(!translate_address(privilege,TRANSLATE_WRITE,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + program_write_cache(address, 
(value >> 40) & 0x00ffffff, 0x00ffffff); + break; + } +} + +/**********************************************************************************/ + +void athlonxp_device::opcode_cpuid() +{ + switch (REG32(EAX)) + { + case 0x80000000: + { + REG32(EAX) = 0x80000008; + REG32(EBX) = m_cpuid_id0; + REG32(ECX) = m_cpuid_id2; + REG32(EDX) = m_cpuid_id1; + CYCLES(CYCLES_CPUID); + break; + } + + case 0x80000001: + { + REG32(EAX) = m_cpu_version + 0x100; // family+1 as specified in AMD documentation + REG32(EDX) = m_feature_flags; + CYCLES(CYCLES_CPUID); + break; + } + + case 0x80000002: + case 0x80000003: + case 0x80000004: + { + int offset = (REG32(EAX) - 0x80000002) << 4; + uint8_t *b = m_processor_name_string + offset; + REG32(EAX) = b[ 0] + (b[ 1] << 8) + (b[ 2] << 16) + (b[ 3] << 24); + REG32(EBX) = b[ 4] + (b[ 5] << 8) + (b[ 6] << 16) + (b[ 7] << 24); + REG32(ECX) = b[ 8] + (b[ 9] << 8) + (b[10] << 16) + (b[11] << 24); + REG32(EDX) = b[12] + (b[13] << 8) + (b[14] << 16) + (b[15] << 24); + CYCLES(CYCLES_CPUID); + break; + } + + case 0x80000005: + { + REG32(EAX) = 0x0408FF08; // 2M/4M data tlb associativity 04 data tlb number of entries 08 instruction tlb associativity FF instruction tlb number of entries 08 + REG32(EBX) = 0xFF20FF10; // 4K data tlb associativity FF data tlb number of entries 20 instruction tlb associativity FF instruction tlb number of entries 10 + REG32(ECX) = 0x40020140; // L1 data cache size in K 40 associativity 02 lines per tag 01 line size in bytes 40 + REG32(EDX) = 0x40020140; // L1 instruction cache size in K 40 associativity 02 lines per tag 01 line size in bytes 40 + CYCLES(CYCLES_CPUID); + break; + } + + case 0x80000006: + { + REG32(EAX) = 0; + REG32(EBX) = 0x41004100; // 4 100 4 100 + REG32(ECX) = 0x01008140; // L2 cache size in K 0100 associativity 8=16-way lines per tag 1 line size in bytes 40 + CYCLES(CYCLES_CPUID); + break; + } + + case 0x80000007: + { + REG32(EDX) = 1; // Advanced power management information, temperature sensor present 
+ CYCLES(CYCLES_CPUID); + break; + } + + case 0x80000008: + { + REG32(EAX) = 0x00002022; + CYCLES(CYCLES_CPUID); + break; + } + + default: + i386_device::opcode_cpuid(); + } +} + +uint64_t athlonxp_device::opcode_rdmsr(bool &valid_msr) +{ + uint64_t ret; + uint32_t offset = REG32(ECX); + + ret = 0; + switch (offset) + { + case 0x10: // TSC + break; + case 0x1b: // APIC_BASE + break; + case 0xfe: // MTRRcap + // 7-0 MTRRCapVCnt - Number of variable range MTRRs (8) + // 8 MtrrCapFix - Fixed range MTRRs available (1) + // 10 MtrrCapWc - Write combining memory type available (1) + ret = 0x508; + break; + case 0x17b: // MCG_CTL + break; + case 0x200: // MTRRphysBase0-7 + case 0x202: + case 0x204: + case 0x206: + case 0x208: + case 0x20a: + case 0x20c: + case 0x20e: + // 7-0 Type - Memory type for this memory range + // 39-12 PhyBase27-0 - Base address for this memory range + /* Format of type field: + Bits 2-0 specify the memory type with the following encoding + 0 UC Uncacheable + 1 WC Write Combining + 4 WT Write Through + 5 WP Write Protect + 6 WB Write Back + 7 UC Uncacheable used only in PAT register + Bit 3 WrMem 1 write to memory 0 write to mmio, present only in fixed range MTRRs + Bit 4 RdMem 1 read from memory 0 read from mmio, present only in fixed range MTRRs + Other bits are unused + */ + break; + case 0x201: // MTRRphysMask0-7 + case 0x203: + case 0x205: + case 0x207: + case 0x209: + case 0x20b: + case 0x20d: + case 0x20f: + // 11 Valid - Memory range active + // 39-12 PhyMask27-0 - Address mask + break; + case 0x2ff: // MTRRdefType + // 7-0 MtrrDefMemType - Default memory type + // 10 MtrrDefTypeFixEn - Enable fixed range MTRRs + // 11 MtrrDefTypeEn - Enable MTRRs + break; + case 0x250: // MTRRfix64K_00000 + // 8 bits for each 64k block starting at address 0 + ret = m_msr_mtrrfix[0]; + break; + case 0x258: // MTRRfix16K_80000 + // 8 bits for each 16k block starting at address 0x80000 + ret = m_msr_mtrrfix[1]; + break; + case 0x259: // MTRRfix16K_A0000 + // 
8 bits for each 16k block starting at address 0xa0000 + ret = m_msr_mtrrfix[2]; + break; + case 0x268: // MTRRfix4K_C0000 + case 0x269: // MTRRfix4K_C8000 + case 0x26a: // MTRRfix4K_D0000 + case 0x26b: // MTRRfix4K_D8000 + case 0x26c: // MTRRfix4K_E0000 + case 0x26d: // MTRRfix4K_E8000 + case 0x26e: // MTRRfix4K_F0000 + case 0x26f: // MTRRfix4K_F8000 + // 8 bits for each 4k block + ret = m_msr_mtrrfix[3 + offset - 0x268]; + break; + case 0x400: // MC0_CTL + break; + case 0x404: // MC1_CTL + break; + case 0x408: // MC2_CTL + break; + case 0x40c: // MC3_CTL + break; + case 0xC0010010: // SYS_CFG + // 20 MtrrVarDramEn - Enable top of memory address and I/O range registers + // 19 MtrrFixDramModEn - Enable modification of RdDram and WrDram bits in fixed MTRRs + // 18 MtrrFixDramEn - Enable RdDram and WrDram attributes in fixed MTRRs + ret = m_msr_sys_cfg; + break; + case 0xC0010015: // HWCR + break; + case 0xC0010016: // IORRBase0-1 + case 0xC0010018: + // 39-12 Base27-0 - Base address for this memory range + // 4 RdDram - Read from DRAM + // 3 WrDram - Write to DRAM + break; + case 0xC0010017: // IORRMask0-1 + case 0xC0010019: + // 39-12 Mask27-0 - Address mask + // 11 V - Register enabled + break; + case 0xC001001A: // TOP_MEM + // 39-23 TOM16-0 - Top of Memory, accesses from this address onward are directed to mmio + ret = (uint64_t)m_msr_top_mem; + break; + case 0xC001001D: // TOP_MEM2 + break; + case 0xC0010111: // SMM_BASE + // address of system management mode area + ret = (uint64_t)m_msr_smm_base; + break; + case 0xC0010113: // SMM_MASK + // 1 TValid - Enable TSeg SMRAM Range + // 0 AValid - Enable ASeg SMRAM Range + /* Access to the ASeg (a0000-bffff) depends on bit 0 of smm_mask + if the bit is 0 use the associated fixed mtrr + if the bit is 1 + if smm is active + access goes to dram (wrmem 1 rdmem 1) + if smm not active + access goes to mmio (wrmem 0 rdmem 0) */ + ret = m_msr_smm_mask; + break; + } + valid_msr = true; + return ret; +} + +void 
athlonxp_device::opcode_wrmsr(uint64_t data, bool &valid_msr) +{ + uint32_t offset = REG32(ECX); + + switch (offset) + { + case 0x1b: // APIC_BASE + break; + case 0x17b: // MCG_CTL + break; + case 0x200: // MTRRphysBase0-7 + case 0x201: // MTRRphysMask0-7 + case 0x202: + case 0x203: + case 0x204: + case 0x205: + case 0x206: + case 0x207: + case 0x208: + case 0x209: + case 0x20a: + case 0x20b: + case 0x20c: + case 0x20d: + case 0x20e: + case 0x20f: + break; + case 0x2ff: // MTRRdefType + break; + case 0x250: // MTRRfix64K_00000 + m_msr_mtrrfix[0] = data; + parse_mtrrfix(data, 0, 64); + break; + case 0x258: // MTRRfix16K_80000 + m_msr_mtrrfix[1] = data; + parse_mtrrfix(data, 0x80000, 16); + break; + case 0x259: // MTRRfix16K_A0000 + m_msr_mtrrfix[2] = data; + if (m_msr_smm_mask & 1) + { + if (m_smm) + data = 0x1818181818181818; // when smm is active + else + data = 0; // when smm is not active + } + parse_mtrrfix(data, 0xa0000, 16); + break; + case 0x268: // MTRRfix4K_C0000-F8000 + case 0x269: + case 0x26a: + case 0x26b: + case 0x26c: + case 0x26d: + case 0x26e: + case 0x26f: + m_msr_mtrrfix[3 + offset - 0x268] = data; + parse_mtrrfix(data, 0xc0000 + (offset - 0x268) * 0x8000, 4); + break; + case 0x400: // MC0_CTL + break; + case 0x404: // MC1_CTL + break; + case 0x408: // MC2_CTL + break; + case 0x40c: // MC3_CTL + break; + case 0xC0010010: // SYS_CFG + m_msr_sys_cfg = data; + break; + case 0xC0010015: // HWCR + break; + case 0xC0010016: // IORRBase + case 0xC0010017: // IORRMask + case 0xC0010018: + case 0xC0010019: + break; + case 0xC001001A: // TOP_MEM + m_msr_top_mem = (offs_t)data; + break; + case 0xC0010111: // SMM_BASE + m_msr_smm_base = (offs_t)data; + m_smbase = m_msr_smm_base; + break; + case 0xC0010113: // SMM_MASK + m_msr_smm_mask = data; + if (m_msr_smm_mask & 1) + { + if (m_smm) + data = 0x1818181818181818; // when smm is active + else + data = 0; // when smm is not active + } + else + data = m_msr_mtrrfix[2]; + parse_mtrrfix(data, 0xa0000, 16); + 
break; + } + valid_msr = true; +} diff --git a/src/devices/cpu/i386/athlon.h b/src/devices/cpu/i386/athlon.h new file mode 100644 index 00000000000..a4559fcb682 --- /dev/null +++ b/src/devices/cpu/i386/athlon.h @@ -0,0 +1,75 @@ +// license:BSD-3-Clause +// copyright-holders:Ville Linde, Barry Rodewald, Carl, Philip Bennett + +#ifndef MAME_CPU_I386_ATHLON_H +#define MAME_CPU_I386_ATHLON_H + +#pragma once + +#include "i386.h" +#include "cache.h" + +class athlonxp_device : public pentium_device +{ +public: + // construction/destruction + athlonxp_device(const machine_config &mconfig, const char *tag, device_t *owner, uint32_t clock); + +protected: + virtual void opcode_cpuid() override; + virtual uint64_t opcode_rdmsr(bool &valid_msr) override; + virtual void opcode_wrmsr(uint64_t data, bool &valid_msr) override; + virtual void cache_writeback() override; + virtual void cache_invalidate() override; + virtual void cache_clean() override; + virtual void device_start() override; + virtual void device_reset() override; + virtual void enter_smm() override; + virtual void leave_smm() override; + + virtual u8 mem_pr8(offs_t address) override { return opcode_read_cache(address); } + virtual u16 mem_pr16(offs_t address) override { return opcode_read_cache(address); } + virtual u32 mem_pr32(offs_t address) override { return opcode_read_cache(address); } + + virtual uint8_t READ8PL(uint32_t ea, uint8_t privilege) override; + virtual uint16_t READ16PL(uint32_t ea, uint8_t privilege) override; + virtual uint32_t READ32PL(uint32_t ea, uint8_t privilege) override; + virtual uint64_t READ64PL(uint32_t ea, uint8_t privilege) override; + virtual void WRITE8PL(uint32_t ea, uint8_t privilege, uint8_t value) override; + virtual void WRITE16PL(uint32_t ea, uint8_t privilege, uint16_t value) override; + virtual void WRITE32PL(uint32_t ea, uint8_t privilege, uint32_t value) override; + virtual void WRITE64PL(uint32_t ea, uint8_t privilege, uint64_t value) override; + + // 
device_memory_interface override + virtual space_config_vector memory_space_config() const override; + +private: + void parse_mtrrfix(u64 mtrr, offs_t base, int kblock); + inline int check_cacheable(offs_t address); + template int address_mode(offs_t address); + + template dt opcode_read_cache(offs_t address); + uint32_t program_read_cache(offs_t address, uint32_t mask); + void program_write_cache(offs_t address, uint32_t data, uint32_t mask); + + DECLARE_READ32_MEMBER(debug_read_memory); + + address_space_config m_data_config; + address_space *m_data; + address_space_config m_opcodes_config; + address_space *m_opcodes; + memory_access_cache<2, 0, ENDIANNESS_LITTLE> *mmacache32; + uint8_t m_processor_name_string[48]; + offs_t m_msr_top_mem; + uint64_t m_msr_sys_cfg; + offs_t m_msr_smm_base; + uint64_t m_msr_smm_mask; + uint64_t m_msr_mtrrfix[11]; + uint8_t m_memory_ranges_1m[1024 / 4]; + cpucache<17, 9, Cache2Way, CacheLineBytes64> cache; // 512 sets, 2 ways (cachelines per set), 64 bytes per cacheline +}; + + +DECLARE_DEVICE_TYPE(ATHLONXP, athlonxp_device) + +#endif // MAME_CPU_I386_ATHLON_H diff --git a/src/devices/cpu/i386/cpuidmsrs.hxx b/src/devices/cpu/i386/cpuidmsrs.hxx index 8ea746a8e19..dcbc8faff37 100644 --- a/src/devices/cpu/i386/cpuidmsrs.hxx +++ b/src/devices/cpu/i386/cpuidmsrs.hxx @@ -178,314 +178,3 @@ void pentium4_device::opcode_wrmsr(uint64_t data, bool &valid_msr) break; } } - -void athlonxp_device::opcode_cpuid() -{ - switch (REG32(EAX)) - { - case 0x80000000: - { - REG32(EAX) = 0x80000008; - REG32(EBX) = m_cpuid_id0; - REG32(ECX) = m_cpuid_id2; - REG32(EDX) = m_cpuid_id1; - CYCLES(CYCLES_CPUID); - break; - } - - case 0x80000001: - { - REG32(EAX) = m_cpu_version + 0x100; // family+1 as specified in AMD documentation - REG32(EDX) = m_feature_flags; - CYCLES(CYCLES_CPUID); - break; - } - - case 0x80000002: - case 0x80000003: - case 0x80000004: - { - int offset = (REG32(EAX) - 0x80000002) << 4; - uint8_t *b = m_processor_name_string + offset; - 
REG32(EAX) = b[ 0] + (b[ 1] << 8) + (b[ 2] << 16) + (b[ 3] << 24); - REG32(EBX) = b[ 4] + (b[ 5] << 8) + (b[ 6] << 16) + (b[ 7] << 24); - REG32(ECX) = b[ 8] + (b[ 9] << 8) + (b[10] << 16) + (b[11] << 24); - REG32(EDX) = b[12] + (b[13] << 8) + (b[14] << 16) + (b[15] << 24); - CYCLES(CYCLES_CPUID); - break; - } - - case 0x80000005: - { - REG32(EAX) = 0x0408FF08; // 2M/4M data tlb associativity 04 data tlb number of entries 08 instruction tlb associativity FF instruction tlb number of entries 08 - REG32(EBX) = 0xFF20FF10; // 4K data tlb associativity FF data tlb number of entries 20 instruction tlb associativity FF instruction tlb number of entries 10 - REG32(ECX) = 0x40020140; // L1 data cache size in K 40 associativity 02 lines per tag 01 line size in bytes 40 - REG32(EDX) = 0x40020140; // L1 instruction cache size in K 40 associativity 02 lines per tag 01 line size in bytes 40 - CYCLES(CYCLES_CPUID); - break; - } - - case 0x80000006: - { - REG32(EAX) = 0; - REG32(EBX) = 0x41004100; // 4 100 4 100 - REG32(ECX) = 0x01008140; // L2 cache size in K 0100 associativity 8=16-way lines per tag 1 line size in bytes 40 - CYCLES(CYCLES_CPUID); - break; - } - - case 0x80000007: - { - REG32(EDX) = 1; // Advanced power management information, temperature sensor present - CYCLES(CYCLES_CPUID); - break; - } - - case 0x80000008: - { - REG32(EAX) = 0x00002022; - CYCLES(CYCLES_CPUID); - break; - } - - default: - i386_device::opcode_cpuid(); - } -} - -uint64_t athlonxp_device::opcode_rdmsr(bool &valid_msr) -{ - uint64_t ret; - uint32_t offset = REG32(ECX); - - ret = 0; - switch (offset) - { - case 0x10: // TSC - break; - case 0x1b: // APIC_BASE - break; - case 0xfe: // MTRRcap - // 7-0 MTRRCapVCnt - Number of variable range MTRRs (8) - // 8 MtrrCapFix - Fixed range MTRRs available (1) - // 10 MtrrCapWc - Write combining memory type available (1) - ret = 0x508; - break; - case 0x17b: // MCG_CTL - break; - case 0x200: // MTRRphysBase0-7 - case 0x202: - case 0x204: - case 0x206: - case 
0x208: - case 0x20a: - case 0x20c: - case 0x20e: - // 7-0 Type - Memory type for this memory range - // 39-12 PhyBase27-0 - Base address for this memory range - /* Format of type field: - Bits 2-0 specify the memory type with the following encoding - 0 UC Uncacheable - 1 WC Write Combining - 4 WT Write Through - 5 WP Write Protect - 6 WB Write Back - 7 UC Uncacheable used only in PAT register - Bit 3 WrMem 1 write to memory 0 write to mmio, present only in fixed range MTRRs - Bit 4 RdMem 1 read from memory 0 read from mmio, present only in fixed range MTRRs - Other bits are unused - */ - break; - case 0x201: // MTRRphysMask0-7 - case 0x203: - case 0x205: - case 0x207: - case 0x209: - case 0x20b: - case 0x20d: - case 0x20f: - // 11 Valid - Memory range active - // 39-12 PhyMask27-0 - Address mask - break; - case 0x2ff: // MTRRdefType - // 7-0 MtrrDefMemType - Default memory type - // 10 MtrrDefTypeFixEn - Enable fixed range MTRRs - // 11 MtrrDefTypeEn - Enable MTRRs - break; - case 0x250: // MTRRfix64K_00000 - // 8 bits for each 64k block starting at address 0 - ret = m_msr_mtrrfix[0]; - break; - case 0x258: // MTRRfix16K_80000 - // 8 bits for each 16k block starting at address 0x80000 - ret = m_msr_mtrrfix[1]; - break; - case 0x259: // MTRRfix16K_A0000 - // 8 bits for each 16k block starting at address 0xa0000 - ret = m_msr_mtrrfix[2]; - break; - case 0x268: // MTRRfix4K_C0000 - case 0x269: // MTRRfix4K_C8000 - case 0x26a: // MTRRfix4K_D0000 - case 0x26b: // MTRRfix4K_D8000 - case 0x26c: // MTRRfix4K_E0000 - case 0x26d: // MTRRfix4K_E8000 - case 0x26e: // MTRRfix4K_F0000 - case 0x26f: // MTRRfix4K_F8000 - // 8 bits for each 4k block - ret = m_msr_mtrrfix[3 + offset - 0x268]; - break; - case 0x400: // MC0_CTL - break; - case 0x404: // MC1_CTL - break; - case 0x408: // MC2_CTL - break; - case 0x40c: // MC3_CTL - break; - case 0xC0010010: // SYS_CFG - // 20 MtrrVarDramEn - Enable top of memory address and I/O range registers - // 19 MtrrFixDramModEn - Enable 
modification of RdDram and WrDram bits in fixed MTRRs - // 18 MtrrFixDramEn - Enable RdDram and WrDram attributes in fixed MTRRs - ret = m_msr_sys_cfg; - break; - case 0xC0010015: // HWCR - break; - case 0xC0010016: // IORRBase0-1 - case 0xC0010018: - // 39-12 Base27-0 - Base address for this memory range - // 4 RdDram - Read from DRAM - // 3 WrDram - Write to DRAM - break; - case 0xC0010017: // IORRMask0-1 - case 0xC0010019: - // 39-12 Mask27-0 - Address mask - // 11 V - Register enabled - break; - case 0xC001001A: // TOP_MEM - // 39-23 TOM16-0 - Top of Memory, accesses from this address onward are directed to mmio - ret = (uint64_t)m_msr_top_mem; - break; - case 0xC001001D: // TOP_MEM2 - break; - case 0xC0010111: // SMM_BASE - // address of system management mode area - ret = (uint64_t)m_msr_smm_base; - break; - case 0xC0010113: // SMM_MASK - // 1 TValid - Enable TSeg SMRAM Range - // 0 AValid - Enable ASeg SMRAM Range - /* Access to the ASeg (a0000-bffff) depends on bit 0 of smm_mask - if the bit is 0 use the associated fixed mtrr - if the bit is 1 - if smm is active - access goes to dram (wrmem 1 rdmem 1) - if smm not active - access goes to mmio (wrmem 0 rdmem 0) */ - ret = m_msr_smm_mask; - break; - } - valid_msr = true; - return ret; -} - -void athlonxp_device::opcode_wrmsr(uint64_t data, bool &valid_msr) -{ - uint32_t offset = REG32(ECX); - - switch (offset) - { - case 0x1b: // APIC_BASE - break; - case 0x17b: // MCG_CTL - break; - case 0x200: // MTRRphysBase0-7 - case 0x201: // MTRRphysMask0-7 - case 0x202: - case 0x203: - case 0x204: - case 0x205: - case 0x206: - case 0x207: - case 0x208: - case 0x209: - case 0x20a: - case 0x20b: - case 0x20c: - case 0x20d: - case 0x20e: - case 0x20f: - break; - case 0x2ff: // MTRRdefType - break; - case 0x250: // MTRRfix64K_00000 - m_msr_mtrrfix[0] = data; - parse_mtrrfix(data, 0, 64); - break; - case 0x258: // MTRRfix16K_80000 - m_msr_mtrrfix[1] = data; - parse_mtrrfix(data, 0x80000, 16); - break; - case 0x259: // 
MTRRfix16K_A0000 - m_msr_mtrrfix[2] = data; - if (m_msr_smm_mask & 1) - { - if (m_smm) - data = 0x1818181818181818; // when smm is active - else - data = 0; // when smm is not active - } - parse_mtrrfix(data, 0xa0000, 16); - break; - case 0x268: // MTRRfix4K_C0000-F8000 - case 0x269: - case 0x26a: - case 0x26b: - case 0x26c: - case 0x26d: - case 0x26e: - case 0x26f: - m_msr_mtrrfix[3 + offset - 0x268] = data; - parse_mtrrfix(data, 0xc0000 + (offset - 0x268) * 0x8000, 4); - break; - case 0x400: // MC0_CTL - break; - case 0x404: // MC1_CTL - break; - case 0x408: // MC2_CTL - break; - case 0x40c: // MC3_CTL - break; - case 0xC0010010: // SYS_CFG - m_msr_sys_cfg = data; - break; - case 0xC0010015: // HWCR - break; - case 0xC0010016: // IORRBase - case 0xC0010017: // IORRMask - case 0xC0010018: - case 0xC0010019: - break; - case 0xC001001A: // TOP_MEM - m_msr_top_mem = (offs_t)data; - break; - case 0xC0010111: // SMM_BASE - m_msr_smm_base = (offs_t)data; - m_smbase = m_msr_smm_base; - break; - case 0xC0010113: // SMM_MASK - m_msr_smm_mask = data; - if (m_msr_smm_mask & 1) - { - if (m_smm) - data = 0x1818181818181818; // when smm is active - else - data = 0; // when smm is not active - } - else - data = m_msr_mtrrfix[2]; - parse_mtrrfix(data, 0xa0000, 16); - break; - } - valid_msr = true; -} diff --git a/src/devices/cpu/i386/cycles.h b/src/devices/cpu/i386/cycles.h index b6399706858..2ca28aff180 100644 --- a/src/devices/cpu/i386/cycles.h +++ b/src/devices/cpu/i386/cycles.h @@ -6,339 +6,6 @@ #ifndef __CYCLES_H__ #define __CYCLES_H__ -enum X86_CYCLES -{ - CYCLES_MOV_REG_REG, - CYCLES_MOV_REG_MEM, - CYCLES_MOV_MEM_REG, - CYCLES_MOV_IMM_REG, - CYCLES_MOV_IMM_MEM, - CYCLES_MOV_ACC_MEM, - CYCLES_MOV_MEM_ACC, - CYCLES_MOV_REG_SREG, - CYCLES_MOV_MEM_SREG, - CYCLES_MOV_SREG_REG, - CYCLES_MOV_SREG_MEM, - CYCLES_MOVSX_REG_REG, - CYCLES_MOVSX_MEM_REG, - CYCLES_MOVZX_REG_REG, - CYCLES_MOVZX_MEM_REG, - CYCLES_PUSH_RM, - CYCLES_PUSH_REG_SHORT, - CYCLES_PUSH_SREG, - CYCLES_PUSH_IMM, - 
CYCLES_PUSHA, - CYCLES_POP_RM, - CYCLES_POP_REG_SHORT, - CYCLES_POP_SREG, - CYCLES_POPA, - CYCLES_XCHG_REG_REG, - CYCLES_XCHG_REG_MEM, - CYCLES_IN, - CYCLES_IN_VAR, - CYCLES_OUT, - CYCLES_OUT_VAR, - CYCLES_LEA, - CYCLES_LDS, - CYCLES_LES, - CYCLES_LFS, - CYCLES_LGS, - CYCLES_LSS, - CYCLES_CLC, - CYCLES_CLD, - CYCLES_CLI, - CYCLES_CLTS, - CYCLES_CMC, - CYCLES_LAHF, - CYCLES_POPF, - CYCLES_PUSHF, - CYCLES_SAHF, - CYCLES_STC, - CYCLES_STD, - CYCLES_STI, - CYCLES_ALU_REG_REG, - CYCLES_ALU_REG_MEM, - CYCLES_ALU_MEM_REG, - CYCLES_ALU_IMM_REG, - CYCLES_ALU_IMM_MEM, - CYCLES_ALU_IMM_ACC, - CYCLES_INC_REG, - CYCLES_INC_MEM, - CYCLES_DEC_REG, - CYCLES_DEC_MEM, - CYCLES_CMP_REG_REG, - CYCLES_CMP_REG_MEM, - CYCLES_CMP_MEM_REG, - CYCLES_CMP_IMM_REG, - CYCLES_CMP_IMM_MEM, - CYCLES_CMP_IMM_ACC, - CYCLES_TEST_REG_REG, - CYCLES_TEST_REG_MEM, - CYCLES_TEST_IMM_REG, - CYCLES_TEST_IMM_MEM, - CYCLES_TEST_IMM_ACC, - CYCLES_NEG_REG, - CYCLES_NEG_MEM, - CYCLES_AAA, - CYCLES_AAS, - CYCLES_DAA, - CYCLES_DAS, - CYCLES_MUL8_ACC_REG, - CYCLES_MUL8_ACC_MEM, - CYCLES_MUL16_ACC_REG, - CYCLES_MUL16_ACC_MEM, - CYCLES_MUL32_ACC_REG, - CYCLES_MUL32_ACC_MEM, - CYCLES_IMUL8_ACC_REG, - CYCLES_IMUL8_ACC_MEM, - CYCLES_IMUL16_ACC_REG, - CYCLES_IMUL16_ACC_MEM, - CYCLES_IMUL32_ACC_REG, - CYCLES_IMUL32_ACC_MEM, - CYCLES_IMUL8_REG_REG, - CYCLES_IMUL8_REG_MEM, - CYCLES_IMUL16_REG_REG, - CYCLES_IMUL16_REG_MEM, - CYCLES_IMUL32_REG_REG, - CYCLES_IMUL32_REG_MEM, - CYCLES_IMUL16_REG_IMM_REG, - CYCLES_IMUL16_MEM_IMM_REG, - CYCLES_IMUL32_REG_IMM_REG, - CYCLES_IMUL32_MEM_IMM_REG, - CYCLES_DIV8_ACC_REG, - CYCLES_DIV8_ACC_MEM, - CYCLES_DIV16_ACC_REG, - CYCLES_DIV16_ACC_MEM, - CYCLES_DIV32_ACC_REG, - CYCLES_DIV32_ACC_MEM, - CYCLES_IDIV8_ACC_REG, - CYCLES_IDIV8_ACC_MEM, - CYCLES_IDIV16_ACC_REG, - CYCLES_IDIV16_ACC_MEM, - CYCLES_IDIV32_ACC_REG, - CYCLES_IDIV32_ACC_MEM, - CYCLES_AAD, - CYCLES_AAM, - CYCLES_CBW, - CYCLES_CWD, - CYCLES_ROTATE_REG, - CYCLES_ROTATE_MEM, - CYCLES_ROTATE_CARRY_REG, - CYCLES_ROTATE_CARRY_MEM, - 
CYCLES_SHLD_REG, - CYCLES_SHLD_MEM, - CYCLES_SHRD_REG, - CYCLES_SHRD_MEM, - CYCLES_NOT_REG, - CYCLES_NOT_MEM, - CYCLES_CMPS, - CYCLES_INS, - CYCLES_LODS, - CYCLES_MOVS, - CYCLES_OUTS, - CYCLES_SCAS, - CYCLES_STOS, - CYCLES_XLAT, - CYCLES_REP_CMPS_BASE, - CYCLES_REP_INS_BASE, - CYCLES_REP_LODS_BASE, - CYCLES_REP_MOVS_BASE, - CYCLES_REP_OUTS_BASE, - CYCLES_REP_SCAS_BASE, - CYCLES_REP_STOS_BASE, - CYCLES_REP_CMPS, - CYCLES_REP_INS, - CYCLES_REP_LODS, - CYCLES_REP_MOVS, - CYCLES_REP_OUTS, - CYCLES_REP_SCAS, - CYCLES_REP_STOS, - CYCLES_BSF_BASE, - CYCLES_BSF, - CYCLES_BSR_BASE, - CYCLES_BSR, - CYCLES_BT_IMM_REG, - CYCLES_BT_IMM_MEM, - CYCLES_BT_REG_REG, - CYCLES_BT_REG_MEM, - CYCLES_BTC_IMM_REG, - CYCLES_BTC_IMM_MEM, - CYCLES_BTC_REG_REG, - CYCLES_BTC_REG_MEM, - CYCLES_BTR_IMM_REG, - CYCLES_BTR_IMM_MEM, - CYCLES_BTR_REG_REG, - CYCLES_BTR_REG_MEM, - CYCLES_BTS_IMM_REG, - CYCLES_BTS_IMM_MEM, - CYCLES_BTS_REG_REG, - CYCLES_BTS_REG_MEM, - CYCLES_CALL, // E8 - CYCLES_CALL_REG, // FF /2 - CYCLES_CALL_MEM, // FF /2 - CYCLES_CALL_INTERSEG, // 9A - CYCLES_CALL_REG_INTERSEG, // FF /3 - CYCLES_CALL_MEM_INTERSEG, // FF /3 - CYCLES_JMP_SHORT, // EB - CYCLES_JMP, // E9 - CYCLES_JMP_REG, // FF /4 - CYCLES_JMP_MEM, // FF /4 - CYCLES_JMP_INTERSEG, // EA - CYCLES_JMP_REG_INTERSEG, // FF /5 - CYCLES_JMP_MEM_INTERSEG, // FF /5 - CYCLES_RET, // C3 - CYCLES_RET_IMM, // C2 - CYCLES_RET_INTERSEG, // CB - CYCLES_RET_IMM_INTERSEG, // CA - CYCLES_JCC_DISP8, - CYCLES_JCC_FULL_DISP, - CYCLES_JCC_DISP8_NOBRANCH, - CYCLES_JCC_FULL_DISP_NOBRANCH, - CYCLES_JCXZ, - CYCLES_JCXZ_NOBRANCH, - CYCLES_LOOP, - CYCLES_LOOPZ, - CYCLES_LOOPNZ, - CYCLES_SETCC_REG, - CYCLES_SETCC_MEM, - CYCLES_ENTER, - CYCLES_LEAVE, - CYCLES_INT, - CYCLES_INT3, - CYCLES_INTO_OF1, - CYCLES_INTO_OF0, - CYCLES_BOUND_IN_RANGE, - CYCLES_BOUND_OUT_RANGE, - CYCLES_IRET, - CYCLES_HLT, - CYCLES_MOV_REG_CR0, - CYCLES_MOV_REG_CR2, - CYCLES_MOV_REG_CR3, - CYCLES_MOV_CR_REG, - CYCLES_MOV_REG_DR0_3, - CYCLES_MOV_REG_DR6_7, - 
CYCLES_MOV_DR6_7_REG, - CYCLES_MOV_DR0_3_REG, - CYCLES_MOV_REG_TR6_7, - CYCLES_MOV_TR6_7_REG, - CYCLES_NOP, - CYCLES_WAIT, - CYCLES_ARPL_REG, - CYCLES_ARPL_MEM, - CYCLES_LAR_REG, - CYCLES_LAR_MEM, - CYCLES_LGDT, - CYCLES_LIDT, - CYCLES_LLDT_REG, - CYCLES_LLDT_MEM, - CYCLES_LMSW_REG, - CYCLES_LMSW_MEM, - CYCLES_LSL_REG, - CYCLES_LSL_MEM, - CYCLES_LTR_REG, - CYCLES_LTR_MEM, - CYCLES_SGDT, - CYCLES_SIDT, - CYCLES_SLDT_REG, - CYCLES_SLDT_MEM, - CYCLES_SMSW_REG, - CYCLES_SMSW_MEM, - CYCLES_STR_REG, - CYCLES_STR_MEM, - CYCLES_VERR_REG, - CYCLES_VERR_MEM, - CYCLES_VERW_REG, - CYCLES_VERW_MEM, - CYCLES_LOCK, - - CYCLES_BSWAP, - CYCLES_CMPXCHG8B, - CYCLES_CMPXCHG, - CYCLES_CPUID, - CYCLES_CPUID_EAX1, - CYCLES_INVD, - CYCLES_XADD, - CYCLES_RDTSC, - CYCLES_RSM, - CYCLES_RDMSR, - - CYCLES_FABS, - CYCLES_FADD, - CYCLES_FBLD, - CYCLES_FBSTP, - CYCLES_FCHS, - CYCLES_FCLEX, - CYCLES_FCOM, - CYCLES_FCOS, - CYCLES_FDECSTP, - CYCLES_FDISI, - CYCLES_FDIV, - CYCLES_FDIVR, - CYCLES_FENI, - CYCLES_FFREE, - CYCLES_FIADD, - CYCLES_FICOM, - CYCLES_FIDIV, - CYCLES_FILD, - CYCLES_FIMUL, - CYCLES_FINCSTP, - CYCLES_FINIT, - CYCLES_FIST, - CYCLES_FISUB, - CYCLES_FLD, - CYCLES_FLDZ, - CYCLES_FLD1, - CYCLES_FLDL2E, - CYCLES_FLDL2T, - CYCLES_FLDLG2, - CYCLES_FLDLN2, - CYCLES_FLDPI, - CYCLES_FLDCW, - CYCLES_FLDENV, - CYCLES_FMUL, - CYCLES_FNOP, - CYCLES_FPATAN, - CYCLES_FPREM, - CYCLES_FPREM1, - CYCLES_FPTAN, - CYCLES_FRNDINT, - CYCLES_FRSTOR, - CYCLES_FSAVE, - CYCLES_FSCALE, - CYCLES_FSETPM, - CYCLES_FSIN, - CYCLES_FSINCOS, - CYCLES_FSQRT, - CYCLES_FST, - CYCLES_FSTCW, - CYCLES_FSTENV, - CYCLES_FSTSW, - CYCLES_FSUB, - CYCLES_FSUBR, - CYCLES_FTST, - CYCLES_FUCOM, - CYCLES_FXAM, - CYCLES_FXCH, - CYCLES_FXTRACT, - CYCLES_FYL2X, - CYCLES_FYL2XPI, - CYCLES_CMPXCHG_REG_REG_T, - CYCLES_CMPXCHG_REG_REG_F, - CYCLES_CMPXCHG_REG_MEM_T, - CYCLES_CMPXCHG_REG_MEM_F, - CYCLES_XADD_REG_REG, - CYCLES_XADD_REG_MEM, - - CYCLES_NUM_OPCODES -}; - - -#define CPU_CYCLES_I386 0 -#define CPU_CYCLES_I486 1 -#define 
CPU_CYCLES_PENTIUM 2 -#define CPU_CYCLES_MEDIAGX 3 - - struct X86_CYCLE_TABLE { X86_CYCLES op; diff --git a/src/devices/cpu/i386/i386.cpp b/src/devices/cpu/i386/i386.cpp index 3c68bbf1471..c791e0b36a1 100644 --- a/src/devices/cpu/i386/i386.cpp +++ b/src/devices/cpu/i386/i386.cpp @@ -14,7 +14,7 @@ Intel Pentium Pro Intel Pentium II Intel Pentium III - Amd Athlon XP + Amd Athlon XP (athlon.cpp) Intel Pentium 4 */ @@ -41,7 +41,6 @@ DEFINE_DEVICE_TYPE(MEDIAGX, mediagx_device, "mediagx", "Cyrix MediaG DEFINE_DEVICE_TYPE(PENTIUM_PRO, pentium_pro_device, "pentium_pro", "Intel Pentium Pro") DEFINE_DEVICE_TYPE(PENTIUM2, pentium2_device, "pentium2", "Intel Pentium II") DEFINE_DEVICE_TYPE(PENTIUM3, pentium3_device, "pentium3", "Intel Pentium III") -DEFINE_DEVICE_TYPE(ATHLONXP, athlonxp_device, "athlonxp", "Amd Athlon XP") DEFINE_DEVICE_TYPE(PENTIUM4, pentium4_device, "pentium4", "Intel Pentium 4") @@ -131,15 +130,6 @@ pentium3_device::pentium3_device(const machine_config &mconfig, const char *tag, set_vtlb_dynamic_entries(96); } -athlonxp_device::athlonxp_device(const machine_config &mconfig, const char *tag, device_t *owner, uint32_t clock) - : pentium_device(mconfig, ATHLONXP, tag, owner, clock) - , m_data_config("mmio", ENDIANNESS_LITTLE, 32, 32, 0, 32, 12) - , m_opcodes_config("debugger", ENDIANNESS_LITTLE, 32, 32, 0, 32, 12) -{ - // TODO: put correct value - set_vtlb_dynamic_entries(256); -} - pentium4_device::pentium4_device(const machine_config &mconfig, const char *tag, device_t *owner, uint32_t clock) : pentium_device(mconfig, PENTIUM4, tag, owner, clock) { @@ -319,173 +309,1015 @@ bool i386_device::translate_address(int pl, int type, uint32_t *address, uint32_ return true; } -uint32_t i386_device::i386_load_protected_mode_segment(I386_SREG *seg, uint64_t *desc ) +/***********************************************************************************/ + +void i386_device::CHANGE_PC(uint32_t pc) { - uint32_t v1,v2; - uint32_t base, limit; - int entry; - - 
if(!seg->selector) - { - seg->flags = 0; - seg->base = 0; - seg->limit = 0; - seg->d = 0; - seg->valid = false; - return 0; - } - - if ( seg->selector & 0x4 ) - { - base = m_ldtr.base; - limit = m_ldtr.limit; - } else { - base = m_gdtr.base; - limit = m_gdtr.limit; - } - - entry = seg->selector & ~0x7; - if (limit == 0 || entry + 7 > limit) - return 0; - - v1 = READ32PL0(base + entry ); - v2 = READ32PL0(base + entry + 4 ); - - seg->flags = (v2 >> 8) & 0xf0ff; - seg->base = (v2 & 0xff000000) | ((v2 & 0xff) << 16) | ((v1 >> 16) & 0xffff); - seg->limit = (v2 & 0xf0000) | (v1 & 0xffff); - if (seg->flags & 0x8000) - seg->limit = (seg->limit << 12) | 0xfff; - seg->d = (seg->flags & 0x4000) ? 1 : 0; - seg->valid = true; - - if(desc) - *desc = ((uint64_t)v2<<32)|v1; - return 1; + m_pc = i386_translate(CS, pc, -1 ); } -void i386_device::i386_load_call_gate(I386_CALL_GATE *gate) +void i386_device::NEAR_BRANCH(int32_t offs) { - uint32_t v1,v2; - uint32_t base,limit; - int entry; - - if ( gate->segment & 0x4 ) - { - base = m_ldtr.base; - limit = m_ldtr.limit; - } else { - base = m_gdtr.base; - limit = m_gdtr.limit; - } - - entry = gate->segment & ~0x7; - if (limit == 0 || entry + 7 > limit) - return; - - v1 = READ32PL0(base + entry ); - v2 = READ32PL0(base + entry + 4 ); - - /* Note that for task gates, offset and dword_count are not used */ - gate->selector = (v1 >> 16) & 0xffff; - gate->offset = (v1 & 0x0000ffff) | (v2 & 0xffff0000); - gate->ar = (v2 >> 8) & 0xff; - gate->dword_count = v2 & 0x001f; - gate->present = (gate->ar >> 7) & 0x01; - gate->dpl = (gate->ar >> 5) & 0x03; + /* TODO: limit */ + m_eip += offs; + m_pc += offs; } -void i386_device::i386_set_descriptor_accessed(uint16_t selector) +uint8_t i386_device::FETCH() { - // assume the selector is valid, we don't need to check it again - uint32_t base, addr; - uint8_t rights; - if(!(selector & ~3)) - return; + uint8_t value; + uint32_t address = m_pc, error; - if ( selector & 0x4 ) - base = m_ldtr.base; + 
if(!translate_address(m_CPL,TRANSLATE_FETCH,&address,&error)) + PF_THROW(error); + + value = mem_pr8(address & m_a20_mask); +#ifdef DEBUG_MISSING_OPCODE + m_opcode_bytes[m_opcode_bytes_length] = value; + m_opcode_bytes_length = (m_opcode_bytes_length + 1) & 15; +#endif + m_eip++; + m_pc++; + return value; +} +uint16_t i386_device::FETCH16() +{ + uint16_t value; + uint32_t address = m_pc, error; + + if( !WORD_ALIGNED(address) ) { /* Unaligned read */ + value = (FETCH() << 0); + value |= (FETCH() << 8); + } else { + if(!translate_address(m_CPL,TRANSLATE_FETCH,&address,&error)) + PF_THROW(error); + address &= m_a20_mask; + value = mem_pr16(address); + m_eip += 2; + m_pc += 2; + } + return value; +} +uint32_t i386_device::FETCH32() +{ + uint32_t value; + uint32_t address = m_pc, error; + + if( !DWORD_ALIGNED(m_pc) ) { /* Unaligned read */ + value = (FETCH() << 0); + value |= (FETCH() << 8); + value |= (FETCH() << 16); + value |= (FETCH() << 24); + } else { + if(!translate_address(m_CPL,TRANSLATE_FETCH,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + value = mem_pr32(address); + m_eip += 4; + m_pc += 4; + } + return value; +} + +uint8_t i386_device::READ8PL(uint32_t ea, uint8_t privilege) +{ + uint32_t address = ea, error; + + if(!translate_address(privilege,TRANSLATE_READ,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + return m_program->read_byte(address); +} + +uint16_t i386_device::READ16PL(uint32_t ea, uint8_t privilege) +{ + uint16_t value; + uint32_t address = ea, error; + + switch (ea & 3) + { + case 0: + case 2: + if(!translate_address(privilege,TRANSLATE_READ,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + value = m_program->read_word(address); + break; + + case 1: + if(!translate_address(privilege,TRANSLATE_READ,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + value = m_program->read_dword(address - 1, 0x00ffff00) >> 8; + break; + + case 3: + value = READ8PL(ea, privilege); + value |= 
READ8PL(ea + 1, privilege) << 8; + break; + } + + return value; +} + +uint32_t i386_device::READ32PL(uint32_t ea, uint8_t privilege) +{ + uint32_t value; + uint32_t address = ea, error; + + switch (ea & 3) + { + case 0: + if(!translate_address(privilege,TRANSLATE_READ,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + value = m_program->read_dword(address); + break; + + case 1: + if(!translate_address(privilege,TRANSLATE_READ,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + value = m_program->read_dword(address - 1, 0xffffff00) >> 8; + value |= READ8PL(ea + 3, privilege) << 24; + break; + + case 2: + value = READ16PL(ea, privilege); + value |= READ16PL(ea + 2, privilege) << 16; + break; + + case 3: + value = READ8PL(ea, privilege); + + address = ea + 1; + if(!translate_address(privilege,TRANSLATE_READ,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + value |= m_program->read_dword(address, 0x00ffffff) << 8; + break; + } + + return value; +} + +uint64_t i386_device::READ64PL(uint32_t ea, uint8_t privilege) +{ + uint64_t value; + uint32_t address = ea, error; + + switch (ea & 3) + { + case 0: + value = READ32PL(ea, privilege); + value |= uint64_t(READ32PL(ea + 2, privilege)) << 32; + break; + + case 1: + if(!translate_address(privilege,TRANSLATE_READ,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + value = m_program->read_dword(address - 1, 0xffffff00) >> 8; + value |= uint64_t(READ32PL(ea + 3, privilege)) << 24; + value |= uint64_t(READ8PL(ea + 7, privilege)) << 56; + break; + + case 2: + value = READ16PL(ea, privilege); + value |= uint64_t(READ32PL(ea + 2, privilege)) << 16; + value |= uint64_t(READ16PL(ea + 6, privilege)) << 48; + break; + + case 3: + value = READ8PL(ea, privilege); + value |= uint64_t(READ32PL(ea + 1, privilege)) << 8; + + address = ea + 5; + if(!translate_address(privilege,TRANSLATE_READ,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + value |= 
uint64_t(m_program->read_dword(address, 0x00ffffff)) << 40; + break; + } + + return value; +} + +uint16_t i386sx_device::READ16PL(uint32_t ea, uint8_t privilege) +{ + uint16_t value; + uint32_t address = ea, error; + + if (WORD_ALIGNED(ea)) + { + if(!translate_address(privilege,TRANSLATE_READ,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + return m_program->read_word(address); + } else - base = m_gdtr.base; - - addr = base + (selector & ~7) + 5; - i386_translate_address(TRANSLATE_READ, &addr, nullptr); - rights = m_program->read_byte(addr); - // Should a fault be thrown if the table is read only? - m_program->write_byte(addr, rights | 1); + { + /* Unaligned read */ + value = READ8PL(ea, privilege); + value |= READ8PL(ea + 1, privilege) << 8; + return value; + } } -void i386_device::i386_load_segment_descriptor(int segment ) +uint32_t i386sx_device::READ32PL(uint32_t ea, uint8_t privilege) +{ + uint32_t value; + + if (WORD_ALIGNED(ea)) + { + value = READ16PL(ea, privilege); + value |= READ16PL(ea + 2, privilege) << 16; + return value; + } + else + { + value = READ8PL(ea, privilege); + value |= READ16PL(ea + 1, privilege) << 8; + value |= READ8PL(ea + 3, privilege) << 24; + return value; + } +} + +uint64_t i386sx_device::READ64PL(uint32_t ea, uint8_t privilege) +{ + uint64_t value; + + if (WORD_ALIGNED(ea)) + { + value = READ16PL(ea, privilege); + value |= uint64_t(READ16PL(ea + 2, privilege)) << 16; + value |= uint64_t(READ16PL(ea + 4, privilege)) << 32; + value |= uint64_t(READ16PL(ea + 6, privilege)) << 48; + return value; + } + else + { + value = READ8PL(ea, privilege); + value |= uint64_t(READ16PL(ea + 1, privilege)) << 8; + value |= uint64_t(READ16PL(ea + 3, privilege)) << 24; + value |= uint64_t(READ16PL(ea + 5, privilege)) << 40; + value |= uint64_t(READ8PL(ea + 7, privilege)) << 56; + return value; + } +} + +void i386_device::WRITE_TEST(uint32_t ea) +{ + uint32_t address = ea, error; + 
if(!translate_address(m_CPL,TRANSLATE_WRITE,&address,&error)) + PF_THROW(error); +} + +void i386_device::WRITE8PL(uint32_t ea, uint8_t privilege, uint8_t value) +{ + uint32_t address = ea, error; + if(!translate_address(privilege,TRANSLATE_WRITE,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + m_program->write_byte(address, value); +} + +void i386_device::WRITE16PL(uint32_t ea, uint8_t privilege, uint16_t value) +{ + uint32_t address = ea, error; + + switch(ea & 3) + { + case 0: + case 2: + if(!translate_address(privilege,TRANSLATE_WRITE,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + m_program->write_word(address, value); + break; + + case 1: + if(!translate_address(privilege,TRANSLATE_WRITE,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + m_program->write_dword(address - 1, value << 8, 0x00ffff00); + break; + + case 3: + WRITE8PL(ea, privilege, value & 0xff); + WRITE8PL(ea + 1, privilege, (value >> 8) & 0xff); + break; + } +} + +void i386_device::WRITE32PL(uint32_t ea, uint8_t privilege, uint32_t value) +{ + uint32_t address = ea, error; + + switch(ea & 3) + { + case 0: + if(!translate_address(privilege,TRANSLATE_WRITE,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + m_program->write_dword(address, value); + break; + + case 1: + if(!translate_address(privilege,TRANSLATE_WRITE,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + m_program->write_dword(address - 1, (value << 8) & 0xffffff00, 0xffffff00); + WRITE8PL(ea + 3, privilege, (value >> 24) & 0xff); + break; + + case 2: + WRITE16PL(ea, privilege, value & 0xffff); + WRITE16PL(ea + 2, privilege, (value >> 16) & 0xffff); + break; + + case 3: + WRITE8PL(ea, privilege, value & 0xff); + + address = ea + 1; + if(!translate_address(privilege,TRANSLATE_WRITE,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + m_program->write_dword(address, value >> 8, 0x00ffffff); + break; + } +} + +void i386_device::WRITE64PL(uint32_t 
ea, uint8_t privilege, uint64_t value) +{ + uint32_t address = ea, error; + + switch(ea & 3) + { + case 0: + WRITE32PL(ea, privilege, value & 0xffffffff); + WRITE32PL(ea + 2, privilege, (value >> 32) & 0xffffffff); + break; + + case 1: + if(!translate_address(privilege,TRANSLATE_WRITE,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + m_program->write_dword(address - 1, value << 8, 0xffffff00); + WRITE32PL(ea + 3, privilege, (value >> 24) & 0xffffffff); + WRITE8PL(ea + 7, privilege, (value >> 56) & 0xff ); + break; + + case 2: + WRITE16PL(ea, privilege, value & 0xffff); + WRITE32PL(ea + 2, privilege, (value >> 16) & 0xffffffff); + WRITE16PL(ea + 6, privilege, (value >> 48) & 0xffff); + break; + + case 3: + WRITE8PL(ea, privilege, value & 0xff); + WRITE32PL(ea + 1, privilege, (value >> 8) & 0xffffffff); + + address = ea + 5; + if(!translate_address(privilege,TRANSLATE_WRITE,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + m_program->write_dword(address, (value >> 40) & 0x00ffffff, 0x00ffffff); + break; + } +} + +void i386sx_device::WRITE16PL(uint32_t ea, uint8_t privilege, uint16_t value) +{ + uint32_t address = ea, error; + + if (WORD_ALIGNED(ea)) + { + if(!translate_address(privilege,TRANSLATE_WRITE,&address,&error)) + PF_THROW(error); + + address &= m_a20_mask; + m_program->write_word(address, value); + } + else + { + WRITE8PL(ea, privilege, value & 0xff); + WRITE8PL(ea + 1, privilege, (value >> 8) & 0xff); + } +} + +void i386sx_device::WRITE32PL(uint32_t ea, uint8_t privilege, uint32_t value) +{ + if (WORD_ALIGNED(ea)) + { + WRITE16PL(ea, privilege, value & 0xffff); + WRITE16PL(ea + 2, privilege, (value >> 16) & 0xffff); + } + else + { + WRITE8PL(ea, privilege, value & 0xff); + WRITE16PL(ea + 1, privilege, (value >> 8) & 0xffff); + WRITE8PL(ea + 3, privilege, (value >> 24) & 0xff); + } +} + +void i386sx_device::WRITE64PL(uint32_t ea, uint8_t privilege, uint64_t value) +{ + if (WORD_ALIGNED(ea)) + { + WRITE16PL(ea, privilege, value 
& 0xffff); + WRITE16PL(ea + 2, privilege, (value >> 16) & 0xffff); + WRITE16PL(ea + 4, privilege, (value >> 32) & 0xffff); + WRITE16PL(ea + 6, privilege, (value >> 48) & 0xffff); + } + else + { + WRITE8PL(ea, privilege, value & 0xff); + WRITE16PL(ea + 1, privilege, (value >> 8) & 0xffff); + WRITE16PL(ea + 3, privilege, (value >> 24) & 0xffff); + WRITE16PL(ea + 5, privilege, (value >> 40) & 0xffff); + WRITE8PL(ea + 7, privilege, (value >> 56) & 0xff); + } +} + +/***********************************************************************************/ + +uint8_t i386_device::OR8(uint8_t dst, uint8_t src) +{ + uint8_t res = dst | src; + m_CF = m_OF = 0; + SetSZPF8(res); + return res; +} +uint16_t i386_device::OR16(uint16_t dst, uint16_t src) +{ + uint16_t res = dst | src; + m_CF = m_OF = 0; + SetSZPF16(res); + return res; +} +uint32_t i386_device::OR32(uint32_t dst, uint32_t src) +{ + uint32_t res = dst | src; + m_CF = m_OF = 0; + SetSZPF32(res); + return res; +} + +uint8_t i386_device::AND8(uint8_t dst, uint8_t src) +{ + uint8_t res = dst & src; + m_CF = m_OF = 0; + SetSZPF8(res); + return res; +} +uint16_t i386_device::AND16(uint16_t dst, uint16_t src) +{ + uint16_t res = dst & src; + m_CF = m_OF = 0; + SetSZPF16(res); + return res; +} +uint32_t i386_device::AND32(uint32_t dst, uint32_t src) +{ + uint32_t res = dst & src; + m_CF = m_OF = 0; + SetSZPF32(res); + return res; +} + +uint8_t i386_device::XOR8(uint8_t dst, uint8_t src) +{ + uint8_t res = dst ^ src; + m_CF = m_OF = 0; + SetSZPF8(res); + return res; +} +uint16_t i386_device::XOR16(uint16_t dst, uint16_t src) +{ + uint16_t res = dst ^ src; + m_CF = m_OF = 0; + SetSZPF16(res); + return res; +} +uint32_t i386_device::XOR32(uint32_t dst, uint32_t src) +{ + uint32_t res = dst ^ src; + m_CF = m_OF = 0; + SetSZPF32(res); + return res; +} + +#define SUB8(dst, src) SBB8(dst, src, 0) +uint8_t i386_device::SBB8(uint8_t dst, uint8_t src, uint8_t b) +{ + uint16_t res = (uint16_t)dst - (uint16_t)src - (uint8_t)b; + 
SetCF8(res); + SetOF_Sub8(res,src,dst); + SetAF(res,src,dst); + SetSZPF8(res); + return (uint8_t)res; +} + +#define SUB16(dst, src) SBB16(dst, src, 0) +uint16_t i386_device::SBB16(uint16_t dst, uint16_t src, uint16_t b) +{ + uint32_t res = (uint32_t)dst - (uint32_t)src - (uint32_t)b; + SetCF16(res); + SetOF_Sub16(res,src,dst); + SetAF(res,src,dst); + SetSZPF16(res); + return (uint16_t)res; +} + +#define SUB32(dst, src) SBB32(dst, src, 0) +uint32_t i386_device::SBB32(uint32_t dst, uint32_t src, uint32_t b) +{ + uint64_t res = (uint64_t)dst - (uint64_t)src - (uint64_t) b; + SetCF32(res); + SetOF_Sub32(res,src,dst); + SetAF(res,src,dst); + SetSZPF32(res); + return (uint32_t)res; +} + +#define ADD8(dst, src) ADC8(dst, src, 0) +uint8_t i386_device::ADC8(uint8_t dst, uint8_t src, uint8_t c) +{ + uint16_t res = (uint16_t)dst + (uint16_t)src + (uint16_t)c; + SetCF8(res); + SetOF_Add8(res,src,dst); + SetAF(res,src,dst); + SetSZPF8(res); + return (uint8_t)res; +} + +#define ADD16(dst, src) ADC16(dst, src, 0) +uint16_t i386_device::ADC16(uint16_t dst, uint16_t src, uint8_t c) +{ + uint32_t res = (uint32_t)dst + (uint32_t)src + (uint32_t)c; + SetCF16(res); + SetOF_Add16(res,src,dst); + SetAF(res,src,dst); + SetSZPF16(res); + return (uint16_t)res; +} + +#define ADD32(dst, src) ADC32(dst, src, 0) +uint32_t i386_device::ADC32(uint32_t dst, uint32_t src, uint32_t c) +{ + uint64_t res = (uint64_t)dst + (uint64_t)src + (uint64_t) c; + SetCF32(res); + SetOF_Add32(res,src,dst); + SetAF(res,src,dst); + SetSZPF32(res); + return (uint32_t)res; +} + +uint8_t i386_device::INC8(uint8_t dst) +{ + uint16_t res = (uint16_t)dst + 1; + SetOF_Add8(res,1,dst); + SetAF(res,1,dst); + SetSZPF8(res); + return (uint8_t)res; +} +uint16_t i386_device::INC16(uint16_t dst) +{ + uint32_t res = (uint32_t)dst + 1; + SetOF_Add16(res,1,dst); + SetAF(res,1,dst); + SetSZPF16(res); + return (uint16_t)res; +} +uint32_t i386_device::INC32(uint32_t dst) +{ + uint64_t res = (uint64_t)dst + 1; + SetOF_Add32(res,1,dst); 
+ SetAF(res,1,dst); + SetSZPF32(res); + return (uint32_t)res; +} + +uint8_t i386_device::DEC8(uint8_t dst) +{ + uint16_t res = (uint16_t)dst - 1; + SetOF_Sub8(res,1,dst); + SetAF(res,1,dst); + SetSZPF8(res); + return (uint8_t)res; +} +uint16_t i386_device::DEC16(uint16_t dst) +{ + uint32_t res = (uint32_t)dst - 1; + SetOF_Sub16(res,1,dst); + SetAF(res,1,dst); + SetSZPF16(res); + return (uint16_t)res; +} +uint32_t i386_device::DEC32(uint32_t dst) +{ + uint64_t res = (uint64_t)dst - 1; + SetOF_Sub32(res,1,dst); + SetAF(res,1,dst); + SetSZPF32(res); + return (uint32_t)res; +} + + + +void i386_device::PUSH16(uint16_t value) +{ + uint32_t ea, new_esp; + if( STACK_32BIT ) { + new_esp = REG32(ESP) - 2; + ea = i386_translate(SS, new_esp, 1); + WRITE16(ea, value ); + REG32(ESP) = new_esp; + } else { + new_esp = (REG16(SP) - 2) & 0xffff; + ea = i386_translate(SS, new_esp, 1); + WRITE16(ea, value ); + REG16(SP) = new_esp; + } +} +void i386_device::PUSH32(uint32_t value) +{ + uint32_t ea, new_esp; + if( STACK_32BIT ) { + new_esp = REG32(ESP) - 4; + ea = i386_translate(SS, new_esp, 1); + WRITE32(ea, value ); + REG32(ESP) = new_esp; + } else { + new_esp = (REG16(SP) - 4) & 0xffff; + ea = i386_translate(SS, new_esp, 1); + WRITE32(ea, value ); + REG16(SP) = new_esp; + } +} + +void i386_device::PUSH32SEG(uint32_t value) +{ + uint32_t ea, new_esp; + if( STACK_32BIT ) { + new_esp = REG32(ESP) - 4; + ea = i386_translate(SS, new_esp, 1); + ((m_cpu_version & 0xf00) == 0x300) ? WRITE16(ea, value) : WRITE32(ea, value ); // 486 also? + REG32(ESP) = new_esp; + } else { + new_esp = (REG16(SP) - 4) & 0xffff; + ea = i386_translate(SS, new_esp, 1); + ((m_cpu_version & 0xf00) == 0x300) ? 
WRITE16(ea, value) : WRITE32(ea, value ); + REG16(SP) = new_esp; + } +} + +void i386_device::PUSH8(uint8_t value) +{ + if( m_operand_size ) { + PUSH32((int32_t)(int8_t)value); + } else { + PUSH16((int16_t)(int8_t)value); + } +} + +uint8_t i386_device::POP8() +{ + uint8_t value; + uint32_t ea, new_esp; + if( STACK_32BIT ) { + new_esp = REG32(ESP) + 1; + ea = i386_translate(SS, new_esp - 1, 0); + value = READ8(ea ); + REG32(ESP) = new_esp; + } else { + new_esp = REG16(SP) + 1; + ea = i386_translate(SS, (new_esp - 1) & 0xffff, 0); + value = READ8(ea ); + REG16(SP) = new_esp; + } + return value; +} +uint16_t i386_device::POP16() +{ + uint16_t value; + uint32_t ea, new_esp; + if( STACK_32BIT ) { + new_esp = REG32(ESP) + 2; + ea = i386_translate(SS, new_esp - 2, 0); + value = READ16(ea ); + REG32(ESP) = new_esp; + } else { + new_esp = REG16(SP) + 2; + ea = i386_translate(SS, (new_esp - 2) & 0xffff, 0); + value = READ16(ea ); + REG16(SP) = new_esp; + } + return value; +} +uint32_t i386_device::POP32() +{ + uint32_t value; + uint32_t ea, new_esp; + if( STACK_32BIT ) { + new_esp = REG32(ESP) + 4; + ea = i386_translate(SS, new_esp - 4, 0); + value = READ32(ea ); + REG32(ESP) = new_esp; + } else { + new_esp = REG16(SP) + 4; + ea = i386_translate(SS, (new_esp - 4) & 0xffff, 0); + value = READ32(ea ); + REG16(SP) = new_esp; + } + return value; +} + +void i386_device::BUMP_SI(int adjustment) +{ + if ( m_address_size ) + REG32(ESI) += ((m_DF) ? -adjustment : +adjustment); + else + REG16(SI) += ((m_DF) ? -adjustment : +adjustment); +} + +void i386_device::BUMP_DI(int adjustment) +{ + if ( m_address_size ) + REG32(EDI) += ((m_DF) ? -adjustment : +adjustment); + else + REG16(DI) += ((m_DF) ? 
-adjustment : +adjustment); +} + +void i386_device::CYCLES(int x) { if (PROTECTED_MODE) { - uint16_t old_flags = m_sreg[segment].flags; - if (!V8086_MODE) - { - i386_load_protected_mode_segment(&m_sreg[segment], nullptr); - if (m_sreg[segment].selector) - { - i386_set_descriptor_accessed(m_sreg[segment].selector); - m_sreg[segment].flags |= 0x0001; - } - } - else - { - m_sreg[segment].base = m_sreg[segment].selector << 4; - m_sreg[segment].limit = 0xffff; - m_sreg[segment].flags = (segment == CS) ? 0x00fb : 0x00f3; - m_sreg[segment].d = 0; - m_sreg[segment].valid = true; - } - if (segment == CS && m_sreg[segment].flags != old_flags) - debugger_privilege_hook(); + m_cycles -= m_cycle_table_pm[x]; } else { - m_sreg[segment].base = m_sreg[segment].selector << 4; - m_sreg[segment].d = 0; - m_sreg[segment].valid = true; + m_cycles -= m_cycle_table_rm[x]; + } +} - if (segment == CS) +void i386_device::CYCLES_RM(int modrm, int r, int m) +{ + if (modrm >= 0xc0) + { + if (PROTECTED_MODE) { - if (!m_performed_intersegment_jump) - m_sreg[segment].base |= 0xfff00000; - if (m_cpu_version < 0x500) - m_sreg[segment].flags = 0x93; + m_cycles -= m_cycle_table_pm[r]; + } + else + { + m_cycles -= m_cycle_table_rm[r]; + } + } + else + { + if (PROTECTED_MODE) + { + m_cycles -= m_cycle_table_pm[m]; + } + else + { + m_cycles -= m_cycle_table_rm[m]; } } } -/* Retrieves the stack selector located in the current TSS */ -uint32_t i386_device::i386_get_stack_segment(uint8_t privilege) +/**********************************************************************************/ + +void i386_device::check_ioperm(offs_t port, uint8_t mask) { - uint32_t ret; - if(privilege >= 3) - return 0; + uint8_t IOPL, map; + uint16_t IOPB; + uint32_t address; - if(m_task.flags & 8) - ret = READ32PL0((m_task.base+8) + (8*privilege)); - else - ret = READ16PL0((m_task.base+4) + (4*privilege)); + if(!PROTECTED_MODE) + return; - return ret; + IOPL = m_IOP1 | (m_IOP2 << 1); + if(!V8086_MODE && (m_CPL <= IOPL)) + return; + 
+ if((m_task.limit < 0x67) || ((m_task.flags & 0xd) != 9)) + FAULT_THROW(FAULT_GP,0); + + address = m_task.base; + IOPB = READ16PL(address+0x66,0); + if((IOPB+(port/8)) > m_task.limit) + FAULT_THROW(FAULT_GP,0); + + map = READ8PL(address+IOPB+(port/8),0); + map >>= (port%8); + if(map & mask) + FAULT_THROW(FAULT_GP,0); } -/* Retrieves the stack pointer located in the current TSS */ -uint32_t i386_device::i386_get_stack_ptr(uint8_t privilege) +uint8_t i386_device::READPORT8(offs_t port) { - uint32_t ret; - if(privilege >= 3) - return 0; - - if(m_task.flags & 8) - ret = READ32PL0((m_task.base+4) + (8*privilege)); - else - ret = READ16PL0((m_task.base+2) + (4*privilege)); - - return ret; + check_ioperm(port, 1); + return m_io->read_byte(port); } +void i386_device::WRITEPORT8(offs_t port, uint8_t value) +{ + check_ioperm(port, 1); + m_io->write_byte(port, value); +} + +uint16_t i386_device::READPORT16(offs_t port) +{ + uint16_t value; + + switch (port & 3) + { + case 0: + case 2: + check_ioperm(port, 3); + value = m_io->read_word(port); + break; + + case 1: + check_ioperm(port, 3); + value = m_io->read_dword(port - 1, 0x00ffff00) >> 8; + break; + + case 3: + value = READPORT8(port); + value |= (READPORT8(port + 1) << 8); + break; + } + + return value; +} + +void i386_device::WRITEPORT16(offs_t port, uint16_t value) +{ + switch (port & 3) + { + case 0: + case 2: + check_ioperm(port, 3); + m_io->write_word(port, value); + break; + + case 1: + check_ioperm(port, 3); + m_io->write_dword(port - 1, value << 8, 0x00ffff00); + break; + + case 3: + WRITEPORT8(port, value & 0xff); + WRITEPORT8(port + 1, (value >> 8) & 0xff); + break; + } +} + +uint32_t i386_device::READPORT32(offs_t port) +{ + uint32_t value; + + switch (port & 3) + { + case 0: + check_ioperm(port, 0xf); + value = m_io->read_dword(port); + break; + + case 1: + check_ioperm(port, 7); + value = m_io->read_dword(port - 1, 0xffffff00) >> 8; + value |= READPORT8(port + 3) << 24; + break; + + case 2: + value = 
READPORT16(port); + value |= READPORT16(port + 2) << 16; + break; + + case 3: + value = READPORT8(port); + check_ioperm(port + 1, 7); + value |= m_io->read_dword(port + 1, 0x00ffffff) << 8; + break; + } + + return value; +} + +void i386_device::WRITEPORT32(offs_t port, uint32_t value) +{ + switch (port & 3) + { + case 0: + check_ioperm(port, 0xf); + m_io->write_dword(port, value); + break; + + case 1: + check_ioperm(port, 7); + m_io->write_dword(port - 1, value << 8, 0xffffff00); + WRITEPORT8(port + 3, (value >> 24) & 0xff); + break; + + case 2: + WRITEPORT16(port, value & 0xffff); + WRITEPORT16(port + 2, (value >> 16) & 0xffff); + break; + + case 3: + WRITEPORT8(port, value & 0xff); + check_ioperm(port + 1, 7); + m_io->write_dword(port + 1, value >> 8, 0x00ffffff); + break; + } +} + +uint16_t i386sx_device::READPORT16(offs_t port) +{ + if (port & 1) + { + uint16_t value = READPORT8(port); + value |= (READPORT8(port + 1) << 8); + return value; + } + else + { + check_ioperm(port, 3); + return m_io->read_word(port); + } +} + +void i386sx_device::WRITEPORT16(offs_t port, uint16_t value) +{ + if (port & 1) + { + WRITEPORT8(port, value & 0xff); + WRITEPORT8(port + 1, (value >> 8) & 0xff); + } + else + { + check_ioperm(port, 3); + m_io->write_word(port, value); + } +} + +uint32_t i386sx_device::READPORT32(offs_t port) +{ + if (port & 1) + { + uint32_t value = READPORT8(port); + value |= (READPORT16(port + 1) << 8); + value |= (READPORT8(port + 3) << 24); + return value; + } + else + { + uint16_t value = READPORT16(port); + value |= (READPORT16(port + 2) << 16); + return value; + } +} + +void i386sx_device::WRITEPORT32(offs_t port, uint32_t value) +{ + if (port & 1) + { + WRITEPORT8(port, value & 0xff); + WRITEPORT16(port + 1, (value >> 8) & 0xffff); + WRITEPORT8(port + 3, (value >> 24) & 0xff); + } + else + { + WRITEPORT16(port, value & 0xffff); + WRITEPORT16((port + 2), (value >> 16) & 0xffff); + } +} + 
+/***********************************************************************************/ + uint32_t i386_device::get_flags() const { uint32_t f = 0x2; @@ -680,864 +1512,6 @@ uint32_t i386_device::GetEA(uint8_t modrm, int rwn) return i386_translate(segment, ea, rwn ); } -/* Check segment register for validity when changing privilege level after an RETF */ -void i386_device::i386_check_sreg_validity(int reg) -{ - uint16_t selector = m_sreg[reg].selector; - uint8_t CPL = m_CPL; - uint8_t DPL,RPL; - I386_SREG desc; - int invalid; - - memset(&desc, 0, sizeof(desc)); - desc.selector = selector; - i386_load_protected_mode_segment(&desc,nullptr); - DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level - RPL = selector & 0x03; - - /* Must be within the relevant descriptor table limits */ - if(selector & 0x04) - { - if((selector & ~0x07) > m_ldtr.limit) - invalid = 1; - } - else - { - if((selector & ~0x07) > m_gdtr.limit) - invalid = 1; - } - - /* Must be either a data or readable code segment */ - if(((desc.flags & 0x0018) == 0x0018 && (desc.flags & 0x0002)) || (desc.flags & 0x0018) == 0x0010) - invalid = 0; - else - invalid = 1; - - /* If a data segment or non-conforming code segment, then either DPL >= CPL or DPL >= RPL */ - if(((desc.flags & 0x0018) == 0x0018 && (desc.flags & 0x0004) == 0) || (desc.flags & 0x0018) == 0x0010) - { - if((DPL < CPL) || (DPL < RPL)) - invalid = 1; - } - - /* if segment is invalid, then segment register is nulled */ - if(invalid != 0) - { - m_sreg[reg].selector = 0; - i386_load_segment_descriptor(reg); - } -} - -int i386_device::i386_limit_check(int seg, uint32_t offset) -{ - if(PROTECTED_MODE && !V8086_MODE) - { - if((m_sreg[seg].flags & 0x0018) == 0x0010 && m_sreg[seg].flags & 0x0004) // if expand-down data segment - { - // compare if greater then 0xffffffff when we're passed the access size - if((offset <= m_sreg[seg].limit) || ((m_sreg[seg].d)?0:(offset > 0xffff))) - { - logerror("Limit check at 0x%08x failed. 
Segment %04x, limit %08x, offset %08x (expand-down)\n",m_pc,m_sreg[seg].selector,m_sreg[seg].limit,offset); - return 1; - } - } - else - { - if(offset > m_sreg[seg].limit) - { - logerror("Limit check at 0x%08x failed. Segment %04x, limit %08x, offset %08x\n",m_pc,m_sreg[seg].selector,m_sreg[seg].limit,offset); - return 1; - } - } - } - return 0; -} - -void i386_device::i386_sreg_load(uint16_t selector, uint8_t reg, bool *fault) -{ - // Checks done when MOV changes a segment register in protected mode - uint8_t CPL,RPL,DPL; - - CPL = m_CPL; - RPL = selector & 0x0003; - - if(!PROTECTED_MODE || V8086_MODE) - { - m_sreg[reg].selector = selector; - i386_load_segment_descriptor(reg); - if(fault) *fault = false; - return; - } - - if(fault) *fault = true; - if(reg == SS) - { - I386_SREG stack; - - memset(&stack, 0, sizeof(stack)); - stack.selector = selector; - i386_load_protected_mode_segment(&stack,nullptr); - DPL = (stack.flags >> 5) & 0x03; - - if((selector & ~0x0003) == 0) - { - logerror("SReg Load (%08x): Selector is null.\n",m_pc); - FAULT(FAULT_GP,0) - } - if(selector & 0x0004) // LDT - { - if((selector & ~0x0007) > m_ldtr.limit) - { - logerror("SReg Load (%08x): Selector is out of LDT bounds.\n",m_pc); - FAULT(FAULT_GP,selector & ~0x03) - } - } - else // GDT - { - if((selector & ~0x0007) > m_gdtr.limit) - { - logerror("SReg Load (%08x): Selector is out of GDT bounds.\n",m_pc); - FAULT(FAULT_GP,selector & ~0x03) - } - } - if (RPL != CPL) - { - logerror("SReg Load (%08x): Selector RPL does not equal CPL.\n",m_pc); - FAULT(FAULT_GP,selector & ~0x03) - } - if(((stack.flags & 0x0018) != 0x10) && (stack.flags & 0x0002) != 0) - { - logerror("SReg Load (%08x): Segment is not a writable data segment.\n",m_pc); - FAULT(FAULT_GP,selector & ~0x03) - } - if(DPL != CPL) - { - logerror("SReg Load (%08x): Segment DPL does not equal CPL.\n",m_pc); - FAULT(FAULT_GP,selector & ~0x03) - } - if(!(stack.flags & 0x0080)) - { - logerror("SReg Load (%08x): Segment is not 
present.\n",m_pc); - FAULT(FAULT_SS,selector & ~0x03) - } - } - if(reg == DS || reg == ES || reg == FS || reg == GS) - { - I386_SREG desc; - - if((selector & ~0x0003) == 0) - { - m_sreg[reg].selector = selector; - i386_load_segment_descriptor(reg ); - if(fault) *fault = false; - return; - } - - memset(&desc, 0, sizeof(desc)); - desc.selector = selector; - i386_load_protected_mode_segment(&desc,nullptr); - DPL = (desc.flags >> 5) & 0x03; - - if(selector & 0x0004) // LDT - { - if((selector & ~0x0007) > m_ldtr.limit) - { - logerror("SReg Load (%08x): Selector is out of LDT bounds.\n",m_pc); - FAULT(FAULT_GP,selector & ~0x03) - } - } - else // GDT - { - if((selector & ~0x0007) > m_gdtr.limit) - { - logerror("SReg Load (%08x): Selector is out of GDT bounds.\n",m_pc); - FAULT(FAULT_GP,selector & ~0x03) - } - } - if((desc.flags & 0x0018) != 0x10) - { - if((((desc.flags & 0x0002) != 0) && ((desc.flags & 0x0018) != 0x18)) || !(desc.flags & 0x10)) - { - logerror("SReg Load (%08x): Segment is not a data segment or readable code segment.\n",m_pc); - FAULT(FAULT_GP,selector & ~0x03) - } - } - if(((desc.flags & 0x0018) == 0x10) || ((!(desc.flags & 0x0004)) && ((desc.flags & 0x0018) == 0x18))) - { - // if data or non-conforming code segment - if((RPL > DPL) || (CPL > DPL)) - { - logerror("SReg Load (%08x): Selector RPL or CPL is not less or equal to segment DPL.\n",m_pc); - FAULT(FAULT_GP,selector & ~0x03) - } - } - if(!(desc.flags & 0x0080)) - { - logerror("SReg Load (%08x): Segment is not present.\n",m_pc); - FAULT(FAULT_NP,selector & ~0x03) - } - } - - m_sreg[reg].selector = selector; - i386_load_segment_descriptor(reg ); - if(fault) *fault = false; -} - -void i386_device::i386_trap(int irq, int irq_gate, int trap_level) -{ - /* I386 Interrupts/Traps/Faults: - * - * 0x00 Divide by zero - * 0x01 Debug exception - * 0x02 NMI - * 0x03 Int3 - * 0x04 Overflow - * 0x05 Array bounds check - * 0x06 Illegal Opcode - * 0x07 FPU not available - * 0x08 Double fault - * 0x09 Coprocessor 
segment overrun - * 0x0a Invalid task state - * 0x0b Segment not present - * 0x0c Stack exception - * 0x0d General Protection Fault - * 0x0e Page fault - * 0x0f Reserved - * 0x10 Coprocessor error - */ - uint32_t v1, v2; - uint32_t offset, oldflags = get_flags(); - uint16_t segment; - int entry = irq * (PROTECTED_MODE ? 8 : 4); - int SetRPL; - m_lock = false; - - if( !(PROTECTED_MODE) ) - { - /* 16-bit */ - PUSH16(oldflags & 0xffff ); - PUSH16(m_sreg[CS].selector ); - if(irq == 3 || irq == 4 || irq == 9 || irq_gate == 1) - PUSH16(m_eip ); - else - PUSH16(m_prev_eip ); - - m_sreg[CS].selector = READ16(m_idtr.base + entry + 2 ); - m_eip = READ16(m_idtr.base + entry ); - - m_TF = 0; - m_IF = 0; - } - else - { - int type; - uint16_t flags; - I386_SREG desc; - uint8_t CPL = m_CPL, DPL; //, RPL = 0; - - /* 32-bit */ - v1 = READ32PL0(m_idtr.base + entry ); - v2 = READ32PL0(m_idtr.base + entry + 4 ); - offset = (v2 & 0xffff0000) | (v1 & 0xffff); - segment = (v1 >> 16) & 0xffff; - type = (v2>>8) & 0x1F; - flags = (v2>>8) & 0xf0ff; - - if(trap_level == 2) - { - logerror("IRQ: Double fault.\n"); - FAULT_EXP(FAULT_DF,0); - } - if(trap_level >= 3) - { - logerror("IRQ: Triple fault. 
CPU reset.\n"); - pulse_input_line(INPUT_LINE_RESET, attotime::zero); - return; - } - - /* segment privilege checks */ - if(entry >= m_idtr.limit) - { - logerror("IRQ (%08x): Vector %02xh is past IDT limit.\n",m_pc,entry); - FAULT_EXP(FAULT_GP,entry+2) - } - /* segment must be interrupt gate, trap gate, or task gate */ - if(type != 0x05 && type != 0x06 && type != 0x07 && type != 0x0e && type != 0x0f) - { - logerror("IRQ#%02x (%08x): Vector segment %04x is not an interrupt, trap or task gate.\n",irq,m_pc,segment); - FAULT_EXP(FAULT_GP,entry+2) - } - - if(m_ext == 0) // if software interrupt (caused by INT/INTO/INT3) - { - if(((flags >> 5) & 0x03) < CPL) - { - logerror("IRQ (%08x): Software IRQ - gate DPL is less than CPL.\n",m_pc); - FAULT_EXP(FAULT_GP,entry+2) - } - if(V8086_MODE) - { - if((!m_IOP1 || !m_IOP2) && (m_opcode != 0xcc)) - { - logerror("IRQ (%08x): Is in Virtual 8086 mode and IOPL != 3.\n",m_pc); - FAULT(FAULT_GP,0) - } - - } - } - - if((flags & 0x0080) == 0) - { - logerror("IRQ: Vector segment is not present.\n"); - FAULT_EXP(FAULT_NP,entry+2) - } - - if(type == 0x05) - { - /* Task gate */ - memset(&desc, 0, sizeof(desc)); - desc.selector = segment; - i386_load_protected_mode_segment(&desc,nullptr); - if(segment & 0x04) - { - logerror("IRQ: Task gate: TSS is not in the GDT.\n"); - FAULT_EXP(FAULT_TS,segment & ~0x03); - } - else - { - if(segment > m_gdtr.limit) - { - logerror("IRQ: Task gate: TSS is past GDT limit.\n"); - FAULT_EXP(FAULT_TS,segment & ~0x03); - } - } - if((desc.flags & 0x000f) != 0x09 && (desc.flags & 0x000f) != 0x01) - { - logerror("IRQ: Task gate: TSS is not an available TSS.\n"); - FAULT_EXP(FAULT_TS,segment & ~0x03); - } - if((desc.flags & 0x0080) == 0) - { - logerror("IRQ: Task gate: TSS is not present.\n"); - FAULT_EXP(FAULT_NP,segment & ~0x03); - } - if(!(irq == 3 || irq == 4 || irq == 9 || irq_gate == 1)) - m_eip = m_prev_eip; - if(desc.flags & 0x08) - i386_task_switch(desc.selector,1); - else - i286_task_switch(desc.selector,1); 
- return; - } - else - { - /* Interrupt or Trap gate */ - memset(&desc, 0, sizeof(desc)); - desc.selector = segment; - i386_load_protected_mode_segment(&desc,nullptr); - CPL = m_CPL; // current privilege level - DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level -// RPL = segment & 0x03; // requested privilege level - - if((segment & ~0x03) == 0) - { - logerror("IRQ: Gate segment is null.\n"); - FAULT_EXP(FAULT_GP,m_ext) - } - if(segment & 0x04) - { - if((segment & ~0x07) > m_ldtr.limit) - { - logerror("IRQ: Gate segment is past LDT limit.\n"); - FAULT_EXP(FAULT_GP,(segment & 0x03)+m_ext) - } - } - else - { - if((segment & ~0x07) > m_gdtr.limit) - { - logerror("IRQ: Gate segment is past GDT limit.\n"); - FAULT_EXP(FAULT_GP,(segment & 0x03)+m_ext) - } - } - if((desc.flags & 0x0018) != 0x18) - { - logerror("IRQ: Gate descriptor is not a code segment.\n"); - FAULT_EXP(FAULT_GP,(segment & 0x03)+m_ext) - } - if((desc.flags & 0x0080) == 0) - { - logerror("IRQ: Gate segment is not present.\n"); - FAULT_EXP(FAULT_NP,(segment & 0x03)+m_ext) - } - if((desc.flags & 0x0004) == 0 && (DPL < CPL)) - { - /* IRQ to inner privilege */ - I386_SREG stack; - uint32_t newESP,oldSS,oldESP; - - if(V8086_MODE && DPL) - { - logerror("IRQ: Gate to CPL>0 from VM86 mode.\n"); - FAULT_EXP(FAULT_GP,segment & ~0x03); - } - /* Check new stack segment in TSS */ - memset(&stack, 0, sizeof(stack)); - stack.selector = i386_get_stack_segment(DPL); - i386_load_protected_mode_segment(&stack,nullptr); - oldSS = m_sreg[SS].selector; - if(flags & 0x0008) - oldESP = REG32(ESP); - else - oldESP = REG16(SP); - if((stack.selector & ~0x03) == 0) - { - logerror("IRQ: New stack selector is null.\n"); - FAULT_EXP(FAULT_GP,m_ext) - } - if(stack.selector & 0x04) - { - if((stack.selector & ~0x07) > m_ldtr.base) - { - logerror("IRQ: New stack selector is past LDT limit.\n"); - FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+m_ext) - } - } - else - { - if((stack.selector & ~0x07) > m_gdtr.base) - { - 
logerror("IRQ: New stack selector is past GDT limit.\n"); - FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+m_ext) - } - } - if((stack.selector & 0x03) != DPL) - { - logerror("IRQ: New stack selector RPL is not equal to code segment DPL.\n"); - FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+m_ext) - } - if(((stack.flags >> 5) & 0x03) != DPL) - { - logerror("IRQ: New stack segment DPL is not equal to code segment DPL.\n"); - FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+m_ext) - } - if(((stack.flags & 0x0018) != 0x10) && (stack.flags & 0x0002) != 0) - { - logerror("IRQ: New stack segment is not a writable data segment.\n"); - FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+m_ext) // #TS(stack selector + EXT) - } - if((stack.flags & 0x0080) == 0) - { - logerror("IRQ: New stack segment is not present.\n"); - FAULT_EXP(FAULT_SS,(stack.selector & ~0x03)+m_ext) // #TS(stack selector + EXT) - } - newESP = i386_get_stack_ptr(DPL); - if(type & 0x08) // 32-bit gate - { - if(((newESP < (V8086_MODE?36:20)) && !(stack.flags & 0x4)) || ((~stack.limit < (~(newESP - 1) + (V8086_MODE?36:20))) && (stack.flags & 0x4))) - { - logerror("IRQ: New stack has no space for return addresses.\n"); - FAULT_EXP(FAULT_SS,0) - } - } - else // 16-bit gate - { - newESP &= 0xffff; - if(((newESP < (V8086_MODE?18:10)) && !(stack.flags & 0x4)) || ((~stack.limit < (~(newESP - 1) + (V8086_MODE?18:10))) && (stack.flags & 0x4))) - { - logerror("IRQ: New stack has no space for return addresses.\n"); - FAULT_EXP(FAULT_SS,0) - } - } - if(offset > desc.limit) - { - logerror("IRQ: New EIP is past code segment limit.\n"); - FAULT_EXP(FAULT_GP,0) - } - /* change CPL before accessing the stack */ - m_CPL = DPL; - /* check for page fault at new stack TODO: check if stack frame crosses page boundary */ - WRITE_TEST(stack.base+newESP-1); - /* Load new stack segment descriptor */ - m_sreg[SS].selector = stack.selector; - i386_load_protected_mode_segment(&m_sreg[SS],nullptr); - i386_set_descriptor_accessed(stack.selector); - 
REG32(ESP) = newESP; - if(V8086_MODE) - { - //logerror("IRQ (%08x): Interrupt during V8086 task\n",m_pc); - if(type & 0x08) - { - PUSH32SEG(m_sreg[GS].selector & 0xffff); - PUSH32SEG(m_sreg[FS].selector & 0xffff); - PUSH32SEG(m_sreg[DS].selector & 0xffff); - PUSH32SEG(m_sreg[ES].selector & 0xffff); - } - else - { - PUSH16(m_sreg[GS].selector); - PUSH16(m_sreg[FS].selector); - PUSH16(m_sreg[DS].selector); - PUSH16(m_sreg[ES].selector); - } - m_sreg[GS].selector = 0; - m_sreg[FS].selector = 0; - m_sreg[DS].selector = 0; - m_sreg[ES].selector = 0; - m_VM = 0; - i386_load_segment_descriptor(GS); - i386_load_segment_descriptor(FS); - i386_load_segment_descriptor(DS); - i386_load_segment_descriptor(ES); - } - if(type & 0x08) - { - // 32-bit gate - PUSH32SEG(oldSS); - PUSH32(oldESP); - } - else - { - // 16-bit gate - PUSH16(oldSS); - PUSH16(oldESP); - } - SetRPL = 1; - } - else - { - int stack_limit; - if((desc.flags & 0x0004) || (DPL == CPL)) - { - /* IRQ to same privilege */ - if(V8086_MODE && !m_ext) - { - logerror("IRQ: Gate to same privilege from VM86 mode.\n"); - FAULT_EXP(FAULT_GP,segment & ~0x03); - } - if(type == 0x0e || type == 0x0f) // 32-bit gate - stack_limit = 10; - else - stack_limit = 6; - // TODO: Add check for error code (2 extra bytes) - if(REG32(ESP) < stack_limit) - { - logerror("IRQ: Stack has no space left (needs %i bytes).\n",stack_limit); - FAULT_EXP(FAULT_SS,0) - } - if(offset > desc.limit) - { - logerror("IRQ: Gate segment offset is past segment limit.\n"); - FAULT_EXP(FAULT_GP,0) - } - SetRPL = 1; - } - else - { - logerror("IRQ: Gate descriptor is non-conforming, and DPL does not equal CPL.\n"); - FAULT_EXP(FAULT_GP,segment) - } - } - } - uint32_t tempSP = REG32(ESP); - try - { - // this is ugly but the alternative is worse - if(type != 0x0e && type != 0x0f) // if not 386 interrupt or trap gate - { - PUSH16(oldflags & 0xffff ); - PUSH16(m_sreg[CS].selector ); - if(irq == 3 || irq == 4 || irq == 9 || irq_gate == 1) - PUSH16(m_eip ); - else - 
PUSH16(m_prev_eip ); - } - else - { - PUSH32(oldflags & 0x00ffffff ); - PUSH32SEG(m_sreg[CS].selector ); - if(irq == 3 || irq == 4 || irq == 9 || irq_gate == 1) - PUSH32(m_eip ); - else - PUSH32(m_prev_eip ); - } - } - catch(uint64_t e) - { - REG32(ESP) = tempSP; - throw e; - } - if(SetRPL != 0) - segment = (segment & ~0x03) | m_CPL; - m_sreg[CS].selector = segment; - m_eip = offset; - - if(type == 0x0e || type == 0x06) - m_IF = 0; - m_TF = 0; - m_NT = 0; - } - - i386_load_segment_descriptor(CS); - CHANGE_PC(m_eip); - -} - -void i386_device::i386_trap_with_error(int irq, int irq_gate, int trap_level, uint32_t error) -{ - i386_trap(irq,irq_gate,trap_level); - if(irq == 8 || irq == 10 || irq == 11 || irq == 12 || irq == 13 || irq == 14) - { - // for these exceptions, an error code is pushed onto the stack by the processor. - // no error code is pushed for software interrupts, either. - if(PROTECTED_MODE) - { - uint32_t entry = irq * 8; - uint32_t v2,type; - v2 = READ32PL0(m_idtr.base + entry + 4 ); - type = (v2>>8) & 0x1F; - if(type == 5) - { - v2 = READ32PL0(m_idtr.base + entry); - v2 = READ32PL0(m_gdtr.base + ((v2 >> 16) & 0xfff8) + 4); - type = (v2>>8) & 0x1F; - } - if(type >= 9) - PUSH32(error); - else - PUSH16(error); - } - else - PUSH16(error); - } -} - - -void i386_device::i286_task_switch(uint16_t selector, uint8_t nested) -{ - uint32_t tss; - I386_SREG seg; - uint16_t old_task; - uint8_t ar_byte; // access rights byte - - /* TODO: Task State Segment privilege checks */ - - /* For tasks that aren't nested, clear the busy bit in the task's descriptor */ - if(nested == 0) - { - if(m_task.segment & 0x0004) - { - ar_byte = READ8(m_ldtr.base + (m_task.segment & ~0x0007) + 5); - WRITE8(m_ldtr.base + (m_task.segment & ~0x0007) + 5,ar_byte & ~0x02); - } - else - { - ar_byte = READ8(m_gdtr.base + (m_task.segment & ~0x0007) + 5); - WRITE8(m_gdtr.base + (m_task.segment & ~0x0007) + 5,ar_byte & ~0x02); - } - } - - /* Save the state of the current task in the current TSS 
(TR register base) */ - tss = m_task.base; - WRITE16(tss+0x0e,m_eip & 0x0000ffff); - WRITE16(tss+0x10,get_flags() & 0x0000ffff); - WRITE16(tss+0x12,REG16(AX)); - WRITE16(tss+0x14,REG16(CX)); - WRITE16(tss+0x16,REG16(DX)); - WRITE16(tss+0x18,REG16(BX)); - WRITE16(tss+0x1a,REG16(SP)); - WRITE16(tss+0x1c,REG16(BP)); - WRITE16(tss+0x1e,REG16(SI)); - WRITE16(tss+0x20,REG16(DI)); - WRITE16(tss+0x22,m_sreg[ES].selector); - WRITE16(tss+0x24,m_sreg[CS].selector); - WRITE16(tss+0x26,m_sreg[SS].selector); - WRITE16(tss+0x28,m_sreg[DS].selector); - - old_task = m_task.segment; - - /* Load task register with the selector of the incoming task */ - m_task.segment = selector; - memset(&seg, 0, sizeof(seg)); - seg.selector = m_task.segment; - i386_load_protected_mode_segment(&seg,nullptr); - m_task.limit = seg.limit; - m_task.base = seg.base; - m_task.flags = seg.flags; - - /* Set TS bit in CR0 */ - m_cr[0] |= 0x08; - - /* Load incoming task state from the new task's TSS */ - tss = m_task.base; - m_ldtr.segment = READ16(tss+0x2a) & 0xffff; - seg.selector = m_ldtr.segment; - i386_load_protected_mode_segment(&seg,nullptr); - m_ldtr.limit = seg.limit; - m_ldtr.base = seg.base; - m_ldtr.flags = seg.flags; - m_eip = READ16(tss+0x0e); - set_flags(READ16(tss+0x10)); - REG16(AX) = READ16(tss+0x12); - REG16(CX) = READ16(tss+0x14); - REG16(DX) = READ16(tss+0x16); - REG16(BX) = READ16(tss+0x18); - REG16(SP) = READ16(tss+0x1a); - REG16(BP) = READ16(tss+0x1c); - REG16(SI) = READ16(tss+0x1e); - REG16(DI) = READ16(tss+0x20); - m_sreg[ES].selector = READ16(tss+0x22) & 0xffff; - i386_load_segment_descriptor(ES); - m_sreg[CS].selector = READ16(tss+0x24) & 0xffff; - i386_load_segment_descriptor(CS); - m_sreg[SS].selector = READ16(tss+0x26) & 0xffff; - i386_load_segment_descriptor(SS); - m_sreg[DS].selector = READ16(tss+0x28) & 0xffff; - i386_load_segment_descriptor(DS); - - /* Set the busy bit in the new task's descriptor */ - if(selector & 0x0004) - { - ar_byte = READ8(m_ldtr.base + (selector & 
~0x0007) + 5); - WRITE8(m_ldtr.base + (selector & ~0x0007) + 5,ar_byte | 0x02); - } - else - { - ar_byte = READ8(m_gdtr.base + (selector & ~0x0007) + 5); - WRITE8(m_gdtr.base + (selector & ~0x0007) + 5,ar_byte | 0x02); - } - - /* For nested tasks, we write the outgoing task's selector to the back-link field of the new TSS, - and set the NT flag in the EFLAGS register */ - if(nested != 0) - { - WRITE16(tss+0,old_task); - m_NT = 1; - } - CHANGE_PC(m_eip); - - m_CPL = (m_sreg[SS].flags >> 5) & 3; -// printf("286 Task Switch from selector %04x to %04x\n",old_task,selector); -} - -void i386_device::i386_task_switch(uint16_t selector, uint8_t nested) -{ - uint32_t tss; - I386_SREG seg; - uint16_t old_task; - uint8_t ar_byte; // access rights byte - uint32_t oldcr3 = m_cr[3]; - - /* TODO: Task State Segment privilege checks */ - - /* For tasks that aren't nested, clear the busy bit in the task's descriptor */ - if(nested == 0) - { - if(m_task.segment & 0x0004) - { - ar_byte = READ8(m_ldtr.base + (m_task.segment & ~0x0007) + 5); - WRITE8(m_ldtr.base + (m_task.segment & ~0x0007) + 5,ar_byte & ~0x02); - } - else - { - ar_byte = READ8(m_gdtr.base + (m_task.segment & ~0x0007) + 5); - WRITE8(m_gdtr.base + (m_task.segment & ~0x0007) + 5,ar_byte & ~0x02); - } - } - - /* Save the state of the current task in the current TSS (TR register base) */ - tss = m_task.base; - WRITE32(tss+0x1c,m_cr[3]); // correct? 
- WRITE32(tss+0x20,m_eip); - WRITE32(tss+0x24,get_flags()); - WRITE32(tss+0x28,REG32(EAX)); - WRITE32(tss+0x2c,REG32(ECX)); - WRITE32(tss+0x30,REG32(EDX)); - WRITE32(tss+0x34,REG32(EBX)); - WRITE32(tss+0x38,REG32(ESP)); - WRITE32(tss+0x3c,REG32(EBP)); - WRITE32(tss+0x40,REG32(ESI)); - WRITE32(tss+0x44,REG32(EDI)); - WRITE32(tss+0x48,m_sreg[ES].selector); - WRITE32(tss+0x4c,m_sreg[CS].selector); - WRITE32(tss+0x50,m_sreg[SS].selector); - WRITE32(tss+0x54,m_sreg[DS].selector); - WRITE32(tss+0x58,m_sreg[FS].selector); - WRITE32(tss+0x5c,m_sreg[GS].selector); - - old_task = m_task.segment; - - /* Load task register with the selector of the incoming task */ - m_task.segment = selector; - memset(&seg, 0, sizeof(seg)); - seg.selector = m_task.segment; - i386_load_protected_mode_segment(&seg,nullptr); - m_task.limit = seg.limit; - m_task.base = seg.base; - m_task.flags = seg.flags; - - /* Set TS bit in CR0 */ - m_cr[0] |= 0x08; - - /* Load incoming task state from the new task's TSS */ - tss = m_task.base; - m_ldtr.segment = READ32(tss+0x60) & 0xffff; - seg.selector = m_ldtr.segment; - i386_load_protected_mode_segment(&seg,nullptr); - m_ldtr.limit = seg.limit; - m_ldtr.base = seg.base; - m_ldtr.flags = seg.flags; - m_eip = READ32(tss+0x20); - set_flags(READ32(tss+0x24)); - REG32(EAX) = READ32(tss+0x28); - REG32(ECX) = READ32(tss+0x2c); - REG32(EDX) = READ32(tss+0x30); - REG32(EBX) = READ32(tss+0x34); - REG32(ESP) = READ32(tss+0x38); - REG32(EBP) = READ32(tss+0x3c); - REG32(ESI) = READ32(tss+0x40); - REG32(EDI) = READ32(tss+0x44); - m_sreg[ES].selector = READ32(tss+0x48) & 0xffff; - i386_load_segment_descriptor(ES); - m_sreg[CS].selector = READ32(tss+0x4c) & 0xffff; - i386_load_segment_descriptor(CS); - m_sreg[SS].selector = READ32(tss+0x50) & 0xffff; - i386_load_segment_descriptor(SS); - m_sreg[DS].selector = READ32(tss+0x54) & 0xffff; - i386_load_segment_descriptor(DS); - m_sreg[FS].selector = READ32(tss+0x58) & 0xffff; - i386_load_segment_descriptor(FS); - 
m_sreg[GS].selector = READ32(tss+0x5c) & 0xffff; - i386_load_segment_descriptor(GS); - /* For nested tasks, we write the outgoing task's selector to the back-link field of the new TSS, - and set the NT flag in the EFLAGS register before setting cr3 as the old tss address might be gone */ - if(nested != 0) - { - WRITE32(tss+0,old_task); - m_NT = 1; - } - m_cr[3] = READ32(tss+0x1c); // CR3 (PDBR) - if(oldcr3 != m_cr[3]) - vtlb_flush_dynamic(); - - /* Set the busy bit in the new task's descriptor */ - if(selector & 0x0004) - { - ar_byte = READ8(m_ldtr.base + (selector & ~0x0007) + 5); - WRITE8(m_ldtr.base + (selector & ~0x0007) + 5,ar_byte | 0x02); - } - else - { - ar_byte = READ8(m_gdtr.base + (selector & ~0x0007) + 5); - WRITE8(m_gdtr.base + (selector & ~0x0007) + 5,ar_byte | 0x02); - } - - CHANGE_PC(m_eip); - - m_CPL = (m_sreg[SS].flags >> 5) & 3; -// printf("386 Task Switch from selector %04x to %04x\n",old_task,selector); -} - void i386_device::i386_check_irq_line() { if(!m_smm && m_smi) @@ -1554,1442 +1528,6 @@ void i386_device::i386_check_irq_line() } } -void i386_device::i386_protected_mode_jump(uint16_t seg, uint32_t off, int indirect, int operand32) -{ - I386_SREG desc; - I386_CALL_GATE call_gate; - uint8_t CPL,DPL,RPL; - uint8_t SetRPL; - uint16_t segment = seg; - uint32_t offset = off; - - /* Check selector is not null */ - if((segment & ~0x03) == 0) - { - logerror("JMP: Segment is null.\n"); - FAULT(FAULT_GP,0) - } - /* Selector is within descriptor table limit */ - if((segment & 0x04) == 0) - { - /* check GDT limit */ - if((segment & ~0x07) > (m_gdtr.limit)) - { - logerror("JMP: Segment is past GDT limit.\n"); - FAULT(FAULT_GP,segment & 0xfffc) - } - } - else - { - /* check LDT limit */ - if((segment & ~0x07) > (m_ldtr.limit)) - { - logerror("JMP: Segment is past LDT limit.\n"); - FAULT(FAULT_GP,segment & 0xfffc) - } - } - /* Determine segment type */ - memset(&desc, 0, sizeof(desc)); - desc.selector = segment; - 
i386_load_protected_mode_segment(&desc,nullptr); - CPL = m_CPL; // current privilege level - DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level - RPL = segment & 0x03; // requested privilege level - if((desc.flags & 0x0018) == 0x0018) - { - /* code segment */ - if((desc.flags & 0x0004) == 0) - { - /* non-conforming */ - if(RPL > CPL) - { - logerror("JMP: RPL %i is less than CPL %i\n",RPL,CPL); - FAULT(FAULT_GP,segment & 0xfffc) - } - if(DPL != CPL) - { - logerror("JMP: DPL %i is not equal CPL %i\n",DPL,CPL); - FAULT(FAULT_GP,segment & 0xfffc) - } - } - else - { - /* conforming */ - if(DPL > CPL) - { - logerror("JMP: DPL %i is less than CPL %i\n",DPL,CPL); - FAULT(FAULT_GP,segment & 0xfffc) - } - } - SetRPL = 1; - if((desc.flags & 0x0080) == 0) - { - logerror("JMP: Segment is not present\n"); - FAULT(FAULT_NP,segment & 0xfffc) - } - if(offset > desc.limit) - { - logerror("JMP: Offset is past segment limit\n"); - FAULT(FAULT_GP,0) - } - } - else - { - if((desc.flags & 0x0010) != 0) - { - logerror("JMP: Segment is a data segment\n"); - FAULT(FAULT_GP,segment & 0xfffc) // #GP (cannot execute code in a data segment) - } - else - { - switch(desc.flags & 0x000f) - { - case 0x01: // 286 Available TSS - case 0x09: // 386 Available TSS - logerror("JMP: Available 386 TSS at %08x\n",m_pc); - memset(&desc, 0, sizeof(desc)); - desc.selector = segment; - i386_load_protected_mode_segment(&desc,nullptr); - DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level - if(DPL < CPL) - { - logerror("JMP: TSS: DPL %i is less than CPL %i\n",DPL,CPL); - FAULT(FAULT_GP,segment & 0xfffc) - } - if(DPL < RPL) - { - logerror("JMP: TSS: DPL %i is less than TSS RPL %i\n",DPL,RPL); - FAULT(FAULT_GP,segment & 0xfffc) - } - if((desc.flags & 0x0080) == 0) - { - logerror("JMP: TSS: Segment is not present\n"); - FAULT(FAULT_GP,segment & 0xfffc) - } - if(desc.flags & 0x0008) - i386_task_switch(desc.selector,0); - else - i286_task_switch(desc.selector,0); - return; - case 0x04: // 286 Call 
Gate - case 0x0c: // 386 Call Gate - //logerror("JMP: Call gate at %08x\n",m_pc); - SetRPL = 1; - memset(&call_gate, 0, sizeof(call_gate)); - call_gate.segment = segment; - i386_load_call_gate(&call_gate); - DPL = call_gate.dpl; - if(DPL < CPL) - { - logerror("JMP: Call Gate: DPL %i is less than CPL %i\n",DPL,CPL); - FAULT(FAULT_GP,segment & 0xfffc) - } - if(DPL < RPL) - { - logerror("JMP: Call Gate: DPL %i is less than RPL %i\n",DPL,RPL); - FAULT(FAULT_GP,segment & 0xfffc) - } - if((desc.flags & 0x0080) == 0) - { - logerror("JMP: Call Gate: Segment is not present\n"); - FAULT(FAULT_NP,segment & 0xfffc) - } - /* Now we examine the segment that the call gate refers to */ - if(call_gate.selector == 0) - { - logerror("JMP: Call Gate: Gate selector is null\n"); - FAULT(FAULT_GP,0) - } - if(call_gate.selector & 0x04) - { - if((call_gate.selector & ~0x07) > m_ldtr.limit) - { - logerror("JMP: Call Gate: Gate Selector is past LDT segment limit\n"); - FAULT(FAULT_GP,call_gate.selector & 0xfffc) - } - } - else - { - if((call_gate.selector & ~0x07) > m_gdtr.limit) - { - logerror("JMP: Call Gate: Gate Selector is past GDT segment limit\n"); - FAULT(FAULT_GP,call_gate.selector & 0xfffc) - } - } - desc.selector = call_gate.selector; - i386_load_protected_mode_segment(&desc,nullptr); - DPL = (desc.flags >> 5) & 0x03; - if((desc.flags & 0x0018) != 0x18) - { - logerror("JMP: Call Gate: Gate does not point to a code segment\n"); - FAULT(FAULT_GP,call_gate.selector & 0xfffc) - } - if((desc.flags & 0x0004) == 0) - { // non-conforming - if(DPL != CPL) - { - logerror("JMP: Call Gate: Gate DPL does not equal CPL\n"); - FAULT(FAULT_GP,call_gate.selector & 0xfffc) - } - } - else - { // conforming - if(DPL > CPL) - { - logerror("JMP: Call Gate: Gate DPL is greater than CPL\n"); - FAULT(FAULT_GP,call_gate.selector & 0xfffc) - } - } - if((desc.flags & 0x0080) == 0) - { - logerror("JMP: Call Gate: Gate Segment is not present\n"); - FAULT(FAULT_NP,call_gate.selector & 0xfffc) - } - 
if(call_gate.offset > desc.limit) - { - logerror("JMP: Call Gate: Gate offset is past Gate segment limit\n"); - FAULT(FAULT_GP,call_gate.selector & 0xfffc) - } - segment = call_gate.selector; - offset = call_gate.offset; - break; - case 0x05: // Task Gate - logerror("JMP: Task gate at %08x\n",m_pc); - memset(&call_gate, 0, sizeof(call_gate)); - call_gate.segment = segment; - i386_load_call_gate(&call_gate); - DPL = call_gate.dpl; - if(DPL < CPL) - { - logerror("JMP: Task Gate: Gate DPL %i is less than CPL %i\n",DPL,CPL); - FAULT(FAULT_GP,segment & 0xfffc) - } - if(DPL < RPL) - { - logerror("JMP: Task Gate: Gate DPL %i is less than CPL %i\n",DPL,CPL); - FAULT(FAULT_GP,segment & 0xfffc) - } - if(call_gate.present == 0) - { - logerror("JMP: Task Gate: Gate is not present.\n"); - FAULT(FAULT_GP,segment & 0xfffc) - } - /* Check the TSS that the task gate points to */ - desc.selector = call_gate.selector; - i386_load_protected_mode_segment(&desc,nullptr); - DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level - RPL = call_gate.selector & 0x03; // requested privilege level - if(call_gate.selector & 0x04) - { - logerror("JMP: Task Gate TSS: TSS must be global.\n"); - FAULT(FAULT_GP,call_gate.selector & 0xfffc) - } - else - { - if((call_gate.selector & ~0x07) > m_gdtr.limit) - { - logerror("JMP: Task Gate TSS: TSS is past GDT limit.\n"); - FAULT(FAULT_GP,call_gate.selector & 0xfffc) - } - } - if((call_gate.ar & 0x000f) == 0x0009 || (call_gate.ar & 0x000f) == 0x0001) - { - logerror("JMP: Task Gate TSS: Segment is not an available TSS.\n"); - FAULT(FAULT_GP,call_gate.selector & 0xfffc) - } - if(call_gate.present == 0) - { - logerror("JMP: Task Gate TSS: TSS is not present.\n"); - FAULT(FAULT_NP,call_gate.selector & 0xfffc) - } - if(call_gate.ar & 0x08) - i386_task_switch(call_gate.selector,0); - else - i286_task_switch(call_gate.selector,0); - return; - default: // invalid segment type - logerror("JMP: Invalid segment type (%i) to jump to.\n",desc.flags & 0x000f); - 
FAULT(FAULT_GP,segment & 0xfffc) - } - } - } - - if(SetRPL != 0) - segment = (segment & ~0x03) | m_CPL; - if(operand32 == 0) - m_eip = offset & 0x0000ffff; - else - m_eip = offset; - m_sreg[CS].selector = segment; - m_performed_intersegment_jump = 1; - i386_load_segment_descriptor(CS); - CHANGE_PC(m_eip); -} - -void i386_device::i386_protected_mode_call(uint16_t seg, uint32_t off, int indirect, int operand32) -{ - I386_SREG desc; - I386_CALL_GATE gate; - uint8_t SetRPL; - uint8_t CPL, DPL, RPL; - uint16_t selector = seg; - uint32_t offset = off; - int x; - - if((selector & ~0x03) == 0) - { - logerror("CALL (%08x): Selector is null.\n",m_pc); - FAULT(FAULT_GP,0) // #GP(0) - } - if(selector & 0x04) - { - if((selector & ~0x07) > m_ldtr.limit) - { - logerror("CALL: Selector is past LDT limit.\n"); - FAULT(FAULT_GP,selector & ~0x03) // #GP(selector) - } - } - else - { - if((selector & ~0x07) > m_gdtr.limit) - { - logerror("CALL: Selector is past GDT limit.\n"); - FAULT(FAULT_GP,selector & ~0x03) // #GP(selector) - } - } - - /* Determine segment type */ - memset(&desc, 0, sizeof(desc)); - desc.selector = selector; - i386_load_protected_mode_segment(&desc,nullptr); - CPL = m_CPL; // current privilege level - DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level - RPL = selector & 0x03; // requested privilege level - if((desc.flags & 0x0018) == 0x18) // is a code segment - { - if(desc.flags & 0x0004) - { - /* conforming */ - if(DPL > CPL) - { - logerror("CALL: Code segment DPL %i is greater than CPL %i\n",DPL,CPL); - FAULT(FAULT_GP,selector & ~0x03) // #GP(selector) - } - } - else - { - /* non-conforming */ - if(RPL > CPL) - { - logerror("CALL: RPL %i is greater than CPL %i\n",RPL,CPL); - FAULT(FAULT_GP,selector & ~0x03) // #GP(selector) - } - if(DPL != CPL) - { - logerror("CALL: Code segment DPL %i is not equal to CPL %i\n",DPL,CPL); - FAULT(FAULT_GP,selector & ~0x03) // #GP(selector) - } - } - SetRPL = 1; - if((desc.flags & 0x0080) == 0) - { - logerror("CALL 
(%08x): Code segment is not present.\n",m_pc); - FAULT(FAULT_NP,selector & ~0x03) // #NP(selector) - } - if (operand32 != 0) // if 32-bit - { - uint32_t offset = (STACK_32BIT ? REG32(ESP) - 8 : (REG16(SP) - 8) & 0xffff); - if(i386_limit_check(SS, offset)) - { - logerror("CALL (%08x): Stack has no room for return address.\n",m_pc); - FAULT(FAULT_SS,0) // #SS(0) - } - } - else - { - uint32_t offset = (STACK_32BIT ? REG32(ESP) - 4 : (REG16(SP) - 4) & 0xffff); - if(i386_limit_check(SS, offset)) - { - logerror("CALL (%08x): Stack has no room for return address.\n",m_pc); - FAULT(FAULT_SS,0) // #SS(0) - } - } - if(offset > desc.limit) - { - logerror("CALL: EIP is past segment limit.\n"); - FAULT(FAULT_GP,0) // #GP(0) - } - } - else - { - /* special segment type */ - if(desc.flags & 0x0010) - { - logerror("CALL: Segment is a data segment.\n"); - FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector) - } - else - { - switch(desc.flags & 0x000f) - { - case 0x01: // Available 286 TSS - case 0x09: // Available 386 TSS - logerror("CALL: Available TSS at %08x\n",m_pc); - if(DPL < CPL) - { - logerror("CALL: TSS: DPL is less than CPL.\n"); - FAULT(FAULT_TS,selector & ~0x03) // #TS(selector) - } - if(DPL < RPL) - { - logerror("CALL: TSS: DPL is less than RPL.\n"); - FAULT(FAULT_TS,selector & ~0x03) // #TS(selector) - } - if(desc.flags & 0x0002) - { - logerror("CALL: TSS: TSS is busy.\n"); - FAULT(FAULT_TS,selector & ~0x03) // #TS(selector) - } - if((desc.flags & 0x0080) == 0) - { - logerror("CALL: TSS: Segment %02x is not present.\n",selector); - FAULT(FAULT_NP,selector & ~0x03) // #NP(selector) - } - if(desc.flags & 0x08) - i386_task_switch(desc.selector,1); - else - i286_task_switch(desc.selector,1); - return; - case 0x04: // 286 call gate - case 0x0c: // 386 call gate - if((desc.flags & 0x000f) == 0x04) - operand32 = 0; - else - operand32 = 1; - memset(&gate, 0, sizeof(gate)); - gate.segment = selector; - i386_load_call_gate(&gate); - DPL = gate.dpl; - //logerror("CALL: Call 
gate at %08x (%i parameters)\n",m_pc,gate.dword_count); - if(DPL < CPL) - { - logerror("CALL: Call gate DPL %i is less than CPL %i.\n",DPL,CPL); - FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector) - } - if(DPL < RPL) - { - logerror("CALL: Call gate DPL %i is less than RPL %i.\n",DPL,RPL); - FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector) - } - if(gate.present == 0) - { - logerror("CALL: Call gate is not present.\n"); - FAULT(FAULT_NP,desc.selector & ~0x03) // #GP(selector) - } - desc.selector = gate.selector; - if((gate.selector & ~0x03) == 0) - { - logerror("CALL: Call gate: Segment is null.\n"); - FAULT(FAULT_GP,0) // #GP(0) - } - if(desc.selector & 0x04) - { - if((desc.selector & ~0x07) > m_ldtr.limit) - { - logerror("CALL: Call gate: Segment is past LDT limit\n"); - FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector) - } - } - else - { - if((desc.selector & ~0x07) > m_gdtr.limit) - { - logerror("CALL: Call gate: Segment is past GDT limit\n"); - FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector) - } - } - i386_load_protected_mode_segment(&desc,nullptr); - if((desc.flags & 0x0018) != 0x18) - { - logerror("CALL: Call gate: Segment is not a code segment.\n"); - FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector) - } - DPL = ((desc.flags >> 5) & 0x03); - if(DPL > CPL) - { - logerror("CALL: Call gate: Segment DPL %i is greater than CPL %i.\n",DPL,CPL); - FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector) - } - if((desc.flags & 0x0080) == 0) - { - logerror("CALL (%08x): Code segment is not present.\n",m_pc); - FAULT(FAULT_NP,desc.selector & ~0x03) // #NP(selector) - } - if(DPL < CPL && (desc.flags & 0x0004) == 0) - { - I386_SREG stack; - I386_SREG temp; - uint32_t oldSS,oldESP; - /* more privilege */ - /* Check new SS segment for privilege level from TSS */ - memset(&stack, 0, sizeof(stack)); - stack.selector = i386_get_stack_segment(DPL); - i386_load_protected_mode_segment(&stack,nullptr); - if((stack.selector & ~0x03) == 0) - { - 
logerror("CALL: Call gate: TSS selector is null\n"); - FAULT(FAULT_TS,0) // #TS(0) - } - if(stack.selector & 0x04) - { - if((stack.selector & ~0x07) > m_ldtr.limit) - { - logerror("CALL: Call gate: TSS selector is past LDT limit\n"); - FAULT(FAULT_TS,stack.selector) // #TS(SS selector) - } - } - else - { - if((stack.selector & ~0x07) > m_gdtr.limit) - { - logerror("CALL: Call gate: TSS selector is past GDT limit\n"); - FAULT(FAULT_TS,stack.selector) // #TS(SS selector) - } - } - if((stack.selector & 0x03) != DPL) - { - logerror("CALL: Call gate: Stack selector RPL does not equal code segment DPL %i\n",DPL); - FAULT(FAULT_TS,stack.selector) // #TS(SS selector) - } - if(((stack.flags >> 5) & 0x03) != DPL) - { - logerror("CALL: Call gate: Stack DPL does not equal code segment DPL %i\n",DPL); - FAULT(FAULT_TS,stack.selector) // #TS(SS selector) - } - if((stack.flags & 0x0018) != 0x10 && (stack.flags & 0x0002)) - { - logerror("CALL: Call gate: Stack segment is not a writable data segment\n"); - FAULT(FAULT_TS,stack.selector) // #TS(SS selector) - } - if((stack.flags & 0x0080) == 0) - { - logerror("CALL: Call gate: Stack segment is not present\n"); - FAULT(FAULT_SS,stack.selector) // #SS(SS selector) - } - uint32_t newESP = i386_get_stack_ptr(DPL); - if(!stack.d) - { - newESP &= 0xffff; - } - if(operand32 != 0) - { - if(newESP < ((gate.dword_count & 0x1f) + 16)) - { - logerror("CALL: Call gate: New stack has no room for 32-bit return address and parameters.\n"); - FAULT(FAULT_SS,0) // #SS(0) - } - if(gate.offset > desc.limit) - { - logerror("CALL: Call gate: EIP is past segment limit.\n"); - FAULT(FAULT_GP,0) // #GP(0) - } - } - else - { - if(newESP < ((gate.dword_count & 0x1f) + 8)) - { - logerror("CALL: Call gate: New stack has no room for 16-bit return address and parameters.\n"); - FAULT(FAULT_SS,0) // #SS(0) - } - if((gate.offset & 0xffff) > desc.limit) - { - logerror("CALL: Call gate: IP is past segment limit.\n"); - FAULT(FAULT_GP,0) // #GP(0) - } - } - selector = 
gate.selector; - offset = gate.offset; - - m_CPL = (stack.flags >> 5) & 0x03; - /* check for page fault at new stack */ - WRITE_TEST(stack.base+newESP-1); - /* switch to new stack */ - oldSS = m_sreg[SS].selector; - m_sreg[SS].selector = i386_get_stack_segment(m_CPL); - if(operand32 != 0) - { - oldESP = REG32(ESP); - } - else - { - oldESP = REG16(SP); - } - i386_load_segment_descriptor(SS ); - REG32(ESP) = newESP; - - if(operand32 != 0) - { - PUSH32SEG(oldSS); - PUSH32(oldESP); - } - else - { - PUSH16(oldSS); - PUSH16(oldESP & 0xffff); - } - - memset(&temp, 0, sizeof(temp)); - temp.selector = oldSS; - i386_load_protected_mode_segment(&temp,nullptr); - /* copy parameters from old stack to new stack */ - for(x=(gate.dword_count & 0x1f)-1;x>=0;x--) - { - uint32_t addr = oldESP + (operand32?(x*4):(x*2)); - addr = temp.base + (temp.d?addr:(addr&0xffff)); - if(operand32) - PUSH32(READ32(addr)); - else - PUSH16(READ16(addr)); - } - SetRPL = 1; - } - else - { - /* same privilege */ - if (operand32 != 0) // if 32-bit - { - uint32_t stkoff = (STACK_32BIT ? REG32(ESP) - 8 : (REG16(SP) - 8) & 0xffff); - if(i386_limit_check(SS, stkoff)) - { - logerror("CALL: Stack has no room for return address.\n"); - FAULT(FAULT_SS,0) // #SS(0) - } - selector = gate.selector; - offset = gate.offset; - } - else - { - uint32_t stkoff = (STACK_32BIT ? 
REG32(ESP) - 4 : (REG16(SP) - 4) & 0xffff); - if(i386_limit_check(SS, stkoff)) - { - logerror("CALL: Stack has no room for return address.\n"); - FAULT(FAULT_SS,0) // #SS(0) - } - selector = gate.selector; - offset = gate.offset & 0xffff; - } - if(offset > desc.limit) - { - logerror("CALL: EIP is past segment limit.\n"); - FAULT(FAULT_GP,0) // #GP(0) - } - SetRPL = 1; - } - break; - case 0x05: // task gate - logerror("CALL: Task gate at %08x\n",m_pc); - memset(&gate, 0, sizeof(gate)); - gate.segment = selector; - i386_load_call_gate(&gate); - DPL = gate.dpl; - if(DPL < CPL) - { - logerror("CALL: Task Gate: Gate DPL is less than CPL.\n"); - FAULT(FAULT_TS,selector & ~0x03) // #TS(selector) - } - if(DPL < RPL) - { - logerror("CALL: Task Gate: Gate DPL is less than RPL.\n"); - FAULT(FAULT_TS,selector & ~0x03) // #TS(selector) - } - if((gate.ar & 0x0080) == 0) - { - logerror("CALL: Task Gate: Gate is not present.\n"); - FAULT(FAULT_NP,selector & ~0x03) // #NP(selector) - } - /* Check the TSS that the task gate points to */ - desc.selector = gate.selector; - i386_load_protected_mode_segment(&desc,nullptr); - if(gate.selector & 0x04) - { - logerror("CALL: Task Gate: TSS is not global.\n"); - FAULT(FAULT_TS,gate.selector & ~0x03) // #TS(selector) - } - else - { - if((gate.selector & ~0x07) > m_gdtr.limit) - { - logerror("CALL: Task Gate: TSS is past GDT limit.\n"); - FAULT(FAULT_TS,gate.selector & ~0x03) // #TS(selector) - } - } - if(desc.flags & 0x0002) - { - logerror("CALL: Task Gate: TSS is busy.\n"); - FAULT(FAULT_TS,gate.selector & ~0x03) // #TS(selector) - } - if((desc.flags & 0x0080) == 0) - { - logerror("CALL: Task Gate: TSS is not present.\n"); - FAULT(FAULT_NP,gate.selector & ~0x03) // #TS(selector) - } - if(desc.flags & 0x08) - i386_task_switch(desc.selector,1); // with nesting - else - i286_task_switch(desc.selector,1); - return; - default: - logerror("CALL: Invalid special segment type (%i) to jump to.\n",desc.flags & 0x000f); - FAULT(FAULT_GP,selector & 
~0x07) // #GP(selector) - } - } - } - - if(SetRPL != 0) - selector = (selector & ~0x03) | m_CPL; - - uint32_t tempSP = REG32(ESP); - try - { - // this is ugly but the alternative is worse - if(operand32 == 0) - { - /* 16-bit operand size */ - PUSH16(m_sreg[CS].selector ); - PUSH16(m_eip & 0x0000ffff ); - m_sreg[CS].selector = selector; - m_performed_intersegment_jump = 1; - m_eip = offset; - i386_load_segment_descriptor(CS); - } - else - { - /* 32-bit operand size */ - PUSH32SEG(m_sreg[CS].selector ); - PUSH32(m_eip ); - m_sreg[CS].selector = selector; - m_performed_intersegment_jump = 1; - m_eip = offset; - i386_load_segment_descriptor(CS ); - } - } - catch(uint64_t e) - { - REG32(ESP) = tempSP; - throw e; - } - - CHANGE_PC(m_eip); -} - -void i386_device::i386_protected_mode_retf(uint8_t count, uint8_t operand32) -{ - uint32_t newCS, newEIP; - I386_SREG desc; - uint8_t CPL, RPL, DPL; - - uint32_t ea = i386_translate(SS, (STACK_32BIT)?REG32(ESP):REG16(SP), 0); - - if(operand32 == 0) - { - newEIP = READ16(ea) & 0xffff; - newCS = READ16(ea+2) & 0xffff; - } - else - { - newEIP = READ32(ea); - newCS = READ32(ea+4) & 0xffff; - } - - memset(&desc, 0, sizeof(desc)); - desc.selector = newCS; - i386_load_protected_mode_segment(&desc,nullptr); - CPL = m_CPL; // current privilege level - DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level - RPL = newCS & 0x03; - - if(RPL < CPL) - { - logerror("RETF (%08x): Return segment RPL is less than CPL.\n",m_pc); - FAULT(FAULT_GP,newCS & ~0x03) - } - - if(RPL == CPL) - { - /* same privilege level */ - if((newCS & ~0x03) == 0) - { - logerror("RETF: Return segment is null.\n"); - FAULT(FAULT_GP,0) - } - if(newCS & 0x04) - { - if((newCS & ~0x07) >= m_ldtr.limit) - { - logerror("RETF: Return segment is past LDT limit.\n"); - FAULT(FAULT_GP,newCS & ~0x03) - } - } - else - { - if((newCS & ~0x07) >= m_gdtr.limit) - { - logerror("RETF: Return segment is past GDT limit.\n"); - FAULT(FAULT_GP,newCS & ~0x03) - } - } - if((desc.flags & 
0x0018) != 0x0018) - { - logerror("RETF: Return segment is not a code segment.\n"); - FAULT(FAULT_GP,newCS & ~0x03) - } - if(desc.flags & 0x0004) - { - if(DPL > RPL) - { - logerror("RETF: Conforming code segment DPL is greater than CS RPL.\n"); - FAULT(FAULT_GP,newCS & ~0x03) - } - } - else - { - if(DPL != RPL) - { - logerror("RETF: Non-conforming code segment DPL does not equal CS RPL.\n"); - FAULT(FAULT_GP,newCS & ~0x03) - } - } - if((desc.flags & 0x0080) == 0) - { - logerror("RETF (%08x): Code segment is not present.\n",m_pc); - FAULT(FAULT_NP,newCS & ~0x03) - } - if(newEIP > desc.limit) - { - logerror("RETF: EIP is past code segment limit.\n"); - FAULT(FAULT_GP,0) - } - if(operand32 == 0) - { - uint32_t offset = (STACK_32BIT ? REG32(ESP) : REG16(SP)); - if(i386_limit_check(SS,offset+count+3) != 0) - { - logerror("RETF (%08x): SP is past stack segment limit.\n",m_pc); - FAULT(FAULT_SS,0) - } - } - else - { - uint32_t offset = (STACK_32BIT ? REG32(ESP) : REG16(SP)); - if(i386_limit_check(SS,offset+count+7) != 0) - { - logerror("RETF: ESP is past stack segment limit.\n"); - FAULT(FAULT_SS,0) - } - } - if(STACK_32BIT) - REG32(ESP) += (operand32 ? 8 : 4) + count; - else - REG16(SP) += (operand32 ? 8 : 4) + count; - } - else if(RPL > CPL) - { - uint32_t newSS, newESP; // when changing privilege - /* outer privilege level */ - if(operand32 == 0) - { - uint32_t offset = (STACK_32BIT ? REG32(ESP) : REG16(SP)); - if(i386_limit_check(SS,offset+count+7) != 0) - { - logerror("RETF (%08x): SP is past stack segment limit.\n",m_pc); - FAULT(FAULT_SS,0) - } - } - else - { - uint32_t offset = (STACK_32BIT ? 
REG32(ESP) : REG16(SP)); - if(i386_limit_check(SS,offset+count+15) != 0) - { - logerror("RETF: ESP is past stack segment limit.\n"); - FAULT(FAULT_SS,0) - } - } - /* Check CS selector and descriptor */ - if((newCS & ~0x03) == 0) - { - logerror("RETF: CS segment is null.\n"); - FAULT(FAULT_GP,0) - } - if(newCS & 0x04) - { - if((newCS & ~0x07) >= m_ldtr.limit) - { - logerror("RETF: CS segment selector is past LDT limit.\n"); - FAULT(FAULT_GP,newCS & ~0x03) - } - } - else - { - if((newCS & ~0x07) >= m_gdtr.limit) - { - logerror("RETF: CS segment selector is past GDT limit.\n"); - FAULT(FAULT_GP,newCS & ~0x03) - } - } - if((desc.flags & 0x0018) != 0x0018) - { - logerror("RETF: CS segment is not a code segment.\n"); - FAULT(FAULT_GP,newCS & ~0x03) - } - if(desc.flags & 0x0004) - { - if(DPL > RPL) - { - logerror("RETF: Conforming CS segment DPL is greater than return selector RPL.\n"); - FAULT(FAULT_GP,newCS & ~0x03) - } - } - else - { - if(DPL != RPL) - { - logerror("RETF: Non-conforming CS segment DPL is not equal to return selector RPL.\n"); - FAULT(FAULT_GP,newCS & ~0x03) - } - } - if((desc.flags & 0x0080) == 0) - { - logerror("RETF: CS segment is not present.\n"); - FAULT(FAULT_NP,newCS & ~0x03) - } - if(newEIP > desc.limit) - { - logerror("RETF: EIP is past return CS segment limit.\n"); - FAULT(FAULT_GP,0) - } - - if(operand32 == 0) - { - ea += count+4; - newESP = READ16(ea) & 0xffff; - newSS = READ16(ea+2) & 0xffff; - } - else - { - ea += count+8; - newESP = READ32(ea); - newSS = READ32(ea+4) & 0xffff; - } - - /* Check SS selector and descriptor */ - desc.selector = newSS; - i386_load_protected_mode_segment(&desc,nullptr); - DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level - if((newSS & ~0x07) == 0) - { - logerror("RETF: SS segment is null.\n"); - FAULT(FAULT_GP,0) - } - if(newSS & 0x04) - { - if((newSS & ~0x07) > m_ldtr.limit) - { - logerror("RETF (%08x): SS segment selector is past LDT limit.\n",m_pc); - FAULT(FAULT_GP,newSS & ~0x03) - } - } - else 
- { - if((newSS & ~0x07) > m_gdtr.limit) - { - logerror("RETF (%08x): SS segment selector is past GDT limit.\n",m_pc); - FAULT(FAULT_GP,newSS & ~0x03) - } - } - if((newSS & 0x03) != RPL) - { - logerror("RETF: SS segment RPL is not equal to CS segment RPL.\n"); - FAULT(FAULT_GP,newSS & ~0x03) - } - if((desc.flags & 0x0018) != 0x0010 || (desc.flags & 0x0002) == 0) - { - logerror("RETF: SS segment is not a writable data segment.\n"); - FAULT(FAULT_GP,newSS & ~0x03) - } - if(((desc.flags >> 5) & 0x03) != RPL) - { - logerror("RETF: SS DPL is not equal to CS segment RPL.\n"); - FAULT(FAULT_GP,newSS & ~0x03) - } - if((desc.flags & 0x0080) == 0) - { - logerror("RETF: SS segment is not present.\n"); - FAULT(FAULT_GP,newSS & ~0x03) - } - m_CPL = newCS & 0x03; - - /* Load new SS:(E)SP */ - if(operand32 == 0) - REG16(SP) = (newESP+count) & 0xffff; - else - REG32(ESP) = newESP+count; - m_sreg[SS].selector = newSS; - i386_load_segment_descriptor(SS ); - - /* Check that DS, ES, FS and GS are valid for the new privilege level */ - i386_check_sreg_validity(DS); - i386_check_sreg_validity(ES); - i386_check_sreg_validity(FS); - i386_check_sreg_validity(GS); - } - - /* Load new CS:(E)IP */ - if(operand32 == 0) - m_eip = newEIP & 0xffff; - else - m_eip = newEIP; - m_sreg[CS].selector = newCS; - i386_load_segment_descriptor(CS ); - CHANGE_PC(m_eip); -} - -void i386_device::i386_protected_mode_iret(int operand32) -{ - uint32_t newCS, newEIP; - uint32_t newSS, newESP; // when changing privilege - I386_SREG desc,stack; - uint8_t CPL, RPL, DPL; - uint32_t newflags; - uint8_t IOPL = m_IOP1 | (m_IOP2 << 1); - - CPL = m_CPL; - uint32_t ea = i386_translate(SS, (STACK_32BIT)?REG32(ESP):REG16(SP), 0); - if(operand32 == 0) - { - newEIP = READ16(ea) & 0xffff; - newCS = READ16(ea+2) & 0xffff; - newflags = READ16(ea+4) & 0xffff; - } - else - { - newEIP = READ32(ea); - newCS = READ32(ea+4) & 0xffff; - newflags = READ32(ea+8); - } - - if(V8086_MODE) - { - uint32_t oldflags = get_flags(); - if(IOPL != 
3) - { - logerror("IRET (%08x): Is in Virtual 8086 mode and IOPL != 3.\n",m_pc); - FAULT(FAULT_GP,0) - } - if(operand32 == 0) - { - m_eip = newEIP & 0xffff; - m_sreg[CS].selector = newCS & 0xffff; - newflags &= ~(3<<12); - newflags |= (((oldflags>>12)&3)<<12); // IOPL cannot be changed in V86 mode - set_flags((newflags & 0xffff) | (oldflags & ~0xffff)); - REG16(SP) += 6; - } - else - { - m_eip = newEIP; - m_sreg[CS].selector = newCS & 0xffff; - newflags &= ~(3<<12); - newflags |= 0x20000 | (((oldflags>>12)&3)<<12); // IOPL and VM cannot be changed in V86 mode - set_flags(newflags); - REG32(ESP) += 12; - } - } - else if(NESTED_TASK) - { - uint32_t task = READ32(m_task.base); - /* Task Return */ - logerror("IRET (%08x): Nested task return.\n",m_pc); - /* Check back-link selector in TSS */ - if(task & 0x04) - { - logerror("IRET: Task return: Back-linked TSS is not in GDT.\n"); - FAULT(FAULT_TS,task & ~0x03) - } - if((task & ~0x07) >= m_gdtr.limit) - { - logerror("IRET: Task return: Back-linked TSS is not in GDT.\n"); - FAULT(FAULT_TS,task & ~0x03) - } - memset(&desc, 0, sizeof(desc)); - desc.selector = task; - i386_load_protected_mode_segment(&desc,nullptr); - if((desc.flags & 0x001f) != 0x000b) - { - logerror("IRET (%08x): Task return: Back-linked TSS is not a busy TSS.\n",m_pc); - FAULT(FAULT_TS,task & ~0x03) - } - if((desc.flags & 0x0080) == 0) - { - logerror("IRET: Task return: Back-linked TSS is not present.\n"); - FAULT(FAULT_NP,task & ~0x03) - } - if(desc.flags & 0x08) - i386_task_switch(desc.selector,0); - else - i286_task_switch(desc.selector,0); - return; - } - else - { - if(newflags & 0x00020000) // if returning to virtual 8086 mode - { - // 16-bit iret can't reach here - newESP = READ32(ea+12); - newSS = READ32(ea+16) & 0xffff; - /* Return to v86 mode */ - //logerror("IRET (%08x): Returning to Virtual 8086 mode.\n",m_pc); - if(CPL != 0) - { - uint32_t oldflags = get_flags(); - newflags = (newflags & ~0x00003000) | (oldflags & 0x00003000); - if(CPL > IOPL) 
- newflags = (newflags & ~0x200 ) | (oldflags & 0x200); - } - set_flags(newflags); - m_eip = POP32() & 0xffff; // high 16 bits are ignored - m_sreg[CS].selector = POP32() & 0xffff; - POP32(); // already set flags - newESP = POP32(); - newSS = POP32() & 0xffff; - m_sreg[ES].selector = POP32() & 0xffff; - m_sreg[DS].selector = POP32() & 0xffff; - m_sreg[FS].selector = POP32() & 0xffff; - m_sreg[GS].selector = POP32() & 0xffff; - REG32(ESP) = newESP; // all 32 bits are loaded - m_sreg[SS].selector = newSS; - i386_load_segment_descriptor(ES); - i386_load_segment_descriptor(DS); - i386_load_segment_descriptor(FS); - i386_load_segment_descriptor(GS); - i386_load_segment_descriptor(SS); - m_CPL = 3; // Virtual 8086 tasks are always run at CPL 3 - } - else - { - if(operand32 == 0) - { - uint32_t offset = (STACK_32BIT ? REG32(ESP) : REG16(SP)); - if(i386_limit_check(SS,offset+3) != 0) - { - logerror("IRET: Data on stack is past SS limit.\n"); - FAULT(FAULT_SS,0) - } - } - else - { - uint32_t offset = (STACK_32BIT ? REG32(ESP) : REG16(SP)); - if(i386_limit_check(SS,offset+7) != 0) - { - logerror("IRET: Data on stack is past SS limit.\n"); - FAULT(FAULT_SS,0) - } - } - RPL = newCS & 0x03; - if(RPL < CPL) - { - logerror("IRET (%08x): Return CS RPL is less than CPL.\n",m_pc); - FAULT(FAULT_GP,newCS & ~0x03) - } - if(RPL == CPL) - { - /* return to same privilege level */ - if(operand32 == 0) - { - uint32_t offset = (STACK_32BIT ? REG32(ESP) : REG16(SP)); - if(i386_limit_check(SS,offset+5) != 0) - { - logerror("IRET (%08x): Data on stack is past SS limit.\n",m_pc); - FAULT(FAULT_SS,0) - } - } - else - { - uint32_t offset = (STACK_32BIT ? 
REG32(ESP) : REG16(SP)); - if(i386_limit_check(SS,offset+11) != 0) - { - logerror("IRET (%08x): Data on stack is past SS limit.\n",m_pc); - FAULT(FAULT_SS,0) - } - } - if((newCS & ~0x03) == 0) - { - logerror("IRET: Return CS selector is null.\n"); - FAULT(FAULT_GP,0) - } - if(newCS & 0x04) - { - if((newCS & ~0x07) >= m_ldtr.limit) - { - logerror("IRET: Return CS selector (%04x) is past LDT limit.\n",newCS); - FAULT(FAULT_GP,newCS & ~0x03) - } - } - else - { - if((newCS & ~0x07) >= m_gdtr.limit) - { - logerror("IRET: Return CS selector is past GDT limit.\n"); - FAULT(FAULT_GP,newCS & ~0x03) - } - } - memset(&desc, 0, sizeof(desc)); - desc.selector = newCS; - i386_load_protected_mode_segment(&desc,nullptr); - DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level - RPL = newCS & 0x03; - if((desc.flags & 0x0018) != 0x0018) - { - logerror("IRET (%08x): Return CS segment is not a code segment.\n",m_pc); - FAULT(FAULT_GP,newCS & ~0x07) - } - if(desc.flags & 0x0004) - { - if(DPL > RPL) - { - logerror("IRET: Conforming return CS DPL is greater than CS RPL.\n"); - FAULT(FAULT_GP,newCS & ~0x03) - } - } - else - { - if(DPL != RPL) - { - logerror("IRET: Non-conforming return CS DPL is not equal to CS RPL.\n"); - FAULT(FAULT_GP,newCS & ~0x03) - } - } - if((desc.flags & 0x0080) == 0) - { - logerror("IRET: (%08x) Return CS segment is not present.\n", m_pc); - FAULT(FAULT_NP,newCS & ~0x03) - } - if(newEIP > desc.limit) - { - logerror("IRET: Return EIP is past return CS limit.\n"); - FAULT(FAULT_GP,0) - } - - if(CPL != 0) - { - uint32_t oldflags = get_flags(); - newflags = (newflags & ~0x00003000) | (oldflags & 0x00003000); - if(CPL > IOPL) - newflags = (newflags & ~0x200 ) | (oldflags & 0x200); - } - - if(operand32 == 0) - { - m_eip = newEIP; - m_sreg[CS].selector = newCS; - set_flags(newflags); - REG16(SP) += 6; - } - else - { - m_eip = newEIP; - m_sreg[CS].selector = newCS & 0xffff; - set_flags(newflags); - REG32(ESP) += 12; - } - } - else if(RPL > CPL) - { - /* return to 
outer privilege level */ - memset(&desc, 0, sizeof(desc)); - desc.selector = newCS; - i386_load_protected_mode_segment(&desc,nullptr); - DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level - RPL = newCS & 0x03; - if(operand32 == 0) - { - uint32_t offset = (STACK_32BIT ? REG32(ESP) : REG16(SP)); - if(i386_limit_check(SS,offset+9) != 0) - { - logerror("IRET: SP is past SS limit.\n"); - FAULT(FAULT_SS,0) - } - } - else - { - uint32_t offset = (STACK_32BIT ? REG32(ESP) : REG16(SP)); - if(i386_limit_check(SS,offset+19) != 0) - { - logerror("IRET: ESP is past SS limit.\n"); - FAULT(FAULT_SS,0) - } - } - /* Check CS selector and descriptor */ - if((newCS & ~0x03) == 0) - { - logerror("IRET: Return CS selector is null.\n"); - FAULT(FAULT_GP,0) - } - if(newCS & 0x04) - { - if((newCS & ~0x07) >= m_ldtr.limit) - { - logerror("IRET: Return CS selector is past LDT limit.\n"); - FAULT(FAULT_GP,newCS & ~0x03); - } - } - else - { - if((newCS & ~0x07) >= m_gdtr.limit) - { - logerror("IRET: Return CS selector is past GDT limit.\n"); - FAULT(FAULT_GP,newCS & ~0x03); - } - } - if((desc.flags & 0x0018) != 0x0018) - { - logerror("IRET: Return CS segment is not a code segment.\n"); - FAULT(FAULT_GP,newCS & ~0x03) - } - if(desc.flags & 0x0004) - { - if(DPL > RPL) - { - logerror("IRET: Conforming return CS DPL is greater than CS RPL.\n"); - FAULT(FAULT_GP,newCS & ~0x03) - } - } - else - { - if(DPL != RPL) - { - logerror("IRET: Non-conforming return CS DPL does not equal CS RPL.\n"); - FAULT(FAULT_GP,newCS & ~0x03) - } - } - if((desc.flags & 0x0080) == 0) - { - logerror("IRET: Return CS segment is not present.\n"); - FAULT(FAULT_NP,newCS & ~0x03) - } - - /* Check SS selector and descriptor */ - if(operand32 == 0) - { - newESP = READ16(ea+6) & 0xffff; - newSS = READ16(ea+8) & 0xffff; - } - else - { - newESP = READ32(ea+12); - newSS = READ32(ea+16) & 0xffff; - } - memset(&stack, 0, sizeof(stack)); - stack.selector = newSS; - i386_load_protected_mode_segment(&stack,nullptr); - DPL = 
(stack.flags >> 5) & 0x03; - if((newSS & ~0x03) == 0) - { - logerror("IRET: Return SS selector is null.\n"); - FAULT(FAULT_GP,0) - } - if(newSS & 0x04) - { - if((newSS & ~0x07) >= m_ldtr.limit) - { - logerror("IRET: Return SS selector is past LDT limit.\n"); - FAULT(FAULT_GP,newSS & ~0x03); - } - } - else - { - if((newSS & ~0x07) >= m_gdtr.limit) - { - logerror("IRET: Return SS selector is past GDT limit.\n"); - FAULT(FAULT_GP,newSS & ~0x03); - } - } - if((newSS & 0x03) != RPL) - { - logerror("IRET: Return SS RPL is not equal to return CS RPL.\n"); - FAULT(FAULT_GP,newSS & ~0x03) - } - if((stack.flags & 0x0018) != 0x0010) - { - logerror("IRET: Return SS segment is not a data segment.\n"); - FAULT(FAULT_GP,newSS & ~0x03) - } - if((stack.flags & 0x0002) == 0) - { - logerror("IRET: Return SS segment is not writable.\n"); - FAULT(FAULT_GP,newSS & ~0x03) - } - if(DPL != RPL) - { - logerror("IRET: Return SS DPL does not equal SS RPL.\n"); - FAULT(FAULT_GP,newSS & ~0x03) - } - if((stack.flags & 0x0080) == 0) - { - logerror("IRET: Return SS segment is not present.\n"); - FAULT(FAULT_NP,newSS & ~0x03) - } - if(newEIP > desc.limit) - { - logerror("IRET: EIP is past return CS limit.\n"); - FAULT(FAULT_GP,0) - } - -// if(operand32 == 0) -// REG16(SP) += 10; -// else -// REG32(ESP) += 20; - - // IOPL can only change if CPL is zero - if(CPL != 0) - { - uint32_t oldflags = get_flags(); - newflags = (newflags & ~0x00003000) | (oldflags & 0x00003000); - if(CPL > IOPL) - newflags = (newflags & ~0x200 ) | (oldflags & 0x200); - } - - if(operand32 == 0) - { - m_eip = newEIP & 0xffff; - m_sreg[CS].selector = newCS; - set_flags(newflags); - REG16(SP) = newESP & 0xffff; - m_sreg[SS].selector = newSS; - } - else - { - m_eip = newEIP; - m_sreg[CS].selector = newCS & 0xffff; - set_flags(newflags); - REG32(ESP) = newESP; - m_sreg[SS].selector = newSS & 0xffff; - } - m_CPL = newCS & 0x03; - i386_load_segment_descriptor(SS); - - /* Check that DS, ES, FS and GS are valid for the new privilege 
level */ - i386_check_sreg_validity(DS); - i386_check_sreg_validity(ES); - i386_check_sreg_validity(FS); - i386_check_sreg_validity(GS); - } - } - } - - i386_load_segment_descriptor(CS); - CHANGE_PC(m_eip); -} - void i386_device::build_cycle_table() { int i, j; @@ -3040,6 +1578,7 @@ void i386_device::report_invalid_modrm(const char* opcode, uint8_t modrm) #include "pentops.hxx" #include "x87ops.hxx" #include "cpuidmsrs.hxx" +#include "i386segs.hxx" void i386_device::i386_decode_opcode() @@ -4760,371 +3299,6 @@ void pentium3_device::device_reset() CHANGE_PC(m_eip); } -/*****************************************************************************/ -/* AMD Athlon XP - Model: Athlon XP 2400+ - Part number: AXDA2400DKV3C - Stepping code: AIUCP - Date code: 0240MPMW -*/ - -void athlonxp_device::device_start() -{ - i386_common_init(); - register_state_i386_x87_xmm(); - m_data = &space(AS_DATA); - m_opcodes = &space(AS_OPCODES); - mmacache32 = m_data->cache<2, 0, ENDIANNESS_LITTLE>(); - m_opcodes->install_read_handler(0, 0xffffffff, read32_delegate(FUNC(athlonxp_device::debug_read_memory), this)); - - build_x87_opcode_table(); - build_opcode_table(OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_PPRO | OP_MMX | OP_SSE); - m_cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM].get(); // TODO: generate own cycle tables - m_cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM].get(); // TODO: generate own cycle tables - - // put savestate calls here - save_item(NAME(m_processor_name_string)); - save_item(NAME(m_msr_top_mem)); - save_item(NAME(m_msr_sys_cfg)); - save_item(NAME(m_msr_smm_base)); - save_item(NAME(m_msr_smm_mask)); - save_item(NAME(m_msr_mtrrfix)); - save_item(NAME(m_memory_ranges_1m)); -} - -void athlonxp_device::device_reset() -{ - zero_state(); - - m_sreg[CS].selector = 0xf000; - m_sreg[CS].base = 0xffff0000; - m_sreg[CS].limit = 0xffff; - m_sreg[CS].flags = 0x0093; - - m_sreg[DS].base = m_sreg[ES].base = m_sreg[FS].base = m_sreg[GS].base = m_sreg[SS].base = 
0x00000000; - m_sreg[DS].limit = m_sreg[ES].limit = m_sreg[FS].limit = m_sreg[GS].limit = m_sreg[SS].limit = 0xffff; - m_sreg[DS].flags = m_sreg[ES].flags = m_sreg[FS].flags = m_sreg[GS].flags = m_sreg[SS].flags = 0x0093; - - m_idtr.base = 0; - m_idtr.limit = 0x3ff; - - m_a20_mask = ~0; - - m_cr[0] = 0x60000010; - m_eflags = 0x00200000; - m_eflags_mask = 0x00277fd7; /* TODO: is this correct? */ - m_eip = 0xfff0; - m_mxcsr = 0x1f80; - m_smm = false; - m_smi_latched = false; - m_smbase = 0x30000; - m_nmi_masked = false; - m_nmi_latched = false; - - x87_reset(); - - // [11:8] Family - // [ 7:4] Model - // [ 3:0] Stepping ID - // Family 6, Model 8, Stepping 1 - REG32(EAX) = 0; - REG32(EDX) = (6 << 8) | (8 << 4) | (1); - - m_cpuid_id0 = ('h' << 24) | ('t' << 16) | ('u' << 8) | 'A'; // Auth - m_cpuid_id1 = ('i' << 24) | ('t' << 16) | ('n' << 8) | 'e'; // enti - m_cpuid_id2 = ('D' << 24) | ('M' << 16) | ('A' << 8) | 'c'; // cAMD - memset(m_processor_name_string, 0, 48); - strcpy((char *)m_processor_name_string, "AMD Athlon(tm) Processor"); - for (int n = 0; n < 11; n++) - m_msr_mtrrfix[n] = 0; - for (int n = 0; n < (1024 / 4); n++) - m_memory_ranges_1m[n] = 0; - m_msr_top_mem = 1024 * 1024; - m_msr_sys_cfg = 0; - m_msr_smm_base = m_smbase; - m_msr_smm_mask = 0; - - m_cpuid_max_input_value_eax = 0x01; - m_cpu_version = REG32(EDX); - - // see FEATURE_FLAGS enum for bit names - m_feature_flags = 0x0383fbff; - - CHANGE_PC(m_eip); -} - -device_memory_interface::space_config_vector athlonxp_device::memory_space_config() const -{ - return space_config_vector{ - std::make_pair(AS_PROGRAM, &m_program_config), - std::make_pair(AS_IO, &m_io_config), - std::make_pair(AS_DATA, &m_data_config), - std::make_pair(AS_OPCODES, &m_opcodes_config) - }; -} - -void athlonxp_device::enter_smm() -{ - u64 data; - - if (m_msr_smm_mask & 1) - data = 0x1818181818181818; // when smm is active - else - data = m_msr_mtrrfix[2]; - parse_mtrrfix(data, 0xa0000, 16); - i386_device::enter_smm(); -} - -void 
athlonxp_device::leave_smm() -{ - u64 data; - - i386_device::leave_smm(); - if (m_msr_smm_mask & 1) - data = 0; // when smm is not active - else - data = m_msr_mtrrfix[2]; - parse_mtrrfix(data, 0xa0000, 16); -} - -void athlonxp_device::parse_mtrrfix(u64 mtrr, offs_t base, int kblock) -{ - int nb = kblock / 4; - int range = (int)(base >> 12); // base must never be higher than 1 megabyte - - for (int n = 0; n < 8; n++) - { - uint8_t type = mtrr & 0xff; - - for (int b = 0; b < nb; b++) - { - m_memory_ranges_1m[range] = type; - range++; - } - mtrr = mtrr >> 8; - } -} - -int athlonxp_device::check_cacheable(offs_t address) -{ - offs_t block; - int disabled; - - disabled = 0; - if (m_cr[0] & (1 << 30)) - disabled = 128; - if (address >= 0x100000) - return disabled; - block = address >> 12; - return m_memory_ranges_1m[block] | disabled; -} - -template -int athlonxp_device::address_mode(offs_t address) -{ - if (address >= m_msr_top_mem) - return 1; - if (address >= 1 * 1024 * 1024) - return 0; - if ((m_memory_ranges_1m[address >> 12] & (1 << (3 + wr))) != 0) - return 0; - return 1; -} - -READ32_MEMBER(athlonxp_device::debug_read_memory) -{ - offs_t address = offset << 2; - int mode = check_cacheable(address); - bool nocache = false; - address_space *m = m_program; - u8 *data; - - if ((mode & 7) == 0) - nocache = true; - if (mode & 1) - nocache = true; - if (nocache == false) - { - int offset = (address & 63); - data = cache.search(address); - if (data) - return *(u32 *)(data + offset); - } - if (address_mode<1>(address)) - m = m_data; - return m->read_dword(address); -} - -template -dt athlonxp_device::opcode_read_cache(offs_t address) -{ - int mode = check_cacheable(address); - bool nocache = false; - memory_access_cache<2, 0, ENDIANNESS_LITTLE> *m = macache32; - u8 *data; - - if ((mode & 7) == 0) - nocache = true; - if (mode & 1) - nocache = true; - if (nocache == false) - { - int offset = (address & 63) ^ xorle; - data = cache.search(address); - if (data) - return *(dt 
*)(data + offset); - if (!(mode & 128)) - { - bool dirty = cache.allocate(address, &data); - address = cache.base(address); - if (dirty) - { - offs_t old_address = cache.old(); - - for (int w = 0; w < 64; w += 4) - m->write_dword(old_address + w, *(u32 *)(data + w)); - } - for (int r = 0; r < 64; r += 4) - *(u32 *)(data + r) = m->read_dword(address + r); - return *(dt *)(data + offset); - } - } - if (address_mode<1>(address)) - m = mmacache32; - if (sizeof(dt) == 1) - return m->read_byte(address); - else if (sizeof(dt) == 2) - return m->read_word(address); - else - return m->read_dword(address); -} - -template -dt athlonxp_device::program_read_cache(offs_t address) -{ - int mode = check_cacheable(address); - bool nocache = false; - address_space *m = m_program; - u8 *data; - - if ((mode & 7) == 0) - nocache = true; - if (mode & 1) - nocache = true; - if (nocache == false) - { - int offset = (address & 63) ^ xorle; - data = cache.search(address); - if (data) - return *(dt *)(data + offset); - if (!(mode & 128)) - { - bool dirty = cache.allocate(address, &data); - address = cache.base(address); - if (dirty) - { - offs_t old_address = cache.old(); - - for (int w = 0; w < 64; w += 4) - m->write_dword(old_address + w, *(u32 *)(data + w)); - } - for (int r = 0; r < 64; r += 4) - *(u32 *)(data + r) = m->read_dword(address + r); - return *(dt *)(data + offset); - } - } - if (address_mode<1>(address)) - m = m_data; - if (sizeof(dt) == 1) - return m->read_byte(address); - else if (sizeof(dt) == 2) - return m->read_word(address); - else - return m->read_dword(address); -} - -template -void athlonxp_device::program_write_cache(offs_t address, dt data) -{ - int mode = check_cacheable(address); - bool nocache = false; - address_space *m = m_program; - u8 *dataw; - - if ((mode & 7) == 0) - nocache = true; - if (mode & 1) - nocache = true; - if (nocache == false) - { - int offset = (address & 63) ^ xorle; - dataw = cache.search(address); - if (dataw) - { - *(dt *)(dataw + offset) 
= data; - return; - } - if (!(mode & 128)) - { - bool dirty = cache.allocate(address, &dataw); - address = cache.base(address); - if (dirty) - { - offs_t old_address = cache.old(); - - for (int w = 0; w < 64; w += 4) - m->write_dword(old_address + w, *(u32 *)(dataw + w)); - } - for (int r = 0; r < 64; r += 4) - *(u32 *)(dataw + r) = m->read_dword(address + r); - *(dt *)(dataw + offset) = data; - return; - } - } - if (address_mode<0>(address)) - m = m_data; - if (sizeof(dt) == 1) - m->write_byte(address, data); - else if (sizeof(dt) == 2) - m->write_word(address, data); - else - m->write_dword(address, data); -} - -void athlonxp_device::cache_writeback() -{ - // dirty cachelines are written back to memory - address_space *m = m_program; - u32 base; - u8 *data; - - data = cache.first_dirty(base, false); - while (data != nullptr) - { - for (int w = 0; w < 64; w += 4) - m->write_dword(base + w, *(u32 *)(data + w)); - data = cache.next_dirty(base, false); - } -} - -void athlonxp_device::cache_invalidate() -{ - // dirty cachelines are not written back to memory - cache.reset(); -} - -void athlonxp_device::cache_clean() -{ - // dirty cachelines are marked as clean but not written back to memory - u32 base; - u8 *data; - - data = cache.first_dirty(base, true); - while (data != nullptr) - data = cache.next_dirty(base, true); -} - - /*****************************************************************************/ /* Intel Pentium 4 */ diff --git a/src/devices/cpu/i386/i386.h b/src/devices/cpu/i386/i386.h index 4a691f9a1e7..1de76d49339 100644 --- a/src/devices/cpu/i386/i386.h +++ b/src/devices/cpu/i386/i386.h @@ -16,7 +16,6 @@ #include "divtlb.h" #include "i386dasm.h" -#include "cache.h" #define INPUT_LINE_A20 1 #define INPUT_LINE_SMI 2 @@ -95,13 +94,6 @@ protected: virtual u16 mem_pr16(offs_t address) { return macache32->read_word(address); } virtual u32 mem_pr32(offs_t address) { return macache32->read_dword(address); } - virtual u8 mem_prd8(offs_t address) { return 
m_program->read_byte(address); } - virtual u16 mem_prd16(offs_t address) { return m_program->read_word(address); } - virtual u32 mem_prd32(offs_t address) { return m_program->read_dword(address); } - virtual void mem_pwd8(offs_t address, u8 data) { m_program->write_byte(address, data); } - virtual void mem_pwd16(offs_t address, u16 data) { m_program->write_word(address, data); } - virtual void mem_pwd32(offs_t address, u32 data) { m_program->write_dword(address, data); } - address_space_config m_program_config; address_space_config m_io_config; @@ -391,27 +383,32 @@ protected: void register_state_i386(); void register_state_i386_x87(); void register_state_i386_x87_xmm(); - inline uint32_t i386_translate(int segment, uint32_t ip, int rwn); + uint32_t i386_translate(int segment, uint32_t ip, int rwn); inline vtlb_entry get_permissions(uint32_t pte, int wp); bool i386_translate_address(int intention, offs_t *address, vtlb_entry *entry); - inline bool translate_address(int pl, int type, uint32_t *address, uint32_t *error); - inline void CHANGE_PC(uint32_t pc); + bool translate_address(int pl, int type, uint32_t *address, uint32_t *error); + void CHANGE_PC(uint32_t pc); inline void NEAR_BRANCH(int32_t offs); inline uint8_t FETCH(); inline uint16_t FETCH16(); inline uint32_t FETCH32(); - inline uint8_t READ8(uint32_t ea); - inline uint16_t READ16(uint32_t ea); - inline uint32_t READ32(uint32_t ea); - inline uint64_t READ64(uint32_t ea); - inline uint8_t READ8PL0(uint32_t ea); - inline uint16_t READ16PL0(uint32_t ea); - inline uint32_t READ32PL0(uint32_t ea); + inline uint8_t READ8(uint32_t ea) { return READ8PL(ea, m_CPL); } + inline uint16_t READ16(uint32_t ea) { return READ16PL(ea, m_CPL); } + inline uint32_t READ32(uint32_t ea) { return READ32PL(ea, m_CPL); } + inline uint64_t READ64(uint32_t ea) { return READ64PL(ea, m_CPL); } + virtual uint8_t READ8PL(uint32_t ea, uint8_t privilege); + virtual uint16_t READ16PL(uint32_t ea, uint8_t privilege); + virtual uint32_t 
READ32PL(uint32_t ea, uint8_t privilege); + virtual uint64_t READ64PL(uint32_t ea, uint8_t privilege); inline void WRITE_TEST(uint32_t ea); - inline void WRITE8(uint32_t ea, uint8_t value); - inline void WRITE16(uint32_t ea, uint16_t value); - inline void WRITE32(uint32_t ea, uint32_t value); - inline void WRITE64(uint32_t ea, uint64_t value); + inline void WRITE8(uint32_t ea, uint8_t value) { WRITE8PL(ea, m_CPL, value); } + inline void WRITE16(uint32_t ea, uint16_t value) { WRITE16PL(ea, m_CPL, value); } + inline void WRITE32(uint32_t ea, uint32_t value) { WRITE32PL(ea, m_CPL, value); } + inline void WRITE64(uint32_t ea, uint64_t value) { WRITE64PL(ea, m_CPL, value); } + virtual void WRITE8PL(uint32_t ea, uint8_t privilege, uint8_t value); + virtual void WRITE16PL(uint32_t ea, uint8_t privilege, uint16_t value); + virtual void WRITE32PL(uint32_t ea, uint8_t privilege, uint32_t value); + virtual void WRITE64PL(uint32_t ea, uint8_t privilege, uint64_t value); inline uint8_t OR8(uint8_t dst, uint8_t src); inline uint16_t OR16(uint16_t dst, uint16_t src); inline uint32_t OR32(uint32_t dst, uint32_t src); @@ -445,10 +442,10 @@ protected: inline void check_ioperm(offs_t port, uint8_t mask); inline uint8_t READPORT8(offs_t port); inline void WRITEPORT8(offs_t port, uint8_t value); - inline uint16_t READPORT16(offs_t port); - inline void WRITEPORT16(offs_t port, uint16_t value); - inline uint32_t READPORT32(offs_t port); - inline void WRITEPORT32(offs_t port, uint32_t value); + virtual uint16_t READPORT16(offs_t port); + virtual void WRITEPORT16(offs_t port, uint16_t value); + virtual uint32_t READPORT32(offs_t port); + virtual void WRITEPORT32(offs_t port, uint32_t value); uint32_t i386_load_protected_mode_segment(I386_SREG *seg, uint64_t *desc ); void i386_load_call_gate(I386_CALL_GATE *gate); void i386_set_descriptor_accessed(uint16_t selector); @@ -490,7 +487,7 @@ protected: void i386_decode_four_byte38f3(); uint8_t read8_debug(uint32_t ea, uint8_t *data); uint32_t 
i386_get_debug_desc(I386_SREG *seg); - inline void CYCLES(int x); + void CYCLES(int x); inline void CYCLES_RM(int modrm, int r, int m); uint8_t i386_shift_rotate8(uint8_t modrm, uint32_t value, uint8_t shift); void i386_adc_rm8_r8(); @@ -1524,6 +1521,17 @@ protected: virtual u8 mem_pr8(offs_t address) override { return macache16->read_byte(address); }; virtual u16 mem_pr16(offs_t address) override { return macache16->read_word(address); }; virtual u32 mem_pr32(offs_t address) override { return macache16->read_dword(address); }; + + virtual uint16_t READ16PL(uint32_t ea, uint8_t privilege) override; + virtual uint32_t READ32PL(uint32_t ea, uint8_t privilege) override; + virtual uint64_t READ64PL(uint32_t ea, uint8_t privilege) override; + virtual void WRITE16PL(uint32_t ea, uint8_t privilege, uint16_t value) override; + virtual void WRITE32PL(uint32_t ea, uint8_t privilege, uint32_t value) override; + virtual void WRITE64PL(uint32_t ea, uint8_t privilege, uint64_t value) override; + virtual uint16_t READPORT16(offs_t port) override; + virtual void WRITEPORT16(offs_t port, uint16_t value) override; + virtual uint32_t READPORT32(offs_t port) override; + virtual void WRITEPORT32(offs_t port, uint32_t value) override; }; class i486_device : public i386_device @@ -1632,64 +1640,6 @@ protected: }; -class athlonxp_device : public pentium_device -{ -public: - // construction/destruction - athlonxp_device(const machine_config &mconfig, const char *tag, device_t *owner, uint32_t clock); - -protected: - virtual void opcode_cpuid() override; - virtual uint64_t opcode_rdmsr(bool &valid_msr) override; - virtual void opcode_wrmsr(uint64_t data, bool &valid_msr) override; - virtual void cache_writeback() override; - virtual void cache_invalidate() override; - virtual void cache_clean() override; - virtual void device_start() override; - virtual void device_reset() override; - virtual void enter_smm() override; - virtual void leave_smm() override; - - virtual u8 mem_pr8(offs_t 
address) override { return opcode_read_cache(address); } - virtual u16 mem_pr16(offs_t address) override { return opcode_read_cache(address); } - virtual u32 mem_pr32(offs_t address) override { return opcode_read_cache(address); } - virtual u8 mem_prd8(offs_t address) override { return program_read_cache(address); } - virtual u16 mem_prd16(offs_t address) override { return program_read_cache(address); } - virtual u32 mem_prd32(offs_t address) override { return program_read_cache(address); } - virtual void mem_pwd8(offs_t address, u8 data) override { program_write_cache(address, data); } - virtual void mem_pwd16(offs_t address, u16 data) override { program_write_cache(address, data); } - virtual void mem_pwd32(offs_t address, u32 data) override { program_write_cache(address, data); } - - // device_memory_interface override - virtual space_config_vector memory_space_config() const override; - -private: - void parse_mtrrfix(u64 mtrr, offs_t base, int kblock); - inline int check_cacheable(offs_t address); - template int address_mode(offs_t address); - - template dt opcode_read_cache(offs_t address); - template dt program_read_cache(offs_t address); - template void program_write_cache(offs_t address, dt data); - - DECLARE_READ32_MEMBER(debug_read_memory); - - address_space_config m_data_config; - address_space *m_data; - address_space_config m_opcodes_config; - address_space *m_opcodes; - memory_access_cache<2, 0, ENDIANNESS_LITTLE> *mmacache32; - uint8_t m_processor_name_string[48]; - offs_t m_msr_top_mem; - uint64_t m_msr_sys_cfg; - offs_t m_msr_smm_base; - uint64_t m_msr_smm_mask; - uint64_t m_msr_mtrrfix[11]; - uint8_t m_memory_ranges_1m[1024 / 4]; - cpucache<17, 9, Cache2Way, CacheLineBytes64> cache; // 512 sets, 2 ways (cachelines per set), 64 bytes per cacheline -}; - - class pentium4_device : public pentium_device { public: @@ -1714,7 +1664,6 @@ DECLARE_DEVICE_TYPE(MEDIAGX, mediagx_device) DECLARE_DEVICE_TYPE(PENTIUM_PRO, pentium_pro_device) 
DECLARE_DEVICE_TYPE(PENTIUM2, pentium2_device) DECLARE_DEVICE_TYPE(PENTIUM3, pentium3_device) -DECLARE_DEVICE_TYPE(ATHLONXP, athlonxp_device) DECLARE_DEVICE_TYPE(PENTIUM4, pentium4_device) #endif // MAME_CPU_I386_I386_H diff --git a/src/devices/cpu/i386/i386ops.h b/src/devices/cpu/i386/i386ops.h index 372b3127dc3..cf5b3c588aa 100644 --- a/src/devices/cpu/i386/i386ops.h +++ b/src/devices/cpu/i386/i386ops.h @@ -1,26 +1,5 @@ // license:BSD-3-Clause // copyright-holders:Ville Linde, Barry Rodewald, Carl, Philip Bennett -#define OP_I386 0x1 -#define OP_FPU 0x2 -#define OP_I486 0x4 -#define OP_PENTIUM 0x8 -#define OP_MMX 0x10 -#define OP_PPRO 0x20 -#define OP_SSE 0x40 -#define OP_SSE2 0x80 -#define OP_SSE3 0x100 -#define OP_CYRIX 0x8000 -#define OP_2BYTE 0x80000000 -#define OP_3BYTE66 0x40000000 -#define OP_3BYTEF2 0x20000000 -#define OP_3BYTEF3 0x10000000 -#define OP_3BYTE38 0x08000000 -#define OP_3BYTE3A 0x04000000 -#define OP_4BYTE3866 0x02000000 -#define OP_4BYTE3A66 0x01000000 -#define OP_4BYTE38F2 0x00800000 -#define OP_4BYTE3AF2 0x00400000 -#define OP_4BYTE38F3 0x00200000 const i386_device::X86_OPCODE i386_device::s_x86_opcode_table[] = { diff --git a/src/devices/cpu/i386/i386priv.h b/src/devices/cpu/i386/i386priv.h index e67665ecbe8..6831407e23d 100644 --- a/src/devices/cpu/i386/i386priv.h +++ b/src/devices/cpu/i386/i386priv.h @@ -372,720 +372,358 @@ extern MODRM_TABLE i386_MODRM_table[256]; /***********************************************************************************/ -void i386_device::CHANGE_PC(uint32_t pc) +enum X86_CYCLES { - m_pc = i386_translate(CS, pc, -1 ); -} - -void i386_device::NEAR_BRANCH(int32_t offs) -{ - /* TODO: limit */ - m_eip += offs; - m_pc += offs; -} - -uint8_t i386_device::FETCH() -{ - uint8_t value; - uint32_t address = m_pc, error; - - if(!translate_address(m_CPL,TRANSLATE_FETCH,&address,&error)) - PF_THROW(error); - - value = mem_pr8(address & m_a20_mask); -#ifdef DEBUG_MISSING_OPCODE - m_opcode_bytes[m_opcode_bytes_length] = 
value; - m_opcode_bytes_length = (m_opcode_bytes_length + 1) & 15; -#endif - m_eip++; - m_pc++; - return value; -} -uint16_t i386_device::FETCH16() -{ - uint16_t value; - uint32_t address = m_pc, error; - - if( !WORD_ALIGNED(address) ) { /* Unaligned read */ - value = (FETCH() << 0); - value |= (FETCH() << 8); - } else { - if(!translate_address(m_CPL,TRANSLATE_FETCH,&address,&error)) - PF_THROW(error); - address &= m_a20_mask; - value = mem_pr16(address); - m_eip += 2; - m_pc += 2; - } - return value; -} -uint32_t i386_device::FETCH32() -{ - uint32_t value; - uint32_t address = m_pc, error; - - if( !DWORD_ALIGNED(m_pc) ) { /* Unaligned read */ - value = (FETCH() << 0); - value |= (FETCH() << 8); - value |= (FETCH() << 16); - value |= (FETCH() << 24); - } else { - if(!translate_address(m_CPL,TRANSLATE_FETCH,&address,&error)) - PF_THROW(error); - - address &= m_a20_mask; - value = mem_pr32(address); - m_eip += 4; - m_pc += 4; - } - return value; -} - -uint8_t i386_device::READ8(uint32_t ea) -{ - uint32_t address = ea, error; - - if(!translate_address(m_CPL,TRANSLATE_READ,&address, &error)) - PF_THROW(error); - - address &= m_a20_mask; - return mem_prd8(address); -} -uint16_t i386_device::READ16(uint32_t ea) -{ - uint16_t value; - uint32_t address = ea, error; - - if( !WORD_ALIGNED(ea) ) { /* Unaligned read */ - value = (READ8( address+0 ) << 0); - value |= (READ8( address+1 ) << 8); - } else { - if(!translate_address(m_CPL,TRANSLATE_READ,&address,&error)) - PF_THROW(error); - - address &= m_a20_mask; - value = mem_prd16( address ); - } - return value; -} -uint32_t i386_device::READ32(uint32_t ea) -{ - uint32_t value; - uint32_t address = ea, error; - - if( !DWORD_ALIGNED(ea) ) { /* Unaligned read */ - value = (READ8( address+0 ) << 0); - value |= (READ8( address+1 ) << 8); - value |= (READ8( address+2 ) << 16), - value |= (READ8( address+3 ) << 24); - } else { - if(!translate_address(m_CPL,TRANSLATE_READ,&address,&error)) - PF_THROW(error); - - address &= m_a20_mask; 
- value = mem_prd32( address ); - } - return value; -} - -uint64_t i386_device::READ64(uint32_t ea) -{ - uint64_t value; - uint32_t address = ea, error; - - if( !QWORD_ALIGNED(ea) ) { /* Unaligned read */ - value = (((uint64_t) READ8( address+0 )) << 0); - value |= (((uint64_t) READ8( address+1 )) << 8); - value |= (((uint64_t) READ8( address+2 )) << 16); - value |= (((uint64_t) READ8( address+3 )) << 24); - value |= (((uint64_t) READ8( address+4 )) << 32); - value |= (((uint64_t) READ8( address+5 )) << 40); - value |= (((uint64_t) READ8( address+6 )) << 48); - value |= (((uint64_t) READ8( address+7 )) << 56); - } else { - if(!translate_address(m_CPL,TRANSLATE_READ,&address,&error)) - PF_THROW(error); - - address &= m_a20_mask; - value = (((uint64_t) mem_prd32( address+0 )) << 0); - value |= (((uint64_t) mem_prd32( address+4 )) << 32); - } - return value; -} -uint8_t i386_device::READ8PL0(uint32_t ea) -{ - uint32_t address = ea, error; - - if(!translate_address(0,TRANSLATE_READ,&address,&error)) - PF_THROW(error); - - address &= m_a20_mask; - return mem_prd8(address); -} -uint16_t i386_device::READ16PL0(uint32_t ea) -{ - uint16_t value; - uint32_t address = ea, error; - - if( !WORD_ALIGNED(ea) ) { /* Unaligned read */ - value = (READ8PL0( address+0 ) << 0); - value |= (READ8PL0( address+1 ) << 8); - } else { - if(!translate_address(0,TRANSLATE_READ,&address,&error)) - PF_THROW(error); - - address &= m_a20_mask; - value = mem_prd16( address ); - } - return value; -} - -uint32_t i386_device::READ32PL0(uint32_t ea) -{ - uint32_t value; - uint32_t address = ea, error; - - if( !DWORD_ALIGNED(ea) ) { /* Unaligned read */ - value = (READ8PL0( address+0 ) << 0); - value |= (READ8PL0( address+1 ) << 8); - value |= (READ8PL0( address+2 ) << 16); - value |= (READ8PL0( address+3 ) << 24); - } else { - if(!translate_address(0,TRANSLATE_READ,&address,&error)) - PF_THROW(error); - - address &= m_a20_mask; - value = mem_prd32( address ); - } - return value; -} - -void 
i386_device::WRITE_TEST(uint32_t ea) -{ - uint32_t address = ea, error; - if(!translate_address(m_CPL,TRANSLATE_WRITE,&address,&error)) - PF_THROW(error); -} - -void i386_device::WRITE8(uint32_t ea, uint8_t value) -{ - uint32_t address = ea, error; - - if(!translate_address(m_CPL,TRANSLATE_WRITE,&address,&error)) - PF_THROW(error); - - address &= m_a20_mask; - mem_pwd8(address, value); -} -void i386_device::WRITE16(uint32_t ea, uint16_t value) -{ - uint32_t address = ea, error; - - if( !WORD_ALIGNED(ea) ) { /* Unaligned write */ - WRITE8( address+0, value & 0xff ); - WRITE8( address+1, (value >> 8) & 0xff ); - } else { - if(!translate_address(m_CPL,TRANSLATE_WRITE,&address,&error)) - PF_THROW(error); - - address &= m_a20_mask; - mem_pwd16(address, value); - } -} -void i386_device::WRITE32(uint32_t ea, uint32_t value) -{ - uint32_t address = ea, error; - - if( !DWORD_ALIGNED(ea) ) { /* Unaligned write */ - WRITE8( address+0, value & 0xff ); - WRITE8( address+1, (value >> 8) & 0xff ); - WRITE8( address+2, (value >> 16) & 0xff ); - WRITE8( address+3, (value >> 24) & 0xff ); - } else { - if(!translate_address(m_CPL,TRANSLATE_WRITE,&address,&error)) - PF_THROW(error); - - ea &= m_a20_mask; - mem_pwd32(address, value); - } -} - -void i386_device::WRITE64(uint32_t ea, uint64_t value) -{ - uint32_t address = ea, error; - - if( !QWORD_ALIGNED(ea) ) { /* Unaligned write */ - WRITE8( address+0, value & 0xff ); - WRITE8( address+1, (value >> 8) & 0xff ); - WRITE8( address+2, (value >> 16) & 0xff ); - WRITE8( address+3, (value >> 24) & 0xff ); - WRITE8( address+4, (value >> 32) & 0xff ); - WRITE8( address+5, (value >> 40) & 0xff ); - WRITE8( address+6, (value >> 48) & 0xff ); - WRITE8( address+7, (value >> 56) & 0xff ); - } else { - if(!translate_address(m_CPL,TRANSLATE_WRITE,&address,&error)) - PF_THROW(error); - - ea &= m_a20_mask; - mem_pwd32(address+0, value & 0xffffffff); - mem_pwd32(address+4, (value >> 32) & 0xffffffff); - } -} - 
-/***********************************************************************************/ - -uint8_t i386_device::OR8(uint8_t dst, uint8_t src) -{ - uint8_t res = dst | src; - m_CF = m_OF = 0; - SetSZPF8(res); - return res; -} -uint16_t i386_device::OR16(uint16_t dst, uint16_t src) -{ - uint16_t res = dst | src; - m_CF = m_OF = 0; - SetSZPF16(res); - return res; -} -uint32_t i386_device::OR32(uint32_t dst, uint32_t src) -{ - uint32_t res = dst | src; - m_CF = m_OF = 0; - SetSZPF32(res); - return res; -} - -uint8_t i386_device::AND8(uint8_t dst, uint8_t src) -{ - uint8_t res = dst & src; - m_CF = m_OF = 0; - SetSZPF8(res); - return res; -} -uint16_t i386_device::AND16(uint16_t dst, uint16_t src) -{ - uint16_t res = dst & src; - m_CF = m_OF = 0; - SetSZPF16(res); - return res; -} -uint32_t i386_device::AND32(uint32_t dst, uint32_t src) -{ - uint32_t res = dst & src; - m_CF = m_OF = 0; - SetSZPF32(res); - return res; -} - -uint8_t i386_device::XOR8(uint8_t dst, uint8_t src) -{ - uint8_t res = dst ^ src; - m_CF = m_OF = 0; - SetSZPF8(res); - return res; -} -uint16_t i386_device::XOR16(uint16_t dst, uint16_t src) -{ - uint16_t res = dst ^ src; - m_CF = m_OF = 0; - SetSZPF16(res); - return res; -} -uint32_t i386_device::XOR32(uint32_t dst, uint32_t src) -{ - uint32_t res = dst ^ src; - m_CF = m_OF = 0; - SetSZPF32(res); - return res; -} - -#define SUB8(dst, src) SBB8(dst, src, 0) -uint8_t i386_device::SBB8(uint8_t dst, uint8_t src, uint8_t b) -{ - uint16_t res = (uint16_t)dst - (uint16_t)src - (uint8_t)b; - SetCF8(res); - SetOF_Sub8(res,src,dst); - SetAF(res,src,dst); - SetSZPF8(res); - return (uint8_t)res; -} - -#define SUB16(dst, src) SBB16(dst, src, 0) -uint16_t i386_device::SBB16(uint16_t dst, uint16_t src, uint16_t b) -{ - uint32_t res = (uint32_t)dst - (uint32_t)src - (uint32_t)b; - SetCF16(res); - SetOF_Sub16(res,src,dst); - SetAF(res,src,dst); - SetSZPF16(res); - return (uint16_t)res; -} - -#define SUB32(dst, src) SBB32(dst, src, 0) -uint32_t 
i386_device::SBB32(uint32_t dst, uint32_t src, uint32_t b) -{ - uint64_t res = (uint64_t)dst - (uint64_t)src - (uint64_t) b; - SetCF32(res); - SetOF_Sub32(res,src,dst); - SetAF(res,src,dst); - SetSZPF32(res); - return (uint32_t)res; -} - -#define ADD8(dst, src) ADC8(dst, src, 0) -uint8_t i386_device::ADC8(uint8_t dst, uint8_t src, uint8_t c) -{ - uint16_t res = (uint16_t)dst + (uint16_t)src + (uint16_t)c; - SetCF8(res); - SetOF_Add8(res,src,dst); - SetAF(res,src,dst); - SetSZPF8(res); - return (uint8_t)res; -} - -#define ADD16(dst, src) ADC16(dst, src, 0) -uint16_t i386_device::ADC16(uint16_t dst, uint16_t src, uint8_t c) -{ - uint32_t res = (uint32_t)dst + (uint32_t)src + (uint32_t)c; - SetCF16(res); - SetOF_Add16(res,src,dst); - SetAF(res,src,dst); - SetSZPF16(res); - return (uint16_t)res; -} - -#define ADD32(dst, src) ADC32(dst, src, 0) -uint32_t i386_device::ADC32(uint32_t dst, uint32_t src, uint32_t c) -{ - uint64_t res = (uint64_t)dst + (uint64_t)src + (uint64_t) c; - SetCF32(res); - SetOF_Add32(res,src,dst); - SetAF(res,src,dst); - SetSZPF32(res); - return (uint32_t)res; -} - -uint8_t i386_device::INC8(uint8_t dst) -{ - uint16_t res = (uint16_t)dst + 1; - SetOF_Add8(res,1,dst); - SetAF(res,1,dst); - SetSZPF8(res); - return (uint8_t)res; -} -uint16_t i386_device::INC16(uint16_t dst) -{ - uint32_t res = (uint32_t)dst + 1; - SetOF_Add16(res,1,dst); - SetAF(res,1,dst); - SetSZPF16(res); - return (uint16_t)res; -} -uint32_t i386_device::INC32(uint32_t dst) -{ - uint64_t res = (uint64_t)dst + 1; - SetOF_Add32(res,1,dst); - SetAF(res,1,dst); - SetSZPF32(res); - return (uint32_t)res; -} - -uint8_t i386_device::DEC8(uint8_t dst) -{ - uint16_t res = (uint16_t)dst - 1; - SetOF_Sub8(res,1,dst); - SetAF(res,1,dst); - SetSZPF8(res); - return (uint8_t)res; -} -uint16_t i386_device::DEC16(uint16_t dst) -{ - uint32_t res = (uint32_t)dst - 1; - SetOF_Sub16(res,1,dst); - SetAF(res,1,dst); - SetSZPF16(res); - return (uint16_t)res; -} -uint32_t i386_device::DEC32(uint32_t dst) 
-{ - uint64_t res = (uint64_t)dst - 1; - SetOF_Sub32(res,1,dst); - SetAF(res,1,dst); - SetSZPF32(res); - return (uint32_t)res; -} - - - -void i386_device::PUSH16(uint16_t value) -{ - uint32_t ea, new_esp; - if( STACK_32BIT ) { - new_esp = REG32(ESP) - 2; - ea = i386_translate(SS, new_esp, 1); - WRITE16(ea, value ); - REG32(ESP) = new_esp; - } else { - new_esp = (REG16(SP) - 2) & 0xffff; - ea = i386_translate(SS, new_esp, 1); - WRITE16(ea, value ); - REG16(SP) = new_esp; - } -} -void i386_device::PUSH32(uint32_t value) -{ - uint32_t ea, new_esp; - if( STACK_32BIT ) { - new_esp = REG32(ESP) - 4; - ea = i386_translate(SS, new_esp, 1); - WRITE32(ea, value ); - REG32(ESP) = new_esp; - } else { - new_esp = (REG16(SP) - 4) & 0xffff; - ea = i386_translate(SS, new_esp, 1); - WRITE32(ea, value ); - REG16(SP) = new_esp; - } -} - -void i386_device::PUSH32SEG(uint32_t value) -{ - uint32_t ea, new_esp; - if( STACK_32BIT ) { - new_esp = REG32(ESP) - 4; - ea = i386_translate(SS, new_esp, 1); - ((m_cpu_version & 0xf00) == 0x300) ? WRITE16(ea, value) : WRITE32(ea, value ); // 486 also? - REG32(ESP) = new_esp; - } else { - new_esp = (REG16(SP) - 4) & 0xffff; - ea = i386_translate(SS, new_esp, 1); - ((m_cpu_version & 0xf00) == 0x300) ? 
WRITE16(ea, value) : WRITE32(ea, value ); - REG16(SP) = new_esp; - } -} - -void i386_device::PUSH8(uint8_t value) -{ - if( m_operand_size ) { - PUSH32((int32_t)(int8_t)value); - } else { - PUSH16((int16_t)(int8_t)value); - } -} - -uint8_t i386_device::POP8() -{ - uint8_t value; - uint32_t ea, new_esp; - if( STACK_32BIT ) { - new_esp = REG32(ESP) + 1; - ea = i386_translate(SS, new_esp - 1, 0); - value = READ8(ea ); - REG32(ESP) = new_esp; - } else { - new_esp = REG16(SP) + 1; - ea = i386_translate(SS, (new_esp - 1) & 0xffff, 0); - value = READ8(ea ); - REG16(SP) = new_esp; - } - return value; -} -uint16_t i386_device::POP16() -{ - uint16_t value; - uint32_t ea, new_esp; - if( STACK_32BIT ) { - new_esp = REG32(ESP) + 2; - ea = i386_translate(SS, new_esp - 2, 0); - value = READ16(ea ); - REG32(ESP) = new_esp; - } else { - new_esp = REG16(SP) + 2; - ea = i386_translate(SS, (new_esp - 2) & 0xffff, 0); - value = READ16(ea ); - REG16(SP) = new_esp; - } - return value; -} -uint32_t i386_device::POP32() -{ - uint32_t value; - uint32_t ea, new_esp; - if( STACK_32BIT ) { - new_esp = REG32(ESP) + 4; - ea = i386_translate(SS, new_esp - 4, 0); - value = READ32(ea ); - REG32(ESP) = new_esp; - } else { - new_esp = REG16(SP) + 4; - ea = i386_translate(SS, (new_esp - 4) & 0xffff, 0); - value = READ32(ea ); - REG16(SP) = new_esp; - } - return value; -} - -void i386_device::BUMP_SI(int adjustment) -{ - if ( m_address_size ) - REG32(ESI) += ((m_DF) ? -adjustment : +adjustment); - else - REG16(SI) += ((m_DF) ? -adjustment : +adjustment); -} - -void i386_device::BUMP_DI(int adjustment) -{ - if ( m_address_size ) - REG32(EDI) += ((m_DF) ? -adjustment : +adjustment); - else - REG16(DI) += ((m_DF) ? 
-adjustment : +adjustment); -} - -void i386_device::CYCLES(int x) -{ - if (PROTECTED_MODE) - { - m_cycles -= m_cycle_table_pm[x]; - } - else - { - m_cycles -= m_cycle_table_rm[x]; - } -} - -void i386_device::CYCLES_RM(int modrm, int r, int m) -{ - if (modrm >= 0xc0) - { - if (PROTECTED_MODE) - { - m_cycles -= m_cycle_table_pm[r]; - } - else - { - m_cycles -= m_cycle_table_rm[r]; - } - } - else - { - if (PROTECTED_MODE) - { - m_cycles -= m_cycle_table_pm[m]; - } - else - { - m_cycles -= m_cycle_table_rm[m]; - } - } -} - - - -/*********************************************************************************** - I/O ACCESS -***********************************************************************************/ - -void i386_device::check_ioperm(offs_t port, uint8_t mask) -{ - uint8_t IOPL, map; - uint16_t IOPB; - uint32_t address; - - if(!PROTECTED_MODE) - return; - - IOPL = m_IOP1 | (m_IOP2 << 1); - if(!V8086_MODE && (m_CPL <= IOPL)) - return; - - if((m_task.limit < 0x67) || ((m_task.flags & 0xd) != 9)) - FAULT_THROW(FAULT_GP,0); - - address = m_task.base; - IOPB = READ16PL0(address+0x66); - if((IOPB+(port/8)) > m_task.limit) - FAULT_THROW(FAULT_GP,0); - - map = READ8PL0(address+IOPB+(port/8)); - map >>= (port%8); - if(map & mask) - FAULT_THROW(FAULT_GP,0); -} - -uint8_t i386_device::READPORT8(offs_t port) -{ - check_ioperm(port, 1); - return m_io->read_byte(port); -} - -void i386_device::WRITEPORT8(offs_t port, uint8_t value) -{ - check_ioperm(port, 1); - m_io->write_byte(port, value); -} - -uint16_t i386_device::READPORT16(offs_t port) -{ - if (port & 1) - { - uint16_t value = READPORT8(port); - value |= (READPORT8(port + 1) << 8); - return value; - } - else - { - check_ioperm(port, 3); - return m_io->read_word(port); - } -} - -void i386_device::WRITEPORT16(offs_t port, uint16_t value) -{ - if (port & 1) - { - WRITEPORT8(port, value & 0xff); - WRITEPORT8(port + 1, (value >> 8) & 0xff); - } - else - { - check_ioperm(port, 3); - m_io->write_word(port, value); - } -} - 
-uint32_t i386_device::READPORT32(offs_t port) -{ - if (port & 3) - { - uint32_t value = READPORT8(port); - value |= (READPORT8(port + 1) << 8); - value |= (READPORT8(port + 2) << 16); - value |= (READPORT8(port + 3) << 24); - return value; - } - else - { - check_ioperm(port, 0xf); - return m_io->read_dword(port); - } -} - -void i386_device::WRITEPORT32(offs_t port, uint32_t value) -{ - if (port & 3) - { - WRITEPORT8(port, value & 0xff); - WRITEPORT8(port + 1, (value >> 8) & 0xff); - WRITEPORT8(port + 2, (value >> 16) & 0xff); - WRITEPORT8(port + 3, (value >> 24) & 0xff); - } - else - { - check_ioperm(port, 0xf); - m_io->write_dword(port, value); - } -} + CYCLES_MOV_REG_REG, + CYCLES_MOV_REG_MEM, + CYCLES_MOV_MEM_REG, + CYCLES_MOV_IMM_REG, + CYCLES_MOV_IMM_MEM, + CYCLES_MOV_ACC_MEM, + CYCLES_MOV_MEM_ACC, + CYCLES_MOV_REG_SREG, + CYCLES_MOV_MEM_SREG, + CYCLES_MOV_SREG_REG, + CYCLES_MOV_SREG_MEM, + CYCLES_MOVSX_REG_REG, + CYCLES_MOVSX_MEM_REG, + CYCLES_MOVZX_REG_REG, + CYCLES_MOVZX_MEM_REG, + CYCLES_PUSH_RM, + CYCLES_PUSH_REG_SHORT, + CYCLES_PUSH_SREG, + CYCLES_PUSH_IMM, + CYCLES_PUSHA, + CYCLES_POP_RM, + CYCLES_POP_REG_SHORT, + CYCLES_POP_SREG, + CYCLES_POPA, + CYCLES_XCHG_REG_REG, + CYCLES_XCHG_REG_MEM, + CYCLES_IN, + CYCLES_IN_VAR, + CYCLES_OUT, + CYCLES_OUT_VAR, + CYCLES_LEA, + CYCLES_LDS, + CYCLES_LES, + CYCLES_LFS, + CYCLES_LGS, + CYCLES_LSS, + CYCLES_CLC, + CYCLES_CLD, + CYCLES_CLI, + CYCLES_CLTS, + CYCLES_CMC, + CYCLES_LAHF, + CYCLES_POPF, + CYCLES_PUSHF, + CYCLES_SAHF, + CYCLES_STC, + CYCLES_STD, + CYCLES_STI, + CYCLES_ALU_REG_REG, + CYCLES_ALU_REG_MEM, + CYCLES_ALU_MEM_REG, + CYCLES_ALU_IMM_REG, + CYCLES_ALU_IMM_MEM, + CYCLES_ALU_IMM_ACC, + CYCLES_INC_REG, + CYCLES_INC_MEM, + CYCLES_DEC_REG, + CYCLES_DEC_MEM, + CYCLES_CMP_REG_REG, + CYCLES_CMP_REG_MEM, + CYCLES_CMP_MEM_REG, + CYCLES_CMP_IMM_REG, + CYCLES_CMP_IMM_MEM, + CYCLES_CMP_IMM_ACC, + CYCLES_TEST_REG_REG, + CYCLES_TEST_REG_MEM, + CYCLES_TEST_IMM_REG, + CYCLES_TEST_IMM_MEM, + CYCLES_TEST_IMM_ACC, + 
CYCLES_NEG_REG, + CYCLES_NEG_MEM, + CYCLES_AAA, + CYCLES_AAS, + CYCLES_DAA, + CYCLES_DAS, + CYCLES_MUL8_ACC_REG, + CYCLES_MUL8_ACC_MEM, + CYCLES_MUL16_ACC_REG, + CYCLES_MUL16_ACC_MEM, + CYCLES_MUL32_ACC_REG, + CYCLES_MUL32_ACC_MEM, + CYCLES_IMUL8_ACC_REG, + CYCLES_IMUL8_ACC_MEM, + CYCLES_IMUL16_ACC_REG, + CYCLES_IMUL16_ACC_MEM, + CYCLES_IMUL32_ACC_REG, + CYCLES_IMUL32_ACC_MEM, + CYCLES_IMUL8_REG_REG, + CYCLES_IMUL8_REG_MEM, + CYCLES_IMUL16_REG_REG, + CYCLES_IMUL16_REG_MEM, + CYCLES_IMUL32_REG_REG, + CYCLES_IMUL32_REG_MEM, + CYCLES_IMUL16_REG_IMM_REG, + CYCLES_IMUL16_MEM_IMM_REG, + CYCLES_IMUL32_REG_IMM_REG, + CYCLES_IMUL32_MEM_IMM_REG, + CYCLES_DIV8_ACC_REG, + CYCLES_DIV8_ACC_MEM, + CYCLES_DIV16_ACC_REG, + CYCLES_DIV16_ACC_MEM, + CYCLES_DIV32_ACC_REG, + CYCLES_DIV32_ACC_MEM, + CYCLES_IDIV8_ACC_REG, + CYCLES_IDIV8_ACC_MEM, + CYCLES_IDIV16_ACC_REG, + CYCLES_IDIV16_ACC_MEM, + CYCLES_IDIV32_ACC_REG, + CYCLES_IDIV32_ACC_MEM, + CYCLES_AAD, + CYCLES_AAM, + CYCLES_CBW, + CYCLES_CWD, + CYCLES_ROTATE_REG, + CYCLES_ROTATE_MEM, + CYCLES_ROTATE_CARRY_REG, + CYCLES_ROTATE_CARRY_MEM, + CYCLES_SHLD_REG, + CYCLES_SHLD_MEM, + CYCLES_SHRD_REG, + CYCLES_SHRD_MEM, + CYCLES_NOT_REG, + CYCLES_NOT_MEM, + CYCLES_CMPS, + CYCLES_INS, + CYCLES_LODS, + CYCLES_MOVS, + CYCLES_OUTS, + CYCLES_SCAS, + CYCLES_STOS, + CYCLES_XLAT, + CYCLES_REP_CMPS_BASE, + CYCLES_REP_INS_BASE, + CYCLES_REP_LODS_BASE, + CYCLES_REP_MOVS_BASE, + CYCLES_REP_OUTS_BASE, + CYCLES_REP_SCAS_BASE, + CYCLES_REP_STOS_BASE, + CYCLES_REP_CMPS, + CYCLES_REP_INS, + CYCLES_REP_LODS, + CYCLES_REP_MOVS, + CYCLES_REP_OUTS, + CYCLES_REP_SCAS, + CYCLES_REP_STOS, + CYCLES_BSF_BASE, + CYCLES_BSF, + CYCLES_BSR_BASE, + CYCLES_BSR, + CYCLES_BT_IMM_REG, + CYCLES_BT_IMM_MEM, + CYCLES_BT_REG_REG, + CYCLES_BT_REG_MEM, + CYCLES_BTC_IMM_REG, + CYCLES_BTC_IMM_MEM, + CYCLES_BTC_REG_REG, + CYCLES_BTC_REG_MEM, + CYCLES_BTR_IMM_REG, + CYCLES_BTR_IMM_MEM, + CYCLES_BTR_REG_REG, + CYCLES_BTR_REG_MEM, + CYCLES_BTS_IMM_REG, + CYCLES_BTS_IMM_MEM, + 
CYCLES_BTS_REG_REG, + CYCLES_BTS_REG_MEM, + CYCLES_CALL, // E8 + CYCLES_CALL_REG, // FF /2 + CYCLES_CALL_MEM, // FF /2 + CYCLES_CALL_INTERSEG, // 9A + CYCLES_CALL_REG_INTERSEG, // FF /3 + CYCLES_CALL_MEM_INTERSEG, // FF /3 + CYCLES_JMP_SHORT, // EB + CYCLES_JMP, // E9 + CYCLES_JMP_REG, // FF /4 + CYCLES_JMP_MEM, // FF /4 + CYCLES_JMP_INTERSEG, // EA + CYCLES_JMP_REG_INTERSEG, // FF /5 + CYCLES_JMP_MEM_INTERSEG, // FF /5 + CYCLES_RET, // C3 + CYCLES_RET_IMM, // C2 + CYCLES_RET_INTERSEG, // CB + CYCLES_RET_IMM_INTERSEG, // CA + CYCLES_JCC_DISP8, + CYCLES_JCC_FULL_DISP, + CYCLES_JCC_DISP8_NOBRANCH, + CYCLES_JCC_FULL_DISP_NOBRANCH, + CYCLES_JCXZ, + CYCLES_JCXZ_NOBRANCH, + CYCLES_LOOP, + CYCLES_LOOPZ, + CYCLES_LOOPNZ, + CYCLES_SETCC_REG, + CYCLES_SETCC_MEM, + CYCLES_ENTER, + CYCLES_LEAVE, + CYCLES_INT, + CYCLES_INT3, + CYCLES_INTO_OF1, + CYCLES_INTO_OF0, + CYCLES_BOUND_IN_RANGE, + CYCLES_BOUND_OUT_RANGE, + CYCLES_IRET, + CYCLES_HLT, + CYCLES_MOV_REG_CR0, + CYCLES_MOV_REG_CR2, + CYCLES_MOV_REG_CR3, + CYCLES_MOV_CR_REG, + CYCLES_MOV_REG_DR0_3, + CYCLES_MOV_REG_DR6_7, + CYCLES_MOV_DR6_7_REG, + CYCLES_MOV_DR0_3_REG, + CYCLES_MOV_REG_TR6_7, + CYCLES_MOV_TR6_7_REG, + CYCLES_NOP, + CYCLES_WAIT, + CYCLES_ARPL_REG, + CYCLES_ARPL_MEM, + CYCLES_LAR_REG, + CYCLES_LAR_MEM, + CYCLES_LGDT, + CYCLES_LIDT, + CYCLES_LLDT_REG, + CYCLES_LLDT_MEM, + CYCLES_LMSW_REG, + CYCLES_LMSW_MEM, + CYCLES_LSL_REG, + CYCLES_LSL_MEM, + CYCLES_LTR_REG, + CYCLES_LTR_MEM, + CYCLES_SGDT, + CYCLES_SIDT, + CYCLES_SLDT_REG, + CYCLES_SLDT_MEM, + CYCLES_SMSW_REG, + CYCLES_SMSW_MEM, + CYCLES_STR_REG, + CYCLES_STR_MEM, + CYCLES_VERR_REG, + CYCLES_VERR_MEM, + CYCLES_VERW_REG, + CYCLES_VERW_MEM, + CYCLES_LOCK, + + CYCLES_BSWAP, + CYCLES_CMPXCHG8B, + CYCLES_CMPXCHG, + CYCLES_CPUID, + CYCLES_CPUID_EAX1, + CYCLES_INVD, + CYCLES_XADD, + CYCLES_RDTSC, + CYCLES_RSM, + CYCLES_RDMSR, + + CYCLES_FABS, + CYCLES_FADD, + CYCLES_FBLD, + CYCLES_FBSTP, + CYCLES_FCHS, + CYCLES_FCLEX, + CYCLES_FCOM, + CYCLES_FCOS, + CYCLES_FDECSTP, + 
CYCLES_FDISI, + CYCLES_FDIV, + CYCLES_FDIVR, + CYCLES_FENI, + CYCLES_FFREE, + CYCLES_FIADD, + CYCLES_FICOM, + CYCLES_FIDIV, + CYCLES_FILD, + CYCLES_FIMUL, + CYCLES_FINCSTP, + CYCLES_FINIT, + CYCLES_FIST, + CYCLES_FISUB, + CYCLES_FLD, + CYCLES_FLDZ, + CYCLES_FLD1, + CYCLES_FLDL2E, + CYCLES_FLDL2T, + CYCLES_FLDLG2, + CYCLES_FLDLN2, + CYCLES_FLDPI, + CYCLES_FLDCW, + CYCLES_FLDENV, + CYCLES_FMUL, + CYCLES_FNOP, + CYCLES_FPATAN, + CYCLES_FPREM, + CYCLES_FPREM1, + CYCLES_FPTAN, + CYCLES_FRNDINT, + CYCLES_FRSTOR, + CYCLES_FSAVE, + CYCLES_FSCALE, + CYCLES_FSETPM, + CYCLES_FSIN, + CYCLES_FSINCOS, + CYCLES_FSQRT, + CYCLES_FST, + CYCLES_FSTCW, + CYCLES_FSTENV, + CYCLES_FSTSW, + CYCLES_FSUB, + CYCLES_FSUBR, + CYCLES_FTST, + CYCLES_FUCOM, + CYCLES_FXAM, + CYCLES_FXCH, + CYCLES_FXTRACT, + CYCLES_FYL2X, + CYCLES_FYL2XPI, + CYCLES_CMPXCHG_REG_REG_T, + CYCLES_CMPXCHG_REG_REG_F, + CYCLES_CMPXCHG_REG_MEM_T, + CYCLES_CMPXCHG_REG_MEM_F, + CYCLES_XADD_REG_REG, + CYCLES_XADD_REG_MEM, + + CYCLES_NUM_OPCODES +}; + + +#define CPU_CYCLES_I386 0 +#define CPU_CYCLES_I486 1 +#define CPU_CYCLES_PENTIUM 2 +#define CPU_CYCLES_MEDIAGX 3 + +#define OP_I386 0x1 +#define OP_FPU 0x2 +#define OP_I486 0x4 +#define OP_PENTIUM 0x8 +#define OP_MMX 0x10 +#define OP_PPRO 0x20 +#define OP_SSE 0x40 +#define OP_SSE2 0x80 +#define OP_SSE3 0x100 +#define OP_CYRIX 0x8000 +#define OP_2BYTE 0x80000000 +#define OP_3BYTE66 0x40000000 +#define OP_3BYTEF2 0x20000000 +#define OP_3BYTEF3 0x10000000 +#define OP_3BYTE38 0x08000000 +#define OP_3BYTE3A 0x04000000 +#define OP_4BYTE3866 0x02000000 +#define OP_4BYTE3A66 0x01000000 +#define OP_4BYTE38F2 0x00800000 +#define OP_4BYTE3AF2 0x00400000 +#define OP_4BYTE38F3 0x00200000 #endif /* __I386_H__ */ diff --git a/src/devices/cpu/i386/i386segs.hxx b/src/devices/cpu/i386/i386segs.hxx new file mode 100644 index 00000000000..69d55ec95f4 --- /dev/null +++ b/src/devices/cpu/i386/i386segs.hxx @@ -0,0 +1,2464 @@ +// license:BSD-3-Clause +// copyright-holders:Ville Linde, Barry Rodewald, 
Carl, Philip Bennett + +uint32_t i386_device::i386_load_protected_mode_segment(I386_SREG *seg, uint64_t *desc ) +{ + uint32_t v1,v2; + uint32_t base, limit; + int entry; + + if(!seg->selector) + { + seg->flags = 0; + seg->base = 0; + seg->limit = 0; + seg->d = 0; + seg->valid = false; + return 0; + } + + if ( seg->selector & 0x4 ) + { + base = m_ldtr.base; + limit = m_ldtr.limit; + } else { + base = m_gdtr.base; + limit = m_gdtr.limit; + } + + entry = seg->selector & ~0x7; + if (limit == 0 || entry + 7 > limit) + return 0; + + v1 = READ32PL(base + entry, 0); + v2 = READ32PL(base + entry + 4, 0); + + seg->flags = (v2 >> 8) & 0xf0ff; + seg->base = (v2 & 0xff000000) | ((v2 & 0xff) << 16) | ((v1 >> 16) & 0xffff); + seg->limit = (v2 & 0xf0000) | (v1 & 0xffff); + if (seg->flags & 0x8000) + seg->limit = (seg->limit << 12) | 0xfff; + seg->d = (seg->flags & 0x4000) ? 1 : 0; + seg->valid = true; + + if(desc) + *desc = ((uint64_t)v2<<32)|v1; + return 1; +} + +void i386_device::i386_load_call_gate(I386_CALL_GATE *gate) +{ + uint32_t v1,v2; + uint32_t base,limit; + int entry; + + if ( gate->segment & 0x4 ) + { + base = m_ldtr.base; + limit = m_ldtr.limit; + } else { + base = m_gdtr.base; + limit = m_gdtr.limit; + } + + entry = gate->segment & ~0x7; + if (limit == 0 || entry + 7 > limit) + return; + + v1 = READ32PL(base + entry, 0); + v2 = READ32PL(base + entry + 4, 0); + + /* Note that for task gates, offset and dword_count are not used */ + gate->selector = (v1 >> 16) & 0xffff; + gate->offset = (v1 & 0x0000ffff) | (v2 & 0xffff0000); + gate->ar = (v2 >> 8) & 0xff; + gate->dword_count = v2 & 0x001f; + gate->present = (gate->ar >> 7) & 0x01; + gate->dpl = (gate->ar >> 5) & 0x03; +} + +void i386_device::i386_set_descriptor_accessed(uint16_t selector) +{ + // assume the selector is valid, we don't need to check it again + uint32_t base, addr; + uint8_t rights; + if(!(selector & ~3)) + return; + + if ( selector & 0x4 ) + base = m_ldtr.base; + else + base = m_gdtr.base; + + addr = 
base + (selector & ~7) + 5; + i386_translate_address(TRANSLATE_READ, &addr, nullptr); + rights = m_program->read_byte(addr); + // Should a fault be thrown if the table is read only? + m_program->write_byte(addr, rights | 1); +} + +void i386_device::i386_load_segment_descriptor(int segment ) +{ + if (PROTECTED_MODE) + { + uint16_t old_flags = m_sreg[segment].flags; + if (!V8086_MODE) + { + i386_load_protected_mode_segment(&m_sreg[segment], nullptr); + if (m_sreg[segment].selector) + { + i386_set_descriptor_accessed(m_sreg[segment].selector); + m_sreg[segment].flags |= 0x0001; + } + } + else + { + m_sreg[segment].base = m_sreg[segment].selector << 4; + m_sreg[segment].limit = 0xffff; + m_sreg[segment].flags = (segment == CS) ? 0x00fb : 0x00f3; + m_sreg[segment].d = 0; + m_sreg[segment].valid = true; + } + if (segment == CS && m_sreg[segment].flags != old_flags) + debugger_privilege_hook(); + } + else + { + m_sreg[segment].base = m_sreg[segment].selector << 4; + m_sreg[segment].d = 0; + m_sreg[segment].valid = true; + + if (segment == CS) + { + if (!m_performed_intersegment_jump) + m_sreg[segment].base |= 0xfff00000; + if (m_cpu_version < 0x500) + m_sreg[segment].flags = 0x93; + } + } +} + +/* Retrieves the stack selector located in the current TSS */ +uint32_t i386_device::i386_get_stack_segment(uint8_t privilege) +{ + uint32_t ret; + if(privilege >= 3) + return 0; + + if(m_task.flags & 8) + ret = READ32PL((m_task.base+8) + (8*privilege), 0); + else + ret = READ16PL((m_task.base+4) + (4*privilege), 0); + + return ret; +} + +/* Retrieves the stack pointer located in the current TSS */ +uint32_t i386_device::i386_get_stack_ptr(uint8_t privilege) +{ + uint32_t ret; + if(privilege >= 3) + return 0; + + if(m_task.flags & 8) + ret = READ32PL((m_task.base+4) + (8*privilege), 0); + else + ret = READ16PL((m_task.base+2) + (4*privilege), 0); + + return ret; +} + +/* Check segment register for validity when changing privilege level after an RETF */ +void 
i386_device::i386_check_sreg_validity(int reg) +{ + uint16_t selector = m_sreg[reg].selector; + uint8_t CPL = m_CPL; + uint8_t DPL,RPL; + I386_SREG desc; + int invalid; + + memset(&desc, 0, sizeof(desc)); + desc.selector = selector; + i386_load_protected_mode_segment(&desc,nullptr); + DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level + RPL = selector & 0x03; + + /* Must be within the relevant descriptor table limits */ + if(selector & 0x04) + { + if((selector & ~0x07) > m_ldtr.limit) + invalid = 1; + } + else + { + if((selector & ~0x07) > m_gdtr.limit) + invalid = 1; + } + + /* Must be either a data or readable code segment */ + if(((desc.flags & 0x0018) == 0x0018 && (desc.flags & 0x0002)) || (desc.flags & 0x0018) == 0x0010) + invalid = 0; + else + invalid = 1; + + /* If a data segment or non-conforming code segment, then either DPL >= CPL or DPL >= RPL */ + if(((desc.flags & 0x0018) == 0x0018 && (desc.flags & 0x0004) == 0) || (desc.flags & 0x0018) == 0x0010) + { + if((DPL < CPL) || (DPL < RPL)) + invalid = 1; + } + + /* if segment is invalid, then segment register is nulled */ + if(invalid != 0) + { + m_sreg[reg].selector = 0; + i386_load_segment_descriptor(reg); + } +} + +int i386_device::i386_limit_check(int seg, uint32_t offset) +{ + if(PROTECTED_MODE && !V8086_MODE) + { + if((m_sreg[seg].flags & 0x0018) == 0x0010 && m_sreg[seg].flags & 0x0004) // if expand-down data segment + { + // compare if greater then 0xffffffff when we're passed the access size + if((offset <= m_sreg[seg].limit) || ((m_sreg[seg].d)?0:(offset > 0xffff))) + { + logerror("Limit check at 0x%08x failed. Segment %04x, limit %08x, offset %08x (expand-down)\n",m_pc,m_sreg[seg].selector,m_sreg[seg].limit,offset); + return 1; + } + } + else + { + if(offset > m_sreg[seg].limit) + { + logerror("Limit check at 0x%08x failed. 
Segment %04x, limit %08x, offset %08x\n",m_pc,m_sreg[seg].selector,m_sreg[seg].limit,offset); + machine().debug_break(); + return 1; + } + } + } + return 0; +} + +void i386_device::i386_sreg_load(uint16_t selector, uint8_t reg, bool *fault) +{ + // Checks done when MOV changes a segment register in protected mode + uint8_t CPL,RPL,DPL; + + CPL = m_CPL; + RPL = selector & 0x0003; + + if(!PROTECTED_MODE || V8086_MODE) + { + m_sreg[reg].selector = selector; + i386_load_segment_descriptor(reg); + if(fault) *fault = false; + return; + } + + if(fault) *fault = true; + if(reg == SS) + { + I386_SREG stack; + + memset(&stack, 0, sizeof(stack)); + stack.selector = selector; + i386_load_protected_mode_segment(&stack,nullptr); + DPL = (stack.flags >> 5) & 0x03; + + if((selector & ~0x0003) == 0) + { + logerror("SReg Load (%08x): Selector is null.\n",m_pc); + FAULT(FAULT_GP,0) + } + if(selector & 0x0004) // LDT + { + if((selector & ~0x0007) > m_ldtr.limit) + { + logerror("SReg Load (%08x): Selector is out of LDT bounds.\n",m_pc); + FAULT(FAULT_GP,selector & ~0x03) + } + } + else // GDT + { + if((selector & ~0x0007) > m_gdtr.limit) + { + logerror("SReg Load (%08x): Selector is out of GDT bounds.\n",m_pc); + FAULT(FAULT_GP,selector & ~0x03) + } + } + if (RPL != CPL) + { + logerror("SReg Load (%08x): Selector RPL does not equal CPL.\n",m_pc); + FAULT(FAULT_GP,selector & ~0x03) + } + if(((stack.flags & 0x0018) != 0x10) && (stack.flags & 0x0002) != 0) + { + logerror("SReg Load (%08x): Segment is not a writable data segment.\n",m_pc); + FAULT(FAULT_GP,selector & ~0x03) + } + if(DPL != CPL) + { + logerror("SReg Load (%08x): Segment DPL does not equal CPL.\n",m_pc); + FAULT(FAULT_GP,selector & ~0x03) + } + if(!(stack.flags & 0x0080)) + { + logerror("SReg Load (%08x): Segment is not present.\n",m_pc); + FAULT(FAULT_SS,selector & ~0x03) + } + } + if(reg == DS || reg == ES || reg == FS || reg == GS) + { + I386_SREG desc; + + if((selector & ~0x0003) == 0) + { + m_sreg[reg].selector = 
selector; + i386_load_segment_descriptor(reg ); + if(fault) *fault = false; + return; + } + + memset(&desc, 0, sizeof(desc)); + desc.selector = selector; + i386_load_protected_mode_segment(&desc,nullptr); + DPL = (desc.flags >> 5) & 0x03; + + if(selector & 0x0004) // LDT + { + if((selector & ~0x0007) > m_ldtr.limit) + { + logerror("SReg Load (%08x): Selector is out of LDT bounds.\n",m_pc); + FAULT(FAULT_GP,selector & ~0x03) + } + } + else // GDT + { + if((selector & ~0x0007) > m_gdtr.limit) + { + logerror("SReg Load (%08x): Selector is out of GDT bounds.\n",m_pc); + FAULT(FAULT_GP,selector & ~0x03) + } + } + if((desc.flags & 0x0018) != 0x10) + { + if((((desc.flags & 0x0002) != 0) && ((desc.flags & 0x0018) != 0x18)) || !(desc.flags & 0x10)) + { + logerror("SReg Load (%08x): Segment is not a data segment or readable code segment.\n",m_pc); + FAULT(FAULT_GP,selector & ~0x03) + } + } + if(((desc.flags & 0x0018) == 0x10) || ((!(desc.flags & 0x0004)) && ((desc.flags & 0x0018) == 0x18))) + { + // if data or non-conforming code segment + if((RPL > DPL) || (CPL > DPL)) + { + logerror("SReg Load (%08x): Selector RPL or CPL is not less or equal to segment DPL.\n",m_pc); + FAULT(FAULT_GP,selector & ~0x03) + } + } + if(!(desc.flags & 0x0080)) + { + logerror("SReg Load (%08x): Segment is not present.\n",m_pc); + FAULT(FAULT_NP,selector & ~0x03) + } + } + + m_sreg[reg].selector = selector; + i386_load_segment_descriptor(reg ); + if(fault) *fault = false; +} + +void i386_device::i386_trap(int irq, int irq_gate, int trap_level) +{ + /* I386 Interrupts/Traps/Faults: + * + * 0x00 Divide by zero + * 0x01 Debug exception + * 0x02 NMI + * 0x03 Int3 + * 0x04 Overflow + * 0x05 Array bounds check + * 0x06 Illegal Opcode + * 0x07 FPU not available + * 0x08 Double fault + * 0x09 Coprocessor segment overrun + * 0x0a Invalid task state + * 0x0b Segment not present + * 0x0c Stack exception + * 0x0d General Protection Fault + * 0x0e Page fault + * 0x0f Reserved + * 0x10 Coprocessor error + */ + 
uint32_t v1, v2; + uint32_t offset, oldflags = get_flags(); + uint16_t segment; + int entry = irq * (PROTECTED_MODE ? 8 : 4); + int SetRPL; + m_lock = false; + + if( !(PROTECTED_MODE) ) + { + /* 16-bit */ + PUSH16(oldflags & 0xffff ); + PUSH16(m_sreg[CS].selector ); + if(irq == 3 || irq == 4 || irq == 9 || irq_gate == 1) + PUSH16(m_eip ); + else + PUSH16(m_prev_eip ); + + m_sreg[CS].selector = READ16(m_idtr.base + entry + 2 ); + m_eip = READ16(m_idtr.base + entry ); + + m_TF = 0; + m_IF = 0; + } + else + { + int type; + uint16_t flags; + I386_SREG desc; + uint8_t CPL = m_CPL, DPL; //, RPL = 0; + + /* 32-bit */ + v1 = READ32PL(m_idtr.base + entry, 0); + v2 = READ32PL(m_idtr.base + entry + 4, 0); + offset = (v2 & 0xffff0000) | (v1 & 0xffff); + segment = (v1 >> 16) & 0xffff; + type = (v2>>8) & 0x1F; + flags = (v2>>8) & 0xf0ff; + + if(trap_level == 2) + { + logerror("IRQ: Double fault.\n"); + FAULT_EXP(FAULT_DF,0); + } + if(trap_level >= 3) + { + logerror("IRQ: Triple fault. CPU reset.\n"); + pulse_input_line(INPUT_LINE_RESET, attotime::zero); + return; + } + + /* segment privilege checks */ + if(entry >= m_idtr.limit) + { + logerror("IRQ (%08x): Vector %02xh is past IDT limit.\n",m_pc,entry); + FAULT_EXP(FAULT_GP,entry+2) + } + /* segment must be interrupt gate, trap gate, or task gate */ + if(type != 0x05 && type != 0x06 && type != 0x07 && type != 0x0e && type != 0x0f) + { + logerror("IRQ#%02x (%08x): Vector segment %04x is not an interrupt, trap or task gate.\n",irq,m_pc,segment); + FAULT_EXP(FAULT_GP,entry+2) + } + + if(m_ext == 0) // if software interrupt (caused by INT/INTO/INT3) + { + if(((flags >> 5) & 0x03) < CPL) + { + logerror("IRQ (%08x): Software IRQ - gate DPL is less than CPL.\n",m_pc); + FAULT_EXP(FAULT_GP,entry+2) + } + if(V8086_MODE) + { + if((!m_IOP1 || !m_IOP2) && (m_opcode != 0xcc)) + { + logerror("IRQ (%08x): Is in Virtual 8086 mode and IOPL != 3.\n",m_pc); + FAULT(FAULT_GP,0) + } + + } + } + + if((flags & 0x0080) == 0) + { + logerror("IRQ: Vector 
segment is not present.\n"); + FAULT_EXP(FAULT_NP,entry+2) + } + + if(type == 0x05) + { + /* Task gate */ + memset(&desc, 0, sizeof(desc)); + desc.selector = segment; + i386_load_protected_mode_segment(&desc,nullptr); + if(segment & 0x04) + { + logerror("IRQ: Task gate: TSS is not in the GDT.\n"); + FAULT_EXP(FAULT_TS,segment & ~0x03); + } + else + { + if(segment > m_gdtr.limit) + { + logerror("IRQ: Task gate: TSS is past GDT limit.\n"); + FAULT_EXP(FAULT_TS,segment & ~0x03); + } + } + if((desc.flags & 0x000f) != 0x09 && (desc.flags & 0x000f) != 0x01) + { + logerror("IRQ: Task gate: TSS is not an available TSS.\n"); + FAULT_EXP(FAULT_TS,segment & ~0x03); + } + if((desc.flags & 0x0080) == 0) + { + logerror("IRQ: Task gate: TSS is not present.\n"); + FAULT_EXP(FAULT_NP,segment & ~0x03); + } + if(!(irq == 3 || irq == 4 || irq == 9 || irq_gate == 1)) + m_eip = m_prev_eip; + if(desc.flags & 0x08) + i386_task_switch(desc.selector,1); + else + i286_task_switch(desc.selector,1); + return; + } + else + { + /* Interrupt or Trap gate */ + memset(&desc, 0, sizeof(desc)); + desc.selector = segment; + i386_load_protected_mode_segment(&desc,nullptr); + CPL = m_CPL; // current privilege level + DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level +// RPL = segment & 0x03; // requested privilege level + + if((segment & ~0x03) == 0) + { + logerror("IRQ: Gate segment is null.\n"); + FAULT_EXP(FAULT_GP,m_ext) + } + if(segment & 0x04) + { + if((segment & ~0x07) > m_ldtr.limit) + { + logerror("IRQ: Gate segment is past LDT limit.\n"); + FAULT_EXP(FAULT_GP,(segment & 0x03)+m_ext) + } + } + else + { + if((segment & ~0x07) > m_gdtr.limit) + { + logerror("IRQ: Gate segment is past GDT limit.\n"); + FAULT_EXP(FAULT_GP,(segment & 0x03)+m_ext) + } + } + if((desc.flags & 0x0018) != 0x18) + { + logerror("IRQ: Gate descriptor is not a code segment.\n"); + FAULT_EXP(FAULT_GP,(segment & 0x03)+m_ext) + } + if((desc.flags & 0x0080) == 0) + { + logerror("IRQ: Gate segment is not present.\n"); 
+ FAULT_EXP(FAULT_NP,(segment & 0x03)+m_ext) + } + if((desc.flags & 0x0004) == 0 && (DPL < CPL)) + { + /* IRQ to inner privilege */ + I386_SREG stack; + uint32_t newESP,oldSS,oldESP; + + if(V8086_MODE && DPL) + { + logerror("IRQ: Gate to CPL>0 from VM86 mode.\n"); + FAULT_EXP(FAULT_GP,segment & ~0x03); + } + /* Check new stack segment in TSS */ + memset(&stack, 0, sizeof(stack)); + stack.selector = i386_get_stack_segment(DPL); + i386_load_protected_mode_segment(&stack,nullptr); + oldSS = m_sreg[SS].selector; + if(flags & 0x0008) + oldESP = REG32(ESP); + else + oldESP = REG16(SP); + if((stack.selector & ~0x03) == 0) + { + logerror("IRQ: New stack selector is null.\n"); + FAULT_EXP(FAULT_GP,m_ext) + } + if(stack.selector & 0x04) + { + if((stack.selector & ~0x07) > m_ldtr.base) + { + logerror("IRQ: New stack selector is past LDT limit.\n"); + FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+m_ext) + } + } + else + { + if((stack.selector & ~0x07) > m_gdtr.base) + { + logerror("IRQ: New stack selector is past GDT limit.\n"); + FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+m_ext) + } + } + if((stack.selector & 0x03) != DPL) + { + logerror("IRQ: New stack selector RPL is not equal to code segment DPL.\n"); + FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+m_ext) + } + if(((stack.flags >> 5) & 0x03) != DPL) + { + logerror("IRQ: New stack segment DPL is not equal to code segment DPL.\n"); + FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+m_ext) + } + if(((stack.flags & 0x0018) != 0x10) && (stack.flags & 0x0002) != 0) + { + logerror("IRQ: New stack segment is not a writable data segment.\n"); + FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+m_ext) // #TS(stack selector + EXT) + } + if((stack.flags & 0x0080) == 0) + { + logerror("IRQ: New stack segment is not present.\n"); + FAULT_EXP(FAULT_SS,(stack.selector & ~0x03)+m_ext) // #TS(stack selector + EXT) + } + newESP = i386_get_stack_ptr(DPL); + if(type & 0x08) // 32-bit gate + { + if(((newESP < (V8086_MODE?36:20)) && !(stack.flags & 0x4)) || 
((~stack.limit < (~(newESP - 1) + (V8086_MODE?36:20))) && (stack.flags & 0x4))) + { + logerror("IRQ: New stack has no space for return addresses.\n"); + FAULT_EXP(FAULT_SS,0) + } + } + else // 16-bit gate + { + newESP &= 0xffff; + if(((newESP < (V8086_MODE?18:10)) && !(stack.flags & 0x4)) || ((~stack.limit < (~(newESP - 1) + (V8086_MODE?18:10))) && (stack.flags & 0x4))) + { + logerror("IRQ: New stack has no space for return addresses.\n"); + FAULT_EXP(FAULT_SS,0) + } + } + if(offset > desc.limit) + { + logerror("IRQ: New EIP is past code segment limit.\n"); + FAULT_EXP(FAULT_GP,0) + } + /* change CPL before accessing the stack */ + m_CPL = DPL; + /* check for page fault at new stack TODO: check if stack frame crosses page boundary */ + WRITE_TEST(stack.base+newESP-1); + /* Load new stack segment descriptor */ + m_sreg[SS].selector = stack.selector; + i386_load_protected_mode_segment(&m_sreg[SS],nullptr); + i386_set_descriptor_accessed(stack.selector); + REG32(ESP) = newESP; + if(V8086_MODE) + { + //logerror("IRQ (%08x): Interrupt during V8086 task\n",m_pc); + if(type & 0x08) + { + PUSH32SEG(m_sreg[GS].selector & 0xffff); + PUSH32SEG(m_sreg[FS].selector & 0xffff); + PUSH32SEG(m_sreg[DS].selector & 0xffff); + PUSH32SEG(m_sreg[ES].selector & 0xffff); + } + else + { + PUSH16(m_sreg[GS].selector); + PUSH16(m_sreg[FS].selector); + PUSH16(m_sreg[DS].selector); + PUSH16(m_sreg[ES].selector); + } + m_sreg[GS].selector = 0; + m_sreg[FS].selector = 0; + m_sreg[DS].selector = 0; + m_sreg[ES].selector = 0; + m_VM = 0; + i386_load_segment_descriptor(GS); + i386_load_segment_descriptor(FS); + i386_load_segment_descriptor(DS); + i386_load_segment_descriptor(ES); + } + if(type & 0x08) + { + // 32-bit gate + PUSH32SEG(oldSS); + PUSH32(oldESP); + } + else + { + // 16-bit gate + PUSH16(oldSS); + PUSH16(oldESP); + } + SetRPL = 1; + } + else + { + int stack_limit; + if((desc.flags & 0x0004) || (DPL == CPL)) + { + /* IRQ to same privilege */ + if(V8086_MODE && !m_ext) + { + 
logerror("IRQ: Gate to same privilege from VM86 mode.\n"); + FAULT_EXP(FAULT_GP,segment & ~0x03); + } + if(type == 0x0e || type == 0x0f) // 32-bit gate + stack_limit = 10; + else + stack_limit = 6; + // TODO: Add check for error code (2 extra bytes) + if(REG32(ESP) < stack_limit) + { + logerror("IRQ: Stack has no space left (needs %i bytes).\n",stack_limit); + FAULT_EXP(FAULT_SS,0) + } + if(offset > desc.limit) + { + logerror("IRQ: Gate segment offset is past segment limit.\n"); + FAULT_EXP(FAULT_GP,0) + } + SetRPL = 1; + } + else + { + logerror("IRQ: Gate descriptor is non-conforming, and DPL does not equal CPL.\n"); + FAULT_EXP(FAULT_GP,segment) + } + } + } + uint32_t tempSP = REG32(ESP); + try + { + // this is ugly but the alternative is worse + if(type != 0x0e && type != 0x0f) // if not 386 interrupt or trap gate + { + PUSH16(oldflags & 0xffff ); + PUSH16(m_sreg[CS].selector ); + if(irq == 3 || irq == 4 || irq == 9 || irq_gate == 1) + PUSH16(m_eip ); + else + PUSH16(m_prev_eip ); + } + else + { + PUSH32(oldflags & 0x00ffffff ); + PUSH32SEG(m_sreg[CS].selector ); + if(irq == 3 || irq == 4 || irq == 9 || irq_gate == 1) + PUSH32(m_eip ); + else + PUSH32(m_prev_eip ); + } + } + catch(uint64_t e) + { + REG32(ESP) = tempSP; + throw e; + } + if(SetRPL != 0) + segment = (segment & ~0x03) | m_CPL; + m_sreg[CS].selector = segment; + m_eip = offset; + + if(type == 0x0e || type == 0x06) + m_IF = 0; + m_TF = 0; + m_NT = 0; + } + + i386_load_segment_descriptor(CS); + CHANGE_PC(m_eip); + +} + +void i386_device::i386_trap_with_error(int irq, int irq_gate, int trap_level, uint32_t error) +{ + i386_trap(irq,irq_gate,trap_level); + if(irq == 8 || irq == 10 || irq == 11 || irq == 12 || irq == 13 || irq == 14) + { + // for these exceptions, an error code is pushed onto the stack by the processor. + // no error code is pushed for software interrupts, either. 
+ if(PROTECTED_MODE) + { + uint32_t entry = irq * 8; + uint32_t v2,type; + v2 = READ32PL(m_idtr.base + entry + 4, 0); + type = (v2>>8) & 0x1F; + if(type == 5) + { + v2 = READ32PL(m_idtr.base + entry, 0); + v2 = READ32PL(m_gdtr.base + ((v2 >> 16) & 0xfff8) + 4, 0); + type = (v2>>8) & 0x1F; + } + if(type >= 9) + PUSH32(error); + else + PUSH16(error); + } + else + PUSH16(error); + } +} + + +void i386_device::i286_task_switch(uint16_t selector, uint8_t nested) +{ + uint32_t tss; + I386_SREG seg; + uint16_t old_task; + uint8_t ar_byte; // access rights byte + + /* TODO: Task State Segment privilege checks */ + + /* For tasks that aren't nested, clear the busy bit in the task's descriptor */ + if(nested == 0) + { + if(m_task.segment & 0x0004) + { + ar_byte = READ8(m_ldtr.base + (m_task.segment & ~0x0007) + 5); + WRITE8(m_ldtr.base + (m_task.segment & ~0x0007) + 5,ar_byte & ~0x02); + } + else + { + ar_byte = READ8(m_gdtr.base + (m_task.segment & ~0x0007) + 5); + WRITE8(m_gdtr.base + (m_task.segment & ~0x0007) + 5,ar_byte & ~0x02); + } + } + + /* Save the state of the current task in the current TSS (TR register base) */ + tss = m_task.base; + WRITE16(tss+0x0e,m_eip & 0x0000ffff); + WRITE16(tss+0x10,get_flags() & 0x0000ffff); + WRITE16(tss+0x12,REG16(AX)); + WRITE16(tss+0x14,REG16(CX)); + WRITE16(tss+0x16,REG16(DX)); + WRITE16(tss+0x18,REG16(BX)); + WRITE16(tss+0x1a,REG16(SP)); + WRITE16(tss+0x1c,REG16(BP)); + WRITE16(tss+0x1e,REG16(SI)); + WRITE16(tss+0x20,REG16(DI)); + WRITE16(tss+0x22,m_sreg[ES].selector); + WRITE16(tss+0x24,m_sreg[CS].selector); + WRITE16(tss+0x26,m_sreg[SS].selector); + WRITE16(tss+0x28,m_sreg[DS].selector); + + old_task = m_task.segment; + + /* Load task register with the selector of the incoming task */ + m_task.segment = selector; + memset(&seg, 0, sizeof(seg)); + seg.selector = m_task.segment; + i386_load_protected_mode_segment(&seg,nullptr); + m_task.limit = seg.limit; + m_task.base = seg.base; + m_task.flags = seg.flags; + + /* Set TS bit in CR0 
*/
	m_cr[0] |= 0x08;

	/* Load incoming task state from the new task's TSS */
	// (tail of i286_task_switch: 16-bit TSS layout — word-sized fields, offsets per the 286 TSS)
	tss = m_task.base;
	m_ldtr.segment = READ16(tss+0x2a) & 0xffff;
	seg.selector = m_ldtr.segment;
	i386_load_protected_mode_segment(&seg,nullptr);
	m_ldtr.limit = seg.limit;
	m_ldtr.base = seg.base;
	m_ldtr.flags = seg.flags;
	m_eip = READ16(tss+0x0e);
	set_flags(READ16(tss+0x10));
	REG16(AX) = READ16(tss+0x12);
	REG16(CX) = READ16(tss+0x14);
	REG16(DX) = READ16(tss+0x16);
	REG16(BX) = READ16(tss+0x18);
	REG16(SP) = READ16(tss+0x1a);
	REG16(BP) = READ16(tss+0x1c);
	REG16(SI) = READ16(tss+0x1e);
	REG16(DI) = READ16(tss+0x20);
	m_sreg[ES].selector = READ16(tss+0x22) & 0xffff;
	i386_load_segment_descriptor(ES);
	m_sreg[CS].selector = READ16(tss+0x24) & 0xffff;
	i386_load_segment_descriptor(CS);
	m_sreg[SS].selector = READ16(tss+0x26) & 0xffff;
	i386_load_segment_descriptor(SS);
	m_sreg[DS].selector = READ16(tss+0x28) & 0xffff;
	i386_load_segment_descriptor(DS);

	/* Set the busy bit in the new task's descriptor */
	// bit 2 of the selector (TI) picks LDT vs GDT; +5 is the access-rights byte of the descriptor
	if(selector & 0x0004)
	{
		ar_byte = READ8(m_ldtr.base + (selector & ~0x0007) + 5);
		WRITE8(m_ldtr.base + (selector & ~0x0007) + 5,ar_byte | 0x02);
	}
	else
	{
		ar_byte = READ8(m_gdtr.base + (selector & ~0x0007) + 5);
		WRITE8(m_gdtr.base + (selector & ~0x0007) + 5,ar_byte | 0x02);
	}

	/* For nested tasks, we write the outgoing task's selector to the back-link field of the new TSS,
	   and set the NT flag in the EFLAGS register */
	if(nested != 0)
	{
		WRITE16(tss+0,old_task);
		m_NT = 1;
	}
	CHANGE_PC(m_eip);

	m_CPL = (m_sreg[SS].flags >> 5) & 3;
// printf("286 Task Switch from selector %04x to %04x\n",old_task,selector);
}

// Switch to a new task through a 386 (32-bit) TSS.
// selector: selector of the incoming task's TSS descriptor.
// nested:   non-zero for CALL/INT-style switches — the outgoing task's selector
//           is written to the new TSS back-link and NT is set; zero for
//           JMP-style switches, which instead clear the old task's busy bit.
void i386_device::i386_task_switch(uint16_t selector, uint8_t nested)
{
	uint32_t tss;
	I386_SREG seg;
	uint16_t old_task;
	uint8_t ar_byte; // access rights byte
	uint32_t oldcr3 = m_cr[3];

	/* TODO: Task State Segment privilege checks */

	/* For tasks that aren't nested, clear the busy bit in the
	   task's descriptor */
	if(nested == 0)
	{
		if(m_task.segment & 0x0004)
		{
			ar_byte = READ8(m_ldtr.base + (m_task.segment & ~0x0007) + 5);
			WRITE8(m_ldtr.base + (m_task.segment & ~0x0007) + 5,ar_byte & ~0x02);
		}
		else
		{
			ar_byte = READ8(m_gdtr.base + (m_task.segment & ~0x0007) + 5);
			WRITE8(m_gdtr.base + (m_task.segment & ~0x0007) + 5,ar_byte & ~0x02);
		}
	}

	/* Save the state of the current task in the current TSS (TR register base) */
	// 32-bit TSS field offsets; CR3 at +0x1c is saved too
	tss = m_task.base;
	WRITE32(tss+0x1c,m_cr[3]); // correct?
	WRITE32(tss+0x20,m_eip);
	WRITE32(tss+0x24,get_flags());
	WRITE32(tss+0x28,REG32(EAX));
	WRITE32(tss+0x2c,REG32(ECX));
	WRITE32(tss+0x30,REG32(EDX));
	WRITE32(tss+0x34,REG32(EBX));
	WRITE32(tss+0x38,REG32(ESP));
	WRITE32(tss+0x3c,REG32(EBP));
	WRITE32(tss+0x40,REG32(ESI));
	WRITE32(tss+0x44,REG32(EDI));
	WRITE32(tss+0x48,m_sreg[ES].selector);
	WRITE32(tss+0x4c,m_sreg[CS].selector);
	WRITE32(tss+0x50,m_sreg[SS].selector);
	WRITE32(tss+0x54,m_sreg[DS].selector);
	WRITE32(tss+0x58,m_sreg[FS].selector);
	WRITE32(tss+0x5c,m_sreg[GS].selector);

	old_task = m_task.segment;

	/* Load task register with the selector of the incoming task */
	m_task.segment = selector;
	memset(&seg, 0, sizeof(seg));
	seg.selector = m_task.segment;
	i386_load_protected_mode_segment(&seg,nullptr);
	m_task.limit = seg.limit;
	m_task.base = seg.base;
	m_task.flags = seg.flags;

	/* Set TS bit in CR0 */
	m_cr[0] |= 0x08;

	/* Load incoming task state from the new task's TSS */
	tss = m_task.base;
	m_ldtr.segment = READ32(tss+0x60) & 0xffff;
	seg.selector = m_ldtr.segment;
	i386_load_protected_mode_segment(&seg,nullptr);
	m_ldtr.limit = seg.limit;
	m_ldtr.base = seg.base;
	m_ldtr.flags = seg.flags;
	m_eip = READ32(tss+0x20);
	set_flags(READ32(tss+0x24));
	REG32(EAX) = READ32(tss+0x28);
	REG32(ECX) = READ32(tss+0x2c);
	REG32(EDX) = READ32(tss+0x30);
	REG32(EBX) = READ32(tss+0x34);
	REG32(ESP) = READ32(tss+0x38);
	REG32(EBP) = READ32(tss+0x3c);
	REG32(ESI) = READ32(tss+0x40);
	REG32(EDI) = READ32(tss+0x44);
	m_sreg[ES].selector = READ32(tss+0x48) & 0xffff;
	i386_load_segment_descriptor(ES);
	m_sreg[CS].selector = READ32(tss+0x4c) & 0xffff;
	i386_load_segment_descriptor(CS);
	m_sreg[SS].selector = READ32(tss+0x50) & 0xffff;
	i386_load_segment_descriptor(SS);
	m_sreg[DS].selector = READ32(tss+0x54) & 0xffff;
	i386_load_segment_descriptor(DS);
	m_sreg[FS].selector = READ32(tss+0x58) & 0xffff;
	i386_load_segment_descriptor(FS);
	m_sreg[GS].selector = READ32(tss+0x5c) & 0xffff;
	i386_load_segment_descriptor(GS);
	/* For nested tasks, we write the outgoing task's selector to the back-link field of the new TSS,
	   and set the NT flag in the EFLAGS register before setting cr3 as the old tss address might be gone */
	// NOTE: ordering matters — the back-link write uses the old address space, so it
	// must happen before CR3 is reloaded below.
	if(nested != 0)
	{
		WRITE32(tss+0,old_task);
		m_NT = 1;
	}
	m_cr[3] = READ32(tss+0x1c); // CR3 (PDBR)
	if(oldcr3 != m_cr[3])
		vtlb_flush_dynamic();

	/* Set the busy bit in the new task's descriptor */
	if(selector & 0x0004)
	{
		ar_byte = READ8(m_ldtr.base + (selector & ~0x0007) + 5);
		WRITE8(m_ldtr.base + (selector & ~0x0007) + 5,ar_byte | 0x02);
	}
	else
	{
		ar_byte = READ8(m_gdtr.base + (selector & ~0x0007) + 5);
		WRITE8(m_gdtr.base + (selector & ~0x0007) + 5,ar_byte | 0x02);
	}

	CHANGE_PC(m_eip);

	// CPL comes from the DPL field of the freshly loaded SS descriptor
	m_CPL = (m_sreg[SS].flags >> 5) & 3;
// printf("386 Task Switch from selector %04x to %04x\n",old_task,selector);
}

// Far JMP in protected mode: resolves seg:off through a code-segment,
// TSS, call-gate or task-gate descriptor, performing the privilege checks
// and raising #GP/#NP faults as needed.
// NOTE(review): the 'indirect' parameter is not referenced in the visible body.
void i386_device::i386_protected_mode_jump(uint16_t seg, uint32_t off, int indirect, int operand32)
{
	I386_SREG desc;
	I386_CALL_GATE call_gate;
	uint8_t CPL,DPL,RPL;
	uint8_t SetRPL;
	uint16_t segment = seg;
	uint32_t offset = off;

	/* Check selector is not null */
	if((segment & ~0x03) == 0)
	{
		logerror("JMP: Segment is null.\n");
		FAULT(FAULT_GP,0)
	}
	/* Selector is within descriptor table limit */
	if((segment & 0x04) == 0)
	{
		/* check GDT limit */
		if((segment & ~0x07) > (m_gdtr.limit))
		{
			logerror("JMP: Segment is past GDT limit.\n");
			FAULT(FAULT_GP,segment & 0xfffc)
		}
	}
	else
	{
		/* check LDT limit */
		if((segment & ~0x07) > (m_ldtr.limit))
		{
			logerror("JMP: Segment is past LDT limit.\n");
			FAULT(FAULT_GP,segment & 0xfffc)
		}
	}
	/* Determine segment type */
	memset(&desc, 0, sizeof(desc));
	desc.selector = segment;
	i386_load_protected_mode_segment(&desc,nullptr);
	CPL = m_CPL; // current privilege level
	DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
	RPL = segment & 0x03; // requested privilege level
	if((desc.flags & 0x0018) == 0x0018)
	{
		/* code segment */
		if((desc.flags & 0x0004) == 0)
		{
			/* non-conforming */
			// NOTE(review): log wording says "less than" but the check is RPL > CPL
			if(RPL > CPL)
			{
				logerror("JMP: RPL %i is less than CPL %i\n",RPL,CPL);
				FAULT(FAULT_GP,segment & 0xfffc)
			}
			if(DPL != CPL)
			{
				logerror("JMP: DPL %i is not equal CPL %i\n",DPL,CPL);
				FAULT(FAULT_GP,segment & 0xfffc)
			}
		}
		else
		{
			/* conforming */
			if(DPL > CPL)
			{
				logerror("JMP: DPL %i is less than CPL %i\n",DPL,CPL);
				FAULT(FAULT_GP,segment & 0xfffc)
			}
		}
		SetRPL = 1;
		if((desc.flags & 0x0080) == 0)
		{
			logerror("JMP: Segment is not present\n");
			FAULT(FAULT_NP,segment & 0xfffc)
		}
		if(offset > desc.limit)
		{
			logerror("JMP: Offset is past segment limit\n");
			FAULT(FAULT_GP,0)
		}
	}
	else
	{
		if((desc.flags & 0x0010) != 0)
		{
			logerror("JMP: Segment is a data segment\n");
			FAULT(FAULT_GP,segment & 0xfffc) // #GP (cannot execute code in a data segment)
		}
		else
		{
			// system descriptor: dispatch on its type field
			switch(desc.flags & 0x000f)
			{
			case 0x01: // 286 Available TSS
			case 0x09: // 386 Available TSS
				logerror("JMP: Available 386 TSS at %08x\n",m_pc);
				memset(&desc, 0, sizeof(desc));
				desc.selector = segment;
				i386_load_protected_mode_segment(&desc,nullptr);
				DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
				if(DPL < CPL)
				{
					logerror("JMP: TSS: DPL %i is less than CPL %i\n",DPL,CPL);
					FAULT(FAULT_GP,segment & 0xfffc)
				}
				if(DPL < RPL)
				{
					logerror("JMP: TSS: DPL %i is less than TSS RPL %i\n",DPL,RPL);
					FAULT(FAULT_GP,segment & 0xfffc)
				}
				if((desc.flags & 0x0080) == 0)
				{
					logerror("JMP: TSS: Segment is not present\n");
					FAULT(FAULT_GP,segment & 0xfffc)
				}
				// bit 3 of the type distinguishes 386 (32-bit) from 286 (16-bit) TSS
				if(desc.flags & 0x0008)
					i386_task_switch(desc.selector,0);
				else
					i286_task_switch(desc.selector,0);
				return;
			case 0x04: // 286 Call Gate
			case 0x0c: // 386 Call Gate
				//logerror("JMP: Call gate at %08x\n",m_pc);
				SetRPL = 1;
				memset(&call_gate, 0, sizeof(call_gate));
				call_gate.segment = segment;
				i386_load_call_gate(&call_gate);
				DPL = call_gate.dpl;
				if(DPL < CPL)
				{
					logerror("JMP: Call Gate: DPL %i is less than CPL %i\n",DPL,CPL);
					FAULT(FAULT_GP,segment & 0xfffc)
				}
				if(DPL < RPL)
				{
					logerror("JMP: Call Gate: DPL %i is less than RPL %i\n",DPL,RPL);
					FAULT(FAULT_GP,segment & 0xfffc)
				}
				if((desc.flags & 0x0080) == 0)
				{
					logerror("JMP: Call Gate: Segment is not present\n");
					FAULT(FAULT_NP,segment & 0xfffc)
				}
				/* Now we examine the segment that the call gate refers to */
				if(call_gate.selector == 0)
				{
					logerror("JMP: Call Gate: Gate selector is null\n");
					FAULT(FAULT_GP,0)
				}
				if(call_gate.selector & 0x04)
				{
					if((call_gate.selector & ~0x07) > m_ldtr.limit)
					{
						logerror("JMP: Call Gate: Gate Selector is past LDT segment limit\n");
						FAULT(FAULT_GP,call_gate.selector & 0xfffc)
					}
				}
				else
				{
					if((call_gate.selector & ~0x07) > m_gdtr.limit)
					{
						logerror("JMP: Call Gate: Gate Selector is past GDT segment limit\n");
						FAULT(FAULT_GP,call_gate.selector & 0xfffc)
					}
				}
				desc.selector = call_gate.selector;
				i386_load_protected_mode_segment(&desc,nullptr);
				DPL = (desc.flags >> 5) & 0x03;
				if((desc.flags & 0x0018) != 0x18)
				{
					logerror("JMP: Call Gate: Gate does not point to a code segment\n");
					FAULT(FAULT_GP,call_gate.selector & 0xfffc)
				}
				if((desc.flags & 0x0004) == 0)
				{ // non-conforming
					if(DPL != CPL)
					{
						logerror("JMP: Call Gate: Gate DPL does not equal CPL\n");
						FAULT(FAULT_GP,call_gate.selector & 0xfffc)
					}
				}
				else
				{ // conforming
					if(DPL > CPL)
					{
						logerror("JMP: Call Gate: Gate DPL is greater than CPL\n");
						FAULT(FAULT_GP,call_gate.selector & 0xfffc)
					}
				}
				if((desc.flags & 0x0080) == 0)
				{
					logerror("JMP: Call Gate: Gate Segment is not present\n");
					FAULT(FAULT_NP,call_gate.selector & 0xfffc)
				}
				if(call_gate.offset > desc.limit)
				{
					logerror("JMP: Call Gate: Gate offset is past Gate segment limit\n");
					FAULT(FAULT_GP,call_gate.selector & 0xfffc)
				}
				// redirect the jump through the gate's target
				segment = call_gate.selector;
				offset = call_gate.offset;
				break;
			case 0x05: // Task Gate
				logerror("JMP: Task gate at %08x\n",m_pc);
				memset(&call_gate, 0, sizeof(call_gate));
				call_gate.segment = segment;
				i386_load_call_gate(&call_gate);
				DPL = call_gate.dpl;
				if(DPL < CPL)
				{
					logerror("JMP: Task Gate: Gate DPL %i is less than CPL %i\n",DPL,CPL);
					FAULT(FAULT_GP,segment & 0xfffc)
				}
				// NOTE(review): this message prints CPL but the comparison is against RPL
				if(DPL < RPL)
				{
					logerror("JMP: Task Gate: Gate DPL %i is less than CPL %i\n",DPL,CPL);
					FAULT(FAULT_GP,segment & 0xfffc)
				}
				if(call_gate.present == 0)
				{
					logerror("JMP: Task Gate: Gate is not present.\n");
					FAULT(FAULT_GP,segment & 0xfffc)
				}
				/* Check the TSS that the task gate points to */
				desc.selector = call_gate.selector;
				i386_load_protected_mode_segment(&desc,nullptr);
				DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
				RPL = call_gate.selector & 0x03; // requested privilege level
				if(call_gate.selector & 0x04)
				{
					logerror("JMP: Task Gate TSS: TSS must be global.\n");
					FAULT(FAULT_GP,call_gate.selector & 0xfffc)
				}
				else
				{
					if((call_gate.selector & ~0x07) > m_gdtr.limit)
					{
						logerror("JMP: Task Gate TSS: TSS is past GDT limit.\n");
						FAULT(FAULT_GP,call_gate.selector & 0xfffc)
					}
				}
				// NOTE(review): this faults when the type IS an available TSS (0x09/0x01);
				// the intent per the message appears to be the inverse — confirm against hardware docs
				if((call_gate.ar & 0x000f) == 0x0009 || (call_gate.ar & 0x000f) == 0x0001)
				{
					logerror("JMP: Task Gate TSS: Segment is not an available TSS.\n");
					FAULT(FAULT_GP,call_gate.selector & 0xfffc)
				}
				if(call_gate.present == 0)
				{
					logerror("JMP: Task Gate TSS: TSS is not present.\n");
					FAULT(FAULT_NP,call_gate.selector & 0xfffc)
				}
				if(call_gate.ar & 0x08)
					i386_task_switch(call_gate.selector,0);
				else
					i286_task_switch(call_gate.selector,0);
				return;
			default: // invalid segment type
				logerror("JMP: Invalid segment type (%i) to jump to.\n",desc.flags & 0x000f);
				FAULT(FAULT_GP,segment & 0xfffc)
			}
		}
	}

	// commit the new CS:EIP (RPL of the loaded selector is forced to CPL)
	if(SetRPL != 0)
		segment = (segment & ~0x03) | m_CPL;
	if(operand32 == 0)
		m_eip = offset & 0x0000ffff;
	else
		m_eip = offset;
	m_sreg[CS].selector = segment;
	m_performed_intersegment_jump = 1;
	i386_load_segment_descriptor(CS);
	CHANGE_PC(m_eip);
}

// Far CALL in protected mode: resolves seg:off through a code-segment,
// TSS, call-gate or task-gate descriptor, including the inner-privilege
// stack switch for call gates.
// NOTE(review): the 'indirect' parameter is not referenced in the visible body.
void i386_device::i386_protected_mode_call(uint16_t seg, uint32_t off, int indirect, int operand32)
{
	I386_SREG desc;
	I386_CALL_GATE gate;
	uint8_t SetRPL;
	uint8_t CPL, DPL, RPL;
	uint16_t selector = seg;
	uint32_t offset = off;
	int x;

	if((selector & ~0x03) == 0)
	{
		logerror("CALL (%08x): Selector is null.\n",m_pc);
		FAULT(FAULT_GP,0) // #GP(0)
	}
	if(selector & 0x04)
	{
		if((selector & ~0x07) > m_ldtr.limit)
		{
			logerror("CALL: Selector is past LDT limit.\n");
			FAULT(FAULT_GP,selector & ~0x03) // #GP(selector)
		}
	}
	else
	{
		if((selector & ~0x07) > m_gdtr.limit)
		{
			logerror("CALL: Selector is past GDT limit.\n");
			FAULT(FAULT_GP,selector & ~0x03) // #GP(selector)
		}
	}

	/* Determine segment type */
	memset(&desc, 0, sizeof(desc));
	desc.selector = selector;
	i386_load_protected_mode_segment(&desc,nullptr);
	CPL = m_CPL; // current privilege level
	DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
	RPL = selector & 0x03; // requested privilege level
	if((desc.flags & 0x0018) == 0x18) // is a code segment
	{
		if(desc.flags & 0x0004)
		{
			/* conforming */
			if(DPL > CPL)
			{
				logerror("CALL: Code segment DPL %i is greater than CPL %i\n",DPL,CPL);
				FAULT(FAULT_GP,selector & ~0x03) // #GP(selector)
			}
		}
		else
		{
			/*
non-conforming */
			if(RPL > CPL)
			{
				logerror("CALL: RPL %i is greater than CPL %i\n",RPL,CPL);
				FAULT(FAULT_GP,selector & ~0x03) // #GP(selector)
			}
			if(DPL != CPL)
			{
				logerror("CALL: Code segment DPL %i is not equal to CPL %i\n",DPL,CPL);
				FAULT(FAULT_GP,selector & ~0x03) // #GP(selector)
			}
		}
		SetRPL = 1;
		if((desc.flags & 0x0080) == 0)
		{
			logerror("CALL (%08x): Code segment is not present.\n",m_pc);
			FAULT(FAULT_NP,selector & ~0x03) // #NP(selector)
		}
		// verify the stack can hold the return address before pushing anything
		if (operand32 != 0) // if 32-bit
		{
			uint32_t offset = (STACK_32BIT ? REG32(ESP) - 8 : (REG16(SP) - 8) & 0xffff);
			if(i386_limit_check(SS, offset))
			{
				logerror("CALL (%08x): Stack has no room for return address.\n",m_pc);
				FAULT(FAULT_SS,0) // #SS(0)
			}
		}
		else
		{
			uint32_t offset = (STACK_32BIT ? REG32(ESP) - 4 : (REG16(SP) - 4) & 0xffff);
			if(i386_limit_check(SS, offset))
			{
				logerror("CALL (%08x): Stack has no room for return address.\n",m_pc);
				FAULT(FAULT_SS,0) // #SS(0)
			}
		}
		if(offset > desc.limit)
		{
			logerror("CALL: EIP is past segment limit.\n");
			FAULT(FAULT_GP,0) // #GP(0)
		}
	}
	else
	{
		/* special segment type */
		if(desc.flags & 0x0010)
		{
			logerror("CALL: Segment is a data segment.\n");
			FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
		}
		else
		{
			switch(desc.flags & 0x000f)
			{
			case 0x01: // Available 286 TSS
			case 0x09: // Available 386 TSS
				logerror("CALL: Available TSS at %08x\n",m_pc);
				if(DPL < CPL)
				{
					logerror("CALL: TSS: DPL is less than CPL.\n");
					FAULT(FAULT_TS,selector & ~0x03) // #TS(selector)
				}
				if(DPL < RPL)
				{
					logerror("CALL: TSS: DPL is less than RPL.\n");
					FAULT(FAULT_TS,selector & ~0x03) // #TS(selector)
				}
				if(desc.flags & 0x0002)
				{
					logerror("CALL: TSS: TSS is busy.\n");
					FAULT(FAULT_TS,selector & ~0x03) // #TS(selector)
				}
				if((desc.flags & 0x0080) == 0)
				{
					logerror("CALL: TSS: Segment %02x is not present.\n",selector);
					FAULT(FAULT_NP,selector & ~0x03) // #NP(selector)
				}
				// nested=1: CALL-style switch links back to the outgoing task
				if(desc.flags & 0x08)
					i386_task_switch(desc.selector,1);
				else
					i286_task_switch(desc.selector,1);
				return;
			case 0x04: // 286 call gate
			case 0x0c: // 386 call gate
				// the gate type overrides the instruction's operand size
				if((desc.flags & 0x000f) == 0x04)
					operand32 = 0;
				else
					operand32 = 1;
				memset(&gate, 0, sizeof(gate));
				gate.segment = selector;
				i386_load_call_gate(&gate);
				DPL = gate.dpl;
				//logerror("CALL: Call gate at %08x (%i parameters)\n",m_pc,gate.dword_count);
				if(DPL < CPL)
				{
					logerror("CALL: Call gate DPL %i is less than CPL %i.\n",DPL,CPL);
					FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
				}
				if(DPL < RPL)
				{
					logerror("CALL: Call gate DPL %i is less than RPL %i.\n",DPL,RPL);
					FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
				}
				if(gate.present == 0)
				{
					logerror("CALL: Call gate is not present.\n");
					FAULT(FAULT_NP,desc.selector & ~0x03) // #GP(selector)
				}
				desc.selector = gate.selector;
				if((gate.selector & ~0x03) == 0)
				{
					logerror("CALL: Call gate: Segment is null.\n");
					FAULT(FAULT_GP,0) // #GP(0)
				}
				if(desc.selector & 0x04)
				{
					if((desc.selector & ~0x07) > m_ldtr.limit)
					{
						logerror("CALL: Call gate: Segment is past LDT limit\n");
						FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
					}
				}
				else
				{
					if((desc.selector & ~0x07) > m_gdtr.limit)
					{
						logerror("CALL: Call gate: Segment is past GDT limit\n");
						FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
					}
				}
				i386_load_protected_mode_segment(&desc,nullptr);
				if((desc.flags & 0x0018) != 0x18)
				{
					logerror("CALL: Call gate: Segment is not a code segment.\n");
					FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
				}
				DPL = ((desc.flags >> 5) & 0x03);
				if(DPL > CPL)
				{
					logerror("CALL: Call gate: Segment DPL %i is greater than CPL %i.\n",DPL,CPL);
					FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
				}
				if((desc.flags & 0x0080) == 0)
				{
					logerror("CALL (%08x): Code segment is not present.\n",m_pc);
					FAULT(FAULT_NP,desc.selector & ~0x03) // #NP(selector)
				}
				if(DPL < CPL && (desc.flags & 0x0004) == 0)
				{
					I386_SREG stack;
					I386_SREG temp;
					uint32_t oldSS,oldESP;
					/* more privilege */
					/* Check new SS segment for privilege level from TSS */
					memset(&stack, 0, sizeof(stack));
					stack.selector = i386_get_stack_segment(DPL);
					i386_load_protected_mode_segment(&stack,nullptr);
					if((stack.selector & ~0x03) == 0)
					{
						logerror("CALL: Call gate: TSS selector is null\n");
						FAULT(FAULT_TS,0) // #TS(0)
					}
					if(stack.selector & 0x04)
					{
						if((stack.selector & ~0x07) > m_ldtr.limit)
						{
							logerror("CALL: Call gate: TSS selector is past LDT limit\n");
							FAULT(FAULT_TS,stack.selector) // #TS(SS selector)
						}
					}
					else
					{
						if((stack.selector & ~0x07) > m_gdtr.limit)
						{
							logerror("CALL: Call gate: TSS selector is past GDT limit\n");
							FAULT(FAULT_TS,stack.selector) // #TS(SS selector)
						}
					}
					if((stack.selector & 0x03) != DPL)
					{
						logerror("CALL: Call gate: Stack selector RPL does not equal code segment DPL %i\n",DPL);
						FAULT(FAULT_TS,stack.selector) // #TS(SS selector)
					}
					if(((stack.flags >> 5) & 0x03) != DPL)
					{
						logerror("CALL: Call gate: Stack DPL does not equal code segment DPL %i\n",DPL);
						FAULT(FAULT_TS,stack.selector) // #TS(SS selector)
					}
					if((stack.flags & 0x0018) != 0x10 && (stack.flags & 0x0002))
					{
						logerror("CALL: Call gate: Stack segment is not a writable data segment\n");
						FAULT(FAULT_TS,stack.selector) // #TS(SS selector)
					}
					if((stack.flags & 0x0080) == 0)
					{
						logerror("CALL: Call gate: Stack segment is not present\n");
						FAULT(FAULT_SS,stack.selector) // #SS(SS selector)
					}
					uint32_t newESP = i386_get_stack_ptr(DPL);
					// 16-bit stack segment: only SP is meaningful
					if(!stack.d)
					{
						newESP &= 0xffff;
					}
					if(operand32 != 0)
					{
						if(newESP < ((gate.dword_count & 0x1f) + 16))
						{
							logerror("CALL: Call gate: New stack has no room for 32-bit return address and parameters.\n");
							FAULT(FAULT_SS,0) // #SS(0)
						}
						if(gate.offset > desc.limit)
						{
							logerror("CALL: Call gate: EIP is past segment limit.\n");
							FAULT(FAULT_GP,0) // #GP(0)
						}
					}
					else
					{
						if(newESP < ((gate.dword_count & 0x1f) + 8))
						{
							logerror("CALL: Call gate: New stack has no room for 16-bit return address and parameters.\n");
							FAULT(FAULT_SS,0) // #SS(0)
						}
						if((gate.offset & 0xffff) > desc.limit)
						{
							logerror("CALL: Call gate: IP is past segment limit.\n");
							FAULT(FAULT_GP,0) // #GP(0)
						}
					}
					selector = gate.selector;
					offset = gate.offset;

					m_CPL = (stack.flags >> 5) & 0x03;
					/* check for page fault at new stack */
					// done before SS is switched so a #PF doesn't leave a half-loaded stack
					WRITE_TEST(stack.base+newESP-1);
					/* switch to new stack */
					oldSS = m_sreg[SS].selector;
					m_sreg[SS].selector = i386_get_stack_segment(m_CPL);
					if(operand32 != 0)
					{
						oldESP = REG32(ESP);
					}
					else
					{
						oldESP = REG16(SP);
					}
					i386_load_segment_descriptor(SS );
					REG32(ESP) = newESP;

					// push the outer stack pointer onto the inner stack
					if(operand32 != 0)
					{
						PUSH32SEG(oldSS);
						PUSH32(oldESP);
					}
					else
					{
						PUSH16(oldSS);
						PUSH16(oldESP & 0xffff);
					}

					memset(&temp, 0, sizeof(temp));
					temp.selector = oldSS;
					i386_load_protected_mode_segment(&temp,nullptr);
					/* copy parameters from old stack to new stack */
					for(x=(gate.dword_count & 0x1f)-1;x>=0;x--)
					{
						uint32_t addr = oldESP + (operand32?(x*4):(x*2));
						addr = temp.base + (temp.d?addr:(addr&0xffff));
						if(operand32)
							PUSH32(READ32(addr));
						else
							PUSH16(READ16(addr));
					}
					SetRPL = 1;
				}
				else
				{
					/* same privilege */
					if (operand32 != 0) // if 32-bit
					{
						uint32_t stkoff = (STACK_32BIT ? REG32(ESP) - 8 : (REG16(SP) - 8) & 0xffff);
						if(i386_limit_check(SS, stkoff))
						{
							logerror("CALL: Stack has no room for return address.\n");
							FAULT(FAULT_SS,0) // #SS(0)
						}
						selector = gate.selector;
						offset = gate.offset;
					}
					else
					{
						uint32_t stkoff = (STACK_32BIT ? REG32(ESP) - 4 : (REG16(SP) - 4) & 0xffff);
						if(i386_limit_check(SS, stkoff))
						{
							logerror("CALL: Stack has no room for return address.\n");
							FAULT(FAULT_SS,0) // #SS(0)
						}
						selector = gate.selector;
						offset = gate.offset & 0xffff;
					}
					if(offset > desc.limit)
					{
						logerror("CALL: EIP is past segment limit.\n");
						FAULT(FAULT_GP,0) // #GP(0)
					}
					SetRPL = 1;
				}
				break;
			case 0x05: // task gate
				logerror("CALL: Task gate at %08x\n",m_pc);
				memset(&gate, 0, sizeof(gate));
				gate.segment = selector;
				i386_load_call_gate(&gate);
				DPL = gate.dpl;
				if(DPL < CPL)
				{
					logerror("CALL: Task Gate: Gate DPL is less than CPL.\n");
					FAULT(FAULT_TS,selector & ~0x03) // #TS(selector)
				}
				if(DPL < RPL)
				{
					logerror("CALL: Task Gate: Gate DPL is less than RPL.\n");
					FAULT(FAULT_TS,selector & ~0x03) // #TS(selector)
				}
				if((gate.ar & 0x0080) == 0)
				{
					logerror("CALL: Task Gate: Gate is not present.\n");
					FAULT(FAULT_NP,selector & ~0x03) // #NP(selector)
				}
				/* Check the TSS that the task gate points to */
				desc.selector = gate.selector;
				i386_load_protected_mode_segment(&desc,nullptr);
				if(gate.selector & 0x04)
				{
					logerror("CALL: Task Gate: TSS is not global.\n");
					FAULT(FAULT_TS,gate.selector & ~0x03) // #TS(selector)
				}
				else
				{
					if((gate.selector & ~0x07) > m_gdtr.limit)
					{
						logerror("CALL: Task Gate: TSS is past GDT limit.\n");
						FAULT(FAULT_TS,gate.selector & ~0x03) // #TS(selector)
					}
				}
				if(desc.flags & 0x0002)
				{
					logerror("CALL: Task Gate: TSS is busy.\n");
					FAULT(FAULT_TS,gate.selector & ~0x03) // #TS(selector)
				}
				if((desc.flags & 0x0080) == 0)
				{
					logerror("CALL: Task Gate: TSS is not present.\n");
					FAULT(FAULT_NP,gate.selector & ~0x03) // #TS(selector)
				}
				if(desc.flags & 0x08)
					i386_task_switch(desc.selector,1); // with nesting
				else
					i286_task_switch(desc.selector,1);
				return;
			default:
				logerror("CALL: Invalid special segment type (%i) to jump to.\n",desc.flags & 0x000f);
				FAULT(FAULT_GP,selector & ~0x07) // #GP(selector)
			}
		}
	}

	if(SetRPL != 0)
		selector = (selector & ~0x03) | m_CPL;

	// snapshot ESP so a fault raised by the pushes can be unwound cleanly
	uint32_t tempSP = REG32(ESP);
	try
	{
		// this is ugly but the alternative is worse
		if(operand32 == 0)
		{
			/* 16-bit operand size */
			PUSH16(m_sreg[CS].selector );
			PUSH16(m_eip & 0x0000ffff );
			m_sreg[CS].selector = selector;
			m_performed_intersegment_jump = 1;
			m_eip = offset;
			i386_load_segment_descriptor(CS);
		}
		else
		{
			/* 32-bit operand size */
			PUSH32SEG(m_sreg[CS].selector );
			PUSH32(m_eip );
			m_sreg[CS].selector = selector;
			m_performed_intersegment_jump = 1;
			m_eip = offset;
			i386_load_segment_descriptor(CS );
		}
	}
	catch(uint64_t e)
	{
		REG32(ESP) = tempSP;
		throw e;
	}

	CHANGE_PC(m_eip);
}

// Far RET in protected mode. 'count' is the immediate byte count popped
// after the return address (RETF imm16); 'operand32' selects 16/32-bit
// return address size. Handles same-privilege and outer-privilege returns.
void i386_device::i386_protected_mode_retf(uint8_t count, uint8_t operand32)
{
	uint32_t newCS, newEIP;
	I386_SREG desc;
	uint8_t CPL, RPL, DPL;

	uint32_t ea = i386_translate(SS, (STACK_32BIT)?REG32(ESP):REG16(SP), 0);

	if(operand32 == 0)
	{
		newEIP = READ16(ea) & 0xffff;
		newCS = READ16(ea+2) & 0xffff;
	}
	else
	{
		newEIP = READ32(ea);
		newCS = READ32(ea+4) & 0xffff;
	}

	memset(&desc, 0, sizeof(desc));
	desc.selector = newCS;
	i386_load_protected_mode_segment(&desc,nullptr);
	CPL = m_CPL; // current privilege level
	DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
	RPL = newCS & 0x03;

	if(RPL < CPL)
	{
		logerror("RETF (%08x): Return segment RPL is less than CPL.\n",m_pc);
		FAULT(FAULT_GP,newCS & ~0x03)
	}

	if(RPL == CPL)
	{
		/* same privilege level */
		if((newCS & ~0x03) == 0)
		{
			logerror("RETF: Return segment is null.\n");
			FAULT(FAULT_GP,0)
		}
		if(newCS & 0x04)
		{
			if((newCS & ~0x07) >= m_ldtr.limit)
			{
				logerror("RETF: Return segment is past LDT limit.\n");
				FAULT(FAULT_GP,newCS & ~0x03)
			}
		}
		else
		{
			if((newCS & ~0x07) >= m_gdtr.limit)
			{
				logerror("RETF: Return segment is past GDT limit.\n");
				FAULT(FAULT_GP,newCS & ~0x03)
			}
		}
		if((desc.flags &
0x0018) != 0x0018)
		{
			logerror("RETF: Return segment is not a code segment.\n");
			FAULT(FAULT_GP,newCS & ~0x03)
		}
		if(desc.flags & 0x0004)
		{
			// conforming code segment
			if(DPL > RPL)
			{
				logerror("RETF: Conforming code segment DPL is greater than CS RPL.\n");
				FAULT(FAULT_GP,newCS & ~0x03)
			}
		}
		else
		{
			// non-conforming code segment
			if(DPL != RPL)
			{
				logerror("RETF: Non-conforming code segment DPL does not equal CS RPL.\n");
				FAULT(FAULT_GP,newCS & ~0x03)
			}
		}
		if((desc.flags & 0x0080) == 0)
		{
			logerror("RETF (%08x): Code segment is not present.\n",m_pc);
			FAULT(FAULT_NP,newCS & ~0x03)
		}
		if(newEIP > desc.limit)
		{
			logerror("RETF: EIP is past code segment limit.\n");
			FAULT(FAULT_GP,0)
		}
		if(operand32 == 0)
		{
			uint32_t offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
			if(i386_limit_check(SS,offset+count+3) != 0)
			{
				logerror("RETF (%08x): SP is past stack segment limit.\n",m_pc);
				FAULT(FAULT_SS,0)
			}
		}
		else
		{
			uint32_t offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
			if(i386_limit_check(SS,offset+count+7) != 0)
			{
				logerror("RETF: ESP is past stack segment limit.\n");
				FAULT(FAULT_SS,0)
			}
		}
		// pop the return address plus the immediate count
		if(STACK_32BIT)
			REG32(ESP) += (operand32 ? 8 : 4) + count;
		else
			REG16(SP) += (operand32 ? 8 : 4) + count;
	}
	else if(RPL > CPL)
	{
		uint32_t newSS, newESP; // when changing privilege
		/* outer privilege level */
		if(operand32 == 0)
		{
			uint32_t offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
			if(i386_limit_check(SS,offset+count+7) != 0)
			{
				logerror("RETF (%08x): SP is past stack segment limit.\n",m_pc);
				FAULT(FAULT_SS,0)
			}
		}
		else
		{
			uint32_t offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
			if(i386_limit_check(SS,offset+count+15) != 0)
			{
				logerror("RETF: ESP is past stack segment limit.\n");
				FAULT(FAULT_SS,0)
			}
		}
		/* Check CS selector and descriptor */
		if((newCS & ~0x03) == 0)
		{
			logerror("RETF: CS segment is null.\n");
			FAULT(FAULT_GP,0)
		}
		if(newCS & 0x04)
		{
			if((newCS & ~0x07) >= m_ldtr.limit)
			{
				logerror("RETF: CS segment selector is past LDT limit.\n");
				FAULT(FAULT_GP,newCS & ~0x03)
			}
		}
		else
		{
			if((newCS & ~0x07) >= m_gdtr.limit)
			{
				logerror("RETF: CS segment selector is past GDT limit.\n");
				FAULT(FAULT_GP,newCS & ~0x03)
			}
		}
		if((desc.flags & 0x0018) != 0x0018)
		{
			logerror("RETF: CS segment is not a code segment.\n");
			FAULT(FAULT_GP,newCS & ~0x03)
		}
		if(desc.flags & 0x0004)
		{
			if(DPL > RPL)
			{
				logerror("RETF: Conforming CS segment DPL is greater than return selector RPL.\n");
				FAULT(FAULT_GP,newCS & ~0x03)
			}
		}
		else
		{
			if(DPL != RPL)
			{
				logerror("RETF: Non-conforming CS segment DPL is not equal to return selector RPL.\n");
				FAULT(FAULT_GP,newCS & ~0x03)
			}
		}
		if((desc.flags & 0x0080) == 0)
		{
			logerror("RETF: CS segment is not present.\n");
			FAULT(FAULT_NP,newCS & ~0x03)
		}
		if(newEIP > desc.limit)
		{
			logerror("RETF: EIP is past return CS segment limit.\n");
			FAULT(FAULT_GP,0)
		}

		// the outer SS:ESP sits above the return address and the imm bytes
		if(operand32 == 0)
		{
			ea += count+4;
			newESP = READ16(ea) & 0xffff;
			newSS = READ16(ea+2) & 0xffff;
		}
		else
		{
			ea += count+8;
			newESP = READ32(ea);
			newSS = READ32(ea+4) & 0xffff;
		}

		/* Check SS selector and descriptor */
		// NOTE: 'desc' is reused here, so the CS descriptor is no longer available below
		desc.selector = newSS;
		i386_load_protected_mode_segment(&desc,nullptr);
		DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
		if((newSS & ~0x07) == 0)
		{
			logerror("RETF: SS segment is null.\n");
			FAULT(FAULT_GP,0)
		}
		if(newSS & 0x04)
		{
			if((newSS & ~0x07) > m_ldtr.limit)
			{
				logerror("RETF (%08x): SS segment selector is past LDT limit.\n",m_pc);
				FAULT(FAULT_GP,newSS & ~0x03)
			}
		}
		else
		{
			if((newSS & ~0x07) > m_gdtr.limit)
			{
				logerror("RETF (%08x): SS segment selector is past GDT limit.\n",m_pc);
				FAULT(FAULT_GP,newSS & ~0x03)
			}
		}
		if((newSS & 0x03) != RPL)
		{
			logerror("RETF: SS segment RPL is not equal to CS segment RPL.\n");
			FAULT(FAULT_GP,newSS & ~0x03)
		}
		if((desc.flags & 0x0018) != 0x0010 || (desc.flags & 0x0002) == 0)
		{
			logerror("RETF: SS segment is not a writable data segment.\n");
			FAULT(FAULT_GP,newSS & ~0x03)
		}
		if(((desc.flags >> 5) & 0x03) != RPL)
		{
			logerror("RETF: SS DPL is not equal to CS segment RPL.\n");
			FAULT(FAULT_GP,newSS & ~0x03)
		}
		if((desc.flags & 0x0080) == 0)
		{
			logerror("RETF: SS segment is not present.\n");
			FAULT(FAULT_GP,newSS & ~0x03)
		}
		m_CPL = newCS & 0x03;

		/* Load new SS:(E)SP */
		if(operand32 == 0)
			REG16(SP) = (newESP+count) & 0xffff;
		else
			REG32(ESP) = newESP+count;
		m_sreg[SS].selector = newSS;
		i386_load_segment_descriptor(SS );

		/* Check that DS, ES, FS and GS are valid for the new privilege level */
		i386_check_sreg_validity(DS);
		i386_check_sreg_validity(ES);
		i386_check_sreg_validity(FS);
		i386_check_sreg_validity(GS);
	}

	/* Load new CS:(E)IP */
	if(operand32 == 0)
		m_eip = newEIP & 0xffff;
	else
		m_eip = newEIP;
	m_sreg[CS].selector = newCS;
	i386_load_segment_descriptor(CS );
	CHANGE_PC(m_eip);
}

// IRET in protected mode: handles V86-mode returns, nested-task returns
// (NT set, via the TSS back link), returns to V86 mode, and same/outer
// privilege level returns. 'operand32' selects 16/32-bit frame size.
void i386_device::i386_protected_mode_iret(int operand32)
{
	uint32_t newCS, newEIP;
	uint32_t newSS, newESP; // when changing privilege
	I386_SREG desc,stack;
	uint8_t CPL, RPL, DPL;
	uint32_t newflags;
	uint8_t IOPL = m_IOP1 | (m_IOP2 << 1);

	CPL = m_CPL;
	uint32_t ea = i386_translate(SS, (STACK_32BIT)?REG32(ESP):REG16(SP), 0);
	if(operand32 == 0)
	{
		newEIP = READ16(ea) & 0xffff;
		newCS = READ16(ea+2) & 0xffff;
		newflags = READ16(ea+4) & 0xffff;
	}
	else
	{
		newEIP = READ32(ea);
		newCS = READ32(ea+4) & 0xffff;
		newflags = READ32(ea+8);
	}

	if(V8086_MODE)
	{
		uint32_t oldflags = get_flags();
		if(IOPL !=
3) + { + logerror("IRET (%08x): Is in Virtual 8086 mode and IOPL != 3.\n",m_pc); + FAULT(FAULT_GP,0) + } + if(operand32 == 0) + { + m_eip = newEIP & 0xffff; + m_sreg[CS].selector = newCS & 0xffff; + newflags &= ~(3<<12); + newflags |= (((oldflags>>12)&3)<<12); // IOPL cannot be changed in V86 mode + set_flags((newflags & 0xffff) | (oldflags & ~0xffff)); + REG16(SP) += 6; + } + else + { + m_eip = newEIP; + m_sreg[CS].selector = newCS & 0xffff; + newflags &= ~(3<<12); + newflags |= 0x20000 | (((oldflags>>12)&3)<<12); // IOPL and VM cannot be changed in V86 mode + set_flags(newflags); + REG32(ESP) += 12; + } + } + else if(NESTED_TASK) + { + uint32_t task = READ32(m_task.base); + /* Task Return */ + logerror("IRET (%08x): Nested task return.\n",m_pc); + /* Check back-link selector in TSS */ + if(task & 0x04) + { + logerror("IRET: Task return: Back-linked TSS is not in GDT.\n"); + FAULT(FAULT_TS,task & ~0x03) + } + if((task & ~0x07) >= m_gdtr.limit) + { + logerror("IRET: Task return: Back-linked TSS is not in GDT.\n"); + FAULT(FAULT_TS,task & ~0x03) + } + memset(&desc, 0, sizeof(desc)); + desc.selector = task; + i386_load_protected_mode_segment(&desc,nullptr); + if((desc.flags & 0x001f) != 0x000b) + { + logerror("IRET (%08x): Task return: Back-linked TSS is not a busy TSS.\n",m_pc); + FAULT(FAULT_TS,task & ~0x03) + } + if((desc.flags & 0x0080) == 0) + { + logerror("IRET: Task return: Back-linked TSS is not present.\n"); + FAULT(FAULT_NP,task & ~0x03) + } + if(desc.flags & 0x08) + i386_task_switch(desc.selector,0); + else + i286_task_switch(desc.selector,0); + return; + } + else + { + if(newflags & 0x00020000) // if returning to virtual 8086 mode + { + // 16-bit iret can't reach here + newESP = READ32(ea+12); + newSS = READ32(ea+16) & 0xffff; + /* Return to v86 mode */ + //logerror("IRET (%08x): Returning to Virtual 8086 mode.\n",m_pc); + if(CPL != 0) + { + uint32_t oldflags = get_flags(); + newflags = (newflags & ~0x00003000) | (oldflags & 0x00003000); + if(CPL > IOPL) 
+ newflags = (newflags & ~0x200 ) | (oldflags & 0x200); + } + set_flags(newflags); + m_eip = POP32() & 0xffff; // high 16 bits are ignored + m_sreg[CS].selector = POP32() & 0xffff; + POP32(); // already set flags + newESP = POP32(); + newSS = POP32() & 0xffff; + m_sreg[ES].selector = POP32() & 0xffff; + m_sreg[DS].selector = POP32() & 0xffff; + m_sreg[FS].selector = POP32() & 0xffff; + m_sreg[GS].selector = POP32() & 0xffff; + REG32(ESP) = newESP; // all 32 bits are loaded + m_sreg[SS].selector = newSS; + i386_load_segment_descriptor(ES); + i386_load_segment_descriptor(DS); + i386_load_segment_descriptor(FS); + i386_load_segment_descriptor(GS); + i386_load_segment_descriptor(SS); + m_CPL = 3; // Virtual 8086 tasks are always run at CPL 3 + } + else + { + if(operand32 == 0) + { + uint32_t offset = (STACK_32BIT ? REG32(ESP) : REG16(SP)); + if(i386_limit_check(SS,offset+3) != 0) + { + logerror("IRET: Data on stack is past SS limit.\n"); + FAULT(FAULT_SS,0) + } + } + else + { + uint32_t offset = (STACK_32BIT ? REG32(ESP) : REG16(SP)); + if(i386_limit_check(SS,offset+7) != 0) + { + logerror("IRET: Data on stack is past SS limit.\n"); + FAULT(FAULT_SS,0) + } + } + RPL = newCS & 0x03; + if(RPL < CPL) + { + logerror("IRET (%08x): Return CS RPL is less than CPL.\n",m_pc); + FAULT(FAULT_GP,newCS & ~0x03) + } + if(RPL == CPL) + { + /* return to same privilege level */ + if(operand32 == 0) + { + uint32_t offset = (STACK_32BIT ? REG32(ESP) : REG16(SP)); + if(i386_limit_check(SS,offset+5) != 0) + { + logerror("IRET (%08x): Data on stack is past SS limit.\n",m_pc); + FAULT(FAULT_SS,0) + } + } + else + { + uint32_t offset = (STACK_32BIT ? 
REG32(ESP) : REG16(SP)); + if(i386_limit_check(SS,offset+11) != 0) + { + logerror("IRET (%08x): Data on stack is past SS limit.\n",m_pc); + FAULT(FAULT_SS,0) + } + } + if((newCS & ~0x03) == 0) + { + logerror("IRET: Return CS selector is null.\n"); + FAULT(FAULT_GP,0) + } + if(newCS & 0x04) + { + if((newCS & ~0x07) >= m_ldtr.limit) + { + logerror("IRET: Return CS selector (%04x) is past LDT limit.\n",newCS); + FAULT(FAULT_GP,newCS & ~0x03) + } + } + else + { + if((newCS & ~0x07) >= m_gdtr.limit) + { + logerror("IRET: Return CS selector is past GDT limit.\n"); + FAULT(FAULT_GP,newCS & ~0x03) + } + } + memset(&desc, 0, sizeof(desc)); + desc.selector = newCS; + i386_load_protected_mode_segment(&desc,nullptr); + DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level + RPL = newCS & 0x03; + if((desc.flags & 0x0018) != 0x0018) + { + logerror("IRET (%08x): Return CS segment is not a code segment.\n",m_pc); + FAULT(FAULT_GP,newCS & ~0x07) + } + if(desc.flags & 0x0004) + { + if(DPL > RPL) + { + logerror("IRET: Conforming return CS DPL is greater than CS RPL.\n"); + FAULT(FAULT_GP,newCS & ~0x03) + } + } + else + { + if(DPL != RPL) + { + logerror("IRET: Non-conforming return CS DPL is not equal to CS RPL.\n"); + FAULT(FAULT_GP,newCS & ~0x03) + } + } + if((desc.flags & 0x0080) == 0) + { + logerror("IRET: (%08x) Return CS segment is not present.\n", m_pc); + FAULT(FAULT_NP,newCS & ~0x03) + } + if(newEIP > desc.limit) + { + logerror("IRET: Return EIP is past return CS limit.\n"); + FAULT(FAULT_GP,0) + } + + if(CPL != 0) + { + uint32_t oldflags = get_flags(); + newflags = (newflags & ~0x00003000) | (oldflags & 0x00003000); + if(CPL > IOPL) + newflags = (newflags & ~0x200 ) | (oldflags & 0x200); + } + + if(operand32 == 0) + { + m_eip = newEIP; + m_sreg[CS].selector = newCS; + set_flags(newflags); + REG16(SP) += 6; + } + else + { + m_eip = newEIP; + m_sreg[CS].selector = newCS & 0xffff; + set_flags(newflags); + REG32(ESP) += 12; + } + } + else if(RPL > CPL) + { + /* return to 
outer privilege level */ + memset(&desc, 0, sizeof(desc)); + desc.selector = newCS; + i386_load_protected_mode_segment(&desc,nullptr); + DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level + RPL = newCS & 0x03; + if(operand32 == 0) + { + uint32_t offset = (STACK_32BIT ? REG32(ESP) : REG16(SP)); + if(i386_limit_check(SS,offset+9) != 0) + { + logerror("IRET: SP is past SS limit.\n"); + FAULT(FAULT_SS,0) + } + } + else + { + uint32_t offset = (STACK_32BIT ? REG32(ESP) : REG16(SP)); + if(i386_limit_check(SS,offset+19) != 0) + { + logerror("IRET: ESP is past SS limit.\n"); + FAULT(FAULT_SS,0) + } + } + /* Check CS selector and descriptor */ + if((newCS & ~0x03) == 0) + { + logerror("IRET: Return CS selector is null.\n"); + FAULT(FAULT_GP,0) + } + if(newCS & 0x04) + { + if((newCS & ~0x07) >= m_ldtr.limit) + { + logerror("IRET: Return CS selector is past LDT limit.\n"); + FAULT(FAULT_GP,newCS & ~0x03); + } + } + else + { + if((newCS & ~0x07) >= m_gdtr.limit) + { + logerror("IRET: Return CS selector is past GDT limit.\n"); + FAULT(FAULT_GP,newCS & ~0x03); + } + } + if((desc.flags & 0x0018) != 0x0018) + { + logerror("IRET: Return CS segment is not a code segment.\n"); + FAULT(FAULT_GP,newCS & ~0x03) + } + if(desc.flags & 0x0004) + { + if(DPL > RPL) + { + logerror("IRET: Conforming return CS DPL is greater than CS RPL.\n"); + FAULT(FAULT_GP,newCS & ~0x03) + } + } + else + { + if(DPL != RPL) + { + logerror("IRET: Non-conforming return CS DPL does not equal CS RPL.\n"); + FAULT(FAULT_GP,newCS & ~0x03) + } + } + if((desc.flags & 0x0080) == 0) + { + logerror("IRET: Return CS segment is not present.\n"); + FAULT(FAULT_NP,newCS & ~0x03) + } + + /* Check SS selector and descriptor */ + if(operand32 == 0) + { + newESP = READ16(ea+6) & 0xffff; + newSS = READ16(ea+8) & 0xffff; + } + else + { + newESP = READ32(ea+12); + newSS = READ32(ea+16) & 0xffff; + } + memset(&stack, 0, sizeof(stack)); + stack.selector = newSS; + i386_load_protected_mode_segment(&stack,nullptr); + DPL = 
(stack.flags >> 5) & 0x03; + if((newSS & ~0x03) == 0) + { + logerror("IRET: Return SS selector is null.\n"); + FAULT(FAULT_GP,0) + } + if(newSS & 0x04) + { + if((newSS & ~0x07) >= m_ldtr.limit) + { + logerror("IRET: Return SS selector is past LDT limit.\n"); + FAULT(FAULT_GP,newSS & ~0x03); + } + } + else + { + if((newSS & ~0x07) >= m_gdtr.limit) + { + logerror("IRET: Return SS selector is past GDT limit.\n"); + FAULT(FAULT_GP,newSS & ~0x03); + } + } + if((newSS & 0x03) != RPL) + { + logerror("IRET: Return SS RPL is not equal to return CS RPL.\n"); + FAULT(FAULT_GP,newSS & ~0x03) + } + if((stack.flags & 0x0018) != 0x0010) + { + logerror("IRET: Return SS segment is not a data segment.\n"); + FAULT(FAULT_GP,newSS & ~0x03) + } + if((stack.flags & 0x0002) == 0) + { + logerror("IRET: Return SS segment is not writable.\n"); + FAULT(FAULT_GP,newSS & ~0x03) + } + if(DPL != RPL) + { + logerror("IRET: Return SS DPL does not equal SS RPL.\n"); + FAULT(FAULT_GP,newSS & ~0x03) + } + if((stack.flags & 0x0080) == 0) + { + logerror("IRET: Return SS segment is not present.\n"); + FAULT(FAULT_NP,newSS & ~0x03) + } + if(newEIP > desc.limit) + { + logerror("IRET: EIP is past return CS limit.\n"); + FAULT(FAULT_GP,0) + } + +// if(operand32 == 0) +// REG16(SP) += 10; +// else +// REG32(ESP) += 20; + + // IOPL can only change if CPL is zero + if(CPL != 0) + { + uint32_t oldflags = get_flags(); + newflags = (newflags & ~0x00003000) | (oldflags & 0x00003000); + if(CPL > IOPL) + newflags = (newflags & ~0x200 ) | (oldflags & 0x200); + } + + if(operand32 == 0) + { + m_eip = newEIP & 0xffff; + m_sreg[CS].selector = newCS; + set_flags(newflags); + REG16(SP) = newESP & 0xffff; + m_sreg[SS].selector = newSS; + } + else + { + m_eip = newEIP; + m_sreg[CS].selector = newCS & 0xffff; + set_flags(newflags); + REG32(ESP) = newESP; + m_sreg[SS].selector = newSS & 0xffff; + } + m_CPL = newCS & 0x03; + i386_load_segment_descriptor(SS); + + /* Check that DS, ES, FS and GS are valid for the new privilege 
level */ + i386_check_sreg_validity(DS); + i386_check_sreg_validity(ES); + i386_check_sreg_validity(FS); + i386_check_sreg_validity(GS); + } + } + } + + i386_load_segment_descriptor(CS); + CHANGE_PC(m_eip); +} diff --git a/src/mame/drivers/nforcepc.cpp b/src/mame/drivers/nforcepc.cpp index 476760a3c51..154857f3be7 100644 --- a/src/mame/drivers/nforcepc.cpp +++ b/src/mame/drivers/nforcepc.cpp @@ -22,7 +22,7 @@ #include "emu.h" -#include "cpu/i386/i386.h" +#include "cpu/i386/athlon.h" #include "machine/pci.h" #include "machine/pci-ide.h" #include "machine/intelfsh.h"