-sun4c_mmu: Improved cache behavior, though SunOS and Solaris still fail. [Ryan Holtz]

Ryan Holtz 2020-05-17 08:29:01 +02:00
parent df66223d68
commit b399d71753
3 changed files with 376 additions and 134 deletions
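For orientation, below is a minimal, self-contained sketch of the cache-line and tag arithmetic this commit introduces, i.e. what vaddr_to_cache_line() computes and how cache_fill() assembles a tag. It is illustration only, not part of the commit: the constants are assumptions (a 16K-word cache, consistent with the 0x3fff data mask used in the diff, and the 32-byte line size configured for the 40 MHz machines at the bottom), and the device's member variables are mirrored here as plain constants.

#include <cstdint>

// Assumed geometry (not taken verbatim from the commit): 64 KiB cache, 32-byte lines.
constexpr uint32_t CACHE_MASK         = 0x3fff;                // 16K words of cache data
constexpr uint32_t CACHE_LINE_BYTES   = 32;
constexpr uint32_t CACHE_WORD_SIZE    = CACHE_LINE_BYTES >> 2; // 8 words per line
constexpr uint32_t CACHE_TAG_SHIFT    = 3;                     // log2(CACHE_WORD_SIZE)
constexpr uint32_t CACHE_TAG_MASK     = CACHE_MASK >> CACHE_TAG_SHIFT;
constexpr uint32_t CACHE_TAG_ID_MASK  = 0xfffc;
constexpr uint32_t CACHE_TAG_ID_SHIFT = 12;

// Word-offset virtual address -> index into the tag RAM, as in vaddr_to_cache_line().
uint32_t vaddr_to_cache_line(uint32_t vaddr)
{
    return (vaddr >> CACHE_TAG_SHIFT) & CACHE_TAG_MASK;
}

// Tag layout used by cache_fill(): bit 19 = valid, bit 20 = supervisor,
// bit 21 = writable, bits 22 and up = context, low bits = tag ID taken from the vaddr.
uint32_t make_tag(uint32_t vaddr, bool supervisor, bool writable, uint32_t context_masked)
{
    uint32_t tag = 1 << 19;
    tag |= supervisor ? (1 << 20) : 0;
    tag |= writable ? (1 << 21) : 0;
    tag |= (vaddr >> CACHE_TAG_ID_SHIFT) & CACHE_TAG_ID_MASK;
    tag |= context_masked << 22;
    return tag;
}

With these assumed values, word offset 0x345678 lands in line (0x345678 >> 3) & 0x7ff = 0x2cf, and a hit additionally requires the tag ID field to equal (0x345678 >> 12) & 0xfffc = 0x0344, subject to the user/supervisor and context checks performed in cache_fetch().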


@ -33,12 +33,21 @@ DEFINE_DEVICE_TYPE(SUN4C_MMU, sun4c_mmu_device, "sun4c_mmu", "Sun 4c MMU")
#define LOG_TYPE1_TIMEOUT (1U << 12)
#define LOG_UNKNOWN_SPACE (1U << 13)
#define LOG_WRITE_PROTECT (1U << 14)
#define LOG_PARITY (1U << 15)
#define LOG_ALL_ASI (1U << 16) // WARNING: Heavy!
#define LOG_READ_PROTECT (1U << 15)
#define LOG_PARITY (1U << 16)
#define LOG_ALL_ASI (1U << 17) // WARNING: Heavy!
#define LOG_UNKNOWN_ASI (1U << 18)
#define LOG_SEGMENT_FLUSH (1U << 19)
#define LOG_PAGE_FLUSH (1U << 20)
#define LOG_CONTEXT_FLUSH (1U << 21)
#define LOG_CACHE_FILLS (1U << 22)
#define LOG_PAGE_ENTRIES (1U << 23)
#define VERBOSE (0)
#include "logmacro.h"
static FILE* s_mem_log = nullptr;
sun4_mmu_base_device::sun4_mmu_base_device(const machine_config &mconfig, device_type type, const char *tag, device_t *owner, uint32_t clock)
: device_t(mconfig, type, tag, owner, clock)
, m_cpu(*this, finder_base::DUMMY_TAG)
@ -71,8 +80,16 @@ sun4c_mmu_device::sun4c_mmu_device(const machine_config &mconfig, const char *ta
{
}
void sun4_mmu_base_device::device_stop()
{
fclose(s_mem_log);
}
void sun4_mmu_base_device::device_start()
{
s_mem_log = fopen("sun4c_mem.bin", "wb");
m_fpos = 0;
m_type1_r.resolve_safe(0xffffffff);
m_type1_w.resolve_safe();
@ -131,7 +148,18 @@ void sun4_mmu_base_device::device_start()
machine().debugger().console().register_command("l2p", CMDFLAG_NONE, 0, 1, 1, std::bind(&sun4_mmu_base_device::l2p_command, this, _1, _2));
}
m_cache_tag_mask = m_cache_mask >> 3;
m_cache_word_size = m_cache_line_size >> 2;
m_cache_tag_shift = 0;
while ((m_cache_word_size & (1 << m_cache_tag_shift)) == 0)
{
m_cache_tag_shift++;
}
m_cache_vaddr_shift = 14;
m_cache_tag_id_mask = 0xfffc;
m_cache_tag_id_shift = 12;//13 + (m_cache_tag_shift - 2);
m_cache_tag_mask = m_cache_mask >> m_cache_tag_shift;
printf("m_cache_tag_shift %d\n", m_cache_tag_shift);
printf("m_page_mask %08x\n", m_page_mask);
printf("m_seg_entry_shift %08x\n", m_seg_entry_shift);
printf("m_seg_entry_mask %08x\n", m_seg_entry_mask);
@ -199,9 +227,9 @@ void sun4_mmu_base_device::device_timer(emu_timer &timer, device_timer_id id, in
uint32_t sun4_mmu_base_device::fetch_insn(const bool supervisor, const uint32_t offset)
{
if (supervisor)
return insn_data_r<SUPER_INSN>(offset, 0xffffffff);
return insn_data_r<SUPER_INSN, SUPER_MODE>(offset, 0xffffffff);
else
return insn_data_r<USER_INSN>(offset, 0xffffffff);
return insn_data_r<USER_INSN, USER_MODE>(offset, 0xffffffff);
}
uint32_t sun4_mmu_base_device::read_asi(uint8_t asi, uint32_t offset, uint32_t mem_mask)
@ -216,18 +244,19 @@ uint32_t sun4_mmu_base_device::read_asi(uint8_t asi, uint32_t offset, uint32_t m
case 4:
return page_map_r(offset, mem_mask);
case 8:
return insn_data_r<USER_INSN>(offset, mem_mask);
return insn_data_r<USER_INSN, USER_MODE>(offset, mem_mask);
case 9:
return insn_data_r<SUPER_INSN>(offset, mem_mask);
return insn_data_r<SUPER_INSN, SUPER_MODE>(offset, mem_mask);
case 10:
return insn_data_r<USER_DATA>(offset, mem_mask);
return insn_data_r<USER_DATA, USER_MODE>(offset, mem_mask);
case 11:
return insn_data_r<SUPER_DATA>(offset, mem_mask);
return insn_data_r<SUPER_DATA, SUPER_MODE>(offset, mem_mask);
case 12:
case 13:
case 14:
return 0;
default:
LOGMASKED(LOG_UNKNOWN_ASI, "Unknown ASI read: %d, %08x & %08x\n", asi, offset << 2, mem_mask);
return ~0;
}
}
@ -246,17 +275,35 @@ void sun4_mmu_base_device::write_asi(uint8_t asi, uint32_t offset, uint32_t data
case 4:
page_map_w(offset, data, mem_mask);
return;
case 5:
for (uint32_t i = 0x0000; i < (0x1000 >> (m_cache_tag_shift + 2)); i++)
{
segment_flush_w(offset | (i << m_cache_tag_shift));
}
break;
case 6:
for (uint32_t i = 0x0000; i < (0x1000 >> (m_cache_tag_shift + 2)); i++)
{
page_flush_w(offset | (i << m_cache_tag_shift));
}
break;
case 7:
for (uint32_t i = 0x0000; i < (0x1000 >> (m_cache_tag_shift + 2)); i++)
{
context_flush_w(offset | (i << m_cache_tag_shift));
}
break;
case 8:
insn_data_w<USER_INSN>(offset, data, mem_mask);
insn_data_w<USER_INSN, USER_MODE>(offset, data, mem_mask);
return;
case 9:
insn_data_w<SUPER_INSN>(offset, data, mem_mask);
insn_data_w<SUPER_INSN, SUPER_MODE>(offset, data, mem_mask);
return;
case 10:
insn_data_w<USER_DATA>(offset, data, mem_mask);
insn_data_w<USER_DATA, USER_MODE>(offset, data, mem_mask);
return;
case 11:
insn_data_w<SUPER_DATA>(offset, data, mem_mask);
insn_data_w<SUPER_DATA, SUPER_MODE>(offset, data, mem_mask);
return;
case 12:
segment_flush_w(offset);
@ -267,7 +314,16 @@ void sun4_mmu_base_device::write_asi(uint8_t asi, uint32_t offset, uint32_t data
case 14:
context_flush_w(offset);
return;
case 15:
for (uint32_t i = 0x0000; i < (0x1000 >> (m_cache_tag_shift + 2)); i++)
{
const uint32_t vaddr = offset | (i << m_cache_tag_shift);
const uint32_t tag_addr = vaddr_to_cache_line(vaddr);
m_cachetags[tag_addr] &= ~(1 << 19);
}
break;
default:
LOGMASKED(LOG_UNKNOWN_ASI, "Unknown ASI write: %d, %08x = %08x & %08x\n", asi, offset << 2, data, mem_mask);
return;
}
}
@ -277,13 +333,13 @@ uint32_t sun4_mmu_base_device::parity_r(uint32_t offset, uint32_t mem_mask)
if (offset == 0)
{
const uint32_t data = m_parity_err_reg;
logerror("%s: parity_err_reg read: %08x & %08x\n", machine().describe_context(), m_parity_err_reg, mem_mask);
LOGMASKED(LOG_PARITY, "%s: parity_err_reg read: %08x & %08x\n", machine().describe_context(), m_parity_err_reg, mem_mask);
m_parity_err_reg &= ~0xcf;
return data;
}
else if (offset == 1)
{
logerror("%s: memory_err_reg read: %08x & %08x\n", machine().describe_context(), m_memory_err_reg, mem_mask);
LOGMASKED(LOG_PARITY, "%s: memory_err_reg read: %08x & %08x\n", machine().describe_context(), m_memory_err_reg, mem_mask);
return m_memory_err_reg;
}
return 0;
@ -293,43 +349,92 @@ void sun4_mmu_base_device::parity_w(uint32_t offset, uint32_t data, uint32_t mem
{
if (offset == 0)
{
logerror("%s: parity_err_reg write: %08x & %08x\n", machine().describe_context(), data, mem_mask);
LOGMASKED(LOG_PARITY, "%s: parity_err_reg write: %08x & %08x\n", machine().describe_context(), data, mem_mask);
COMBINE_DATA(&m_parity_err_reg);
}
else
{
logerror("%s: memory_err_reg write: %08x & %08x\n", machine().describe_context(), data, mem_mask);
LOGMASKED(LOG_PARITY, "%s: memory_err_reg write: %08x & %08x\n", machine().describe_context(), data, mem_mask);
COMBINE_DATA(&m_memory_err_reg);
}
}
void sun4_mmu_base_device::segment_flush_w(const uint32_t vaddr)
{
logerror("%s: segment_flush_w %08x\n", machine().describe_context(), vaddr);
LOGMASKED(LOG_SEGMENT_FLUSH, "%s: segment_flush_w %08x\n", machine().describe_context(), vaddr);
const uint32_t tag_addr = vaddr_to_cache_line(vaddr);
const uint32_t tag = m_cachetags[tag_addr];
if ((tag & (1 << 19)) != 0 && ((tag >> 22) & m_ctx_mask) == m_context_masked)
{
// Unshifted
// 00SS|SSSS|SSSS|SSPP|PPPP|BBBB|BBBB|BBBB
// 00TT|TTTT|TTTT|TTTT|LLLL|LLLL|LLLL|bbbb
//
// 00ss|ssss|ssss|ss--|LLLL|LLLL|LLLL|----
// Shifted
// SSSS|SSSS|SSSS|PPPP|PPBB|BBBB|BBBB
// TTTT|TTTT|TTTT|TTLL|LLLL|LLLL|LLww
//
// ssss|ssss|ssss|--LL|LLLL|LLLL|LL--
const uint32_t tag_id = (tag >> 4) & m_cache_tag_mask;
if (tag_id == ((vaddr >> 16) & 0xfff))
{
m_cachetags[tag_addr] &= ~(1 << 19);
LOGMASKED(LOG_SEGMENT_FLUSH, "flushing line with tag %08x from vaddr %08x\n", tag, vaddr << 2);
}
}
}
void sun4_mmu_base_device::context_flush_w(const uint32_t vaddr)
{
logerror("%s: context_flush_w %08x\n", machine().describe_context(), vaddr);
const uint32_t cache_addr = (vaddr >> 3) & m_cache_mask;
if ((m_cachetags[cache_addr] & (1 << 19)) != 0)
LOGMASKED(LOG_CONTEXT_FLUSH, "%s: context_flush_w %08x\n", machine().describe_context(), vaddr << 2);
const uint32_t tag_addr = vaddr_to_cache_line(vaddr);
const uint32_t tag = m_cachetags[tag_addr];
if ((tag & (1 << 19)) != 0)
{
if (((m_cachetags[cache_addr] >> 20) & m_ctx_mask) == m_context_masked)
LOGMASKED(LOG_CONTEXT_FLUSH, "tag is valid: %08x (%d vs. %d)\n", tag, ((tag >> 22) & m_ctx_mask), m_context_masked);
if (((tag >> 22) & m_ctx_mask) == m_context_masked && !BIT(tag, 20))
{
const uint32_t paddr = ((cache_addr << 12) & 0x0fffc000) | ((vaddr << 2) & 0x3ff8);
const uint32_t caddr = (vaddr & ~7) & m_cache_mask;
if (paddr < m_populated_ram_words)
const uint32_t tag_id = (tag >> 2) & m_cache_tag_mask;
if (tag_id == ((vaddr >> 14) & 0xfff))
{
memcpy(m_ram_ptr, &m_cachedata[caddr], sizeof(uint32_t) * 8);
LOGMASKED(LOG_CONTEXT_FLUSH, "flushing line with tag %08x from vaddr %08x\n", tag, vaddr << 2);
m_cachetags[tag_addr] &= ~(1 << 19);
}
m_cachetags[cache_addr] &= ~(1 << 19);
}
}
}
void sun4_mmu_base_device::page_flush_w(const uint32_t vaddr)
{
logerror("%s: page_flush_w %08x\n", machine().describe_context(), vaddr);
LOGMASKED(LOG_PAGE_FLUSH, "%s: page_flush_w %08x\n", machine().describe_context(), vaddr << 2);
const uint32_t tag_addr = vaddr_to_cache_line(vaddr);
const uint32_t tag = m_cachetags[tag_addr];
if ((tag & (1 << 19)) != 0 && ((tag >> 22) & m_ctx_mask) == m_context_masked)
{
// Unshifted
// 00SS|SSSS|SSSS|SSPP|PPPP|BBBB|BBBB|BBBB
// 00TT|TTTT|TTTT|TTTT|LLLL|LLLL|LLLL|bbbb
//
// 00pp|pppp|pppp|pppp|pppp|LLLL|LLLL|---- ff1dc000
// 1111|1111|0001|1101|1100|0000|0000|---- 0000fc70
// Shifted
// SSSS|SSSS|SSSS|PPPP|PPBB|BBBB|BBBB
// TTTT|TTTT|TTTT|TTLL|LLLL|LLLL|LLww
//
// pppp|pppp|pppp|pppp|ppLL|LLLL|LL--
const uint32_t tag_id = tag & m_cache_tag_id_mask;
LOGMASKED(LOG_PAGE_FLUSH, "tag is valid: %08x (%04x vs. %04x)\n", tag, tag_id, (vaddr >> (m_cache_tag_id_shift + 2)) & 0x3fff);
if (tag_id == ((vaddr >> m_cache_tag_id_shift) & 0xfffc))
{
m_cachetags[tag_addr] &= ~(1 << 19);
LOGMASKED(LOG_PAGE_FLUSH, "flushing line with tag %08x from vaddr %08x\n", tag, vaddr << 2);
}
}
}
uint32_t sun4_mmu_base_device::system_r(const uint32_t offset, const uint32_t mem_mask)
@ -361,12 +466,12 @@ uint32_t sun4_mmu_base_device::system_r(const uint32_t offset, const uint32_t me
}
case 8: // (d-)cache tags
LOGMASKED(LOG_CACHE_TAGS, "%s: sun4c_mmu: read dcache tags @ %x, PC = %x\n", machine().describe_context(), offset << 2, m_cpu->pc());
return m_cachetags[(offset >> 3) & m_cache_tag_mask];
LOGMASKED(LOG_CACHE_TAGS, "%s: sun4c_mmu: read dcache tags @ %x, %08x\n", machine().describe_context(), offset << 2, m_cachetags[vaddr_to_cache_line(offset)]);
return m_cachetags[vaddr_to_cache_line(offset)];
case 9: // (d-)cache data
LOGMASKED(LOG_CACHE_DATA, "%s: sun4c_mmu: read dcache data @ %x, PC = %x\n", machine().describe_context(), offset << 2, m_cpu->pc());
return m_cachedata[offset & m_cache_mask];
return m_cachedata[offset & 0x3fff];
case 0xf: // UART bypass
switch (offset & 3)
@ -430,7 +535,7 @@ void sun4_mmu_base_device::system_w(const uint32_t offset, const uint32_t data,
case 8: // cache tags
LOGMASKED(LOG_CACHE_TAGS, "%s: write dcache tags %08x = %08x & %08x\n", machine().describe_context(), offset << 2, data, mem_mask);
m_cachetags[(offset >> 3) & m_cache_tag_mask] = data & 0x03f8fffc;
m_cachetags[vaddr_to_cache_line(offset)] = data & 0x03f8fffc;
/*if (m_cpu->pc() == 0xFFE8B4F8)
{
m_log_mem = true;
@ -439,8 +544,8 @@ void sun4_mmu_base_device::system_w(const uint32_t offset, const uint32_t data,
case 9: // cache data
LOGMASKED(LOG_CACHE_DATA, "write cache data %08x = %08x & %08x\n", offset << 2, data, mem_mask);
COMBINE_DATA((&m_cachedata[0] + (offset & m_cache_mask)));
m_cachetags[(offset >> 3) & m_cache_tag_mask] &= ~(1 << 19);
COMBINE_DATA(&m_cachedata[offset & 0x3fff]);
m_cachetags[vaddr_to_cache_line(offset)] &= ~(1 << 19);
return;
case 0xf: // UART bypass
@ -549,64 +654,106 @@ void sun4_mmu_base_device::merge_page_entry(uint32_t index, uint32_t data, uint3
pe.writable = new_value & PM_WRITEMASK;
pe.supervisor = new_value & PM_SYSMASK;
pe.uncached = new_value & PM_CACHE;
if (pe.uncached)
logerror("page entry %05x is uncached\n", index);
else
logerror("page entry %05x is cached\n", index);
pe.type = (new_value & PM_TYPEMASK) >> 26;
pe.accessed = new_value & PM_ACCESSED;
pe.modified = new_value & PM_MODIFIED;
pe.page = (new_value & m_page_entry_mask) << m_seg_entry_shift;
pe.raw = new_value;
pe.index = index;
LOGMASKED(LOG_PAGE_ENTRIES, "page entry %05x: data %08x, sanity %08x, mem_mask %08x, valid %d, write %d, super %d, cached %d\n", index, new_value, page_entry_to_uint(index), mem_mask, pe.valid ? 1 : 0, pe.writable ? 1 : 0, pe.supervisor ? 1 : 0, pe.uncached ? 0 : 1);
}
bool sun4_mmu_base_device::cache_fetch(page_entry &entry, uint32_t vaddr, uint32_t paddr, uint32_t &cached_data)
uint32_t sun4_mmu_base_device::vaddr_to_cache_line(uint32_t vaddr)
{
const uint32_t cache_entry = (vaddr >> 3) & m_cache_tag_mask;
if (!(m_cachetags[cache_entry] & (1 << 19)))
return (vaddr >> m_cache_tag_shift) & m_cache_tag_mask;
}
void sun4_mmu_base_device::cache_fill(page_entry &entry, uint32_t vaddr, uint32_t paddr, uint32_t entry_index)
{
const uint32_t cache_line = vaddr_to_cache_line(vaddr);
m_cachetags[cache_line] = (1 << 19);
m_cachetags[cache_line] |= entry.supervisor ? (1 << 20) : (0 << 20);
m_cachetags[cache_line] |= entry.writable ? (1 << 21) : (0 << 21);
m_cachetags[cache_line] |= (vaddr >> m_cache_tag_id_shift) & m_cache_tag_id_mask;
m_cachetags[cache_line] |= (m_context_masked) << 22;
const uint32_t cache_line_start = cache_line << m_cache_tag_shift;
const uint32_t mem_line_mask = (1 << m_cache_tag_shift) - 1;
const uint32_t mem_line_start = paddr & ~mem_line_mask;
if (paddr < m_populated_ram_words)
{
if (entry.valid)
{
m_cachetags[cache_entry] = (1 << 19);
m_cachetags[cache_entry] |= entry.supervisor ? (1 << 20) : (0 << 20);
m_cachetags[cache_entry] |= entry.writable ? (1 << 21) : (0 << 21);
}
else
LOGMASKED(LOG_CACHE_FILLS, "Filling cache line %04x (%08x) with data from paddr %08x, vaddr %08x, cache_line %04x, mem_line %08x, tag entry %04x\n",
cache_line, 0x80000000 | (cache_line << m_cache_tag_shift), paddr << 2, vaddr << 2, cache_line_start << 2, mem_line_start << 2,
(m_cachetags[cache_line] >> m_cache_line_size) & (0xfffc >> m_cache_tag_shift));
LOGMASKED(LOG_CACHE_FILLS, "Tag: %08x, valid %d, super %d, write %d, ctx %d\n", m_cachetags[cache_line], BIT(m_cachetags[cache_line], 19),
BIT(m_cachetags[cache_line], 20), BIT(m_cachetags[cache_line], 21), (m_cachetags[cache_line] >> 22) & 7);
LOGMASKED(LOG_CACHE_FILLS, "Entry %05x: raw: %08x, valid %d, super %d, write %d, cache %d\n", entry_index, page_entry_to_uint(entry_index),
entry.valid ? 1 : 0, entry.supervisor ? 1 : 0, entry.writable ? 1 : 0, entry.uncached ? 0 : 1);
memcpy(&m_cachedata[cache_line_start], m_ram_ptr + mem_line_start, sizeof(uint32_t) * m_cache_word_size);
}
else
{
LOGMASKED(LOG_CACHE_FILLS, "Unable to fill cache line, paddr %08x exceeds populated RAM range\n", paddr << 2);
}
}
template bool sun4_mmu_base_device::cache_fetch<sun4_mmu_base_device::USER_MODE>(page_entry &entry, uint32_t vaddr, uint32_t paddr, uint32_t &cached_data, uint32_t entry_index);
template bool sun4_mmu_base_device::cache_fetch<sun4_mmu_base_device::SUPER_MODE>(page_entry &entry, uint32_t vaddr, uint32_t paddr, uint32_t &cached_data, uint32_t entry_index);
template <sun4_mmu_base_device::perm_mode PERM_MODE>
bool sun4_mmu_base_device::cache_fetch(page_entry &entry, uint32_t vaddr, uint32_t paddr, uint32_t &cached_data, uint32_t entry_index)
{
const uint32_t cache_line = vaddr_to_cache_line(vaddr);
const uint32_t tag = m_cachetags[cache_line];
if (!(tag & (1 << 19)))
{
// If the current tag is invalid, bail if the corresponding entry is invalid
if (!entry.valid)
{
return false;
}
cache_fill(entry, vaddr, paddr, entry_index);
cached_data = m_cachedata[vaddr & 0x3fff];
return true;
}
else if ((m_cachetags[cache_entry] & 0xfffc) == ((vaddr >> 12) & 0xfffc))
else if ((tag & m_cache_tag_id_mask) == ((vaddr >> m_cache_tag_id_shift) & m_cache_tag_id_mask))
{
logerror("Cache read: %08x = %08x\n", vaddr << 2, m_cachedata[vaddr & m_cache_mask]);
cached_data = m_cachedata[vaddr & m_cache_mask];
// If the current tag is valid and the tag IDs match, fetch from the cache
if (PERM_MODE == USER_MODE)
{
// If we're in user mode and the context does not match, this is a miss
if (((tag >> 22) & m_ctx_mask) != m_context_masked && (tag & (1 << 20)))
{
return false;
}
}
cached_data = m_cachedata[vaddr & 0x3fff];
return true;
}
else if (!entry.valid)
{
// If the current tag is valid, the tag IDs don't match, and the memory entry is invalid, miss
return false;
}
m_cachetags[cache_entry] &= ~0xfffc;
m_cachetags[cache_entry] |= (vaddr >> 12) & 0xfffc;
m_cachetags[cache_entry] &= ~(m_ctx_mask << 20);
m_cachetags[cache_entry] |= (m_context_masked) << 20;
const uint32_t cache_line_start = (vaddr & ~7) & m_cache_mask;
const uint32_t mem_line_start = paddr & ~7;
for (uint32_t i = 0; i < 8; i++)
else
{
m_cachedata[cache_line_start + i] = m_ram_ptr[mem_line_start + i];
// If the current tag is valid, the tag IDs don't match, and the memory entry is valid, it's a miss
cache_fill(entry, vaddr, paddr, entry_index);
cached_data = m_cachedata[vaddr & 0x3fff];
return true;
}
cached_data = m_cachedata[vaddr & m_cache_mask];
return true;
}
template uint32_t sun4_mmu_base_device::insn_data_r<sun4_mmu_base_device::USER_INSN>(const uint32_t, const uint32_t);
template uint32_t sun4_mmu_base_device::insn_data_r<sun4_mmu_base_device::SUPER_INSN>(const uint32_t, const uint32_t);
template uint32_t sun4_mmu_base_device::insn_data_r<sun4_mmu_base_device::USER_DATA>(const uint32_t, const uint32_t);
template uint32_t sun4_mmu_base_device::insn_data_r<sun4_mmu_base_device::SUPER_DATA>(const uint32_t, const uint32_t);
template uint32_t sun4_mmu_base_device::insn_data_r<sun4_mmu_base_device::USER_INSN, sun4_mmu_base_device::USER_MODE>(const uint32_t, const uint32_t);
template uint32_t sun4_mmu_base_device::insn_data_r<sun4_mmu_base_device::SUPER_INSN, sun4_mmu_base_device::USER_MODE>(const uint32_t, const uint32_t);
template uint32_t sun4_mmu_base_device::insn_data_r<sun4_mmu_base_device::USER_DATA, sun4_mmu_base_device::USER_MODE>(const uint32_t, const uint32_t);
template uint32_t sun4_mmu_base_device::insn_data_r<sun4_mmu_base_device::SUPER_DATA, sun4_mmu_base_device::USER_MODE>(const uint32_t, const uint32_t);
template uint32_t sun4_mmu_base_device::insn_data_r<sun4_mmu_base_device::USER_INSN, sun4_mmu_base_device::SUPER_MODE>(const uint32_t, const uint32_t);
template uint32_t sun4_mmu_base_device::insn_data_r<sun4_mmu_base_device::SUPER_INSN, sun4_mmu_base_device::SUPER_MODE>(const uint32_t, const uint32_t);
template uint32_t sun4_mmu_base_device::insn_data_r<sun4_mmu_base_device::USER_DATA, sun4_mmu_base_device::SUPER_MODE>(const uint32_t, const uint32_t);
template uint32_t sun4_mmu_base_device::insn_data_r<sun4_mmu_base_device::SUPER_DATA, sun4_mmu_base_device::SUPER_MODE>(const uint32_t, const uint32_t);
template <sun4_mmu_base_device::insn_data_mode MODE>
template <sun4_mmu_base_device::insn_data_mode MODE, sun4_mmu_base_device::perm_mode PERM_MODE>
uint32_t sun4_mmu_base_device::insn_data_r(const uint32_t offset, const uint32_t mem_mask)
{
// supervisor program fetches in boot state are special
@ -625,18 +772,46 @@ uint32_t sun4_mmu_base_device::insn_data_r(const uint32_t offset, const uint32_t
uint32_t cached_data = 0;
if (entry.valid)
{
const uint32_t paddr = entry.page | (offset & m_page_mask);
entry.accessed = PM_ACCESSED;
const uint32_t tmp = entry.page | (offset & m_page_mask);
{
uint32_t tag_entry = vaddr_to_cache_line(offset);
uint32_t tag = m_cachetags[tag_entry];
bool user_mode = (PERM_MODE == USER_MODE);
bool cache_hit = (tag & m_cache_tag_id_mask) == ((offset >> m_cache_tag_id_shift) & m_cache_tag_id_mask);
bool cacheable = entry.type == 0 && !entry.uncached;
bool check_cache = cacheable && cache_hit && (m_system_enable & ENA_CACHE);
bool cache_valid = BIT(tag, 19);
bool cache_protected = user_mode && BIT(tag, 20) && (((m_cachetags[tag_entry] >> 22) & m_ctx_mask) != m_context_masked);
if (check_cache && cache_valid && cache_protected)
{
LOGMASKED(LOG_READ_PROTECT, "%s: read protect error with PTE %04x (%08x), %08x & %08x\n", machine().describe_context(),
entry_index, page_entry_to_uint(entry_index), offset << 2, mem_mask);
LOGMASKED(LOG_WRITE_PROTECT, "%s: tag %08x, line %04x, writable %d, check %d, valid %d, super %d, tag ctx %d, ctx reg %d\n",
machine().describe_context(), m_cachetags[tag_entry], tag_entry, BIT(m_cachetags[tag_entry], 21), check_cache ? 1 : 0, cache_valid ? 1 : 0,
BIT(m_cachetags[tag_entry], 20), (m_cachetags[tag_entry] >> 22) & 7, m_context_masked);
LOGMASKED(LOG_WRITE_PROTECT, "%s: entry cached %d, entry writable %d, entry super %d\n",
machine().describe_context(), entry.uncached ? 0 : 1, entry.writable ? 1 : 0, entry.supervisor ? 1 : 0);
LOGMASKED(LOG_WRITE_PROTECT, "%s: pmeg %08x, seg entry %08x\n", machine().describe_context(), pmeg, ((offset >> m_seg_entry_shift) & m_seg_entry_mask));
m_buserr[0] |= 0x0040; // read protection error
m_buserr[1] = offset << 2;
m_host->set_mae();
m_cachetags[tag_entry] &= ~(1 << 19);
return 0;
}
}
switch (entry.type)
{
case 0: // type 0 space
if (tmp < m_populated_ram_words)
if (paddr < m_populated_ram_words)
{
if (BIT(m_parity_err_reg, 4) && m_parity_err)
{
logerror("%s: ram read with parity %08x: %08x & %08x\n", machine().describe_context(), m_parity_err_reg, offset, mem_mask);
LOGMASKED(LOG_PARITY, "%s: ram read with parity %08x: %08x & %08x\n", machine().describe_context(), m_parity_err_reg, offset, mem_mask);
m_parity_err_reg |= m_parity_err;
if (BIT(m_parity_err_reg, 7))
{
@ -659,32 +834,38 @@ uint32_t sun4_mmu_base_device::insn_data_r(const uint32_t offset, const uint32_t
m_buserr[1] = (offset << 2) | boffs;
m_host->set_mae();
}
const uint32_t set = (tmp >> 22) & 3;
const uint32_t set = (paddr >> 22) & 3;
const uint32_t addr_mask = m_ram_set_mask[set];
const uint32_t masked_addr = m_ram_set_base[set] + (tmp & addr_mask);
if (!entry.uncached)
const uint32_t masked_addr = m_ram_set_base[set] + (paddr & addr_mask);
if (!entry.uncached && (m_system_enable & ENA_CACHE))
{
if (cache_fetch(entry, offset, masked_addr, cached_data))
if (cache_fetch<PERM_MODE>(entry, offset, masked_addr, cached_data, entry_index))
{
//uint32_t value = masked_addr | 0x80000000;
//fwrite(&value, 1, 4, s_mem_log);
//fwrite(&cached_data, 1, 4, s_mem_log);
//m_fpos += 8;
return cached_data;
}
}
//if (m_log_mem)
//logerror("RAM read: %08x = %08x (%08x, %08x)\n", offset << 2, m_ram_ptr[masked_addr], tmp << 2, masked_addr);
//uint32_t value = masked_addr | 0x80000000;
//fwrite(&value, 1, 4, s_mem_log);
//fwrite(&m_ram_ptr[masked_addr], 1, 4, s_mem_log);
//m_fpos += 8;
return m_ram_ptr[masked_addr];
}
else if (tmp >= 0x4000000 >> 2 && tmp < 0x10000000 >> 2)
else if (paddr >= 0x4000000 >> 2 && paddr < 0x10000000 >> 2)
{
type0_timeout_r(tmp);
type0_timeout_r(paddr);
}
return ~0;
case 1: // type 1 space
m_type1_offset = offset;
return m_type1_r(tmp, mem_mask);
return m_type1_r(paddr, mem_mask);
default:
LOGMASKED(LOG_UNKNOWN_SPACE, "read unknown space type %d, %08x & %08x, PC=%08x\n", entry.type, tmp << 2, mem_mask, m_cpu->pc());
LOGMASKED(LOG_UNKNOWN_SPACE, "read unknown space type %d, %08x & %08x, PC=%08x\n", entry.type, paddr << 2, mem_mask, m_cpu->pc());
m_host->set_mae();
m_buserr[0] = 0x20;
m_buserr[1] = offset << 2;
@ -695,7 +876,7 @@ uint32_t sun4_mmu_base_device::insn_data_r(const uint32_t offset, const uint32_t
{
if (!machine().side_effects_disabled())
{
if (cache_fetch(entry, offset, 0, cached_data))
if ((m_system_enable & ENA_CACHE) && cache_fetch<PERM_MODE>(entry, offset, 0, cached_data, entry_index))
{
return cached_data;
}
@ -724,12 +905,16 @@ uint32_t sun4_mmu_base_device::insn_data_r(const uint32_t offset, const uint32_t
}
}
template void sun4_mmu_base_device::insn_data_w<sun4_mmu_base_device::USER_INSN>(const uint32_t, const uint32_t, const uint32_t);
template void sun4_mmu_base_device::insn_data_w<sun4_mmu_base_device::SUPER_INSN>(const uint32_t, const uint32_t, const uint32_t);
template void sun4_mmu_base_device::insn_data_w<sun4_mmu_base_device::USER_DATA>(const uint32_t, const uint32_t, const uint32_t);
template void sun4_mmu_base_device::insn_data_w<sun4_mmu_base_device::SUPER_DATA>(const uint32_t, const uint32_t, const uint32_t);
template void sun4_mmu_base_device::insn_data_w<sun4_mmu_base_device::USER_INSN, sun4_mmu_base_device::USER_MODE>(const uint32_t, const uint32_t, const uint32_t);
template void sun4_mmu_base_device::insn_data_w<sun4_mmu_base_device::SUPER_INSN, sun4_mmu_base_device::USER_MODE>(const uint32_t, const uint32_t, const uint32_t);
template void sun4_mmu_base_device::insn_data_w<sun4_mmu_base_device::USER_DATA, sun4_mmu_base_device::USER_MODE>(const uint32_t, const uint32_t, const uint32_t);
template void sun4_mmu_base_device::insn_data_w<sun4_mmu_base_device::SUPER_DATA, sun4_mmu_base_device::USER_MODE>(const uint32_t, const uint32_t, const uint32_t);
template void sun4_mmu_base_device::insn_data_w<sun4_mmu_base_device::USER_INSN, sun4_mmu_base_device::SUPER_MODE>(const uint32_t, const uint32_t, const uint32_t);
template void sun4_mmu_base_device::insn_data_w<sun4_mmu_base_device::SUPER_INSN, sun4_mmu_base_device::SUPER_MODE>(const uint32_t, const uint32_t, const uint32_t);
template void sun4_mmu_base_device::insn_data_w<sun4_mmu_base_device::USER_DATA, sun4_mmu_base_device::SUPER_MODE>(const uint32_t, const uint32_t, const uint32_t);
template void sun4_mmu_base_device::insn_data_w<sun4_mmu_base_device::SUPER_DATA, sun4_mmu_base_device::SUPER_MODE>(const uint32_t, const uint32_t, const uint32_t);
template <sun4_mmu_base_device::insn_data_mode MODE>
template <sun4_mmu_base_device::insn_data_mode MODE, sun4_mmu_base_device::perm_mode PERM_MODE>
void sun4_mmu_base_device::insn_data_w(const uint32_t offset, const uint32_t data, const uint32_t mem_mask)
{
// it's translation time
@ -739,28 +924,54 @@ void sun4_mmu_base_device::insn_data_w(const uint32_t offset, const uint32_t dat
if (m_page_valid[entry_index])
{
page_entry &entry = m_pagemap[entry_index];
if ((!entry.writable) || (entry.supervisor && MODE != SUPER_DATA && MODE != SUPER_INSN))
const uint32_t paddr = entry.page | (offset & m_page_mask);
{
LOGMASKED(LOG_WRITE_PROTECT, "write protect error with PTE %d (%08x), %08x = %08x & %08x, PC=%08x\n", entry_index, page_entry_to_uint(entry_index), offset << 2, data, mem_mask, m_cpu->pc());
m_buserr[0] |= 0x8040; // write, protection error
m_buserr[1] = offset << 2;
m_host->set_mae();
return;
uint32_t tag_entry = vaddr_to_cache_line(offset);
uint32_t tag = m_cachetags[tag_entry];
bool user_mode = PERM_MODE == USER_MODE;
bool cacheable = entry.type == 0 && !entry.uncached;
bool check_cache = cacheable && (m_system_enable & ENA_CACHE);
bool cache_valid = BIT(tag, 19);
bool cache_hit = (tag & m_cache_tag_id_mask) == ((offset >> m_cache_tag_id_shift) & m_cache_tag_id_mask);
bool cache_writable = BIT(tag, 21);
bool cache_protected = user_mode && BIT(tag, 20) && (((tag >> 22) & m_ctx_mask) != m_context_masked);
if (cacheable && (!cache_valid || !cache_hit || !cache_writable || cache_protected))
{
m_cachetags[tag_entry] &= ~(1 << 19);
}
if ((check_cache && cache_hit && cache_valid && (!cache_writable || cache_protected)) ||
(!check_cache && (!entry.writable || (user_mode && entry.supervisor))))
{
LOGMASKED(LOG_WRITE_PROTECT, "%s: write protect error with PTE %04x (%08x), %08x = %08x & %08x, mode %c\n", machine().describe_context(),
entry_index, page_entry_to_uint(entry_index), offset << 2, data, mem_mask, user_mode ? 'U' : 'S');
LOGMASKED(LOG_WRITE_PROTECT, "%s: tag %08x, line %04x, writable %d, check %d, valid %d, super %d, tag ctx %d, ctx reg %d\n",
machine().describe_context(), m_cachetags[tag_entry], tag_entry, BIT(m_cachetags[tag_entry], 21), check_cache ? 1 : 0, cache_valid ? 1 : 0,
BIT(m_cachetags[tag_entry], 20), (m_cachetags[tag_entry] >> 22) & 7, m_context_masked);
LOGMASKED(LOG_WRITE_PROTECT, "%s: entry cached %d, entry writable %d, entry super %d\n",
machine().describe_context(), entry.uncached ? 0 : 1, entry.writable ? 1 : 0, entry.supervisor ? 1 : 0);
LOGMASKED(LOG_WRITE_PROTECT, "%s: pmeg %08x, seg entry %08x\n", machine().describe_context(), pmeg, ((offset >> m_seg_entry_shift) & m_seg_entry_mask));
m_buserr[0] |= 0x8040; // write protection error
m_buserr[1] = offset << 2;
m_host->set_mae();
return;
}
}
entry.accessed = PM_ACCESSED;
entry.modified = PM_MODIFIED;
const uint32_t tmp = entry.page | (offset & m_page_mask);
switch (entry.type)
{
case 0: // type 0
if (tmp < m_populated_ram_words)
if (paddr < m_populated_ram_words)
{
if (BIT(m_parity_err_reg, 5))
{
logerror("%s: ram write with parity %08x: %08x = %08x & %08x\n", machine().describe_context(), m_parity_err_reg, offset, data, mem_mask);
LOGMASKED(LOG_PARITY, "%s: ram write with parity %08x: %08x = %08x & %08x\n", machine().describe_context(), m_parity_err_reg,
offset, data, mem_mask);
if (ACCESSING_BITS_24_31)
m_parity_err |= (1 << 0);
if (ACCESSING_BITS_16_23)
@ -770,43 +981,45 @@ void sun4_mmu_base_device::insn_data_w(const uint32_t offset, const uint32_t dat
if (ACCESSING_BITS_0_7)
m_parity_err |= (1 << 3);
}
const uint32_t set = (tmp >> 22) & 3;
const uint32_t set = (paddr >> 22) & 3;
const uint32_t addr_mask = m_ram_set_mask[set];
const uint32_t masked_addr = m_ram_set_base[set] + (tmp & addr_mask);
if (!entry.uncached)
const uint32_t masked_addr = m_ram_set_base[set] + (paddr & addr_mask);
if (!entry.uncached && (m_system_enable & ENA_CACHE))
{
const uint32_t cache_entry = (offset >> 3) & m_cache_tag_mask;
if ((m_cachetags[cache_entry] & (1 << 19)) != 0)
const uint32_t cache_entry = vaddr_to_cache_line(offset);
const uint32_t tag = m_cachetags[cache_entry];
if (tag & (1 << 19))
{
if ((m_cachetags[cache_entry] & 0xfffc) == ((offset >> 12) & 0xfffc))
if ((m_cachetags[cache_entry] & m_cache_tag_id_mask) == ((offset >> m_cache_tag_id_shift) & m_cache_tag_id_mask))
{
logerror("Cache write: %08x (%08x) = %08x\n", (offset & m_cache_mask) << 2, offset, data);
COMBINE_DATA((&m_cachedata[0] + (offset & m_cache_mask)));
COMBINE_DATA(&m_cachedata[offset & 0x3fff]);
}
else
{
logerror("Cache invalidate\n");
m_cachetags[cache_entry] &= ~(1 << 19);
//m_cachetags[cache_entry] &= ~(1 << 19);
//cache_fill(entry, offset, masked_addr, entry_index);
//COMBINE_DATA(&m_cachedata[offset & 0x3fff]);
}
}
}
COMBINE_DATA((m_ram_ptr + masked_addr));
//if (m_log_mem)
//logerror("RAM write: %08x (%08x, %08x) = %08x (setting cache entry %08x)\n", offset << 2, tmp << 2, masked_addr, data, offset & m_cache_mask);
//fwrite(&masked_addr, 1, 4, s_mem_log);
//fwrite(&m_ram_ptr[masked_addr], 1, 4, s_mem_log);
//m_fpos += 8;
}
else if (tmp >= 0x4000000 >> 2 && tmp < 0x10000000 >> 2)
else if (paddr >= 0x4000000 >> 2 && paddr < 0x10000000 >> 2)
{
type0_timeout_w(tmp);
type0_timeout_w(paddr);
}
return;
case 1: // type 1
m_type1_offset = offset;
m_type1_w(tmp, data, mem_mask);
m_type1_w(paddr, data, mem_mask);
return;
default:
LOGMASKED(LOG_UNKNOWN_SPACE, "write unknown space type %d, %08x = %08x & %08x, PC=%08x\n", entry.type, tmp << 2, data, mem_mask, m_cpu->pc());
LOGMASKED(LOG_UNKNOWN_SPACE, "write unknown space type %d, %08x = %08x & %08x, PC=%08x\n", entry.type, paddr << 2, data, mem_mask, m_cpu->pc());
m_host->set_mae();
m_buserr[0] = 0x8020;
m_buserr[1] = offset << 2;
@ -837,6 +1050,15 @@ void sun4_mmu_base_device::insn_data_w(const uint32_t offset, const uint32_t dat
}
}
bool sun4_mmu_base_device::translate(uint32_t &addr)
{
const uint32_t pmeg = m_curr_segmap_masked[(addr >> 16) & 0xfff];// & m_pmeg_mask;
const uint32_t entry_index = pmeg | ((addr >> m_seg_entry_shift) & m_seg_entry_mask);
const page_entry &entry = m_pagemap[entry_index];
addr = entry.page | (addr & m_page_mask);
return entry.valid;
}
void sun4_mmu_base_device::l2p_command(int ref, const std::vector<std::string> &params)
{
uint64_t addr, offset;
@ -846,21 +1068,17 @@ void sun4_mmu_base_device::l2p_command(int ref, const std::vector<std::string> &
addr &= 0xffffffff;
offset = addr >> 2;
uint8_t pmeg = 0;
uint32_t entry_index = 0, tmp = 0;
uint32_t entry_value = 0;
pmeg = m_curr_segmap_masked[(offset >> 16) & 0xfff];
entry_index = pmeg | ((offset >> m_seg_entry_shift) & m_seg_entry_mask);
tmp = m_pagemap[entry_index].page | (offset & m_page_mask);
entry_value = page_entry_to_uint(entry_index);
uint8_t pmeg = m_curr_segmap_masked[(addr >> 18) & 0xfff];
uint32_t entry_index = pmeg | ((offset >> m_seg_entry_shift) & m_seg_entry_mask);
uint32_t paddr = m_pagemap[entry_index].page | (offset & m_page_mask);
uint32_t entry_value = page_entry_to_uint(entry_index);
if (m_page_valid[entry_index])
{
machine().debugger().console().printf("logical %08x => phys %08x, type %d (pmeg %d, entry %d PTE %08x)\n", addr, tmp << 2, m_pagemap[entry_index].type, pmeg, entry_index, entry_value);
machine().debugger().console().printf("logical %08x => phys %08x, type %d (pmeg %d, entry %d PTE %08x)\n", addr, paddr << 2, m_pagemap[entry_index].type, pmeg, entry_index, entry_value);
}
else
{
machine().debugger().console().printf("logical %08x points to an invalid PTE! (pmeg %d, entry %d PTE %08x)\n", addr, tmp << 2, pmeg, entry_index, entry_value);
machine().debugger().console().printf("logical %08x points to an invalid PTE! (pmeg %d, entry %d PTE %08x)\n", addr, paddr << 2, pmeg, entry_index, entry_value);
}
}


@ -36,6 +36,7 @@ public:
template <typename T> void set_ram(T &&ram_tag) { m_ram.set_tag(std::forward<T>(ram_tag)); }
template <typename T> void set_rom(T &&rom_tag) { m_rom.set_tag(std::forward<T>(rom_tag)); }
template <typename T> void set_scc(T &&scc_tag) { m_scc.set_tag(std::forward<T>(scc_tag)); }
void set_cache_line_size(uint32_t line_size) { m_cache_line_size = line_size; }
auto type1_r() { return m_type1_r.bind(); }
auto type1_w() { return m_type1_w.bind(); }
@ -56,8 +57,14 @@ public:
SUPER_DATA
};
template <insn_data_mode MODE> uint32_t insn_data_r(const uint32_t offset, const uint32_t mem_mask);
template <insn_data_mode MODE> void insn_data_w(const uint32_t offset, const uint32_t data, const uint32_t mem_mask);
enum perm_mode
{
USER_MODE,
SUPER_MODE
};
template <insn_data_mode MODE, perm_mode PERM_MODE> uint32_t insn_data_r(const uint32_t offset, const uint32_t mem_mask);
template <insn_data_mode MODE, perm_mode PERM_MODE> void insn_data_w(const uint32_t offset, const uint32_t data, const uint32_t mem_mask);
uint32_t type1_timeout_r(uint32_t offset);
void type1_timeout_w(uint32_t offset, uint32_t data);
@ -75,6 +82,8 @@ protected:
struct page_entry
{
uint32_t index;
uint32_t raw;
uint32_t valid;
uint32_t writable;
uint32_t supervisor;
@ -90,12 +99,15 @@ protected:
virtual void device_start() override;
virtual void device_reset() override;
virtual void device_stop() override;
virtual void device_timer(emu_timer &timer, device_timer_id id, int param, void *ptr) override;
uint32_t page_entry_to_uint(uint32_t index);
void merge_page_entry(uint32_t index, uint32_t data, uint32_t mem_mask);
bool cache_fetch(page_entry &entry, uint32_t vaddr, uint32_t paddr, uint32_t &cached_data);
template <perm_mode MODE> bool cache_fetch(page_entry &entry, uint32_t vaddr, uint32_t paddr, uint32_t &cached_data, uint32_t entry_index);
void cache_fill(page_entry &entry, uint32_t vaddr, uint32_t paddr, uint32_t entry_index);
void segment_flush_w(const uint32_t vaddr);
void context_flush_w(const uint32_t vaddr);
void page_flush_w(const uint32_t vaddr);
@ -107,7 +119,9 @@ protected:
void page_map_w(const uint32_t offset, const uint32_t data, const uint32_t mem_mask);
void type0_timeout_r(const uint32_t offset);
void type0_timeout_w(const uint32_t offset);
bool translate(uint32_t &addr);
void l2p_command(int ref, const std::vector<std::string> &params);
uint32_t vaddr_to_cache_line(uint32_t vaddr);
enum
{
@ -175,9 +189,16 @@ protected:
uint32_t m_page_entry_mask;
uint32_t m_cache_mask;
uint32_t m_cache_tag_mask;
uint32_t m_cache_line_size;
uint32_t m_cache_word_size;
uint32_t m_cache_tag_shift;
uint32_t m_cache_tag_id_mask;
uint32_t m_cache_tag_id_shift;
uint32_t m_cache_vaddr_shift;
uint32_t m_ram_set_mask[4]; // Used for mirroring within 4 megabyte sets
uint32_t m_ram_set_base[4];
uint32_t m_populated_ram_words;
uint64_t m_fpos;
emu_timer *m_reset_timer;
bool m_log_mem;
};


@ -673,12 +673,12 @@ private:
READ32_MEMBER( sun4_base_state::debugger_r )
{
return m_mmu->insn_data_r<sun4_mmu_base_device::SUPER_INSN>(offset, mem_mask);
return m_mmu->insn_data_r<sun4_mmu_base_device::SUPER_INSN, sun4_mmu_base_device::SUPER_MODE>(offset, mem_mask);
}
WRITE32_MEMBER( sun4_base_state::debugger_w )
{
m_mmu->insn_data_w<sun4_mmu_base_device::SUPER_INSN>(offset, data, mem_mask);
m_mmu->insn_data_w<sun4_mmu_base_device::SUPER_INSN, sun4_mmu_base_device::SUPER_MODE>(offset, data, mem_mask);
}
void sun4_base_state::fcodes_command(int ref, const std::vector<std::string> &params)
@ -1078,7 +1078,7 @@ void sun4_base_state::dma_transfer_write()
pack_cnt++;
if (pack_cnt == 4)
{
m_mmu->insn_data_w<sun4c_mmu_device::SUPER_DATA>(m_dma[DMA_ADDR] >> 2, m_dma_pack_register, ~0);
m_mmu->insn_data_w<sun4c_mmu_device::SUPER_DATA, sun4_mmu_base_device::SUPER_MODE>(m_dma[DMA_ADDR] >> 2, m_dma_pack_register, ~0);
pack_cnt = 0;
m_dma_pack_register = 0;
m_dma[DMA_ADDR] += 4;
@ -1099,7 +1099,7 @@ void sun4_base_state::dma_transfer_read()
{
if (!word_cached)
{
current_word = m_mmu->insn_data_r<sun4c_mmu_device::SUPER_DATA>(m_dma[DMA_ADDR] >> 2, ~0);
current_word = m_mmu->insn_data_r<sun4c_mmu_device::SUPER_DATA, sun4_mmu_base_device::SUPER_MODE>(m_dma[DMA_ADDR] >> 2, ~0);
word_cached = true;
//logerror("Current word: %08x\n", current_word);
}
@ -1176,7 +1176,7 @@ WRITE32_MEMBER( sun4_base_state::dma_w )
const uint32_t bit_index = (3 - i) * 8;
//const uint32_t value = m_dma_pack_register & (0xff << bit_index);
//logerror("dma_w: draining %02x to RAM address %08x & %08x\n", value >> bit_index, m_dma[DMA_ADDR], 0xff << (24 - bit_index));
m_mmu->insn_data_w<sun4c_mmu_device::SUPER_DATA>(m_dma[DMA_ADDR] >> 2, m_dma_pack_register, 0xff << bit_index);
m_mmu->insn_data_w<sun4c_mmu_device::SUPER_DATA, sun4_mmu_base_device::SUPER_MODE>(m_dma[DMA_ADDR] >> 2, m_dma_pack_register, 0xff << bit_index);
m_dma[DMA_ADDR]++;
}
m_dma_pack_register = 0;
@ -1301,16 +1301,16 @@ void sun4_base_state::sun4_base(machine_config &config)
AM79C90(config, m_lance);
m_lance->dma_in().set([this](offs_t offset)
{
u32 const data = m_mmu->insn_data_r<sun4c_mmu_device::SUPER_DATA>((0xff000000U | offset) >> 2, 0xffffffffU);
u32 const data = m_mmu->insn_data_r<sun4c_mmu_device::SUPER_DATA, sun4_mmu_base_device::SUPER_MODE>((0xff000000U | offset) >> 2, 0xffffffffU);
return (offset & 2) ? u16(data) : u16(data >> 16);
});
m_lance->dma_out().set([this](offs_t offset, u16 data, u16 mem_mask)
{
if (offset & 2)
m_mmu->insn_data_w<sun4c_mmu_device::SUPER_DATA>((0xff000000U | offset) >> 2, data, mem_mask);
m_mmu->insn_data_w<sun4c_mmu_device::SUPER_DATA, sun4_mmu_base_device::SUPER_MODE>((0xff000000U | offset) >> 2, data, mem_mask);
else
m_mmu->insn_data_w<sun4c_mmu_device::SUPER_DATA>((0xff000000U | offset) >> 2, u32(data) << 16, u32(mem_mask) << 16);
m_mmu->insn_data_w<sun4c_mmu_device::SUPER_DATA, sun4_mmu_base_device::SUPER_MODE>((0xff000000U | offset) >> 2, u32(data) << 16, u32(mem_mask) << 16);
});
// Keyboard/mouse
@ -1390,6 +1390,7 @@ void sun4c_state::sun4c(machine_config &config)
m_mmu->set_ram(m_ram);
m_mmu->set_rom("user1");
m_mmu->set_scc(m_scc2);
m_mmu->set_cache_line_size(16);
m_maincpu->set_mmu(m_mmu);
// SBus
@ -1441,6 +1442,7 @@ void sun4c_state::sun4_50(machine_config &config)
m_mmu->set_ctx_mask(0xf);
m_mmu->set_pmeg_mask(0xff);
m_mmu->set_cache_line_size(32);
m_mmu->set_clock(40'000'000);
m_maincpu->set_clock(40'000'000);
@ -1478,6 +1480,7 @@ void sun4c_state::sun4_75(machine_config &config)
m_mmu->set_ctx_mask(0xf);
m_mmu->set_pmeg_mask(0xff);
m_mmu->set_cache_line_size(32);
m_mmu->set_clock(40'000'000);
m_maincpu->set_clock(40'000'000);