-sun4c_mmu: Checkins, likely will cause other regressions, but checking in as a checkpoint, nw

MooglyGuy 2019-12-15 00:55:36 +01:00
parent 9e34a7f5cb
commit b0dd896e3e
2 changed files with 137 additions and 45 deletions


@@ -36,7 +36,7 @@ DEFINE_DEVICE_TYPE(SUN4C_MMU, sun4c_mmu_device, "sun4c_mmu", "Sun 4c MMU")
#define LOG_PARITY (1U << 15)
#define LOG_ALL_ASI (1U << 16) // WARNING: Heavy!
-#define VERBOSE (LOG_CACHE_TAGS)
+#define VERBOSE (0)
#include "logmacro.h"
sun4_mmu_base_device::sun4_mmu_base_device(const machine_config &mconfig, device_type type, const char *tag, device_t *owner, uint32_t clock)
@@ -130,6 +130,13 @@ void sun4_mmu_base_device::device_start()
using namespace std::placeholders;
machine().debugger().console().register_command("l2p", CMDFLAG_NONE, 0, 1, 1, std::bind(&sun4_mmu_base_device::l2p_command, this, _1, _2));
}
+m_cache_tag_mask = m_cache_mask >> 3;
+printf("m_page_mask %08x\n", m_page_mask);
+printf("m_seg_entry_shift %08x\n", m_seg_entry_shift);
+printf("m_seg_entry_mask %08x\n", m_seg_entry_mask);
+printf("m_page_entry_mask %08x\n", m_page_entry_mask);
+printf("m_cache_mask %08x\n", m_cache_mask);
}
void sun4_mmu_base_device::device_reset()
@@ -170,7 +177,7 @@ void sun4_mmu_base_device::device_reset()
m_system_enable = 0;
m_fetch_bootrom = true;
-memset(m_buserr, 0, sizeof(uint32_t) * 8);
+memset(m_buserr, 0, sizeof(uint32_t) * 16);
for (int i = 0; i < 16; i++)
{
memset(&m_segmap[i][0], 0, 4096);
@@ -219,7 +226,6 @@ uint32_t sun4_mmu_base_device::read_asi(uint8_t asi, uint32_t offset, uint32_t m
case 12:
case 13:
case 14:
-cache_flush_r();
return 0;
default:
return ~0;
@@ -253,9 +259,13 @@ void sun4_mmu_base_device::write_asi(uint8_t asi, uint32_t offset, uint32_t data
insn_data_w<SUPER_DATA>(offset, data, mem_mask);
return;
case 12:
+segment_flush_w(offset);
+break;
case 13:
+page_flush_w(offset);
+break;
case 14:
-cache_flush_w();
+context_flush_w(offset);
return;
default:
return;
@@ -293,15 +303,33 @@ void sun4_mmu_base_device::parity_w(uint32_t offset, uint32_t data, uint32_t mem
}
}
-uint32_t sun4_mmu_base_device::cache_flush_r()
+void sun4_mmu_base_device::segment_flush_w(const uint32_t vaddr)
{
-// Do nothing for now
-return 0;
+logerror("%s: segment_flush_w %08x\n", machine().describe_context(), vaddr);
}
-void sun4_mmu_base_device::cache_flush_w()
+void sun4_mmu_base_device::context_flush_w(const uint32_t vaddr)
{
-// Do nothing for now
+logerror("%s: context_flush_w %08x\n", machine().describe_context(), vaddr);
+const uint32_t cache_addr = (vaddr >> 3) & m_cache_mask;
+if ((m_cachetags[cache_addr] & (1 << 19)) != 0)
+{
+if (((m_cachetags[cache_addr] >> 20) & m_ctx_mask) == m_context_masked)
+{
+const uint32_t paddr = ((cache_addr << 12) & 0x0fffc000) | ((vaddr << 2) & 0x3ff8);
+const uint32_t caddr = (vaddr & ~7) & m_cache_mask;
+if (paddr < m_populated_ram_words)
+{
+memcpy(m_ram_ptr, &m_cachedata[caddr], sizeof(uint32_t) * 8);
+}
+m_cachetags[cache_addr] &= ~(1 << 19);
+}
+}
}
+void sun4_mmu_base_device::page_flush_w(const uint32_t vaddr)
+{
+logerror("%s: page_flush_w %08x\n", machine().describe_context(), vaddr);
+}
uint32_t sun4_mmu_base_device::system_r(const uint32_t offset, const uint32_t mem_mask)
@@ -333,11 +361,11 @@ uint32_t sun4_mmu_base_device::system_r(const uint32_t offset, const uint32_t me
}
case 8: // (d-)cache tags
-LOGMASKED(LOG_CACHE_TAGS, "sun4_mmu: read dcache tags @ %x, PC = %x\n", offset, m_cpu->pc());
-return m_cachetags[offset & m_cache_mask];
+LOGMASKED(LOG_CACHE_TAGS, "%s: sun4c_mmu: read dcache tags @ %x, PC = %x\n", machine().describe_context(), offset << 2, m_cpu->pc());
+return m_cachetags[(offset >> 3) & m_cache_tag_mask];
case 9: // (d-)cache data
-LOGMASKED(LOG_CACHE_DATA, "sun4c_mmu: read dcache data @ %x, PC = %x\n", offset, m_cpu->pc());
+LOGMASKED(LOG_CACHE_DATA, "%s: sun4c_mmu: read dcache data @ %x, PC = %x\n", machine().describe_context(), offset << 2, m_cpu->pc());
return m_cachedata[offset & m_cache_mask];
case 0xf: // UART bypass
@@ -401,17 +429,18 @@ void sun4_mmu_base_device::system_w(const uint32_t offset, const uint32_t data,
}
case 8: // cache tags
-LOGMASKED(LOG_CACHE_TAGS, "%s: write cache tags %08x = %08x & %08x\n", machine().describe_context(), offset << 2, data, mem_mask);
-m_cachetags[offset & m_cache_mask] = data & 0x03f8fffc;
-if (m_cpu->pc() == 0xFFE8B940)
+LOGMASKED(LOG_CACHE_TAGS, "%s: write dcache tags %08x = %08x & %08x\n", machine().describe_context(), offset << 2, data, mem_mask);
+m_cachetags[(offset >> 3) & m_cache_tag_mask] = data & 0x03f8fffc;
+/*if (m_cpu->pc() == 0xFFE8B4F8)
{
m_log_mem = true;
-}
+}*/
return;
case 9: // cache data
LOGMASKED(LOG_CACHE_DATA, "write cache data %08x = %08x & %08x\n", offset << 2, data, mem_mask);
-m_cachedata[offset & m_cache_mask] = data;
+COMBINE_DATA((&m_cachedata[0] + (offset & m_cache_mask)));
+m_cachetags[(offset >> 3) & m_cache_tag_mask] &= ~(1 << 19);
return;
case 0xf: // UART bypass
@@ -532,6 +561,46 @@ void sun4_mmu_base_device::merge_page_entry(uint32_t index, uint32_t data, uint3
pe.page = (new_value & m_page_entry_mask) << m_seg_entry_shift;
}
+bool sun4_mmu_base_device::cache_fetch(page_entry &entry, uint32_t vaddr, uint32_t paddr, uint32_t &cached_data)
+{
+const uint32_t cache_entry = (vaddr >> 3) & m_cache_tag_mask;
+if (!(m_cachetags[cache_entry] & (1 << 19)))
+{
+if (entry.valid)
+{
+m_cachetags[cache_entry] = (1 << 19);
+m_cachetags[cache_entry] |= entry.supervisor ? (1 << 20) : (0 << 20);
+m_cachetags[cache_entry] |= entry.writable ? (1 << 21) : (0 << 21);
+}
+else
+{
+return false;
+}
+}
+else if ((m_cachetags[cache_entry] & 0xfffc) == ((vaddr >> 12) & 0xfffc))
+{
+logerror("Cache read: %08x = %08x\n", vaddr << 2, m_cachedata[vaddr & m_cache_mask]);
+cached_data = m_cachedata[vaddr & m_cache_mask];
+return true;
+}
+else if (!entry.valid)
+{
+return false;
+}
+m_cachetags[cache_entry] &= ~0xfffc;
+m_cachetags[cache_entry] |= (vaddr >> 12) & 0xfffc;
+m_cachetags[cache_entry] &= ~(m_ctx_mask << 20);
+m_cachetags[cache_entry] |= (m_context_masked) << 20;
+const uint32_t cache_line_start = (vaddr & ~7) & m_cache_mask;
+const uint32_t mem_line_start = paddr & ~7;
+for (uint32_t i = 0; i < 8; i++)
+{
+m_cachedata[cache_line_start + i] = m_ram_ptr[mem_line_start + i];
+}
+cached_data = m_cachedata[vaddr & m_cache_mask];
+return true;
+}
template uint32_t sun4_mmu_base_device::insn_data_r<sun4_mmu_base_device::USER_INSN>(const uint32_t, const uint32_t);
template uint32_t sun4_mmu_base_device::insn_data_r<sun4_mmu_base_device::SUPER_INSN>(const uint32_t, const uint32_t);
template uint32_t sun4_mmu_base_device::insn_data_r<sun4_mmu_base_device::USER_DATA>(const uint32_t, const uint32_t);
@@ -551,10 +620,11 @@ uint32_t sun4_mmu_base_device::insn_data_r(const uint32_t offset, const uint32_t
// it's translation time
const uint32_t pmeg = m_curr_segmap_masked[(offset >> 16) & 0xfff];// & m_pmeg_mask;
const uint32_t entry_index = pmeg | ((offset >> m_seg_entry_shift) & m_seg_entry_mask);
+page_entry &entry = m_pagemap[entry_index];
-if (m_page_valid[entry_index])
+uint32_t cached_data = 0;
+if (entry.valid)
{
-page_entry &entry = m_pagemap[entry_index];
entry.accessed = PM_ACCESSED;
const uint32_t tmp = entry.page | (offset & m_page_mask);
@@ -589,19 +659,18 @@ uint32_t sun4_mmu_base_device::insn_data_r(const uint32_t offset, const uint32_t
m_buserr[1] = (offset << 2) | boffs;
m_host->set_mae();
}
-if (!entry.uncached)
-{
-const uint32_t cache_entry = (offset >> 2) & 0xfff;
-m_cachetags[cache_entry] |= (1 << 19);
-m_cachetags[cache_entry] &= ~(3 << 20);
-m_cachetags[cache_entry] |= entry.supervisor ? (1 << 20) : (0 << 20);
-m_cachetags[cache_entry] |= entry.writable ? (1 << 21) : (0 << 21);
-}
const uint32_t set = (tmp >> 22) & 3;
const uint32_t addr_mask = m_ram_set_mask[set];
const uint32_t masked_addr = m_ram_set_base[set] + (tmp & addr_mask);
+if (!entry.uncached)
+{
+if (cache_fetch(entry, offset, masked_addr, cached_data))
+{
+return cached_data;
+}
+}
//if (m_log_mem)
-//logerror("RAM read: %08x (%08x)\n", offset << 2, tmp << 2);
+//logerror("RAM read: %08x = %08x (%08x, %08x)\n", offset << 2, m_ram_ptr[masked_addr], tmp << 2, masked_addr);
return m_ram_ptr[masked_addr];
}
else if (tmp >= 0x4000000 >> 2 && tmp < 0x10000000 >> 2)
@@ -626,6 +695,10 @@ uint32_t sun4_mmu_base_device::insn_data_r(const uint32_t offset, const uint32_t
{
if (!machine().side_effects_disabled())
{
+if (cache_fetch(entry, offset, 0, cached_data))
+{
+return cached_data;
+}
LOGMASKED(LOG_INVALID_PTE, "read invalid PTE %d (%08x), %08x & %08x, PC=%08x\n", entry_index, page_entry_to_uint(entry_index), offset << 2, mem_mask, m_cpu->pc());
m_host->set_mae();
m_buserr[0] |= 0x80; // invalid PTE
@@ -697,13 +770,29 @@ void sun4_mmu_base_device::insn_data_w(const uint32_t offset, const uint32_t dat
if (ACCESSING_BITS_0_7)
m_parity_err |= (1 << 3);
}
-m_cachetags[(offset >> 4) & m_cache_mask] |= (1 << 19);
const uint32_t set = (tmp >> 22) & 3;
const uint32_t addr_mask = m_ram_set_mask[set];
const uint32_t masked_addr = m_ram_set_base[set] + (tmp & addr_mask);
+if (!entry.uncached)
+{
+const uint32_t cache_entry = (offset >> 3) & m_cache_tag_mask;
+if ((m_cachetags[cache_entry] & (1 << 19)) != 0)
+{
+if ((m_cachetags[cache_entry] & 0xfffc) == ((offset >> 12) & 0xfffc))
+{
+logerror("Cache write: %08x (%08x) = %08x\n", (offset & m_cache_mask) << 2, offset, data);
+COMBINE_DATA((&m_cachedata[0] + (offset & m_cache_mask)));
+}
+else
+{
+logerror("Cache invalidate\n");
+m_cachetags[cache_entry] &= ~(1 << 19);
+}
+}
+}
COMBINE_DATA((m_ram_ptr + masked_addr));
//if (m_log_mem)
-//logerror("RAM write: %08x (%08x) = %08x\n", offset << 2, tmp << 2, data);
+//logerror("RAM write: %08x (%08x, %08x) = %08x (setting cache entry %08x)\n", offset << 2, tmp << 2, masked_addr, data, offset & m_cache_mask);
}
else if (tmp >= 0x4000000 >> 2 && tmp < 0x10000000 >> 2)
{


@@ -73,6 +73,19 @@ public:
protected:
sun4_mmu_base_device(const machine_config &mconfig, device_type type, const char *tag, device_t *owner, uint32_t clock);
+struct page_entry
+{
+uint32_t valid;
+uint32_t writable;
+uint32_t supervisor;
+uint32_t uncached;
+uint32_t accessed;
+uint32_t modified;
+uint32_t page;
+uint8_t type;
+uint8_t pad[3];
+};
static const device_timer_id TIMER_RESET = 0;
virtual void device_start() override;
@@ -82,8 +95,10 @@ protected:
uint32_t page_entry_to_uint(uint32_t index);
void merge_page_entry(uint32_t index, uint32_t data, uint32_t mem_mask);
-uint32_t cache_flush_r();
-void cache_flush_w();
+bool cache_fetch(page_entry &entry, uint32_t vaddr, uint32_t paddr, uint32_t &cached_data);
+void segment_flush_w(const uint32_t vaddr);
+void context_flush_w(const uint32_t vaddr);
+void page_flush_w(const uint32_t vaddr);
uint32_t system_r(const uint32_t offset, const uint32_t mem_mask);
void system_w(const uint32_t offset, const uint32_t data, const uint32_t mem_mask);
uint32_t segment_map_r(const uint32_t offset, const uint32_t mem_mask);
@@ -113,19 +128,6 @@ protected:
PM_MODIFIED = 0x01000000 // modified flag
};
-struct page_entry
-{
-uint32_t valid;
-uint32_t writable;
-uint32_t supervisor;
-uint32_t uncached;
-uint32_t accessed;
-uint32_t modified;
-uint32_t page;
-uint8_t type;
-uint8_t pad[3];
-};
required_device<cpu_device> m_cpu;
required_device<ram_device> m_ram;
required_memory_region m_rom;
@@ -172,6 +174,7 @@ protected:
uint32_t m_seg_entry_mask;
uint32_t m_page_entry_mask;
uint32_t m_cache_mask;
+uint32_t m_cache_tag_mask;
uint32_t m_ram_set_mask[4]; // Used for mirroring within 4 megabyte sets
uint32_t m_ram_set_base[4];
uint32_t m_populated_ram_words;