sharc: Rewrote DMA handling to use timers.

commit 087fd08912
parent b60c2477b5
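
Before this change the core counted down a single global dmaop_cycles value inside CPU_EXECUTE; after it, each of the 12 channels carries its own DMA_OP state and an emu_timer, armed for src_count / 4 core cycles when a transfer is scheduled, with sharc_dma_callback finishing the transfer and latching the DMA interrupt. The standalone sketch below models that flow in plain C++; it is not MAME code, and every type, function, and constant in it is illustrative only.

// Rough model of the per-channel, timer-driven scheme (plain C++, not MAME code).
#include <cstdint>
#include <cstdio>

struct DmaChannelModel
{
    uint32_t src = 0, dst = 0;
    int32_t  src_count = 0;
    bool     active = false;
    double   completes_at = 0.0;    // stand-in for the channel's emu_timer
};

struct SharcDmaModel
{
    double clock_hz = 0.0;          // core clock used to turn cycles into time
    double now = 0.0;               // current emulated time, in seconds
    uint32_t irptl = 0;             // interrupt latch register
    DmaChannelModel ch[12];

    // Mirrors schedule_dma_op(): mark the channel active and arm its timer
    // for src_count / 4 cycles, the cost model used by the commit.
    void schedule(int channel, uint32_t src, uint32_t dst, int32_t src_count)
    {
        DmaChannelModel &c = ch[channel];
        c.src = src;
        c.dst = dst;
        c.src_count = src_count;
        c.active = true;
        int cycles = src_count / 4;
        c.completes_at = now + cycles / clock_hz;   // cycles_to_attotime() equivalent
    }

    // Mirrors sharc_dma_callback(): finish the transfer and latch the
    // channel's DMA interrupt bit (offset by 10, as in the diff).
    void timer_fired(int channel)
    {
        // ...the word-by-word copy would happen here...
        irptl |= 1u << (channel + 10);
        ch[channel].active = false;
    }
};

int main()
{
    SharcDmaModel dsp;
    dsp.clock_hz = 36000000.0;      // illustrative clock, not a real board value
    dsp.schedule(6, 0x20000, 0x0000, 0x1000);
    std::printf("DMA6 completes %.3f us after scheduling\n",
                (dsp.ch[6].completes_at - dsp.now) * 1e6);
    dsp.timer_fired(6);
    std::printf("IRPTL = %08X, channel active = %d\n",
                (unsigned)dsp.irptl, (int)dsp.ch[6].active);
    return 0;
}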
@@ -73,6 +73,21 @@ typedef struct
     UINT32 loop_type;
 } LADDR;
 
+typedef struct
+{
+    UINT32 src;
+    UINT32 dst;
+    UINT32 chain_ptr;
+    INT32 src_modifier;
+    INT32 dst_modifier;
+    INT32 src_count;
+    INT32 dst_count;
+    INT32 pmode;
+    INT32 chained_direction;
+    emu_timer *timer;
+    bool active;
+} DMA_OP;
+
 typedef struct _SHARC_REGS SHARC_REGS;
 struct _SHARC_REGS
 {
@@ -147,17 +162,8 @@ struct _SHARC_REGS
 
     SHARC_BOOT_MODE boot_mode;
 
-    UINT32 dmaop_src;
-    UINT32 dmaop_dst;
-    UINT32 dmaop_chain_ptr;
-    INT32 dmaop_src_modifier;
-    INT32 dmaop_dst_modifier;
-    INT32 dmaop_src_count;
-    INT32 dmaop_dst_count;
-    INT32 dmaop_pmode;
-    INT32 dmaop_cycles;
-    INT32 dmaop_channel;
-    INT32 dmaop_chained_direction;
+    DMA_OP dma_op[12];
+    UINT32 dma_status;
 
     INT32 interrupt_active;
 
@@ -260,12 +266,7 @@ static UINT32 sharc_iop_r(SHARC_REGS *cpustate, UINT32 address)
 
         case 0x37:      // DMA status
         {
-            UINT32 r = 0;
-            if (cpustate->dmaop_cycles > 0)
-            {
-                r |= 1 << cpustate->dmaop_channel;
-            }
-            return r;
+            return cpustate->dma_status;
         }
         default:    fatalerror("sharc_iop_r: Unimplemented IOP reg %02X at %08X\n", address, cpustate->pc);
     }
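
With the per-channel rewrite, a read of IOP register 0x37 simply returns dma_status, a busy mask with one bit per channel that the schedule functions set and dma_op() clears when a transfer completes. A minimal sketch of that bookkeeping (plain C++; the helper names are invented for illustration):

#include <cassert>
#include <cstdint>

static uint32_t dma_status = 0;   // one busy bit per DMA channel

// set when a transfer is scheduled (schedule_dma_op / schedule_chained_dma_op)
static void dma_set_busy(int channel)   { dma_status |=  (1u << channel); }
// cleared when the transfer completes (end of dma_op)
static void dma_clear_busy(int channel) { dma_status &= ~(1u << channel); }
// what a read of IOP register 0x37 now returns
static uint32_t iop_read_dma_status()   { return dma_status; }

int main()
{
    dma_set_busy(6);
    dma_set_busy(7);
    assert(iop_read_dma_status() == 0xC0);   // channels 6 and 7 busy
    dma_clear_busy(6);
    assert(iop_read_dma_status() == 0x80);   // only channel 7 still busy
    return 0;
}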
@@ -292,7 +293,6 @@ static void sharc_iop_w(SHARC_REGS *cpustate, UINT32 address, UINT32 data)
         case 0x1c:
         {
             cpustate->dma[6].control = data;
-            //add_iop_write_latency_effect(cpustate, 0x1c, data, 1);
             sharc_iop_delayed_w(cpustate, 0x1c, data, 1);
             break;
         }
@@ -312,7 +312,6 @@ static void sharc_iop_w(SHARC_REGS *cpustate, UINT32 address, UINT32 data)
         case 0x1d:
         {
             cpustate->dma[7].control = data;
-            //add_iop_write_latency_effect(cpustate, 0x1d, data, 30);
             sharc_iop_delayed_w(cpustate, 0x1d, data, 30);
             break;
         }
@@ -438,6 +437,12 @@ static CPU_INIT( sharc )
 
     cpustate->delayed_iop_timer = device->machine().scheduler().timer_alloc(FUNC(sharc_iop_delayed_write_callback), cpustate);
 
+    for (int i=0; i < 12; i++)
+    {
+        cpustate->dma_op[i].active = false;
+        cpustate->dma_op[i].timer = device->machine().scheduler().timer_alloc(FUNC(sharc_dma_callback), cpustate);
+    }
+
     device->save_item(NAME(cpustate->pc));
     device->save_pointer(NAME(&cpustate->r[0].r), ARRAY_LENGTH(cpustate->r));
     device->save_pointer(NAME(&cpustate->reg_alt[0].r), ARRAY_LENGTH(cpustate->reg_alt));
@@ -523,17 +528,21 @@ static CPU_INIT( sharc )
     device->save_item(NAME(cpustate->irq_active));
     device->save_item(NAME(cpustate->active_irq_num));
 
-    device->save_item(NAME(cpustate->dmaop_src));
-    device->save_item(NAME(cpustate->dmaop_dst));
-    device->save_item(NAME(cpustate->dmaop_chain_ptr));
-    device->save_item(NAME(cpustate->dmaop_src_modifier));
-    device->save_item(NAME(cpustate->dmaop_dst_modifier));
-    device->save_item(NAME(cpustate->dmaop_src_count));
-    device->save_item(NAME(cpustate->dmaop_dst_count));
-    device->save_item(NAME(cpustate->dmaop_pmode));
-    device->save_item(NAME(cpustate->dmaop_cycles));
-    device->save_item(NAME(cpustate->dmaop_channel));
-    device->save_item(NAME(cpustate->dmaop_chained_direction));
+    for (saveindex = 0; saveindex < ARRAY_LENGTH(cpustate->dma_op); saveindex++)
+    {
+        device->save_item(NAME(cpustate->dma_op[saveindex].src), saveindex);
+        device->save_item(NAME(cpustate->dma_op[saveindex].dst), saveindex);
+        device->save_item(NAME(cpustate->dma_op[saveindex].chain_ptr), saveindex);
+        device->save_item(NAME(cpustate->dma_op[saveindex].src_modifier), saveindex);
+        device->save_item(NAME(cpustate->dma_op[saveindex].dst_modifier), saveindex);
+        device->save_item(NAME(cpustate->dma_op[saveindex].src_count), saveindex);
+        device->save_item(NAME(cpustate->dma_op[saveindex].dst_count), saveindex);
+        device->save_item(NAME(cpustate->dma_op[saveindex].pmode), saveindex);
+        device->save_item(NAME(cpustate->dma_op[saveindex].chained_direction), saveindex);
+        device->save_item(NAME(cpustate->dma_op[saveindex].active), saveindex);
+    }
+
+    device->save_item(NAME(cpustate->dma_status));
 
     device->save_item(NAME(cpustate->interrupt_active));
 
@@ -571,10 +580,9 @@ static CPU_RESET( sharc )
             cpustate->dma[6].control = 0x2a1;
 
             sharc_dma_exec(cpustate, 6);
-            dma_op(cpustate, cpustate->dmaop_src, cpustate->dmaop_dst, cpustate->dmaop_src_modifier, cpustate->dmaop_dst_modifier,
-                cpustate->dmaop_src_count, cpustate->dmaop_dst_count, cpustate->dmaop_pmode);
-            cpustate->dmaop_cycles = 0;
+            dma_op(cpustate, 6);
+            cpustate->dma_op[6].timer->adjust(attotime::never, 0);
 
             break;
         }
 
@@ -679,21 +687,6 @@ static CPU_EXECUTE( sharc )
 
     if (cpustate->idle && cpustate->irq_active == 0)
     {
-        // handle pending DMA transfers
-        if (cpustate->dmaop_cycles > 0)
-        {
-            cpustate->dmaop_cycles -= cpustate->icount;
-            if (cpustate->dmaop_cycles <= 0)
-            {
-                cpustate->dmaop_cycles = 0;
-                dma_op(cpustate, cpustate->dmaop_src, cpustate->dmaop_dst, cpustate->dmaop_src_modifier, cpustate->dmaop_dst_modifier, cpustate->dmaop_src_count, cpustate->dmaop_dst_count, cpustate->dmaop_pmode);
-                if (cpustate->dmaop_chain_ptr != 0)
-                {
-                    schedule_chained_dma_op(cpustate, cpustate->dmaop_channel, cpustate->dmaop_chain_ptr, cpustate->dmaop_chained_direction);
-                }
-            }
-        }
-
         cpustate->icount = 0;
         debugger_instruction_hook(device, cpustate->daddr);
     }
@@ -790,28 +783,6 @@ static CPU_EXECUTE( sharc )
             }
         }
 
-        // DMA transfer
-        if (cpustate->dmaop_cycles > 0)
-        {
-            --cpustate->dmaop_cycles;
-            if (cpustate->dmaop_cycles <= 0)
-            {
-                cpustate->irptl |= (1 << (cpustate->dmaop_channel+10));
-
-                /* DMA interrupt */
-                if (cpustate->imask & (1 << (cpustate->dmaop_channel+10)))
-                {
-                    cpustate->irq_active |= 1 << (cpustate->dmaop_channel+10);
-                }
-
-                dma_op(cpustate, cpustate->dmaop_src, cpustate->dmaop_dst, cpustate->dmaop_src_modifier, cpustate->dmaop_dst_modifier, cpustate->dmaop_src_count, cpustate->dmaop_dst_count, cpustate->dmaop_pmode);
-                if (cpustate->dmaop_chain_ptr != 0)
-                {
-                    schedule_chained_dma_op(cpustate, cpustate->dmaop_channel, cpustate->dmaop_chain_ptr, cpustate->dmaop_chained_direction);
-                }
-            }
-        }
-
         --cpustate->icount;
     };
 }
@@ -19,59 +19,79 @@ static void schedule_chained_dma_op(SHARC_REGS *cpustate, int channel, UINT32 dm
     UINT32 ext_modifier = dm_read32(cpustate, op_ptr - 6);
     UINT32 ext_count = dm_read32(cpustate, op_ptr - 7);
 
-    if (cpustate->dmaop_cycles > 0)
+    if (cpustate->dma_op[channel].active)
     {
         fatalerror("schedule_chained_dma_op: DMA operation already scheduled at %08X!\n", cpustate->pc);
     }
 
     if (chained_direction)      // Transmit to external
     {
-        cpustate->dmaop_dst = ext_index;
-        cpustate->dmaop_dst_modifier = ext_modifier;
-        cpustate->dmaop_dst_count = ext_count;
-        cpustate->dmaop_src = int_index;
-        cpustate->dmaop_src_modifier = int_modifier;
-        cpustate->dmaop_src_count = int_count;
+        cpustate->dma_op[channel].dst = ext_index;
+        cpustate->dma_op[channel].dst_modifier = ext_modifier;
+        cpustate->dma_op[channel].dst_count = ext_count;
+        cpustate->dma_op[channel].src = int_index;
+        cpustate->dma_op[channel].src_modifier = int_modifier;
+        cpustate->dma_op[channel].src_count = int_count;
     }
     else                        // Receive from external
     {
-        cpustate->dmaop_src = ext_index;
-        cpustate->dmaop_src_modifier = ext_modifier;
-        cpustate->dmaop_src_count = ext_count;
-        cpustate->dmaop_dst = int_index;
-        cpustate->dmaop_dst_modifier = int_modifier;
-        cpustate->dmaop_dst_count = int_count;
+        cpustate->dma_op[channel].src = ext_index;
+        cpustate->dma_op[channel].src_modifier = ext_modifier;
+        cpustate->dma_op[channel].src_count = ext_count;
+        cpustate->dma_op[channel].dst = int_index;
+        cpustate->dma_op[channel].dst_modifier = int_modifier;
+        cpustate->dma_op[channel].dst_count = int_count;
     }
 
-    cpustate->dmaop_pmode = 0;
-    cpustate->dmaop_channel = channel;
-    cpustate->dmaop_cycles = cpustate->dmaop_src_count / 4;
-    cpustate->dmaop_chain_ptr = chain_ptr;
-    cpustate->dmaop_chained_direction = chained_direction;
+    cpustate->dma_op[channel].pmode = 0;
+    cpustate->dma_op[channel].chain_ptr = chain_ptr;
+    cpustate->dma_op[channel].chained_direction = chained_direction;
+
+    cpustate->dma_op[channel].active = true;
+
+    int cycles = cpustate->dma_op[channel].src_count / 4;
+    cpustate->dma_op[channel].timer->adjust(cpustate->device->cycles_to_attotime(cycles), channel);
+
+    // enable busy flag
+    cpustate->dma_status |= (1 << channel);
 }
 
 static void schedule_dma_op(SHARC_REGS *cpustate, int channel, UINT32 src, UINT32 dst, int src_modifier, int dst_modifier, int src_count, int dst_count, int pmode)
 {
-    if (cpustate->dmaop_cycles > 0)
+    if (cpustate->dma_op[channel].active)
     {
         fatalerror("schedule_dma_op: DMA operation already scheduled at %08X!\n", cpustate->pc);
     }
 
-    cpustate->dmaop_channel = channel;
-    cpustate->dmaop_src = src;
-    cpustate->dmaop_dst = dst;
-    cpustate->dmaop_src_modifier = src_modifier;
-    cpustate->dmaop_dst_modifier = dst_modifier;
-    cpustate->dmaop_src_count = src_count;
-    cpustate->dmaop_dst_count = dst_count;
-    cpustate->dmaop_pmode = pmode;
-    cpustate->dmaop_chain_ptr = 0;
-    cpustate->dmaop_cycles = src_count / 4;
+    cpustate->dma_op[channel].src = src;
+    cpustate->dma_op[channel].dst = dst;
+    cpustate->dma_op[channel].src_modifier = src_modifier;
+    cpustate->dma_op[channel].dst_modifier = dst_modifier;
+    cpustate->dma_op[channel].src_count = src_count;
+    cpustate->dma_op[channel].dst_count = dst_count;
+    cpustate->dma_op[channel].pmode = pmode;
+    cpustate->dma_op[channel].chain_ptr = 0;
+
+    cpustate->dma_op[channel].active = true;
+
+    int cycles = src_count / 4;
+    cpustate->dma_op[channel].timer->adjust(cpustate->device->cycles_to_attotime(cycles), channel);
+
+    // enable busy flag
+    cpustate->dma_status |= (1 << channel);
 }
 
-static void dma_op(SHARC_REGS *cpustate, UINT32 src, UINT32 dst, int src_modifier, int dst_modifier, int src_count, int dst_count, int pmode)
+static void dma_op(SHARC_REGS *cpustate, int channel)
 {
     int i;
+    UINT32 src = cpustate->dma_op[channel].src;
+    UINT32 dst = cpustate->dma_op[channel].dst;
+    int src_modifier = cpustate->dma_op[channel].src_modifier;
+    int dst_modifier = cpustate->dma_op[channel].dst_modifier;
+    int src_count = cpustate->dma_op[channel].src_count;
+    //int dst_count = cpustate->dma_op[channel].dst_count;
+    int pmode = cpustate->dma_op[channel].pmode;
+
     //printf("dma_op: %08X, %08X, %08X, %08X, %08X, %08X, %d\n", src, dst, src_modifier, dst_modifier, src_count, dst_count, pmode);
 
     switch (pmode)
@@ -124,16 +144,21 @@ static void dma_op(SHARC_REGS *cpustate, UINT32 src, UINT32 dst, int src_modifie
         }
     }
 
-    if (cpustate->dmaop_channel == 6)
+    if (channel == 6)
     {
-        cpustate->irptl |= (1 << (cpustate->dmaop_channel+10));
+        cpustate->irptl |= (1 << (channel+10));
 
         /* DMA interrupt */
-        if (cpustate->imask & (1 << (cpustate->dmaop_channel+10)))
+        if (cpustate->imask & (1 << (channel+10)))
         {
-            cpustate->irq_active |= 1 << (cpustate->dmaop_channel+10);
+            cpustate->irq_active |= 1 << (channel+10);
         }
     }
 
+    // clear busy flag
+    cpustate->dma_status &= ~(1 << channel);
+
+    cpustate->dma_op[channel].active = false;
 }
 
 static void sharc_dma_exec(SHARC_REGS *cpustate, int channel)
@@ -202,3 +227,25 @@ static void sharc_dma_exec(SHARC_REGS *cpustate, int channel)
         schedule_dma_op(cpustate, channel, src, dst, src_modifier, dst_modifier, src_count, dst_count, pmode);
     }
 }
+
+static TIMER_CALLBACK(sharc_dma_callback)
+{
+    SHARC_REGS *cpustate = (SHARC_REGS *)ptr;
+    int channel = param;
+
+    cpustate->dma_op[channel].timer->adjust(attotime::never, 0);
+
+    cpustate->irptl |= (1 << (channel+10));
+
+    // DMA interrupt
+    if (cpustate->imask & (1 << (channel+10)))
+    {
+        cpustate->irq_active |= 1 << (channel+10);
+    }
+
+    dma_op(cpustate, channel);
+    if (cpustate->dma_op[channel].chain_ptr != 0)
+    {
+        schedule_chained_dma_op(cpustate, channel, cpustate->dma_op[channel].chain_ptr, cpustate->dma_op[channel].chained_direction);
+    }
+}
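
One consequence of the rewrite, visible in the new sharc_dma_callback above, is that chained DMA now unwinds as a sequence of timer events: each completion checks chain_ptr and, if it is non-zero, calls schedule_chained_dma_op() for the next descriptor. The sketch below models only that hand-off in plain C++; the descriptor layout is invented for illustration and is not the SHARC transfer-control-block format.

#include <cstdio>
#include <queue>
#include <vector>

// Invented, simplified "descriptor": how many words to move and which
// descriptor (if any) follows.  The real core reads its transfer control
// block out of data memory through the chain pointer.
struct Descriptor
{
    int src_count;
    int next;          // index of the next descriptor, -1 = end of chain
};

int main()
{
    std::vector<Descriptor> chain = { {0x400, 1}, {0x200, 2}, {0x100, -1} };
    std::queue<int> pending;      // stands in for the per-channel timer
    pending.push(0);              // first transfer scheduled by the driver

    while (!pending.empty())      // each iteration = one timer callback firing
    {
        int d = pending.front();
        pending.pop();

        int cycles = chain[d].src_count / 4;    // cost model from the diff
        std::printf("descriptor %d done after %d cycles, raising DMA irq\n",
                    d, cycles);

        if (chain[d].next >= 0)                 // chain_ptr != 0 in the diff
            pending.push(chain[d].next);        // schedule_chained_dma_op()
    }
    return 0;
}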