more ThreadSanitizer data race warning "fixes" (nw)

parent 953acc0a69
commit f563f3bbf2
@@ -19,6 +19,7 @@
 #include <stddef.h>
 #include <stdlib.h>
 #include <new>
+#include "eminline.h"


 //**************************************************************************
@@ -2418,7 +2419,7 @@ chd_error chd_file_compressor::compress_continue(double &progress, double &ratio

         // queue the next read
         for (curitem = startitem; curitem < enditem; curitem++)
-            m_work_item[curitem % WORK_BUFFER_HUNKS].m_status = WS_READING;
+            atomic_exchange32(&m_work_item[curitem % WORK_BUFFER_HUNKS].m_status, WS_READING);
         osd_work_item_queue(m_read_queue, async_read_static, this, WORK_ITEM_FLAG_AUTO_RELEASE);
         m_read_queue_offset += WORK_BUFFER_HUNKS * hunk_bytes() / 2;
     }
@@ -2489,7 +2490,7 @@ chd_error chd_file_compressor::compress_continue(double &progress, double &ratio
         } while (0);

         // reset the item and advance
-        item.m_status = WS_READY;
+        atomic_exchange32(&item.m_status, WS_READY);
         m_write_hunk++;

         // if we hit the end, finalize
@@ -2502,7 +2503,7 @@ chd_error chd_file_compressor::compress_continue(double &progress, double &ratio
         m_read_queue_offset = m_read_done_offset = 0;
         m_write_hunk = 0;
         for (int itemnum = 0; itemnum < WORK_BUFFER_HUNKS; itemnum++)
-            m_work_item[itemnum].m_status = WS_READY;
+            atomic_exchange32(&m_work_item[itemnum].m_status, WS_READY);
     }

     // wait for all reads to finish and if we're compressed, write the final SHA1 and map
@@ -2555,7 +2556,7 @@ void chd_file_compressor::async_walk_parent(work_item &item)
         item.m_hash[unit].m_crc16 = crc16_creator::simple(item.m_data + unit * unit_bytes(), hunk_bytes());
         item.m_hash[unit].m_sha1 = sha1_creator::simple(item.m_data + unit * unit_bytes(), hunk_bytes());
     }
-    item.m_status = WS_COMPLETE;
+    atomic_exchange32(&item.m_status, WS_COMPLETE);
 }


@@ -2583,12 +2584,13 @@ void chd_file_compressor::async_compress_hunk(work_item &item, int threadid)

     // find the best compression scheme, unless we already have a self or parent match
     // (note we may miss a self match from blocks not yet added, but this just results in extra work)
+    // TODO: data race
     if (m_current_map.find(item.m_hash[0].m_crc16, item.m_hash[0].m_sha1) == hashmap::NOT_FOUND &&
         m_parent_map.find(item.m_hash[0].m_crc16, item.m_hash[0].m_sha1) == hashmap::NOT_FOUND)
         item.m_compression = item.m_codecs->find_best_compressor(item.m_data, item.m_compressed, item.m_complen);

     // mark us complete
-    item.m_status = WS_COMPLETE;
+    atomic_exchange32(&item.m_status, WS_COMPLETE);
 }


@@ -2644,7 +2646,7 @@ void chd_file_compressor::async_read()
         UINT32 hunknum = curoffs / hunk_bytes();
         work_item &item = m_work_item[hunknum % WORK_BUFFER_HUNKS];
         assert(item.m_status == WS_READING);
-        item.m_status = WS_QUEUED;
+        atomic_exchange32(&item.m_status, WS_QUEUED);
         item.m_hunknum = hunknum;
         item.m_osd = osd_work_item_queue(m_work_queue, m_walking_parent ? async_walk_parent_static : async_compress_hunk_static, &item, 0);
     }
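The chd_file_compressor hunks above all apply the same change: every plain store to a work item's volatile m_status field is routed through atomic_exchange32() from the newly included eminline.h, so the status transition becomes an atomic exchange instead of an ordinary write that ThreadSanitizer reports as racing with readers on other threads. A rough standalone equivalent of the pattern in portable C++11, using std::atomic in place of the eminline helper (WorkItem and the Status values below are illustrative, not MAME's declarations):

// illustrative sketch only: the pattern above, with std::atomic standing in
// for MAME's volatile INT32 + atomic_exchange32() pair
#include <atomic>
#include <cassert>
#include <cstdint>
#include <thread>

enum Status : std::int32_t { WS_READY, WS_READING, WS_QUEUED, WS_COMPLETE };

struct WorkItem
{
    // a plain store to a "volatile Status" here is what ThreadSanitizer
    // reports as a data race when another thread polls the field
    std::atomic<std::int32_t> m_status{WS_READY};
};

int main()
{
    WorkItem item;
    item.m_status.exchange(WS_READING);   // analogue of atomic_exchange32(&item.m_status, WS_READING)

    std::thread worker([&item] {
        // ... hash/compress the hunk here, then publish completion atomically
        item.m_status.exchange(WS_COMPLETE);
    });

    // consumer: poll until the worker publishes WS_COMPLETE
    while (item.m_status.load() != WS_COMPLETE)
        std::this_thread::yield();

    worker.join();
    assert(item.m_status.load() == WS_COMPLETE);
    return 0;
}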
@@ -530,7 +530,9 @@ private:

         osd_work_item *      m_osd;          // OSD work item running on this block
         chd_file_compressor *m_compressor;   // pointer back to the compressor
-        volatile work_status m_status;       // current status of this item
+        // TODO: had to change this to be able to use atomic_* functions on this
+        //volatile work_status m_status;     // current status of this item
+        volatile INT32       m_status;       // current status of this item
         UINT32               m_hunknum;      // number of the hunk we're working on
         UINT8 *              m_data;         // pointer to the data we are working on
         UINT8 *              m_compressed;   // pointer to the compressed data
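The header hunk above widens the field from the work_status enum to a plain 32-bit integer because, per the added TODO, the eminline atomic_* helpers operate on INT32 and an enum-typed member cannot be passed to them directly. A small compilable sketch of that constraint follows; the atomic_exchange32 stand-in only mirrors the assumed shape of the real helper and is deliberately not atomic:

// illustrative sketch: why m_status became a volatile INT32. The eminline
// helpers take "INT32 volatile *", so the member's declared type has to match.
#include <cstdint>
#include <cstdio>

typedef std::int32_t INT32;     // MAME-style alias, assumed for this sketch

enum work_status { WS_READY, WS_READING, WS_QUEUED, WS_COMPLETE };

static INT32 atomic_exchange32(INT32 volatile *ptr, INT32 exchange)
{
    INT32 oldval = *ptr;        // placeholder: the real helper does this atomically
    *ptr = exchange;
    return oldval;
}

struct work_item
{
    // volatile work_status m_status;   // old form: &m_status is work_status volatile *,
    //                                  // which does not convert to INT32 volatile *
    volatile INT32 m_status;            // new form: matches the helper's parameter type
};

int main()
{
    work_item item = { WS_READY };
    atomic_exchange32(&item.m_status, WS_QUEUED);
    std::printf("status = %d\n", static_cast<int>(item.m_status));   // prints "status = 2"
    return 0;
}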
@@ -436,7 +436,7 @@ osd_work_item *osd_work_item_queue_multiple(osd_work_queue *queue, osd_work_call
         item->param = parambase;
         item->result = NULL;
         item->flags = flags;
-        item->done = FALSE;
+        atomic_exchange32(&item->done, FALSE);

         // advance to the next
         lastitem = item;
@@ -502,7 +502,11 @@ int osd_work_item_wait(osd_work_item *item, osd_ticks_t timeout)

     // if we don't have an event, create one
     if (item->event == NULL)
+    {
+        INT32 lockslot = osd_scalable_lock_acquire(item->queue->lock);
         item->event = osd_event_alloc(TRUE, FALSE);     // manual reset, not signalled
+        osd_scalable_lock_release(item->queue->lock, lockslot);
+    }
     else
         osd_event_reset(item->event);

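In osd_work_item_wait, the lazily created item->event is now allocated while holding the queue's scalable lock, closing the window in which a waiting thread and the completing worker could both observe a NULL event pointer. A hedged sketch of the same lazy-creation-under-a-lock idea in standalone C++, with std::mutex and std::condition_variable standing in for the OSD lock and event (all names below are illustrative, not OSD APIs):

// waiter side of the fix, sketched with standard library primitives
#include <condition_variable>
#include <memory>
#include <mutex>

struct Event
{
    std::mutex m;
    std::condition_variable cv;
    bool signalled = false;
};

struct WorkItem
{
    std::mutex creation_lock;       // plays the role of item->queue->lock
    std::unique_ptr<Event> event;   // lazily created, like item->event
};

// Create the event if it does not exist yet. Without the lock, a waiter and
// the completing worker could both observe event == nullptr and race on
// installing/reading it -- the window the hunk above closes.
Event &get_or_create_event(WorkItem &item)
{
    std::lock_guard<std::mutex> guard(item.creation_lock);
    if (!item.event)
        item.event = std::unique_ptr<Event>(new Event);
    return *item.event;
}

int main()
{
    WorkItem item;
    Event &ev = get_or_create_event(item);   // first call allocates
    get_or_create_event(item);               // second call reuses the same event
    {
        std::lock_guard<std::mutex> g(ev.m);
        ev.signalled = true;                 // rough analogue of osd_event_set()
    }
    ev.cv.notify_all();
    return 0;
}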
@@ -719,13 +723,19 @@ static void worker_thread_process(osd_work_queue *queue, work_thread_info *threa
             osd_work_item_release(item);

         // set the result and signal the event
-        else if (item->event != NULL)
+        else
         {
-            osd_event_set(item->event);
-            add_to_stat(&item->queue->setevents, 1);
+            INT32 lockslot = osd_scalable_lock_acquire(item->queue->lock);
+            if (item->event != NULL)
+            {
+                osd_event_set(item->event);
+                add_to_stat(&item->queue->setevents, 1);
+            }
+            osd_scalable_lock_release(item->queue->lock, lockslot);
         }

         // if we removed an item and there's still work to do, bump the stats
+        // TODO: data race
         if (queue->list != NULL)
             add_to_stat(&queue->extraitems, 1);
     }
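The worker_thread_process hunk above is the other half of that fix: rather than testing item->event outside any lock, the worker now takes the same queue lock, re-checks the pointer, and only then signals the event; the unsynchronized read of queue->list is left flagged with a TODO. A sketch of the signalling side, under the same assumptions and illustrative names as the previous block:

// worker side of the fix, sketched with standard library primitives
#include <condition_variable>
#include <memory>
#include <mutex>

struct Event
{
    std::mutex m;
    std::condition_variable cv;
    bool signalled = false;
};

struct WorkItem
{
    std::mutex creation_lock;       // plays the role of item->queue->lock
    std::unique_ptr<Event> event;   // a waiter may install this at any moment
};

void complete_item(WorkItem &item)
{
    // ... the actual work callback would have run by this point ...

    // Take the creation lock before touching item.event: the unlocked
    // "else if (item->event != NULL)" test is exactly what raced with a
    // waiter allocating the event at the same time.
    std::lock_guard<std::mutex> guard(item.creation_lock);
    if (item.event)
    {
        std::lock_guard<std::mutex> g(item.event->m);
        item.event->signalled = true;    // rough analogue of osd_event_set()
        item.event->cv.notify_all();
    }
    // if no event exists yet there is nothing to signal; in the OSD code the
    // waiter then falls back to the spin path ("shouldn't ever really happen")
}

int main()
{
    WorkItem item;
    complete_item(item);             // no event yet: nothing to signal
    item.event.reset(new Event);     // pretend a waiter installed one
    complete_item(item);             // event present: signalled under the lock
    return 0;
}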
@@ -446,7 +446,7 @@ osd_work_item *osd_work_item_queue_multiple(osd_work_queue *queue, osd_work_call
         item->param = parambase;
         item->result = NULL;
         item->flags = flags;
-        item->done = FALSE;
+        atomic_exchange32(&item->done, FALSE);

         // advance to the next
         lastitem = item;
@@ -509,9 +509,13 @@ int osd_work_item_wait(osd_work_item *item, osd_ticks_t timeout)

     // if we don't have an event, create one
     if (item->event == NULL)
+    {
+        INT32 lockslot = osd_scalable_lock_acquire(item->queue->lock);
         item->event = osd_event_alloc(TRUE, FALSE);     // manual reset, not signalled
+        osd_scalable_lock_release(item->queue->lock, lockslot);
+    }
     else
-        osd_event_reset(item->event);
+        osd_event_reset(item->event);

     // if we don't have an event, we need to spin (shouldn't ever really happen)
     if (item->event == NULL)
@@ -710,13 +714,19 @@ static void worker_thread_process(osd_work_queue *queue, work_thread_info *threa
             osd_work_item_release(item);

         // set the result and signal the event
-        else if (item->event != NULL)
+        else
         {
-            osd_event_set(item->event);
-            add_to_stat(&item->queue->setevents, 1);
+            INT32 lockslot = osd_scalable_lock_acquire(item->queue->lock);
+            if (item->event != NULL)
+            {
+                osd_event_set(item->event);
+                add_to_stat(&item->queue->setevents, 1);
+            }
+            osd_scalable_lock_release(item->queue->lock, lockslot);
         }

         // if we removed an item and there's still work to do, bump the stats
+        // TODO: data race
         if (queue->list != NULL)
             add_to_stat(&queue->extraitems, 1);
     }