"Concurrent (or re-entrant) enqueue or dequeue operation detected (only one thread at a time may hold the producer or consumer role)");
assert(!inSection&&"Concurrent (or re-entrant) enqueue or dequeue operation detected (only one thread at a time may hold the producer or consumer role)");
inSection=true;
inSection=true;
}
}
AE_NO_TSAN~ReentrantGuard(){inSection=false;}
AE_NO_TSAN~ReentrantGuard(){inSection=false;}
private:
private:
ReentrantGuard&operator=(ReentrantGuardconst&);
ReentrantGuard&operator=(ReentrantGuardconst&);
private:
weak_atomic<bool>&inSection;
};
private:
weak_atomic<bool>&inSection;
};
#endif
#endif
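
	// Explanatory note (not part of the original source): in debug builds the queue
	// typically constructs a ReentrantGuard at the top of its enqueue and dequeue
	// paths, so any violation of the single-producer/single-consumer contract trips
	// the assert above. Roughly (member name illustrative):
	//
	//     bool try_dequeue(T& result) {
	//     #ifndef NDEBUG
	//         ReentrantGuard guard(this->dequeuing);   // only one consumer at a time
	//     #endif
	//         // ... actual dequeue logic ...
	//     }
	//
	// The guard disappears entirely in release builds, since the whole struct is
	// only compiled when NDEBUG is not defined.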

	struct Block
	{
		// Avoid false-sharing by putting highly contended variables on their own cache lines
		weak_atomic<size_t> front;	// (Atomic) Elements are read from here
		size_t localTail;			// An uncontended shadow copy of tail, owned by the consumer
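		// Explanatory note (not part of the original source): localTail lets the
		// consumer avoid re-reading the producer-owned atomic on every dequeue. It
		// compares front against the cached copy and only reloads the real tail when
		// the block appears empty, roughly:
		//
		//     if (block->front.load() == block->localTail)
		//         block->localTail = block->tail.load();   // refresh the shadow copy
		//
		// (Illustrative sketch; the actual implementation also involves memory fences.)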
...
@@ -691,73 +702,75 @@ namespace moodycamel {
		weak_atomic<size_t> tail;	// (Atomic) Elements are enqueued here

		char cachelineFiller1[MOODYCAMEL_CACHE_LINE_SIZE - sizeof(weak_atomic<size_t>) - sizeof(size_t)];	// next isn't very contended, but we don't want it on the same cache line as tail (which is)
		weak_atomic<Block*> next;	// (Atomic)

		char* data;		// Contents (on heap) are aligned to T's alignment

		const size_t sizeMask;
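		// Explanatory note (not part of the original source): because the block size
		// is a power of two, sizeMask == size - 1 and indices wrap around with a
		// cheap bitwise AND instead of a modulo, e.g.
		//
		//     size_t nextFront = (front + 1) & sizeMask;   // equivalent to (front + 1) % size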

		// size must be a power of two (and greater than 0)