        ASSERT(data->d_flagEnd == (i+1));
    }

    return 0;
}
// ----------------------------------------------------------------------------
// Test Case 3: Hard Contention Test: many threads - one QLock
//
// Concerns:
//   Verify that a single QLock allows only one thread at a time to
//   execute the critical region.
//
// Plan:
//   Set the global count to zero.  Create M threads.  Each of them
//   repeats N times the execution of the critical region:
//     - read the global count value and store it locally
//     - put a random number into the count
//     - sleep a random number of microseconds
//     - increment the original global count value by one and update
//       the global count with the incremented value
//   The execution of the critical region is protected by the global
//   QLock.  Before exiting, each thread waits on the barrier to be
//   synchronized with the other threads, acquires the global QLock,
//   and checks that the global count has a value equal to N*M, where
//   M is the number of threads and N is the number of iterations made
//   by each thread.
//   Join all threads and ensure that:
//     - the value of the count is still equal to N*M;
//     - the global QLock is immediately available, i.e., tryLock()
//       returns 0.
//
//   Repeat the above scenario several times, incrementing the number
//   of threads by one on each iteration.
// ----------------------------------------------------------------------------
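// The test functions below rely on the 'bslmt::QLockGuard' protocol: a
// guard may be constructed unassociated, bound to a 'bslmt::QLock' later
// via 'lock', released and re-acquired explicitly, and any lock still held
// is released by the guard's destructor.  A minimal sketch of that protocol
// follows (the function itself is illustrative only, not part of the test
// plan):

static void qlockGuardProtocolSketch(bslmt::QLock *qlock)
{
    bslmt::QLockGuard guard;  // not yet associated with any QLock

    guard.lock(qlock);        // acquire, queueing behind any other waiters
    guard.unlock();           // explicit release is allowed...
    guard.lock(qlock);        // ...and the same guard can be re-used
}                             // destructor releases the lock still held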
struct DataCase3 {
    int           d_numIter;  // number of iterations per thread (N)
    int           d_count;    // global count protected by the lock
    bslmt::QLock *d_qlock;    // QLock under test
    bslmt::Mutex *d_mutex;    // mutex used by the 'testCase3a' variant
};
void *testCase3(int threadNum, const MyTask& task)
{
    DataCase3 *data = reinterpret_cast<DataCase3 *>(task.arg());
    ASSERT(data->d_count == 0);

    Rand rand(threadNum);

    task.barrier()->wait();

    bslmt::QLockGuard guard;

    for (int i = 0; i < data->d_numIter; ++i) {
        // Critical region: associate the guard with the QLock and lock it.
        guard.lock(data->d_qlock);

        int original = data->d_count;

        // Sleeping here would make the test take too long; spin instead.
        //int sleepTime = rand.get() % 1000;
        //bslmt::ThreadUtil::microSleep(++sleepTime);

        for (int j = 0; j < 20; ++j) {
            data->d_count = rand.get();  // put random value
        }

        ++original;
        data->d_count = original;        // restore incremented value

        guard.unlock();
    }

    task.barrier()->wait();

    {
        guard.lock(data->d_qlock);
        ASSERT(data->d_count == data->d_numIter * task.numThreadsStarted());
    }

    // The QLock is unlocked automatically by the guard's destructor.
    return 0;
}
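// For reference, a sketch of how the plan's outer loop might drive
// 'testCase3'.  The 'MyTask' constructor and the 'start' and 'join' methods
// shown here are hypothetical stand-ins (only 'arg', 'barrier', and
// 'numThreadsStarted' appear in this section); 'BSLMT_QLOCK_INITIALIZER'
// statically initializes the QLock, and N = 1000 is chosen arbitrarily.

static void driveCase3()
{
    bslmt::QLock qlock = BSLMT_QLOCK_INITIALIZER;
    bslmt::Mutex mutex;

    for (int numThreads = 1; numThreads <= 10; ++numThreads) {
        DataCase3 data = { 1000, 0, &qlock, &mutex };

        MyTask task(testCase3, &data);  // hypothetical constructor
        task.start(numThreads);         // hypothetical: spawn M threads
        task.join();                    // hypothetical: wait for completion

        ASSERT(data.d_count == 1000 * numThreads);  // count == N*M
    }
}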
// Variant of test case 3 in which the critical region is protected by a
// 'bslmt::Mutex' instead of the QLock.

void *testCase3a(int threadNum, const MyTask& task)
{
    DataCase3 *data = reinterpret_cast<DataCase3 *>(task.arg());
    ASSERT(data->d_count == 0);

    Rand rand(threadNum);

    task.barrier()->wait();

    bslmt::QLockGuard guard;

    for (int i = 0; i < data->d_numIter; ++i) {
        // Critical region: protected by the mutex rather than the QLock.
        bslmt::LockGuard<bslmt::Mutex> mutexGuard(data->d_mutex);

        int original = data->d_count;

        for (int j = 0; j < 20; ++j) {
            data->d_count = rand.get();  // put random value
        }

        ++original;
        data->d_count = original;        // restore incremented value
    }

    task.barrier()->wait();

    {
        guard.lock(data->d_qlock);
        ASSERT(data->d_count == data->d_numIter * task.numThreadsStarted());
    }

    // The QLock is unlocked automatically by the guard's destructor.
    return 0;
}