/*
    Copyright 2005-2016 Intel Corporation.  All Rights Reserved.

    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
    you can redistribute it and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
    See the GNU General Public License for more details.  You should have received a copy of
    the GNU General Public License along with Threading Building Blocks; if not, write to the
    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

    As a special exception, you may use this file as part of a free software library without
    restriction.  Specifically, if other files instantiate templates or use macros or inline
    functions from this file, or you compile this file and link it with other files to produce
    an executable, this file does not by itself cause the resulting executable to be covered
    by the GNU General Public License.  This exception does not however invalidate any other
    reasons why the executable file might be covered by the GNU General Public License.
*/

#ifndef __TBB_condition_variable_H
#define __TBB_condition_variable_H

#if _WIN32||_WIN64
#include "../machine/windows_api.h"

namespace tbb {
namespace interface5 {
namespace internal {
struct condition_variable_using_event
{
    //! Event for blocking waiting threads.
    HANDLE event;
    //! Protects invariants involving n_waiters, release_count, and epoch.
    CRITICAL_SECTION mutex;
    //! Number of threads waiting on this condition variable
    int n_waiters;
    //! Number of threads remaining that should no longer wait on this condition variable.
    int release_count;
    //! To keep threads from waking up prematurely with earlier signals.
    unsigned epoch;
};
}}} // namespace tbb::interface5::internal

#ifndef CONDITION_VARIABLE_INIT
typedef void* CONDITION_VARIABLE;
typedef CONDITION_VARIABLE* PCONDITION_VARIABLE;
#endif

#else /* if not _WIN32||_WIN64 */
#include <errno.h> // some systems need it for ETIMEDOUT
#include <pthread.h>
#if __linux__
#include <ctime>
#else /* generic Unix */
#include <sys/time.h>
#endif
#endif /* _WIN32||_WIN64 */

#include "../tbb_stddef.h"
#include "../mutex.h"
#include "../tbb_thread.h"
#include "../tbb_exception.h"
#include "../tbb_profiling.h"

namespace tbb {

namespace interface5 {

// C++0x standard working draft 30.4.3
// Lock tag types
struct defer_lock_t { };   //! do not acquire ownership of the mutex
struct try_to_lock_t { };  //! try to acquire ownership of the mutex without blocking
struct adopt_lock_t { };   //! assume the calling thread has already obtained ownership of the mutex
const defer_lock_t defer_lock = {};
const try_to_lock_t try_to_lock = {};
const adopt_lock_t adopt_lock = {};
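
// Illustrative usage sketch (not part of the original header): the tag constants above
// select which constructor of the lock types defined below is invoked.  The mutex names
// (m1, m2) are invented for the example; adopt_lock similarly hands an already-locked
// mutex over to a lock object.
//
//     using namespace tbb::interface5;
//     tbb::mutex m1, m2;
//     unique_lock<tbb::mutex> l1( m1, defer_lock );    // bind to m1 but do not lock yet; l1.lock() acquires later
//     unique_lock<tbb::mutex> l2( m2, try_to_lock );   // lock m2 only if it is free; check l2.owns_lock()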

// C++0x standard working draft 30.4.3.1
//! lock_guard
template<typename M>
class lock_guard : tbb::internal::no_copy {
public:
    //! mutex type
    typedef M mutex_type;

    //! Constructor
    /** precondition: If mutex_type is not a recursive mutex, the calling thread
        does not own the mutex m. */
    explicit lock_guard(mutex_type& m) : pm(m) {m.lock();}

    //! Adopt_lock constructor
    /** precondition: the calling thread owns the mutex m. */
    lock_guard(mutex_type& m, adopt_lock_t) : pm(m) {}

    //! Destructor
    ~lock_guard() { pm.unlock(); }
private:
    mutex_type& pm;
};
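
// Illustrative usage sketch (not part of the original header): lock_guard provides scoped,
// exception-safe locking of any mutex type that exposes lock()/unlock().
//
//     using namespace tbb::interface5;
//     tbb::mutex m;
//     int counter = 0;
//     {
//         lock_guard<tbb::mutex> guard( m );  // locks m in the constructor
//         ++counter;                          // critical section
//     }                                       // m is unlocked in the destructor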

// C++0x standard working draft 30.4.3.2
//! unique_lock
template<typename M>
class unique_lock : tbb::internal::no_copy {
    friend class condition_variable;
public:
    typedef M mutex_type;

    // 30.4.3.2.1 construct/copy/destroy
    // NB: Without constructors that take an r-value reference to a unique_lock, the following constructor is of little use.
    //! Constructor
    /** postcondition: pm==0 && owns==false */
    unique_lock() : pm(NULL), owns(false) {}

    //! Constructor
    /** precondition: if mutex_type is not a recursive mutex, the calling thread
        does not own the mutex m.  If the precondition is not met, a deadlock occurs.
        postcondition: pm==&m and owns==true */
    explicit unique_lock(mutex_type& m) : pm(&m) {m.lock(); owns=true;}

    //! Defer_lock constructor
    /** postcondition: pm==&m and owns==false */
    unique_lock(mutex_type& m, defer_lock_t) : pm(&m), owns(false) {}

    //! Try_to_lock constructor
    /** precondition: if mutex_type is not a recursive mutex, the calling thread
        does not own the mutex m.  If the precondition is not met, a deadlock occurs.
        postcondition: pm==&m and owns==res where res is the value returned by
        the call to m.try_lock(). */
    unique_lock(mutex_type& m, try_to_lock_t) : pm(&m) {owns = m.try_lock();}

    //! Adopt_lock constructor
    /** precondition: the calling thread owns the mutex.  If it does not, mutex->unlock() would fail.
        postcondition: pm==&m and owns==true */
    unique_lock(mutex_type& m, adopt_lock_t) : pm(&m), owns(true) {}

    //! Timed unique_lock acquisition.
    /** To avoid requiring support for namespace chrono, this method deviates from the working draft in that
        it uses tbb::tick_count::interval_t to specify the time duration. */
    unique_lock(mutex_type& m, const tick_count::interval_t &i) : pm(&m) {owns = try_lock_for( i );}

#if __TBB_CPP11_RVALUE_REF_PRESENT
    //! Move constructor
    /** postconditions: pm == src_p.pm and owns == src_p.owns (where src_p is the state of src just prior to this
        construction), src.pm == 0 and src.owns == false. */
    unique_lock(unique_lock && src): pm(NULL), owns(false) {this->swap(src);}

    //! Move assignment
    /** effects: If owns, calls pm->unlock().
        Postconditions: pm == src_p.pm and owns == src_p.owns (where src_p is the state of src just prior to this
        assignment), src.pm == 0 and src.owns == false. */
    unique_lock& operator=(unique_lock && src) {
        if (owns)
            this->unlock();
        pm = NULL;
        this->swap(src);
        return *this;
    }
#endif // __TBB_CPP11_RVALUE_REF_PRESENT

    //! Destructor
    ~unique_lock() { if( owns ) pm->unlock(); }

    // 30.4.3.2.2 locking
    //! Lock the mutex and own it.
    void lock() {
        if( pm ) {
            if( !owns ) {
                pm->lock();
                owns = true;
            } else
                throw_exception_v4( tbb::internal::eid_possible_deadlock );
        } else
            throw_exception_v4( tbb::internal::eid_operation_not_permitted );
        __TBB_ASSERT( owns, NULL );
    }

    //! Try to lock the mutex.
    /** If successful, this lock takes ownership of the mutex; otherwise owns remains false. */
    bool try_lock() {
        if( pm ) {
            if( !owns )
                owns = pm->try_lock();
            else
                throw_exception_v4( tbb::internal::eid_possible_deadlock );
        } else
            throw_exception_v4( tbb::internal::eid_operation_not_permitted );
        return owns;
    }

    //! Try to lock the mutex within the time duration specified by i.
    bool try_lock_for( const tick_count::interval_t &i );

    //! Unlock the mutex
    /** And note that this lock no longer owns it. */
    void unlock() {
        if( owns ) {
            pm->unlock();
            owns = false;
        } else
            throw_exception_v4( tbb::internal::eid_operation_not_permitted );
        __TBB_ASSERT( !owns, NULL );
    }

    // 30.4.3.2.3 modifiers
    //! Swap the two unique locks
    void swap(unique_lock& u) {
        mutex_type* t_pm = u.pm; u.pm = pm; pm = t_pm;
        bool t_owns = u.owns; u.owns = owns; owns = t_owns;
    }

    //! Release control over the mutex.
    mutex_type* release() {
        mutex_type* o_pm = pm;
        pm = NULL;
        owns = false;
        return o_pm;
    }

    // 30.4.3.2.4 observers
    //! Does this lock own the mutex?
    bool owns_lock() const { return owns; }

    // TODO: Un-comment 'explicit' when the last non-C++0x compiler support is dropped
    //! Does this lock own the mutex?
    /*explicit*/ operator bool() const { return owns; }

    //! Return the mutex that this lock currently has.
    mutex_type* mutex() const { return pm; }

private:
    mutex_type* pm;
    bool owns;
};

template<typename M>
bool unique_lock<M>::try_lock_for( const tick_count::interval_t &i)
{
    const int unique_lock_tick = 100; /* microseconds; 0.1 milliseconds */
    // the smallest wait-time is 0.1 milliseconds.
    bool res = pm->try_lock();
    int duration_in_micro;
    if( !res && (duration_in_micro=int(i.seconds()*1e6))>unique_lock_tick ) {
        tick_count::interval_t i_100( double(unique_lock_tick)/1e6 /* seconds */ ); // 100 microseconds = 1e-4 seconds
        do {
            this_tbb_thread::sleep(i_100); // sleep for 100 microseconds
            duration_in_micro -= unique_lock_tick;
            res = pm->try_lock();
        } while( !res && duration_in_micro>unique_lock_tick );
    }
    return (owns=res);
}

//! Swap two unique locks that have mutexes of the same type
template<typename M>
void swap(unique_lock<M>& x, unique_lock<M>& y) { x.swap( y ); }
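
// Illustrative usage sketch (not part of the original header): unique_lock supports deferred,
// non-blocking, and timed acquisition in addition to plain scoped locking.  try_lock_for()
// above simply polls the mutex roughly every 100 microseconds (unique_lock_tick) until the
// requested interval elapses.  The mutex name m is invented for the example.
//
//     using namespace tbb::interface5;
//     tbb::mutex m;
//     unique_lock<tbb::mutex> ul( m, defer_lock );                    // bound to m, not locked yet
//     if( ul.try_lock() )                                             // non-blocking attempt
//         ul.unlock();
//     if( ul.try_lock_for( tbb::tick_count::interval_t(0.01) ) ) {   // keep retrying for about 10 ms
//         /* ...critical section... */
//     }
//     // ul's destructor unlocks the mutex if it still owns it at end of scope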

namespace internal {

#if _WIN32||_WIN64
union condvar_impl_t {
    condition_variable_using_event cv_event;
    CONDITION_VARIABLE cv_native;
};
void __TBB_EXPORTED_FUNC internal_initialize_condition_variable( condvar_impl_t& cv );
void __TBB_EXPORTED_FUNC internal_destroy_condition_variable( condvar_impl_t& cv );
void __TBB_EXPORTED_FUNC internal_condition_variable_notify_one( condvar_impl_t& cv );
void __TBB_EXPORTED_FUNC internal_condition_variable_notify_all( condvar_impl_t& cv );
bool __TBB_EXPORTED_FUNC internal_condition_variable_wait( condvar_impl_t& cv, mutex* mtx, const tick_count::interval_t* i = NULL );

#else /* if !(_WIN32||_WIN64), i.e., POSIX threads */
typedef pthread_cond_t condvar_impl_t;
#endif

} // namespace internal

//! cv_status
/** C++0x standard working draft 30.5 */
enum cv_status { no_timeout, timeout };

//! condition variable
/** C++0x standard working draft 30.5.1
    @ingroup synchronization */
class condition_variable : tbb::internal::no_copy {
public:
    //! Constructor
    condition_variable() {
#if _WIN32||_WIN64
        internal_initialize_condition_variable( my_cv );
#else
        pthread_cond_init( &my_cv, NULL );
#endif
    }

    //! Destructor
    ~condition_variable() {
        //precondition: There shall be no thread blocked on *this.
#if _WIN32||_WIN64
        internal_destroy_condition_variable( my_cv );
#else
        pthread_cond_destroy( &my_cv );
#endif
    }

    //! Notify one thread and wake it up
    void notify_one() {
#if _WIN32||_WIN64
        internal_condition_variable_notify_one( my_cv );
#else
        pthread_cond_signal( &my_cv );
#endif
    }

    //! Notify all threads
    void notify_all() {
#if _WIN32||_WIN64
        internal_condition_variable_notify_all( my_cv );
#else
        pthread_cond_broadcast( &my_cv );
#endif
    }

    //! Release the mutex associated with the lock and wait on this condition variable
    void wait(unique_lock<mutex>& lock);

    //! Wait on this condition variable while pred is false
    template <class Predicate>
    void wait(unique_lock<mutex>& lock, Predicate pred) {
        while( !pred() )
            wait( lock );
    }

    //! Timed version of wait()
    cv_status wait_for(unique_lock<mutex>& lock, const tick_count::interval_t &i );

    //! Timed version of the predicated wait
    /** The loop terminates when pred() returns true or when the time duration specified by i has elapsed. */
    template<typename Predicate>
    bool wait_for(unique_lock<mutex>& lock, const tick_count::interval_t &i, Predicate pred)
    {
        while( !pred() ) {
            cv_status st = wait_for( lock, i );
            if( st==timeout )
                return pred();
        }
        return true;
    }

    // C++0x standard working draft. 30.2.3
    typedef internal::condvar_impl_t* native_handle_type;

    native_handle_type native_handle() { return (native_handle_type) &my_cv; }

private:
    internal::condvar_impl_t my_cv;
};
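
// Illustrative usage sketch (not part of the original header): the usual pattern is to
// change shared state under the mutex, notify, and re-check the condition in a loop so
// that spurious wakeups are handled correctly.  The names state_mutex, cv, and data_ready
// are invented for the example.
//
//     using namespace tbb::interface5;
//     tbb::mutex state_mutex;
//     condition_variable cv;
//     bool data_ready = false;
//
//     // consumer thread:
//     unique_lock<tbb::mutex> lk( state_mutex );
//     while( !data_ready )
//         cv.wait( lk );     // releases state_mutex while blocked, re-locks before returning
//
//     // producer thread:
//     { lock_guard<tbb::mutex> g( state_mutex ); data_ready = true; }
//     cv.notify_one();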

#if _WIN32||_WIN64
inline void condition_variable::wait( unique_lock<mutex>& lock )
{
    __TBB_ASSERT( lock.owns, NULL );
    lock.owns = false;
    if( !internal_condition_variable_wait( my_cv, lock.mutex() ) ) {
        int ec = GetLastError();
        // on Windows 7, SleepConditionVariableCS() may return ERROR_TIMEOUT while the doc says it returns WAIT_TIMEOUT
        __TBB_ASSERT_EX( ec!=WAIT_TIMEOUT&&ec!=ERROR_TIMEOUT, NULL );
        lock.owns = true;
        throw_exception_v4( tbb::internal::eid_condvar_wait_failed );
    }
    lock.owns = true;
}

inline cv_status condition_variable::wait_for( unique_lock<mutex>& lock, const tick_count::interval_t& i )
{
    cv_status rc = no_timeout;
    __TBB_ASSERT( lock.owns, NULL );
    lock.owns = false;
    // condvar_wait could be SleepConditionVariableCS (or SleepConditionVariableSRW) or our own pre-vista cond_var_wait()
    if( !internal_condition_variable_wait( my_cv, lock.mutex(), &i ) ) {
        int ec = GetLastError();
        if( ec==WAIT_TIMEOUT || ec==ERROR_TIMEOUT )
            rc = timeout;
        else {
            lock.owns = true;
            throw_exception_v4( tbb::internal::eid_condvar_wait_failed );
        }
    }
    lock.owns = true;
    return rc;
}

#else /* !(_WIN32||_WIN64) */
inline void condition_variable::wait( unique_lock<mutex>& lock )
{
    __TBB_ASSERT( lock.owns, NULL );
    lock.owns = false;
    if( pthread_cond_wait( &my_cv, lock.mutex()->native_handle() ) ) {
        lock.owns = true;
        throw_exception_v4( tbb::internal::eid_condvar_wait_failed );
    }
    // upon successful return, the mutex has been locked and is owned by the calling thread.
    lock.owns = true;
}

inline cv_status condition_variable::wait_for( unique_lock<mutex>& lock, const tick_count::interval_t& i )
{
#if __linux__
    struct timespec req;
    double sec = i.seconds();
    clock_gettime( CLOCK_REALTIME, &req );
    req.tv_sec += static_cast<long>(sec);
    req.tv_nsec += static_cast<long>( (sec - static_cast<long>(sec))*1e9 );
#else /* generic Unix */
    struct timeval tv;
    struct timespec req;
    double sec = i.seconds();
    int status = gettimeofday(&tv, NULL);
    __TBB_ASSERT_EX( status==0, "gettimeofday failed" );
    req.tv_sec = tv.tv_sec + static_cast<long>(sec);
    req.tv_nsec = tv.tv_usec*1000 + static_cast<long>( (sec - static_cast<long>(sec))*1e9 );
#endif /* (choice of OS) */
    if( req.tv_nsec>=1e9 ) {
        req.tv_sec += 1;
        req.tv_nsec -= static_cast<long int>(1e9);
    }
    __TBB_ASSERT( 0<=req.tv_nsec && req.tv_nsec<1e9, NULL );

    int ec;
    cv_status rc = no_timeout;
    __TBB_ASSERT( lock.owns, NULL );
    lock.owns = false;
    if( ( ec=pthread_cond_timedwait( &my_cv, lock.mutex()->native_handle(), &req ) ) ) {
        if( ec==ETIMEDOUT )
            rc = timeout;
        else {
            __TBB_ASSERT( lock.try_lock()==false, NULL );
            lock.owns = true;
            throw_exception_v4( tbb::internal::eid_condvar_wait_failed );
        }
    }
    lock.owns = true;
    return rc;
}
#endif /* !(_WIN32||_WIN64) */

} // namespace interface5

__TBB_DEFINE_PROFILING_SET_NAME(interface5::condition_variable)

} // namespace tbb

#if TBB_IMPLEMENT_CPP0X

namespace std {

using tbb::interface5::defer_lock_t;
using tbb::interface5::try_to_lock_t;
using tbb::interface5::adopt_lock_t;
using tbb::interface5::defer_lock;
using tbb::interface5::try_to_lock;
using tbb::interface5::adopt_lock;
using tbb::interface5::lock_guard;
using tbb::interface5::unique_lock;
using tbb::interface5::swap; /* this is for void std::swap(unique_lock<M>&,unique_lock<M>&) */
using tbb::interface5::condition_variable;
using tbb::interface5::cv_status;
using tbb::interface5::timeout;
using tbb::interface5::no_timeout;

} // namespace std

#endif /* TBB_IMPLEMENT_CPP0X */
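
// Illustrative note (not part of the original header): when TBB_IMPLEMENT_CPP0X is defined
// (typically by the TBB configuration headers on compilers lacking the C++0x facilities),
// the classes above are also reachable under their standard names.  my_mutex below is an
// invented example name.
//
//     std::condition_variable cv;                  // refers to tbb::interface5::condition_variable
//     std::unique_lock<tbb::mutex> lk( my_mutex ); // note: the mutex type is still tbb::mutex
//     cv.wait( lk );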

#endif /* __TBB_condition_variable_H */