Compare commits

...

4 Commits

Author SHA1 Message Date
Dana Robinson
83c2cdf0cc Updates BRANCH.md file 2022-04-13 15:05:47 -07:00
Dana Robinson
46a4dfe8bf Adds BRANCH.md file 2022-04-13 15:04:15 -07:00
mainzer
35a597dfec Updated MANIFEST to add ./test/ttsafe_rec_rw_lock.c 2020-10-19 12:19:01 -05:00
mainzer
266689e80a First cut at replacing the existing mutex with a recursive R/W lock.
This implementation has the following issues:

    1) pthreads implementation only -- we still need a windows version.

    2) must investigate thread cancelation issues

    3) Error reporting is very poor.  I followed the error reporting on
       the existing thread safe code, but this should be re-visited and
       improved.

Code is currently setup to use the new recursive R/W lock instead of
the global mutex to control entry to the library in threadsafe builds.
To revert to the global mutex, set H5TS__USE_REC_RW_LOCK_FOR_GLOBAL_MUTEX
in H5TSprivate.h to FALSE.

Added a reasonably robust regression test for the recursive R/W lock
in test/ttsafe_rec_rw_lock.c

Note that the change to hl/src/H5LTanalyse.c is an artifact of clang-format.

Tested serial threadsafe debug and production on jelly, and also regular
serial / debug.

On Windows builds, the new recursive R/W lock should not be built and
we should use the existing global mutex -- however this is not tested
at this time.
2020-10-06 15:30:48 -05:00
10 changed files with 2618 additions and 9 deletions

5
BRANCH.md Normal file
View File

@@ -0,0 +1,5 @@
This branch includes recursive RW locks added to the thread-safety code. It
is based on HDF5 1.12 and these changes have NOT been merged to develop as of
April 2022.
Person to contact: Dana Robinson (derobins@hdfgroup.org)

View File

@@ -23,6 +23,7 @@
./.autom4te.cfg _DO_NOT_DISTRIBUTE_
./.h5chkright.ini _DO_NOT_DISTRIBUTE_
./ACKNOWLEDGMENTS
./BRANCH.md _DO_NOT_DISTRIBUTE_
./COPYING
./COPYING_LBNL_HDF5
./MANIFEST
@@ -1260,6 +1261,7 @@
./test/ttsafe_cancel.c
./test/ttsafe_dcreate.c
./test/ttsafe_error.c
./test/ttsafe_rec_rw_lock.c
./test/tunicode.c
./test/tvlstr.c
./test/tvltypes.c

View File

@@ -2413,11 +2413,7 @@ yy_scan_buffer(char *base, yy_size_t size)
* yy_scan_bytes() instead.
*/
YY_BUFFER_STATE
yy_scan_string(const char *yystr)
{
return yy_scan_bytes(yystr, (int)strlen(yystr));
}
yy_scan_string(const char *yystr) { return yy_scan_bytes(yystr, (int)strlen(yystr)); }
/** Setup the input buffer state to scan the given bytes. The next call to yylex() will
* scan from a @e copy of @a bytes.

View File

@@ -244,10 +244,18 @@ H5TS_pthread_first_thread_init(void)
#endif
/* initialize global API mutex lock */
#if H5TS__USE_REC_RW_LOCK_FOR_GLOBAL_MUTEX
H5TS_pt_rec_rw_lock_init(&H5_g.init_rw_lock, H5TS__RW_LOCK_POLICY__FAVOR_WRITERS);
#else /* H5TS__USE_REC_RW_LOCK_FOR_GLOBAL_MUTEX */
pthread_mutex_init(&H5_g.init_lock.atomic_lock, NULL);
pthread_cond_init(&H5_g.init_lock.cond_var, NULL);
H5_g.init_lock.lock_count = 0;
#endif /* H5TS__USE_REC_RW_LOCK_FOR_GLOBAL_MUTEX */
/* Initialize integer thread identifiers. */
H5TS_tid_init();
@@ -666,4 +674,854 @@ H5TS_create_thread(void *(*func)(void *), H5TS_attr_t *attr, void *udata)
} /* H5TS_create_thread */
/*--------------------------------------------------------------------------
* NAME
* H5TS_alloc_pt_rec_entry_count
*
* RETURNS
* Pointer to allocated and initialized instance of
* H5TS_pt_rec_entry_count_t, or NULL on failure.
*
* DESCRIPTION
 * Allocate and initialize an instance of H5TS_pt_rec_entry_count_t.
*
* PROGRAMMER: John Mainzer
* August 28, 2020
*
*--------------------------------------------------------------------------
*/
H5TS_pt_rec_entry_count_t *
H5TS_alloc_pt_rec_entry_count(hbool_t write_lock)
{
    /* Allocate and initialize a per-thread recursive entry counter.
     *
     * The counter is created on the initial (non-recursive) lock grant,
     * so rec_lock_count starts at 1.  write_lock records whether the
     * thread holds a write lock (TRUE) or a read lock (FALSE).
     *
     * Returns a pointer to the new counter, or NULL if the allocation
     * fails.
     */
    H5TS_pt_rec_entry_count_t *count;

    count = (H5TS_pt_rec_entry_count_t *)HDmalloc(sizeof(*count));

    if (NULL != count) {
        count->magic          = H5TS_PT_REC_RW_REC_ENTRY_COUNT_MAGIC;
        count->write_lock     = write_lock;
        count->rec_lock_count = 1;
    }

    return count;
} /* H5TS_alloc_pt_rec_entry_count() */
/*--------------------------------------------------------------------------
* NAME
* H5TS_free_pt_rec_entry_count
*
* RETURNS
* void.
*
* DESCRIPTION
* Free the supplied instance of H5TS_pt_rec_entry_count_t.
*
* PROGRAMMER: John Mainzer
* August 28, 2020
*
*--------------------------------------------------------------------------
*/
void
H5TS_free_pt_rec_entry_count(void *target_ptr)
{
    /* Sanity check and free a per-thread recursive entry counter.
     *
     * The magic field is invalidated before the free so that any stale
     * pointer to this memory is easier to detect.  This function is also
     * registered as the destructor for the lock's pthread key.
     */
    H5TS_pt_rec_entry_count_t *count = (H5TS_pt_rec_entry_count_t *)target_ptr;

    HDassert(count);
    HDassert(count->magic == H5TS_PT_REC_RW_REC_ENTRY_COUNT_MAGIC);

    count->magic = 0; /* poison before freeing */

    HDfree(count);
} /* H5TS_free_pt_rec_entry_count() */
/*--------------------------------------------------------------------------
* NAME
* H5TS_pt_rec_rw_lock_init
*
* RETURNS
* 0 on success and non-zero on error.
*
* DESCRIPTION
* Initialize the supplied instance of H5TS_pt_rec_rw_lock_t.
*
* PROGRAMMER: John Mainzer
* August 28, 2020
*
*--------------------------------------------------------------------------
*/
herr_t
H5TS_pt_rec_rw_lock_init(H5TS_pt_rec_rw_lock_t *rw_lock_ptr, int policy)
{
    herr_t ret_value = SUCCEED;

    /* Sanity checks -- until other policies are implemented, policy must
     * equal H5TS__RW_LOCK_POLICY__FAVOR_WRITERS.
     */
    if (NULL == rw_lock_ptr || H5TS__RW_LOCK_POLICY__FAVOR_WRITERS != policy)
        ret_value = FAIL;

    /* Set up the mutex, the reader / writer condition variables, and the
     * thread-local key used to track per-thread recursive entry counts.
     * Each step runs only if all previous steps succeeded.
     *
     * NOTE(review): if a later step fails, objects created by earlier
     * steps are not destroyed -- presumably acceptable since callers
     * treat FAIL as fatal, but confirm.
     */
    if (SUCCEED == ret_value && 0 != pthread_mutex_init(&(rw_lock_ptr->mutex), NULL))
        ret_value = FAIL;

    if (SUCCEED == ret_value && 0 != pthread_cond_init(&(rw_lock_ptr->readers_cv), NULL))
        ret_value = FAIL;

    if (SUCCEED == ret_value && 0 != pthread_cond_init(&(rw_lock_ptr->writers_cv), NULL))
        ret_value = FAIL;

    if (SUCCEED == ret_value &&
        0 != pthread_key_create(&(rw_lock_ptr->rec_entry_count_key), H5TS_free_pt_rec_entry_count))
        ret_value = FAIL;

    if (SUCCEED == ret_value) {
        /* Initialize the scalar fields and zero all statistics.  Every
         * stats field is an int64_t, so zeroing the struct wholesale is
         * equivalent to setting each counter to 0 individually.
         */
        rw_lock_ptr->magic                 = H5TS_PT_REC_RW_LOCK_MAGIC;
        rw_lock_ptr->policy                = policy;
        rw_lock_ptr->waiting_readers_count = 0;
        rw_lock_ptr->waiting_writers_count = 0;
        rw_lock_ptr->active_readers        = 0;
        rw_lock_ptr->active_writers        = 0;

        HDmemset(&(rw_lock_ptr->stats), 0, sizeof(rw_lock_ptr->stats));
    }

    return ret_value;
} /* H5TS_pt_rec_rw_lock_init() */
/*--------------------------------------------------------------------------
* NAME
* H5TS_pt_rec_rw_lock_takedown
*
* RETURNS
* 0 on success and non-zero on error.
*
* DESCRIPTION
* Takedown an instance of H5TS_pt_rec_rw_lock_t. All mutex, condition
* variables, and keys are destroyed, and magic is set to an invalid
* value. However, the instance of H5TS_pt_rec_rw_lock_t is not
* freed.
*
* PROGRAMMER: John Mainzer
* August 28, 2020
*
*--------------------------------------------------------------------------
*/
herr_t
H5TS_pt_rec_rw_lock_takedown(H5TS_pt_rec_rw_lock_t *rw_lock_ptr)
{
    herr_t ret_value = SUCCEED;

    if ((rw_lock_ptr == NULL) || (rw_lock_ptr->magic != H5TS_PT_REC_RW_LOCK_MAGIC)) {
        ret_value = FAIL;
    }
    else {
        /* We are committed to the takedown at this point.  Set magic
         * to an invalid value, and call the appropriate pthread
         * destroy routines.  Call them all, even if one fails along
         * the way.
         *
         * BUG FIX: pthread_mutex_destroy(), pthread_cond_destroy(), and
         * pthread_key_delete() return zero on success and a *positive*
         * error number on failure -- they never return a negative value.
         * The original "< 0" comparisons therefore could never detect a
         * failure.  Test against zero instead, and use independent if
         * statements (rather than a short-circuiting "||" chain) so that
         * every destroy routine is invoked even if an earlier one fails,
         * as the comment above promises.
         */
        rw_lock_ptr->magic = 0;

        if (pthread_mutex_destroy(&(rw_lock_ptr->mutex)) != 0)
            ret_value = FAIL;

        if (pthread_cond_destroy(&(rw_lock_ptr->readers_cv)) != 0)
            ret_value = FAIL;

        if (pthread_cond_destroy(&(rw_lock_ptr->writers_cv)) != 0)
            ret_value = FAIL;

        if (pthread_key_delete(rw_lock_ptr->rec_entry_count_key) != 0)
            ret_value = FAIL;
    }

    return (ret_value);
} /* H5TS_pt_rec_rw_lock_takedown() */
/*--------------------------------------------------------------------------
* NAME
* H5TS_pt_rec_rw_rdlock
*
* RETURNS
* 0 on success and non-zero on error.
*
* DESCRIPTION
* Attempt to obtain a read lock on the associated recursive read / write
* lock.
*
* PROGRAMMER: John Mainzer
* August 28, 2020
*
*--------------------------------------------------------------------------
*/
herr_t
H5TS_pt_rec_rw_rdlock(H5TS_pt_rec_rw_lock_t *rw_lock_ptr)
{
    hbool_t have_mutex = FALSE;
    int result;
    H5TS_pt_rec_entry_count_t *count_ptr;
    herr_t ret_value = SUCCEED;

    /* sanity check the target lock before touching it */
    if ((rw_lock_ptr == NULL) || (rw_lock_ptr->magic != H5TS_PT_REC_RW_LOCK_MAGIC)) {
        ret_value = FAIL;
    }

    /* obtain the mutex */
    if (ret_value == SUCCEED) {
        if (pthread_mutex_lock(&(rw_lock_ptr->mutex)) != 0) {
            ret_value = FAIL;
        }
        else {
            have_mutex = TRUE;
        }
    }

    /* If there is no specific data for this thread, this is an
     * initial read lock request.
     */
    if (ret_value == SUCCEED) {
        count_ptr = (H5TS_pt_rec_entry_count_t *)pthread_getspecific(rw_lock_ptr->rec_entry_count_key);
        if (count_ptr) { /* this is a recursive lock */
            /* A recursive read lock is only legal if this thread already
             * holds a read lock (not a write lock), and the lock state is
             * consistent with that: at least one active reader, no active
             * writers.
             */
            if ((count_ptr->write_lock) || (rw_lock_ptr->active_readers == 0) ||
                (rw_lock_ptr->active_writers != 0)) {
                ret_value = FAIL;
            }
            else {
                count_ptr->rec_lock_count++;
                REC_RW_LOCK_STATS__UPDATE_FOR_RD_LOCK(rw_lock_ptr, count_ptr);
            }
        }
        else { /* this is an initial read lock request */
            switch (rw_lock_ptr->policy) {
                case H5TS__RW_LOCK_POLICY__FAVOR_WRITERS:
                    /* Writers are favored: a new reader must wait while a
                     * writer is active OR any writer is waiting.  The delay
                     * stats are updated once, before the wait loop, if we
                     * are about to block.
                     */
                    if ((rw_lock_ptr->active_writers != 0) || (rw_lock_ptr->waiting_writers_count != 0)) {
                        int delayed = rw_lock_ptr->waiting_readers_count + 1;
                        REC_RW_LOCK_STATS__UPDATE_FOR_RD_LOCK_DELAY(rw_lock_ptr, delayed);
                    }
                    /* pthread_cond_wait() releases the mutex while blocked
                     * and re-acquires it before returning, so the waiting
                     * counter updates here are mutex-protected.
                     */
                    while ((rw_lock_ptr->active_writers != 0) || (rw_lock_ptr->waiting_writers_count != 0)) {
                        rw_lock_ptr->waiting_readers_count++;
                        result = pthread_cond_wait(&(rw_lock_ptr->readers_cv), &(rw_lock_ptr->mutex));
                        rw_lock_ptr->waiting_readers_count--;
                        if (result != 0) {
                            ret_value = FAIL;
                            break;
                        }
                    }
                    break;
                default:
                    /* unknown / unsupported policy */
                    ret_value = FAIL;
                    break;
            }
            /* Allocate the per-thread entry count (initialized to 1 by
             * H5TS_alloc_pt_rec_entry_count) and stash it in the lock's
             * thread-local key so that subsequent lock / unlock calls by
             * this thread are recognized as recursive.
             */
            if ((ret_value == SUCCEED) && (NULL == (count_ptr = H5TS_alloc_pt_rec_entry_count(FALSE)))) {
                ret_value = FAIL;
            }
            if ((ret_value == SUCCEED) &&
                (pthread_setspecific(rw_lock_ptr->rec_entry_count_key, (void *)count_ptr) != 0)) {
                ret_value = FAIL;
            }
            if (ret_value == SUCCEED) {
                rw_lock_ptr->active_readers++;
                HDassert(count_ptr->rec_lock_count == 1);
                REC_RW_LOCK_STATS__UPDATE_FOR_RD_LOCK(rw_lock_ptr, count_ptr);
            }
        }
    }

    if (have_mutex) {
        pthread_mutex_unlock(&(rw_lock_ptr->mutex));
    }

    return (ret_value);
} /* H5TS_pt_rec_rw_rdlock() */
/*--------------------------------------------------------------------------
* NAME
* H5TS_pt_rec_rw_wrlock
*
* RETURNS
* 0 on success and non-zero on error.
*
* DESCRIPTION
* Attempt to obtain a write lock on the associated recursive read / write
* lock.
*
* PROGRAMMER: John Mainzer
* August 28, 2020
*
*--------------------------------------------------------------------------
*/
herr_t
H5TS_pt_rec_rw_wrlock(H5TS_pt_rec_rw_lock_t *rw_lock_ptr)
{
    hbool_t have_mutex = FALSE;
    int result;
    H5TS_pt_rec_entry_count_t *count_ptr;
    herr_t ret_value = SUCCEED;

    /* sanity check the target lock before touching it */
    if ((rw_lock_ptr == NULL) || (rw_lock_ptr->magic != H5TS_PT_REC_RW_LOCK_MAGIC)) {
        ret_value = FAIL;
    }

    /* obtain the mutex */
    if (ret_value == SUCCEED) {
        if (pthread_mutex_lock(&(rw_lock_ptr->mutex)) != 0) {
            ret_value = FAIL;
        }
        else {
            have_mutex = TRUE;
        }
    }

    /* If there is no specific data for this thread, this is an
     * initial write lock request.
     */
    if (ret_value == SUCCEED) {
        count_ptr = (H5TS_pt_rec_entry_count_t *)pthread_getspecific(rw_lock_ptr->rec_entry_count_key);
        if (count_ptr) { /* this is a recursive lock */
            /* A recursive write lock is only legal if this thread already
             * holds the write lock (not a read lock), and the lock state
             * is consistent with that: no active readers, exactly one
             * active writer.
             */
            if ((!(count_ptr->write_lock)) || (rw_lock_ptr->active_readers != 0) ||
                (rw_lock_ptr->active_writers != 1)) {
                ret_value = FAIL;
            }
            else {
                count_ptr->rec_lock_count++;
                REC_RW_LOCK_STATS__UPDATE_FOR_WR_LOCK(rw_lock_ptr, count_ptr);
            }
        }
        else { /* this is an initial write lock request */
            switch (rw_lock_ptr->policy) {
                case H5TS__RW_LOCK_POLICY__FAVOR_WRITERS:
                    /* A new writer must wait while any reader or writer is
                     * active.  The delay stats are updated once, before the
                     * wait loop, if we are about to block.
                     */
                    if ((rw_lock_ptr->active_readers > 0) || (rw_lock_ptr->active_writers > 0)) {
                        int delayed = rw_lock_ptr->waiting_writers_count + 1;
                        REC_RW_LOCK_STATS__UPDATE_FOR_WR_LOCK_DELAY(rw_lock_ptr, delayed);
                    }
                    /* pthread_cond_wait() releases the mutex while blocked
                     * and re-acquires it before returning, so the waiting
                     * counter updates here are mutex-protected.
                     */
                    while ((rw_lock_ptr->active_readers > 0) || (rw_lock_ptr->active_writers > 0)) {
                        rw_lock_ptr->waiting_writers_count++;
                        result = pthread_cond_wait(&(rw_lock_ptr->writers_cv), &(rw_lock_ptr->mutex));
                        rw_lock_ptr->waiting_writers_count--;
                        if (result != 0) {
                            ret_value = FAIL;
                            break;
                        }
                    }
                    break;
                default:
                    /* unknown / unsupported policy */
                    ret_value = FAIL;
                    break;
            }
            /* Allocate the per-thread entry count (initialized to 1, with
             * write_lock == TRUE) and stash it in the lock's thread-local
             * key so that subsequent lock / unlock calls by this thread
             * are recognized as recursive.
             */
            if ((ret_value == SUCCEED) && (NULL == (count_ptr = H5TS_alloc_pt_rec_entry_count(TRUE)))) {
                ret_value = FAIL;
            }
            if ((ret_value == SUCCEED) &&
                (pthread_setspecific(rw_lock_ptr->rec_entry_count_key, (void *)count_ptr) != 0)) {
                ret_value = FAIL;
            }
            if (ret_value == SUCCEED) {
                rw_lock_ptr->active_writers++;
                HDassert(count_ptr->rec_lock_count == 1);
                REC_RW_LOCK_STATS__UPDATE_FOR_WR_LOCK(rw_lock_ptr, count_ptr);
            }
        }
    }

    if (have_mutex) {
        pthread_mutex_unlock(&(rw_lock_ptr->mutex));
    }

    return (ret_value);
} /* H5TS_pt_rec_rw_wrlock() */
/*--------------------------------------------------------------------------
* NAME
* H5TS_pt_rec_rw_unlock
*
* RETURNS
* 0 on success and non-zero on error.
*
* DESCRIPTION
* Attempt to unlock either a read or a write lock on the supplied
* recursive read / write lock.
*
* PROGRAMMER: John Mainzer
* August 28, 2020
*
*--------------------------------------------------------------------------
*/
herr_t
H5TS_pt_rec_rw_unlock(H5TS_pt_rec_rw_lock_t *rw_lock_ptr)
{
    hbool_t have_mutex = FALSE;
    hbool_t discard_rec_count = FALSE;
    H5TS_pt_rec_entry_count_t *count_ptr;
    herr_t ret_value = SUCCEED;

    /* sanity check the target lock before touching it */
    if ((rw_lock_ptr == NULL) || (rw_lock_ptr->magic != H5TS_PT_REC_RW_LOCK_MAGIC)) {
        ret_value = FAIL;
    }

    /* obtain the mutex */
    if (ret_value == SUCCEED) {
        if (pthread_mutex_lock(&(rw_lock_ptr->mutex)) != 0) {
            ret_value = FAIL;
        }
        else {
            have_mutex = TRUE;
        }
    }

    /* If there is no specific data for this thread, no lock was held,
     * and thus the unlock call must fail.
     *
     * BUG FIX: the original code asserted count_ptr, its magic, and a
     * positive rec_lock_count here, *ahead of* the equivalent runtime
     * checks below.  Those asserts made the graceful FAIL paths
     * unreachable in debug builds; they have been removed, and the
     * runtime checks below handle each condition.  The same applies to
     * the asserts that previously preceded the lock-state consistency
     * checks in the write / read drop branches.
     */
    if (ret_value == SUCCEED) {
        count_ptr = (H5TS_pt_rec_entry_count_t *)pthread_getspecific(rw_lock_ptr->rec_entry_count_key);
        if (NULL == count_ptr) {
            ret_value = FAIL;
        }
        else if (count_ptr->magic != H5TS_PT_REC_RW_REC_ENTRY_COUNT_MAGIC) {
            ret_value = FAIL;
        }
        else if (count_ptr->rec_lock_count <= 0) { /* corrupt count? */
            ret_value = FAIL;
        }
        else if (count_ptr->write_lock) { /* drop a write lock */
            /* A held write lock implies exactly one active writer and no
             * active readers -- fail if the lock state disagrees with the
             * thread-local count.
             */
            if ((rw_lock_ptr->active_readers != 0) || (rw_lock_ptr->active_writers != 1)) {
                ret_value = FAIL;
            }
            else {
                count_ptr->rec_lock_count--;
                HDassert(count_ptr->rec_lock_count >= 0);
                if (count_ptr->rec_lock_count == 0) {
                    /* make note that we must discard the
                     * recursive entry counter so it will not
                     * confuse us on the next lock request.
                     */
                    discard_rec_count = TRUE;
                    /* drop the write lock -- will signal later if needed */
                    rw_lock_ptr->active_writers--;
                    HDassert(rw_lock_ptr->active_writers == 0);
                }
            }
            REC_RW_LOCK_STATS__UPDATE_FOR_WR_UNLOCK(rw_lock_ptr, count_ptr);
        }
        else { /* drop a read lock */
            /* A held read lock implies at least one active reader and no
             * active writers.
             */
            if ((rw_lock_ptr->active_readers <= 0) || (rw_lock_ptr->active_writers != 0)) {
                ret_value = FAIL;
            }
            else {
                count_ptr->rec_lock_count--;
                HDassert(count_ptr->rec_lock_count >= 0);
                if (count_ptr->rec_lock_count == 0) {
                    /* make note that we must discard the
                     * recursive entry counter so it will not
                     * confuse us on the next lock request.
                     */
                    discard_rec_count = TRUE;
                    /* drop the read lock -- will signal later if needed */
                    rw_lock_ptr->active_readers--;
                }
            }
            REC_RW_LOCK_STATS__UPDATE_FOR_RD_UNLOCK(rw_lock_ptr, count_ptr);
        }
        if ((ret_value == SUCCEED) && (rw_lock_ptr->active_readers == 0) &&
            (rw_lock_ptr->active_writers == 0)) {
            /* no locks held -- signal condition variables if required */
            switch (rw_lock_ptr->policy) {
                case H5TS__RW_LOCK_POLICY__FAVOR_WRITERS:
                    /* writers first: wake a single waiting writer if any;
                     * otherwise wake all waiting readers at once.
                     */
                    if (rw_lock_ptr->waiting_writers_count > 0) {
                        if (pthread_cond_signal(&(rw_lock_ptr->writers_cv)) != 0) {
                            ret_value = FAIL;
                        }
                    }
                    else if (rw_lock_ptr->waiting_readers_count > 0) {
                        if (pthread_cond_broadcast(&(rw_lock_ptr->readers_cv)) != 0) {
                            ret_value = FAIL;
                        }
                    }
                    break;
                default:
                    ret_value = FAIL;
                    break;
            }
        }
    }

    /* if we are really dropping the lock, must set the value of
     * rec_entry_count_key for this thread to NULL, so that
     * when this thread next requests a lock, it will appear
     * as an initial lock, not a recursive lock.
     */
    if (discard_rec_count) {
        HDassert(count_ptr);
        if (pthread_setspecific(rw_lock_ptr->rec_entry_count_key, (void *)NULL) != 0) {
            ret_value = FAIL;
        }
        H5TS_free_pt_rec_entry_count((void *)count_ptr);
        count_ptr = NULL;
    }

    if (have_mutex) {
        pthread_mutex_unlock(&(rw_lock_ptr->mutex));
    }

    return (ret_value);
} /* H5TS_pt_rec_rw_unlock() */
/*--------------------------------------------------------------------------
* NAME
* H5TS_pt_rec_rw_lock_get_stats
*
* RETURNS
* 0 on success and non-zero on error.
*
* DESCRIPTION
* Obtain a copy of the current statistics on the supplied
* recursive read / write lock. Note that to obtain a consistent
* set of statistics, the function must obtain the lock mutex.
*
* PROGRAMMER: John Mainzer
* August 28, 2020
*
*--------------------------------------------------------------------------
*/
herr_t
H5TS_pt_rec_rw_lock_get_stats(H5TS_pt_rec_rw_lock_t *rw_lock_ptr, H5TS_pt_rec_rw_lock_stats_t *stats_ptr)
{
    /* Copy the current statistics of *rw_lock_ptr into *stats_ptr.
     *
     * The lock's mutex is held for the duration of the copy so that the
     * snapshot is internally consistent.
     */
    hbool_t have_mutex = FALSE;
    herr_t ret_value = SUCCEED;

    /* sanity checks */
    if (NULL == rw_lock_ptr || H5TS_PT_REC_RW_LOCK_MAGIC != rw_lock_ptr->magic || NULL == stats_ptr)
        ret_value = FAIL;

    /* obtain the mutex */
    if (SUCCEED == ret_value) {
        if (0 == pthread_mutex_lock(&(rw_lock_ptr->mutex)))
            have_mutex = TRUE;
        else
            ret_value = FAIL;
    }

    /* structure assignment copies all stats fields at once */
    if (SUCCEED == ret_value)
        *stats_ptr = rw_lock_ptr->stats;

    if (have_mutex)
        pthread_mutex_unlock(&(rw_lock_ptr->mutex));

    return ret_value;
} /* H5TS_pt_rec_rw_lock_get_stats() */
/*--------------------------------------------------------------------------
* NAME
* H5TS_pt_rec_rw_lock_reset_stats
*
* RETURNS
* 0 on success and non-zero on error.
*
* DESCRIPTION
* Reset the statistics for the supplied recursive read / write lock.
* Note that to reset the statistics consistently, the function must
* obtain the lock mutex.
*
* PROGRAMMER: John Mainzer
* August 28, 2020
*
*--------------------------------------------------------------------------
*/
herr_t
H5TS_pt_rec_rw_lock_reset_stats(H5TS_pt_rec_rw_lock_t *rw_lock_ptr)
{
    /* Reset all statistics on *rw_lock_ptr to zero.
     *
     * The lock's mutex is held for the duration of the reset so that the
     * statistics change atomically with respect to lock / unlock calls.
     *
     * Note: every field of H5TS_pt_rec_rw_lock_stats_t is an int64_t, so
     * zeroing the structure wholesale is equivalent to the original
     * field-by-field zero initializer -- and does not need updating if
     * counters are added to the structure.
     */
    hbool_t have_mutex = FALSE;
    herr_t ret_value = SUCCEED;

    /* sanity checks */
    if (NULL == rw_lock_ptr || H5TS_PT_REC_RW_LOCK_MAGIC != rw_lock_ptr->magic)
        ret_value = FAIL;

    /* obtain the mutex */
    if (SUCCEED == ret_value) {
        if (0 == pthread_mutex_lock(&(rw_lock_ptr->mutex)))
            have_mutex = TRUE;
        else
            ret_value = FAIL;
    }

    if (SUCCEED == ret_value)
        HDmemset(&(rw_lock_ptr->stats), 0, sizeof(rw_lock_ptr->stats));

    if (have_mutex)
        pthread_mutex_unlock(&(rw_lock_ptr->mutex));

    return ret_value;
} /* H5TS_pt_rec_rw_lock_reset_stats() */
/*--------------------------------------------------------------------------
* NAME
* H5TS_pt_rec_rw_lock_print_stats
*
* RETURNS
* 0 on success and non-zero on error.
*
* DESCRIPTION
 * Print the supplied pthreads recursive R/W lock statistics to
* standard out.
*
* UPDATE THIS FUNCTION IF YOU MODIFY H5TS_pt_rec_rw_lock_stats_t.
*
* PROGRAMMER: John Mainzer
* August 28, 2020
*
*--------------------------------------------------------------------------
*/
herr_t
H5TS_pt_rec_rw_lock_print_stats(const char *header_str, H5TS_pt_rec_rw_lock_stats_t *stats_ptr)
{
    /* Print all fields of *stats_ptr to stdout, preceded by header_str.
     * Fails if either pointer is NULL.  Values are cast to long long so
     * the %lld conversion matches regardless of how int64_t is defined.
     */
    herr_t ret_value = SUCCEED;
    if ((header_str == NULL) || (stats_ptr == NULL)) {
        ret_value = FAIL;
    }
    else {
        HDfprintf(stdout, "\n\n%s\n\n", header_str);
        /* read lock statistics */
        HDfprintf(stdout, "  read_locks_granted             = %lld\n",
                  (long long int)(stats_ptr->read_locks_granted));
        HDfprintf(stdout, "  read_locks_released            = %lld\n",
                  (long long int)(stats_ptr->read_locks_released));
        HDfprintf(stdout, "  real_read_locks_granted        = %lld\n",
                  (long long int)(stats_ptr->real_read_locks_granted));
        HDfprintf(stdout, "  real_read_locks_released       = %lld\n",
                  (long long int)(stats_ptr->real_read_locks_released));
        HDfprintf(stdout, "  max_read_locks                 = %lld\n",
                  (long long int)(stats_ptr->max_read_locks));
        HDfprintf(stdout, "  max_read_lock_recursion_depth  = %lld\n",
                  (long long int)(stats_ptr->max_read_lock_recursion_depth));
        HDfprintf(stdout, "  read_locks_delayed             = %lld\n",
                  (long long int)(stats_ptr->read_locks_delayed));
        HDfprintf(stdout, "  max_read_locks_pending         = %lld\n",
                  (long long int)(stats_ptr->max_read_locks_pending));
        /* write lock statistics */
        HDfprintf(stdout, "  write_locks_granted            = %lld\n",
                  (long long int)(stats_ptr->write_locks_granted));
        HDfprintf(stdout, "  write_locks_released           = %lld\n",
                  (long long int)(stats_ptr->write_locks_released));
        HDfprintf(stdout, "  real_write_locks_granted       = %lld\n",
                  (long long int)(stats_ptr->real_write_locks_granted));
        HDfprintf(stdout, "  real_write_locks_released      = %lld\n",
                  (long long int)(stats_ptr->real_write_locks_released));
        HDfprintf(stdout, "  max_write_locks                = %lld\n",
                  (long long int)(stats_ptr->max_write_locks));
        HDfprintf(stdout, "  max_write_lock_recursion_depth = %lld\n",
                  (long long int)(stats_ptr->max_write_lock_recursion_depth));
        HDfprintf(stdout, "  write_locks_delayed            = %lld\n",
                  (long long int)(stats_ptr->write_locks_delayed));
        HDfprintf(stdout, "  max_write_locks_pending        = %lld\n\n",
                  (long long int)(stats_ptr->max_write_locks_pending));
    }
    return (ret_value);
} /* H5TS_pt_rec_rw_lock_print_stats() */
#endif /* H5_HAVE_THREADSAFE */

View File

@@ -32,6 +32,12 @@
#ifdef H5_HAVE_WIN_THREADS
/* At present, the recursive R/W lock does not function with
* windows -- thus set H5TS__USE_REC_RW_LOCK_FOR_GLOBAL_MUTEX
* to FALSE.
*/
#define H5TS__USE_REC_RW_LOCK_FOR_GLOBAL_MUTEX FALSE
/* Library level data structures */
/* Mutexes, Threads, and Attributes */
@@ -71,6 +77,151 @@ H5_DLL herr_t H5TS_win32_thread_exit(void);
#else /* H5_HAVE_WIN_THREADS */
/* Defines */
/* Set H5TS__USE_REC_RW_LOCK_FOR_GLOBAL_MUTEX to TRUE iff you want
* to replace the global mutex with the recursive R/W lock.
*/
#define H5TS__USE_REC_RW_LOCK_FOR_GLOBAL_MUTEX TRUE
/******************************************************************************
*
* p-thread recursive R/W lock stats collection macros
*
* Macros to maintain statistics on the p-threads recursive R/W lock.
*
******************************************************************************/
/* clang-format off */
#define REC_RW_LOCK_STATS__UPDATE_FOR_RD_LOCK(rw, count_ptr) \
do { \
HDassert(rw); \
HDassert((rw)->magic == H5TS_PT_REC_RW_LOCK_MAGIC); \
HDassert(count_ptr); \
HDassert((count_ptr)->magic == H5TS_PT_REC_RW_REC_ENTRY_COUNT_MAGIC); \
HDassert((count_ptr)->rec_lock_count >= 1); \
HDassert(!(count_ptr)->write_lock); \
\
(rw)->stats.read_locks_granted++; \
\
if ( (count_ptr)->rec_lock_count == 1) { \
\
(rw)->stats.real_read_locks_granted++; \
\
if ( (rw)->active_readers > (rw)->stats.max_read_locks ) { \
\
(rw)->stats.max_read_locks = (rw)->active_readers; \
} \
} \
\
if ( (count_ptr)->rec_lock_count > \
(rw)->stats.max_read_lock_recursion_depth ) { \
\
(rw)->stats.max_read_lock_recursion_depth = \
(count_ptr)->rec_lock_count; \
} \
} while ( FALSE ) /* end REC_RW_LOCK_STATS__UPDATE_FOR_RD_LOCK */
#define REC_RW_LOCK_STATS__UPDATE_FOR_RD_LOCK_DELAY(rw, waiting_count) \
do { \
HDassert(rw); \
HDassert((rw)->magic == H5TS_PT_REC_RW_LOCK_MAGIC); \
HDassert((waiting_count) > 0); \
\
(rw)->stats.read_locks_delayed++; \
\
if ( (rw)->stats.max_read_locks_pending < (waiting_count) ) { \
\
(rw)->stats.max_read_locks_pending = (waiting_count); \
} \
} while ( FALSE ) /* REC_RW_LOCK_STATS__UPDATE_FOR_RD_LOCK_DELAY */
/* Update statistics for a read unlock.  rw is a pointer to the lock;
 * count_ptr is this thread's recursive entry count (already decremented).
 *
 * BUG FIX: the original expanded one reference of the count_ptr argument
 * without parentheses ("count_ptr->rec_lock_count"), which would mis-parse
 * if the macro were invoked with a non-trivial expression (e.g. a cast) as
 * that argument.  All argument references are now parenthesized, matching
 * the WR_UNLOCK counterpart.
 */
#define REC_RW_LOCK_STATS__UPDATE_FOR_RD_UNLOCK(rw, count_ptr)                \
    do {                                                                      \
        HDassert(rw);                                                         \
        HDassert((rw)->magic == H5TS_PT_REC_RW_LOCK_MAGIC);                   \
        HDassert(count_ptr);                                                  \
        HDassert((count_ptr)->magic == H5TS_PT_REC_RW_REC_ENTRY_COUNT_MAGIC); \
        HDassert((count_ptr)->rec_lock_count >= 0);                           \
        HDassert(!(count_ptr)->write_lock);                                   \
                                                                              \
        (rw)->stats.read_locks_released++;                                    \
                                                                              \
        if ((count_ptr)->rec_lock_count == 0) {                               \
                                                                              \
            (rw)->stats.real_read_locks_released++;                           \
        }                                                                     \
    } while ( FALSE ) /* end REC_RW_LOCK_STATS__UPDATE_FOR_RD_UNLOCK */
#define REC_RW_LOCK_STATS__UPDATE_FOR_WR_LOCK(rw, count_ptr) \
do { \
HDassert(rw); \
HDassert((rw)->magic == H5TS_PT_REC_RW_LOCK_MAGIC); \
HDassert(count_ptr); \
HDassert((count_ptr)->magic == H5TS_PT_REC_RW_REC_ENTRY_COUNT_MAGIC); \
HDassert((count_ptr)->rec_lock_count >= 1); \
HDassert((count_ptr)->write_lock); \
\
(rw)->stats.write_locks_granted++; \
\
if ( (count_ptr)->rec_lock_count == 1) { \
\
(rw)->stats.real_write_locks_granted++; \
\
if ( (rw)->active_writers > (rw)->stats.max_write_locks ) { \
\
(rw)->stats.max_write_locks = (rw)->active_writers; \
} \
} \
\
if ( (count_ptr)->rec_lock_count > \
(rw)->stats.max_write_lock_recursion_depth ) { \
\
(rw)->stats.max_write_lock_recursion_depth = \
(count_ptr)->rec_lock_count; \
} \
} while ( FALSE ) /* end REC_RW_LOCK_STATS__UPDATE_FOR_WR_LOCK */
#define REC_RW_LOCK_STATS__UPDATE_FOR_WR_LOCK_DELAY(rw, waiting_count) \
do { \
HDassert(rw); \
HDassert((rw)->magic == H5TS_PT_REC_RW_LOCK_MAGIC); \
HDassert((waiting_count) > 0); \
\
(rw)->stats.write_locks_delayed++; \
\
if ( (rw)->stats.max_write_locks_pending < (waiting_count) ) { \
\
(rw)->stats.max_write_locks_pending = (waiting_count); \
} \
} while ( FALSE ) /* REC_RW_LOCK_STATS__UPDATE_FOR_WR_LOCK_DELAY */
#define REC_RW_LOCK_STATS__UPDATE_FOR_WR_UNLOCK(rw, count_ptr) \
do { \
HDassert(rw); \
HDassert((rw)->magic == H5TS_PT_REC_RW_LOCK_MAGIC); \
HDassert(count_ptr); \
HDassert((count_ptr)->magic == H5TS_PT_REC_RW_REC_ENTRY_COUNT_MAGIC); \
HDassert((count_ptr)->rec_lock_count >= 0); \
HDassert((count_ptr)->write_lock); \
\
(rw)->stats.write_locks_released++; \
\
if ( (count_ptr)->rec_lock_count == 0) { \
\
(rw)->stats.real_write_locks_released++; \
} \
} while ( FALSE ) /* end REC_RW_LOCK_STATS__UPDATE_FOR_WR_UNLOCK */
/* clang-format on */
/* Library level data structures */
/* Mutexes, Threads, and Attributes */
@@ -86,6 +237,251 @@ typedef pthread_mutex_t H5TS_mutex_simple_t;
typedef pthread_key_t H5TS_key_t;
typedef pthread_once_t H5TS_once_t;
/******************************************************************************
*
* Structure H5TS_pt_rec_rw_lock_stats_t
*
* Catchall structure for statistics on the recursive p-threads based
* recursive R/W lock (see declaration of H5TS_pt_rec_rw_lock_t below).
*
* Since the mutex must be held when reading a consistent set of statistics
 * from the recursive R/W lock, it simplifies matters to bundle them into
* a single structure. This structure exists for that purpose.
*
* If you modify this structure, be sure to make equivalent changes to
* the reset_stats initializer in H5TS_pt_rec_rw_lock_reset_stats().
*
* Individual fields are discussed below.
*
* JRM -- 8/28/20
*
* Read lock stats:
*
* read_locks_granted: 64 bit integer used to count the total number of read
* locks granted. Note that this includes recursive lock
* requests.
*
* read_locks_released: 64 bit integer used to count the total number of read
* locks released. Note that this includes recursive lock
* release requests.
*
* real_read_locks_granted: 64 bit integer used to count the total number of
* read locks granted, less any recursive lock requests.
*
* real_read_locks_released: 64 bit integer used to count the total number of
* read locks released, less any recursive lock releases.
*
* max_read_locks; 64 bit integer used to track the maximum number of read
* locks active at any point in time.
*
* max_read_lock_recursion_depth; 64 bit integer used to track the maximum
* recursion depth observed for any read lock.
*
* read_locks_delayed: 64 bit integer used to track the number of read locks
* that were not granted immediately.
*
* max_read_locks_delayed; 64 bit integer used to track the maximum number of
* pending read locks at any point in time.
*
*
* Write lock stats:
*
* write_locks_granted: 64 bit integer used to count the total number of write
* locks granted. Note that this includes recursive lock
* requests.
*
* write_locks_released: 64 bit integer used to count the total number of write
* locks released. Note that this includes recursive lock
* release requests.
*
* real_write_locks_granted: 64 bit integer used to count the total number of
* write locks granted, less any recursive lock requests.
*
* real_write_locks_released: 64 bit integer used to count the total number of
* write locks released, less any recursive lock releases.
*
* max_write_locks; 64 bit integer used to track the maximum number of write
* locks active at any point in time. Must be either zero or one.
*
* max_write_lock_recursion_depth; 64 bit integer used to track the maximum
* recursion depth observed for any write lock.
*
* write_locks_delayed: 64 bit integer used to track the number of write locks
* that were not granted immediately.
*
* max_write_locks_delayed; 64 bit integer used to track the maximum number of
* pending write locks at any point in time.
*
******************************************************************************/
/* Statistics snapshot for the recursive R/W lock.  "real" counts exclude
 * recursive (nested) lock / unlock calls by a thread that already holds the
 * lock; the un-prefixed counts include them.  All fields are protected by
 * the lock's mutex -- see the commentary block above for full details.
 */
typedef struct H5TS_pt_rec_rw_lock_stats_t {
    int64_t read_locks_granted;             /* total read lock grants, incl. recursive    */
    int64_t read_locks_released;            /* total read lock releases, incl. recursive  */
    int64_t real_read_locks_granted;        /* initial (non-recursive) read lock grants   */
    int64_t real_read_locks_released;       /* initial (non-recursive) read lock releases */
    int64_t max_read_locks;                 /* high-water mark of simultaneous readers    */
    int64_t max_read_lock_recursion_depth;  /* deepest read lock recursion observed       */
    int64_t read_locks_delayed;             /* read lock requests that had to wait        */
    int64_t max_read_locks_pending;         /* high-water mark of waiting readers         */
    int64_t write_locks_granted;            /* total write lock grants, incl. recursive   */
    int64_t write_locks_released;           /* total write lock releases, incl. recursive */
    int64_t real_write_locks_granted;       /* initial (non-recursive) write lock grants  */
    int64_t real_write_locks_released;      /* initial (non-recursive) write lock releases */
    int64_t max_write_locks;                /* high-water mark of writers (0 or 1)        */
    int64_t max_write_lock_recursion_depth; /* deepest write lock recursion observed      */
    int64_t write_locks_delayed;            /* write lock requests that had to wait       */
    int64_t max_write_locks_pending;        /* high-water mark of waiting writers         */
} H5TS_pt_rec_rw_lock_stats_t;
/******************************************************************************
*
* Structure H5TS_pt_rec_rw_lock_t
*
 * A read / write lock is a lock that allows either an arbitrary number
 * of readers, or a single writer, into a critical region. A recursive
 * lock is one that allows a thread that already has a lock (be it read or
 * write) to successfully request the lock again, only dropping the lock
 * when the number of un-lock calls equals the number of lock calls.
*
* Note that we can't use the p-threads R/W lock, as while it permits
* recursive read locks, it disallows recursive write locks.
*
* This structure is a catchall for the fields needed to implement a
* p-threads based recursive R/W lock, and for the associate statistics
* collection fields.
*
* This recursive R/W lock implementation is an extension of the R/W lock
* implementation given in "UNIX network programming" Volume 2, Chapter 8
* by w. Richard Stevens, 2nd edition.
*
* Individual fields are discussed below.
*
* JRM -- 8/28/20
*
 * magic: Unsigned 32 bit integer field used for sanity checking. This
 * field must always be set to H5TS_PT_REC_RW_LOCK_MAGIC.
 * If this structure is allocated dynamically, remember to set
 * it to some invalid value before discarding the structure.
*
 * policy Integer containing a code indicating the precedence policy
* used by the R/W lock. The supported policies are listed
* below:
*
* H5TS__RW_LOCK_POLICY__FAVOR_WRITERS:
*
* If selected, the R/W lock will grant access to a pending
* writer if there are both pending readers and writers.
*
*
* --- Define other policies here ---
*
*
* mutex: Mutex used to maintain mutual exclusion on the fields of
* of this structure.
*
* readers_cv: Condition variable used for waiting readers.
*
* writers_cv: Condition variable used for waiting writers.
*
* waiting_readers_count: 32 bit integer used to maintain a count of
* waiting readers. This value should always be non-negative.
*
* waiting_writers_count: 32 bit integer used to maintain a count of
* waiting writers. This value should always be non-negative.
*
* The following two fields could be combined into a single field, with
* the count of active readers being represented by a positive value, and
* the number of writers by a negative value. Two fields are used to
* facilitate sanity checking.
*
* active_readers: 32 bit integer used to maintain a count of
* readers that currently hold a read lock. This value
* must be zero if active_writers is positive. It should
* never be negative.
*
* active_writers: 32 bit integer used to maintain a count of
* writers that currently hold a write lock. This value
* must always be either 0 or 1, and must be zero if
* active_readers is positive. It should never be negative.
*
* rec_entry_count_key: Instance of pthread_key_t used to maintain
* a thread specific lock type and recursive entry count
* for all threads holding a lock.
*
* stats: Instance of H5TS_pt_rec_rw_lock_stats_t used to track
* statistics on the recursive R/W lock. See the declaration
* of the structure for discussion of its fields.
*
* Note that the stats are gathered into a structure because
* we must obtain the mutex when reading the statistics to
* avoid changes while the statistics are being read. Collecting
* them into a structure facilitates this.
*
******************************************************************************/
/* Magic value stored in the magic field of a valid, initialized lock
 * (sanity checking only) */
#define H5TS_PT_REC_RW_LOCK_MAGIC 0XABCD
/* Lock scheduling policy: grant access to a pending writer when both
 * readers and writers are waiting.  See the header comment above. */
#define H5TS__RW_LOCK_POLICY__FAVOR_WRITERS 0
/* Recursive read/write lock.  See the header comment above for a
 * field-by-field discussion.  All counts and condition variables must
 * only be touched while mutex is held. */
typedef struct H5TS_pt_rec_rw_lock_t {
uint32_t magic; /* == H5TS_PT_REC_RW_LOCK_MAGIC while the lock is valid */
int32_t policy; /* scheduling policy -- currently only FAVOR_WRITERS */
pthread_mutex_t mutex; /* protects all following fields */
pthread_cond_t readers_cv; /* waiting readers block here */
pthread_cond_t writers_cv; /* waiting writers block here */
int32_t waiting_readers_count; /* threads blocked for a read lock (>= 0) */
int32_t waiting_writers_count; /* threads blocked for a write lock (>= 0) */
int32_t active_readers; /* threads holding a read lock (>= 0; 0 if a writer is active) */
int32_t active_writers; /* threads holding the write lock (0 or 1) */
pthread_key_t rec_entry_count_key; /* per-thread H5TS_pt_rec_entry_count_t */
int32_t writer_rec_entry_count; /* NOTE(review): not described in the header
                                 * comment above -- presumably the recursive
                                 * entry count of the active writer; confirm
                                 * against the implementation */
struct H5TS_pt_rec_rw_lock_stats_t stats; /* statistics; read/reset under mutex */
} H5TS_pt_rec_rw_lock_t;
/******************************************************************************
 *
 * Structure H5TS_pt_rec_entry_count_t
 *
 * Structure associated with the rec_entry_count_key defined in
 * H5TS_pt_rec_rw_lock_t.
 *
 * The primary purpose of this structure is to maintain a count of recursive
 * locks so that the lock can be dropped when the count drops to zero.
 *
 * Additional fields are included for purposes of sanity checking.
 *
 * Individual fields are discussed below.
 *
 * JRM -- 8/28/20
 *
 * magic: Unsigned 32 bit integer field used for sanity checking. This
 * field must always be set to
 * H5TS_PT_REC_RW_REC_ENTRY_COUNT_MAGIC, and should be set to
 * some invalid value just before the structure is freed.
 *
 * write_lock: Boolean field that is set to TRUE if the count is for a write
 * lock, and to FALSE if it is for a read lock.
 *
 * rec_lock_count: Count of the number of recursive lock calls, less
 * the number of recursive unlock calls. The lock in question
 * is dropped when the count drops to zero.
 *
 ******************************************************************************/
#define H5TS_PT_REC_RW_REC_ENTRY_COUNT_MAGIC 0XABBA
typedef struct H5TS_pt_rec_entry_count_t {
uint32_t magic; /* == H5TS_PT_REC_RW_REC_ENTRY_COUNT_MAGIC while valid */
hbool_t write_lock; /* TRUE iff this thread's lock is a write lock */
int64_t rec_lock_count; /* recursive entry count; lock dropped when it hits 0 */
} H5TS_pt_rec_entry_count_t;
/* Scope Definitions -- map H5TS contention-scope names onto their
 * pthreads equivalents (see pthread_attr_setscope()) */
#define H5TS_SCOPE_SYSTEM PTHREAD_SCOPE_SYSTEM
#define H5TS_SCOPE_PROCESS PTHREAD_SCOPE_PROCESS
@@ -122,6 +518,19 @@ H5_DLL herr_t H5TS_cancel_count_inc(void);
H5_DLL herr_t H5TS_cancel_count_dec(void);
H5_DLL H5TS_thread_t H5TS_create_thread(void *(*func)(void *), H5TS_attr_t *attr, void *udata);
/* recursive R/W lock related function declarations */
/* Allocate and initialize a per-thread recursive entry count; write_lock
 * records whether the count is for a write or a read lock. */
H5_DLL H5TS_pt_rec_entry_count_t *H5TS_alloc_pt_rec_entry_count(hbool_t write_lock);
/* Discard a per-thread recursive entry count.  Takes void * -- presumably
 * to match the pthread_key_create() destructor signature; confirm. */
H5_DLL void H5TS_free_pt_rec_entry_count(void *target_ptr);
/* Initialize a recursive R/W lock with the indicated scheduling policy. */
H5_DLL herr_t H5TS_pt_rec_rw_lock_init(H5TS_pt_rec_rw_lock_t *rw_lock_ptr, int policy);
/* Tear down a recursive R/W lock; must not be held by any thread. */
H5_DLL herr_t H5TS_pt_rec_rw_lock_takedown(H5TS_pt_rec_rw_lock_t *rw_lock_ptr);
/* Obtain a (possibly recursive) read lock. */
H5_DLL herr_t H5TS_pt_rec_rw_rdlock(H5TS_pt_rec_rw_lock_t *rw_lock_ptr);
/* Obtain a (possibly recursive) write lock. */
H5_DLL herr_t H5TS_pt_rec_rw_wrlock(H5TS_pt_rec_rw_lock_t *rw_lock_ptr);
/* Release one level of the calling thread's read or write lock. */
H5_DLL herr_t H5TS_pt_rec_rw_unlock(H5TS_pt_rec_rw_lock_t *rw_lock_ptr);
/* Copy the lock's current statistics into *stats_ptr. */
H5_DLL herr_t H5TS_pt_rec_rw_lock_get_stats(H5TS_pt_rec_rw_lock_t * rw_lock_ptr,
H5TS_pt_rec_rw_lock_stats_t *stats_ptr);
/* Reset the lock's statistics to their initial (zeroed) state. */
H5_DLL herr_t H5TS_pt_rec_rw_lock_reset_stats(H5TS_pt_rec_rw_lock_t *rw_lock_ptr);
/* Print the supplied statistics, prefixed with header_str. */
H5_DLL herr_t H5TS_pt_rec_rw_lock_print_stats(const char *header_str, H5TS_pt_rec_rw_lock_stats_t *stats_ptr);
#if defined c_plusplus || defined __cplusplus
}
#endif /* c_plusplus || __cplusplus */

View File

@@ -2010,9 +2010,13 @@ extern char H5_lib_vers_info_g[];
/* replacement structure for original global variable */
typedef struct H5_api_struct {
H5TS_mutex_t init_lock; /* API entrance mutex */
hbool_t H5_libinit_g; /* Has the library been initialized? */
hbool_t H5_libterm_g; /* Is the library being shutdown? */
#if H5TS__USE_REC_RW_LOCK_FOR_GLOBAL_MUTEX
H5TS_pt_rec_rw_lock_t init_rw_lock;
#else /* H5TS__USE_REC_RW_LOCK_FOR_GLOBAL_MUTEX */
H5TS_mutex_t init_lock; /* API entrance mutex */
#endif /* H5TS__USE_REC_RW_LOCK_FOR_GLOBAL_MUTEX */
hbool_t H5_libinit_g; /* Has the library been initialized? */
hbool_t H5_libterm_g; /* Is the library being shutdown? */
} H5_api_t;
/* Macros for accessing the global variables */
@@ -2027,9 +2031,18 @@ typedef struct H5_api_struct {
#endif
/* Macros for threadsafe HDF-5 Phase I locks */
#if H5TS__USE_REC_RW_LOCK_FOR_GLOBAL_MUTEX
#define H5_API_LOCK H5TS_pt_rec_rw_wrlock(&H5_g.init_rw_lock);
#define H5_API_UNLOCK H5TS_pt_rec_rw_unlock(&H5_g.init_rw_lock);
#else /* H5TS__USE_REC_RW_LOCK_FOR_GLOBAL_MUTEX */
#define H5_API_LOCK H5TS_mutex_lock(&H5_g.init_lock);
#define H5_API_UNLOCK H5TS_mutex_unlock(&H5_g.init_lock);
#endif /* H5TS__USE_REC_RW_LOCK_FOR_GLOBAL_MUTEX */
/* Macros for thread cancellation-safe mechanism */
#define H5_API_UNSET_CANCEL H5TS_cancel_count_inc();

View File

@@ -146,7 +146,7 @@ LDADD=libh5test.la $(LIBHDF5)
# List the source files for tests that have more than one
ttsafe_SOURCES=ttsafe.c ttsafe_dcreate.c ttsafe_error.c ttsafe_cancel.c \
ttsafe_acreate.c ttsafe_attr_vlen.c
ttsafe_acreate.c ttsafe_attr_vlen.c ttsafe_rec_rw_lock.c
cache_image_SOURCES=cache_image.c genall5.c
mirror_vfd_SOURCES=mirror_vfd.c genall5.c

View File

@@ -105,6 +105,19 @@ main(int argc, char *argv[])
/* Tests are generally arranged from least to most complexity... */
AddTest("is_threadsafe", tts_is_threadsafe, NULL, "library threadsafe status", NULL);
#ifdef H5_HAVE_THREADSAFE
AddTest("rec_rwlock_1", tts_rec_rw_lock_smoke_check_1, cleanup_rec_rw_lock_smoke_check_1,
"recursive R/W lock smoke check 1 -- basic", NULL);
AddTest("rec_rwlock_2", tts_rec_rw_lock_smoke_check_2, cleanup_rec_rw_lock_smoke_check_2,
"recursive R/W lock smoke check 2 -- mob of readers", NULL);
AddTest("rec_rwlock_3", tts_rec_rw_lock_smoke_check_3, cleanup_rec_rw_lock_smoke_check_3,
"recursive R/W lock smoke check 3 -- mob of writers", NULL);
AddTest("rec_rwlock_4", tts_rec_rw_lock_smoke_check_4, cleanup_rec_rw_lock_smoke_check_4,
"recursive R/W lock smoke check 4 -- mixed mob", NULL);
AddTest("dcreate", tts_dcreate, cleanup_dcreate, "multi-dataset creation", NULL);
AddTest("error", tts_error, cleanup_error, "per-thread error stacks", NULL);
#ifdef H5_HAVE_PTHREAD_H

View File

@@ -30,6 +30,10 @@ extern char *gen_name(int);
/* Prototypes for the test routines */
void tts_is_threadsafe(void);
#ifdef H5_HAVE_THREADSAFE
void tts_rec_rw_lock_smoke_check_1(void);
void tts_rec_rw_lock_smoke_check_2(void);
void tts_rec_rw_lock_smoke_check_3(void);
void tts_rec_rw_lock_smoke_check_4(void);
void tts_dcreate(void);
void tts_error(void);
void tts_cancel(void);
@@ -37,6 +41,10 @@ void tts_acreate(void);
void tts_attr_vlen(void);
/* Prototypes for the cleanup routines */
void cleanup_rec_rw_lock_smoke_check_1(void);
void cleanup_rec_rw_lock_smoke_check_2(void);
void cleanup_rec_rw_lock_smoke_check_3(void);
void cleanup_rec_rw_lock_smoke_check_4(void);
void cleanup_dcreate(void);
void cleanup_error(void);
void cleanup_cancel(void);

1305
test/ttsafe_rec_rw_lock.c Normal file

File diff suppressed because it is too large Load Diff