[svn-r8300] Purpose:

Cleanup

Description:
Removed FPHDF5 configure and test file since it is not implemented
in v1.6.

Platforms tested:
Only tested in Sol parallel.

Misc. update:
Update MANIFEST.
This commit is contained in:
Albert Cheng
2004-04-05 18:39:04 -05:00
parent 740a672798
commit 3a0524dfa9
5 changed files with 2 additions and 256 deletions

View File

@@ -1101,7 +1101,6 @@
./testpar/Makefile.in
./testpar/t_dset.c
./testpar/t_file.c
./testpar/t_fphdf5.c
./testpar/t_mdset.c
./testpar/t_mpi.c
./testpar/t_ph5basic.c

2
configure vendored
View File

@@ -31714,7 +31714,6 @@ _ACEOF
fi
echo "$as_me:$LINENO: checking if MPI_File_set_size works for files over 2GB" >&5
echo $ECHO_N "checking if MPI_File_set_size works for files over 2GB... $ECHO_C" >&6
if test "${hdf5_cv_mpi_file_set_size_big+set}" = set; then
@@ -33415,7 +33414,6 @@ PRINT "Features:"
PRINT_N " dmalloc"
IF_YES_NO "$HAVE_DMALLOC"
PRINT_N " Function Stack Tracing"
IF_ENABLED_DISABLED "$FUNCSTACK"

View File

@@ -2033,23 +2033,6 @@ if test -n "$PARALLEL"; then
AC_DEFINE([HAVE_MPE], [1], [Define if we have MPE support])
fi
dnl ----------------------------------------------------------------------
dnl Check if they would like the "Flexible parallel" functions compiled in
dnl
dnl AC_MSG_CHECKING([if Flexible Parallel HDF5 interface enabled])
dnl AC_ARG_ENABLE([fphdf5],
dnl [AC_HELP_STRING([--enable-fphdf5],
dnl [Enable the Flexible Parallel HDF5
dnl interface])],
dnl [FPHDF5=$enableval])
dnl if test "X$FPHDF5" = "Xyes"; then
dnl AC_DEFINE(HAVE_FPHDF5, 1,
dnl [Define if we want flexible parallel HDF5 support])
dnl AC_MSG_RESULT(yes)
dnl else
dnl AC_MSG_RESULT(no)
dnl fi
dnl ----------------------------------------------------------------------
dnl Set the flag to indicate that the MPI_File_set_size() function
dnl works with files over 2GB, unless it's already set in the cache.
@@ -2513,11 +2496,6 @@ PRINT "Features:"
PRINT_N " dmalloc"
IF_YES_NO "$HAVE_DMALLOC"
dnl
dnl PRINT_N " Flexible Parallel HDF"
dnl IF_YES_NO "$FPHDF5"
dnl
PRINT_N " Function Stack Tracing"
IF_ENABLED_DISABLED "$FUNCSTACK"

View File

@@ -29,7 +29,7 @@ RUNTEST=$(RUNPARALLEL)
## Test programs and scripts.
##
TEST_PROGS_PARA=t_mpi t_fphdf5 testphdf5
TEST_PROGS_PARA=t_mpi testphdf5
TEST_SCRIPTS=testph5.sh
## These are our main targets
@@ -45,7 +45,7 @@ DISTCLEAN=go Makefile testph5.sh
## Test source files
TEST_PHDF5_SRC=testphdf5.c t_dset.c t_file.c t_mdset.c t_ph5basic.c
TEST_PHDF5_OBJ=$(TEST_PHDF5_SRC:.c=.lo)
TEST_SRC=t_mpi.c t_fphdf5.c $(TEST_PHDF5_SRC)
TEST_SRC=t_mpi.c $(TEST_PHDF5_SRC)
TEST_OBJ=$(TEST_SRC:.c=.lo)
TEST_HDR=testphdf5.h
@@ -54,9 +54,6 @@ $(PROGS): $(LIBHDF5) $(LIBH5TEST)
$(TEST_OBJ): $(TEST_HDR)
t_fphdf5: t_fphdf5.lo
@$(LT_LINK_EXE) $(CFLAGS) -o $@ t_fphdf5.lo $(LIBH5TEST) $(LIBHDF5) $(LDFLAGS) $(LIBS)
t_mpi: t_mpi.lo
@$(LT_LINK_EXE) $(CFLAGS) -o $@ t_mpi.lo $(LIBH5TEST) $(LIBHDF5) $(LDFLAGS) $(LIBS)

View File

@@ -1,226 +0,0 @@
/*
* Example of how to use Flexible Parallel HDF5.
*
* Author:
* Bill Wendling (wendling@ncsa.uiuc.edu)
* 20. February 2003
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "mpi.h"
#ifdef H5_HAVE_FPHDF5
#include "hdf5.h"
#include "H5public.h"
#include "H5FPpublic.h"
MPI_Comm SAP_Comm;
MPI_Comm SAP_Barrier_Comm;
void err(const char *func, int mrc);
void create_file(int);
/*
 * Exercise file creation through the Flexible Parallel HDF5 interface:
 * every non-SAP process creates /tmp/foo.h5; rank 0 then creates a 3x5
 * dataset and rank 2 re-opens it, with MPI_Barrier(SAP_Barrier_Comm)
 * between the phases.  Progress and errors are traced to stderr.
 *
 * sap_rank - rank of the Set-Aside Process, forwarded unchanged to
 *            H5Pset_fapl_fphdf5().
 */
void create_file(int sap_rank)
{
    int my_rank, mrc;
    /* Start every handle at an invalid id: the cleanup code after `done'
     * unconditionally closes acc_tpl and fid, which previously read
     * indeterminate values (undefined behavior) when an early error
     * path jumped to `done' before the handles were assigned. */
    hid_t fid = -1, acc_tpl = -1, sid = -1, dataset = -1;

    if ((mrc = MPI_Comm_rank(SAP_Comm, &my_rank)) != MPI_SUCCESS) {
        err("H5FPinit", mrc);
        return;
    }

    fprintf(stderr, "%d: Creating file foo.h5\n", my_rank);

    if ((acc_tpl = H5Pcreate(H5P_FILE_ACCESS)) < 0) {
        fprintf(stderr, "%d: Failed to create access property list\n", my_rank);
        goto done;
    }

    fprintf(stderr, "%d: Created access property list\n", my_rank);

    if (H5Pset_fapl_fphdf5(acc_tpl, SAP_Comm, SAP_Barrier_Comm,
                           MPI_INFO_NULL, (unsigned)sap_rank) < 0) {
        fprintf(stderr, "%d: Failed to set fapl\n", my_rank);
        goto done;
    }

    fprintf(stderr, "%d: Set access property list\n", my_rank);

    if ((fid = H5Fcreate("/tmp/foo.h5", H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl)) < 0) {
        fprintf(stderr, "%d: Failed to create file foo.h5\n", my_rank);
        goto done;
    }

    fprintf(stderr, "%d: Created file foo.h5\n", my_rank);

    /* NOTE(review): MPI_Comm is not necessarily an int (it is a pointer
     * type in some MPI implementations), so printing it with %d is not
     * portable -- kept as-is since this is debug tracing only. */
    fprintf(stderr, "%d: SAP_Barrier_Comm==%d\n", my_rank, SAP_Barrier_Comm);
    MPI_Barrier(SAP_Barrier_Comm);
    fflush(NULL);
    fflush(NULL);
    fflush(NULL);
    sleep(3);

    /* Debugging leftover in the original: deliberately skips the
     * dataset create/open phases below. */
    goto done;

    if (my_rank == 0) {
        /* Create a dataset in the file */
        hsize_t dims[2];

        dims[0] = 3;
        dims[1] = 5;

        if ((sid = H5Screate_simple(2, dims, NULL)) < 0) {
            fprintf(stderr, "%d: Failed to create simple dataspace\n", my_rank);
            goto done;
        }

        if ((dataset = H5Dcreate(fid, "/dataset", H5T_NATIVE_INT, sid, H5P_DEFAULT)) < 0) {
            fprintf(stderr, "%d: Failed to create simple dataset\n", my_rank);
            goto done;
        }

        fprintf(stderr, "%d: Created dataset ``/dataset''\n", my_rank);
        H5Sclose(sid);
        H5Dclose(dataset);
    }

    fprintf(stderr, "%d: SAP_Barrier_Comm==%d\n", my_rank, SAP_Barrier_Comm);
    MPI_Barrier(SAP_Barrier_Comm);

    if (my_rank == 2) {
        /* See if dataset is there */
        if ((dataset = H5Dopen(fid, "/dataset")) < 0) {
            fprintf(stderr, "%d: Failed to open dataset\n", my_rank);
            goto done;
        }

        fprintf(stderr, "%d: Opened dataset ``/dataset''\n", my_rank);
        H5Dclose(dataset);
    }

    fprintf(stderr, "%d: SAP_Barrier_Comm==%d\n", my_rank, SAP_Barrier_Comm);
    MPI_Barrier(SAP_Barrier_Comm);

done:
    fprintf(stderr, "----------------------------\n");
    fflush(stderr);

    /* Closing an invalid (-1) handle fails, producing the same "Failed
     * to close" trace the original emitted on error paths. */
    if (H5Pclose(acc_tpl) < 0)
        fprintf(stderr, "%d: Failed to close access property list\n", my_rank);
    else
        fprintf(stderr, "%d: Closed access property list\n", my_rank);

    if (H5Fclose(fid) < 0)
        fprintf(stderr, "%d: Failed to close file\n", my_rank);
    else
        fprintf(stderr, "%d: Closed file\n", my_rank);

    fprintf(stderr, "%d: leaving create_file\n", my_rank);
    MPI_Barrier(SAP_Barrier_Comm);
    fflush(NULL);
    fflush(NULL);
    fflush(NULL);
    fflush(NULL);
    fflush(NULL);
    sleep(5);
}
/*
 * Test driver for the FPHDF5 interface: initializes MPI and FPHDF5,
 * then every process except the Set-Aside Process (rank `sap_rank')
 * runs create_file().  Returns EXIT_SUCCESS or EXIT_FAILURE.
 */
int main(int argc, char *argv[])
{
    int ret = EXIT_SUCCESS, mrc;
    /* Initialize: the `fail' path prints my_rank, which was previously
     * read uninitialized (undefined behavior) when H5FPinit or
     * MPI_Comm_rank failed before it was assigned. */
    int my_rank = -1;
    int sap_rank = 1;   /* rank that becomes the Set-Aside Process */

    MPI_Init(&argc, &argv);

    mrc = H5FPinit(MPI_COMM_WORLD, sap_rank, &SAP_Comm, &SAP_Barrier_Comm);

    if (mrc < 0) {
        err("H5FPinit", mrc);
        ret = EXIT_FAILURE;
        goto fail;
    }

    if ((mrc = MPI_Comm_rank(SAP_Comm, &my_rank)) != MPI_SUCCESS) {
        err("H5FPinit", mrc);
        ret = EXIT_FAILURE;
        goto fail;
    }

    fprintf(stderr, "%d: Initialized FPHDF5\n", my_rank);

    /* The SAP itself never runs the test body. */
    if (my_rank != sap_rank) {
        create_file(sap_rank);
        MPI_Barrier(SAP_Barrier_Comm);
    }

fail:
    H5FPfinalize();
    fprintf(stderr, "%d: H5FP finalized\n", my_rank);
    H5close();
    fprintf(stderr, "%d: HDF5 Closed\n", my_rank);
    MPI_Finalize();
    fprintf(stderr, "%d: MPI finalized\n", my_rank);
    return ret;
}
/*
 * Report an MPI error to stderr: prints the failing function name
 * `func' followed by a human-readable description of the MPI error
 * code `mrc'.
 */
void err(const char *func, int mrc)
{
    const char *reason;

    if (mrc == MPI_ERR_COMM)
        reason = "invalid communicator\n";
    else if (mrc == MPI_ERR_COUNT)
        reason = "invalid count argument\n";
    else if (mrc == MPI_ERR_TYPE)
        reason = "invalid datatype argument\n";
    else if (mrc == MPI_ERR_TAG)
        reason = "invalid tag argument\n";
    else if (mrc == MPI_ERR_RANK)
        reason = "invalid source or destination rank\n";
    else if (mrc == MPI_ERR_INTERN)
        reason = "internal MPI-IO error\n";
    else if (mrc == MPI_ERR_REQUEST)
        reason = "invalid MPI_Request\n";
    else if (mrc == MPI_ERR_ARG)
        reason = "invalid argument\n";
    else
        reason = "unknown MPI-IO error\n";

    fprintf(stderr, "error: %s: ", func);
    fputs(reason, stderr);
}
#else
/* dummy program compiled when H5_HAVE_FPHDF5 is not configured in */
/*
 * Fallback build when FPHDF5 support is not compiled in: rank 0
 * announces that the test is skipped and every rank exits successfully.
 */
int
main(int argc, char *argv[])
{
    int rank = 0;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 0)
        printf("No t_fphdf5 Test because FPHDF5 is not configured in\n");

    MPI_Finalize();
    return 0;
}
#endif