Merge pull request #2819 in HDFFV/hdf5 from coll_chunk_fill_fix to develop

* commit '4c43ff0b8b015ccfc1f4aeab18f189d629a5beb9':
  Simplify default use of MPI_BYTE
  Don't free builtin MPI_BYTE MPI type
  Simplify code to avoid using a boolean to free MPI types
  Avoid creating MPI datatypes on ranks with 0 chunks to write
This commit is contained in:
Quincey Koziol
2020-08-21 16:17:10 -05:00

View File

@@ -4954,7 +4954,7 @@ H5D__chunk_collective_fill(const H5D_t *dset, H5D_chunk_coll_info_t *chunk_info,
     int blocks, leftover, block_len; /* converted to int for MPI */
     MPI_Aint *chunk_disp_array = NULL;
     int *block_lens = NULL;
-    MPI_Datatype mem_type, file_type;
+    MPI_Datatype mem_type = MPI_BYTE, file_type = MPI_BYTE;
     H5FD_mpio_xfer_t prev_xfer_mode; /* Previous data xfer mode */
     hbool_t have_xfer_mode = FALSE; /* Whether the previous xffer mode has been retrieved */
     hbool_t need_addr_sort = FALSE;
@@ -4980,9 +4980,9 @@ H5D__chunk_collective_fill(const H5D_t *dset, H5D_chunk_coll_info_t *chunk_info,
         HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "Resulted in division by zero")
     num_blocks = (size_t)(chunk_info->num_io / (size_t)mpi_size); /* value should be the same on all procs */
-    /* after evenly distributing the blocks between processes, are
-       there any leftover blocks for each individual process
-       (round-robin) */
+    /* After evenly distributing the blocks between processes, are there any
+     * leftover blocks for each individual process (round-robin)?
+     */
     leftover_blocks = (size_t)(chunk_info->num_io % (size_t)mpi_size);
     /* Cast values to types needed by MPI */
@@ -4990,58 +4990,62 @@ H5D__chunk_collective_fill(const H5D_t *dset, H5D_chunk_coll_info_t *chunk_info,
     H5_CHECKED_ASSIGN(leftover, int, leftover_blocks, size_t);
     H5_CHECKED_ASSIGN(block_len, int, chunk_size, size_t);
-    /* Allocate buffers */
-    /* (MSC - should not need block_lens if MPI_type_create_hindexed_block is working) */
-    if(NULL == (block_lens = (int *)H5MM_malloc((size_t)(blocks + 1) * sizeof(int))))
-        HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk lengths buffer")
-    if(NULL == (chunk_disp_array = (MPI_Aint *)H5MM_malloc((size_t)(blocks + 1) * sizeof(MPI_Aint))))
-        HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk file displacement buffer")
-    for(i = 0 ; i < blocks ; i++) {
-        /* store the chunk address as an MPI_Aint */
-        chunk_disp_array[i] = (MPI_Aint)(chunk_info->addr[i + mpi_rank*blocks]);
-        /* MSC - should not need this if MPI_type_create_hindexed_block is working */
-        block_lens[i] = block_len;
-        /* make sure that the addresses in the datatype are
-           monotonically non decreasing */
-        if(i && (chunk_disp_array[i] < chunk_disp_array[i - 1]))
-            need_addr_sort = TRUE;
-    } /* end for */
-    /* calculate if there are any leftover blocks after evenly
-       distributing. If there are, then round robin the distribution
-       to processes 0 -> leftover. */
-    if(leftover && leftover > mpi_rank) {
-        chunk_disp_array[blocks] = (MPI_Aint)chunk_info->addr[blocks*mpi_size + mpi_rank];
-        if(blocks && (chunk_disp_array[blocks] < chunk_disp_array[blocks - 1]))
-            need_addr_sort = TRUE;
-        block_lens[blocks] = block_len;
-        blocks++;
-    }
-    /*
-     * Ensure that the blocks are sorted in monotonically non-decreasing
-     * order of offset in the file.
-     */
-    if(need_addr_sort)
-        HDqsort(chunk_disp_array, blocks, sizeof(MPI_Aint), H5D__chunk_cmp_addr);
-    /* MSC - should use this if MPI_type_create_hindexed block is working:
-     * mpi_code = MPI_Type_create_hindexed_block(blocks, block_len, chunk_disp_array, MPI_BYTE, &file_type);
-     */
-    mpi_code = MPI_Type_create_hindexed(blocks, block_lens, chunk_disp_array, MPI_BYTE, &file_type);
-    if(mpi_code != MPI_SUCCESS)
-        HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
-    if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&file_type)))
-        HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
-    mpi_code = MPI_Type_create_hvector(blocks, block_len, 0, MPI_BYTE, &mem_type);
-    if(mpi_code != MPI_SUCCESS)
-        HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hvector failed", mpi_code)
-    if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&mem_type)))
-        HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
+    /* Check if we have any chunks to write on this rank */
+    if(num_blocks > 0 || (leftover && leftover > mpi_rank)) {
+        /* Allocate buffers */
+        /* (MSC - should not need block_lens if MPI_type_create_hindexed_block is working) */
+        if(NULL == (block_lens = (int *)H5MM_malloc((size_t)(blocks + 1) * sizeof(int))))
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk lengths buffer")
+        if(NULL == (chunk_disp_array = (MPI_Aint *)H5MM_malloc((size_t)(blocks + 1) * sizeof(MPI_Aint))))
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk file displacement buffer")
+        for(i = 0 ; i < blocks ; i++) {
+            /* store the chunk address as an MPI_Aint */
+            chunk_disp_array[i] = (MPI_Aint)(chunk_info->addr[i + (mpi_rank * blocks)]);
+            /* MSC - should not need this if MPI_type_create_hindexed_block is working */
+            block_lens[i] = block_len;
+            /* Make sure that the addresses in the datatype are
+             * monotonically non-decreasing
+             */
+            if(i && (chunk_disp_array[i] < chunk_disp_array[i - 1]))
+                need_addr_sort = TRUE;
+        } /* end for */
+        /* Calculate if there are any leftover blocks after evenly
+         * distributing. If there are, then round-robin the distribution
+         * to processes 0 -> leftover.
+         */
+        if(leftover && leftover > mpi_rank) {
+            chunk_disp_array[blocks] = (MPI_Aint)chunk_info->addr[(blocks * mpi_size) + mpi_rank];
+            if(blocks && (chunk_disp_array[blocks] < chunk_disp_array[blocks - 1]))
+                need_addr_sort = TRUE;
+            block_lens[blocks] = block_len;
+            blocks++;
+        }
+        /* Ensure that the blocks are sorted in monotonically non-decreasing
+         * order of offset in the file.
+         */
+        if(need_addr_sort)
+            HDqsort(chunk_disp_array, blocks, sizeof(MPI_Aint), H5D__chunk_cmp_addr);
+        /* MSC - should use this if MPI_type_create_hindexed block is working:
+         * mpi_code = MPI_Type_create_hindexed_block(blocks, block_len, chunk_disp_array, MPI_BYTE, &file_type);
+         */
+        mpi_code = MPI_Type_create_hindexed(blocks, block_lens, chunk_disp_array, MPI_BYTE, &file_type);
+        if(mpi_code != MPI_SUCCESS)
+            HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
+        if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&file_type)))
+            HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
+        mpi_code = MPI_Type_create_hvector(blocks, block_len, 0, MPI_BYTE, &mem_type);
+        if(mpi_code != MPI_SUCCESS)
+            HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hvector failed", mpi_code)
+        if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&mem_type)))
+            HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
+    } /* end if */
/* Set MPI-IO VFD properties */ /* Set MPI-IO VFD properties */
@@ -5073,10 +5077,12 @@ done:
         HDONE_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set transfer mode")
     /* free things */
-    if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&file_type)))
-        HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
-    if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&mem_type)))
-        HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+    if(MPI_BYTE != file_type)
+        if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&file_type)))
+            HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+    if(MPI_BYTE != mem_type)
+        if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&mem_type)))
+            HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
     H5MM_xfree(chunk_disp_array);
     H5MM_xfree(block_lens);