Merge pull request #8690 from mcgratta/master
FDS Source: Allocate and exchange geometry arrays earlier in startup
mcgratta authored Aug 20, 2020
2 parents 750f00a + 963631a commit 560ef6c
Showing 4 changed files with 81 additions and 57 deletions.
4 changes: 0 additions & 4 deletions Source/init.f90
@@ -683,10 +683,6 @@ SUBROUTINE INITIALIZE_MESH_VARIABLES_1(DT,NM)

! Allocate wall and edge index arrays

ALLOCATE(M%WALL_INDEX(0:CELL_COUNT(NM),-3:3),STAT=IZERO)
CALL ChkMemErr('INIT','WALL_INDEX',IZERO)
M%WALL_INDEX = 0

ALLOCATE(M%WALL_INDEX_HT3D(0:CELL_COUNT(NM),-3:3),STAT=IZERO)
CALL ChkMemErr('INIT','WALL_INDEX_HT3D',IZERO)
M%WALL_INDEX_HT3D = 0
122 changes: 79 additions & 43 deletions Source/main.f90
@@ -187,6 +187,11 @@ PROGRAM FDS

IF (RADIATION) CALL INIT_RADIATION

! Set up persistent MPI SEND/RECV for WALL_INDEX, SOLID, OBST_INDEX_C, and CELL_INDEX.
! These arrays are needed when initializing WALL cells in INITIALIZE_MESH_VARIABLES_1.

CALL MPI_INITIALIZATION_CHORES(3)

! Allocate and initialize mesh-specific variables, and check to see if the code should stop

DO NM=LOWER_MESH_INDEX,UPPER_MESH_INDEX
@@ -205,11 +210,11 @@ PROGRAM FDS

! Allocate "request" arrays to keep track of MPI communications

CALL MPI_INITIALIZATION_CHORES(3)
CALL MPI_INITIALIZATION_CHORES(4)

! Exchange information related to size of OMESH arrays

CALL MPI_INITIALIZATION_CHORES(4)
CALL MPI_INITIALIZATION_CHORES(5)

! Initial complex geometry CC setup

@@ -243,12 +248,6 @@ PROGRAM FDS
CALL MPI_BARRIER(MPI_COMM_WORLD, IERR)
IF (MYID==0 .AND. VERBOSE) WRITE(LU_ERR,'(A)') ' Completed INITIALIZE_MESH_EXCHANGE_2'

! Exchange CELL_COUNT, the dimension of various arrays related to obstructions

IF (N_MPI_PROCESSES>1) THEN
CALL MPI_ALLGATHERV(MPI_IN_PLACE,COUNTS(MYID),MPI_INTEGER,CELL_COUNT,COUNTS,DISPLS,MPI_INTEGER,MPI_COMM_WORLD,IERR)
ENDIF

! Initialize persistent MPI sends and receives and allocate buffer arrays.

CALL POST_RECEIVES(0)
@@ -995,7 +994,7 @@ SUBROUTINE MPI_INITIALIZATION_CHORES(TASK_NUMBER)

INTEGER, INTENT(IN) :: TASK_NUMBER
INTEGER, ALLOCATABLE, DIMENSION(:) :: REQ0
INTEGER :: N_REQ0
INTEGER :: N_REQ0,N_COMM,SNODE,RNODE

SELECT CASE(TASK_NUMBER)

@@ -1089,6 +1088,76 @@ SUBROUTINE MPI_INITIALIZATION_CHORES(TASK_NUMBER)

CASE(3)

! Set up persistent send/recv calls for CELL_INDEX, SOLID, WALL_INDEX, and OBST_INDEX_C.
! These arrays hold basic geometry and boundary info for neighboring meshes.

IF (N_MPI_PROCESSES>1) THEN
CALL MPI_ALLGATHERV(MPI_IN_PLACE,COUNTS(MYID),MPI_INTEGER,CELL_COUNT,COUNTS,DISPLS,MPI_INTEGER,MPI_COMM_WORLD,IERR)
ENDIF

N_COMM = 0 ! Number of communication requests
DO NM=LOWER_MESH_INDEX,UPPER_MESH_INDEX
M => MESHES(NM)
ALLOCATE(M%WALL_INDEX(0:CELL_COUNT(NM),-3:3),STAT=IZERO) ; M%WALL_INDEX = 0
N_COMM = N_COMM + M%N_NEIGHBORING_MESHES
ENDDO

ALLOCATE(REQ9(N_COMM*2)) ; REQ9 = MPI_REQUEST_NULL
ALLOCATE(REQ10(N_COMM*2)) ; REQ10 = MPI_REQUEST_NULL
ALLOCATE(REQ11(N_COMM*2)) ; REQ11 = MPI_REQUEST_NULL
ALLOCATE(REQ12(N_COMM*2)) ; REQ12 = MPI_REQUEST_NULL

DO NM=LOWER_MESH_INDEX,UPPER_MESH_INDEX
IF (EVACUATION_ONLY(NM)) CYCLE
M => MESHES(NM)
DO N=1,M%N_NEIGHBORING_MESHES
NOM = M%NEIGHBORING_MESH(N)
IF (EVACUATION_ONLY(NOM)) CYCLE
SNODE = PROCESS(NOM)
IF (SNODE==MYID) CYCLE
M4 => MESHES(NOM)
IF (.NOT.ALLOCATED(M4%CELL_INDEX)) ALLOCATE(M4%CELL_INDEX(0:M4%IBP1,0:M4%JBP1,0:M4%KBP1))
IF (.NOT.ALLOCATED(M4%SOLID)) ALLOCATE(M4%SOLID(0:CELL_COUNT(NOM)))
IF (.NOT.ALLOCATED(M4%WALL_INDEX)) ALLOCATE(M4%WALL_INDEX(0:CELL_COUNT(NOM),-3:3))
IF (.NOT.ALLOCATED(M4%OBST_INDEX_C)) ALLOCATE(M4%OBST_INDEX_C(0:CELL_COUNT(NOM)))
N_REQ9 = N_REQ9+1
CALL MPI_RECV_INIT(M4%CELL_INDEX(0,0,0),SIZE(M4%CELL_INDEX),MPI_INTEGER,SNODE,NOM,MPI_COMM_WORLD,REQ9(N_REQ9),IERR)
N_REQ10 = N_REQ10+1
CALL MPI_RECV_INIT(M4%SOLID(0),SIZE(M4%SOLID),MPI_INTEGER,SNODE,NOM,MPI_COMM_WORLD,REQ10(N_REQ10),IERR)
N_REQ11 = N_REQ11+1
CALL MPI_RECV_INIT(M4%WALL_INDEX(0,-3),SIZE(M4%WALL_INDEX),MPI_INTEGER,SNODE,NOM,MPI_COMM_WORLD,REQ11(N_REQ11),IERR)
N_REQ12 = N_REQ12+1
CALL MPI_RECV_INIT(M4%OBST_INDEX_C(0),SIZE(M4%OBST_INDEX_C),MPI_INTEGER,SNODE,NOM,MPI_COMM_WORLD,REQ12(N_REQ12),IERR)
ENDDO
ENDDO

DO NM=LOWER_MESH_INDEX,UPPER_MESH_INDEX
IF (EVACUATION_ONLY(NM)) CYCLE
M => MESHES(NM)
DO N=1,M%N_NEIGHBORING_MESHES
NOM = M%NEIGHBORING_MESH(N)
IF (EVACUATION_ONLY(NOM)) CYCLE
RNODE = PROCESS(NOM)
IF (RNODE==MYID) CYCLE
M4 => MESHES(NOM)
N_REQ9=MIN(N_REQ9+1,SIZE(REQ9))
CALL MPI_SEND_INIT(M%CELL_INDEX(0,0,0),SIZE(M%CELL_INDEX),MPI_INTEGER,RNODE,NM,MPI_COMM_WORLD,REQ9(N_REQ9),IERR)
N_REQ10=MIN(N_REQ10+1,SIZE(REQ10))
CALL MPI_SEND_INIT(M%SOLID(0),SIZE(M%SOLID),MPI_INTEGER,RNODE,NM,MPI_COMM_WORLD,REQ10(N_REQ10),IERR)
N_REQ11=MIN(N_REQ11+1,SIZE(REQ11))
CALL MPI_SEND_INIT(M%WALL_INDEX(0,-3),SIZE(M%WALL_INDEX),MPI_INTEGER,RNODE,NM,MPI_COMM_WORLD,REQ11(N_REQ11),IERR)
N_REQ12=MIN(N_REQ12+1,SIZE(REQ12))
CALL MPI_SEND_INIT(M%OBST_INDEX_C(0),SIZE(M%OBST_INDEX_C),MPI_INTEGER,RNODE,NM,MPI_COMM_WORLD,REQ12(N_REQ12),IERR)
ENDDO
ENDDO

CALL MPI_STARTALL(N_REQ9,REQ9(1:N_REQ9),IERR) ; CALL TIMEOUT('REQ9',N_REQ9,REQ9(1:N_REQ9))
CALL MPI_STARTALL(N_REQ10,REQ10(1:N_REQ10),IERR) ; CALL TIMEOUT('REQ10',N_REQ10,REQ10(1:N_REQ10))
CALL MPI_STARTALL(N_REQ11,REQ11(1:N_REQ11),IERR) ; CALL TIMEOUT('REQ11',N_REQ11,REQ11(1:N_REQ11))
CALL MPI_STARTALL(N_REQ12,REQ12(1:N_REQ12),IERR) ; CALL TIMEOUT('REQ12',N_REQ12,REQ12(1:N_REQ12))

CASE(4)

! Allocate "request" arrays to keep track of MPI communications

ALLOCATE(REQ(N_COMMUNICATIONS*40))
@@ -1100,10 +1169,6 @@ SUBROUTINE MPI_INITIALIZATION_CHORES(TASK_NUMBER)
ALLOCATE(REQ6(N_COMMUNICATIONS*4))
ALLOCATE(REQ7(N_COMMUNICATIONS*4))
ALLOCATE(REQ8(N_COMMUNICATIONS*4))
ALLOCATE(REQ9(N_COMMUNICATIONS*4))
ALLOCATE(REQ10(N_COMMUNICATIONS*4))
ALLOCATE(REQ11(N_COMMUNICATIONS*4))
ALLOCATE(REQ12(N_COMMUNICATIONS*4))
ALLOCATE(REQ14(N_COMMUNICATIONS*4))
ALLOCATE(REQ15(N_COMMUNICATIONS*4))

@@ -1116,14 +1181,10 @@ SUBROUTINE MPI_INITIALIZATION_CHORES(TASK_NUMBER)
REQ6 = MPI_REQUEST_NULL
REQ7 = MPI_REQUEST_NULL
REQ8 = MPI_REQUEST_NULL
REQ9 = MPI_REQUEST_NULL
REQ10 = MPI_REQUEST_NULL
REQ11 = MPI_REQUEST_NULL
REQ12 = MPI_REQUEST_NULL
REQ14 = MPI_REQUEST_NULL
REQ15 = MPI_REQUEST_NULL

CASE(4)
CASE(5)

IF (N_MPI_PROCESSES>1) THEN

@@ -2139,20 +2200,6 @@ SUBROUTINE POST_RECEIVES(CODE)

INITIALIZATION_IF: IF (CODE==0) THEN

IF (.NOT.ALLOCATED(M4%CELL_INDEX)) ALLOCATE(M4%CELL_INDEX(0:M4%IBP1,0:M4%JBP1,0:M4%KBP1))
IF (.NOT.ALLOCATED(M4%SOLID)) ALLOCATE(M4%SOLID(0:CELL_COUNT(NOM)))
IF (.NOT.ALLOCATED(M4%WALL_INDEX)) ALLOCATE(M4%WALL_INDEX(0:CELL_COUNT(NOM),-3:3))
IF (.NOT.ALLOCATED(M4%OBST_INDEX_C)) ALLOCATE(M4%OBST_INDEX_C(0:CELL_COUNT(NOM)))

N_REQ9 = MIN(N_REQ9+1,SIZE(REQ9))
CALL MPI_RECV_INIT(M4%CELL_INDEX(0,0,0),SIZE(M4%CELL_INDEX),MPI_INTEGER,SNODE,NOM,MPI_COMM_WORLD,REQ9(N_REQ9),IERR)
N_REQ10 = MIN(N_REQ10+1,SIZE(REQ10))
CALL MPI_RECV_INIT(M4%SOLID(0),SIZE(M4%SOLID),MPI_INTEGER,SNODE,NOM,MPI_COMM_WORLD,REQ10(N_REQ10),IERR)
N_REQ11 = MIN(N_REQ11+1,SIZE(REQ11))
CALL MPI_RECV_INIT(M4%WALL_INDEX(0,-3),SIZE(M4%WALL_INDEX),MPI_INTEGER,SNODE,NOM,MPI_COMM_WORLD,REQ11(N_REQ11),IERR)
N_REQ12 = MIN(N_REQ12+1,SIZE(REQ12))
CALL MPI_RECV_INIT(M4%OBST_INDEX_C(0),SIZE(M4%OBST_INDEX_C),MPI_INTEGER,SNODE,NOM,MPI_COMM_WORLD,REQ12(N_REQ12),IERR)

IJK_SIZE = (M3%I_MAX_R-M3%I_MIN_R+1)*(M3%J_MAX_R-M3%J_MIN_R+1)*(M3%K_MAX_R-M3%K_MIN_R+1)

IF (M3%NIC_R>0) THEN
@@ -2392,17 +2439,6 @@ SUBROUTINE MESH_EXCHANGE(CODE)

INITIALIZE_SEND_IF: IF (CODE==0) THEN

IF (RNODE/=SNODE) THEN
N_REQ9=MIN(N_REQ9+1,SIZE(REQ9))
CALL MPI_SEND_INIT(M%CELL_INDEX(0,0,0),SIZE(M%CELL_INDEX),MPI_INTEGER,RNODE,NM,MPI_COMM_WORLD,REQ9(N_REQ9),IERR)
N_REQ10=MIN(N_REQ10+1,SIZE(REQ10))
CALL MPI_SEND_INIT(M%SOLID(0),SIZE(M%SOLID),MPI_INTEGER,RNODE,NM,MPI_COMM_WORLD,REQ10(N_REQ10),IERR)
N_REQ11=MIN(N_REQ11+1,SIZE(REQ11))
CALL MPI_SEND_INIT(M%WALL_INDEX(0,-3),SIZE(M%WALL_INDEX),MPI_INTEGER,RNODE,NM,MPI_COMM_WORLD,REQ11(N_REQ11),IERR)
N_REQ12=MIN(N_REQ12+1,SIZE(REQ12))
CALL MPI_SEND_INIT(M%OBST_INDEX_C(0),SIZE(M%OBST_INDEX_C),MPI_INTEGER,RNODE,NM,MPI_COMM_WORLD,REQ12(N_REQ12),IERR)
ENDIF

IF (M3%NIC_S>0 .AND. RNODE/=SNODE) THEN

! Determine the maximum number of radiation angles that are to be sent
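Note on the main.f90 changes above: the new MPI_INITIALIZATION_CHORES(3) block combines two MPI patterns, an in-place MPI_ALLGATHERV so every rank learns CELL_COUNT for all meshes, and persistent point-to-point requests (MPI_RECV_INIT/MPI_SEND_INIT) that are created once and then activated with MPI_STARTALL. The following is a minimal, self-contained sketch of those calls outside of FDS; the program name, buffer names, and ring-partner exchange are illustrative assumptions, not FDS code.

PROGRAM PERSISTENT_EXCHANGE_DEMO
USE MPI
IMPLICIT NONE
INTEGER :: IERR,MYID,N_PROCS,LEFT,RIGHT,I
INTEGER, ALLOCATABLE, DIMENSION(:) :: CELL_COUNT,COUNTS,DISPLS,BUF_S,BUF_R
INTEGER :: REQ(2)

CALL MPI_INIT(IERR)
CALL MPI_COMM_RANK(MPI_COMM_WORLD,MYID,IERR)
CALL MPI_COMM_SIZE(MPI_COMM_WORLD,N_PROCS,IERR)

! Each rank fills only its own entry of CELL_COUNT; the in-place ALLGATHERV
! distributes every entry to every rank.
ALLOCATE(CELL_COUNT(0:N_PROCS-1),COUNTS(0:N_PROCS-1),DISPLS(0:N_PROCS-1))
CELL_COUNT = 0
CELL_COUNT(MYID) = 100 + MYID          ! stand-in for this rank's cell count
COUNTS = 1                             ! one entry contributed per rank
DISPLS = (/ (I,I=0,N_PROCS-1) /)
CALL MPI_ALLGATHERV(MPI_IN_PLACE,COUNTS(MYID),MPI_INTEGER,CELL_COUNT,COUNTS,DISPLS,MPI_INTEGER,MPI_COMM_WORLD,IERR)

! Persistent ring exchange: send to the right neighbor, receive from the left.
! The requests are created once and can be re-started every time step.
RIGHT = MOD(MYID+1,N_PROCS)
LEFT  = MOD(MYID-1+N_PROCS,N_PROCS)
ALLOCATE(BUF_S(CELL_COUNT(MYID)),BUF_R(CELL_COUNT(LEFT)))
BUF_S = MYID ; BUF_R = -1
CALL MPI_RECV_INIT(BUF_R,SIZE(BUF_R),MPI_INTEGER,LEFT ,0,MPI_COMM_WORLD,REQ(1),IERR)
CALL MPI_SEND_INIT(BUF_S,SIZE(BUF_S),MPI_INTEGER,RIGHT,0,MPI_COMM_WORLD,REQ(2),IERR)

CALL MPI_STARTALL(2,REQ,IERR)          ! activate both persistent requests
CALL MPI_WAITALL(2,REQ,MPI_STATUSES_IGNORE,IERR)

CALL MPI_REQUEST_FREE(REQ(1),IERR)
CALL MPI_REQUEST_FREE(REQ(2),IERR)
CALL MPI_FINALIZE(IERR)
END PROGRAM PERSISTENT_EXCHANGE_DEMO

Because the requests are persistent, a production code can call MPI_STARTALL and MPI_WAITALL repeatedly without re-creating them, which is why FDS sets them up once during initialization.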
3 changes: 2 additions & 1 deletion Source/read.f90
@@ -10203,6 +10203,7 @@ END SUBROUTINE READ_OBST

SUBROUTINE READ_HOLE

USE MISC_FUNCTIONS, ONLY: PROCESS_MESH_NEIGHBORHOOD
CHARACTER(LABEL_LENGTH) :: DEVC_ID,CTRL_ID,MULT_ID
CHARACTER(LABEL_LENGTH) :: MESH_ID
CHARACTER(25) :: COLOR
@@ -10326,7 +10327,7 @@ SUBROUTINE READ_HOLE

MESH_LOOP: DO NM=1,NMESHES

IF (PROCESS(NM)/=MYID .AND. MYID/=EVAC_PROCESS) CYCLE MESH_LOOP
IF (.NOT.PROCESS_MESH_NEIGHBORHOOD(NM) .AND. MYID/=EVAC_PROCESS) CYCLE MESH_LOOP

M=>MESHES(NM)
CALL POINT_TO_MESH(NM)
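Note on the read.f90 change above: the mesh loop in READ_HOLE now cycles based on PROCESS_MESH_NEIGHBORHOOD(NM) instead of mesh ownership alone, so a process also carves holes in meshes that neighbor its own. A hypothetical, self-contained sketch of such a neighborhood test follows; the program, the IN_MY_NEIGHBORHOOD name, and all of its data are invented for illustration and are not the FDS implementation of PROCESS_MESH_NEIGHBORHOOD.

PROGRAM NEIGHBORHOOD_DEMO
IMPLICIT NONE
INTEGER, PARAMETER :: NMESHES=4, MAX_NEIGHBORS=2
INTEGER :: MYID = 0                                  ! rank of this process (made up)
INTEGER :: PROCESS(NMESHES) = (/0,0,1,1/)            ! owning rank of each mesh (made up)
INTEGER :: NEIGHBORS(MAX_NEIGHBORS,NMESHES) = &      ! meshes adjacent to each mesh; 0 = unused slot
           RESHAPE((/2,0, 1,3, 2,4, 3,0/),(/MAX_NEIGHBORS,NMESHES/))
INTEGER :: NM

DO NM=1,NMESHES
   WRITE(*,'(A,I0,A,L1)') 'Mesh ',NM,' in neighborhood: ',IN_MY_NEIGHBORHOOD(NM)
ENDDO

CONTAINS

LOGICAL FUNCTION IN_MY_NEIGHBORHOOD(NM)
INTEGER, INTENT(IN) :: NM
INTEGER :: N,NOM
! TRUE if this rank owns mesh NM itself ...
IN_MY_NEIGHBORHOOD = (PROCESS(NM)==MYID)
IF (IN_MY_NEIGHBORHOOD) RETURN
! ... or owns one of the meshes adjacent to NM.
DO N=1,MAX_NEIGHBORS
   NOM = NEIGHBORS(N,NM)
   IF (NOM==0) CYCLE
   IF (PROCESS(NOM)==MYID) THEN
      IN_MY_NEIGHBORHOOD = .TRUE.
      RETURN
   ENDIF
ENDDO
END FUNCTION IN_MY_NEIGHBORHOOD

END PROGRAM NEIGHBORHOOD_DEMO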
9 changes: 0 additions & 9 deletions Verification/scripts/Run_FDS_Cases.sh
@@ -308,15 +308,6 @@ if [ "$REGULAR" == "1" ]; then
fi
fi

cd $CURDIR
cd ..
if [ "$GEOMCASES" == "1" ]; then
./GEOM_Cases.sh
if [ "$CHECKCASES" == "" ]; then
echo Cases in GEOM_Cases.sh submitted
fi
fi

cd $CURDIR
cd ..
if [ "$INSPECTCASES" == "1" ]; then
