Commit 3178c177 authored by Gregor Michalicek

Making Uliana happy (first attempt at enabling stepf parallelization with the inp.xml code path)

Note: the optional mpi argument in stepf is a temporary workaround and still has to be removed.
parent a45b09fe
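The change hinges on stepf taking an OPTIONAL mpi dummy argument: when the argument is present, rank 0 broadcasts the input data the other ranks are missing before the common work starts; when it is absent, the old serial behaviour is untouched. Below is a minimal sketch of that pattern; the routine name, the reduced argument list, and the plain-integer communicator are illustrative only, not the actual FLEUR interface.

! Sketch of the optional-mpi pattern introduced for stepf (illustrative,
! not the real routine): without the comm argument the routine behaves
! exactly as in the serial code path; with it, rank 0 broadcasts the FFT
! mesh dimensions so every rank can size its arrays consistently.
SUBROUTINE stepf_sketch(mx1,mx2,mx3,comm)
   IMPLICIT NONE
   INCLUDE 'mpif.h'
   INTEGER, INTENT(INOUT)        :: mx1, mx2, mx3 ! FFT mesh dimensions
   INTEGER, OPTIONAL, INTENT(IN) :: comm          ! MPI communicator
   INTEGER :: ifftd, ierr
   IF (PRESENT(comm)) THEN
      ! Parallel entry: only rank 0 parsed the input, so share the mesh.
      CALL MPI_BCAST(mx1,1,MPI_INTEGER,0,comm,ierr)
      CALL MPI_BCAST(mx2,1,MPI_INTEGER,0,comm,ierr)
      CALL MPI_BCAST(mx3,1,MPI_INTEGER,0,comm,ierr)
   END IF
   ifftd = 27*mx1*mx2*mx3 ! every rank can now size its FFT arrays
   ! ... step-function construction proceeds identically on all ranks ...
END SUBROUTINE stepf_sketch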
@@ -101,6 +101,10 @@ SUBROUTINE initParallelProcesses(atoms,vacuum,input,stars,sliceplot,banddos,&
       CALL MPI_BCAST(jij%nqptd,1,MPI_INTEGER,0,mpi%mpi_comm,ierr)
       IF (mpi%irank.NE.0) THEN
+         IF(ALLOCATED(atoms%neq)) DEALLOCATE(atoms%neq)
+         IF(ALLOCATED(atoms%volmts)) DEALLOCATE(atoms%volmts)
+         IF(ALLOCATED(atoms%taual)) DEALLOCATE(atoms%taual)
+         IF(ALLOCATED(atoms%rmt)) DEALLOCATE(atoms%rmt)
          ALLOCATE(atoms%nz(atoms%ntype),atoms%zatom(atoms%ntype)) !nz and zatom have the same content!
          ALLOCATE(atoms%jri(atoms%ntype),atoms%dx(atoms%ntype),atoms%rmt(atoms%ntype))
          ALLOCATE(atoms%lmax(atoms%ntype),atoms%nlo(atoms%ntype),atoms%lnonsph(atoms%ntype))
@@ -170,6 +174,7 @@ SUBROUTINE initParallelProcesses(atoms,vacuum,input,stars,sliceplot,banddos,&
          ALLOCATE(stars%igfft(0:dimension%nn3d-1,2),stars%igfft2(0:dimension%nn2d-1,2))
          ALLOCATE(stars%rgphs(-stars%mx1:stars%mx1,-stars%mx2:stars%mx2,-stars%mx3:stars%mx3))
          ALLOCATE(stars%pgfft(0:dimension%nn3d-1),stars%pgfft2(0:dimension%nn2d-1))
+         IF(ALLOCATED(stars%ufft)) DEALLOCATE(stars%ufft)
          ALLOCATE(stars%ufft(0:27*stars%mx1*stars%mx2*stars%mx3-1),stars%ustep(stars%ng3))
          ALLOCATE(results%force(3,atoms%ntype,dimension%jspd))
...
@@ -591,8 +591,8 @@ SUBROUTINE postprocessInput(mpi,input,sym,stars,atoms,vacuum,obsolete,kpts,&
 #endif
       IF (.NOT.sliceplot%iplot) THEN
+         CALL stepf(sym,stars,atoms,oneD,input,cell,vacuum,mpi)
          IF (mpi%irank.EQ.0) THEN
-            CALL stepf(sym,stars,atoms,oneD,input,cell,vacuum)
             CALL convn(DIMENSION,atoms,stars)
             CALL efield(atoms,DIMENSION,stars,sym,vacuum,cell,input)
          END IF !(mpi%irank.EQ.0)
...
@@ -48,6 +48,10 @@
       INTEGER ierr
       INTEGER, ALLOCATABLE :: icm_local(:,:,:)
       REAL, ALLOCATABLE :: ufft_local(:), bfft_local(:)
+      CALL MPI_BCAST(stars%mx1,1,MPI_INTEGER,0,mpi%mpi_comm,ierr)
+      CALL MPI_BCAST(stars%mx2,1,MPI_INTEGER,0,mpi%mpi_comm,ierr)
+      CALL MPI_BCAST(stars%mx3,1,MPI_INTEGER,0,mpi%mpi_comm,ierr)
 #endif
       ifftd = 27*stars%mx1*stars%mx2*stars%mx3
@@ -143,8 +147,20 @@
 !
 #ifdef CPP_MPI
       IF (PRESENT(mpi)) THEN
+         CALL MPI_BCAST(atoms%ntype,1,MPI_INTEGER,0,mpi%mpi_comm,ierr)
+         CALL MPI_BCAST(atoms%nat,1,MPI_INTEGER,0,mpi%mpi_comm,ierr)
          CALL MPI_BCAST(cell%omtil,1,CPP_MPI_REAL,0,mpi%mpi_comm,ierr)
          CALL MPI_BCAST(cell%bmat,9,CPP_MPI_REAL,0,mpi%mpi_comm,ierr)
+         CALL MPI_BCAST(sym%invs,1,MPI_LOGICAL,0,mpi%mpi_comm,ierr)
+         CALL MPI_BCAST(oneD%odd%d1,1,MPI_LOGICAL,0,mpi%mpi_comm,ierr)
+         CALL MPI_BCAST(input%film,1,MPI_LOGICAL,0,mpi%mpi_comm,ierr)
+         CALL MPI_BCAST(cell%z1,1,CPP_MPI_REAL,0,mpi%mpi_comm,ierr)
+         CALL MPI_BCAST(cell%vol,1,CPP_MPI_REAL,0,mpi%mpi_comm,ierr)
+         IF(.NOT.ALLOCATED(atoms%neq)) ALLOCATE(atoms%neq(atoms%ntype))
+         IF(.NOT.ALLOCATED(atoms%volmts)) ALLOCATE(atoms%volmts(atoms%ntype))
+         IF(.NOT.ALLOCATED(atoms%taual)) ALLOCATE(atoms%taual(3,atoms%nat))
+         IF(.NOT.ALLOCATED(atoms%rmt)) ALLOCATE(atoms%rmt(atoms%ntype))
+         IF(.NOT.ALLOCATED(stars%ufft)) ALLOCATE(stars%ufft(0:27*stars%mx1*stars%mx2*stars%mx3-1))
          CALL MPI_BCAST(atoms%neq,size(atoms%neq),MPI_INTEGER,0,mpi%mpi_comm,ierr)
          CALL MPI_BCAST(atoms%volmts,size(atoms%volmts),CPP_MPI_REAL,0,mpi%mpi_comm,ierr)
          CALL MPI_BCAST(atoms%taual,size(atoms%taual),CPP_MPI_REAL,0,mpi%mpi_comm,ierr)
...
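The stepf hunks above repeat one idiom per quantity: broadcast the sizes first, allocate on the ranks that do not yet have the array, then broadcast the contents into the matching buffers. A self-contained toy version of that idiom, with generic names rather than FLEUR's:

! Toy illustration of the broadcast-then-allocate idiom used above:
! rank 0 knows the array size and data, the other ranks first learn the
! size, allocate to match, and then receive the contents.
PROGRAM bcast_alloc_sketch
   IMPLICIT NONE
   INCLUDE 'mpif.h'
   INTEGER :: irank, ierr, ntype
   REAL, ALLOCATABLE :: rmt(:)
   CALL MPI_INIT(ierr)
   CALL MPI_COMM_RANK(MPI_COMM_WORLD,irank,ierr)
   IF (irank.EQ.0) THEN
      ntype = 3                 ! only rank 0 has the input data
      ALLOCATE(rmt(ntype))
      rmt = (/2.1,2.3,1.9/)
   END IF
   ! Size first, then conditional allocation, then the array contents.
   CALL MPI_BCAST(ntype,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
   IF (.NOT.ALLOCATED(rmt)) ALLOCATE(rmt(ntype))
   CALL MPI_BCAST(rmt,size(rmt),MPI_REAL,0,MPI_COMM_WORLD,ierr)
   CALL MPI_FINALIZE(ierr)
END PROGRAM bcast_alloc_sketch

Guarding the allocations with ALLOCATED checks, as the commit does on both the sending and receiving side, keeps the code safe whether or not an earlier code path already allocated the arrays.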