Commit 0d593037 authored by Gregor Michalicek

Fixed several bugs related to MPI + cdn.hdf

parent a712aa8f
......@@ -47,6 +47,9 @@ CONTAINS
USE m_icorrkeys
USE m_eig66_io, ONLY : open_eig, write_eig, close_eig,read_eig
USE m_xmlOutput
#ifdef CPP_MPI
USE m_mpi_bc_pot
#endif
IMPLICIT NONE
TYPE(t_results),INTENT(INOUT):: results
......@@ -238,8 +241,16 @@ CONTAINS
& 'Info: and stored in "vxc", the values obtained from the',&
& 'Info: original implementation are saved to "vxc.old".'
ENDIF
CALL readPotential(stars,vacuum,atoms,sphhar,input,sym,POT_ARCHIVE_TYPE_TOT_const,&
iter,vr,vpw,vz,vzxy)
IF (mpi%irank.EQ.0) THEN
CALL readPotential(stars,vacuum,atoms,sphhar,input,sym,POT_ARCHIVE_TYPE_TOT_const,&
iter,vr,vpw,vz,vzxy)
END IF
#ifdef CPP_MPI
CALL mpi_bc_pot(mpi,stars,sphhar,atoms,input,vacuum,&
iter,vr,vpw,vz,vzxy)
#endif
999 CONTINUE
IF (mpi%irank.EQ.0) CALL openXMLElementFormPoly('iteration',(/'numberForCurrentRun','overallNumber '/),(/it,iter/),&
RESHAPE((/19,13,5,5/),(/2,2/)))
......@@ -663,8 +674,15 @@ ENDIF
ENDDO
CLOSE (16)
gwc=2
CALL readPotential(stars,vacuum,atoms,sphhar,input,sym,POT_ARCHIVE_TYPE_COUL_const,&
iter,vr,vpw,vz,vzxy)
IF (mpi%irank.EQ.0) THEN
CALL readPotential(stars,vacuum,atoms,sphhar,input,sym,POT_ARCHIVE_TYPE_COUL_const,&
iter,vr,vpw,vz,vzxy)
END IF
#ifdef CPP_MPI
CALL mpi_bc_pot(mpi,stars,sphhar,atoms,input,vacuum,&
iter,vr,vpw,vz,vzxy)
#endif
GOTO 999
ELSE IF ( input%gw.EQ.2.AND.(gwc==2) ) THEN
CLOSE (12)
......
......@@ -75,7 +75,6 @@ MODULE m_cdn_io
CHARACTER(LEN=19) :: timeStampString
CHARACTER(LEN=15) :: distanceString
CALL getMode(mode)
WRITE(*,*) 'Available densities info:'
......
set(fleur_F77 ${fleur_F77}
)
set(fleur_F90 ${fleur_F90}
main/cdngen.f90
main/cdngen.F90
main/fleur.F90
main/fleur_init.F90
main/fleur_job.F90
......
......@@ -32,6 +32,10 @@
use m_m_perp
USE m_types
USE m_xmlOutput
#ifdef CPP_MPI
USE m_mpi_bc_pot
USE m_mpi_bc_coreden
#endif
IMPLICIT NONE
TYPE(t_results),INTENT(INOUT):: results
TYPE(t_mpi),INTENT(IN) :: mpi
......@@ -94,8 +98,15 @@
ALLOCATE(vpw(stars%ng3,dimension%jspd),vzxy(vacuum%nmzxyd,oneD%odi%n2d-1,2,dimension%jspd),&
& vz(vacuum%nmzd,2,dimension%jspd),vr(atoms%jmtd,0:sphhar%nlhd,atoms%ntype,dimension%jspd))
CALL readPotential(stars,vacuum,atoms,sphhar,input,sym,POT_ARCHIVE_TYPE_TOT_const,&
iter,vr,vpw,vz,vzxy)
IF (mpi%irank.EQ.0) THEN
CALL readPotential(stars,vacuum,atoms,sphhar,input,sym,POT_ARCHIVE_TYPE_TOT_const,&
iter,vr,vpw,vz,vzxy)
END IF
#ifdef CPP_MPI
CALL mpi_bc_pot(mpi,stars,sphhar,atoms,input,vacuum,&
iter,vr,vpw,vz,vzxy)
#endif
DEALLOCATE ( vpw,vzxy )
ALLOCATE ( qpw(stars%ng3,dimension%jspd),rhtxy(vacuum%nmzxyd,oneD%odi%n2d-1,2,dimension%jspd) )
ALLOCATE ( rho(atoms%jmtd,0:sphhar%nlhd,atoms%ntype,dimension%jspd),rht(vacuum%nmzd,2,dimension%jspd) )
......@@ -164,7 +175,6 @@
IF (noco%l_mperp) jspmax = 1
DO jspin = 1,jspmax
CALL timestart("cdngen: cdnval")
CALL cdnval(eig_id,&
mpi,kpts,jspin,sliceplot,noco, input,banddos,cell,atoms,enpara,stars, vacuum,dimension,&
sphhar, sym,obsolete, igq_fft, vr,vz(:,:,jspin), oneD,&
......@@ -201,7 +211,13 @@
tec = 0.0
qint = 0.0
IF (input%frcor) THEN
CALL readCoreDensity(input,atoms,dimension,rh,tec,qint)
IF (mpi%irank.EQ.0) THEN
CALL readCoreDensity(input,atoms,dimension,rh,tec,qint)
END IF
#ifdef CPP_MPI
CALL mpi_bc_coreDen(mpi,atoms,input,dimension,&
rh,tec,qint)
#endif
END IF
DO jspin = 1,input%jspins
......@@ -257,9 +273,9 @@
END IF
!
END DO ! loop over spins
CALL writeCoreDensity(input,atoms,dimension,rhTemp,tec,qint)
IF (mpi%irank.EQ.0) THEN
CALL writeCoreDensity(input,atoms,dimension,rhTemp,tec,qint)
END IF
IF ((input%gw.eq.1 .or. input%gw.eq.3).AND.(mpi%irank.EQ.0)) CLOSE(15)
ELSE
! relativistic core implementation : kcrel.eq.1
......@@ -286,6 +302,7 @@
END DO
END IF
ENDIF
IF ((noco%l_noco).AND.(mpi%irank.EQ.0)) THEN
!---> pk non-collinear
!---> add the coretail-charge to the constant interstitial
......
......@@ -4,6 +4,8 @@ if (${FLEUR_USE_MPI})
set(fleur_F90 ${fleur_F90}
mpi/mingeselle.F90
mpi/mpi_bc_all.F90
mpi/mpi_bc_pot.F90
mpi/mpi_bc_coreDen.F90
mpi/mpi_bc_st.F90
mpi/mpi_col_den.F90
mpi/mpi_make_groups.F90
......
!--------------------------------------------------------------------------------
! Copyright (c) 2016 Peter Grünberg Institut, Forschungszentrum Jülich, Germany
! This file is part of FLEUR and available as free software under the conditions
! of the MIT license as expressed in the LICENSE file in more detail.
!--------------------------------------------------------------------------------
MODULE m_mpi_bc_coreden

!> Broadcast of the core electron density data from MPI rank 0 (which reads
!> it from file, e.g. via readCoreDensity) to all other ranks of the
!> communicator in mpi%mpi_comm.

CONTAINS

SUBROUTINE mpi_bc_coreden(mpi,atoms,input,dimension,&
                          rhcs,tecs,qints)

   USE m_types

   IMPLICIT NONE

   INCLUDE 'mpif.h'

   TYPE(t_mpi),INTENT(IN)       :: mpi
   TYPE(t_atoms),INTENT(IN)     :: atoms
   TYPE(t_input),INTENT(IN)     :: input
   TYPE(t_dimension),INTENT(IN) :: DIMENSION

   !> rhcs  - core charge density on the radial MT mesh, per atom type and spin
   REAL, INTENT(INOUT) :: rhcs(atoms%jmtd,atoms%ntype,DIMENSION%jspd)
   !> tecs  - core kinetic energies, per atom type and spin
   REAL, INTENT(INOUT) :: tecs(atoms%ntype,DIMENSION%jspd)
   !> qints - core charge outside the MT spheres, per atom type and spin
   REAL, INTENT(INOUT) :: qints(atoms%ntype,DIMENSION%jspd)

   INTEGER :: n, ierr

   ! NOTE: MPI_DOUBLE is the datatype handle for the C type "double"; for
   ! Fortran REAL buffers (promoted to 8 bytes by the build's -r8 style
   ! flags) the conforming handle is MPI_DOUBLE_PRECISION.
   n = atoms%jmtd * atoms%ntype * DIMENSION%jspd
   CALL MPI_BCAST(rhcs,n,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)

   ! tecs and qints share the same shape, so the same count is reused.
   n = atoms%ntype * DIMENSION%jspd
   CALL MPI_BCAST(tecs,n,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
   CALL MPI_BCAST(qints,n,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)

END SUBROUTINE mpi_bc_coreden

END MODULE m_mpi_bc_coreden
!--------------------------------------------------------------------------------
! Copyright (c) 2016 Peter Grünberg Institut, Forschungszentrum Jülich, Germany
! This file is part of FLEUR and available as free software under the conditions
! of the MIT license as expressed in the LICENSE file in more detail.
!--------------------------------------------------------------------------------
MODULE m_mpi_bc_pot

!> Broadcast of a potential (muffin-tin, interstitial, and vacuum parts)
!> from MPI rank 0 (which reads it from file, e.g. via readPotential) to
!> all other ranks of the communicator in mpi%mpi_comm.

CONTAINS

SUBROUTINE mpi_bc_pot(mpi,stars,sphhar,atoms,input,vacuum,&
                      iter,fr,fpw,fz,fzxy)

   USE m_types

   IMPLICIT NONE

   INCLUDE 'mpif.h'

   TYPE(t_mpi),INTENT(IN)    :: mpi
   TYPE(t_input),INTENT(IN)  :: input
   TYPE(t_vacuum),INTENT(IN) :: vacuum
   TYPE(t_stars),INTENT(IN)  :: stars
   TYPE(t_sphhar),INTENT(IN) :: sphhar
   TYPE(t_atoms),INTENT(IN)  :: atoms

   !> iter - iteration counter read together with the potential
   INTEGER, INTENT (INOUT) :: iter
   !> fpw  - interstitial (plane-wave / star) part of the potential
   COMPLEX, INTENT (INOUT) :: fpw(stars%ng3,input%jspins)
   !> fzxy - warped vacuum part (2D stars beyond the first)
   COMPLEX, INTENT (INOUT) :: fzxy(vacuum%nmzxyd,stars%ng2-1,2,input%jspins)
   !> fr   - muffin-tin part on the radial mesh per lattice harmonic
   REAL,    INTENT (INOUT) :: fr(atoms%jmtd,0:sphhar%nlhd,atoms%ntype,input%jspins)
   !> fz   - planar-averaged vacuum part
   REAL,    INTENT (INOUT) :: fz(vacuum%nmzd,2,input%jspins)

   INTEGER :: n, ierr

   CALL MPI_BCAST(iter,1,MPI_INTEGER,0,mpi%mpi_comm,ierr)

   n = stars%ng3 * input%jspins
   CALL MPI_BCAST(fpw,n,MPI_DOUBLE_COMPLEX,0,mpi%mpi_comm,ierr)

   n = vacuum%nmzxyd * (stars%ng2-1) * 2 * input%jspins
   CALL MPI_BCAST(fzxy,n,MPI_DOUBLE_COMPLEX,0,mpi%mpi_comm,ierr)

   ! NOTE: MPI_DOUBLE is the datatype handle for the C type "double"; for
   ! Fortran REAL buffers (promoted to 8 bytes by the build's -r8 style
   ! flags) the conforming handle is MPI_DOUBLE_PRECISION.
   ! The nlhd dimension runs 0:nlhd, hence the (sphhar%nlhd+1) factor.
   n = atoms%jmtd * (sphhar%nlhd+1) * atoms%ntype * input%jspins
   CALL MPI_BCAST(fr,n,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)

   n = vacuum%nmzd * 2 * input%jspins
   CALL MPI_BCAST(fz,n,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)

END SUBROUTINE mpi_bc_pot

END MODULE m_mpi_bc_pot
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment