Commit fb32fce4 authored by Daniel Wortmann

Bugfixes for MPI-parallel part of potential generator

parent c11101bb
@@ -51,10 +51,11 @@ CONTAINS
TYPE(t_potden) :: workden,denRot
WRITE (6,FMT=8000)
IF (mpi%irank==0) WRITE (6,FMT=8000)
8000 FORMAT (/,/,t10,' p o t e n t i a l g e n e r a t o r',/)
#ifdef CPP_MPI
CALL mpi_bc_potden(mpi,stars,sphhar,atoms,input,vacuum,oneD,noco,den)
#endif
CALL vTot%resetPotDen()
CALL vCoul%resetPotDen()
CALL vx%resetPotDen()
@@ -67,7 +68,7 @@ CONTAINS
CALL den%sum_both_spin(workden)
CALL vgen_coulomb(1,mpi,DIMENSION,oneD,input,field,vacuum,sym,stars,cell,sphhar,atoms,workden,vCoul,results)
CALL vCoul%copy_both_spin(vTot)
IF (noco%l_noco) THEN
@@ -79,7 +80,7 @@ CONTAINS
call vgen_xcpot(hybrid,input,xcpot,DIMENSION, atoms,sphhar,stars,&
vacuum,sym, obsolete,cell,oneD,sliceplot,mpi,noco,den,denRot,vTot,vx,results)
!ToDo, check if this is needed for more potentials as well...
CALL vgen_finalize(atoms,stars,vacuum,sym,noco,input,vTot,denRot)
DEALLOCATE(vcoul%pw_w,vx%pw_w)
......
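
In the hunks above, the banner is printed on rank 0 only and the incoming density is broadcast to all ranks via mpi_bc_potden before any potential work starts. A minimal, self-contained sketch of that rank-0-output-plus-broadcast pattern follows; it is not the FLEUR routine itself, and the program name bc_demo and the plain array den_mt (standing in for one array component of t_potden) are hypothetical.

PROGRAM bc_demo
   USE mpi
   IMPLICIT NONE
   INTEGER          :: irank, ierr
   DOUBLE PRECISION :: den_mt(100)   ! stand-in for one component of t_potden

   CALL MPI_INIT(ierr)
   CALL MPI_COMM_RANK(MPI_COMM_WORLD,irank,ierr)

   den_mt = 0.0
   IF (irank==0) THEN
      WRITE (*,*) 'potential generator'   ! banner printed on rank 0 only
      den_mt = 1.0                        ! rank 0 holds the current density
   END IF
   ! every rank needs the same density before the potential is generated
   CALL MPI_BCAST(den_mt,SIZE(den_mt),MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD,ierr)

   CALL MPI_FINALIZE(ierr)
END PROGRAM bc_demo

Broadcasting the whole density once up front is what makes the per-array broadcast removed in the next file redundant.
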
@@ -55,10 +55,6 @@ CONTAINS
INTEGER i,i3,irec2,irec3,ivac,j,js,k,k3,lh,n,nzst1
INTEGER ifftd2
INTEGER jsp,l
#ifdef CPP_MPI
include 'mpif.h'
integer:: ierr
#endif
CALL exc%init_potden_types(stars,atoms,sphhar,vacuum,1,.false.,1) !one spin only
ALLOCATE(exc%pw_w(stars%ng3,1));exc%pw_w=0.0
@@ -148,9 +144,7 @@ CONTAINS
IF (mpi%irank == 0) THEN
CALL timestart ("Vxc in MT")
END IF
#ifdef CPP_MPI
CALL MPI_BCAST(den%mt,atoms%jmtd*(1+sphhar%nlhd)*atoms%ntype*dimension%jspd,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
#endif
CALL vmt_xc(DIMENSION,mpi,sphhar,atoms, den,xcpot,input,sym,&
obsolete, vTot,vx,exc)
......
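
The hunks above drop the local mpif.h include and the explicit MPI_BCAST of den%mt with a hand-computed count, which becomes redundant once the full density has been broadcast by mpi_bc_potden. Where an explicit count is still needed, SIZE() of the actual array is harder to get wrong than a product of dimensions. A small sketch under assumed sizes; count_demo is hypothetical, and jmtd, nlhd, ntype, jspd are illustrative values, not taken from a real FLEUR setup.

PROGRAM count_demo
   USE mpi
   IMPLICIT NONE
   INTEGER, PARAMETER :: jmtd=5, nlhd=3, ntype=2, jspd=2   ! illustrative sizes only
   INTEGER            :: irank, ierr
   DOUBLE PRECISION   :: mt(jmtd,0:nlhd,ntype,jspd)

   CALL MPI_INIT(ierr)
   CALL MPI_COMM_RANK(MPI_COMM_WORLD,irank,ierr)

   mt = 0.0
   IF (irank==0) mt = 1.0
   ! SIZE(mt) equals jmtd*(1+nlhd)*ntype*jspd here, but it cannot go stale
   ! if a dimension (e.g. the spin index) changes
   CALL MPI_BCAST(mt,SIZE(mt),MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD,ierr)

   CALL MPI_FINALIZE(ierr)
END PROGRAM count_demo
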
@@ -87,6 +87,11 @@ CONTAINS
#ifdef CPP_MPI
n_start=mpi%irank+1
n_stride=mpi%isize
IF (mpi%irank>0) THEN
vxc%mt=0.0
vx%mt=0.0
exc%mt=0.0
ENDIF
#else
n_start=1
n_stride=1
@@ -139,13 +144,11 @@ CONTAINS
CALL finish_mt_grid()
#ifdef CPP_MPI
CALL MPI_ALLREDUCE(MPI_IN_PLACE,vx%mt,atoms%jmtd*(1+sphhar%nlhd)*atoms%ntype*DIMENSION%jspd,CPP_MPI_REAL,MPI_SUM,mpi%mpi_comm,ierr) !ToDo:CPP_MPI_REAL?
!using vxr_local as a temporal buffer
CALL MPI_ALLREDUCE(MPI_IN_PLACE,vxc%mt,atoms%jmtd*(1+sphhar%nlhd)*atoms%ntype*DIMENSION%jspd,CPP_MPI_REAL,MPI_SUM,mpi%mpi_comm,ierr)
CALL MPI_ALLREDUCE(MPI_IN_PLACE,exc%mt(:,:,:,1),atoms%jmtd*(1+sphhar%nlhd)*atoms%ntype,CPP_MPI_REAL,MPI_SUM,mpi%mpi_comm,ierr)
CALL MPI_ALLREDUCE(MPI_IN_PLACE,vx%mt,SIZE(vx%mt),CPP_MPI_REAL,MPI_SUM,mpi%mpi_comm,ierr)
CALL MPI_ALLREDUCE(MPI_IN_PLACE,vxc%mt,SIZE(vxc%mt),CPP_MPI_REAL,MPI_SUM,mpi%mpi_comm,ierr)
CALL MPI_ALLREDUCE(MPI_IN_PLACE,exc%mt,SIZE(exc%mt),CPP_MPI_REAL,MPI_SUM,mpi%mpi_comm,ierr)
#endif
!
RETURN
END SUBROUTINE vmt_xc
END MODULE m_vmt_xc
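
In the last two hunks, every rank other than rank 0 zeroes vxc%mt, vx%mt and exc%mt before the strided loop over atoms; entries a rank does not compute then stay zero, and the MPI_ALLREDUCE with MPI_SUM assembles the complete arrays on every rank without double counting. The reduction counts now come from SIZE(), which keeps each count consistent with the array's actual shape (exc is set up with a single spin above). A minimal sketch of this zero-then-allreduce work-sharing pattern, with hypothetical names (allreduce_demo, nat, partial):

PROGRAM allreduce_demo
   USE mpi
   IMPLICIT NONE
   INTEGER, PARAMETER :: nat = 8        ! number of "atoms" in this toy example
   INTEGER            :: irank, isize, ierr, n
   DOUBLE PRECISION   :: partial(nat)

   CALL MPI_INIT(ierr)
   CALL MPI_COMM_RANK(MPI_COMM_WORLD,irank,ierr)
   CALL MPI_COMM_SIZE(MPI_COMM_WORLD,isize,ierr)

   partial = 0.0                        ! entries this rank does not compute stay zero
   DO n = irank+1, nat, isize           ! round-robin distribution, as with n_start/n_stride
      partial(n) = DBLE(n)              ! stand-in for the per-atom xc contribution
   END DO

   ! MPI_SUM assembles the full array on every rank; SIZE() keeps the count
   ! consistent with the array's declared shape
   CALL MPI_ALLREDUCE(MPI_IN_PLACE,partial,SIZE(partial),MPI_DOUBLE_PRECISION, &
                      MPI_SUM,MPI_COMM_WORLD,ierr)

   CALL MPI_FINALIZE(ierr)
END PROGRAM allreduce_demo
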