Commit a1035069 authored by Matthias Redies

introduce fmpi & "use mpi" everywhere

parent 7df27c5a
......@@ -138,9 +138,8 @@
#ifdef CPP_MPI
!EXTERNAL MPI_BCAST
INTEGER ierr
#include "cpp_double.h"
!INCLUDE "mpif.h"
#endif
!
!----> Abbreviation
!
......@@ -177,7 +176,7 @@
! (2) cut_off core tails from noise
!
#ifdef CPP_MPI
CALL MPI_BCAST(rh,atoms%msh*atoms%ntype,CPP_MPI_REAL,0,fmpi%mpi_comm,ierr)
CALL MPI_BCAST(rh,atoms%msh*atoms%ntype,MPI_DOUBLE_PRECISION,0,fmpi%mpi_comm,ierr)
#endif
mshc(:) = 0 ! This initialization is important because there may be atoms without core states.
nloop: DO n = 1 , atoms%ntype
......@@ -518,10 +517,7 @@
external mpi_bcast
complex :: qpwc_loc(stars%ng3)
integer :: ierr
#include "cpp_double.h"
include "mpif.h"
#endif
czero = (0.0,0.0)
#ifdef CPP_MPI
DO k = 1 , stars%ng3
......@@ -571,7 +567,7 @@
END IF
ENDDO
#ifdef CPP_MPI
CALL mpi_allreduce(qpwc_loc,qpwc,stars%ng3,CPP_MPI_COMPLEX,mpi_sum, &
CALL mpi_allreduce(qpwc_loc,qpwc,stars%ng3,MPI_DOUBLE_COMPLEX,mpi_sum, &
fmpi%mpi_comm,ierr)
#endif
......
......@@ -35,10 +35,7 @@ CONTAINS
COMPLEX,ALLOCATABLE :: x(:) !(1:stars%ng3), may be distributed over fmpi ranks
#ifdef CPP_MPI
INTEGER ierr
#include "cpp_double.h"
!INCLUDE "mpif.h"
#endif
IF (PRESENT(fmpi)) THEN
irank = fmpi%irank
nsize = fmpi%isize
......@@ -103,7 +100,7 @@ CONTAINS
DEALLOCATE(x)
#ifdef CPP_MPI
IF (PRESENT(fmpi)) THEN
CALL MPI_reduce(sum_over_ng3,qis(jsp),1,CPP_MPI_REAL,MPI_SUM,0,fmpi%mpi_comm,ierr)
CALL MPI_reduce(sum_over_ng3,qis(jsp),1,MPI_DOUBLE_PRECISION,MPI_SUM,0,fmpi%mpi_comm,ierr)
ELSE
qis(jsp) = sum_over_ng3
ENDIF
......
......@@ -27,13 +27,14 @@ MODULE m_xmlOutput
USE m_constants
USE m_utility
USE m_compile_descr
#ifdef CPP_MPI
use mpi
#endif
!$ use omp_lib
IMPLICIT NONE
#ifdef CPP_MPI
include "mpif.h"
#endif
INTEGER :: err, isize
INTEGER :: numFlags
INTEGER :: nOMPThreads
......
......@@ -26,6 +26,9 @@
SUBROUTINE io_hdfopen(filename,access_mode,fid,hdferr,access_prp)
USE hdf5
#ifdef CPP_HDFMPI
use mpi
#endif
IMPLICIT NONE
character(len=*),intent(in) :: filename
INTEGER ,INTENT(in) :: access_mode
......@@ -36,7 +39,6 @@
#ifdef CPP_DEBUG
#ifdef CPP_HDFMPI
include "mpif.h"
integer:: irank
call MPI_COMM_RANK (MPI_COMM_WORLD,irank,err)
write(*,"('PE:',i3,' opened:',a20,' rw:',l1)") irank,filename,access_mode==H5F_ACC_RDWR_F
......@@ -54,6 +56,9 @@
subroutine io_hdfclose(fid,hdferr)
USE hdf5
#ifdef CPP_HDFMPI
use mpi
#endif
IMPLICIT NONE
INTEGER(HID_T),INTENT(in) :: fid
INTEGER,INTENT(OUT),optional :: hdferr
......@@ -61,7 +66,6 @@
INTEGER::err
#ifdef CPP_DEBUG
#ifdef CPP_HDFMPI
include "mpif.h"
integer:: irank
character(len=20)::filename
integer(size_t)::flength
......
......@@ -4,8 +4,10 @@
! of the MIT license as expressed in the LICENSE file in more detail.
!--------------------------------------------------------------------------------
MODULE m_juDFT_init
MODULE m_juDFT_init
#ifdef CPP_MPI
use mpi
#endif
USE m_judft_time
USE m_judft_sysinfo
USE m_judft_stop
......@@ -29,6 +31,8 @@
SUBROUTINE signal_handler()
!Installs custom handlers for SIGTERM,SIGSEGV
#ifdef __INTEL_COMPILER
USE ifport
INTEGER :: result
......@@ -45,17 +49,19 @@
! would be changed if it would be defined in the module.
#ifdef __INTEL_COMPILER
FUNCTION intel_signal_handler(signal)
#ifdef CPP_MPI
use mpi
#endif
USE m_judft_time
USE m_judft_sysinfo
IMPLICIT NONE
INTEGER :: signal
INTEGER :: intel_signal_handler
#ifdef CPP_MPI
include "mpif.h"
INTEGER:: irank,ierr
LOGICAL:: mpi_init
CALL MPI_initialized(mpi_init,ierr)
IF (mpi_init) THEN
LOGICAL:: l_mpi_init
CALL MPI_initialized(l_mpi_init,ierr)
IF (l_mpi_init) THEN
CALL MPI_COMM_RANK (MPI_COMM_WORLD,irank,ierr)
WRITE(0,*) "Signal ",signal," detected on PE:",irank
ELSE
......@@ -73,7 +79,7 @@
CALL writetimes()
CALL PRINT_memory_info(0,.true.)
#ifdef CPP_MPI
IF (mpi_init) CALL MPI_ABORT(MPI_COMM_WORLD,ierr)
IF (l_mpi_init) CALL MPI_ABORT(MPI_COMM_WORLD,0,ierr)
#endif
STOP "Signal"
intel_signal_handler=0
......
......@@ -13,6 +13,9 @@ MODULE m_juDFT_time
! Daniel Wortmann, Fri Sep 6 11:53:08 2002
!*****************************************************************
USE m_judft_xmlOutput
#ifdef CPP_MPI
use mpi
#endif
IMPLICIT NONE
! List of different timers
PRIVATE
......@@ -377,6 +380,9 @@ CONTAINS
! writes all times to file
SUBROUTINE writetimes(stdout)
#ifdef CPP_MPI
use mpi
#endif
USE m_juDFT_internalParams
USE m_judft_usage
USE m_judft_args
......@@ -386,7 +392,6 @@ CONTAINS
CHARACTER(len=:), allocatable :: json_str
CHARACTER(len=30)::filename
#ifdef CPP_MPI
INCLUDE "mpif.h"
INTEGER::err,isize
LOGICAL:: l_mpi
CALL mpi_initialized(l_mpi,err)
......@@ -429,14 +434,15 @@ CONTAINS
! writes all times to out.xml file
SUBROUTINE writeTimesXML()
#ifdef CPP_MPI
use mpi
#endif
IMPLICIT NONE
INTEGER :: irank = 0
LOGICAL :: l_out
TYPE(t_timer), POINTER :: timer
#ifdef CPP_MPI
INCLUDE "mpif.h"
INTEGER::err, isize
LOGICAL:: l_mpi
CALL mpi_initialized(l_mpi,err)
......@@ -505,6 +511,7 @@ CONTAINS
END SUBROUTINE privWriteTimesXML
SUBROUTINE check_time_for_next_iteration(it, l_cont)
USE m_judft_args
IMPLICIT NONE
INTEGER, INTENT(IN) :: it
......@@ -514,7 +521,6 @@ CONTAINS
INTEGER :: irank = 0
real :: wtime
#ifdef CPP_MPI
INCLUDE "mpif.h"
INTEGER::err, isize
LOGICAL:: l_mpi
CALL mpi_initialized(l_mpi,err)
......@@ -542,14 +548,15 @@ CONTAINS
END SUBROUTINE check_time_for_next_iteration
SUBROUTINE resetIterationDependentTimers()
#ifdef CPP_MPI
use mpi
#endif
IMPLICIT NONE
INTEGER :: irank = 0
LOGICAL :: l_out
TYPE(t_timer), POINTER :: timer, parenttimer
#ifdef CPP_MPI
INCLUDE "mpif.h"
INTEGER::err, isize
LOGICAL:: l_mpi
CALL mpi_initialized(l_mpi,err)
......@@ -645,9 +652,6 @@ CONTAINS
USE ifport
#endif
IMPLICIT NONE
#ifdef CPP_MPI
INCLUDE 'mpif.h'
#endif
REAL::cputime
!TRY TO USE mpi OR openmp wall-clock functions
......
......@@ -45,7 +45,9 @@ MODULE m_judft_xmlOutput
END FUNCTION getXMLOutputUnitNumber
SUBROUTINE startXMLOutput(filename,tag)
#ifdef CPP_MPI
use mpi
#endif
USE m_juDFT_args
USE m_juDFT_usage
!$ use omp_lib
......@@ -55,7 +57,6 @@ MODULE m_judft_xmlOutput
CHARACTER(len=*),INTENT(in)::filename,tag
#ifdef CPP_MPI
include "mpif.h"
INTEGER :: err, isize
#endif
INTEGER :: numFlags
......
......@@ -230,8 +230,8 @@ CONTAINS
CALL mpi_bc_potden(mpi,stars,sphhar,fi%atoms,fi%input,fi%vacuum,fi%oneD,fi%noco,inDen)
IF (fi%noco%l_alignMT) THEN
DO n= 1,fi%atoms%ntype
CALL MPI_BCAST(nococonv%alph(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
CALL MPI_BCAST(nococonv%beta(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
CALL MPI_BCAST(nococonv%alph(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr(1))
CALL MPI_BCAST(nococonv%beta(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr(1))
ENDDO
END IF
#endif
......@@ -245,8 +245,8 @@ IF (fi%sliceplot%iplot.NE.0) THEN
#ifdef CPP_MPI
CALL mpi_bc_potden(mpi,stars,sphhar,fi%atoms,fi%input,fi%vacuum,fi%oneD,fi%noco,inDen)
DO n= 1,fi%atoms%ntype
CALL MPI_BCAST(nococonv%alph(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
CALL MPI_BCAST(nococonv%beta(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
CALL MPI_BCAST(nococonv%alph(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr(1))
CALL MPI_BCAST(nococonv%beta(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr(1))
ENDDO
#endif
END IF
......@@ -263,8 +263,8 @@ IF (fi%sliceplot%iplot.NE.0) THEN
#ifdef CPP_MPI
CALL mpi_bc_potden(mpi,stars,sphhar,fi%atoms,fi%input,fi%vacuum,fi%oneD,fi%noco,inDen)
DO n= 1,fi%atoms%ntype
CALL MPI_BCAST(nococonv%alph(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
CALL MPI_BCAST(nococonv%beta(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
CALL MPI_BCAST(nococonv%alph(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr(1))
CALL MPI_BCAST(nococonv%beta(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr(1))
ENDDO
#endif
END IF
......@@ -339,7 +339,7 @@ END IF
#ifdef CPP_MPI
CALL MPI_BARRIER(mpi%mpi_comm,ierr)
CALL MPI_BARRIER(mpi%mpi_comm,ierr(1))
#endif
CALL forcetheo%start(vtot,mpi%irank==0)
forcetheoloop:DO WHILE(forcetheo%next_job(iter==fi%input%itmax,fi%atoms,fi%noco,nococonv))
......@@ -381,7 +381,7 @@ END IF
CALL timestop("gen. of hamil. and diag. (total)")
#ifdef CPP_MPI
CALL MPI_BARRIER(mpi%mpi_comm,ierr)
CALL MPI_BARRIER(mpi%mpi_comm,ierr(1))
#endif
! fermi level and occupancies
......@@ -436,8 +436,8 @@ END IF
!ENDIF
#ifdef CPP_MPI
CALL MPI_BCAST(results%ef,1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
CALL MPI_BCAST(results%w_iks,SIZE(results%w_iks),MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
CALL MPI_BCAST(results%ef,1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr(1))
CALL MPI_BCAST(results%w_iks,SIZE(results%w_iks),MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr(1))
#endif
IF (forcetheo%eval(eig_id,fi%atoms,fi%kpts,fi%sym,fi%cell,fi%noco,nococonv,input_soc,mpi,fi%oneD,enpara,vToT,results)) THEN
......@@ -486,8 +486,8 @@ END IF
#ifdef CPP_MPI
CALL mpi_bc_potden(mpi,stars,sphhar,fi%atoms,fi%input,fi%vacuum,fi%oneD,fi%noco,outDen)
DO n= 1,fi%atoms%ntype
CALL MPI_BCAST(nococonv%alph(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
CALL MPI_BCAST(nococonv%beta(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
CALL MPI_BCAST(nococonv%alph(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr(1))
CALL MPI_BCAST(nococonv%beta(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr(1))
ENDDO
#endif
END IF
......@@ -504,8 +504,8 @@ END IF
#ifdef CPP_MPI
CALL mpi_bc_potden(mpi,stars,sphhar,fi%atoms,fi%input,fi%vacuum,fi%oneD,fi%noco,outDen)
DO n= 1,fi%atoms%ntype
CALL MPI_BCAST(nococonv%alph(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
CALL MPI_BCAST(nococonv%beta(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
CALL MPI_BCAST(nococonv%alph(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr(1))
CALL MPI_BCAST(nococonv%beta(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr(1))
ENDDO
#endif
......@@ -524,20 +524,20 @@ END IF
#ifdef CPP_MPI
CALL MPI_BCAST(enpara%evac,SIZE(enpara%evac),MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
CALL MPI_BCAST(enpara%evac0,SIZE(enpara%evac0),MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
CALL MPI_BCAST(enpara%el0,SIZE(enpara%el0),MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
CALL MPI_BCAST(enpara%ello0,SIZE(enpara%ello0),MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
CALL MPI_BCAST(enpara%evac,SIZE(enpara%evac),MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr(1))
CALL MPI_BCAST(enpara%evac0,SIZE(enpara%evac0),MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr(1))
CALL MPI_BCAST(enpara%el0,SIZE(enpara%el0),MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr(1))
CALL MPI_BCAST(enpara%ello0,SIZE(enpara%ello0),MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr(1))
IF (fi%noco%l_noco) THEN
DO n= 1,fi%atoms%ntype
IF (fi%noco%l_relax(n)) THEN
CALL MPI_BCAST(nococonv%alph(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
CALL MPI_BCAST(nococonv%beta(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
CALL MPI_BCAST(nococonv%alph(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr(1))
CALL MPI_BCAST(nococonv%beta(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr(1))
ENDIF
ENDDO
IF (fi%noco%l_constr) THEN
CALL MPI_BCAST(nococonv%b_con,SIZE(nococonv%b_con),MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
CALL MPI_BCAST(nococonv%b_con,SIZE(nococonv%b_con),MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr(1))
ENDIF
ENDIF
#endif
......@@ -566,8 +566,8 @@ END IF
CALL mpi_bc_potden(mpi,stars,sphhar,fi%atoms,fi%input,fi%vacuum,fi%oneD,fi%noco,inDen)
CALL mpi_bc_potden(mpi,stars,sphhar,fi%atoms,fi%input,fi%vacuum,fi%oneD,fi%noco,outDen)
DO n= 1,fi%atoms%ntype
CALL MPI_BCAST(nococonv%alph(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
CALL MPI_BCAST(nococonv%beta(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
CALL MPI_BCAST(nococonv%alph(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr(1))
CALL MPI_BCAST(nococonv%beta(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr(1))
ENDDO
#endif
END IF
......@@ -595,8 +595,8 @@ END IF
#ifdef CPP_MPI
CALL mpi_bc_potden(mpi,stars,sphhar,fi%atoms,fi%input,fi%vacuum,fi%oneD,fi%noco,inDen)
DO n= 1,fi%atoms%ntype
CALL MPI_BCAST(nococonv%alph(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
CALL MPI_BCAST(nococonv%beta(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
CALL MPI_BCAST(nococonv%alph(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr(1))
CALL MPI_BCAST(nococonv%beta(n),1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr(1))
ENDDO
#endif
END IF
......@@ -610,8 +610,8 @@ END IF
CALL timestop("Iteration")
#ifdef CPP_MPI
CALL MPI_BCAST(results%last_distance,1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr)
CALL MPI_BARRIER(mpi%mpi_comm,ierr)
CALL MPI_BCAST(results%last_distance,1,MPI_DOUBLE_PRECISION,0,mpi%mpi_comm,ierr(1))
CALL MPI_BARRIER(mpi%mpi_comm,ierr(1))
#endif
CALL priv_geo_end(mpi)
......
......@@ -4,9 +4,12 @@
! of the MIT license as expressed in the LICENSE file in more detail.
!--------------------------------------------------------------------------------
MODULE m_fleur_init
#ifdef CPP_MPI
use mpi
#endif
IMPLICIT NONE
CONTAINS
SUBROUTINE fleur_init(mpi,&
SUBROUTINE fleur_init(fmpi,&
input,field,atoms, sphhar,cell,stars,sym,noco,nococonv,vacuum,forcetheo,&
sliceplot,banddos,enpara,xcpot,results,kpts,mpinp,hybinp,&
oneD,coreSpecInput,gfinp,hub1inp,wann)
......@@ -53,7 +56,7 @@ CONTAINS
#endif
IMPLICIT NONE
! Types, these variables contain a lot of data!
TYPE(t_mpi) ,INTENT(INOUT):: mpi
TYPE(t_mpi) ,INTENT(INOUT):: fmpi
TYPE(t_input) ,INTENT(OUT):: input
TYPE(t_field), INTENT(OUT) :: field
......@@ -99,18 +102,17 @@ CONTAINS
LOGICAL :: l_found, l_kpts, l_exist, l_krla
#ifdef CPP_MPI
INCLUDE 'mpif.h'
INTEGER ierr(3)
CALL MPI_COMM_RANK (mpi%mpi_comm,mpi%irank,ierr)
CALL MPI_COMM_SIZE (mpi%mpi_comm,mpi%isize,ierr)
CALL MPI_COMM_RANK (fmpi%mpi_comm,fmpi%irank,ierr(1))
CALL MPI_COMM_SIZE (fmpi%mpi_comm,fmpi%isize,ierr(1))
#else
mpi%irank=0 ; mpi%isize=1; mpi%mpi_comm=1
fmpi%irank=0 ; fmpi%isize=1; fmpi%mpi_comm=1
#endif
CALL check_command_line()
#ifdef CPP_HDF
CALL hdf_init()
#endif
IF (mpi%irank.EQ.0) THEN
IF (fmpi%irank.EQ.0) THEN
CALL startFleur_XMLOutput()
IF (judft_was_argument("-info")) THEN
CLOSE(oUnit)
......@@ -126,7 +128,7 @@ CONTAINS
ALLOCATE(t_xcpot_inbuild::xcpot)
!Only PE==0 reads the input and does basic postprocessing
IF (mpi%irank.EQ.0) THEN
IF (fmpi%irank.EQ.0) THEN
CALL fleurinput_read_xml(cell,sym,atoms,input,noco,vacuum,field,&
sliceplot,banddos,mpinp,hybinp,oneD,coreSpecInput,&
wann,xcpot,forcetheo_data,kpts,enparaXML,gfinp,hub1inp)
......@@ -136,17 +138,17 @@ CONTAINS
!Distribute input to all PE
CALL fleurinput_mpi_bc(Cell,Sym,Atoms,Input,Noco,Vacuum,Field,&
Sliceplot,Banddos,mpinp,hybinp,Oned,Corespecinput,Wann,&
Xcpot,Forcetheo_data,Kpts,Enparaxml,gfinp,hub1inp,Mpi%Mpi_comm)
Xcpot,Forcetheo_data,Kpts,Enparaxml,gfinp,hub1inp,fmpi%Mpi_comm)
!Remaining init is done using all PE
CALL nococonv%init(noco)
CALL nococonv%init_ss(noco,atoms)
CALL ylmnorm_init(MAX(atoms%lmaxd, 2*hybinp%lexp))
CALL gaunt_init(atoms%lmaxd+1)
CALL enpara%init_enpara(atoms,input%jspins,input%film,enparaXML)
CALL make_sphhar(mpi%irank==0,atoms,sphhar,sym,cell,oneD)
CALL make_sphhar(fmpi%irank==0,atoms,sphhar,sym,cell,oneD)
! Store structure data (has to be performed before calling make_stars)
CALL storeStructureIfNew(input,stars, atoms, cell, vacuum, oneD, sym, mpi,sphhar,noco)
CALL make_stars(stars,sym,atoms,vacuum,sphhar,input,cell,xcpot,oneD,noco,mpi)
CALL storeStructureIfNew(input,stars, atoms, cell, vacuum, oneD, sym, fmpi,sphhar,noco)
CALL make_stars(stars,sym,atoms,vacuum,sphhar,input,cell,xcpot,oneD,noco,fmpi)
CALL make_forcetheo(forcetheo_data,cell,sym,atoms,forcetheo)
CALL lapw_dim(kpts,cell,input,noco,nococonv,oneD,forcetheo,atoms)
CALL input%init(noco,hybinp%l_hybrid,lapw_dim_nbasfcn)
......@@ -154,17 +156,17 @@ CONTAINS
CALL hybinp%init(atoms, cell, input, oneD, sym, xcpot)
CALL kpts%init(cell, sym, input%film, hybinp%l_hybrid .or. input%l_rdmft)
CALL gfinp%init(atoms, sym, noco, input)
CALL prp_xcfft(mpi,stars,input,cell,xcpot)
CALL convn(mpi%irank==0,atoms,stars)
IF (mpi%irank==0) CALL e_field(atoms,stars,sym,vacuum,cell,input,field%efield)
IF (mpi%isize>1) CALL field%mpi_bc(mpi%mpi_comm,0)
CALL prp_xcfft(fmpi,stars,input,cell,xcpot)
CALL convn(fmpi%irank==0,atoms,stars)
IF (fmpi%irank==0) CALL e_field(atoms,stars,sym,vacuum,cell,input,field%efield)
IF (fmpi%isize>1) CALL field%mpi_bc(fmpi%mpi_comm,0)
!At some point this should be enabled for noco as well
IF (.NOT.noco%l_noco) &
CALL transform_by_moving_atoms(mpi,stars,atoms,vacuum, cell, sym, sphhar,input,oned,noco)
CALL transform_by_moving_atoms(fmpi,stars,atoms,vacuum, cell, sym, sphhar,input,oned,noco)
#ifndef _OPENACC
IF (mpi%irank.EQ.0) THEN
IF (fmpi%irank.EQ.0) THEN
CALL w_inpXML(&
atoms,vacuum,input,stars,sliceplot,forcetheo,banddos,&
cell,sym,xcpot,noco,oneD,mpinp,hybinp,kpts,enpara,gfinp,&
......@@ -177,16 +179,16 @@ CONTAINS
IF (mpi%irank.EQ.0) THEN
CALL writeOutParameters(mpi,input,sym,stars,atoms,vacuum,kpts,&
IF (fmpi%irank.EQ.0) THEN
CALL writeOutParameters(fmpi,input,sym,stars,atoms,vacuum,kpts,&
oneD,hybinp,cell,banddos,sliceplot,xcpot,&
noco,enpara,sphhar)
CALL fleur_info(kpts)
CALL deleteDensities()
END IF
!Finalize the MPI setup
CALL setupMPI(kpts%nkpt,input%neig,mpi)
!Finalize the fmpi setup
CALL setupMPI(kpts%nkpt,input%neig,fmpi)
!Collect some usage info
CALL add_usage_data("A-Types",atoms%ntype)
......@@ -209,20 +211,20 @@ CONTAINS
CALL results%init(input,atoms,kpts,noco)
IF (mpi%irank.EQ.0) THEN
IF(input%gw.NE.0) CALL mixing_history_reset(mpi)
IF (fmpi%irank.EQ.0) THEN
IF(input%gw.NE.0) CALL mixing_history_reset(fmpi)
CALL setStartingDensity(noco%l_noco)
END IF
!new check mode will only run the init-part of FLEUR
IF (judft_was_argument("-check")) CALL judft_end("Check-mode done",mpi%irank)
IF (judft_was_argument("-check")) CALL judft_end("Check-mode done",fmpi%irank)
#ifdef CPP_MPI
CALL MPI_BARRIER(mpi%mpi_comm,ierr)
CALL MPI_BARRIER(fmpi%mpi_comm,ierr(1))
#endif
CONTAINS
SUBROUTINE init_wannier()
! Initializations for Wannier functions (start)
IF (mpi%irank.EQ.0) THEN
IF (fmpi%irank.EQ.0) THEN
wann%l_gwf = wann%l_ms.OR.wann%l_sgwf.OR.wann%l_socgwf
IF(wann%l_gwf) THEN
......@@ -257,7 +259,7 @@ CONTAINS
ALLOCATE (wann%param_vec(3,wann%nparampts))
ALLOCATE (wann%param_alpha(atoms%ntype,wann%nparampts))
IF(mpi%irank.EQ.0) THEN
IF(fmpi%irank.EQ.0) THEN
IF(wann%l_gwf) THEN
OPEN(113,file=wann%param_file,status='old')
READ(113,*)!header
......@@ -294,12 +296,12 @@ CONTAINS
CALL juDFT_error("do not specify 1st component if l_socgwf",calledby="fleur_init")
END IF
END IF!(wann%l_gwf)
END IF!(mpi%irank.EQ.0)
END IF!(fmpi%irank.EQ.0)
#ifdef CPP_MPI
CALL MPI_BCAST(wann%param_vec,3*wann%nparampts,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD,ierr)
CALL MPI_BCAST(wann%param_alpha,atoms%ntype*wann%nparampts,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD,ierr)
CALL MPI_BCAST(wann%l_dim,3,MPI_LOGICAL,0,MPI_COMM_WORLD,ierr)
CALL MPI_BCAST(wann%param_vec,3*wann%nparampts,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD,ierr(1))
CALL MPI_BCAST(wann%param_alpha,atoms%ntype*wann%nparampts,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD,ierr(1))
CALL MPI_BCAST(wann%l_dim,3,MPI_LOGICAL,0,MPI_COMM_WORLD,ierr(1))
#endif
! Initializations for Wannier functions (end)
......
......@@ -4,6 +4,9 @@
! of the MIT license as expressed in the LICENSE file in more detail.
!--------------------------------------------------------------------------------
MODULE m_fleur_jobs
#ifdef CPP_MPI
use mpi
#endif
USE m_juDFT
IMPLICIT NONE
PRIVATE
......@@ -133,13 +136,12 @@ CONTAINS
USE m_constants
INTEGER:: irank=0
#ifdef CPP_MPI
INCLUDE 'mpif.h'
INTEGER ierr(3), i
CALL MPI_INIT_THREAD(MPI_THREAD_FUNNELED,i,ierr)
CALL MPI_INIT_THREAD(MPI_THREAD_FUNNELED,i,ierr(1))
#endif
CALL judft_init(oUnit,.FALSE.)
#ifdef CPP_MPI
CALL MPI_COMM_RANK(MPI_COMM_WORLD,irank,ierr)
CALL MPI_COMM_RANK(MPI_COMM_WORLD,irank,ierr(1))
IF(irank.EQ.0) THEN
!$ IF (i<MPI_THREAD_FUNNELED) THEN
!$ WRITE(*,*) ""
......@@ -185,7 +187,6 @@ CONTAINS
#ifdef CPP_MPI
INTEGER:: ierr
INCLUDE 'mpif.h'
CALL MPI_COMM_RANK(MPI_COMM_WORLD,irank,ierr)
!find the number of the job for this PE
......@@ -218,7 +219,6 @@ CONTAINS
use m_types_mpi
TYPE(t_job),INTENT