Commit d815a880 authored by Matthias Redies

rename hybmpi -> glob_mpi

parent 4872fe93
@@ -39,7 +39,7 @@ CONTAINS
 INTEGER, INTENT(INOUT) :: iterHF
 ! local variables
-type(t_hybmpi) :: hybmpi
+type(t_hybmpi) :: glob_mpi
 type(t_work_package) :: work_pack
 INTEGER :: jsp, nk, err, i, j
 type(t_lapw) :: lapw
@@ -64,7 +64,7 @@ CONTAINS
 IF (.NOT. hybdat%l_calhf) THEN
 hybdat%l_subvxc = hybdat%l_subvxc .AND. hybdat%l_addhf
 else
-call hybmpi%copy_mpi(mpi)
+call glob_mpi%copy_mpi(mpi)
 results%te_hfex%core = 0
 !Check if we are converged well enough to calculate a new potential
@@ -90,7 +90,7 @@ CONTAINS
 CALL timestart("Preparation for hybrid functionals")
 !construct the mixed-basis
 CALL timestart("generation of mixed basis")
-if(hybmpi%rank == 0) write (*,*) "iterHF = ", iterHF
+if(glob_mpi%rank == 0) write (*,*) "iterHF = ", iterHF
 CALL mixedbasis(fi%atoms, fi%kpts, fi%input, fi%cell, xcpot, fi%mpinp, mpdata, fi%hybinp, hybdat,&
 enpara, mpi, v, iterHF)
 CALL timestop("generation of mixed basis")
@@ -104,12 +104,12 @@ CONTAINS
 ! use jsp=1 for coulomb work-planning
 call hybdat%set_states(fi, results, 1)
-call work_pack%init(fi, hybdat, 1, hybmpi%rank, hybmpi%size)
+call work_pack%init(fi, hybdat, 1, glob_mpi%rank, glob_mpi%size)
 CALL coulombmatrix(mpi, fi, mpdata, hybdat, xcpot, work_pack)
 call work_pack%free()
 do i =1,fi%kpts%nkpt
-call hybdat%coul(i)%mpi_ibc(fi, hybmpi, work_pack%owner_nk(i))
+call hybdat%coul(i)%mpi_ibc(fi, glob_mpi, work_pack%owner_nk(i))
 enddo
 CALL hf_init(eig_id, mpdata, fi, hybdat)
@@ -120,7 +120,7 @@ CONTAINS
 call timestart("HF_setup")
 CALL HF_setup(mpdata,fi, mpi, nococonv, results, jsp, enpara, &
 hybdat, v%mt(:, 0, :, :), eig_irr)
-call work_pack%init(fi, hybdat, jsp, hybmpi%rank, hybmpi%size)
+call work_pack%init(fi, hybdat, jsp, glob_mpi%rank, glob_mpi%size)
 call timestop("HF_setup")
 DO i = 1,work_pack%k_packs(1)%size
@@ -168,5 +168,10 @@ CONTAINS
 if(allocated(hybdat%div_vv)) deallocate(hybdat%div_vv)
 allocate(hybdat%div_vv(fi%input%neig, fi%kpts%nkpt, fi%input%jspins), source=0.0)
 end subroutine first_iteration_alloc
+subroutine distribute_mpis(fi, glob_mpi)
+implicit none
+end subroutine distribute_mpis
 END SUBROUTINE calc_hybrid
 END MODULE m_calc_hybrid
@@ -132,7 +132,7 @@ SUBROUTINE rdmft(eig_id,mpi,fi,enpara,stars,&
 INTEGER, ALLOCATABLE :: n_q(:)
 LOGICAL, ALLOCATABLE :: enabledConstraints(:)
-type(t_hybmpi) :: hybmpi
+type(t_hybmpi) :: glob_mpi
 complex :: c_phase(fi%input%neig)
@@ -391,14 +391,14 @@ SUBROUTINE rdmft(eig_id,mpi,fi,enpara,stars,&
 CALL hybdat%coul(ikpt)%alloc(fi, mpdata%num_radbasfn, mpdata%n_g, ikpt)
 END DO
-CALL hybmpi%copy_mpi(mpi)
-call work_pack%init(fi, hybdat, jsp, hybmpi%rank, hybmpi%size)
+CALL glob_mpi%copy_mpi(mpi)
+call work_pack%init(fi, hybdat, jsp, glob_mpi%rank, glob_mpi%size)
 CALL coulombmatrix(mpi, fi, mpdata, hybdat, xcpot, work_pack)
 DO ikpt = 1, fi%kpts%nkpt
-CALL hybdat%coul(ikpt)%mpi_ibc(fi, hybmpi, 0)
+CALL hybdat%coul(ikpt)%mpi_ibc(fi, glob_mpi, 0)
 END DO
 CALL hf_init(eig_id,mpdata,fi,hybdat)
@@ -138,7 +138,7 @@ contains
 #endif
 end subroutine t_coul_mpi_wait
-subroutine t_coul_mpi_ibc(coul, fi, hybmpi, root)
+subroutine t_coul_mpi_ibc(coul, fi, glob_mpi, root)
 use m_types_fleurinput
 use m_types_hybmpi
 use m_judft
@@ -148,7 +148,7 @@ contains
 implicit none
 class(t_coul) :: coul
 type(t_fleurinput), intent(in) :: fi
-type(t_hybmpi), intent(in) :: hybmpi
+type(t_hybmpi), intent(in) :: glob_mpi
 integer, intent(in) :: root
 #ifdef CPP_MPI
 integer :: ierr
@@ -156,28 +156,28 @@ contains
 if (fi%sym%invs) THEN
-call MPI_IBcast(coul%mt1_r, size(coul%mt1_r), MPI_DOUBLE_PRECISION, root, hybmpi%comm, coul%bcast_req(1), ierr)
+call MPI_IBcast(coul%mt1_r, size(coul%mt1_r), MPI_DOUBLE_PRECISION, root, glob_mpi%comm, coul%bcast_req(1), ierr)
 if(ierr /= 0) call judft_error("MPI_IBcast of coul%mt1_r failed")
-call MPI_IBcast(coul%mt2_r, size(coul%mt2_r), MPI_DOUBLE_PRECISION, root, hybmpi%comm, coul%bcast_req(2), ierr)
+call MPI_IBcast(coul%mt2_r, size(coul%mt2_r), MPI_DOUBLE_PRECISION, root, glob_mpi%comm, coul%bcast_req(2), ierr)
 if(ierr /= 0) call judft_error("MPI_IBcast of coul%mt2_r failed")
-call MPI_IBcast(coul%mt3_r, size(coul%mt3_r), MPI_DOUBLE_PRECISION, root, hybmpi%comm, coul%bcast_req(3), ierr)
+call MPI_IBcast(coul%mt3_r, size(coul%mt3_r), MPI_DOUBLE_PRECISION, root, glob_mpi%comm, coul%bcast_req(3), ierr)
 if(ierr /= 0) call judft_error("MPI_IBcast of coul%mt3_r failed")
-call MPI_IBcast(coul%mtir_r, size(coul%mtir_r), MPI_DOUBLE_PRECISION, root, hybmpi%comm, coul%bcast_req(4), ierr)
+call MPI_IBcast(coul%mtir_r, size(coul%mtir_r), MPI_DOUBLE_PRECISION, root, glob_mpi%comm, coul%bcast_req(4), ierr)
 if(ierr /= 0) call judft_error("MPI_IBcast of coul%mtir_r failed")
 else
-call MPI_IBcast(coul%mt1_c, size(coul%mt1_c), MPI_DOUBLE_COMPLEX, root, hybmpi%comm, coul%bcast_req(1), ierr)
+call MPI_IBcast(coul%mt1_c, size(coul%mt1_c), MPI_DOUBLE_COMPLEX, root, glob_mpi%comm, coul%bcast_req(1), ierr)
 if(ierr /= 0) call judft_error("MPI_IBcast of coul%mt1_c failed")
-call MPI_IBcast(coul%mt2_c, size(coul%mt2_c), MPI_DOUBLE_COMPLEX , root, hybmpi%comm, coul%bcast_req(2), ierr)
+call MPI_IBcast(coul%mt2_c, size(coul%mt2_c), MPI_DOUBLE_COMPLEX , root, glob_mpi%comm, coul%bcast_req(2), ierr)
 if(ierr /= 0) call judft_error("MPI_IBcast of coul%mt2_r failed")
-call MPI_IBcast(coul%mt3_c, size(coul%mt3_c), MPI_DOUBLE_COMPLEX , root, hybmpi%comm, coul%bcast_req(3), ierr)
+call MPI_IBcast(coul%mt3_c, size(coul%mt3_c), MPI_DOUBLE_COMPLEX , root, glob_mpi%comm, coul%bcast_req(3), ierr)
 if(ierr /= 0) call judft_error("MPI_IBcast of coul%mt3_r failed")
-call MPI_IBcast(coul%mtir_c, size(coul%mtir_c), MPI_DOUBLE_COMPLEX , root, hybmpi%comm, coul%bcast_req(4), ierr)
+call MPI_IBcast(coul%mtir_c, size(coul%mtir_c), MPI_DOUBLE_COMPLEX , root, glob_mpi%comm, coul%bcast_req(4), ierr)
 if(ierr /= 0) call judft_error("MPI_IBcast of coul%mtir_r failed")
 endif
 #endif
@@ -13,26 +13,26 @@ MODULE m_types_hybmpi
 procedure :: barrier => t_hybmpi_barrier
 END TYPE t_hybmpi
 contains
-subroutine t_hybmpi_copy_mpi(hybmpi, mpi)
+subroutine t_hybmpi_copy_mpi(glob_mpi, mpi)
 use m_types_mpi
 implicit none
-class(t_hybmpi), intent(inout) :: hybmpi
+class(t_hybmpi), intent(inout) :: glob_mpi
 type(t_mpi), intent(in) :: mpi
-hybmpi%comm = mpi%mpi_comm
-hybmpi%size = mpi%isize
-hybmpi%rank = mpi%irank
+glob_mpi%comm = mpi%mpi_comm
+glob_mpi%size = mpi%isize
+glob_mpi%rank = mpi%irank
 end subroutine
-subroutine t_hybmpi_barrier(hybmpi)
+subroutine t_hybmpi_barrier(glob_mpi)
 use m_judft
 implicit none
-class(t_hybmpi), intent(inout) :: hybmpi
+class(t_hybmpi), intent(inout) :: glob_mpi
 integer :: ierr
 #ifdef CPP_MPI
-call MPI_Barrier(hybmpi%comm, ierr)
+call MPI_Barrier(glob_mpi%comm, ierr)
 if(ierr /= 0) call juDFT_error("barrier failed on process: " // &
-int2str(hybmpi%rank))
+int2str(glob_mpi%rank))
 #endif
 end subroutine t_hybmpi_barrier
 END MODULE m_types_hybmpi