Commit 5143a617 authored by Daniel Wortmann

Fixed bugs in the parallel version

parent 98c84f3e
@@ -78,15 +78,15 @@ endif()
if(${Fleur_uses_MPI})
#Preprocessor switches
if (${Fleur_uses_ELEMENTAL})
set(MPI_DEFINITIONS -DCPP_EVP -DCPP_ELEMENTAL)
set(MPI_DEFINITIONS ${MPI_DEFINITIONS} -DCPP_ELEMENTAL)
endif()
if (${Fleur_uses_SCALAPACK})
set(MPI_DEFINITIONS -DCPP_EVP)
set(MPI_DEFINITIONS ${MPI_DEFINITIONS} -DCPP_SCALAPACK)
endif()
if (${Fleur_uses_ELPA})
set(MPI_DEFINITIONS -DCPP_ELPA -DCPP_ELPA2 -DCPP_EVP)
set(MPI_DEFINITIONS ${MPI_DEFINITIONS} -DCPP_ELPA -DCPP_ELPA2)
endif()
set(MPI_DEFINITIONS -DCPP_MPI ${MPI_DEFINITIONS})
set(MPI_DEFINITIONS -DCPP_MPI -DCPP_EVP ${MPI_DEFINITIONS})
#fleur_MPI
add_executable(fleur_MPI ${juDFT_HDF} ${juDFT_SRC_F90} ${fleur_SRC} ${c_filesFleur}
......
@@ -15,7 +15,7 @@ if (${Fleur_uses_MPI})
if(${Fleur_uses_ELPA})
set(fleur_F90 ${fleur_F90} diagonalization/elpa.F90)
endif()
if(${Fleur_uses_SCALAPCK})
if(${Fleur_uses_SCALAPACK})
set(fleur_F90 ${fleur_F90} diagonalization/chani.F90)
endif()
if(${Fleur_uses_ELEMENTAL})
......
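The CMake hunks above accumulate the preprocessor switches (CPP_MPI, CPP_EVP, CPP_SCALAPACK, CPP_ELPA, CPP_ELPA2, CPP_ELEMENTAL) into MPI_DEFINITIONS instead of overwriting it, and CPP_EVP is now set for every MPI build. As a minimal sketch, assuming only that these flags reach the Fortran preprocessor for .F90 sources (the program below is illustrative, not FLEUR code), such a switch is consumed like this:
program demo_cpp_switch
  implicit none
#ifdef CPP_EVP
  ! CPP_EVP is assumed here to mark builds with a parallel eigenvalue-problem solver
  print *, 'built with -DCPP_EVP'
#else
  print *, 'built without -DCPP_EVP'
#endif
end program demo_cpp_switch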
MODULE m_eigen_diag
USE m_juDFT
IMPLICIT NONE
! the parameters are set to negative values to indicate that a particular solver is not compiled
#ifdef CPP_ELEMENTAL
USE m_elemental
#endif
#ifdef CPP_SCALAPACK
USE m_chani
#endif
#ifdef CPP_ELPA
USE m_elpa
#endif
IMPLICIT NONE
#ifdef CPP_ELPA
INTEGER,PARAMETER:: diag_elpa=1
#else
INTEGER,PARAMETER:: diag_elpa=-1
#endif
#ifdef CPP_ELEMENTAL
USE m_elemental
INTEGER,PARAMETER:: diag_elemental=2
#else
INTEGER,PARAMETER:: diag_elemental=-2
#endif
#ifdef CPP_SCALAPACK
USE m_chani
INTEGER,PARAMETER:: diag_scalapack=3
#else
INTEGER,PARAMETER:: diag_scalapack=-3
@@ -24,7 +30,7 @@ MODULE m_eigen_diag
INTEGER,PARAMETER:: diag_lapack2=5
CONTAINS
SUBROUTINE eigen_diag(jsp,eig_id,it,atoms,dimension,matsize,mpi, n_rank,n_size,ne,nk,lapw,input,nred,sub_comm,&
sym,matind,kveclo, noco,cell,bkpt,el,jij,l_wu,oneD,td,ud, eig,a,b,z)
sym,matind,kveclo, noco,cell,bkpt,el,jij,l_wu,oneD,td,ud, eig,a,b,z,ne_found)
USE m_zsymsecloc
USE m_aline
USE m_alinemuff
@@ -45,7 +51,8 @@ CONTAINS
INTEGER, INTENT(IN) :: jsp,eig_id,it,matsize
INTEGER, INTENT(IN) :: n_rank,n_size ,nk ,nred,sub_comm
INTEGER, INTENT(IN) :: matind(dimension%nbasfcn,2),kveclo(atoms%nlotot)
INTEGER,INTENT(INOUT):: ne
INTEGER,INTENT(IN) :: ne
INTEGER,INTENT(OUT) :: ne_found
REAL,INTENT(IN) :: el(:,:,:)
LOGICAL, INTENT(IN) :: l_wu
REAL,INTENT(INOUT) :: bkpt(3)
@@ -90,33 +97,33 @@ CONTAINS
CALL timestart("Diagonalization")
!Select the solver
parallel=(n_size>1)
ne_found=ne
SELECT CASE (priv_select_solver(parallel))
#ifdef CPP_ELPA
CASE (diag_elpa)
CALL elpa(lapw%nmat,n,SUB_COMM,a,b,z,eig,ne)
CALL elpa(lapw%nmat,n,SUB_COMM,a,b,z,eig,ne_found)
#endif
#ifdef CPP_ELEMENTAL
CASE (diag_elemental)
IF (it==1) THEN !switch between direct solver and iterative solver
CALL elemental(lapw%nmat,n,SUB_COMM,a,b,z,eig,ne,1)
CALL elemental(lapw%nmat,dimension%nbasfcn/n_size,SUB_COMM,a,b,z,eig,ne_found,1)
ELSE
CALL elemental(lapw%nmat,n,SUB_COMM,a,b,z,eig,ne,0)
CALL elemental(lapw%nmat,dimension%nbasfcn/n_size,SUB_COMM,a,b,z,eig,ne_found,0)
ENDIF
#endif
#ifdef CPP_SCALAPACK
CASE (diag_scalapack)
CALL chani(lapw%nmat,n,ndim, n_rank,n_size,SUB_COMM,mpi%mpi_comm, a,b,z,eig,ne)
CALL chani(lapw%nmat,dimension%nbasfcn/n_size,ndim, n_rank,n_size,SUB_COMM,mpi%mpi_comm, a,b,z,eig,ne_found)
#endif
CASE (diag_lapack2)
if (noco%l_ss) call juDFT_error("zsymsecloc not tested with noco%l_ss")
if (input%gw>1) call juDFT_error("zsymsecloc not tested with input%gw>1")
CALL zsymsecloc(jsp,input,lapw,bkpt,atoms,kveclo, sym,cell, dimension,matsize,ndim,&
jij,matind,nred, a,b, z,eig,ne)
jij,matind,nred, a,b, z,eig,ne_found)
CASE (diag_lapack)
CALL franza(dimension%nbasfcn,ndim, lapw%nmat,&
(sym%l_zref.AND.(atoms%nlotot.EQ.0)), jij%l_j,matind,nred, a,b,input%gw, z,eig,ne)
(sym%l_zref.AND.(atoms%nlotot.EQ.0)), jij%l_j,matind,nred, a,b,input%gw, z,eig,ne_found)
CASE DEFAULT
!This should only happen if you selected a solver by hand that was not compiled in
print*, "You selected a diagonalization scheme without compiling for it"
@@ -127,7 +134,7 @@ CONTAINS
ELSE
call timestart("aline")
CALL aline(eig_id,nk,atoms,dimension,sym,cell,input,jsp,el,&
ud,a,b,lapw,td,noco,oneD,bkpt,z,eig,ne)
ud,a,b,lapw,td,noco,oneD,bkpt,z,eig,ne_found)
call timestop("aline")
ENDIF
!---> SECOND VARIATION STEP
@@ -136,7 +143,7 @@ CONTAINS
!---> hamiltonian
call timestart("second variation diagonalization")
CALL aline_muff(atoms,dimension,sym, cell, jsp,ne, ud,td, bkpt,lapw, z,eig)
CALL aline_muff(atoms,dimension,sym, cell, jsp,ne_found, ud,td, bkpt,lapw, z,eig)
call timestop("second variation diagonalization")
END IF
END SUBROUTINE eigen_diag
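The SELECT CASE above dispatches to whichever solver was compiled in; the diag_* parameters are set to negative values for missing solvers, so a hand-selected but unavailable solver falls through to CASE DEFAULT. A small standalone sketch of that pattern (illustrative only, assuming the file is preprocessed like a .F90 source; not FLEUR code):
program demo_solver_dispatch
  implicit none
  ! sign convention as in m_eigen_diag: positive = compiled in, negative = missing
#ifdef CPP_ELPA
  integer, parameter :: diag_elpa = 1
#else
  integer, parameter :: diag_elpa = -1
#endif
  integer, parameter :: diag_lapack = 4
  integer :: solver
  solver = diag_lapack            ! imagine this was selected by the user
  select case (solver)
#ifdef CPP_ELPA
  case (diag_elpa)
    print *, 'ELPA branch'
#endif
  case (diag_lapack)
    print *, 'serial LAPACK branch'
  case default
    print *, 'selected solver was not compiled in'
  end select
end program demo_solver_dispatch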
@@ -175,6 +182,7 @@ CONTAINS
IMPLICIT NONE
INTEGER,INTENT(IN):: diag_solver
LOGICAL,INTENT(IN)::parallel
print *,diag_solver,parallel
SELECT CASE(diag_solver)
CASE (diag_elpa)
IF (parallel) THEN
......
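The interface change above splits the old INTENT(INOUT) argument ne into ne (IN, the number of eigenvalues requested) and ne_found (OUT, the number the solver actually returned). A minimal standalone sketch of that calling convention (illustrative, not FLEUR code; the solver body is a stand-in):
program demo_ne_found
  implicit none
  integer :: ne, ne_found
  ne = 100                        ! eigenvalues requested by the caller
  call fake_diag(ne, ne_found)
  print *, 'requested:', ne, ' found:', ne_found
contains
  subroutine fake_diag(ne, ne_found)
    integer, intent(in)  :: ne
    integer, intent(out) :: ne_found
    ne_found = min(ne, 42)        ! a real solver may return fewer than requested
  end subroutine fake_diag
end program demo_ne_found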
@@ -77,7 +77,7 @@ CONTAINS
!+odim
! ..
! .. Local Scalars ..
INTEGER jsp,nk,nred,ne_all,n_u_in
INTEGER jsp,nk,nred,ne_all,n_u_in,ne_found
INTEGER iter,ne,matsize ,nrec,lh0
INTEGER nspins,isp,l,i,j,err,gwc
INTEGER mlotot,mlolotot,mlot_d,mlolot_d,nlot_d
@@ -526,33 +526,33 @@ CONTAINS
CALL eigen_diag(jsp,eig_id,it,atoms,dimension,matsize,mpi, n_rank,n_size,ne,nk,lapw,input,&
nred,sub_comm, sym,matind,kveclo, noco,cell,bkpt,enpara%el0,jij,l_wu,&
oneD,td,ud, eig,a,b,z)
oneD,td,ud, eig,a,b,z,ne_found)
!
!---> output results
!
CALL timestart("EV output")
ne_all=ne
ne_all=ne_found
#if defined(CPP_MPI)
!Collect number of all eigenvalues
CALL MPI_ALLREDUCE(ne,ne_all,1,MPI_INTEGER,MPI_SUM, sub_comm,ierr)
CALL MPI_ALLREDUCE(ne_found,ne_all,1,MPI_INTEGER,MPI_SUM, sub_comm,ierr)
#endif
!jij%eig_l = 0.0 ! need not be used, if hdf-file is present
#if ( !defined( CPP_INVERSION) )
IF (.not.jij%l_J) THEN
z(:lapw%nmat,:ne) = conjg(z(:lapw%nmat,:ne))
z(:lapw%nmat,:ne_found) = conjg(z(:lapw%nmat,:ne_found))
ELSE
z(:lapw%nmat,:ne) = cmplx(0.0,0.0)
z(:lapw%nmat,:ne_found) = cmplx(0.0,0.0)
ENDIF
#endif
CALL write_eig(eig_id, nk,jsp,ne,ne_all,lapw%nv(jsp),lapw%nmat,&
CALL write_eig(eig_id, nk,jsp,ne_found,ne_all,lapw%nv(jsp),lapw%nmat,&
lapw%k1(:lapw%nv(jsp),jsp),lapw%k2 (:lapw%nv(jsp),jsp),lapw%k3(:lapw%nv(jsp),jsp),&
bkpt, kpts%wtkpt(nk),eig(:ne),enpara%el0(0:,:,jsp), enpara%ello0(:,:,jsp),enpara%evac0(:,jsp),&
atoms%nlotot,kveclo,n_size,n_rank,z=z(:,:ne))
bkpt, kpts%wtkpt(nk),eig(:ne_found),enpara%el0(0:,:,jsp), enpara%ello0(:,:,jsp),enpara%evac0(:,jsp),&
atoms%nlotot,kveclo,n_size,n_rank,z=z(:,:ne_found))
IF (noco%l_noco) THEN
CALL write_eig(eig_id, nk,2,ne,ne_all,lapw%nv(2),lapw%nmat,&
CALL write_eig(eig_id, nk,2,ne_found,ne_all,lapw%nv(2),lapw%nmat,&
lapw%k1(:lapw%nv(2),2),lapw%k2 (:lapw%nv(2),2),lapw%k3(:lapw%nv(2),2),&
bkpt, kpts%wtkpt(nk),eig(:ne),enpara%el0(0:,:,2), enpara%ello0(:,:,2),enpara%evac0(:,2),&
bkpt, kpts%wtkpt(nk),eig(:ne_found),enpara%el0(0:,:,2), enpara%ello0(:,:,2),enpara%evac0(:,2),&
atoms%nlotot,kveclo)
ENDIF
......
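In the hunk above each MPI rank now reports its local ne_found, and MPI_ALLREDUCE with MPI_SUM over sub_comm combines these into ne_all, the total number of eigenvalues. A reduced standalone sketch of that reduction (using MPI_COMM_WORLD in place of sub_comm; illustrative only):
program demo_collect_ne
  use mpi
  implicit none
  integer :: ierr, ne_found, ne_all
  call MPI_Init(ierr)
  ne_found = 10                   ! eigenvalues found on this rank
  call MPI_Allreduce(ne_found, ne_all, 1, MPI_INTEGER, MPI_SUM, MPI_COMM_WORLD, ierr)
  print *, 'total eigenvalues across ranks:', ne_all
  call MPI_Finalize(ierr)
end program demo_collect_ne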