Commit 2e771840 authored by Daniel Wortmann

Merge branch 'develop' of iffgit.fz-juelich.de:fleur/fleur into develop

parents 59e03acc e2a9c5ec
@@ -30,9 +30,9 @@ CONTAINS
     IMPLICIT NONE
     INTEGER, INTENT(INOUT) :: solver
     CLASS(t_mat), INTENT(INOUT) :: smat,hmat
-    CLASS(t_mat), ALLOCATABLE, INTENT(OUT) :: ev
-    INTEGER, INTENT(INOUT) :: ne
-    REAL, INTENT(OUT) :: eig(:)
+    CLASS(t_mat), ALLOCATABLE, INTENT(OUT) :: ev ! eigenvectors
+    INTEGER, INTENT(INOUT) :: ne ! number of eigenpairs to be found
+    REAL, INTENT(OUT) :: eig(:) ! eigenvalues
     !Only for chase
     INTEGER,OPTIONAL, INTENT(IN) :: ikpt
@@ -64,7 +64,7 @@ CONTAINS
     CASE (diag_scalapack)
        CALL scalapack(hmat,smat,ne,eig,ev)
     CASE (diag_magma)
-       !CALL magma_diag(hmat,smat,ne,eig,ev)
+       CALL magma_diag(hmat,smat,ne,eig,ev)
     CASE (diag_cusolver)
        CALL cusolver_diag(hmat,smat,ne,eig,ev)
     CASE (diag_lapack)
......
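The INTENT comments added above pin down the calling convention of eigen_diag: ne carries the number of eigenpairs requested on entry and the number found on exit, eig returns the eigenvalues, and the allocatable ev returns the eigenvectors. A minimal hypothetical caller sketch follows; the hmat/smat setup and the solver constant are assumed rather than taken from this commit, and the optional ChASE-specific arguments (ikpt, ...) are omitted:

   ! Hypothetical caller of eigen_diag; hmat/smat and solver are assumed.
   INTEGER                   :: solver, ne
   REAL, ALLOCATABLE         :: eig(:)
   CLASS(t_mat), ALLOCATABLE :: ev

   ne = 30                    ! in: number of eigenpairs wanted
   ALLOCATE(eig(ne))
   CALL eigen_diag(solver,hmat,smat,ne,eig,ev)
   ! out: ne = eigenpairs found, eig(:ne) = eigenvalues, ev = eigenvectors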
@@ -18,8 +18,8 @@ CONTAINS
    ! ne ....... number of ev's searched (and found) on this node
    !            On input, overall number of ev's searched,
    !            On output, local number of ev's found
-   ! eig ...... eigenvalues, output
-   ! ev ....... eigenvectors, output
+   ! eig ...... all eigenvalues, output
+   ! ev ....... local eigenvectors, output
    !
    !----------------------------------------------------
@@ -453,21 +453,23 @@ CONTAINS
     CALL MPI_COMM_FREE(mpi_comm_cols,err)
 #endif
     !
-    ! Put those eigenvalues expected by chani to eig, i.e. for
-    ! process i these are eigenvalues i+1, np+i+1, 2*np+i+1...
-    ! Only num=num2/np eigenvalues per process
+    ! Each process has all eigenvalues in output
+    eig(:num2) = eig2(:num2)
+    DEALLOCATE(eig2)
     !
+    !
+    ! Redistribute eigenvectors from ScaLAPACK distribution to each process, i.e. for
+    ! process i these are eigenvectors i+1, np+i+1, 2*np+i+1...
+    ! Only num=num2/np eigenvectors per process
     !
     num=FLOOR(REAL(num2)/np)
     IF (myid.LT.num2-(num2/np)*np) num=num+1
     ne=0
     DO i=myid+1,num2,np
        ne=ne+1
-       eig(ne)=eig2(i)
+       !eig(ne)=eig2(i)
     ENDDO
-    DEALLOCATE(eig2)
     !
-    ! Redistribute eigvec from ScaLAPACK distribution to each process
-    ! having all eigenvectors corresponding to his eigenvalues as above
-    !
     ALLOCATE(t_mpimat::ev)
     CALL ev%init(hmat%l_real,hmat%global_size1,hmat%global_size1,hmat%blacsdata%mpi_com,.FALSE.)
......
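The cyclic layout described in the comments above (process myid keeps eigenvectors myid+1, np+myid+1, 2*np+myid+1, ...) and the local-count formula num=FLOOR(REAL(num2)/np) plus a remainder correction can be checked in isolation. A self-contained sketch with illustrative values for np and num2 (not taken from the commit):

PROGRAM roundrobin
   ! Standalone illustration of the cyclic distribution described above:
   ! process myid owns items myid+1, np+myid+1, 2*np+myid+1, ...
   IMPLICIT NONE
   INTEGER :: np, myid, num2, num, ne, i

   np   = 4      ! number of MPI processes (example value)
   num2 = 10     ! total number of eigenpairs found (example value)

   DO myid = 0, np-1
      ! local count: num2/np, plus one for the first MOD(num2,np) ranks
      num = num2/np
      IF (myid < num2 - (num2/np)*np) num = num + 1
      ne = 0
      DO i = myid+1, num2, np
         ne = ne + 1
         PRINT '(A,I0,A,I0)', 'rank ', myid, ' owns eigenvector ', i
      END DO
      IF (ne /= num) STOP 'count mismatch'   ! sanity check
   END DO
END PROGRAM roundrobin

For np=4 and num2=10 this prints three indices for ranks 0 and 1 and two for ranks 2 and 3, matching the remainder correction.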
@@ -17,8 +17,8 @@ CONTAINS
    ! ne ....... number of ev's searched (and found) on this node
    !            On input, overall number of ev's searched,
    !            On output, local number of ev's found
-   ! eig ...... eigenvalues, output
-   ! ev ....... eigenvectors, output
+   ! eig ...... all eigenvalues, output
+   ! ev ....... local eigenvectors, output
    !
    !----------------------------------------------------
    !
@@ -238,22 +238,22 @@ CONTAINS
        !ENDIF
     ENDIF
     !
-    ! Put those eigenvalues expected by chani to eig, i.e. for
-    ! process i these are eigenvalues i+1, np+i+1, 2*np+i+1...
-    ! Only num=num2/np eigenvalues per process
+    ! Each process has all eigenvalues in output
+    eig(:num2) = eig2(:num2)
+    DEALLOCATE(eig2)
     !
+    !
+    ! Redistribute eigenvectors from ScaLAPACK distribution to each process, i.e. for
+    ! process i these are eigenvectors i+1, np+i+1, 2*np+i+1...
+    ! Only num=num2/np eigenvectors per process
     !
     num=FLOOR(REAL(num2)/np)
     IF (myid.LT.num2-(num2/np)*np) num=num+1
     ne=0
     DO i=myid+1,num2,np
        ne=ne+1
-       eig(ne)=eig2(i)
+       !eig(ne)=eig2(i)
     ENDDO
-    DEALLOCATE(eig2)
     !
-    ! Redistribute eigvec from ScaLAPACK distribution to each process
-    ! having all eigenvectors corresponding to his eigenvalues as above
-    !
     ALLOCATE(t_mpimat::ev)
     CALL ev%init(ev_dist%l_real,ev_dist%global_size1,ev_dist%global_size1,ev_dist%blacsdata%mpi_com,.FALSE.)
     CALL ev%copy(ev_dist,1,1)
......
@@ -199,6 +199,12 @@ CONTAINS
        end select
     END IF
+    ! Solve generalized eigenvalue problem.
+    ! ne_all ... number of eigenpairs searched (and found) on this node
+    !            on input, overall number of eigenpairs searched,
+    !            on output, local number of eigenpairs found
+    ! eig ...... all eigenvalues, output
+    ! zMat ..... local eigenvectors, output
     CALL eigen_diag(solver,hmat,smat,ne_all,eig,zMat,nk,jsp,iter)
     CALL smat%free()
@@ -220,8 +226,18 @@ CONTAINS
     IF (.NOT.zMat%l_real) THEN
        zMat%data_c(:lapw%nmat,:ne_found) = CONJG(zMat%data_c(:lapw%nmat,:ne_found))
     END IF
-    CALL write_eig(eig_id, nk,jsp,ne_found,ne_all,&
-                   eig(:ne_found),n_start=mpi%n_size,n_end=mpi%n_rank,zMat=zMat)
+    IF (mpi%n_rank == 0) THEN
+       ! Only process 0 writes out the value of ne_all and the
+       ! eigenvalues.
+       ! Trying to use MPI_PUT for the very same slot by all processes
+       ! causes problems with IntelMPI/2019
+       ! May 2019 U. Alekseeva
+       CALL write_eig(eig_id, nk,jsp,ne_found,ne_all,&
+                      eig(:ne_all),n_start=mpi%n_size,n_end=mpi%n_rank,zMat=zMat)
+    ELSE
+       CALL write_eig(eig_id, nk,jsp,ne_found,&
+                      n_start=mpi%n_size,n_end=mpi%n_rank,zMat=zMat)
+    ENDIF
     neigBuffer(nk,jsp) = ne_found
 #if defined(CPP_MPI)
     ! RMA synchronization
......
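The comment above records why the write was serialized: under IntelMPI/2019, several processes issuing MPI_PUT to the very same window slot caused problems, so only rank 0 now writes the shared data (ne_all and the eigenvalues) while every rank still writes its own eigenvectors. A generic sketch of that single-writer pattern, with the window setup and all names assumed rather than taken from FLEUR's eig66 code:

! Sketch of the single-writer pattern adopted above, in plain MPI RMA.
! Window, displacement, and names are assumptions, not FLEUR's code.
SUBROUTINE write_shared_slot(win, target_pe, disp, buf, n)
   USE mpi
   IMPLICIT NONE
   INTEGER, INTENT(IN) :: win, target_pe, n
   INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: disp
   DOUBLE PRECISION, INTENT(IN) :: buf(n)
   INTEGER :: rank, ierr

   CALL MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
   IF (rank == 0) THEN   ! single writer: no concurrent puts to one slot
      CALL MPI_WIN_LOCK(MPI_LOCK_EXCLUSIVE, target_pe, 0, win, ierr)
      CALL MPI_PUT(buf, n, MPI_DOUBLE_PRECISION, target_pe, disp, &
                   n, MPI_DOUBLE_PRECISION, win, ierr)
      CALL MPI_WIN_UNLOCK(target_pe, win, ierr)
   END IF
END SUBROUTINE write_shared_slot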
@@ -265,7 +265,8 @@ CONTAINS
     pe=d%pe_basis(nk,jspin)
     slot=d%slot_basis(nk,jspin)
-    !write the number of eigenvalues values
+    !write the number of eigenvalues
+    !only one process needs to do it
     IF (PRESENT(neig_total)) THEN
        CALL MPI_WIN_LOCK(MPI_LOCK_EXCLUSIVE,pe,0,d%neig_handle,e)
        ALLOCATE(tmp_int(1))
@@ -275,25 +276,15 @@ CONTAINS
        DEALLOCATE(tmp_int)
     ENDIF
     !write the eigenvalues
+    !only one process needs to do it
     IF (PRESENT(eig).OR.PRESENT(w_iks)) THEN
        ALLOCATE(tmp_real(d%size_eig))
        tmp_real=1E99
        if (PRESENT(EIG)) THEN
-          n1=1;n3=1
-          IF (PRESENT(n_rank)) n1=n_rank+1
-          IF (PRESENT(n_size)) n3=n_size
-          n2=SIZE(eig)*n3+n1-1
-          nn=1
-          DO n=n1,min(n2,d%size_eig),n3
-             tmp_real(n)=eig(nn)
-             nn=nn+1
-          ENDDO
+          tmp_real(:d%size_eig) = eig(:d%size_eig)
           CALL MPI_WIN_LOCK(MPI_LOCK_EXCLUSIVE,pe,0,d%eig_handle,e)
-          IF (n3.ne.1) THEN
-             CALL MPI_ACCUMULATE(tmp_real,d%size_eig,MPI_DOUBLE_PRECISION,pe,slot,d%size_eig,MPI_DOUBLE_PRECISION,MPI_MIN,d%eig_handle,e)
-          ELSE
              CALL MPI_PUT(tmp_real,d%size_eig,MPI_DOUBLE_PRECISION,pe,slot,d%size_eig,MPI_DOUBLE_PRECISION,d%eig_handle,e)
-          ENDIF
           CALL MPI_WIN_UNLOCK(pe,d%eig_handle,e)
        END if
        IF (PRESENT(w_iks)) THEN
@@ -304,6 +295,9 @@ CONTAINS
        END IF
        DEALLOCATE(tmp_real)
     ENDIF
+    !write the eigenvectors
+    !all processes participate
     IF (PRESENT(zmat)) THEN
        tmp_size=zmat%matsize1
        ALLOCATE(tmp_real(tmp_size))
......
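For reference, the removed branch above merged distributed eigenvalues with a sentinel trick: each rank filled its buffer with 1E99, wrote only its own strided entries, and MPI_ACCUMULATE with MPI_MIN combined the buffers, since any real eigenvalue beats the sentinel. Because every writing process now holds all eigenvalues, a single MPI_PUT suffices. A compact sketch of the old merge idea, with the window setup and argument names assumed:

SUBROUTINE accumulate_strided(win, pe, disp, my_eig, ntotal, rank, nprocs)
   ! Sketch of the sentinel/MPI_MIN merge used by the removed branch:
   ! each rank contributes only its strided entries, and MPI_MIN keeps
   ! the real value wherever any rank wrote one, because the 1D99
   ! sentinel loses every comparison.
   USE mpi
   IMPLICIT NONE
   INTEGER, INTENT(IN) :: win, pe, ntotal, rank, nprocs
   INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: disp
   DOUBLE PRECISION, INTENT(IN) :: my_eig(*)
   DOUBLE PRECISION :: tmp_real(ntotal)
   INTEGER :: n, ierr

   tmp_real = 1D99                        ! sentinel everywhere
   DO n = rank+1, ntotal, nprocs          ! this rank's strided entries
      tmp_real(n) = my_eig((n-rank-1)/nprocs + 1)
   END DO
   CALL MPI_WIN_LOCK(MPI_LOCK_EXCLUSIVE, pe, 0, win, ierr)
   CALL MPI_ACCUMULATE(tmp_real, ntotal, MPI_DOUBLE_PRECISION, pe, disp, &
                       ntotal, MPI_DOUBLE_PRECISION, MPI_MIN, win, ierr)
   CALL MPI_WIN_UNLOCK(pe, win, ierr)
END SUBROUTINE accumulate_strided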