!--------------------------------------------------------------------------------
! Copyright (c) 2016 Peter Grünberg Institut, Forschungszentrum Jülich, Germany
! This file is part of FLEUR and available as free software under the conditions
! of the MIT license as expressed in the LICENSE file in more detail.
!--------------------------------------------------------------------------------
!<@brief
!<The eigen routine sets up and solves the generalized eigenvalue problem
!More description at end of file
MODULE m_eigen
   USE m_juDFT
   IMPLICIT NONE
CONTAINS
   !>The eigenvalue problem is constructed and solved in this routine. The following steps are performed:
   !> 1. Preparation: generate energy parameters, open eig-file
   !> 2. CALL to mt_setup(): this constructs the local Hamiltonian (i.e. the Hamiltonian in the \f$ u,\dot u, u_{lo} \f$ basis); LDA+U is also added here
   !> 3. within the (collinear) spin and k-point loop: CALL to eigen_hssetup() to generate the matrices, CALL to eigen_diag() to perform the diagonalization
   !> 4. writing (saving) of eigenvectors
   !>
   !> The matrices generated and diagonalized here are of type t_mat as defined in m_types_mat.
   !>@author D. Wortmann
   SUBROUTINE eigen(mpi,stars,sphhar,atoms,xcpot,sym,kpts,DIMENSION,vacuum,input,&
                    cell,enpara,banddos,noco,oneD,hybrid,iter,eig_id,results,inden,v,vx)

#include"cpp_double.h"
      USE m_types
      USE m_eigen_hssetup
      USE m_pot_io
      USE m_eigen_diag
      USE m_add_vnonlocal
      USE m_subvxc
      !USE m_hsefunctional
      USE m_mt_setup
      USE m_util
      USE m_io_hybrid
      !USE m_icorrkeys
      USE m_eig66_io, ONLY : open_eig, write_eig, close_eig,read_eig
      USE m_xmlOutput
#ifdef CPP_MPI
      USE m_mpi_bc_potden
#endif
      USE m_symmetrize_matrix
      USE m_unfold_band_kpts !used for unfolding bands
      USE m_types_mpimat

      IMPLICIT NONE
      TYPE(t_results),INTENT(INOUT):: results
      CLASS(t_xcpot),INTENT(IN)    :: xcpot
      TYPE(t_mpi),INTENT(IN)       :: mpi
      TYPE(t_dimension),INTENT(IN) :: DIMENSION
      TYPE(t_oneD),INTENT(IN)      :: oneD
      TYPE(t_hybrid),INTENT(INOUT) :: hybrid
      TYPE(t_enpara),INTENT(INOUT) :: enpara
      TYPE(t_input),INTENT(IN)     :: input
      TYPE(t_vacuum),INTENT(IN)    :: vacuum
      TYPE(t_noco),INTENT(IN)      :: noco
      TYPE(t_banddos),INTENT(IN)   :: banddos
      TYPE(t_sym),INTENT(IN)       :: sym
      TYPE(t_stars),INTENT(IN)     :: stars
      TYPE(t_cell),INTENT(IN)      :: cell
      TYPE(t_kpts),INTENT(INOUT)   :: kpts
      TYPE(t_sphhar),INTENT(IN)    :: sphhar
      TYPE(t_atoms),INTENT(IN)     :: atoms
      TYPE(t_potden),INTENT(IN)    :: inden,vx
      TYPE(t_potden),INTENT(INOUT) :: v    !u_setup will modify the potential matrix

#ifdef CPP_MPI
      INCLUDE 'mpif.h'
#endif

!    EXTERNAL MPI_BCAST    !only used by band_unfolding to broadcast the gvec

      ! Scalar Arguments
      INTEGER,INTENT(IN)    :: iter
      INTEGER,INTENT(IN)    :: eig_id

      ! Local Scalars
      INTEGER jsp,nk,nred,ne_all,ne_found
      INTEGER ne
      INTEGER isp,i,j,err
      LOGICAL l_wu,l_file,l_real,l_zref
      INTEGER :: solver=0
      ! Local Arrays
      INTEGER              :: ierr
      INTEGER              :: neigBuffer(kpts%nkpt,input%jspins)

      COMPLEX              :: unfoldingBuffer(SIZE(results%unfolding_weights,1),kpts%nkpt,input%jspins) ! needed for unfolding bandstructure mpi case

      REAL,    ALLOCATABLE :: bkpt(:)
      REAL,    ALLOCATABLE :: eig(:), eigBuffer(:,:,:)

      INTEGER                   :: jsp_m, i_kpt_m, i_m

      TYPE(t_tlmplm)            :: td
      TYPE(t_usdus)             :: ud
      TYPE(t_lapw)              :: lapw
      CLASS(t_mat), ALLOCATABLE :: zMat
      CLASS(t_mat), ALLOCATABLE :: hmat,smat
      CLASS(t_mat), ALLOCATABLE :: smat_unfold !used for unfolding bandstructure

      ! Variables for HF or hybrid functional calculation
      INTEGER                   :: comm(kpts%nkpt),irank2(kpts%nkpt),isize2(kpts%nkpt), dealloc_stat
      character(len=300)        :: errmsg

      call ud%init(atoms,input%jspins)
      ALLOCATE(eig(DIMENSION%neigd))
      ALLOCATE(bkpt(3))
      ALLOCATE(eigBuffer(DIMENSION%neigd,kpts%nkpt,input%jspins))

      l_real=sym%invs.AND..NOT.noco%l_noco

      ! check if z-reflection trick can be used
      l_zref=(sym%zrfs.AND.(SUM(ABS(kpts%bk(3,:kpts%nkpt))).LT.1e-9).AND..NOT.noco%l_noco)
      IF (mpi%n_size > 1) l_zref = .FALSE.

#ifdef CPP_MPI
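      ! Make sure every MPI rank works with the current potential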
      CALL mpi_bc_potden(mpi,stars,sphhar,atoms,input,vacuum,oneD,noco,v)
#endif

      !IF (mpi%irank.EQ.0) CALL openXMLElementFormPoly('iteration',(/'numberForCurrentRun','overallNumber      '/),(/iter,v%iter/),&
      !                                                RESHAPE((/19,13,5,5/),(/2,2/)))

      ! Set up and solve the eigenvalue problem
      !   loop over spins
      !     set up k-point independent t(l'm',lm) matrices
      CALL mt_setup(atoms,sym,sphhar,input,noco,enpara,inden,v,mpi,results,DIMENSION,td,ud)

      neigBuffer = 0
      results%neig = 0
      results%eig = 1.0e300
      eigBuffer = 1.0e300
      unfoldingBuffer = CMPLX(0.0,0.0)
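
      ! Loop over spins (only a single loop in the noncollinear case) and over
      ! the k-points assigned to this MPI rank (n_start / n_stride distribution)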

      DO jsp = 1,MERGE(1,input%jspins,noco%l_noco)
         k_loop:DO nk = mpi%n_start,kpts%nkpt,mpi%n_stride

            ! Set up lapw list
            CALL lapw%init(input,noco, kpts,atoms,sym,nk,cell,l_zref, mpi)
            call timestart("Setup of H&S matrices")
            CALL eigen_hssetup(jsp,mpi,DIMENSION,hybrid,enpara,input,vacuum,noco,sym,&
                               stars,cell,sphhar,atoms,ud,td,v,lapw,l_real,smat,hmat)
            CALL timestop("Setup of H&S matrices")

            IF(hybrid%l_hybrid.OR.input%l_rdmft) THEN
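               ! For hybrid-functional or RDMFT runs: dump the upper-left 5x5 block of the
               ! Hamiltonian to unit 1233 (diagnostics) and write the overlap matrix to file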

               DO i = 1, hmat%matsize1
                  DO j = 1, i
                     IF (hmat%l_real) THEN
                        IF ((i.LE.5).AND.(j.LE.5)) THEN
                           WRITE(1233,'(2i7,2f15.8)') i, j, hmat%data_r(i,j), hmat%data_r(j,i)
                        END IF
                     ELSE
                        IF ((i.LE.5).AND.(j.LE.5)) THEN
                           WRITE(1233,'(2i7,4f15.8)') i, j, hmat%data_c(i,j), hmat%data_c(j,i)
                        END IF
                     ENDIF
                  END DO
               END DO

               ! Write overlap matrix smat to direct access file olap
               print *,"Wrong overlap matrix used, fix this later"
               CALL write_olap(smat,(jsp-1)*kpts%nkpt+nk) ! Note: At this moment this only works without MPI parallelization
            END IF ! hybrid%l_hybrid.OR.input%l_rdmft

            IF(hybrid%l_hybrid) THEN
               PRINT *,"TODO"
!             STOP "TODO"
               PRINT *,"BASIS:", lapw%nv(jsp), atoms%nlotot
               IF (hybrid%l_addhf) CALL add_Vnonlocal(nk,lapw,atoms,hybrid,dimension,kpts,jsp,results,xcpot,noco,hmat)

               IF(hybrid%l_subvxc) THEN
                  CALL subvxc(lapw,kpts%bk(:,nk),DIMENSION,input,jsp,v%mt(:,0,:,:),atoms,ud,hybrid,enpara%el0,enpara%ello0,&
                              sym,cell,sphhar,stars,xcpot,mpi,oneD,hmat,vx)
               END IF
            END IF ! hybrid%l_hybrid

            l_wu=.FALSE.
            ne_all=DIMENSION%neigd
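            ! Restrict the number of requested eigenvalues to the matrix dimension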
            IF(ne_all < 0) ne_all = lapw%nmat
            IF(ne_all > lapw%nmat) ne_all = lapw%nmat

            !Try to symmetrize matrix
            CALL symmetrize_matrix(mpi,noco,kpts,nk,hmat,smat)

            IF (banddos%unfoldband) THEN
               select type(smat)
               type is (t_mat)
                  allocate(t_mat::smat_unfold)
                  select type(smat_unfold)
                  type is (t_mat)
                     smat_unfold=smat
                  end select
               type is (t_mpimat)
                  allocate(t_mpimat::smat_unfold)
                  select type(smat_unfold)
                  type is (t_mpimat)
                     smat_unfold=smat
                  end select
               end select
            END IF

            ! Solve generalized eigenvalue problem.
            !     ne_all ... number of eigenpairs searched (and found) on this node
            !                on input, overall number of eigenpairs searched,
            !                on output, local number of eigenpairs found
            !     eig ...... all eigenvalues, output
            !     zMat ..... local eigenvectors, output
            CALL eigen_diag(solver,hmat,smat,ne_all,eig,zMat,nk,jsp,iter)
              
            CALL smat%free()
            CALL hmat%free()
            DEALLOCATE(hmat,smat, stat=dealloc_stat, errmsg=errmsg)
            if(dealloc_stat /= 0) call juDFT_error("deallocate failed for hmat or smat",&
                                                   hint=errmsg, calledby="eigen.F90")

            ! Output results
            CALL timestart("EV output")
#if defined(CPP_MPI)
            ! Collect number of all eigenvalues
            ne_found=ne_all
            CALL MPI_ALLREDUCE(ne_found,ne_all,1,MPI_INTEGER,MPI_SUM, mpi%sub_comm,ierr)
            ne_all=MIN(DIMENSION%neigd,ne_all)
#else
            ne_found=ne_all
#endif
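            ! For complex eigenvectors, store the complex conjugate of the solver output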
            IF (.NOT.zMat%l_real) THEN
               zMat%data_c(:lapw%nmat,:ne_found) = CONJG(zMat%data_c(:lapw%nmat,:ne_found))
            END IF
            IF (mpi%n_rank == 0) THEN
                ! Only process 0 writes out the value of ne_all and the
                ! eigenvalues. 
                ! Trying to use MPI_PUT for the very same slot by all processes
                ! causes problems with IntelMPI/2019
                !        May 2019                 U. Alekseeva
                CALL write_eig(eig_id, nk,jsp,ne_found,ne_all,&
                           eig(:ne_all),n_start=mpi%n_size,n_end=mpi%n_rank,zMat=zMat)
                eigBuffer(:ne_all,nk,jsp) = eig(:ne_all)
            ELSE
                CALL write_eig(eig_id, nk,jsp,ne_found,&
                           n_start=mpi%n_size,n_end=mpi%n_rank,zMat=zMat)
            ENDIF
            neigBuffer(nk,jsp) = ne_found
#if defined(CPP_MPI)
            ! RMA synchronization
            CALL MPI_BARRIER(mpi%MPI_COMM,ierr)
#endif
            CALL timestop("EV output")

            IF (banddos%unfoldband) THEN
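               ! Compute the band-unfolding weights for this k-point, then release the stored overlap matrix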
               CALL calculate_plot_w_n(banddos,cell,kpts,smat_unfold,zMat,lapw,nk,jsp,eig,results,input,atoms,unfoldingBuffer,mpi)
               CALL smat_unfold%free()
               DEALLOCATE(smat_unfold, stat=dealloc_stat, errmsg=errmsg)
               if(dealloc_stat /= 0) call juDFT_error("deallocate failed for smat_unfold",&
                                                      hint=errmsg, calledby="eigen.F90")
            END IF

            call zMat%free()
            deallocate(zMat)
         END DO  k_loop
      END DO ! spin loop ends

#ifdef CPP_MPI
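      ! Combine eigenvalue counts, eigenvalues, and unfolding weights from all MPI ranks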
      IF (banddos%unfoldband) THEN
         results%unfolding_weights = CMPLX(0.0,0.0)
       CALL MPI_ALLREDUCE(unfoldingBuffer,results%unfolding_weights,SIZE(results%unfolding_weights,1)*SIZE(results%unfolding_weights,2)*SIZE(results%unfolding_weights,3),CPP_MPI_COMPLEX,MPI_SUM,mpi%mpi_comm,ierr)
      END IF
      CALL MPI_ALLREDUCE(neigBuffer,results%neig,kpts%nkpt*input%jspins,MPI_INTEGER,MPI_SUM,mpi%mpi_comm,ierr)
      CALL MPI_ALLREDUCE(eigBuffer(:dimension%neigd,:,:),results%eig(:dimension%neigd,:,:),dimension%neigd*kpts%nkpt*input%jspins,MPI_DOUBLE_PRECISION,MPI_MIN,mpi%mpi_comm,ierr)
      CALL MPI_BARRIER(mpi%MPI_COMM,ierr)

#else
      results%neig(:,:) = neigBuffer(:,:)
      results%eig(:dimension%neigd,:,:) = eigBuffer(:dimension%neigd,:,:)
      results%unfolding_weights(:,:,:) = unfoldingBuffer(:,:,:)
#endif

      !IF (hybrid%l_hybrid.OR.hybrid%l_calhf) CALL close_eig(eig_id)

      IF( input%jspins .EQ. 1 .AND. hybrid%l_hybrid ) THEN
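         ! Spin-unpolarized case: the HF exchange contributions carry a spin degeneracy factor of 2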
         results%te_hfex%valence = 2*results%te_hfex%valence
         IF(hybrid%l_calhf) results%te_hfex%core = 2*results%te_hfex%core
      END IF
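      ! Record the lowest energy parameter over all valence channels and local orbitals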
      enpara%epara_min = MINVAL(enpara%el0)
      enpara%epara_min = MIN(MINVAL(enpara%ello0),enpara%epara_min)
   END SUBROUTINE eigen
END MODULE m_eigen