!--------------------------------------------------------------------------------
! Copyright (c) 2016 Peter Grünberg Institut, Forschungszentrum Jülich, Germany
! This file is part of FLEUR and available as free software under the conditions
! of the MIT license as expressed in the LICENSE file in more detail.
!--------------------------------------------------------------------------------

MODULE m_eigen_hssetup
CONTAINS
  !> The setup of the Hamiltonian and Overlap matrices is performed here
  !!
  !! The following steps are executed:
  !! 1. The matrices are allocated (in the noco-case these are 2x2-arrays of matrices)
  !! 2. The Interstitial contribution is calculated (in hs_int())
  !! 3. The MT-part is calculated (in hsmt() )
  !! 4. The vacuum part is added (in hsvac())
  !! 5. The matrices are copied to the final matrix; in the noco-case the full matrix is constructed from the four parts.
  
  SUBROUTINE eigen_hssetup(isp,mpi,DIMENSION,hybrid,enpara,input,vacuum,noco,sym,&
       stars,cell,sphhar,atoms,ud,td,v,lapw,l_real,smat_final,hmat_final)
    USE m_judft   !provides judft_was_argument (assumed; may already be available via m_types)
    USE m_types
    USE m_types_mpimat
    USE m_types_gpumat
    USE m_hs_int
    USE m_hsvac
    USE m_od_hsvac
    USE m_hsmt
    USE m_eigen_redist_matrix
    IMPLICIT NONE
    INTEGER,INTENT(IN)           :: isp
    TYPE(t_mpi),INTENT(IN)       :: mpi
    TYPE(t_dimension),INTENT(IN) :: DIMENSION
    TYPE(t_hybrid),INTENT(IN)    :: hybrid
    TYPE(t_enpara),INTENT(IN)    :: enpara
    TYPE(t_input),INTENT(IN)     :: input
    TYPE(t_vacuum),INTENT(IN)    :: vacuum
    TYPE(t_noco),INTENT(IN)      :: noco
    TYPE(t_sym),INTENT(IN)       :: sym  
    TYPE(t_stars),INTENT(IN)     :: stars
    TYPE(t_cell),INTENT(IN)      :: cell
    TYPE(t_sphhar),INTENT(IN)    :: sphhar
    TYPE(t_atoms),INTENT(IN)     :: atoms
    TYPE(t_usdus),INTENT(IN)     :: ud
    TYPE(t_tlmplm),INTENT(IN)    :: td
    TYPE(t_lapw),INTENT(IN)      :: lapw
    TYPE(t_potden),INTENT(IN)    :: v
    CLASS(t_mat),ALLOCATABLE,INTENT(INOUT)   :: smat_final,hmat_final
    LOGICAL,INTENT(IN)           :: l_real
    

    
    CLASS(t_mat),ALLOCATABLE :: smat(:,:),hmat(:,:)
    INTEGER :: i,j,ispin,nspins
    
    !Matrices for Hamiltonian and Overlap
    !In the noco case we need a 2x2 array of matrices (four spin blocks)
    nspins=MERGE(2,1,noco%l_noco)
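    !Choose the matrix type: serial (optionally GPU-accelerated) or MPI-distributed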
    IF (mpi%n_size==1) THEN       
       IF (judft_was_argument("-gpu")) THEN
          ALLOCATE(t_gpumat::smat(nspins,nspins),hmat(nspins,nspins))
       ELSE
          ALLOCATE(t_mat::smat(nspins,nspins),hmat(nspins,nspins))
       ENDIF
    ELSE
       ALLOCATE(t_mpimat::smat(nspins,nspins),hmat(nspins,nspins))
    ENDIF
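    !Initialize each spin block; its dimension is the number of LAPW basis
    !functions of the respective spin plus the total number of local orbitals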
    DO i=1,nspins
       DO j=1,nspins
          CALL smat(i,j)%init(l_real,lapw%nv(i)+atoms%nlotot,lapw%nv(j)+atoms%nlotot,mpi%sub_comm,.false.)
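          !The Hamiltonian block inherits size, type and distribution from the overlap block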
          CALL hmat(i,j)%init(smat(i,j))
       ENDDO
    ENDDO

    
    CALL timestart("Interstitial part")
    !Generate interstitial part of Hamiltonian
    CALL hs_int(input,noco,stars,lapw,mpi,cell,isp,v%pw_w,smat,hmat)
    CALL timestop("Interstitial part")
    CALL timestart("MT part")
    !MT part of the Hamiltonian. In the noco case we need a loop over the local spin of the atoms
    DO ispin=MERGE(1,isp,noco%l_noco),MERGE(2,isp,noco%l_noco)
       CALL hsmt(atoms,sym,enpara,ispin,input,mpi,noco,cell,lapw,ud,td,smat,hmat)
    ENDDO
    CALL timestop("MT part")
   
    !Vacuum contributions
    IF (input%film) THEN
       CALL timestart("Vacuum part")
       CALL hsvac(vacuum,stars,DIMENSION,mpi,isp,input,v,enpara%evac,cell,&
            lapw,sym, noco,hmat,smat)
       CALL timestop("Vacuum part")
    ENDIF
    !Now copy the data into the final matrix
    ! Collect the four noco parts into a single matrix
    ! In the collinear case only a copy is done
    ! In the parallel case a redistribution also happens
    ALLOCATE(smat_final,mold=smat(1,1))
    ALLOCATE(hmat_final,mold=smat(1,1))
    CALL timestart("Matrix redistribution")
    CALL eigen_redist_matrix(mpi,lapw,atoms,smat,smat_final)
    CALL eigen_redist_matrix(mpi,lapw,atoms,hmat,hmat_final,smat_final)
    CALL timestop("Matrix redistribution")

  END SUBROUTINE eigen_hssetup
END MODULE m_eigen_hssetup