Commit 5fb890d3 authored by S.Rost

Merge branch 'develop' of iffgit.fz-juelich.de:fleur/fleur into develop

parents da4b396a ee8384a9
@@ -140,7 +140,7 @@ IMPLICIT NONE
END SELECT
END SUBROUTINE chase_diag
SUBROUTINE chase_diag_noMPI(hmat,smat,ikpt,jsp,iter,ne,eig,zmat)
SUBROUTINE chase_diag_noMPI(hmat,smat,ikpt,jsp,iter,ne,eig,zmat)
USE m_types
USE m_judft
@@ -213,7 +213,7 @@ IMPLICIT NONE
ne = nev
CALL write_eig(chase_eig_id,ikpt,jsp,nev+nex,nev+nex,&
eigenvalues(:(nev+nex)),zmat=zMatTemp)
eigenvalues(:(nev+nex)),zmat=zMatTemp)
! --> recover the generalized eigenvectors z by solving z' = l^t * z
CALL dtrtrs('U','N','N',hmat%matsize1,nev,smat%data_r,smat%matsize1,zMatTemp%data_r,zmat%matsize1,info)
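! Assuming the overlap matrix was Cholesky-factorized above with UPLO='U' (S = U**T * U),
! the standard-problem eigenvectors z' relate to the generalized ones by z' = U*z, so the
! triangular solve U*z = z' done by dtrtrs (ztrtrs in the complex branch below) recovers z.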
@@ -268,7 +268,7 @@ IMPLICIT NONE
ne = nev
CALL write_eig(chase_eig_id,ikpt,jsp,nev+nex,nev+nex,&
eigenvalues(:(nev+nex)),zmat=zMatTemp)
eigenvalues(:(nev+nex)),zmat=zMatTemp)
! --> recover the generalized eigenvectors z by solving z' = l^t * z
CALL ztrtrs('U','N','N',hmat%matsize1,nev,smat%data_c,smat%matsize1,zMatTemp%data_c,zmat%matsize1,info)
@@ -307,7 +307,7 @@ IMPLICIT NONE
REAL, INTENT(OUT) :: eig(:)
INTEGER :: i, j, nev, nex, nbands,xoff,yoff,xlen,ylen,ierr,nb_x,nb_y
INTEGER :: info,irank
INTEGER :: info,myid,np
REAL :: scale !scaling of eigenvalues from scalapack
CLASS(t_mat), ALLOCATABLE :: zMatTemp
@@ -315,18 +315,20 @@ IMPLICIT NONE
REAL, ALLOCATABLE :: eigenvalues(:)
include 'mpif.h'
CALL MPI_COMM_RANK(MPI_COMM_WORLD,irank,info)
PRINT *,"Chase 1"
CALL MPI_COMM_RANK(hmat%mpi_com,myid,info)
CALL MPI_COMM_SIZE(hmat%mpi_com,np,info)
smat%blacs_desc=hmat%blacs_desc
call smat%generate_full_matrix()
call hmat%generate_full_matrix()
!Transform to standard problem using SCALAPACK
IF (hmat%l_real) THEN
CALL pdpotrf('U',smat%global_size1,smat%data_r,1,1,smat%blacs_desc,info)
PRINT *,"pdpotrf",info
IF (info.NE.0) THEN
WRITE (*,*) 'Error in pdpotrf: info =',info
CALL juDFT_error("Diagonalization failed",calledby="chase_diag")
ENDIF
CALL pdsygst(1,'U',smat%global_size1,hmat%data_r,1,1,smat%blacs_desc,smat%data_r,1,1,smat%blacs_desc,scale,info)
CALL pdsygst(1,'U',smat%global_size1,hmat%data_r,1,1,hmat%blacs_desc,smat%data_r,1,1,smat%blacs_desc,scale,info)
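! pdpotrf overwrites the upper triangle of S with its Cholesky factor U (S = U**T * U);
! pdsygst with itype=1 and UPLO='U' then reduces H*z = e*S*z to the standard form
! inv(U**T)*H*inv(U)*y = e*y with y = U*z, storing the reduced matrix in hmat%data_r.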
IF (ABS(scale-1)>1E-10) call judft_error("Scale parameter not implemented in chase_diag")
IF (info.NE.0) THEN
WRITE (6,*) 'Error in pdsygst: info =',info
@@ -346,27 +348,17 @@ IMPLICIT NONE
ENDIF
END IF
! H is only set in the upper half, solve_evp_real needs a full matrix
! Set lower half from upper half
call hmat%generate_full_matrix()
!call hmat%print_matrix(444)
! Now we are ready to set up chase
PRINT *,"Chase 2"
nev = min(ne,hmat%global_size1)
nev = MIN(ne,hmat%global_size1)
nex = min(max(nev/4, 45), hmat%global_size1-nev) !dimensioning for workspace
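! Worked example: for ne = 100 and a 2000x2000 Hamiltonian, nev = 100 and
! nex = min(max(25,45),1900) = 45, i.e. ChASE iterates on a 145-column search space.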
CALL priv_init_chasempimat(hmat,chase_mat,nev,nex)
CALL chase_mat%generate_full_matrix()
ALLOCATE(eigenvalues(nev+nex))
eigenvalues = 0.0
ALLOCATE(t_mpimat::zmatTemp)
CALL zMatTemp%init(hmat%l_real,hmat%global_size1,nev+nex,MPI_COMM_SELF,.TRUE.) !Generate a pseudo-distributed matrix
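! With MPI_COMM_SELF each rank holds a complete (global_size1 x nev+nex) copy of zMatTemp,
! i.e. the block-cyclic "distribution" has exactly one process; presumably this is done so
! the dense eigenvector block can be handed to ChASE and to write_eig in one piece.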
PRINT *,"Chase 3"
IF (hmat%l_real) THEN
IF(iter.EQ.1) THEN
CALL mpi_chase_r(chase_mat%data_r, zMatTemp%data_r, eigenvalues, 25, 1e-10, 'R', 'S' )
@@ -383,11 +375,9 @@ IMPLICIT NONE
END IF
ENDIF
if (irank==0) PRINT *,eigenvalues
stop "DEBUG"
ne = nev
CALL write_eig(chase_eig_id,ikpt,jsp,nev+nex,nev+nex,&
eigenvalues(:(nev+nex)),zmat=zMatTemp)
IF (myid==0) CALL write_eig(chase_eig_id,ikpt,jsp,nev+nex,nev+nex,&
eigenvalues(:(nev+nex)),zmat=zMatTemp)
CALL hmat%copy(zmatTemp,1,1) !Copy matrix into distributed form
call zmatTemp%free()
@@ -412,10 +402,12 @@ IMPLICIT NONE
CALL zmat%init(hmat%l_real,hmat%global_size1,hmat%global_size1,hmat%mpi_com,.FALSE.)
CALL zmat%copy(hmat,1,1)
DO i = 1, ne
eig(i) = eigenvalues(i)
END DO
!Distribute eigenvalues over PEs
ne=0
DO i=myid+1,nev,np
ne=ne+1
eig(ne)=eigenvalues(i)
ENDDO
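! Round-robin example: with np = 3 and nev = 7, rank 0 keeps eigenvalues 1,4,7 (ne = 3),
! rank 1 keeps 2,5 and rank 2 keeps 3,6 (ne = 2 each).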
END SUBROUTINE chase_diag_MPI
SUBROUTINE priv_init_chasempimat(hmat,mat,nev,nex)
@@ -429,7 +421,7 @@ IMPLICIT NONE
INTEGER :: myrow,mycol
INTEGER :: npblacs,np,myid
INTEGER :: rowlen,collen,rowoff,coloff
INTEGER :: k,i,j
INTEGER :: k,i,j,l
INTEGER :: ierr
INTEGER,ALLOCATABLE :: iblacsnums(:),ihelp(:),iusermap(:,:)
@@ -460,11 +452,6 @@ IMPLICIT NONE
CALL MPI_ALLREDUCE(rowlen,nbr,1,MPI_INTEGER,MPI_MAX,mat%mpi_com,ierr)
CALL MPI_ALLREDUCE(collen,nbc,1,MPI_INTEGER,MPI_MAX,mat%mpi_com,ierr)
!Determine scalapack grid
!PRINT *,"G1:",myid,myrow,mycol,mat%nprow,mat%npcol
!CALL priv_processor_grid(mat%global_size1,rowoff,coloff,nbr,nbc,myrow,mycol,mat%nprow,mat%npcol)
!PRINT *,"G2:",myid,myrow,mycol,mat%nprow,mat%npcol
!Now included in init-call to chase above
ALLOCATE(iusermap(mat%nprow,mat%npcol))
iusermap=-2
@@ -476,6 +463,7 @@ IMPLICIT NONE
! Create the Grid
CALL BLACS_GRIDMAP(mat%blacs_ctext,iusermap,mat%nprow,mat%nprow,mat%npcol)
!Now create the matrix
mat%matsize1=numroc(mat%global_size1,nbr,myrow,0,mat%nprow)
mat%matsize2=numroc(mat%global_size1,nbc,mycol,0,mat%npcol)
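! numroc(N,NB,iproc,isrc,nprocs) returns how many of N block-cyclically distributed
! rows/columns land on process iproc. Example: N = 10, NB = 2, nprocs = 3 gives
! 4 rows on process 0 (global rows 1-2 and 7-8), 4 on process 1 and 2 on process 2.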
@@ -498,54 +486,10 @@ IMPLICIT NONE
!Copy data from hmat
CALL mat%copy(hmat,1,1)
!Test if indices match
PRINT *,"I",coloff+3,indxl2g(3,mat%blacs_desc(6), mycol, 0, mat%npcol)
PRINT *,"I",rowoff+4,indxl2g(4,mat%blacs_desc(5), myrow, 0, mat%nprow)
call hmat%print_matrix(445)
CALL mat%print_matrix(444)
DO i=1,mat%matsize1
DO j=1,mat%matsize2
PRINT *,"MAT",i+rowoff,j+coloff,mat%data_r(i,j)
ENDDO
ENDDO
END SUBROUTINE priv_init_chasempimat
! SUBROUTINE priv_processor_grid(myid,np,myrow,mycol,nprow,npcol)
! IMPLICIT NONE
! INTEGER,INTENT(IN) :: myid,np
! INTEGER,INTENT(OUT):: myrow,mycol,nprow,npcol
!
! INTEGER:: j
! distloop: DO j=INT(SQRT(REAL(np))),1,-1
! IF ( (np/j) * j == np) THEN
! nprow = np/j
! npcol = j
! EXIT distloop
! ENDIF
! ENDDO distloop
! mycol=myid/nprow
! myrow=myid-(myid/nprow)*nprow
! END SUBROUTINE priv_processor_grid
SUBROUTINE priv_processor_grid(matsize,rowoff,coloff,nbr,nbc,myrow,mycol,nprow,npcol)
IMPLICIT NONE
INTEGER,INTENT(IN) :: matsize,rowoff,coloff,nbr,nbc
INTEGER,INTENT(OUT):: myrow,mycol,nprow,npcol
npcol=(matsize-1)/nbc+1
nprow=(matsize-1)/nbr+1
myrow=rowoff/nbr
mycol=coloff/nbc
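! These formulas appear to assign one grid position per block: e.g. matsize = 1000,
! nbr = nbc = 250, rowoff = 500, coloff = 250 yields nprow = npcol = 4, myrow = 2, mycol = 1,
! so every process is addressed by the block it owns.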
END SUBROUTINE priv_processor_grid
#endif
END MODULE m_chase_diag
@@ -39,7 +39,7 @@ CONTAINS
parallel_solver_available=any((/diag_elpa,diag_elemental,diag_scalapack/)>0)
END FUNCTION parallel_solver_available
SUBROUTINE eigen_diag(hmat,smat,ikpt,jsp,iter,ne,eig,ev)
SUBROUTINE eigen_diag(mpi,hmat,smat,ikpt,jsp,iter,ne,eig,ev)
USE m_lapack_diag
USE m_magma
USE m_elpa
@@ -47,10 +47,12 @@ CONTAINS
USE m_elemental
USE m_chase_diag
USE m_types_mpimat
USE m_matrix_copy
IMPLICIT NONE
#ifdef CPP_MPI
include 'mpif.h'
#endif
TYPE(t_mpi), INTENT(IN) :: mpi
CLASS(t_mat), INTENT(INOUT) :: smat,hmat
CLASS(t_mat), ALLOCATABLE, INTENT(OUT) :: ev
INTEGER, INTENT(IN) :: ikpt
@@ -59,15 +61,33 @@ CONTAINS
INTEGER, INTENT(INOUT) :: ne
REAL, INTENT(OUT) :: eig(:)
!Locals
!Locals
LOGICAL :: parallel
!For check-mode
TYPE(t_mat) :: s_store,h_store
SELECT TYPE(smat)
CLASS IS (t_mpimat)
parallel=.TRUE.
CLASS default
parallel=.FALSE.
END SELECT
!Create a copy of the matrix if in test mode
IF (judft_was_argument("-diag:test")) THEN
SELECT TYPE(hmat)
CLASS IS (t_mpimat)
CALL s_store%init(hmat%l_real,hmat%global_size1,hmat%global_size2)
CALL h_store%init(hmat%l_real,hmat%global_size1,hmat%global_size2)
CLASS default
CALL s_store%init(hmat%l_real,hmat%matsize1,hmat%matsize2)
CALL h_store%init(hmat%l_real,hmat%matsize1,hmat%matsize2)
END SELECT
CALL matrix_copy(smat,s_store)
CALL matrix_copy(hmat,h_store)
END IF
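!The stored copies are taken here because the solvers below generally overwrite hmat/smat;
!s_store/h_store preserve the original problem for the comparison in priv_test_solution.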
CALL timestart("Diagonalization")
!Select the solver
SELECT CASE (priv_select_solver(parallel))
@@ -90,11 +110,61 @@ CONTAINS
CASE (diag_debugout)
CALL priv_debug_out(smat,hmat)
END SELECT
!Test the solution if requested (-diag:test)
IF (judft_was_argument("-diag:test")) THEN
CALL priv_test_solution(mpi,ne,s_store,h_store,eig,ev)
CALL judft_error("Diagonalization tested")
END IF
CALL timestop("Diagonalization")
!
END SUBROUTINE eigen_diag
SUBROUTINE priv_test_solution(mpi,ne,s_store,h_store,eig1,ev)
USE m_types
USE m_lapack_diag
USE m_matrix_copy
IMPLICIT NONE
TYPE(t_mpi),INTENT(in):: mpi
INTEGER,INTENT(INOUT) :: ne
TYPE(t_mat) :: s_store,h_store
REAL,INTENT(in) :: eig1(:)
CLASS(t_mat) :: ev
REAL,ALLOCATABLE::eig2(:)
TYPE(t_mat) ::ev1
CLASS(t_mat),ALLOCATABLE ::ev2
INTEGER :: i,irank
ALLOCATE(eig2(ne))
CALL ev1%init(s_store%l_real,s_store%matsize1,s_store%matsize2)
CALL matrix_copy(ev,ev1)
IF (mpi%irank==0) THEN
CALL lapack_diag(h_store,s_store,ne,eig2,ev2)
OPEN(99,file="diag.compare")
WRITE(99,*) "Eigenvalues"
DO i=1,ne
WRITE(99,*) i,eig1(i),eig2(i)
ENDDO
WRITE(99,*) "Eigenvectors"
DO i=1,ne
IF (ev1%l_real) THEN
WRITE(99,"(i0,20(1x,f10.5))") i,ev1%data_r(1:10,i)
WRITE(99,"(i0,20(1x,f10.5))") i,ev2%data_r(1:10,i)
ELSE
WRITE(99,"(i0,20(1x,f10.5))") i,ev1%data_c(1:10,i)
WRITE(99,"(i0,20(1x,f10.5))") i,ev2%data_c(1:10,i)
END IF
ENDDO
CLOSE(99)
END IF
END SUBROUTINE priv_test_solution
SUBROUTINE priv_debug_out(smat,hmat)
USE m_types
use m_judft
......
@@ -183,7 +183,7 @@ CONTAINS
end select
END IF
CALL eigen_diag(hmat,smat,nk,jsp,iter,ne_all,eig,zMat)
CALL eigen_diag(mpi,hmat,smat,nk,jsp,iter,ne_all,eig,zMat)
DEALLOCATE(hmat,smat)
! Output results
......
@@ -32,10 +32,15 @@ CONTAINS
CALL mat_final%init(mat(1,1)%l_real,m,m,mpi%sub_comm,.TRUE.) !here the .true. creates a block-cyclic scalapack distribution
!up-up component (or only component in collinear case)
IF (SIZE(mat)==1) THEN
CALL mat_final%move(mat(1,1))
CALL mat(1,1)%free()
RETURN
ENDIF
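!Using move instead of copy hands over the allocation (MOVE_ALLOC for the serial t_mat;
!the t_mpimat version additionally performs the redistribution), so no temporary duplicate
!of the full Hamiltonian/overlap block is created.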
CALL mat_final%copy(mat(1,1),1,1)
CALL mat(1,1)%free()
IF (SIZE(mat)==1) RETURN
!down-down component
CALL mat_final%copy(mat(2,2),lapw%nv(1)+atoms%nlotot+1,lapw%nv(1)+atoms%nlotot+1)
CALL mat(2,2)%free()
......
@@ -19,6 +19,7 @@ global/savewigner.f
)
set(fleur_F90 ${fleur_F90}
global/constants.f90
global/matrix_copy.F90
global/checkdop.F90
global/checkdopall.f90
global/genMTBasis.f90
......
!--------------------------------------------------------------------------------
! Copyright (c) 2016 Peter Grünberg Institut, Forschungszentrum Jülich, Germany
! This file is part of FLEUR and available as free software under the conditions
! of the MIT license as expressed in the LICENSE file in more detail.
!--------------------------------------------------------------------------------
!<@brief
!<This matrix_copy module is needed to copy a distributed matrix into a non-distributed one or vice versa
!<It calls the usual matrix-copy routines in other cases
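!<Typical use (sketch; the variable names are illustrative only): gather a block-cyclically
!<distributed matrix onto each rank as a plain t_mat:
!<   TYPE(t_mpimat) :: h_dist
!<   TYPE(t_mat)    :: h_local
!<   ...
!<   CALL h_local%init(h_dist%l_real,h_dist%global_size1,h_dist%global_size2)
!<   CALL matrix_copy(h_dist,h_local)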
MODULE m_matrix_copy
IMPLICIT NONE
CONTAINS
SUBROUTINE matrix_copy(mat_in,mat_out)
USE m_types
USE m_types_mpimat
CLASS(t_mat),INTENT(IN) ::mat_in
CLASS(t_mat),INTENT(INOUT)::mat_out
TYPE(t_mpimat)::tmp_mat
#ifdef CPP_MPI
INCLUDE 'mpif.h'
#else
INTEGER:: MPI_COMM_SELF
#endif
SELECT TYPE(mat_in)
CLASS is(t_mpimat)
SELECT TYPE(mat_out)
CLASS is(t_mpimat)
CALL mat_out%copy(mat_in,1,1)
CLASS is(t_mat)
!Copy from t_mpimat to t_mat
CALL tmp_mat%init(mat_in%l_real,mat_in%global_size1,mat_in%global_size2,MPI_COMM_SELF)
CALL tmp_mat%copy(mat_in,1,1) !redistribute to single matrix
IF (tmp_mat%l_real) THEN
CALL move_ALLOC(tmp_mat%data_r,mat_out%data_r)
ELSE
CALL move_ALLOC(tmp_mat%data_c,mat_out%data_c)
ENDIF
END SELECT
CLASS is (t_mat)
SELECT TYPE(mat_out)
CLASS is(t_mpimat)
!Copy from non-distributed t_mat to t_mpimat
CALL tmp_mat%init(mat_in%l_real,mat_in%matsize1,mat_in%matsize2,MPI_COMM_SELF)
IF (tmp_mat%l_real) THEN
tmp_mat%data_r=mat_in%data_r
ELSE
tmp_mat%data_c=mat_in%data_c
ENDIF
CALL mat_out%copy(tmp_mat,1,1)
CLASS is(t_mat)
CALL mat_out%copy(mat_in,1,1)
END SELECT
END SELECT
END SUBROUTINE matrix_copy
END MODULE m_matrix_copy
@@ -82,6 +82,7 @@
! arltv(i) : length of reciprocal lattice vector along
! direction (i)
!
IF (.NOT.xcpot%is_gga()) xcpot%gmaxxc=stars%gmax
WRITE (6,'('' gmaxxc should be: 2*kmax <= gmaxxc <= gmax '')')
IF ( abs( xcpot%gmaxxc - stars%gmax ) .le. 10.0**(-6) ) THEN
WRITE (6,'('' concerning memory, you may want to choose'',&
......
@@ -649,9 +649,9 @@ c in s and p states equal occupation of up and down states
IF (atoms%nlo(n) /= 0) THEN ! check for local orbitals
DO i = 1, atoms%nlo(n)
enpara%ello0(i,n,:) = REAL(lonqn(i,n))
IF (lonqn(i,n) == NINT(enpara%el0(atoms%llo(i,n),n,1))) THEN ! increase qn
enpara%el0(atoms%llo(i,n),n,:) =
& enpara%el0(atoms%llo(i,n),n,1) + 1 ! in LAPW's by 1
IF (lonqn(i,n) == enpara%qn_el(atoms%llo(i,n),n,1)) THEN ! increase qn
enpara%qn_el(atoms%llo(i,n),n,:) =
& enpara%qn_el(atoms%llo(i,n),n,1) + 1 ! in LAPW's by 1
ENDIF
ENDDO
ENDIF
......
@@ -20,7 +20,7 @@ CONTAINS
!! TE_EXC : charge density-ex-corr.energy density integral
SUBROUTINE vgen(hybrid,field,input,xcpot,DIMENSION,atoms,sphhar,stars,vacuum,sym,&
obsolete,cell,oneD,sliceplot,mpi,results,noco,den,vTot,vXC,vCoul)
obsolete,cell,oneD,sliceplot,mpi,results,noco,den,vTot,vx,vCoul)
USE m_rotate_int_den_to_local
USE m_bfield
@@ -33,6 +33,7 @@ CONTAINS
#endif
IMPLICIT NONE
TYPE(t_results), INTENT(INOUT) :: results
CLASS(t_xcpot), INTENT(IN) :: xcpot
TYPE(t_hybrid), INTENT(IN) :: hybrid
@@ -51,26 +52,29 @@ CONTAINS
TYPE(t_sphhar), INTENT(IN) :: sphhar
TYPE(t_atoms), INTENT(IN) :: atoms
TYPE(t_potden), INTENT(INOUT) :: den
TYPE(t_potden), INTENT(INOUT) :: vTot,vXC,vCoul
TYPE(t_potden), INTENT(INOUT) :: vTot,vx,vCoul
TYPE(t_potden) :: workden,denRot
if (mpi%irank==0) WRITE (6,FMT=8000)
8000 FORMAT (/,/,t10,' p o t e n t i a l g e n e r a t o r',/)
CALL vTot%resetPotDen()
CALL vCoul%resetPotDen()
CALL vXC%resetPotDen()
ALLOCATE(vXC%pw_w,vTot%pw_w,mold=vTot%pw)
CALL vx%resetPotDen()
ALLOCATE(vx%pw_w,vTot%pw_w,mold=vTot%pw)
ALLOCATE(vCoul%pw_w(SIZE(den%pw,1),1))
CALL workDen%init(stars,atoms,sphhar,vacuum,input%jspins,noco%l_noco,0)
!sum up both spins in den into workden
CALL den%sum_both_spin(workden)
CALL vgen_coulomb(1,mpi,dimension,oneD,input,field,vacuum,sym,stars,cell,sphhar,atoms,workden,vCoul,results)
CALL vCoul%copy_both_spin(vTot)
IF (noco%l_noco) THEN
@@ -79,19 +83,21 @@ CONTAINS
CALL rotate_int_den_to_local(dimension,sym,stars,atoms,sphhar,vacuum,cell,input,noco,oneD,denRot)
ENDIF
CALL vgen_xcpot(hybrid,input,xcpot,dimension,atoms,sphhar,stars,vacuum,sym,&
obsolete,cell,oneD,sliceplot,mpi,noco,den,denRot,vTot,vXC,results)
obsolete,cell,oneD,sliceplot,mpi,noco,den,denRot,vTot,vx,results)
!ToDo, check if this is needed for more potentials as well...
CALL vgen_finalize(atoms,stars,vacuum,sym,noco,input,vTot,denRot)
DEALLOCATE(vcoul%pw_w,vXC%pw_w)
DEALLOCATE(vcoul%pw_w,vx%pw_w)
CALL bfield(input,noco,atoms,field,vTot)
#ifdef CPP_MPI
CALL mpi_bc_potden(mpi,stars,sphhar,atoms,input,vacuum,oneD,noco,vTot)
CALL mpi_bc_potden(mpi,stars,sphhar,atoms,input,vacuum,oneD,noco,vCoul)
CALL mpi_bc_potden(mpi,stars,sphhar,atoms,input,vacuum,oneD,noco,vXC)
CALL mpi_bc_potden(mpi,stars,sphhar,atoms,input,vacuum,oneD,noco,vx)
#endif
END SUBROUTINE vgen
......
@@ -23,6 +23,7 @@ MODULE m_types_mat
PROCEDURE :: to_packed=>t_mat_to_packed !> convert to packed-storage matrix
PROCEDURE :: clear => t_mat_clear !> set data arrays to zero
PROCEDURE :: copy => t_mat_copy !> copy into another t_mat (overloaded for t_mpimat)
PROCEDURE :: move => t_mat_move !> move data into another t_mat (overloaded for t_mpimat)
PROCEDURE :: init => t_mat_init !> initialize the matrix (overloaded for t_mpimat)
PROCEDURE :: free => t_mat_free !> dealloc the data (overloaded for t_mpimat)
PROCEDURE :: add_transpose => t_mat_add_transpose !> add the transpose/Hermitian conjg. without the diagonal (overloaded for t_mpimat)
@@ -219,24 +220,26 @@ MODULE m_types_mat
end if
end subroutine t_mat_inverse
SUBROUTINE t_mat_move(mat,mat1)
IMPLICIT NONE
CLASS(t_mat),INTENT(INOUT):: mat
CLASS(t_mat),INTENT(INOUT):: mat1
!The allocation of the full matrix is handed over with move_alloc; no copy is made
IF (mat%l_real) THEN
CALL move_ALLOC(mat1%data_r,mat%data_r)
ELSE
CALL move_ALLOC(mat1%data_c,mat%data_c)
END IF
END SUBROUTINE t_mat_move
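! Note: t_mat_move hands over the allocation via MOVE_ALLOC, so after CALL mat%move(mat1)
! the data array of mat1 is deallocated and mat owns it without an intermediate copy;
! copy (below) keeps mat1 intact and writes it into a sub-block of mat instead.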
SUBROUTINE t_mat_copy(mat,mat1,n1,n2)
IMPLICIT NONE
CLASS(t_mat),INTENT(INOUT):: mat
CLASS(t_mat),INTENT(INOUT) :: mat1
CLASS(t_mat),INTENT(IN) :: mat1
INTEGER,INTENT(IN) :: n1,n2
INTEGER:: i1,i2
IF (n1==1.AND.n2==1.AND.mat%matsize1==mat1%matsize1.AND.mat%matsize2==mat1%matsize2) THEN
!Special case, the full matrix is copied. Then use move alloc
IF (mat%l_real) THEN
CALL move_ALLOC(mat1%data_r,mat%data_r)
ELSE
CALL move_ALLOC(mat1%data_c,mat%data_c)
END IF
RETURN
END IF
i1=mat%matsize1-n1+1 !space available for first dimension
i2=mat%matsize2-n1+1
i1=MIN(i1,mat1%matsize1)
......
@@ -27,6 +27,7 @@ MODULE m_types_mpimat
INTEGER:: npcol,nprow !> the number of columns/rows in the processor grid
CONTAINS
PROCEDURE,PASS :: copy => mpimat_copy !<overwritten from t_mat, also performs redistribution
PROCEDURE,PASS :: move => mpimat_move !<overwritten from t_mat, also performs redistribution
PROCEDURE,PASS :: free => mpimat_free !<overwritten from t_mat, takes care of blacs-grids
PROCEDURE,PASS :: init => mpimat_init !<overwritten from t_mat, also calls alloc in t_mat
PROCEDURE,PASS :: add_transpose => mpimat_add_transpose !<overwritten from t_mat
@@ -80,47 +81,60 @@ CONTAINS
SUBROUTINE generate_full_matrix(mat)
CLASS(t_mpimat),INTENT(INOUT) ::mat
INTEGER :: i,n_col,n_row,myid,err,myrow,mycol,np
INTEGER :: i,j,i_glob,j_glob,npcol,nprow,myid,err,myrow,mycol,np
COMPLEX,ALLOCATABLE:: tmp_c(:,:)
REAL,ALLOCATABLE :: tmp_r(:,:)
#ifdef CPP_SCALAPACK
INCLUDE 'mpif.h'
INTEGER, EXTERNAL :: numroc, indxl2g !SCALAPACK functions
!CALL mat%print_matrix(432)
IF (mat%l_real) THEN
ALLOCATE(tmp_r(mat%matsize1,mat%matsize2))
ELSE
ALLOCATE(tmp_c(mat%matsize1,mat%matsize2))
END IF
CALL MPI_COMM_RANK(mat%mpi_com,myid,err)
CALL MPI_COMM_SIZE(mat%mpi_com,np,err)
myrow = myid/mat%npcol
mycol = myid -(myid/mat%npcol)*mat%npcol
CALL blacs_gridinfo(mat%blacs_desc(2),nprow,npcol,myrow,mycol)
!Set lower part of matrix to zero
DO i=1,mat%matsize1
DO j=1,mat%matsize2
! Map the local indices (i,j) to the corresponding global row/column indices
! of the block-cyclic distribution
i_glob = indxl2g(i, mat%blacs_desc(5), myrow, 0, nprow)
j_glob = indxl2g(j, mat%blacs_desc(6), mycol, 0, npcol)
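! indxl2g(iloc,nb,iproc,isrc,nprocs) maps a local index back to its global index in the
! block-cyclic layout; e.g. with nb = 2 and nprocs = 3, local row 3 on process 0 is
! global row 7 (the first row of that process' second block).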