Commit f850eb0a authored by Matthias Redies's avatar Matthias Redies

merge

parents 15fddc25 643a3f97
......@@ -74,11 +74,11 @@ CONTAINS
SELECT TYPE(smat)
TYPE IS (t_mpimat)
CALL MPI_BARRIER(hmat%mpi_com,err)
CALL MPI_COMM_RANK(hmat%mpi_com,myid,err)
CALL MPI_COMM_SIZE(hmat%mpi_com,np,err)
myrow = myid/hmat%npcol
mycol = myid -(myid/hmat%npcol)*hmat%npcol
CALL MPI_BARRIER(hmat%blacsdata%mpi_com,err)
CALL MPI_COMM_RANK(hmat%blacsdata%mpi_com,myid,err)
CALL MPI_COMM_SIZE(hmat%blacsdata%mpi_com,np,err)
myrow = myid/hmat%blacsdata%npcol
mycol = myid -(myid/hmat%blacsdata%npcol)*hmat%blacsdata%npcol
!Create communicators for ELPA
......@@ -86,9 +86,9 @@ CONTAINS
mpi_comm_rows = -1
mpi_comm_cols = -1
#elif defined (CPP_ELPA_201605004) || defined (CPP_ELPA_201605003)||defined(CPP_ELPA_NEW)
err=get_elpa_row_col_comms(hmat%mpi_com, myrow, mycol,mpi_comm_rows, mpi_comm_cols)
err=get_elpa_row_col_comms(hmat%blacsdata%mpi_com, myrow, mycol,mpi_comm_rows, mpi_comm_cols)
#else
CALL get_elpa_row_col_comms(hmat%mpi_com, myrow, mycol,mpi_comm_rows, mpi_comm_cols)
CALL get_elpa_row_col_comms(hmat%blacsdata%mpi_com, myrow, mycol,mpi_comm_rows, mpi_comm_cols)
#endif
!print *,"creating ELPA comms -- done"
......@@ -97,7 +97,7 @@ CONTAINS
ALLOCATE ( eig2(hmat%global_size1), stat=err ) ! The eigenvalue array for ScaLAPACK
IF (err.NE.0) CALL juDFT_error('Failed to allocated "eig2"', calledby ='elpa')
CALL ev_dist%init(hmat%l_real,hmat%global_size1,hmat%global_size2,hmat%mpi_com,.TRUE.)! Eigenvectors for ScaLAPACK
CALL ev_dist%init(hmat)! Eigenvectors for ScaLAPACK
IF (err.NE.0) CALL juDFT_error('Failed to allocated "ev_dist"',calledby ='elpa')
IF (hmat%l_real) THEN
......@@ -107,12 +107,10 @@ CONTAINS
ENDIF
IF (err.NE.0) CALL juDFT_error('Failed to allocated "tmp2"', calledby ='elpa')
smat%blacs_desc=hmat%blacs_desc
ev_dist%blacs_desc=hmat%blacs_desc
nb=hmat%blacs_desc(5)! Blocking factor
IF (nb.NE.hmat%blacs_desc(6)) CALL judft_error("Different block sizes for rows/columns not supported")
nb=hmat%blacsdata%blacs_desc(5)! Blocking factor
IF (nb.NE.hmat%blacsdata%blacs_desc(6)) CALL judft_error("Different block sizes for rows/columns not supported")
#ifdef CPP_ELPA_201705003
CALL elpa_obj%set("na", hmat%global_size1, err)
......@@ -120,7 +118,7 @@ CONTAINS
CALL elpa_obj%set("local_nrows", hmat%matsize1, err)
CALL elpa_obj%set("local_ncols", hmat%matsize2, err)
CALL elpa_obj%set("nblk", nb, err)
CALL elpa_obj%set("mpi_comm_parent", hmat%mpi_com, err)
CALL elpa_obj%set("mpi_comm_parent", hmat%blacsdata%mpi_com, err)
CALL elpa_obj%set("process_row", myrow, err)
CALL elpa_obj%set("process_col", mycol, err)
#ifdef CPP_ELPA2
......@@ -202,10 +200,10 @@ CONTAINS
IF (hmat%l_real) THEN
CALL pdtran(hmat%global_size1,hmat%global_size1,1.d0,hmat%data_r,1,1,&
hmat%blacs_desc,0.d0,ev_dist%data_r,1,1,ev_dist%blacs_desc)
hmat%blacs_desc,0.d0,ev_dist%data_r,1,1,ev_dist%blacsdata%blacs_desc)
ELSE
CALL pztranc(hmat%global_size1,hmat%global_size2,cmplx(1.d0,0.d0),hmat%data_c,1,1,&
hmat%blacs_desc,cmplx(0.d0,0.d0),ev_dist%data_c,1,1,ev_dist%blacs_desc)
hmat%blacs_desc,cmplx(0.d0,0.d0),ev_dist%data_c,1,1,ev_dist%blacsdata%blacs_desc)
ENDIF
......@@ -261,10 +259,10 @@ CONTAINS
! 2b. tmp2 = eigvec**T
IF (hmat%l_real) THEN
CALL pdtran(ev_dist%global_size1,ev_dist%global_size1,1.d0,ev_dist%data_r,1,1,&
ev_dist%blacs_desc,0.d0,tmp2_r,1,1,ev_dist%blacs_desc)
ev_dist%blacsdata%blacs_desc,0.d0,tmp2_r,1,1,ev_dist%blacsdata%blacs_desc)
ELSE
CALL pztranc(ev_dist%global_size1,ev_dist%global_size1,cmplx(1.0,0.0),ev_dist%data_c,1,1,&
ev_dist%blacs_desc,cmplx(0.d0,0.d0),tmp2_c,1,1,ev_dist%blacs_desc)
ev_dist%blacsdata%blacs_desc,cmplx(0.d0,0.d0),tmp2_c,1,1,ev_dist%blacsdata%blacs_desc)
ENDIF
! 2c. A = U**-T * tmp2 ( = U**-T * Aorig * U**-1 )
......@@ -309,18 +307,18 @@ CONTAINS
IF (hmat%l_real) THEN
CALL pdtran(hmat%global_size1,hmat%global_size1,1.d0,hmat%data_r,1,1,&
hmat%blacs_desc,0.d0,ev_dist%data_r,1,1,ev_dist%blacs_desc)
hmat%blacsdata%blacs_desc,0.d0,ev_dist%data_r,1,1,ev_dist%blacsdata%blacs_desc)
ELSE
CALL pztranc(hmat%global_size1,hmat%global_size1,cmplx(1.0,0.0),hmat%data_c,1,1,&
hmat%blacs_desc,cmplx(0.d0,0.d0),ev_dist%data_c,1,1,ev_dist%blacs_desc)
hmat%blacsdata%blacs_desc,cmplx(0.d0,0.d0),ev_dist%data_c,1,1,ev_dist%blacsdata%blacs_desc)
ENDIF
DO i=1,hmat%matsize2
! Get global column corresponding to i and number of local rows up to
! and including the diagonal, these are unchanged in A
n_col = indxl2g(i, nb, mycol, 0, hmat%npcol)
n_row = numroc (n_col, nb, myrow, 0, hmat%nprow)
n_col = indxl2g(i, nb, mycol, 0, hmat%blacsdata%npcol)
n_row = numroc (n_col, nb, myrow, 0, hmat%blacsdata%nprow)
IF (hmat%l_real) THEN
hmat%data_r(n_row+1:hmat%matsize1,i) = ev_dist%data_r(n_row+1:ev_dist%matsize1,i)
ELSE
......@@ -340,10 +338,10 @@ CONTAINS
#ifdef CPP_ELPA2
IF (hmat%l_real) THEN
ok=solve_evp_real_2stage(hmat%global_size1,num2,hmat%data_r,hmat%matsize1,&
eig2,ev_dist%data_r,ev_dist%matsize1, nb,ev_dist%matsize2, mpi_comm_rows, mpi_comm_cols,hmat%mpi_com)
eig2,ev_dist%data_r,ev_dist%matsize1, nb,ev_dist%matsize2, mpi_comm_rows, mpi_comm_cols,hmat%blacsdata%mpi_com)
ELSE
ok=solve_evp_complex_2stage(hmat%global_size1,num2,hmat%data_c,hmat%matsize1,&
eig2,ev_dist%data_c,ev_dist%matsize1, nb,ev_dist%matsize2, mpi_comm_rows, mpi_comm_cols,hmat%mpi_com)
eig2,ev_dist%data_c,ev_dist%matsize1, nb,ev_dist%matsize2, mpi_comm_rows, mpi_comm_cols,hmat%blacsdata%mpi_com)
ENDIF
#else
IF (hmat%l_real) THEN
......@@ -358,10 +356,10 @@ CONTAINS
#ifdef CPP_ELPA2
IF (hmat%l_real) THEN
err=solve_evp_real_2stage(hmat%global_size1,num2,hmat%data_r,hmat%matsize1,&
eig2,ev_dist%data_r,ev_dist%matsize1, nb,ev_dist%matsize2, mpi_comm_rows, mpi_comm_cols,hmat%mpi_com)
eig2,ev_dist%data_r,ev_dist%matsize1, nb,ev_dist%matsize2, mpi_comm_rows, mpi_comm_cols,hmat%blacsdata%mpi_com)
ELSE
err=solve_evp_complex_2stage(hmat%global_size1,num2,hmat%data_c,hmat%matsize1,&
eig2,ev_dist%data_c,ev_dist%matsize1, nb,ev_dist%matsize2, mpi_comm_rows, mpi_comm_cols,hmat%mpi_com)
eig2,ev_dist%data_c,ev_dist%matsize1, nb,ev_dist%matsize2, mpi_comm_rows, mpi_comm_cols,hmat%blacsdata%mpi_com)
ENDIF
#else
IF (hmat%l_real) THEN
......@@ -376,10 +374,10 @@ CONTAINS
#ifdef CPP_ELPA2
IF (hmat%l_real) THEN
CALL solve_evp_real_2stage(hmat%global_size1,num2,hmat%data_r,hmat%matsize1,&
eig2,ev_dist%data_r,ev_dist%matsize1, nb, mpi_comm_rows, mpi_comm_cols,hmat%mpi_com)
eig2,ev_dist%data_r,ev_dist%matsize1, nb, mpi_comm_rows, mpi_comm_cols,hmat%blacsdata%mpi_com)
ELSE
CALL solve_evp_complex_2stage(hmat%global_size1,num2,hmat%data_c,hmat%matsize1,&
eig2,ev_dist%data_c,ev_dist%matsize1, nb, mpi_comm_rows, mpi_comm_cols,hmat%mpi_com)
eig2,ev_dist%data_c,ev_dist%matsize1, nb, mpi_comm_rows, mpi_comm_cols,hmat%blacsdata%mpi_com)
ENDIF
#else
IF (hmat%l_real) THEN
......@@ -398,10 +396,10 @@ CONTAINS
! mult_ah_b_complex needs the transpose of U**-1, thus tmp2 = (U**-1)**T
IF (hmat%l_real) THEN
CALL pdtran(smat%global_size1,smat%global_size1,1.d0,smat%data_r,1,1,&
smat%blacs_desc,0.d0,tmp2_r,1,1,smat%blacs_desc)
smat%blacsdata%blacs_desc,0.d0,tmp2_r,1,1,smat%blacsdata%blacs_desc)
ELSE
CALL pztranc(smat%global_size1,smat%global_size1,cmplx(1.d0,0.d0),smat%data_c,1,1,&
smat%blacs_desc,cmplx(0.d0,0.d0),tmp2_c,1,1,smat%blacs_desc)
smat%blacsdata%blacs_desc,cmplx(0.d0,0.d0),tmp2_c,1,1,smat%blacsdata%blacs_desc)
ENDIF
#if defined (CPP_ELPA_201705003)
......@@ -471,7 +469,7 @@ CONTAINS
! having all eigenvectors corresponding to his eigenvalues as above
!
ALLOCATE(t_mpimat::ev)
CALL ev%init(hmat%l_real,hmat%global_size1,hmat%global_size1,hmat%mpi_com,.FALSE.)
CALL ev%init(hmat%l_real,hmat%global_size1,hmat%global_size1,hmat%blacsdata%mpi_com,.FALSE.)
CALL ev%copy(hmat,1,1)
CLASS DEFAULT
call judft_error("Wrong type (1) in scalapack")
......
......@@ -69,8 +69,8 @@ CONTAINS
ALLOCATE(eig2(hmat%global_size1))
CALL MPI_COMM_RANK(hmat%mpi_com,myid,ierr)
CALL MPI_COMM_SIZE(hmat%mpi_com,np,ierr)
CALL MPI_COMM_RANK(hmat%blacsdata%mpi_com,myid,ierr)
CALL MPI_COMM_SIZE(hmat%blacsdata%mpi_com,np,ierr)
num=ne !no of states solved for
......@@ -84,15 +84,15 @@ CONTAINS
!ev_dist%blacs_desc=hmat%blacs_desc
nb=hmat%blacs_desc(5)! Blocking factor
IF (nb.NE.hmat%blacs_desc(6)) CALL judft_error("Different block sizes for rows/columns not supported")
nb=hmat%blacsdata%blacs_desc(5)! Blocking factor
IF (nb.NE.hmat%blacsdata%blacs_desc(6)) CALL judft_error("Different block sizes for rows/columns not supported")
!
nn=MAX(MAX(hmat%global_size1,nb),2)
np0=numroc(nn,nb,0,0,hmat%nprow)
mq0=numroc(MAX(MAX(ne,nb),2),nb,0,0,hmat%npcol)
np0=numroc(nn,nb,0,0,hmat%blacsdata%nprow)
mq0=numroc(MAX(MAX(ne,nb),2),nb,0,0,hmat%blacsdata%npcol)
IF (hmat%l_real) THEN
lwork2=5*hmat%global_size1+MAX(5*nn,np0*mq0+2*nb*nb)+ iceil(ne,hmat%nprow*hmat%npcol)*nn
lwork2=5*hmat%global_size1+MAX(5*nn,np0*mq0+2*nb*nb)+ iceil(ne,hmat%blacsdata%nprow*hmat%blacsdata%npcol)*nn
ALLOCATE ( work2_r(lwork2+10*hmat%global_size1), stat=err ) ! Allocate more in case of clusters
ELSE
lwork2=hmat%global_size1+MAX(nb*(np0+1),3)
......@@ -103,7 +103,7 @@ CONTAINS
CALL juDFT_error('Failed to allocated "work2"', calledby ='chani')
ENDIF
liwork=6*MAX(MAX(hmat%global_size1,hmat%nprow*hmat%npcol+1),4)
liwork=6*MAX(MAX(hmat%global_size1,hmat%blacsdata%nprow*hmat%blacsdata%npcol+1),4)
ALLOCATE ( iwork(liwork), stat=err )
IF (err.NE.0) THEN
WRITE (*,*) 'iwork :',err,liwork
......@@ -114,14 +114,14 @@ CONTAINS
WRITE (*,*) 'ifail :',err,hmat%global_size1
CALL juDFT_error('Failed to allocated "ifail"', calledby ='chani')
ENDIF
ALLOCATE ( iclustr(2*hmat%nprow*hmat%npcol), stat=err )
ALLOCATE ( iclustr(2*hmat%blacsdata%nprow*hmat%blacsdata%npcol), stat=err )
IF (err.NE.0) THEN
WRITE (*,*) 'iclustr:',err,2*hmat%nprow*hmat%npcol
WRITE (*,*) 'iclustr:',err,2*hmat%blacsdata%nprow*hmat%blacsdata%npcol
CALL juDFT_error('Failed to allocated "iclustr"', calledby ='chani')
ENDIF
ALLOCATE ( gap(hmat%nprow*hmat%npcol), stat=err )
ALLOCATE ( gap(hmat%blacsdata%nprow*hmat%blacsdata%npcol), stat=err )
IF (err.NE.0) THEN
WRITE (*,*) 'gap :',err,hmat%nprow*hmat%npcol
WRITE (*,*) 'gap :',err,hmat%blacsdata%nprow*hmat%blacsdata%npcol
CALL juDFT_error('Failed to allocated "gap"', calledby ='chani')
ENDIF
!
......@@ -130,9 +130,9 @@ CONTAINS
IF (hmat%l_real) THEN
uplo='U'
CALL CPP_LAPACK_pdsygvx(1,'V','I','U',hmat%global_size1,hmat%data_r,1,1,&
hmat%blacs_desc,smat%data_r,1,1,smat%blacs_desc,&
hmat%blacsdata%blacs_desc,smat%data_r,1,1,smat%blacsdata%blacs_desc,&
0.0,1.0,1,num,abstol,num1,num2,eig2,orfac,ev_dist%data_r,1,1,&
ev_dist%blacs_desc,work2_r,-1,iwork,-1,ifail,iclustr, gap,ierr)
ev_dist%blacsdata%blacs_desc,work2_r,-1,iwork,-1,ifail,iclustr, gap,ierr)
IF ( work2_r(1).GT.lwork2) THEN
lwork2 = work2_r(1)
DEALLOCATE (work2_r)
......@@ -143,7 +143,7 @@ CONTAINS
ENDIF
ENDIF
ELSE
lrwork=4*hmat%global_size1+MAX(5*nn,np0*mq0)+ iceil(ne,hmat%nprow*hmat%npcol)*nn
lrwork=4*hmat%global_size1+MAX(5*nn,np0*mq0)+ iceil(ne,hmat%blacsdata%nprow*hmat%blacsdata%npcol)*nn
! Allocate more in case of clusters
ALLOCATE(rwork(lrwork+10*hmat%global_size1), stat=ierr)
IF (err /= 0) THEN
......@@ -152,9 +152,9 @@ CONTAINS
ENDIF
CALL CPP_LAPACK_pzhegvx(1,'V','I','U',hmat%global_size1,hmat%data_c,1,1,&
hmat%blacs_desc,smat%data_c,1,1, smat%blacs_desc,&
hmat%blacsdata%blacs_desc,smat%data_c,1,1, smat%blacsdata%blacs_desc,&
0.0,1.0,1,num,abstol,num1,num2,eig2,orfac,ev_dist%data_c,1,1,&
ev_dist%blacs_desc,work2_c,-1,rwork,-1,iwork,-1,ifail,iclustr,&
ev_dist%blacsdata%blacs_desc,work2_c,-1,rwork,-1,iwork,-1,ifail,iclustr,&
gap,ierr)
IF (ABS(work2_c(1)).GT.lwork2) THEN
lwork2=work2_c(1)
......@@ -190,14 +190,14 @@ CONTAINS
!
CALL timestart("SCALAPACK call")
if (hmat%l_real) THEN
CALL CPP_LAPACK_pdsygvx(1,'V','I','U',hmat%global_size1,hmat%data_r,1,1,hmat%blacs_desc,smat%data_r,1,1, smat%blacs_desc,&
CALL CPP_LAPACK_pdsygvx(1,'V','I','U',hmat%global_size1,hmat%data_r,1,1,hmat%blacsdata%blacs_desc,smat%data_r,1,1, smat%blacsdata%blacs_desc,&
1.0,1.0,1,num,abstol,num1,num2,eig2,orfac,ev_dist%data_r,1,1,&
ev_dist%blacs_desc,work2_r,lwork2,iwork,liwork,ifail,iclustr,&
ev_dist%blacsdata%blacs_desc,work2_r,lwork2,iwork,liwork,ifail,iclustr,&
gap,ierr)
else
CALL CPP_LAPACK_pzhegvx(1,'V','I','U',hmat%global_size1,hmat%data_c,1,1,hmat%blacs_desc,smat%data_c,1,1, smat%blacs_desc,&
CALL CPP_LAPACK_pzhegvx(1,'V','I','U',hmat%global_size1,hmat%data_c,1,1,hmat%blacsdata%blacs_desc,smat%data_c,1,1, smat%blacsdata%blacs_desc,&
1.0,1.0,1,num,abstol,num1,num2,eig2,orfac,ev_dist%data_c,1,1,&
ev_dist%blacs_desc,work2_c,lwork2,rwork,lrwork,iwork,liwork,&
ev_dist%blacsdata%blacs_desc,work2_c,lwork2,rwork,lrwork,iwork,liwork,&
ifail,iclustr,gap,ierr)
DEALLOCATE(rwork)
endif
......@@ -255,7 +255,7 @@ CONTAINS
! having all eigenvectors corresponding to his eigenvalues as above
!
ALLOCATE(t_mpimat::ev)
CALL ev%init(ev_dist%l_real,ev_dist%global_size1,ev_dist%global_size1,ev_dist%mpi_com,.FALSE.)
CALL ev%init(ev_dist%l_real,ev_dist%global_size1,ev_dist%global_size1,ev_dist%blacsdata%mpi_com,.FALSE.)
CALL ev%copy(ev_dist,1,1)
CLASS DEFAULT
call judft_error("Wrong type (1) in scalapack")
......
......@@ -38,22 +38,22 @@ CONTAINS
!up-up component (or only component in collinear case)
IF (SIZE(mat)==1) THEN
CALL mat_final%move(mat(1,1))
IF (PRESENT(mat_final_templ)) CALL mat(1,1)%free()
CALL mat(1,1)%free()
RETURN
ENDIF
CALL mat_final%copy(mat(1,1),1,1)
IF (PRESENT(mat_final_templ)) CALL mat(1,1)%free()
CALL mat(1,1)%free()
!down-down component
CALL mat_final%copy(mat(2,2),lapw%nv(1)+atoms%nlotot+1,lapw%nv(1)+atoms%nlotot+1)
IF (PRESENT(mat_final_templ)) CALL mat(2,2)%free()
CALL mat(2,2)%free()
!Now collect off-diagonal parts
CALL mat(1,2)%add_transpose(mat(2,1))
CALL mat_final%copy(mat(1,2),1,lapw%nv(1)+atoms%nlotot+1)
IF (PRESENT(mat_final_templ)) CALL mat(1,2)%free()
IF (PRESENT(mat_final_templ)) CALL mat(2,1)%free()
CALL mat(1,2)%free()
CALL mat(2,1)%free()
END SUBROUTINE eigen_redist_matrix
END MODULE m_eigen_redist_matrix
......
......@@ -42,7 +42,7 @@
REAL dmat(3,3),dmati(3,3)
INTRINSIC sqrt,max,min
DO n = 1, n_u
......
......@@ -47,9 +47,7 @@
! .. Local Arrays ..
COMPLEX sf2(stars%ng2),sf3(stars%ng3),ylm((atoms%lmaxd+1)**2)
REAL rcc(3),x(3)
! ..
! .. Intrinsic Functions ..
INTRINSIC abs,real,sqrt
ivac=iv
......
......@@ -17,14 +17,23 @@ MODULE m_types_mpimat
!<It stores the additional mpi_communicator and sets up a blacs grid for the matrix.
!<This can be used to perform scalapack calls on the matrix with little additional input.
!<The copy procedure is overwritten from t_mat to enable also redistribution of the matrix.
TYPE,EXTENDS(t_mat):: t_mpimat
TYPE t_blacsdata
INTEGER:: no_use
INTEGER:: mpi_com !> mpi-communiator over which matrix is distributed
INTEGER:: blacs_desc(dlen_) !> blacs descriptor
INTEGER:: blacs_ctext !> blacs context
INTEGER:: global_size1,global_size2 !> this is the size of the full-matrix
!> 1: =1
!> 2: context
!> 3,4: global matrix size
!> 5,6: block sizes
!> 7,8: row/colum of grid for first row/colum of matrix
!> 9: leading dimension of local matrix
INTEGER:: npcol,nprow !> the number of columns/rows in the processor grid
END TYPE t_blacsdata
TYPE,EXTENDS(t_mat):: t_mpimat
INTEGER :: global_size1,global_size2 !> this is the size of the full-matrix
TYPE(t_blacsdata),POINTER :: blacsdata
CONTAINS
PROCEDURE,PASS :: copy => mpimat_copy !<overwriten from t_mat, also performs redistribution
PROCEDURE,PASS :: move => mpimat_move !<overwriten from t_mat, also performs redistribution
......@@ -35,7 +44,7 @@ MODULE m_types_mpimat
PROCEDURE,PASS :: generate_full_matrix ! construct full matrix if only upper triangle of hermitian matrix is given
PROCEDURE,PASS :: print_matrix
PROCEDURE,PASS :: from_non_dist
FINAL :: finalize
END TYPE t_mpimat
PUBLIC t_mpimat
......@@ -52,29 +61,29 @@ CONTAINS
CHARACTER(len=10)::filename
INTEGER :: irank,isize,i,j,npr,npc,r,c,tmp,err,status(MPI_STATUS_SIZE)
CALL MPI_COMM_RANK(mat%mpi_com,irank,err)
CALL MPI_COMM_SIZE(mat%mpi_com,isize,err)
CALL MPI_COMM_RANK(mat%blacsdata%mpi_com,irank,err)
CALL MPI_COMM_SIZE(mat%blacsdata%mpi_com,isize,err)
tmp=0
IF (irank>0) CALL MPI_RECV(tmp,1,MPI_INTEGER,irank-1,0,mat%mpi_com,status,err) !lock
IF (irank>0) CALL MPI_RECV(tmp,1,MPI_INTEGER,irank-1,0,mat%blacsdata%mpi_com,status,err) !lock
WRITE(filename,"(a,i0)") "out.",fileno
OPEN(fileno,file=filename,access='append')
CALL blacs_gridinfo(mat%blacs_desc(2),npr,npc,r,c)
CALL blacs_gridinfo(mat%blacsdata%blacs_desc(2),npr,npc,r,c)
DO i=1,mat%matsize1
DO j=1,mat%matsize2
IF (mat%l_real) THEN
WRITE(fileno,"(5(i0,1x),2(f10.5,1x))") irank,i,j,indxl2g(i,mat%blacs_desc(5),r,0,npr),&
indxl2g(j,mat%blacs_desc(6),c,0,npc),mat%data_r(i,j)
WRITE(fileno,"(5(i0,1x),2(f10.5,1x))") irank,i,j,indxl2g(i,mat%blacsdata%blacs_desc(5),r,0,npr),&
indxl2g(j,mat%blacsdata%blacs_desc(6),c,0,npc),mat%data_r(i,j)
ELSE
WRITE(fileno,"(5(i0,1x),2(f10.5,1x))") irank,i,j,indxl2g(i,mat%blacs_desc(5),r,0,npr),&
indxl2g(j,mat%blacs_desc(6),c,0,npc),mat%data_c(i,j)
WRITE(fileno,"(5(i0,1x),2(f10.5,1x))") irank,i,j,indxl2g(i,mat%blacsdata%blacs_desc(5),r,0,npr),&
indxl2g(j,mat%blacsdata%blacs_desc(6),c,0,npc),mat%data_c(i,j)
END IF
ENDDO
ENDDO
CLOSE(fileno)
IF (irank+1<isize) CALL MPI_SEND(tmp,1,MPI_INTEGER,irank+1,0,mat%mpi_com,err)
IF (irank+1<isize) CALL MPI_SEND(tmp,1,MPI_INTEGER,irank+1,0,mat%blacsdata%mpi_com,err)
#endif
END SUBROUTINE print_matrix
......@@ -89,18 +98,18 @@ CONTAINS
INCLUDE 'mpif.h'
INTEGER, EXTERNAL :: numroc, indxl2g !SCALAPACK functions
CALL MPI_COMM_RANK(mat%mpi_com,myid,err)
CALL MPI_COMM_SIZE(mat%mpi_com,np,err)
CALL MPI_COMM_RANK(mat%blacsdata%mpi_com,myid,err)
CALL MPI_COMM_SIZE(mat%blacsdata%mpi_com,np,err)
CALL blacs_gridinfo(mat%blacs_desc(2),nprow,npcol,myrow,mycol)
CALL blacs_gridinfo(mat%blacsdata%blacs_desc(2),nprow,npcol,myrow,mycol)
!Set lower part of matrix to zero
DO i=1,mat%matsize1
DO j=1,mat%matsize2
! Get global column corresponding to i and number of local rows up to
! and including the diagonal, these are unchanged in A
i_glob = indxl2g(i, mat%blacs_desc(5), myrow, 0, nprow)
j_glob = indxl2g(j, mat%blacs_desc(6), mycol, 0, npcol)
i_glob = indxl2g(i, mat%blacsdata%blacs_desc(5), myrow, 0, nprow)
j_glob = indxl2g(j, mat%blacsdata%blacs_desc(6), mycol, 0, npcol)
IF (i_glob>j_glob) THEN
IF (mat%l_real) THEN
......@@ -126,13 +135,13 @@ CONTAINS
ALLOCATE(tmp_c(mat%matsize1,mat%matsize2))
tmp_c=mat%data_c
END IF
CALL MPI_BARRIER(mat%mpi_com,i)
CALL MPI_BARRIER(mat%blacsdata%mpi_com,i)
IF (mat%l_real) THEN
#ifdef CPP_SCALAPACK
CALL pdgeadd('t',mat%global_size1,mat%global_size2,1.0,tmp_r,1,1,mat%blacs_desc,1.0,mat%data_r,1,1,mat%blacs_desc)
CALL pdgeadd('t',mat%global_size1,mat%global_size2,1.0,tmp_r,1,1,mat%blacsdata%blacs_desc,1.0,mat%data_r,1,1,mat%blacsdata%blacs_desc)
ELSE
CALL pzgeadd('c',mat%global_size1,mat%global_size2,CMPLX(1.0,0.0),tmp_c,1,1,mat%blacs_desc,CMPLX(1.0,0.0),mat%data_c,1,1,mat%blacs_desc)
CALL pzgeadd('c',mat%global_size1,mat%global_size2,CMPLX(1.0,0.0),tmp_c,1,1,mat%blacsdata%blacs_desc,CMPLX(1.0,0.0),mat%data_c,1,1,mat%blacsdata%blacs_desc)
#endif
END IF
......@@ -150,8 +159,8 @@ CONTAINS
SELECT TYPE(mat1)
TYPE IS (t_mpimat)
#ifdef CPP_MPI
CALL MPI_COMM_RANK(mat%mpi_com,n_rank,i)
CALL MPI_COMM_SIZE(mat%mpi_com,n_size,i)
CALL MPI_COMM_RANK(mat%blacsdata%mpi_com,n_rank,i)
CALL MPI_COMM_SIZE(mat%blacsdata%mpi_com,n_size,i)
#endif
!Set lower part of matrix to zero...
ii=0
......@@ -168,9 +177,9 @@ CONTAINS
IF (mat%l_real) THEN
#ifdef CPP_SCALAPACK
CALL pdgeadd('t',mat1%global_size1,mat1%global_size2,1.0,mat1%data_r,1,1,mat1%blacs_desc,1.0,mat%data_r,1,1,mat%blacs_desc)
CALL pdgeadd('t',mat1%global_size1,mat1%global_size2,1.0,mat1%data_r,1,1,mat1%blacsdata%blacs_desc,1.0,mat%data_r,1,1,mat%blacsdata%blacs_desc)
ELSE
CALL pzgeadd('c',mat1%global_size1,mat1%global_size2,CMPLX(1.0,0.0),mat1%data_c,1,1,mat1%blacs_desc,CMPLX(1.0,0.0),mat%data_c,1,1,mat1%blacs_desc)
CALL pzgeadd('c',mat1%global_size1,mat1%global_size2,CMPLX(1.0,0.0),mat1%data_c,1,1,mat1%blacsdata%blacs_desc,CMPLX(1.0,0.0),mat%data_c,1,1,mat1%blacsdata%blacs_desc)
#endif
END IF
!Now multiply the diagonal of the matrix by 1/2
......@@ -199,9 +208,9 @@ CONTAINS
SELECT TYPE(mat1)
TYPE IS(t_mpimat)
IF (mat%l_real) THEN
CALL pdgemr2d(Mat1%global_size1,mat1%global_size2,mat1%data_r,1,1,mat1%blacs_desc,mat%data_r,n1,n2,mat%blacs_desc,mat%blacs_ctext)
CALL pdgemr2d(Mat1%global_size1,mat1%global_size2,mat1%data_r,1,1,mat1%blacsdata%blacs_desc,mat%data_r,n1,n2,mat%blacsdata%blacs_desc,mat%blacsdata%blacs_desc(2))
ELSE
CALL pzgemr2d(mat1%global_size1,mat1%global_size2,mat1%data_c,1,1,mat1%blacs_desc,mat%data_c,n1,n2,mat%blacs_desc,mat%blacs_ctext)
CALL pzgemr2d(mat1%global_size1,mat1%global_size2,mat1%data_c,1,1,mat1%blacsdata%blacs_desc,mat%data_c,n1,n2,mat%blacsdata%blacs_desc,mat%blacsdata%blacs_desc(2))
END IF
CLASS DEFAULT
CALL judft_error("Wrong datatype in copy")
......@@ -218,14 +227,14 @@ CONTAINS
#ifdef CPP_SCALAPACK
blacs_desc=(/1,-1,mat1%matsize1,mat1%matsize2,mat1%matsize1,mat1%matsize2,0,0,mat1%matsize1/)
CALL MPI_COMM_RANK(mat%mpi_com,irank,ierr)
CALL MPI_COMM_RANK(mat%blacsdata%mpi_com,irank,ierr)
umap(1,1)=0
CALL BLACS_GET(mat%blacs_ctext,10,blacs_desc(2))
CALL BLACS_GET(mat%blacsdata%blacs_desc(2),10,blacs_desc(2))
CALL BLACS_GRIDMAP(blacs_desc(2),umap,1,1,1)
IF (mat%l_real) THEN
CALL pdgemr2d(Mat1%matsize1,mat1%matsize2,mat1%data_r,1,1,blacs_desc,mat%data_r,1,1,mat%blacs_desc,mat%blacs_ctext)
CALL pdgemr2d(Mat1%matsize1,mat1%matsize2,mat1%data_r,1,1,blacs_desc,mat%data_r,1,1,mat%blacsdata%blacs_desc,mat%blacsdata%blacs_desc(2))
ELSE
CALL pzgemr2d(mat1%matsize1,mat1%matsize2,mat1%data_c,1,1,blacs_desc,mat%data_c,1,1,mat%blacs_desc,mat%blacs_ctext)
CALL pzgemr2d(mat1%matsize1,mat1%matsize2,mat1%data_c,1,1,blacs_desc,mat%data_c,1,1,mat%blacsdata%blacs_desc,mat%blacsdata%blacs_desc(2))
END IF
#endif
END SUBROUTINE from_non_dist
......@@ -239,15 +248,29 @@ CONTAINS
CALL mat%copy(mat1,1,1)
END SUBROUTINE mpimat_move
SUBROUTINE finalize(mat)
IMPLICIT NONE
TYPE(t_mpimat),INTENT(INOUT) :: mat
CALL mpimat_free(mat)
END SUBROUTINE finalize
SUBROUTINE mpimat_free(mat)
IMPLICIT NONE
CLASS(t_mpimat),INTENT(INOUT) :: mat
INTEGER :: ierr
IF (ALLOCATED(mat%data_r)) DEALLOCATE(mat%data_r)
IF (ALLOCATED(mat%data_c)) DEALLOCATE(mat%data_c)
IF (ASSOCIATED(mat%blacsdata)) THEN
IF (mat%blacsdata%no_use>1) THEN
mat%blacsdata%no_use=mat%blacsdata%no_use-1
mat%blacsdata=>null()
ELSE
#ifdef CPP_SCALAPACK
CALL BLACS_GRIDEXIT(mat%blacs_ctext,ierr)
CALL BLACS_GRIDEXIT(mat%blacsdata%blacs_desc(2),ierr)
DEALLOCATE(mat%blacsdata)
#endif
END IF
ENDIF
END SUBROUTINE mpimat_free
!>Initialization of the distributed matrix.
......@@ -272,16 +295,18 @@ CONTAINS
CALL judft_error("Optional arguments must be present in mpimat_init")
mat%global_size1=matsize1
mat%global_size2=matsize2
mat%mpi_com=mpi_subcom
CALL priv_create_blacsgrid(mat%mpi_com,l_2d,matsize1,matsize2,nbx,nby,&
mat%blacs_ctext,mat%blacs_desc,&
ALLOCATE(mat%blacsdata)
mat%blacsdata%no_use=1
mat%blacsdata%mpi_com=mpi_subcom
CALL priv_create_blacsgrid(mat%blacsdata%mpi_com,l_2d,matsize1,matsize2,nbx,nby,&
mat%blacsdata%blacs_desc,&
mat%matsize1,mat%matsize2,&
mat%npcol,mat%nprow)
mat%blacsdata%npcol,mat%blacsdata%nprow)
CALL mat%alloc(l_real) !Attention,sizes determined in call to priv_create_blacsgrid
!check if this matrix is actually distributed over MPI_COMM_SELF
IF (mpi_subcom==MPI_COMM_SELF) THEN
CALL MPI_COMM_RANK(mpi_subcom,irank,ierr)
IF (irank>0) mat%blacs_desc(2)=-1
IF (irank>0) mat%blacsdata%blacs_desc(2)=-1
END IF
#endif
END SUBROUTINE mpimat_init
......@@ -298,11 +323,8 @@ CONTAINS
mat%matsize2=templ%matsize2
mat%global_size1=templ%global_size1
mat%global_size2=templ%global_size2
mat%mpi_com=templ%mpi_com
mat%blacs_desc=templ%blacs_desc
mat%blacs_ctext=templ%blacs_ctext
mat%npcol=templ%npcol
mat%nprow=templ%nprow
mat%blacsdata=>templ%blacsdata
mat%blacsdata%no_use=mat%blacsdata%no_use+1
IF (mat%l_real) THEN
ALLOCATE(mat%data_r(mat%matsize1,mat%matsize2))
ALLOCATE(mat%data_c(0,0))
......@@ -316,13 +338,13 @@ CONTAINS
END SUBROUTINE mpimat_init_template
SUBROUTINE priv_create_blacsgrid(mpi_subcom,l_2d,m1,m2,nbc,nbr,ictextblacs,sc_desc,local_size1,local_size2,nprow,npcol)
SUBROUTINE priv_create_blacsgrid(mpi_subcom,l_2d,m1,m2,nbc,nbr,sc_desc,local_size1,local_size2,nprow,npcol)
IMPLICIT NONE
INTEGER,INTENT(IN) :: mpi_subcom
INTEGER,INTENT(IN) :: m1,m2
INTEGER,INTENT(INOUT)::nbc,nbr
LOGICAL,INTENT(IN) :: l_2d
INTEGER,INTENT(OUT):: ictextblacs,sc_desc(:)
INTEGER,INTENT(OUT):: sc_desc(:)
INTEGER,INTENT(OUT):: local_size1,local_size2
INTEGER,INTENT(OUT):: npcol,nprow
......@@ -331,7 +353,7 @@ CONTAINS
INTEGER :: myrowssca,mycolssca,myrow,mycol
INTEGER :: iamblacs,npblacs,np,myid
INTEGER :: nprow2,npcol2,myrowblacs,mycolblacs
INTEGER :: k,i,j
INTEGER :: k,i,j,ictextblacs
INTEGER :: ierr
INTEGER,ALLOCATABLE :: iblacsnums(:),ihelp(:),iusermap(:,:)
......
......@@ -101,8 +101,6 @@ module m_VYukawaFilm
real :: ga(vacuum%nmzd), gb(vacuum%nmzd)
real :: delta(vacuum%nmzd,2), epsilon(vacuum%nmzd,2), zeta(vacuum%nmzd)
intrinsic aimag, cmplx, conjg, exp, real, mod
! definitions / initialisations:
do iz = 1, vacuum%nmz
......@@ -239,8 +237,6 @@ module m_VYukawaFilm
real :: z(3*stars%mx3)
real :: g_damped(stars%ng2), vcons2(stars%ng2)
intrinsic abs, cmplx, conjg, cos, exp, sin, sqrt
! definitions / initialisations
ifft = 3 * stars%mx3
......
......@@ -33,9 +33,6 @@ C .. Local Scalars ..
COMPLEX em1f,em2f,ep1f,ep2f
REAL cph,rxy,small,sph,x,xy,y
C ..
C .. Intrinsic Functions ..
INTRINSIC cmplx,conjg,sqrt
C ..
C .. Data Statements ..
DATA small/1.0e-12/
c.....------------------------------------------------------------------
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment