Commit 2e771840
authored May 29, 2019 by Daniel Wortmann

Merge branch 'develop' of iffgit.fz-juelich.de:fleur/fleur into develop

Parents: 59e03acc, e2a9c5ec

Showing 5 changed files with 53 additions and 41 deletions (+53 -41):
diagonalization/eigen_diag.F90    +4   -4
diagonalization/elpa.F90         +11   -9
diagonalization/scalapack.F90    +11  -11
eigen/eigen.F90                  +18   -2
io/eig66_mpi.F90                  +9  -15
diagonalization/eigen_diag.F90

@@ -30,9 +30,9 @@ CONTAINS
     IMPLICIT NONE
     INTEGER,                   INTENT(INOUT) :: solver
     CLASS(t_mat),              INTENT(INOUT) :: smat, hmat
-    CLASS(t_mat), ALLOCATABLE, INTENT(OUT)   :: ev
-    INTEGER,                   INTENT(INOUT) :: ne
-    REAL,                      INTENT(OUT)   :: eig(:)
+    CLASS(t_mat), ALLOCATABLE, INTENT(OUT)   :: ev      ! eigenvectors
+    INTEGER,                   INTENT(INOUT) :: ne      ! number of eigenpairs to be found
+    REAL,                      INTENT(OUT)   :: eig(:)  ! eigenvalues
     !Only for chase
     INTEGER, OPTIONAL,         INTENT(IN)    :: ikpt

@@ -64,7 +64,7 @@ CONTAINS
     CASE (diag_scalapack)
        CALL scalapack(hmat,smat,ne,eig,ev)
     CASE (diag_magma)
-       !CALL magma_diag(hmat,smat,ne,eig,ev)
+       CALL magma_diag(hmat,smat,ne,eig,ev)
     CASE (diag_cusolver)
        CALL cusolver_diag(hmat,smat,ne,eig,ev)
     CASE (diag_lapack)
diagonalization/elpa.F90

@@ -18,8 +18,8 @@ CONTAINS
   ! ne ....... number of ev's searched (and found) on this node
   !            On input, overall number of ev's searched,
   !            On output, local number of ev's found
-  ! eig ...... eigenvalues, output
-  ! ev ....... eigenvectors, output
+  ! eig ...... all eigenvalues, output
+  ! ev ....... local eigenvectors, output
   !
   !----------------------------------------------------

@@ -453,21 +453,23 @@ CONTAINS
       CALL MPI_COMM_FREE(mpi_comm_cols,err)
 #endif
       !
-      ! Put those eigenvalues expected by chani to eig, i.e. for
-      ! process i these are eigenvalues i+1, np+i+1, 2*np+i+1...
-      ! Only num=num2/np eigenvalues per process
+      ! Each process has all eigenvalues in output
+      eig(:num2) = eig2(:num2)
+      DEALLOCATE(eig2)
       !
+      ! Redistribute eigenvectors from ScaLAPACK distribution to each process, i.e. for
+      ! process i these are eigenvectors i+1, np+i+1, 2*np+i+1...
+      ! Only num=num2/np eigenvectors per process
+      !
       num = FLOOR(REAL(num2)/np)
       IF (myid.LT.num2-(num2/np)*np) num = num+1
       ne = 0
       DO i = myid+1, num2, np
          ne = ne+1
-         eig(ne) = eig2(i)
+         !eig(ne)=eig2(i)
       ENDDO
-      DEALLOCATE(eig2)
       !
-      ! Redistribute eigvec from ScaLAPACK distribution to each process
-      ! having all eigenvectors corresponding to his eigenvalues as above
-      !
       ALLOCATE(t_mpimat::ev)
       CALL ev%init(hmat%l_real,hmat%global_size1,hmat%global_size1,hmat%blacsdata%mpi_com,.FALSE.)
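The comments in this hunk (and in the identical scalapack.F90 hunk below) describe a round-robin layout: process i keeps eigenvectors i+1, np+i+1, 2*np+i+1, and so on, while the eigenvalue array now holds all eigenvalues on every process. The counting logic is easy to verify with a standalone sketch; num2 and np below are demo values, not taken from FLEUR:

    PROGRAM round_robin_demo
       IMPLICIT NONE
       INTEGER :: myid, np, num2, num, i
       num2 = 10   ! total number of eigenpairs (demo value)
       np   = 4    ! number of MPI processes   (demo value)
       DO myid = 0, np-1
          ! Same counting logic as in the hunk above: num2/np eigenvectors
          ! per process, plus one for the first MOD(num2,np) ranks.
          num = FLOOR(REAL(num2)/np)
          IF (myid < num2-(num2/np)*np) num = num+1
          WRITE(*,'(A,I0,A,I0,A)',ADVANCE='NO') 'process ', myid, ' holds ', num, ' eigenvectors:'
          ! Round-robin index set of process myid (1-based indices).
          DO i = myid+1, num2, np
             WRITE(*,'(1X,I0)',ADVANCE='NO') i
          ENDDO
          WRITE(*,*)
       ENDDO
    END PROGRAM round_robin_demo

With num2 = 10 and np = 4 this prints indices 1 5 9 for process 0, 2 6 10 for process 1, 3 7 for process 2, and 4 8 for process 3, matching the num computed above.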
diagonalization/scalapack.F90

@@ -17,8 +17,8 @@ CONTAINS
   ! ne ....... number of ev's searched (and found) on this node
   !            On input, overall number of ev's searched,
   !            On output, local number of ev's found
-  ! eig ...... eigenvalues, output
-  ! ev ....... eigenvectors, output
+  ! eig ...... all eigenvalues, output
+  ! ev ....... local eigenvectors, output
   !
   !----------------------------------------------------
   !

@@ -238,22 +238,22 @@ CONTAINS
          !ENDIF
       ENDIF
       !
-      ! Put those eigenvalues expected by chani to eig, i.e. for
-      ! process i these are eigenvalues i+1, np+i+1, 2*np+i+1...
-      ! Only num=num2/np eigenvalues per process
+      ! Each process has all eigenvalues in output
+      eig(:num2) = eig2(:num2)
+      DEALLOCATE(eig2)
       !
+      ! Redistribute eigenvectors from ScaLAPACK distribution to each process, i.e. for
+      ! process i these are eigenvectors i+1, np+i+1, 2*np+i+1...
+      ! Only num=num2/np eigenvectors per process
+      !
       num = FLOOR(REAL(num2)/np)
       IF (myid.LT.num2-(num2/np)*np) num = num+1
       ne = 0
       DO i = myid+1, num2, np
          ne = ne+1
-         eig(ne) = eig2(i)
+         !eig(ne)=eig2(i)
       ENDDO
-      DEALLOCATE(eig2)
       !
-      ! Redistribute eigvec from ScaLAPACK distribution to each process
-      ! having all eigenvectors corresponding to his eigenvalues as above
-      !
       ALLOCATE(t_mpimat::ev)
       CALL ev%init(ev_dist%l_real,ev_dist%global_size1,ev_dist%global_size1,ev_dist%blacsdata%mpi_com,.FALSE.)
       CALL ev%copy(ev_dist,1,1)
eigen/eigen.F90

@@ -199,6 +199,12 @@ CONTAINS
            end select
          END IF
+         ! Solve generalized eigenvalue problem.
+         ! ne_all ... number of eigenpairs searched (and found) on this node
+         !            on input, overall number of eigenpairs searched,
+         !            on output, local number of eigenpairs found
+         ! eig ...... all eigenvalues, output
+         ! zMat ..... local eigenvectors, output
          CALL eigen_diag(solver,hmat,smat,ne_all,eig,zMat,nk,jsp,iter)
          CALL smat%free()

@@ -220,8 +226,18 @@ CONTAINS
          IF (.NOT.zMat%l_real) THEN
             zMat%data_c(:lapw%nmat,:ne_found) = CONJG(zMat%data_c(:lapw%nmat,:ne_found))
          END IF
-         CALL write_eig(eig_id,nk,jsp,ne_found,ne_all,&
-                        eig(:ne_all),n_start=mpi%n_size,n_end=mpi%n_rank,zMat=zMat)
+         IF (mpi%n_rank == 0) THEN
+            ! Only process 0 writes out the value of ne_all and the
+            ! eigenvalues.
+            ! Trying to use MPI_PUT for the very same slot by all processes
+            ! causes problems with IntelMPI/2019
+            ! Mai 2019  U. Alekseeva
+            CALL write_eig(eig_id,nk,jsp,ne_found,ne_all,&
+                           eig(:ne_found),n_start=mpi%n_size,n_end=mpi%n_rank,zMat=zMat)
+         ELSE
+            CALL write_eig(eig_id,nk,jsp,ne_found,&
+                           n_start=mpi%n_size,n_end=mpi%n_rank,zMat=zMat)
+         ENDIF
          neigBuffer(nk,jsp) = ne_found
 #if defined(CPP_MPI)
          ! RMA synchronization
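The new comment records why only rank 0 now writes ne_all and the eigenvalues: concurrent MPI_PUT calls from every process to the very same RMA slot misbehave with IntelMPI/2019. The single-writer pattern adopted here looks roughly like the self-contained sketch below; the window contents, values, and the 4-byte INTEGER displacement unit are demo assumptions, not FLEUR code:

    PROGRAM single_writer_demo
       USE mpi
       IMPLICIT NONE
       INTEGER :: ierr, myid, win
       INTEGER :: val(1), buf(1)
       INTEGER(MPI_ADDRESS_KIND) :: winsize, disp

       CALL MPI_INIT(ierr)
       CALL MPI_COMM_RANK(MPI_COMM_WORLD, myid, ierr)

       buf = -1                    ! the shared slot, exposed by every rank
       winsize = 4                 ! bytes; assumes a 4-byte default INTEGER
       CALL MPI_WIN_CREATE(buf, winsize, 4, MPI_INFO_NULL, MPI_COMM_WORLD, win, ierr)

       IF (myid == 0) THEN
          ! Single writer: letting every rank MPI_PUT into the same slot is
          ! exactly what the commit says breaks with IntelMPI/2019.
          val = 42
          disp = 0
          CALL MPI_WIN_LOCK(MPI_LOCK_EXCLUSIVE, 0, 0, win, ierr)
          CALL MPI_PUT(val, 1, MPI_INTEGER, 0, disp, 1, MPI_INTEGER, win, ierr)
          CALL MPI_WIN_UNLOCK(0, win, ierr)
       END IF

       CALL MPI_BARRIER(MPI_COMM_WORLD, ierr)
       IF (myid == 0) WRITE(*,*) 'slot value after the put:', buf(1)

       CALL MPI_WIN_FREE(win, ierr)
       CALL MPI_FINALIZE(ierr)
    END PROGRAM single_writer_demo

In FLEUR the same role is played by the eig66_mpi layer shown below, with write_eig as the entry point.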
io/eig66_mpi.F90

@@ -265,7 +265,8 @@ CONTAINS
       pe = d%pe_basis(nk,jspin)
       slot = d%slot_basis(nk,jspin)
-      !write the number of eigenvalues values
+      !write the number of eigenvalues
+      !only one process needs to do it
       IF (PRESENT(neig_total)) THEN
          CALL MPI_WIN_LOCK(MPI_LOCK_EXCLUSIVE,pe,0,d%neig_handle,e)
          ALLOCATE(tmp_int(1))
@@ -275,25 +276,15 @@ CONTAINS
          DEALLOCATE(tmp_int)
       ENDIF
       !write the eigenvalues
+      !only one process needs to do it
       IF (PRESENT(eig).OR.PRESENT(w_iks)) THEN
          ALLOCATE(tmp_real(d%size_eig))
          tmp_real = 1E99
          if (PRESENT(EIG)) THEN
-            n1 = 1; n3 = 1
-            IF (PRESENT(n_rank)) n1 = n_rank+1
-            IF (PRESENT(n_size)) n3 = n_size
-            n2 = SIZE(eig)*n3+n1-1
-            nn = 1
-            DO n = n1, min(n2,d%size_eig), n3
-               tmp_real(n) = eig(nn)
-               nn = nn+1
-            ENDDO
+            tmp_real(:d%size_eig) = eig(:d%size_eig)
             CALL MPI_WIN_LOCK(MPI_LOCK_EXCLUSIVE,pe,0,d%eig_handle,e)
-            IF (n3.ne.1) THEN
-               CALL MPI_ACCUMULATE(tmp_real,d%size_eig,MPI_DOUBLE_PRECISION,pe,slot,d%size_eig,MPI_DOUBLE_PRECISION,MPI_MIN,d%eig_handle,e)
-            ELSE
-               CALL MPI_PUT(tmp_real,d%size_eig,MPI_DOUBLE_PRECISION,pe,slot,d%size_eig,MPI_DOUBLE_PRECISION,d%eig_handle,e)
-            ENDIF
+            CALL MPI_PUT(tmp_real,d%size_eig,MPI_DOUBLE_PRECISION,pe,slot,d%size_eig,MPI_DOUBLE_PRECISION,d%eig_handle,e)
             CALL MPI_WIN_UNLOCK(pe,d%eig_handle,e)
          END if
          IF (PRESENT(w_iks)) THEN
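The branch removed above implemented the old many-writers scheme: each rank filled only its strided slots of tmp_real, left the rest at the 1E99 sentinel, and combined all contributions in the target window with MPI_ACCUMULATE and MPI_MIN, where the sentinel loses every comparison. A self-contained sketch of that technique (slot count, values, and the 8-byte displacement unit are demo assumptions, not from FLEUR):

    PROGRAM min_accumulate_demo
       USE mpi
       IMPLICIT NONE
       INTEGER, PARAMETER :: nslots = 8
       INTEGER :: ierr, myid, np, win, n
       DOUBLE PRECISION :: tmp(nslots), buf(nslots)
       INTEGER(MPI_ADDRESS_KIND) :: winsize, disp

       CALL MPI_INIT(ierr)
       CALL MPI_COMM_RANK(MPI_COMM_WORLD, myid, ierr)
       CALL MPI_COMM_SIZE(MPI_COMM_WORLD, np, ierr)

       buf = 1D99                      ! window starts at the sentinel
       winsize = 8*nslots              ! bytes; assumes 8-byte doubles
       CALL MPI_WIN_CREATE(buf, winsize, 8, MPI_INFO_NULL, MPI_COMM_WORLD, win, ierr)

       tmp = 1D99                      ! sentinel everywhere ...
       DO n = myid+1, nslots, np
          tmp(n) = DBLE(100*myid + n)  ! ... except in this rank's strided slots
       ENDDO

       ! MPI_MIN merges the interleaved contributions on rank 0:
       ! genuine data always beats the 1D99 sentinel.
       disp = 0
       CALL MPI_WIN_LOCK(MPI_LOCK_SHARED, 0, 0, win, ierr)
       CALL MPI_ACCUMULATE(tmp, nslots, MPI_DOUBLE_PRECISION, 0, disp, &
                           nslots, MPI_DOUBLE_PRECISION, MPI_MIN, win, ierr)
       CALL MPI_WIN_UNLOCK(0, win, ierr)

       CALL MPI_BARRIER(MPI_COMM_WORLD, ierr)
       IF (myid == 0) WRITE(*,'(8F8.0)') buf
       CALL MPI_WIN_FREE(win, ierr)
       CALL MPI_FINALIZE(ierr)
    END PROGRAM min_accumulate_demo

Every slot of buf on rank 0 ends up holding the single genuine contribution for that slot. The new code no longer needs this reduction, because one process now puts the full eigenvalue array in a single MPI_PUT.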
@@ -304,6 +295,9 @@ CONTAINS
          END IF
          DEALLOCATE(tmp_real)
       ENDIF
+      !write the eigenvectors
+      !all procceses participate
       IF (PRESENT(zmat)) THEN
          tmp_size = zmat%matsize1
          ALLOCATE(tmp_real(tmp_size))