Semi-Lagrangian Library
Modular library for kinetic and gyrokinetic simulations of plasmas in fusion energy devices.
sll_m_xdmf_light_parallel.F90
!------------------------------------------------------------------------------
! SELALIB
!------------------------------------------------------------------------------
! MODULE: sll_m_xdmf_light_parallel
!
! DESCRIPTION:
!> Construct the XML component of an XDMF database (parallel).
!------------------------------------------------------------------------------

module sll_m_xdmf_light_parallel
!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#include "sll_working_precision.h"

   use sll_m_collective, only: &
      sll_t_collective_t, &
      sll_f_get_collective_rank, &
      sll_f_get_collective_size, &
      sll_o_collective_bcast, &
      sll_o_collective_gather

   use sll_m_xdmf_light_serial, only: &
      sll_t_xdmf_file

   use mpi, only: &
      mpi_any_source, &
      mpi_any_tag, &
      mpi_character, &
      mpi_integer, &
      mpi_recv, &
      mpi_send, &
      mpi_source, &
      mpi_status_size

   implicit none

   public :: &
      sll_t_xdmf_parallel_file

   private
!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
!==============================================================================

   !> Maximum length of variable-length strings to be passed through MPI
   integer, parameter :: maxlen = 256

   !----------------------------------------------------------------------------
   !> Construct the XML component of an XDMF database (parallel)
   type :: sll_t_xdmf_parallel_file

      type(sll_t_collective_t), pointer :: comm => null()
      sll_int32 :: rank = -1
      type(sll_t_xdmf_file), allocatable :: xdmf_file

   contains
      procedure :: init => t_xdmf_parallel__init
      procedure :: write => t_xdmf_parallel__write
      procedure :: delete => t_xdmf_parallel__delete
      procedure :: add_grid => t_xdmf_parallel__add_grid
      procedure :: add_field => t_xdmf_parallel__add_field

   end type sll_t_xdmf_parallel_file

!==============================================================================
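   !----------------------------------------------------------------------------
   ! Lifecycle sketch (summary of the type-bound procedures below):
   !
   !    call f%init(time, comm)     ! all ranks; rank 0 allocates serial file
   !    call f%add_grid(..., gid)   ! collective: gid broadcast to all ranks
   !    call f%add_field(gid, ...)  ! collective: metadata funneled to rank 0
   !    call f%write(fname)         ! rank 0 writes the .xmf file
   !    call f%delete()             ! rank 0 frees serial file; fields reset
   !----------------------------------------------------------------------------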
contains
!==============================================================================

   subroutine t_xdmf_parallel__init(self, time, comm)
      class(sll_t_xdmf_parallel_file), intent(out) :: self
      sll_real64, intent(in) :: time
      type(sll_t_collective_t), pointer :: comm

      ! Fill in fields
      self%comm => comm
      self%rank = sll_f_get_collective_rank(comm)

      ! MASTER: Allocate and initialize sequential XDMF file
      if (self%rank == 0) then
         allocate (self%xdmf_file)
         call self%xdmf_file%init(time)
      end if

   end subroutine t_xdmf_parallel__init
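   ! NOTE: only rank 0 allocates the underlying sequential sll_t_xdmf_file;
   ! on all other ranks 'xdmf_file' stays unallocated, so the XML bookkeeping
   ! is entirely funneled to the master.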
   !----------------------------------------------------------------------------
   subroutine t_xdmf_parallel__write(self, fname)
      class(sll_t_xdmf_parallel_file), intent(in) :: self
      character(len=*), intent(in) :: fname

      ! MASTER: write XDMF file
      if (self%rank == 0) then
         call self%xdmf_file%write(fname)
      end if

   end subroutine t_xdmf_parallel__write
   !----------------------------------------------------------------------------
   subroutine t_xdmf_parallel__delete(self)
      class(sll_t_xdmf_parallel_file), intent(inout) :: self

      ! MASTER: delete and deallocate sequential XDMF file
      if (self%rank == 0) then
         if (allocated(self%xdmf_file)) then
            call self%xdmf_file%delete()
            deallocate (self%xdmf_file)
         end if
      end if

      ! Reset internal fields
      self%comm => null()
      self%rank = -1

   end subroutine t_xdmf_parallel__delete
   !----------------------------------------------------------------------------
   subroutine t_xdmf_parallel__add_grid(self, grid_name, x1_path, x2_path, dims, gid)
      class(sll_t_xdmf_parallel_file), intent(inout) :: self
      character(len=*), intent(in) :: grid_name
      character(len=*), intent(in) :: x1_path
      character(len=*), intent(in) :: x2_path
      sll_int32, intent(in) :: dims(2)
      sll_int32, intent(out) :: gid

      sll_int32 :: buf_gid(1)

      ! MASTER: add grid to sequential XDMF file
      ! TODO: other processes might want to add a grid
      if (self%rank == 0) then
         call self%xdmf_file%add_grid(grid_name, x1_path, x2_path, dims, gid)
         buf_gid(1) = gid
      end if

      ! Broadcast grid ID to all other processes
      call sll_o_collective_bcast(self%comm, buf_gid, 0)
      gid = buf_gid(1)

   end subroutine t_xdmf_parallel__add_grid
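   ! NOTE: the broadcast above is collective, so add_grid must be called by
   ! every rank of 'comm'; on return all ranks hold the same grid ID, e.g.
   ! (sketch, with illustrative dataset paths):
   !
   !    call f%add_grid('mesh2d', 'mesh.h5:/x1', 'mesh.h5:/x2', [nx, ny], gid)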
   !----------------------------------------------------------------------------
   subroutine t_xdmf_parallel__add_field(self, &
                                         grid_id, field_name, field_path, &
                                         to_file)

      class(sll_t_xdmf_parallel_file), intent(inout) :: self
      sll_int32, intent(in) :: grid_id
      character(len=*), intent(in) :: field_name
      character(len=*), intent(in) :: field_path
      logical, intent(in) :: to_file

      sll_int32 :: i ! loop index
      sll_int32 :: nprocs ! number of processes
      sll_int32 :: comm ! MPI communicator
      sll_int32 :: ierr ! MPI error code
      logical :: buf(1) ! send buffer for gather
      logical, allocatable :: recbuf(:) ! receive buffer for gather
      character(len=11) :: rank_str ! rank converted to string
      sll_int32 :: stat(mpi_status_size)
      sll_int32 :: sender_rank

      ! Send/receive buffers (receive for master, send for all others)
      sll_int32 :: buf_gid ! grid_id
      character(len=maxlen) :: buf_fn ! field_name
      character(len=maxlen) :: buf_fp ! field_path

      ! Some info about communicator
      nprocs = sll_f_get_collective_size(self%comm)
      comm = self%comm%comm

      ! Fill in send buffer (all processes)
      buf(1) = to_file

      ! MASTER ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
      if (self%rank == 0) then
         ! Allocate receive buffer for 'to_file' logicals, then gather all of them
         allocate (recbuf(0:nprocs - 1))
         call sll_o_collective_gather(self%comm, buf, 0, recbuf)
         !print *, "PROC #0: gather"

         ! Write array info on XDMF file
         if (to_file) then
            call self%xdmf_file%add_field(grid_id, field_name, field_path)
            !print *, "PROC #0: write array info to xmf"
         end if
         !print *, "PROC #0: recbuf = ", recbuf
         if (nprocs > 1) then
            ! Receive data from other processes, and write array info to XDMF file
            do i = 1, count(recbuf(1:))

               ! Receive grid ID from any process, then field name/path from same proc

               ! MPI_RECV( BUF, COUNT, DATATYPE, SOURCE, TAG, COMM, STATUS, IERROR )
               call mpi_recv(buf_gid, 1, mpi_integer, mpi_any_source, &
                             mpi_any_tag, comm, stat, ierr)

               sender_rank = stat(mpi_source)

               call mpi_recv(buf_fn, maxlen, mpi_character, sender_rank, &
                             mpi_any_tag, comm, stat, ierr)

               call mpi_recv(buf_fp, maxlen, mpi_character, sender_rank, &
                             mpi_any_tag, comm, stat, ierr)

               write (rank_str, '(i8)') sender_rank
               !print *, "PROC #0: data received from processor "// &
               !   trim( adjustl( rank_str ) )

               ! Add field to sequential XDMF file
               call self%xdmf_file%add_field(buf_gid, trim(buf_fn), trim(buf_fp))
               !print *, "PROC #0: field added to XDMF file"

            end do
         end if
      ! NOT MASTER ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
      else
         allocate (recbuf(0)) ! allocate empty buffer (ifort complains otherwise)
         call sll_o_collective_gather(self%comm, buf, 0, recbuf)
         write (rank_str, '(i8)') self%rank
         !print *, "PROC #"//trim( adjustl( rank_str ) )//": gather"

         if (to_file) then
            ! Fill in send buffers
            buf_gid = grid_id
            buf_fn = adjustl(field_name)
            buf_fp = adjustl(field_path)

            ! Send data to master
            ! MPI_SEND( BUF, COUNT, DATATYPE, DEST, TAG, COMM, IERROR )
            call mpi_send(buf_gid, 1, mpi_integer, 0, 9, comm, ierr)
            call mpi_send(buf_fn, maxlen, mpi_character, 0, 9, comm, ierr)
            call mpi_send(buf_fp, maxlen, mpi_character, 0, 9, comm, ierr)
            !print *, "PROC #"//trim( adjustl( rank_str ) )//": send"
         end if

      end if

   end subroutine t_xdmf_parallel__add_field
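   ! Summary of the message protocol implemented above:
   !
   !    all ranks : gather the 'to_file' flags on rank 0
   !    rank r>0  : if (to_file) send grid ID, field name and field path
   !                to rank 0 (three messages, tag 9)
   !    rank 0    : for each .true. entry in recbuf(1:), receive a grid ID
   !                from ANY source, then name/path from that same sender,
   !                and add the field to the sequential XDMF file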

end module sll_m_xdmf_light_parallel
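For reference, here is a minimal driver sketching how the type above is meant to be used. It is not part of the module: the entry points sll_s_boot_collective, sll_s_halt_collective and sll_v_world_collective are assumed to be provided by sll_m_collective, and the HDF5 datasets behind the path arguments (mesh.h5, data.h5) are assumed to have been written separately.

! Hypothetical usage sketch; the boot/halt entry points and the dataset paths
! are assumptions, not part of this module.
program xdmf_parallel_example
#include "sll_working_precision.h"
   use sll_m_collective, only: sll_s_boot_collective, sll_s_halt_collective, &
                               sll_f_get_collective_rank, sll_v_world_collective
   use sll_m_xdmf_light_parallel, only: sll_t_xdmf_parallel_file
   implicit none

   type(sll_t_xdmf_parallel_file) :: xdmf
   sll_int32 :: gid, my_rank

   call sll_s_boot_collective()   ! assumed MPI bootstrap
   my_rank = sll_f_get_collective_rank(sll_v_world_collective)

   ! All ranks construct the handle; rank 0 owns the serial XML tree (t = 0)
   call xdmf%init(0.0_f64, sll_v_world_collective)

   ! Collective: rank 0 registers the grid, gid is broadcast to every rank
   call xdmf%add_grid('mesh2d', 'mesh.h5:/x1', 'mesh.h5:/x2', [64, 32], gid)

   ! Collective: every rank must call add_field; only ranks passing
   ! to_file=.true. contribute an entry (a single one here, to avoid duplicates)
   call xdmf%add_field(gid, 'density', 'data.h5:/density', &
                       to_file=(my_rank == 0))

   call xdmf%write('output.xmf')  ! rank 0 writes the light-data file
   call xdmf%delete()
   call sll_s_halt_collective()   ! assumed MPI shutdown
end program xdmf_parallel_example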