2 c---------------------------------------------------------------------
3 c---------------------------------------------------------------------
c
c NOTE(review): interior of the MPI-IO setup routine; the subroutine
c header and many intervening lines are not visible in this chunk, so
c the enclosing control flow (cell loops, end-ifs) is assumed -- confirm
c against the complete file.
c
7 c---------------------------------------------------------------------
8 c---------------------------------------------------------------------
c local MPI status and datatype-construction scratch arrays
14 integer mstatus(MPI_STATUS_SIZE)
15 integer sizes(4), starts(4), subsizes(4)
16 integer cell_btype(maxcells), cell_ftype(maxcells)
17 integer cell_blength(maxcells)
c string buffers for passing collective-buffering hints to MPI_Info_set
19 character*20 cb_nodes, cb_size
21 integer cell_disp(maxcells)
c broadcast the collective-buffering parameters from the root rank so
c every process builds identical MPI_Info hints
23 call mpi_bcast(collbuf_nodes, 1, MPI_INTEGER,
24 > root, comm_setup, ierr)
26 call mpi_bcast(collbuf_size, 1, MPI_INTEGER,
27 > root, comm_setup, ierr)
29 if (collbuf_nodes .eq. 0) then
c (else-branch lines elided) build an MPI_Info object carrying ROMIO
c collective-buffering hints: cb_nodes and cb_buffer_size
32 write (cb_nodes,*) collbuf_nodes
33 write (cb_size,*) collbuf_size
34 call MPI_Info_create(info, ierr)
35 call MPI_Info_set(info, 'cb_nodes', cb_nodes, ierr)
36 call MPI_Info_set(info, 'cb_buffer_size', cb_size, ierr)
37 call MPI_Info_set(info, 'collective_buffering', 'true', ierr)
c basic element type: 5 double-precision solution components per point
40 call MPI_Type_contiguous(5, MPI_DOUBLE_PRECISION,
42 call MPI_Type_commit(element, ierr)
c NOTE(review): MPI_Type_extent is deprecated (removed in MPI-3);
c MPI_Type_get_extent is the replacement, but it needs an
c MPI_ADDRESS_KIND integer -- verify eltext's declaration before
c migrating.
43 call MPI_Type_extent(element, eltext, ierr)
47 c Outer array dimensions are the same for every cell
53 c 4th dimension is cell number, total of maxcells cells
57 c Internal dimensions of cells can differ slightly between cells
59 subsizes(1) = cell_size(1, c)
60 subsizes(2) = cell_size(2, c)
61 subsizes(3) = cell_size(3, c)
63 c Cell is 4th dimension, 1 cell per cell type to handle varying
64 c cell sub-array sizes
69 c type constructors use 0-based start addresses
77 c Create buftype for a cell
c one 4-D subarray type per cell selects that cell's interior points
c from the in-memory u array (the sizes/starts setup is partly elided)
79 call MPI_Type_create_subarray(4, sizes, subsizes,
80 $ starts, MPI_ORDER_FORTRAN, element,
81 $ cell_btype(c), ierr)
83 c block length and displacement for joining cells -
84 c 1 cell buftype per block, cell buftypes have own displacement
85 c generated from cell number (4th array dimension)
92 c Create combined buftype for all cells
c NOTE(review): MPI_Type_struct is deprecated (removed in MPI-3);
c MPI_Type_create_struct is the replacement, but displacements must
c then be INTEGER(KIND=MPI_ADDRESS_KIND), so cell_disp would need
c retyping -- left as-is pending a full-file change.
94 call MPI_Type_struct(ncells, cell_blength, cell_disp,
95 $ cell_btype, combined_btype, ierr)
96 call MPI_Type_commit(combined_btype, ierr)
c file view: each cell is a 3-D subarray of the full global grid
102 sizes(1) = PROBLEM_SIZE
103 sizes(2) = PROBLEM_SIZE
104 sizes(3) = PROBLEM_SIZE
109 subsizes(1) = cell_size(1, c)
110 subsizes(2) = cell_size(2, c)
111 subsizes(3) = cell_size(3, c)
114 c Starting point in full array of c'th cell
c cell_low is used directly as a 0-based subarray start -- TODO confirm
c its base convention against the elided declarations
116 starts(1) = cell_low(1,c)
117 starts(2) = cell_low(2,c)
118 starts(3) = cell_low(3,c)
120 call MPI_Type_create_subarray(3, sizes, subsizes,
121 $ starts, MPI_ORDER_FORTRAN,
122 $ element, cell_ftype(c), ierr)
c combined filetype stitches all of this rank's cells into one view
127 call MPI_Type_struct(ncells, cell_blength, cell_disp,
128 $ cell_ftype, combined_ftype, ierr)
129 call MPI_Type_commit(combined_ftype, ierr)
c root removes any stale output file, then all ranks open collectively
132 if (node .eq. root) then
133 call MPI_File_delete(filenm, MPI_INFO_NULL, ierr)
137 call MPI_Barrier(comm_solve, ierr)
139 call MPI_File_open(comm_solve,
141 $ MPI_MODE_RDWR+MPI_MODE_CREATE,
142 $ MPI_INFO_NULL, fp, ierr)
144 if (ierr .ne. MPI_SUCCESS) then
145 print *, 'Error opening file'
c install the combined filetype as this rank's file view, passing the
c collective-buffering hints built above via info
149 call MPI_File_set_view(fp, iseek, element,
150 $ combined_ftype, 'native', info, ierr)
152 if (ierr .ne. MPI_SUCCESS) then
153 print *, 'Error setting file view'
167 c---------------------------------------------------------------------
168 c---------------------------------------------------------------------
c
c output_timestep: collectively write the current solution array u to
c the shared output file at element offset iseek, then advance iseek
c by one combined-buftype's worth of elements.  After rd_interval dumps
c the file is read back for verification (tail of routine elided in
c this chunk).
c
170 subroutine output_timestep
172 c---------------------------------------------------------------------
173 c---------------------------------------------------------------------
177 integer mstatus(MPI_STATUS_SIZE)
c one combined_btype gathers this rank's cells from u in a single
c collective write
180 call MPI_File_write_at_all(fp, iseek, u,
181 $ 1, combined_btype, mstatus, ierr)
182 if (ierr .ne. MPI_SUCCESS) then
183 print *, 'Error writing to file'
c advance the offset: type size in bytes divided by the element extent
c (eltext) converts to element units, matching the etype of the view
187 call MPI_Type_size(combined_btype, iosize, ierr)
188 iseek = iseek + iosize/eltext
c count dumps since the last read-back; trigger verification once
c rd_interval dumps have accumulated (reset logic elided)
190 idump_sub = idump_sub + 1
191 if (rd_interval .gt. 0) then
192 if (idump_sub .ge. rd_interval) then
195 call acc_sub_norms(idump+1)
205 c---------------------------------------------------------------------
206 c---------------------------------------------------------------------
c
c acc_sub_norms: read back the last idump_sub data sets from the file
c and accumulate their per-component error norms into xce_sub for
c later averaging.  The loop headers and end statements are elided in
c this chunk.
c
208 subroutine acc_sub_norms(idump_cur)
215 integer ii, m, ichunk
217 integer mstatus(MPI_STATUS_SIZE)
218 double precision xce_single(5)
c index of the first data set in this read-back chunk
220 ichunk = idump_cur - idump_sub + 1
c collective read mirrors the collective write in output_timestep
223 call MPI_File_read_at_all(fp, iseek, u,
224 $ 1, combined_btype, mstatus, ierr)
225 if (ierr .ne. MPI_SUCCESS) then
226 print *, 'Error reading back file'
c on a read failure the file is closed first; the statements that
c follow (presumably an abort) are elided here
227 call MPI_File_close(fp, ierr)
c advance the offset by one data set, in element units
231 call MPI_Type_size(combined_btype, iosize, ierr)
232 iseek = iseek + iosize/eltext
234 if (node .eq. root) print *, 'Reading data set ', ii+ichunk
c accumulate the 5 error-norm components for each data set read
236 call error_norm(xce_single)
238 xce_sub(m) = xce_sub(m) + xce_single(m)
245 c---------------------------------------------------------------------
246 c---------------------------------------------------------------------
c
c btio_cleanup: close the shared MPI-IO output file.
c
248 subroutine btio_cleanup
250 c---------------------------------------------------------------------
251 c---------------------------------------------------------------------
257 call MPI_File_close(fp, ierr)
262 c---------------------------------------------------------------------
263 c---------------------------------------------------------------------
c
c accumulate_norms: produce the time-averaged error norms xce_acc over
c all idump dumps.  When periodic read-back is enabled
c (rd_interval > 0) the norms were already accumulated incrementally by
c acc_sub_norms, so the file re-read is skipped via goto 20.  Several
c lines (open arguments, loops, statement labels) are elided in this
c chunk.
c
266 subroutine accumulate_norms(xce_acc)
268 c---------------------------------------------------------------------
269 c---------------------------------------------------------------------
274 double precision xce_acc(5)
277 if (rd_interval .gt. 0) goto 20
c re-open the file and restore the file view before reading every dump
c back (open-mode arguments elided)
279 call MPI_File_open(comm_solve,
287 call MPI_File_set_view(fp, iseek, element, combined_ftype,
288 $ 'native', MPI_INFO_NULL, ierr)
290 c clear the last time step
294 c read back the time steps and accumulate norms
296 call acc_sub_norms(idump)
298 call MPI_File_close(fp, ierr)
c average the accumulated sums over the number of dumps
302 xce_acc(m) = xce_sub(m) / dble(idump)