!-------------------------------------------------------------------------!
!                                                                         !
!        N  A  S     P A R A L L E L     B E N C H M A R K S  3.3         !
!                                                                         !
!                                   S P                                   !
!                                                                         !
!-------------------------------------------------------------------------!
!                                                                         !
!    This benchmark is part of the NAS Parallel Benchmark 3.3 suite.      !
!    It is described in NAS Technical Reports 95-020 and 02-007           !
!                                                                         !
!    Permission to use, copy, distribute and modify this software         !
!    for any purpose with or without fee is hereby granted.  We           !
!    request, however, that all derived work reference the NAS            !
!    Parallel Benchmarks 3.3. This software is provided "as is"           !
!    without express or implied warranty.                                 !
!                                                                         !
!    Information on NPB 3.3, including the technical report, the          !
!    original specifications, source code, results and information        !
!    on how to submit new results, is available at:                       !
!                                                                         !
!           http://www.nas.nasa.gov/Software/NPB/                         !
!                                                                         !
!    Send comments or suggestions to  npb@nas.nasa.gov                    !
!                                                                         !
!          NAS Parallel Benchmarks Group                                  !
!          NASA Ames Research Center                                      !
!          Mail Stop: T27A-1                                              !
!          Moffett Field, CA   94035-1000                                 !
!                                                                         !
!          E-mail:  npb@nas.nasa.gov                                      !
!          Fax:     (650) 604-3957                                        !
!                                                                         !
!-------------------------------------------------------------------------!


c---------------------------------------------------------------------
c
c Authors: R. F. Van der Wijngaart
c          W. Saphir
c---------------------------------------------------------------------

c---------------------------------------------------------------------
       program MPSP
c---------------------------------------------------------------------

       include  'header.h'
       include  'mpinpb.h'

       integer          i, niter, step, c, error, fstatus
       external timer_read
       double precision mflops, t, tmax, timer_read
       logical          verified
       character        class

       call setup_mpi
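c---------------------------------------------------------------------
c      Processes beyond the process count that SP was compiled for
c      are left inactive by setup_mpi; they skip straight to the
c      closing barrier and finalize
c---------------------------------------------------------------------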
       if (.not. active) goto 999

c---------------------------------------------------------------------
c      The root node reads the input file (if it exists); otherwise
c      it takes the defaults from compiled-in parameters
c---------------------------------------------------------------------
       if (node .eq. root) then

          write(*, 1000)
          open (unit=2,file='inputsp.data',status='old', iostat=fstatus)
c
          if (fstatus .eq. 0) then
            write(*,233)
 233        format(' Reading from input file inputsp.data')
            read (2,*) niter
            read (2,*) dt
            read (2,*) grid_points(1), grid_points(2), grid_points(3)
            close(2)
          else
            write(*,234)
            niter = niter_default
            dt    = dt_default
            grid_points(1) = problem_size
            grid_points(2) = problem_size
            grid_points(3) = problem_size
          endif
 234      format(' No input file inputsp.data. Using compiled defaults')

          write(*, 1001) grid_points(1), grid_points(2), grid_points(3)
          write(*, 1002) niter, dt
          if (no_nodes .ne. total_nodes) write(*, 1004) total_nodes
          if (no_nodes .ne. maxcells*maxcells)
     >        write(*, 1005) maxcells*maxcells
          write(*, 1003) no_nodes

 1000     format(//,' NAS Parallel Benchmarks 3.3 -- SP Benchmark',/)
 1001     format(' Size: ', i4, 'x', i4, 'x', i4)
 1002     format(' Iterations: ', i4, '    dt: ', F11.7)
 1004     format(' Total number of processes: ', i5)
 1005     format(' WARNING: compiled for ', i5, ' processes ')
 1003     format(' Number of active processes: ', i5, /)

       endif
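c---------------------------------------------------------------------
c      Broadcast the run parameters from the root to all processes
c---------------------------------------------------------------------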
       call mpi_bcast(niter, 1, MPI_INTEGER,
     >                root, comm_setup, error)

       call mpi_bcast(dt, 1, dp_type,
     >                root, comm_setup, error)

       call mpi_bcast(grid_points(1), 3, MPI_INTEGER,
     >                root, comm_setup, error)

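c---------------------------------------------------------------------
c      Partition the grid into cells and assign them to processes
c      (the multi-partition decomposition)
c---------------------------------------------------------------------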
       call make_set

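c---------------------------------------------------------------------
c      Skip the run if any cell exceeds the compiled array bounds
c---------------------------------------------------------------------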
       do  c = 1, ncells
          if ( (cell_size(1,c) .gt. IMAX) .or.
     >         (cell_size(2,c) .gt. JMAX) .or.
     >         (cell_size(3,c) .gt. KMAX) ) then
             print *,node, c, (cell_size(i,c),i=1,3)
             print *,' Problem size too big for compiled array sizes'
             goto 999
          endif
       end do

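c---------------------------------------------------------------------
c      Initialize constants, the flow field, the lhs arrays, the
c      forcing term, and the communication buffers
c---------------------------------------------------------------------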
       call set_constants

       call initialize

c       call mpi_finalize(error)
c       stop

       call lhsinit

       call exact_rhs

       call compute_buffer_size(5)

c---------------------------------------------------------------------
c      do one time step to touch all code, and reinitialize
c---------------------------------------------------------------------
       call adi
       call initialize

c---------------------------------------------------------------------
c      Synchronize before placing time stamp
c---------------------------------------------------------------------
       call mpi_barrier(comm_setup, error)

       call timer_clear(1)
       call timer_start(1)

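c---------------------------------------------------------------------
c      Main time-stepping loop: niter ADI iterations
c---------------------------------------------------------------------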
       do  step = 1, niter

          if (node .eq. root) then
             if (mod(step, 20) .eq. 0 .or.
     >           step .eq. 1) then
                write(*, 200) step
 200            format(' Time step ', i4)
             endif
          endif

          call adi

       end do

       call timer_stop(1)
       t = timer_read(1)

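c---------------------------------------------------------------------
c      Verify the computed solution against the reference values
c---------------------------------------------------------------------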
       call verify(niter, class, verified)

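c---------------------------------------------------------------------
c      The reported time is the maximum over all processes
c---------------------------------------------------------------------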
       call mpi_reduce(t, tmax, 1,
     >                 dp_type, MPI_MAX,
     >                 root, comm_setup, error)

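c---------------------------------------------------------------------
c      On the root, convert the elapsed time to a MFLOP/s rate; the
c      cubic polynomial in problem_size approximates the operation
c      count per iteration
c---------------------------------------------------------------------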
       if( node .eq. root ) then
          if( tmax .ne. 0. ) then
             mflops = (881.174*float( problem_size )**3
     >                -4683.91*float( problem_size )**2
     >                +11484.5*float( problem_size )
     >                -19272.4) * float( niter ) / (tmax*1000000.0d0)
          else
             mflops = 0.0
          endif

          call print_results('SP', class, grid_points(1),
     >        grid_points(2), grid_points(3), niter, maxcells*maxcells,
     >        total_nodes, tmax, mflops, '          floating point',
     >        verified, npbversion, compiletime, cs1, cs2, cs3, cs4,
     >        cs5, cs6, '(none)')
       endif

 999   continue
       call mpi_barrier(MPI_COMM_WORLD, error)
       call mpi_finalize(error)

       end