* under the terms of the license (GNU LGPL) which comes with this package. */
// This is the MWE of https://framagit.org/simgrid/simgrid/-/issues/50
-// The problem was occuring when suspending an actor that will be executed later in the same scheduling round
+// The problem was occurring when suspending an actor that will be executed later in the same scheduling round
#include <iostream>
#include <simgrid/s4u.hpp>
-! Check that getarg does somethig sensible.
+! Check that getarg does something sensible.
program getarg_1
use mpi
CHARACTER*10 ARGS, ARGS2
MPI_Type_extent (newtype[0], &basic_extent);
if (basic_extent != sizeof (test_basic_struct_t)) {
- fprintf (stderr, "(%d): Unexpect extent for struct\n");
+ fprintf (stderr, "(%d): Unexpected extent for struct\n");
MPI_Abort (MPI_COMM_WORLD, 666);
}
}
else if (val_p != &attrval[i]) {
errs++;
- fprintf(stderr, "Atribute value for key %d not correct\n", i);
+ fprintf(stderr, "Attribute value for key %d not correct\n", i);
}
}
}
else if (val_p != &attrval[i]) {
errs++;
- fprintf(stderr, "Atribute value for key %d not correct\n", i);
+ fprintf(stderr, "Attribute value for key %d not correct\n", i);
}
}
}
else if (val_p != &attrval[i]) {
errs++;
- fprintf(stderr, "Atribute value for key %d not correct\n", i);
+ fprintf(stderr, "Attribute value for key %d not correct\n", i);
}
}
MPI_Op_create(matmult, 0, &op);
- /* A single rotation matrix (3x3, stored as 9 consequetive elements) */
+ /* A single rotation matrix (3x3, stored as 9 consecutive elements) */
MPI_Type_contiguous(9, MPI_INT, &mattype);
MPI_Type_commit(&mattype);
/* Create the local matrices.
* Initialize the input matrix so that the entries are
- * consequtive integers, by row, starting at 0.
+ * consecutive integers, by row, starting at 0.
*/
if (rank == size - 1) {
localA = (float *) malloc(gN * lmlast * sizeof(float));
Transpose(localA, localB, gM, gN, comm);
/* check the transposed matrix
- * In the global matrix, the transpose has consequtive integers,
+ * In the global matrix, the transpose has consecutive integers,
* organized by columns.
*/
if (rank == size - 1) {
(see 4.9.4). The order is important.
Note that the computation is in process rank (in the communicator)
- order, independant of the root.
+ order, independent of the root.
*/
int assoc(int *invec, int *inoutvec, int *len, MPI_Datatype * dtype)
{
(see 4.9.4). The order is important.
Note that the computation is in process rank (in the communicator)
- order, independant of the root.
+ order, independent of the root.
*/
void assoc(int *invec, int *inoutvec, int *len, MPI_Datatype * dtype)
{
for (i = 0; i < size; i++) {
for (j = 0; j < COUNT; j++) {
if (recvbuf[i * COUNT + j] != i * VERIFY_CONST + j) {
- printf("PE 0: mis-match error");
+ printf("PE 0: mismatch error");
printf(" recbuf[%d * %d + %d] = ", i, COUNT, j);
printf(" %ld,", recvbuf[i * COUNT + j]);
printf(" should be %ld\n", i * VERIFY_CONST + j);
}
/* Starts a "random" operation on "comm" corresponding to "rndnum" and returns
- * in (*req) a request handle corresonding to that operation. This call should
+ * in (*req) a request handle corresponding to that operation. This call should
* be considered collective over comm (with a consistent value for "rndnum"),
* even though the operation may only be a point-to-point request. */
static void start_random_nonblocking(MPI_Comm comm, unsigned int rndnum, MPI_Request * req,
else
maxsize = size;
- /* General forumula: If we multiple the values from 1 to n, the
+ /* General formula: If we multiply the values from 1 to n, the
* product is n!. This grows very fast, so we'll only use the first
* five (1! = 1, 2! = 2, 3! = 6, 4! = 24, 5! = 120), with n!
* stored in the array result[n] */
}
/* Just performs a simple sum but can be marked as non-commutative to
- potentially tigger different logic in the implementation. */
+ potentially trigger different logic in the implementation. */
void nc_sum(void *a, void *b, int *count, MPI_Datatype * type);
void nc_sum(void *a, void *b, int *count, MPI_Datatype * type)
{
}
/* Just performs a simple sum but can be marked as non-commutative to
- potentially tigger different logic in the implementation. */
+ potentially trigger different logic in the implementation. */
void nc_sum(void *a, void *b, int *count, MPI_Datatype * type);
void nc_sum(void *a, void *b, int *count, MPI_Datatype * type)
{
(see 4.9.4). The order is important.
Note that the computation is in process rank (in the communicator)
- order, independant of the root.
+ order, independent of the root.
*/
void assoc(int *invec, int *inoutvec, int *len, MPI_Datatype * dtype)
{
#define ITERS 10
/* This test uses several scenarios to overlap iallreduce and comm_idup
- * 1.) Use comm_idup dublicate the COMM_WORLD and do iallreduce
+ * 1.) Use comm_idup duplicate the COMM_WORLD and do iallreduce
* on the COMM_WORLD
* 2.) Do the above test in a loop
- * 3.) Dublicate COMM_WORLD, overalp iallreduce on one
+ * 3.) Duplicate COMM_WORLD, overlap iallreduce on one
 * communicator with comm_idup on the other communicator
* 4.) Split MPI_COMM_WORLD, communicate on the split communicator
 while duplicating COMM_WORLD
* 5.) Duplicate the split communicators with comm_idup
 * while communicating on COMM_WORLD
- * 6.) Ceate an inter-communicator and duplicate it with comm_idup while
+ * 6.) Create an inter-communicator and duplicate it with comm_idup while
* communicating on the inter-communicator
- * 7.) Dublicate the inter-communicator whil communicate on COMM_WORLD
+ * 7.) Duplicate the inter-communicator while communicating on COMM_WORLD
* 8.) Merge the inter-communicator to an intra-communicator and idup it,
* overlapping with communication on MPI_COMM_WORLD
* 9.) Communicate on the merge communicator, while duplicating COMM_WORLD
char name[MPI_MAX_OBJECT_NAME], nameout[MPI_MAX_OBJECT_NAME];
MTest_Init(&argc, &argv);
- /* Check world and self firt */
+ /* Check world and self first */
nameout[0] = 0;
MPI_Comm_get_name(MPI_COMM_WORLD, nameout, &rlen);
if (strcmp(nameout, "MPI_COMM_WORLD")) {
MPI_Comm comm, dupcomm, dupcomm2;
MPI_Request rreq[2];
int count;
- int indicies[2];
+ int indices[2];
int r1buf, r2buf, s1buf, s2buf;
int rank, isLeft;
MPI_Irecv(&r1buf, 1, MPI_INT, 0, 0, dupcomm, &rreq[0]);
MPI_Irecv(&r2buf, 1, MPI_INT, 0, 0, comm, &rreq[1]);
MPI_Send(&s2buf, 1, MPI_INT, 0, 0, comm);
- MPI_Waitsome(2, rreq, &count, indicies, MPI_STATUSES_IGNORE);
- if (count != 1 || indicies[0] != 1) {
+ MPI_Waitsome(2, rreq, &count, indices, MPI_STATUSES_IGNORE);
+ if (count != 1 || indices[0] != 1) {
/* The only valid return is that exactly one message
* has been received */
errs++;
- if (count == 1 && indicies[0] != 1) {
+ if (count == 1 && indices[0] != 1) {
printf("Error in context values for intercomm\n");
}
else if (count == 2) {
int i;
printf("Error: count = %d", count);
for (i = 0; i < count; i++) {
- printf(" indicies[%d] = %d", i, indicies[i]);
+ printf(" indices[%d] = %d", i, indices[i]);
}
printf("\n");
}
/* blockindexed_vector_test()
*
* Tests behavior with a blockindexed of some vector types;
- * this shouldn't be easily convertable into anything else.
+ * this shouldn't be easily convertible into anything else.
*
* Returns the number of errors encountered.
*/
/* hindexed_block_vector_test()
*
* Tests behavior with a hindexed_block of some vector types;
- * this shouldn't be easily convertable into anything else.
+ * this shouldn't be easily convertible into anything else.
*
* Returns the number of errors encountered.
*/
int remainder = 0;
MPI_Datatype memtype, chunktype;
- /* need to cook up a new datatype to accomodate large datatypes */
+ /* need to cook up a new datatype to accommodate large datatypes */
/* first pass: chunks of 1 MiB plus an additional remainder. Does require
* 8 byte MPI_Aint, which should have been checked for earlier */
MPI_Aint *disp;
MPI_Datatype memtype;
- /* need to cook up a new datatype to accomodate large datatypes */
+ /* need to cook up a new datatype to accommodate large datatypes */
/* Does require 8 byte MPI_Aint, which should have been checked for earlier
*/
fprintf(stderr, " MPI_Type_struct of %s failed.\n", typemapstring);
if (verbose)
MTestPrintError(err);
- /* No point in contiuing */
+ /* No point in continuing */
return errs;
}
/* added in MPI 3 */
{ MPI_COUNT, "MPI_COUNT" },
#endif
- { 0, (char *)0 }, /* Sentinal used to indicate the last element */
+ { 0, (char *)0 }, /* Sentinel used to indicate the last element */
};
char name[MPI_MAX_OBJECT_NAME];
/* We should not get here, because the default error handler
* is ERRORS_ARE_FATAL. This makes sure that the correct error
- * handler is called and that no failure occured (such as
+ * handler is called and that no failure occurred (such as
* a SEGV) in Comm_call_errhandler on the default
* error handler. */
printf("After the Error Handler Has Been Called\n");
printf( "Unable to cancel MPI_Irecv request\n" );
}
/* Using MPI_Request_free should be ok, but some MPI implementations
- object to it imediately after the cancel and that isn't essential to
+ object to it immediately after the cancel and that isn't essential to
this test */
MTest_Finalize( errs );
{ MPI_LONG_LONG, "MPI_LONG_LONG" },
{ MPI_UNSIGNED_LONG_LONG, "MPI_UNSIGNED_LONG_LONG" },
{ MPI_LONG_DOUBLE_INT, "MPI_LONG_DOUBLE_INT" },
- { 0, (char *)0 }, /* Sentinal used to indicate the last element */
+ { 0, (char *)0 }, /* Sentinel used to indicate the last element */
};
/*
call mtest_init( ierr )
C
-C For upto 6 dimensions, test with periodicity in 0 through all
+C For up to 6 dimensions, test with periodicity in 0 through all
C dimensions. The test is computed by both:
C get info about the created communicator
C apply cart shift
C now create one with MPI_WEIGHTS_EMPTY
C NOTE that MPI_WEIGHTS_EMPTY was added in MPI-3 and does not
-C appear before then. Incluing this test means that this test cannot
+C appear before then. Including this test means that this test cannot
C be compiled if the MPI version is less than 3 (see the testlist file)
degs(1) = 0;
errs = 0
call mtest_init( ierr )
-! integers with upto 9 are 4 bytes integers; r of 4 are 2 byte,
+! integers with up to 9 are 4 bytes integers; r of 4 are 2 byte,
! and r of 2 is 1 byte
call mpi_type_create_f90_integer( 9, ntype1, ierr )
!
endif
call MPI_RECV( aint, 1, MPI_AINT, 0, 0, MPI_COMM_WORLD, s, ierr )
if (taint .ne. aint) then
- print *, "Address-sized int not correctly transfered"
+ print *, "Address-sized int not correctly transferred"
print *, "Value should be ", taint, " but is ", aint
errs = errs + 1
endif
call MPI_RECV( oint, 1, MPI_OFFSET, 0, 1, MPI_COMM_WORLD, s, ierr )
if (toint .ne. oint) then
- print *, "Offset-sized int not correctly transfered"
+ print *, "Offset-sized int not correctly transferred"
print *, "Value should be ", toint, " but is ", oint
errs = errs + 1
endif
call MPI_RECV( iint, 1, MPI_INTEGER, 0, 2, MPI_COMM_WORLD, s, ierr )
if (tiint .ne. iint) then
- print *, "Integer (by kind) not correctly transfered"
+ print *, "Integer (by kind) not correctly transferred"
print *, "Value should be ", tiint, " but is ", iint
errs = errs + 1
endif
call mpi_type_free(newtype,ierr)
! write(*,*) "Sent ",name(1:5),x
else
-! Everyone calls barrier incase size > 2
+! Everyone calls barrier in case size > 2
call mpi_barrier( MPI_COMM_WORLD, ierr )
if (me.eq.dest) then
position=0
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
if (size < 8) {
- fprintf(stderr, "Test requires 8 processes (16 prefered) only %d provided\n", size);
+ fprintf(stderr, "Test requires 8 processes (16 preferred) only %d provided\n", size);
errs++;
}
/*
Test the group routines
- (some tested elsewere)
+ (some tested elsewhere)
MPI_Group_compare
MPI_Group_excl
MTest_Init(&argc, &argv);
MPI_Info_create(&info);
- /* Use only named keys incase the info implementation only supports
+ /* Use only named keys in case the info implementation only supports
* the predefined keys (e.g., IBM) */
for (i = 0; i < NKEYS; i++) {
MPI_Info_set(info, keys[i], values[i]);
MTest_Init(&argc, &argv);
MPI_Info_create(&info1);
- /* Use only named keys incase the info implementation only supports
+ /* Use only named keys in case the info implementation only supports
* the predefined keys (e.g., IBM) */
MPI_Info_set(info1, (char *) "host", (char *) "myhost.myorg.org");
MPI_Info_set(info1, (char *) "file", (char *) "runfile.txt");
/* 1,2,3 */
MPI_Info_create(&info);
- /* Use only named keys incase the info implementation only supports
+ /* Use only named keys in case the info implementation only supports
* the predefined keys (e.g., IBM) */
for (i = 0; i < NKEYS; i++) {
MPI_Info_set(info, keys1[i], values1[i]);
/* 3,2,1 */
MPI_Info_create(&info);
- /* Use only named keys incase the info implementation only supports
+ /* Use only named keys in case the info implementation only supports
* the predefined keys (e.g., IBM) */
for (i = NKEYS - 1; i >= 0; i--) {
MPI_Info_set(info, keys1[i], values1[i]);
/* 1,3,2 */
MPI_Info_create(&info);
- /* Use only named keys incase the info implementation only supports
+ /* Use only named keys in case the info implementation only supports
* the predefined keys (e.g., IBM) */
MPI_Info_set(info, keys1[0], values1[0]);
MPI_Info_set(info, keys1[2], values1[2]);
/* 2,1,3 */
MPI_Info_create(&info);
- /* Use only named keys incase the info implementation only supports
+ /* Use only named keys in case the info implementation only supports
* the predefined keys (e.g., IBM) */
MPI_Info_set(info, keys1[1], values1[1]);
MPI_Info_set(info, keys1[0], values1[0]);
/* 2,3,1 */
MPI_Info_create(&info);
- /* Use only named keys incase the info implementation only supports
+ /* Use only named keys in case the info implementation only supports
* the predefined keys (e.g., IBM) */
MPI_Info_set(info, keys1[1], values1[1]);
MPI_Info_set(info, keys1[2], values1[2]);
/* 3,1,2 */
MPI_Info_create(&info);
- /* Use only named keys incase the info implementation only supports
+ /* Use only named keys in case the info implementation only supports
* the predefined keys (e.g., IBM) */
MPI_Info_set(info, keys1[2], values1[2]);
MPI_Info_set(info, keys1[0], values1[0]);
MTest_Init(&argc, &argv);
MPI_Info_create(&info);
- /* Use only named keys incase the info implementation only supports
+ /* Use only named keys in case the info implementation only supports
* the predefined keys (e.g., IBM) */
for (i = 0; i < NKEYS; i++) {
MPI_Info_set(info, keys[i], values[i]);
}
else if (val_p != &attrval[i]) {
lerrs++;
- fprintf(stderr, "Atribute value for key %d not correct\n", i);
+ fprintf(stderr, "Attribute value for key %d not correct\n", i);
}
}
*
* . generalized file writing/reading to handle arbitrary number of processors
* . provides the "cb_config_list" hint with several permutations of the
- * avaliable processors.
+ * available processors.
* [ makes use of code copied from ROMIO's ADIO code to collect the names of
* the processors ]
*/
/* this deceptively simple test uncovered a bug in the way certain file systems
- * dealt with tuning parmeters. See
+ * dealt with tuning parameters. See
* https://github.com/open-mpi/ompi/issues/158 and
* http://trac.mpich.org/projects/mpich/ticket/2261
*
sprintf(file, "%s", opt_file);
MPI_Info_create(&info);
nr_errors += test_write(file, nprocs, rank, info);
- /* acutal value does not matter. test only writes a small amount of data */
+ /* actual value does not matter. test only writes a small amount of data */
MPI_Info_set(info, "striping_factor", "50");
nr_errors += test_write(file, nprocs, rank, info);
MPI_Info_free(&info);
#define MAX_MSGS 30
/*
-static char MTEST_Descrip[] = "One implementation delivered incorrect data when an MPI recieve uses both ANY_SOURCE and ANY_TAG";
+static char MTEST_Descrip[] = "One implementation delivered incorrect data when an MPI receive uses both ANY_SOURCE and ANY_TAG";
*/
int main(int argc, char *argv[])
#include "mpi.h"
#include "mpitest.h"
-/* Test bsend with a buffer with arbitray alignment */
+/* Test bsend with a buffer with arbitrary alignment */
#define BUFSIZE 2000*4
int main(int argc, char *argv[])
{
if (bptr != buf + align) {
errs++;
printf
- ("Did not recieve the same buffer on detach that was provided on init (%p vs %p)\n",
+ ("Did not receive the same buffer on detach that was provided on init (%p vs %p)\n",
bptr, buf);
}
}
else if (rank == dest) {
MPI_Win_fence(0, win);
/* This should have the same effect, in terms of
- * transfering data, as a send/recv pair */
+ * transferring data, as a send/recv pair */
err = MTestCheckRecv(0, &recvtype);
if (err) {
errs += err;
MPI_Group_free(&neighbors);
MPI_Win_wait(win);
/* This should have the same effect, in terms of
- * transfering data, as a send/recv pair */
+ * transferring data, as a send/recv pair */
err = MTestCheckRecv(0, &recvtype);
if (err) {
errs += errs;
/* This test is going to test the atomicity for "read-modify-write" in GACC
* operations */
-/* This test is similiar with atomic_rmw_fop.c.
+/* This test is similar to atomic_rmw_fop.c.
* There are three processes involved in this test: P0 (origin_shm), P1 (origin_am),
* and P2 (dest). P0 and P1 issues multiple GACC with MPI_SUM and OP_COUNT integers
* (value 1) to P2 via SHM and AM respectively. The correct results should be that the
- * results on P0 and P1 never be the same for intergers on the corresponding index
+ * results on P0 and P1 never be the same for integers on the corresponding index
* in [0...OP_COUNT-1].
*/
}
else if (val_p != &attrval[i]) {
errs++;
- fprintf(stderr, "Atribute value for key %d not correct\n", i);
+ fprintf(stderr, "Attribute value for key %d not correct\n", i);
}
}
/** Contended RMA put test -- James Dinan <dinan@mcs.anl.gov>
*
* Each process issues COUNT put operations to non-overlapping locations on
- * every other processs.
+ * every other process.
*/
#include <stdio.h>
/** Contended RMA put/get test -- James Dinan <dinan@mcs.anl.gov>
*
* Each process issues COUNT put and get operations to non-overlapping
- * locations on every other processs.
+ * locations on every other process.
*/
#include <stdio.h>
MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);
/* This should have the same effect, in terms of
- * transfering data, as a send/recv pair */
+ * transferring data, as a send/recv pair */
err = MPI_Get(recvtype->buf, recvtype->count,
recvtype->datatype, source, 0, sendtype->count, sendtype->datatype, win);
if (err) {
/* Perform several communication operations, mixing synchronization
* types. Use multiple communication to avoid the single-operation
* optimization that may be present. */
- MTestPrintfMsg(3, "Begining loop %d of mixed sync put/acc operations\n", loop);
+ MTestPrintfMsg(3, "Beginning loop %d of mixed sync put/acc operations\n", loop);
memset(winbuf, 0, count * sizeof(int));
MPI_Barrier(comm);
if (crank == source) {
/* Perform several communication operations, mixing synchronization
* types. Use multiple communication to avoid the single-operation
* optimization that may be present. */
- MTestPrintfMsg(3, "Begining loop %d of mixed sync put/get/acc operations\n", loop);
+ MTestPrintfMsg(3, "Beginning loop %d of mixed sync put/get/acc operations\n", loop);
MPI_Barrier(comm);
if (crank == source) {
MPI_Win_lock(MPI_LOCK_EXCLUSIVE, dest, 0, win);
else if (rank == dest) {
MPI_Win_fence(0, win);
/* This should have the same effect, in terms of
- * transfering data, as a send/recv pair */
+ * transferring data, as a send/recv pair */
err = MTestCheckRecv(0, recvtype);
if (err) {
if (errs < 10) {
MPI_Group_free(&neighbors);
MPI_Win_wait(win);
/* This should have the same effect, in terms of
- * transfering data, as a send/recv pair */
+ * transferring data, as a send/recv pair */
err = MTestCheckRecv(0, &recvtype);
if (err) {
errs += errs;
if ($ResultTest ne "") {
# This test really needs to be run manually, with this test
- # Eventually, we can update this to include handleing in checktests.
+ # Eventually, we can update this to include handling in checktests.
print STDERR "Run $curdir/$programname with $np processes and use $ResultTest to check the results\n";
return;
}
free(myname); \
counts[cnt] = 1; bytesize[cnt] = sizeof(_ctype) * (_count); cnt++; }
-/* This defines a structure of two basic members; by chosing things like
+/* This defines a structure of two basic members; by choosing things like
(char, double), various packing and alignment tests can be made */
#define SETUPSTRUCT2TYPE(_mpitype1,_ctype1,_mpitype2,_ctype2,_count,_tname) { \
int i; char *myname; \
rleader = 0;
}
else {
- /* Remote leader is signficant only for the processes
+ /* Remote leader is significant only for the processes
* designated local leaders */
rleader = -1;
}
rleader = 0;
}
else {
- /* Remote leader is signficant only for the processes
+ /* Remote leader is significant only for the processes
* designated local leaders */
rleader = -1;
}
rleader = 0;
}
else {
- /* Remote leader is signficant only for the processes
+ /* Remote leader is significant only for the processes
* designated local leaders */
rleader = -1;
}
rleader = 0;
}
else {
- /* Remote leader is signficant only for the processes
+ /* Remote leader is significant only for the processes
* designated local leaders */
rleader = -1;
}
rleader = 0;
}
else {
- /* Remote leader is signficant only for the processes
+ /* Remote leader is significant only for the processes
* designated local leaders */
rleader = -1;
}
rleader = 1;
}
else {
- /* Remote leader is signficant only for the processes
+ /* Remote leader is significant only for the processes
* designated local leaders */
rleader = -1;
}
rleader = 0;
}
else {
- /* Remote leader is signficant only for the processes
+ /* Remote leader is significant only for the processes
* designated local leaders */
rleader = -1;
}
MPI_Barrier(MPI_COMM_WORLD);
- /* Set dims[] values to descibe a grid of nbNodes and DIM dimensions*/
+ /* Set dims[] values to describe a grid of nbNodes and DIM dimensions*/
MPI_Cart_create(MPI_COMM_WORLD, DIM, dims, periods, reorder, &gridComm);
if (gridComm == MPI_COMM_NULL)