Algorithmique Numérique Distribuée
Public GIT Repository
projects
/
simgrid.git
/ blobdiff
commit
grep
author
committer
pickaxe
? search: re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
add various defines and objects to provide better compilation support (all C tests...
[simgrid.git]
/
src
/
smpi
/
smpi_pmpi.c
diff --git
a/src/smpi/smpi_pmpi.c
b/src/smpi/smpi_pmpi.c
index
d34903c
..
f10a36f
100644
(file)
--- a/
src/smpi/smpi_pmpi.c
+++ b/
src/smpi/smpi_pmpi.c
@@
-523,8
+523,9
@@
int PMPI_Group_excl(MPI_Group group, int n, int *ranks, MPI_Group * newgroup)
if (i >= n) {
index = smpi_group_index(group, rank);
smpi_group_set_mapping(*newgroup, index, rank);
if (i >= n) {
index = smpi_group_index(group, rank);
smpi_group_set_mapping(*newgroup, index, rank);
- rank++;
+
}
}
+ rank++;
}
}
smpi_group_use(*newgroup);
}
}
smpi_group_use(*newgroup);
@@
-622,6
+623,7
@@
int PMPI_Group_range_excl(MPI_Group group, int n, int ranges[][3],
smpi_group_set_mapping(*newgroup, index, newrank);
}
}
smpi_group_set_mapping(*newgroup, index, newrank);
}
}
+ newrank++; //added to avoid looping, need to be checked ..
}
}
}
}
}
}
@@
-1150,16
+1152,16
@@
int PMPI_Sendrecv_replace(void *buf, int count, MPI_Datatype datatype,
MPI_Comm comm, MPI_Status * status)
{
//TODO: suboptimal implementation
MPI_Comm comm, MPI_Status * status)
{
//TODO: suboptimal implementation
- void *recvbuf;
- int retval
, size
;
+
//
void *recvbuf;
+ int retval;
- size = smpi_datatype_size(datatype) * count;
- recvbuf = xbt_new(char, size);
+
//
size = smpi_datatype_size(datatype) * count;
+
//
recvbuf = xbt_new(char, size);
retval =
retval =
- MPI_Sendrecv(buf, count, datatype, dst, sendtag,
recv
buf, count,
+ MPI_Sendrecv(buf, count, datatype, dst, sendtag, buf, count,
datatype, src, recvtag, comm, status);
datatype, src, recvtag, comm, status);
-
memcpy(buf, recvbuf, size * sizeof(char));
- xbt_free(recvbuf);
+
/*
memcpy(buf, recvbuf, size * sizeof(char));
+ xbt_free(recvbuf);
*/
return retval;
}
return retval;
}
@@
-1404,7
+1406,7
@@
int PMPI_Waitall(int count, MPI_Request requests[], MPI_Status status[])
TRACE_smpi_ptp_in(rank_traced, -1, -1, __FUNCTION__);
#endif
TRACE_smpi_ptp_in(rank_traced, -1, -1, __FUNCTION__);
#endif
- smpi_mpi_waitall(count, requests, status);
+
int retval =
smpi_mpi_waitall(count, requests, status);
#ifdef HAVE_TRACING
for (i = 0; i < count; i++) {
int src_traced, dst_traced, is_wait_for_receive;
#ifdef HAVE_TRACING
for (i = 0; i < count; i++) {
int src_traced, dst_traced, is_wait_for_receive;
@@
-1423,7
+1425,7
@@
int PMPI_Waitall(int count, MPI_Request requests[], MPI_Status status[])
TRACE_smpi_computing_in(rank_traced);
#endif
smpi_bench_begin();
TRACE_smpi_computing_in(rank_traced);
#endif
smpi_bench_begin();
- return
MPI_SUCCESS
;
+ return
retval
;
}
int PMPI_Waitsome(int incount, MPI_Request requests[], int *outcount,
}
int PMPI_Waitsome(int incount, MPI_Request requests[], int *outcount,
@@
-1914,7
+1916,9
@@
int PMPI_Get_processor_name(char *name, int *resultlen)
smpi_bench_end();
strncpy(name, SIMIX_host_get_name(SIMIX_host_self()),
smpi_bench_end();
strncpy(name, SIMIX_host_get_name(SIMIX_host_self()),
- MPI_MAX_PROCESSOR_NAME - 1);
+ strlen(SIMIX_host_get_name(SIMIX_host_self())) < MPI_MAX_PROCESSOR_NAME - 1 ?
+ strlen(SIMIX_host_get_name(SIMIX_host_self())) +1 :
+ MPI_MAX_PROCESSOR_NAME - 1 );
*resultlen =
strlen(name) >
MPI_MAX_PROCESSOR_NAME ? MPI_MAX_PROCESSOR_NAME : strlen(name);
*resultlen =
strlen(name) >
MPI_MAX_PROCESSOR_NAME ? MPI_MAX_PROCESSOR_NAME : strlen(name);
@@
-1953,7
+1957,7
@@
int PMPI_Type_contiguous(int count, MPI_Datatype old_type, MPI_Datatype* new_typ
smpi_bench_end();
if (old_type == MPI_DATATYPE_NULL) {
retval = MPI_ERR_TYPE;
smpi_bench_end();
if (old_type == MPI_DATATYPE_NULL) {
retval = MPI_ERR_TYPE;
- } else if (count<
=
0){
+ } else if (count<0){
retval = MPI_ERR_COUNT;
} else {
retval = smpi_datatype_contiguous(count, old_type, new_type);
retval = MPI_ERR_COUNT;
} else {
retval = smpi_datatype_contiguous(count, old_type, new_type);
@@
-1983,7
+1987,7
@@
int PMPI_Type_vector(int count, int blocklen, int stride, MPI_Datatype old_type,
smpi_bench_end();
if (old_type == MPI_DATATYPE_NULL) {
retval = MPI_ERR_TYPE;
smpi_bench_end();
if (old_type == MPI_DATATYPE_NULL) {
retval = MPI_ERR_TYPE;
- } else if (count<
=0 || blocklen<=
0){
+ } else if (count<
0 || blocklen<
0){
retval = MPI_ERR_COUNT;
} else {
retval = smpi_datatype_vector(count, blocklen, stride, old_type, new_type);
retval = MPI_ERR_COUNT;
} else {
retval = smpi_datatype_vector(count, blocklen, stride, old_type, new_type);
@@
-1998,7
+2002,7
@@
int PMPI_Type_hvector(int count, int blocklen, MPI_Aint stride, MPI_Datatype old
smpi_bench_end();
if (old_type == MPI_DATATYPE_NULL) {
retval = MPI_ERR_TYPE;
smpi_bench_end();
if (old_type == MPI_DATATYPE_NULL) {
retval = MPI_ERR_TYPE;
- } else if (count<
=0 || blocklen<=
0){
+ } else if (count<
0 || blocklen<
0){
retval = MPI_ERR_COUNT;
} else {
retval = smpi_datatype_hvector(count, blocklen, stride, old_type, new_type);
retval = MPI_ERR_COUNT;
} else {
retval = smpi_datatype_hvector(count, blocklen, stride, old_type, new_type);
@@
-2014,7
+2018,7
@@
int PMPI_Type_indexed(int count, int* blocklens, int* indices, MPI_Datatype old_
smpi_bench_end();
if (old_type == MPI_DATATYPE_NULL) {
retval = MPI_ERR_TYPE;
smpi_bench_end();
if (old_type == MPI_DATATYPE_NULL) {
retval = MPI_ERR_TYPE;
- } else if (count<
=
0){
+ } else if (count<0){
retval = MPI_ERR_COUNT;
} else {
retval = smpi_datatype_indexed(count, blocklens, indices, old_type, new_type);
retval = MPI_ERR_COUNT;
} else {
retval = smpi_datatype_indexed(count, blocklens, indices, old_type, new_type);
@@
-2029,7
+2033,7
@@
int PMPI_Type_hindexed(int count, int* blocklens, MPI_Aint* indices, MPI_Datatyp
smpi_bench_end();
if (old_type == MPI_DATATYPE_NULL) {
retval = MPI_ERR_TYPE;
smpi_bench_end();
if (old_type == MPI_DATATYPE_NULL) {
retval = MPI_ERR_TYPE;
- } else if (count<
=
0){
+ } else if (count<0){
retval = MPI_ERR_COUNT;
} else {
retval = smpi_datatype_hindexed(count, blocklens, indices, old_type, new_type);
retval = MPI_ERR_COUNT;
} else {
retval = smpi_datatype_hindexed(count, blocklens, indices, old_type, new_type);
@@
-2043,7
+2047,7
@@
int PMPI_Type_struct(int count, int* blocklens, MPI_Aint* indices, MPI_Datatype*
int retval;
smpi_bench_end();
int retval;
smpi_bench_end();
- if (count<
=
0){
+ if (count<0){
retval = MPI_ERR_COUNT;
} else {
retval = smpi_datatype_struct(count, blocklens, indices, old_types, new_type);
retval = MPI_ERR_COUNT;
} else {
retval = smpi_datatype_struct(count, blocklens, indices, old_types, new_type);
@@
-2051,6
+2055,11
@@
int PMPI_Type_struct(int count, int* blocklens, MPI_Aint* indices, MPI_Datatype*
smpi_bench_begin();
return retval;}
smpi_bench_begin();
return retval;}
+int PMPI_Error_class(int errorcode, int* errorclass) {
+ // assume smpi uses only standard mpi error codes
+ *errorclass=errorcode;
+ return MPI_SUCCESS;
+}
/* The following calls are not yet implemented and will fail at runtime. */
/* Once implemented, please move them above this notice. */
/* The following calls are not yet implemented and will fail at runtime. */
/* Once implemented, please move them above this notice. */
@@
-2124,10
+2133,6
@@
int PMPI_Topo_test(MPI_Comm comm, int* top_type) {
return not_yet_implemented();
}
return not_yet_implemented();
}
-int PMPI_Error_class(int errorcode, int* errorclass) {
- return not_yet_implemented();
-}
-
int PMPI_Errhandler_create(MPI_Handler_function* function, MPI_Errhandler* errhandler) {
return not_yet_implemented();
}
int PMPI_Errhandler_create(MPI_Handler_function* function, MPI_Errhandler* errhandler) {
return not_yet_implemented();
}