template <class IT, class NT>
template <class IT, class NT>
    arr.resize(MyLocLength(), initval);
template <class IT, class NT>
template <class IT, class NT>
    arr.resize(MyLocLength(), initval);
template <class IT, class NT>
template <class IT, class NT>
template <class ITRHS, class NTRHS>
    arr.resize(static_cast<IT>(rhs.arr.size()), NT());
    for(IT i = 0; (unsigned)i < arr.size(); ++i)
        arr[i] = static_cast<NT>(rhs.arr[static_cast<ITRHS>(i)]);
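// Builds the distributed vector from a process-local std::vector (fillarr): per-process
// counts are gathered with MPI_Allgather, the global length glen is accumulated, and the
// entries are shipped with MPI_Alltoallv so that ownership follows the block distribution
// computed by Owner().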
template <class IT, class NT>
    MPI_Comm World = commGrid->GetWorld();
    int nprocs = commGrid->GetSize();
    int rank = commGrid->GetRank();
    IT nsize = fillarr.size();
    MPI_Allgather(MPI_IN_PLACE, 1, MPIType<IT>(), sizes, 1, MPIType<IT>(), World);
    glen = std::accumulate(sizes, sizes+nprocs, static_cast<IT>(0));
    IT lengthuntil = std::accumulate(sizes, sizes+rank, static_cast<IT>(0));
    int * sendcnt = new int[nprocs]();
    for(IT i=0; i<nsize; ++i)
        int owner = Owner(i+lengthuntil, locind);
    int * recvcnt = new int[nprocs];
    MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World);
    int * sdispls = new int[nprocs];
    int * rdispls = new int[nprocs];
    for(int i=0; i<nprocs-1; ++i)
    {
        sdispls[i+1] = sdispls[i] + sendcnt[i];
        rdispls[i+1] = rdispls[i] + recvcnt[i];
    }
    IT totrecv = std::accumulate(recvcnt, recvcnt+nprocs, static_cast<IT>(0));
    MPI_Alltoallv(fillarr.data(), sendcnt, sdispls, MPIType<NT>(), arr.data(), recvcnt, rdispls, MPIType<NT>(), World);
    DeleteAll(sendcnt, recvcnt, sdispls, rdispls);
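// MinElement(): each process finds its local minimum, an MPI_MIN reduction yields the
// global minimum value, and a second MPI_MIN over local indices (initialized to
// TotalLength() on non-owners) picks the smallest global index holding that value.
// Returns std::make_pair(globalMinIdx, globalMin).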
template <class IT, class NT>
    auto it = std::min_element(arr.begin(), arr.end());
    MPI_Allreduce(&localMin, &globalMin, 1, MPIType<NT>(), MPI_MIN, commGrid->GetWorld());
    IT localMinIdx = TotalLength();
    if(globalMin == localMin)
        localMinIdx = std::distance(arr.begin(), it) + LengthUntil();
    MPI_Allreduce(&localMinIdx, &globalMinIdx, 1, MPIType<IT>(), MPI_MIN, commGrid->GetWorld());
    return std::make_pair(globalMinIdx, globalMin);
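// Reduce(__binary_op, identity): folds the local entries with std::accumulate and then
// combines the per-process partial results into one global value.
// Usage sketch (hypothetical vector v of type FullyDistVec<int64_t,double>):
//   double total = v.Reduce(std::plus<double>(), 0.0);                       // global sum
//   double peak  = v.Reduce([](double a, double b){ return std::max(a,b); },
//                           std::numeric_limits<double>::lowest());          // global max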
template <class IT, class NT>
template <typename _BinaryOperation>
    NT localsum = std::accumulate(arr.begin(), arr.end(), identity, __binary_op);
template <class IT, class NT>
template <typename OUT, typename _BinaryOperation, typename _UnaryOperation>
    OUT localsum = default_val;
    typename std::vector<NT>::const_iterator iter = arr.begin();
    while (iter < arr.end())
    {
        localsum = __binary_op(localsum, __unary_op(*iter));
        ++iter;
    }
    OUT totalsum = default_val;
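// SelectCandidates(nver): uniform random draws scaled by nver are cast to NT and
// broadcast from rank 0 so every process agrees on the same candidate list, which is
// then written with SetElement. (A header note remarks that NT should be an integer
// type for this to make sense.)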
template <class IT, class NT>
    IT length = TotalLength();
    std::vector<double> loccands(length);
    std::vector<NT> loccandints(length);
    MPI_Comm World = commGrid->GetWorld();
    int myrank = commGrid->GetRank();
    for(int i=0; i<length; ++i)
        loccands[i] = M.rand();
    // scale each uniform draw by nver (lambda replaces the removed-in-C++17 std::bind2nd)
    std::transform(loccands.begin(), loccands.end(), loccands.begin(),
                   [nver](double cand) { return cand * nver; });
    for(int i=0; i<length; ++i)
        loccandints[i] = static_cast<NT>(loccands[i]);
    MPI_Bcast(&(loccandints[0]), length, MPIType<NT>(), 0, World);
    for(IT i=0; i<length; ++i)
        SetElement(i, loccandints[i]);
template <class IT, class NT>
template <class ITRHS, class NTRHS>
    if(static_cast<const void*>(this) != static_cast<const void*>(&rhs))
    {
        glen = static_cast<IT>(rhs.glen);
        commGrid = rhs.commGrid;
        arr.resize(rhs.arr.size(), NT());
        for(IT i = 0; (unsigned)i < arr.size(); ++i)
            arr[i] = static_cast<NT>(rhs.arr[static_cast<ITRHS>(i)]);
    }
template <class IT, class NT>
template <class IT, class NT>
    arr.resize(rhs.MyLocLength());
    std::fill(arr.begin(), arr.end(), NT());
    for(IT i=0; i < spvecsize; ++i)
        arr[rhs.ind[i]] = rhs.num[i];
template <class IT, class NT>
#pragma omp parallel for
    for(IT i=0; i < spvecsize; ++i)
    {
        if(arr[rhs.ind[i]] == NT())
            arr[rhs.ind[i]] = rhs.num[i];
        else
            arr[rhs.ind[i]] += rhs.num[i];
    }
template <class IT, class NT>
    for(IT i=0; i < spvecsize; ++i)
        arr[rhs.ind[i]] -= rhs.num[i];
template <class IT, class NT>
template <typename _BinaryOperation>
    std::transform(arr.begin(), arr.end(), rhs.arr.begin(), arr.begin(), __binary_op);
template <class IT, class NT>
template <typename _BinaryOperation, typename OUT>
    std::transform(arr.begin(), arr.end(), rhs.arr.begin(), result.arr.begin(), __binary_op);
template <class IT, class NT>
    if(!(*commGrid == *rhs.commGrid))
        std::cout << "Grids are not comparable for elementwise addition" << std::endl;
    EWise(rhs, std::plus<NT>());
template <class IT, class NT>
    if(!(*commGrid == *rhs.commGrid))
        std::cout << "Grids are not comparable for elementwise subtraction" << std::endl;
    EWise(rhs, std::minus<NT>());
template <class IT, class NT>
    local = (int) std::equal(arr.begin(), arr.end(), rhs.arr.begin(), epsilonequal);
    MPI_Allreduce(&local, &whole, 1, MPI_INT, MPI_BAND, commGrid->GetWorld());
    return static_cast<bool>(whole);
template <class IT, class NT>
template <typename _Predicate>
    IT local = std::count_if(arr.begin(), arr.end(), pred);
    MPI_Allreduce(&local, &whole, 1, MPIType<IT>(), MPI_SUM, commGrid->GetWorld());
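// FindInds(pred): returns the global indices where pred is true, as a FullyDistVec<IT,IT>.
// Each process collects its matching global indices locally; the hits are then re-balanced
// across processes with MPI_Alltoallv so the result is again fully (block) distributed.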
template <class IT, class NT>
template <typename _Predicate>
    MPI_Comm World = commGrid->GetWorld();
    int nprocs = commGrid->GetSize();
    int rank = commGrid->GetRank();
    IT sizelocal = LocArrSize();
    IT sizesofar = LengthUntil();
    for(IT i=0; i<sizelocal; ++i)
    {
        if(pred(arr[i]))
            found.arr.push_back(i+sizesofar);
    }
    IT nsize = found.arr.size();
    MPI_Allgather(MPI_IN_PLACE, 1, MPIType<IT>(), dist, 1, MPIType<IT>(), World);
    IT lengthuntil = std::accumulate(dist, dist+rank, static_cast<IT>(0));
    found.glen = std::accumulate(dist, dist+nprocs, static_cast<IT>(0));
    int * sendcnt = new int[nprocs];
    std::fill(sendcnt, sendcnt+nprocs, 0);
    for(IT i=0; i<nsize; ++i)
        int owner = found.Owner(i+lengthuntil, locind);
    int * recvcnt = new int[nprocs];
    MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World);
    int * sdispls = new int[nprocs];
    int * rdispls = new int[nprocs];
    for(int i=0; i<nprocs-1; ++i)
    {
        sdispls[i+1] = sdispls[i] + sendcnt[i];
        rdispls[i+1] = rdispls[i] + recvcnt[i];
    }
    IT totrecv = std::accumulate(recvcnt, recvcnt+nprocs, static_cast<IT>(0));
    std::vector<IT> recvbuf(totrecv);
    MPI_Alltoallv(&(found.arr[0]), sendcnt, sdispls, MPIType<IT>(), &(recvbuf[0]), recvcnt, rdispls, MPIType<IT>(), World);
    found.arr.swap(recvbuf);
    DeleteAll(sendcnt, recvcnt, sdispls, rdispls);
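// Find(pred): collects the local (index, value) pairs satisfying pred into a
// FullyDistSpVec<IT,NT>; a second, value-based overload follows.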
template <class IT, class NT>
template <typename _Predicate>
    size_t size = arr.size();
    for(size_t i=0; i<size; ++i)
    {
        if(pred(arr[i]))
        {
            found.ind.push_back((IT) i);
            found.num.push_back(arr[i]);
        }
    }
template <class IT, class NT>
    size_t size = arr.size();
    for(size_t i=0; i<size; ++i)
    {
        if(arr[i] == val)
        {
            found.ind.push_back((IT) i);
            found.num.push_back(val);
        }
    }
template <class IT, class NT>
template <class HANDLER>
template <class IT, class NT>
template <class HANDLER>
    tmpSpVec.SaveGathered(outfile, master, handler, printProcSplits);
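// SetElement(indx, numx) / GetElement(indx): Owner(indx, locind) maps the global index to
// the owning process and its local offset; only the owner touches arr (printing a
// diagnostic for out-of-range indices), and GetElement broadcasts the value from the owner.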
template <class IT, class NT>
    int rank = commGrid->GetRank();
    if(glen == 0)
        std::cout << "FullyDistVec::SetElement can't be called on an empty vector." << std::endl;
    int owner = Owner(indx, locind);
    if(commGrid->GetRank() == owner)
    {
        if (locind > (LocArrSize() - 1))
            std::cout << "FullyDistVec::SetElement cannot expand array" << std::endl;
        else if (locind < 0)
            std::cout << "FullyDistVec::SetElement local index < 0" << std::endl;
        else
            arr[locind] = numx;
    }
template <class IT, class NT>
    MPI_Comm World = commGrid->GetWorld();
    int rank = commGrid->GetRank();
    if(glen == 0)
        std::cout << "FullyDistVec::GetElement can't be called on an empty vector." << std::endl;
    int owner = Owner(indx, locind);
    if(commGrid->GetRank() == owner)
    {
        if (locind > (LocArrSize() - 1))
            std::cout << "FullyDistVec::GetElement local index > size" << std::endl;
        else if (locind < 0)
            std::cout << "FullyDistVec::GetElement local index < 0" << std::endl;
        else
            ret = arr[locind];
    }
    MPI_Bcast(&ret, 1, MPIType<NT>(), owner, World);
template <class IT, class NT>
    MPI_Comm World = commGrid->GetWorld();
    MPI_Comm_rank(World, &rank);
    MPI_Comm_size(World, &nprocs);
    char _fn[] = "temp_fullydistvec";
    MPI_File_open(World, _fn, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &thefile);
    IT lengthuntil = LengthUntil();
    char native[] = "native";
    MPI_File_set_view(thefile, int64_t(lengthuntil * sizeof(NT)), MPIType<NT>(), MPIType<NT>(), native, MPI_INFO_NULL);
    IT count = LocArrSize();
    MPI_File_write(thefile, &(arr[0]), count, MPIType<NT>(), MPI_STATUS_IGNORE);
    MPI_File_close(&thefile);
    MPI_Gather(&count, 1, MPIType<IT>(), counts, 1, MPIType<IT>(), 0, World);
    FILE * f = fopen("temp_fullydistvec", "r");
    if(!f)
        std::cerr << "Problem reading binary input file\n";
    IT maxd = *std::max_element(counts, counts+nprocs);
    NT * data = new NT[maxd];
    for(int i=0; i<nprocs; ++i)
    {
        size_t result = fread(data, sizeof(NT), counts[i], f);
        if (result != (unsigned)counts[i]) { std::cout << "Error in fread, only " << result << " entries read" << std::endl; }
        std::cout << "Elements stored on proc " << i << ": {";
        for (int j = 0; j < counts[i]; j++)
        {
            std::cout << data[j] << ",";
        }
        std::cout << "}" << std::endl;
    }
template <class IT, class NT>
template <typename _UnaryOperation, typename IRRELEVANT_NT>
    typename std::vector<IT>::const_iterator miter = mask.ind.begin();
    while (miter < mask.ind.end())
    {
        IT index = *miter++;
        arr[index] = __unary_op(arr[index]);
    }
template <class IT, class NT>
template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
    if(*(commGrid) == *(other.commGrid))
    {
        if(glen != other.glen)
        {
            std::ostringstream outs;
            outs << "Vector dimensions don't match (" << glen << " vs " << other.glen << ") for FullyDistVec::EWiseApply\n";
        }
        typename std::vector<NT>::iterator thisIter = arr.begin();
        typename std::vector<NT2>::const_iterator otherIter = other.arr.begin();
        while (thisIter < arr.end())
        {
            if (_do_op(*thisIter, *otherIter, false, false))
                *thisIter = __binary_op(*thisIter, *otherIter, false, false);
            ++thisIter;
            ++otherIter;
        }
    }
    else
    {
        std::ostringstream outs;
        outs << "Grids are not comparable for FullyDistVec<IT,NT>::EWiseApply" << std::endl;
    }
template <class IT, class NT>
template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
    if(*(commGrid) == *(other.commGrid))
    {
        if(glen != other.glen)
        {
            std::cerr << "Vector dimensions don't match (" << glen << " vs " << other.glen << ") for FullyDistVec::EWiseApply\n";
        }
        // (one code path) walk the whole dense array, applying nullValue where 'other' has no stored entry
        typename std::vector<IT>::const_iterator otherInd = other.ind.begin();
        typename std::vector<NT2>::const_iterator otherNum = other.num.begin();
        for(IT i=0; (unsigned)i < arr.size(); ++i)
        {
            if (otherInd == other.ind.end() || i < *otherInd)   // 'other' has no entry at position i
            {
                if (_do_op(arr[i], nullValue, false, true))
                    arr[i] = __binary_op(arr[i], nullValue, false, true);
            }
            else
            {
                if (_do_op(arr[i], *otherNum, false, false))
                    arr[i] = __binary_op(arr[i], *otherNum, false, false);
                ++otherInd;
                ++otherNum;
            }
        }

        // (alternative code path) touch only the stored entries of 'other'
        IT spsize = other.ind.size();
#pragma omp parallel for
        for(IT i=0; i < spsize; i++)
        {
            if (_do_op(arr[other.ind[i]], other.num[i], false, false))
                arr[other.ind[i]] = __binary_op(arr[other.ind[i]], other.num[i], false, false);
        }
    }
    else
    {
        std::cout << "Grids are not comparable for elementwise apply" << std::endl;
    }
template <class IT, class NT>
    MPI_Comm World = commGrid->GetWorld();
    IT nnz = LocArrSize();
    std::pair<NT,IT> * vecpair = new std::pair<NT,IT>[nnz];
    int nprocs = commGrid->GetSize();
    int rank = commGrid->GetRank();
    MPI_Allgather(MPI_IN_PLACE, 1, MPIType<IT>(), dist, 1, MPIType<IT>(), World);
    IT sizeuntil = LengthUntil();
    for(IT i=0; i < nnz; ++i)
    {
        vecpair[i].first = arr[i];
        vecpair[i].second = i + sizeuntil;
    }
    std::vector<IT> narr(nnz);
    for(IT i=0; i < nnz; ++i)
    {
        arr[i] = vecpair[i].first;
        narr[i] = vecpair[i].second;
    }
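// In-place random permutation of the vector's entries: under COMBBLAS_LEGACY each entry is
// paired with a random key; otherwise entries are redistributed across processes, shuffled
// locally with std::shuffle seeded by `seed`, and written back to freshly computed positions
// via a second pair of MPI_Alltoallv exchanges.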
template <class IT, class NT>
    MPI_Comm World = commGrid->GetWorld();
    int nprocs = commGrid->GetSize();
    int rank = commGrid->GetRank();
#ifdef COMBBLAS_LEGACY
    std::pair<double,NT> * vecpair = new std::pair<double,NT>[size];
    MPI_Allgather(MPI_IN_PLACE, 1, MPIType<IT>(), dist, 1, MPIType<IT>(), World);
    for(int i=0; i<size; ++i)
    {
        vecpair[i].first = M.rand();
        vecpair[i].second = arr[i];
    }
    std::vector<NT> nnum(size);
    for(int i=0; i<size; ++i) nnum[i] = vecpair[i].second;
#else
    std::vector< std::vector<NT> > data_send(nprocs);
    for(int i=0; i<size; ++i)
        data_send[dest].push_back(arr[i]);
    int * sendcnt = new int[nprocs];
    int * sdispls = new int[nprocs];
    for(int i=0; i<nprocs; ++i) sendcnt[i] = (int) data_send[i].size();
    int * rdispls = new int[nprocs];
    int * recvcnt = new int[nprocs];
    MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World);
    for(int i=0; i<nprocs-1; ++i)
    {
        sdispls[i+1] = sdispls[i] + sendcnt[i];
        rdispls[i+1] = rdispls[i] + recvcnt[i];
    }
    IT totrecv = std::accumulate(recvcnt, recvcnt+nprocs, static_cast<IT>(0));
    if(totrecv > std::numeric_limits<int>::max())
    {
        std::cout << "COMBBLAS_WARNING: total data to receive exceeds max int: " << totrecv << std::endl;
    }
    std::vector<NT>().swap(arr);
    for(int i=0; i<nprocs; ++i)
    {
        std::copy(data_send[i].begin(), data_send[i].end(), sendbuf+sdispls[i]);
        std::vector<NT>().swap(data_send[i]);
    }
    NT * recvbuf = new NT[totrecv];
    MPI_Alltoallv(sendbuf, sendcnt, sdispls, MPIType<NT>(), recvbuf, recvcnt, rdispls, MPIType<NT>(), World);
    std::default_random_engine gen(seed);
    std::shuffle(recvbuf, recvbuf + totrecv, gen);
    localcounts[rank] = totrecv;
    MPI_Allgather(MPI_IN_PLACE, 1, MPI_LONG_LONG, localcounts, 1, MPI_LONG_LONG, World);
    int64_t glenuntil = std::accumulate(localcounts, localcounts+rank, static_cast<int64_t>(0));
    std::vector< std::vector<IT> > locs_send(nprocs);
    for(IT i=0; i < totrecv; ++i)
    {
        int owner = Owner(glenuntil+i, remotelocind);
        locs_send[owner].push_back(remotelocind);
        data_send[owner].push_back(recvbuf[i]);
    }
    for(int i=0; i<nprocs; ++i) sendcnt[i] = (int) data_send[i].size();
    MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World);
    for(int i=0; i<nprocs-1; ++i)
    {
        sdispls[i+1] = sdispls[i] + sendcnt[i];
        rdispls[i+1] = rdispls[i] + recvcnt[i];
    }
    IT newsize = std::accumulate(recvcnt, recvcnt+nprocs, static_cast<IT>(0));
    if(newsize > std::numeric_limits<int>::max())
    {
        std::cout << "COMBBLAS_WARNING: total data to receive exceeds max int: " << newsize << std::endl;
    }
    IT totalsend = std::accumulate(sendcnt, sendcnt+nprocs, static_cast<IT>(0));
    if(totalsend != totrecv || newsize != size)
    {
        std::cout << "COMBBLAS_WARNING: sending different sized data than received: " << totalsend << " != " << totrecv << " , " << newsize << " != " << size << std::endl;
    }
    for(int i=0; i<nprocs; ++i)
    {
        std::copy(data_send[i].begin(), data_send[i].end(), recvbuf+sdispls[i]);
        std::vector<NT>().swap(data_send[i]);
    }
    MPI_Alltoallv(recvbuf, sendcnt, sdispls, MPIType<NT>(), sendbuf, recvcnt, rdispls, MPIType<NT>(), World);
    IT * newinds = new IT[totalsend];
    for(int i=0; i<nprocs; ++i)
    {
        std::copy(locs_send[i].begin(), locs_send[i].end(), newinds+sdispls[i]);
        std::vector<IT>().swap(locs_send[i]);
    }
    MPI_Alltoallv(newinds, sendcnt, sdispls, MPIType<IT>(), indsbuf, recvcnt, rdispls, MPIType<IT>(), World);
    DeleteAll(newinds, sendcnt, sdispls, rdispls, recvcnt);
    for(IT i=0; i < newsize; ++i)
        arr[indsbuf[i]] = sendbuf[i];
#endif
template <class IT, class NT>
    IT length = MyLocLength();
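// operator()(ri): dense subsref. Each requested global index in ri is routed to its owner
// with an MPI_Alltoallv round trip; owners look up the requested values and send them back,
// and a reverse map restores the original request order in the result vector Indexed.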
template <class IT, class NT>
    if(!(*commGrid == *ri.commGrid))
    {
        std::cout << "Grids are not comparable for dense vector subsref" << std::endl;
    }
    MPI_Comm World = commGrid->GetWorld();
    int nprocs = commGrid->GetSize();
    std::vector< std::vector<IT> > data_req(nprocs);
    std::vector< std::vector<IT> > revr_map(nprocs);
    for(IT i=0; i < riloclen; ++i)
    {
        int owner = Owner(ri.arr[i], locind);
        data_req[owner].push_back(locind);
        revr_map[owner].push_back(i);
    }
    IT * sendbuf = new IT[riloclen];
    int * sendcnt = new int[nprocs];
    int * sdispls = new int[nprocs];
    for(int i=0; i<nprocs; ++i)
        sendcnt[i] = (int) data_req[i].size();
    int * rdispls = new int[nprocs];
    int * recvcnt = new int[nprocs];
    MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World);
    for(int i=0; i<nprocs-1; ++i)
    {
        sdispls[i+1] = sdispls[i] + sendcnt[i];
        rdispls[i+1] = rdispls[i] + recvcnt[i];
    }
    IT totrecv = std::accumulate(recvcnt, recvcnt+nprocs, static_cast<IT>(0));
    for(int i=0; i<nprocs; ++i)
    {
        std::copy(data_req[i].begin(), data_req[i].end(), sendbuf+sdispls[i]);
        std::vector<IT>().swap(data_req[i]);
    }
    IT * reversemap = new IT[riloclen];
    for(int i=0; i<nprocs; ++i)
    {
        std::copy(revr_map[i].begin(), revr_map[i].end(), reversemap+sdispls[i]);
        std::vector<IT>().swap(revr_map[i]);
    }
    IT * recvbuf = new IT[totrecv];
    MPI_Alltoallv(sendbuf, sendcnt, sdispls, MPIType<IT>(), recvbuf, recvcnt, rdispls, MPIType<IT>(), World);
    NT * databack = new NT[totrecv];
    for(int i=0; i<nprocs; ++i)
    {
        for(int j = rdispls[i]; j < rdispls[i] + recvcnt[i]; ++j)
        {
            databack[j] = arr[recvbuf[j]];
        }
    }
    NT * databuf = new NT[riloclen];
    MPI_Alltoallv(databack, recvcnt, rdispls, MPIType<NT>(), databuf, sendcnt, sdispls, MPIType<NT>(), World);
    for(int i=0; i<nprocs; ++i)
    {
        for(int j=sdispls[i]; j < sdispls[i]+sendcnt[i]; ++j)
        {
            Indexed.arr[reversemap[j]] = databuf[j];
        }
    }
    DeleteAll(sdispls, sendcnt, databuf, reversemap);
template <class IT, class NT>
    IT totl = TotalLength();
    if (commGrid->GetRank() == 0)
        std::cout << "As a whole, " << vectorname << " has length " << totl << std::endl;
template <class IT, class NT>
    if(*(commGrid) == *(other.commGrid))
    {
        if(glen != other.glen)
        {
            std::cerr << "Vector dimensions don't match (" << glen << " vs " << other.glen << ") for FullyDistVec::Set\n";
        }
#pragma omp parallel for
        for(IT i=0; i < spvecsize; ++i)
            arr[other.ind[i]] = other.num[i];
    }
    else
    {
        std::cout << "Grids are not comparable for Set" << std::endl;
    }
template <class IT, class NT>
template <class NT1, typename _BinaryOperationIdx, typename _BinaryOperationVal>
    if(*(commGrid) != *(spVec.commGrid))
    {
        std::cout << "Grids are not comparable for GSet" << std::endl;
    }
    if(spVecSize == 0) return;
    MPI_Comm World = commGrid->GetWorld();
    int nprocs = commGrid->GetSize();
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    std::vector< std::vector<NT> > datsent(nprocs);
    std::vector< std::vector<IT> > indsent(nprocs);
    IT lengthUntil = spVec.LengthUntil();
    for(IT k=0; k < spVecSize; ++k)
    {
        IT globind = __binopIdx(spVec.num[k], spVec.ind[k] + lengthUntil);
        int owner = Owner(globind, locind);
        NT val = __binopVal(spVec.num[k], spVec.ind[k] + lengthUntil);
        datsent[owner].push_back(val);
        indsent[owner].push_back(locind);
    }
    for(int j = 0; j < datsent[myrank].size(); ++j)
        arr[indsent[myrank][j]] = datsent[myrank][j];
    for(int i=0; i<nprocs; ++i)
    {
        MPI_Win_lock(MPI_LOCK_SHARED, i, MPI_MODE_NOCHECK, win);
        for(int j = 0; j < datsent[i].size(); ++j)
        {
            MPI_Put(&datsent[i][j], 1, MPIType<NT>(), i, indsent[i][j], 1, MPIType<NT>(), win);
        }
        MPI_Win_unlock(i, win);
    }
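// GGet: one-sided gather, the counterpart of GSet. __binopIdx maps each sparse entry to the
// global index to read; remote values are fetched with MPI_Get from a window exposed over
// the local array, local values are copied directly, and unmatched entries keep nullValue.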
template <class IT, class NT>
template <class NT1, typename _BinaryOperationIdx>
    if(*(commGrid) != *(spVec.commGrid))
    {
        std::cout << "Grids are not comparable for GGet" << std::endl;
    }
    MPI_Comm World = commGrid->GetWorld();
    int nprocs = commGrid->GetSize();
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    std::vector< std::vector<NT> > spIdx(nprocs);
    std::vector< std::vector<IT> > indsent(nprocs);
    IT lengthUntil = spVec.LengthUntil();
    res.ind.resize(spVecSize);
    res.num.resize(spVecSize);
    for(IT k=0; k < spVecSize; ++k)
    {
        IT globind = __binopIdx(spVec.num[k], spVec.ind[k] + lengthUntil);
        int owner = Owner(globind, locind);
        spIdx[owner].push_back(k);
        indsent[owner].push_back(locind);
        res.num[k] = nullValue;
        res.ind[k] = spVec.ind[k];
    }
    for(int j = 0; j < indsent[myrank].size(); ++j)
        res.num[spIdx[myrank][j]] = arr[indsent[myrank][j]];
    MPI_Win_create(&arr[0], LocArrSize() * sizeof(NT), sizeof(NT), MPI_INFO_NULL, World, &win);
    for(int i=0; i<nprocs; ++i)
    {
        MPI_Win_lock(MPI_LOCK_SHARED, i, MPI_MODE_NOCHECK, win);
        for(int j = 0; j < indsent[i].size(); ++j)
        {
            MPI_Get(&res.num[spIdx[i][j]], 1, MPIType<NT>(), i, indsent[i][j], 1, MPIType<NT>(), win);
        }
        MPI_Win_unlock(i, win);
    }