//
// NEDISS: Network Diffusion and Synchronization Simulator
//
// CommunicationFunctions.h
//
// Created by m4zz31 on 5/11/21.
//
5#ifndef CPPPROJCT_COMMUNICATIONFUNCTIONS_H
6#define CPPPROJCT_COMMUNICATIONFUNCTIONS_H
7
8#include <set>
9#include <iterator>
10#include <random>
11#include <type_traits>
12
13#include "../macros/macros.h"
14
15#include "../GraphClasses/GeneralGraph.h"
16#include "../GraphClasses/GraphFunctions.h"
17
18#include "../Utils/HelperClasses.h"
19#include "../Utils/error.h"
20#include "../Utils/global_standard_messages.h"
21#include "../Utils/msleep.h"
22
23
24
// Reply-payload builders implemented elsewhere (definitions not in this header):
// build_answer_nodes fills `answer` with the value of the node whose index is
// transported in `ix` (encoded as a double); build_answer_edges fills a
// 2-element `answer` from the 2-element index payload `ix`. `owner` is the
// requesting rank (taken from MPI_Status::MPI_SOURCE at the call sites below).
// NOTE(review): semantics inferred from the computeAnswer call sites — confirm
// against the definitions.
void build_answer_nodes(double &answer, ReferenceContainer &REF, double ix, int owner, int &MyNProc);
void build_answer_edges(double * answer, ReferenceContainer &REF, double * ix, int owner, int &MyNProc);
27
28
// Thin adapter that exposes a concrete request implementation (e.g. a
// FieldRequestObject instantiation) under a common wrapper type, forwarding
// the request-kind tag to the implementation's constructor.
template <typename Impl>
class RequestObject : public Impl {
public:
    // Forward the request-kind tag to the underlying implementation.
    RequestObject(int type) : Impl(type) {}
    // Default-construct the underlying implementation.
    RequestObject() = default;
};
35
36
37template <int field>
39public:
40 int recvLength = -1;
41 int sendLength = -1;
42 int recvTag = -1;
43 int sendTag = -1;
44 bool recvInt = true;
45 bool sendDouble = true;
46 // Reception type
47 typedef typename std::conditional< (field >= 0), int, double>::type buffer_type;
48 typedef typename std::conditional< (field >= -2), double, double>::type answer_type;
50 // Diffrent SendTag builders
51 void buildSendTag(int * data);
52 void buildSendTag(double * data);
53 // Different RecvTag builders
54 void buildRecvTag(int * data){};
55 void buildRecvTag(double * data){};
56 void buildRecvTag();
57 // integer buffer
58 void computeAnswer(ReferenceContainer &REF, int * buffer, double * answer, MPI_Status &S, int MYPROC);
59 void computeReady(ReferenceContainer &REF, int * buffer, bool &isReady);
60 // double buffer
61 void computeReady(ReferenceContainer &REF, double * buffer, bool &isReady);
62 void computeAnswer(ReferenceContainer &REF, double * buffer, double * answer, MPI_Status &S, int MYPROC);
63};
64
65template <int field>
67 sendTag = *data;
68};
69
70template <int field>
72 if (field == -1){
73 sendTag = (int) (*data);
74 } else if (field == -2) {
75 sendTag = (int) ((int) (*(data + 1)) + (int) OFFSET);
76 }
77};
78
79
80template <int field>
82 if (field == 0){
83 recvTag = K1_REQUEST;
84 return;
85 } else if (field == 1) {
86 recvTag = K2_REQUEST;
87 return;
88 } else if (field == 2) {
89 recvTag = K3_REQUEST;
90 return;
91 } else if (field == -1) {
92 recvTag = VERTEXVAL_REQUEST_FLAG;
93 return;
94 } else if (field == -2){
95 recvTag = EDGEVAL_REQUEST_FLAG;
96 return;
97 }
98};
99
100
101
102template <int field>
104 if (field == 0){
105 // Answering requests for the field term 1
106 recvLength = 1;
107 sendLength = 1;
108
109 } else if (field == 1) {
110 // Answering requests for the field term 2
111 recvLength = 1;
112 sendLength = 1;
113
114 } else if (field == 2) {
115 // Answering requests for the field term 3
116 recvLength = 1;
117 sendLength = 1;
118
119 } else if (field == -1) {
120 // Answering requests for the node values
121 recvInt = false;
122 sendDouble = true;
123 recvLength = 1;
124 sendLength = 1;
125
126 } else if (field == -2) {
127 // Answering requests for the edge values
128 recvLength = 2;
129 sendLength = 2;
130 recvInt = false;
131 sendDouble = true;
132 }
133}
134
// Build the answer to an RK-stage request: copy the first (central) entry of
// the requested node's RK1/RK2/RK3 vector into *answer. `buffer` holds the
// requested node index; `S` and `MYPROC` are unused for these fields.
// The reads are atomic because the status flags checked in computeReady
// suggest these vectors are written concurrently by other threads —
// presumably the integrator; confirm against the integration code.
template <int field>
void FieldRequestObject<field>::computeAnswer(ReferenceContainer &REF,
                                              int * buffer,
                                              double * answer,
                                              MPI_Status &S,
                                              int MYPROC){
    if (field == 0){
#pragma omp atomic read
        (*answer) = REF.p_LayHelper->data[*(buffer)].RK1[0];
    } else if (field == 1) {
#pragma omp atomic read
        (*answer) = REF.p_LayHelper->data[*(buffer)].RK2[0];
    } else if (field == 2) {
#pragma omp atomic read
        (*answer) = REF.p_LayHelper->data[*(buffer)].RK3[0];
    }
};
152
153
// Build the answer to a node-value (field == -1) or edge-value (field == -2)
// request by delegating to the helpers declared at the top of this header.
// `buffer` carries the requested index(es) encoded as doubles; the requesting
// rank is taken from S.MPI_SOURCE.
template <int field>
void FieldRequestObject<field>::computeAnswer(ReferenceContainer &REF,
                                              double * buffer,
                                              double * answer,
                                              MPI_Status &S,
                                              int MYPROC){
    if (field == -1) {
        build_answer_nodes(*answer, REF, *buffer, S.MPI_SOURCE, MYPROC);
    } else if (field == -2) {
        build_answer_edges(answer, REF, buffer, S.MPI_SOURCE, MYPROC);
    }
};
166
167
168
169template <int field>
170void FieldRequestObject<field>::computeReady(ReferenceContainer &REF, double * buffer, bool &isReady){
171 if (field == -1) {
172 isReady = true;
173 return;
174 } else if (field == -2) {
175 isReady = true;
176 return;
177 }
178};
179
// Readiness check for RK-stage requests: report the RK1/RK2/RK3 status flag
// of the node whose index is in `buffer`. Reads are atomic because the flags
// are flipped concurrently by whichever thread produces the RK stages —
// presumably the integrator; confirm against the integration code.
// For an unsupported `field`, `isReady` is left unchanged.
template <int field>
void FieldRequestObject<field>::computeReady(ReferenceContainer &REF, int * buffer, bool &isReady){
    if (field == 0){
#pragma omp atomic read
        isReady = REF.p_LayHelper->data[*(buffer)].RK1_status;
        return;
    } else if (field == 1) {
#pragma omp atomic read
        isReady = REF.p_LayHelper->data[*(buffer)].RK2_status;
        return;
    } else if (field == 2) {
#pragma omp atomic read
        isReady = REF.p_LayHelper->data[*(buffer)].RK3_status;
        return;
    }
};
196
197
198
199
200template<int DT, int TIMETOL, int BATCH, typename RequestClass>
201void generic_answer_requests(ReferenceContainer &REF, int MYTHR, RequestClass ReqObj){
202 int flag = 0;
203 int TRIES=0;
204 int MYPROC = REF.p_ComHelper->WORLD_RANK[MYTHR];
205 MPI_Request R;
206 MPI_Message M;
207 MPI_Status S;
208 int t=0;
209 bool firstlap = true;
210
211 // Should we receive an int or a double
212 typename decltype(ReqObj)::buffer_type buffer[ReqObj.recvLength];
213 typename decltype(ReqObj)::answer_type answer[ReqObj.sendLength];
214
215 ReqObj.buildRecvTag();
216 while (TRIES < 4){
217 flag = 0;
218 MPI_Status status;
219 if ((flag == 1) || firstlap) {
220 flag = 0;
221 R = MPI_Request();
222 M = MPI_Message();
223 S = MPI_Status();
224 MPI_Improbe(MPI_ANY_SOURCE,
225 ReqObj.recvTag,
226 MPI_COMM_WORLD,
227 &flag,
228 &M,
229 &status);
230 }
231 if (firstlap) firstlap = false;
232 PRINTF_DBG("About to probe! recvTag is %d\n", ReqObj.recvTag);
233 while ((flag != 1) && (t < TIMETOL)) {
234 MPI_Improbe(MPI_ANY_SOURCE,
235 ReqObj.recvTag,
236 MPI_COMM_WORLD,
237 &flag,
238 &M,
239 &status);
240 ++t;
241 mssleep(DT);
242 }
243 if (t >= TIMETOL) ++TRIES;
244 t = 0;
245 if (flag == 1) {
246 PRINTF_DBG("[GAR] About to receive! recvInt is %d, and recvLength is %d\n", ReqObj.recvInt, ReqObj.recvLength);
247 if (ReqObj.recvInt){
248 MPI_Mrecv(&buffer,
249 ReqObj.recvLength,
250 MPI_INT,
251 &M, &S);
252 } else {
253 MPI_Mrecv(&buffer,
254 ReqObj.recvLength,
255 MPI_DOUBLE,
256 &M, &S);
257 }
258 PRINTF_DBG("[GAR] Received successfully!\n");
259 bool isReady = false;
260#pragma omp critical
261{
262 ReqObj.computeReady(REF, &buffer[0], isReady);
263};
264 while (!isReady) {
265#pragma omp critical
266{
267 ReqObj.computeReady(REF, &buffer[0], isReady);
268};
269 PRINTF_DBG("[GAR] not ready yet!\n");
270 mssleep(DT);
271 }
272 ReqObj.computeAnswer(REF, &buffer[0], &answer[0], S, MYPROC);
273 ReqObj.buildSendTag(&buffer[0]);
274 PRINTF_DBG("[GAR] About to send! sendDouble is %d, ReqObj.sendLength is %d, and ReqObj.sendTag is %d\n", ReqObj.sendDouble, ReqObj.sendLength, ReqObj.sendTag);
275 PRINTF_DBG("[GAR] The sent message('s first element) will be %f\n", answer[0]);
276 if (ReqObj.sendDouble){
277 MPI_Ssend(&answer[0],
278 ReqObj.sendLength,
279 MPI_DOUBLE,
280 S.MPI_SOURCE,
281 ReqObj.sendTag,
282 MPI_COMM_WORLD);
283 } else {
284 MPI_Ssend(&answer[0],
285 ReqObj.sendLength,
286 MPI_INT,
287 S.MPI_SOURCE,
288 ReqObj.sendTag,
289 MPI_COMM_WORLD);
290 }
291 }
292 }
293}
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309template<int DT, int TIMETOL, int BATCH>
310void perform_field_requests(ReferenceContainer &REF,int MYPROC, int fieldOrder,std::queue<long> * queue){
311
312 int ASKING_TAGS[4] = {VERTEXVAL_REQUEST_FLAG, K1_REQUEST, K2_REQUEST, K3_REQUEST};
313 bool keep_working = true;
314 long ix;
315 int L;
316#pragma omp critical
317{
318 if (!queue->empty()){
319 ix = queue->front();
320 queue->pop();
321 } else {
322 keep_working = false;
323 }
324}
325 while (keep_working){
326 if (fieldOrder==2){
327 L = REF.p_LayHelper->data[ix].RK1.size();
328 } else if (fieldOrder==3) {
329 L = REF.p_LayHelper->data[ix].RK2.size();
330 } else if (fieldOrder==4) {
331 L = REF.p_LayHelper->data[ix].RK2.size();
332 } else if (fieldOrder==1) {
333 assert((*REF.p_IntHelper)[ix].ixMap.size() == (REF.p_LayHelper->data[ix].RK2.size()-1));
334 L = (*REF.p_IntHelper)[ix].ixMap.size() + 1;
335 } else {
336 printf("[FATAL] field order requested to perform_field_requests does not exist!\n");std::cout<<std::flush;
337 exit(1);
338 }
339
340 for (int i=0; i<L-1 ; ++i){
341 double recvBuffer;
342 if (((int) std::get<2>((*REF.p_IntHelper)[ix].ixMap[i])) == MYPROC) {
343 unsigned long owner = std::get<1>((*REF.p_IntHelper)[ix].ixMap[i]);
344 bool isReadyYet = false;
345 if (fieldOrder == 2) {
346#pragma omp atomic read
347 isReadyYet = REF.p_LayHelper->data[owner].RK1_status;
348 while (!isReadyYet) {
349 mssleep(DT);
350#pragma omp atomic read
351 isReadyYet = REF.p_LayHelper->data[owner].RK1_status;
352 }
353#pragma omp atomic read
354 recvBuffer = REF.p_LayHelper->data[owner].RK1[0];
355 } else if (fieldOrder == 3) {
356#pragma omp atomic read
357 isReadyYet = REF.p_LayHelper->data[owner].RK2_status;
358 while (!isReadyYet) {
359 mssleep(DT);
360#pragma omp atomic read
361 isReadyYet = REF.p_LayHelper->data[owner].RK2_status;
362 }
363#pragma omp atomic read
364 recvBuffer = REF.p_LayHelper->data[owner].RK2[0];
365 } else if (fieldOrder == 4) {
366#pragma omp atomic read
367 isReadyYet = REF.p_LayHelper->data[owner].RK3_status;
368 while (!isReadyYet) {
369 mssleep(DT);
370#pragma omp atomic read
371 isReadyYet = REF.p_LayHelper->data[owner].RK3_status;
372 }
373#pragma omp atomic read
374 recvBuffer = REF.p_LayHelper->data[owner].RK3[0];
375 } else if (fieldOrder == 1) {
376#pragma omp atomic read
377 recvBuffer = (*REF.p_IntHelper)[owner].centralValue;
378 };
379 } else {
380
381 // This is kinda dumb, but the 'else' clause is for compatibility.
382 if (fieldOrder != 1){
383 int sendBuffer = (int) std::get<1>((*REF.p_IntHelper)[ix].ixMap[i]);
384 PRINTF_DBG("[pfr]!=1 About to send! sendbuffer says %d... asking tag is %d\n", sendBuffer, ASKING_TAGS[fieldOrder-1]);
385 MPI_Ssend(&sendBuffer,
386 1,
387 MPI_INT,
388 (int) std::get<2>((*REF.p_IntHelper)[ix].ixMap[i]),
389 ASKING_TAGS[fieldOrder-1],
390 MPI_COMM_WORLD);
391 PRINTF_DBG("Sent successfully!\n");
392 } else {
393 double sendBuffer = (double) std::get<1>((*REF.p_IntHelper)[ix].ixMap[i]);
394 PRINTF_DBG("[pfr]==1 About to send! sendbuffer says %f... asking tag is %d\n", sendBuffer, ASKING_TAGS[fieldOrder-1]);
395 MPI_Ssend(&sendBuffer,
396 1,
397 MPI_DOUBLE,
398 (int) std::get<2>((*REF.p_IntHelper)[ix].ixMap[i]),
399 ASKING_TAGS[fieldOrder-1],
400 MPI_COMM_WORLD);
401 PRINTF_DBG("Sent successfully!\n");
402 }
403 PRINTF_DBG("[pfr] About to receive! tag is %d\n", (int) std::get<1>((*REF.p_IntHelper)[ix].ixMap[i]));
404 MPI_Recv(&recvBuffer,
405 1,
406 MPI_DOUBLE,
407 (int) std::get<2>((*REF.p_IntHelper)[ix].ixMap[i]),
408 (int) std::get<1>((*REF.p_IntHelper)[ix].ixMap[i]),
409 MPI_COMM_WORLD,
410 MPI_STATUS_IGNORE);
411 PRINTF_DBG("[pfr] recieved %f!\n",recvBuffer);
412 }
413
414 if (fieldOrder == 2) {
415 REF.p_LayHelper->data[ix].RK1[i+1] = recvBuffer;
416 } else if (fieldOrder == 3) {
417 REF.p_LayHelper->data[ix].RK2[i+1] = recvBuffer;
418 } else if (fieldOrder == 4) {
419 REF.p_LayHelper->data[ix].RK3[i+1] = recvBuffer;
420 } else if (fieldOrder == 1) {
421 (*REF.p_IntHelper)[ix].neighborValues[i] = recvBuffer;
422 };
423 }
424#pragma omp critical
425 {
426 if (!queue->empty()){
427 ix = queue->front();
428 queue->pop();
429 } else {
430 keep_working = false;
431 }
432 }
433 }
434};
435
436
437template <int DT, int TIMETOL, int BATCH>
438void perform_requests(int NNodes,
439 ReferenceContainer REF,
440 unsigned long N,
441 OpenMPHelper &O) {
442 long ix;
443 unsigned long uix;
444 int total_processed=0;
445 int atomic_helper;
446
447 bool globalstatus = true;
448 bool ix_update = false;
449 long current_ix;
450
451 bool all_sent_locally = false;
452 int tot_locals = 0;
453 int sent_locally = 0;
454
455 // Insert here required initialization
456 std::list<long> all_indexes_seen;
457 if (BATCH<=0)error_report(min_batch_msg);
458 bool waiting = false;
459 bool bypass = false;
460 bool was_last_appearance = false;
461 int special_index = -1;
462 int rstatus = 0, sstatus = 0;
463 MPI_Request requests_send[BATCH] = {MPI_REQUEST_NULL};
464 int fsend[BATCH] = {0};
465 int frecv[BATCH] = {0};
466 int areRecv[BATCH] = {0};
467 int myProbes[BATCH] = {0};
468 MPI_Request requests_recv[BATCH] = {MPI_REQUEST_NULL};
469 InfoVecElem results[BATCH];
470 std::queue<int> QAvailable;
471 std::list<int> QPend;
472 for (int i = 0; i < BATCH; ++i){
473 QAvailable.push(i);
474 }
475 double vval[BATCH], their_vix[BATCH];
476 double vval2[BATCH][2], their_vix2[BATCH][2];
477 int ixlist[BATCH];
478 int owner[BATCH];
479 int retStatus=1;
480 int status_rstatus=1;
481 int counter=0,MAX_TRIES=3; // #HYPERPARAMS #HYPERPARAMETERS
482 bool NONBLOCKING = true;
483 bool HANDSHAKE = false;
484
485#pragma omp atomic read
486 atomic_helper = *(REF.p_TOT);
487 globalstatus = (atomic_helper < NNodes);
488
489 while (globalstatus) {
490 // Perform the main task: ask for the required information!
491 if (ix_update) {
492 auto itBeg = REF.p_ParHelper->data[ix].MissingA.begin();
493 auto itEnd = REF.p_ParHelper->data[ix].MissingA.end();
494 tot_locals = 0;
495 sent_locally = 0;
496 total_processed = 0;
497 int localcounter = 0;
498
499 for (auto _it= itBeg; _it != itEnd; ++_it) {
500 auto &thread = *_it;
501 for (auto it = thread.begin(); it != thread.end(); it++) {
502 ++tot_locals;
503 }
504 }
505
506 for (auto _it= itBeg; _it != itEnd; ++_it) {
507 auto &thread = *_it;
508 auto it = thread.begin();
509 while (it != thread.end()) {
510 ++localcounter;
511
512 owner[QAvailable.front()] = std::get<1>(*it);
513 their_vix[QAvailable.front()] = (double) std::get<2>(*it);
514 results[QAvailable.front()] = std::make_tuple((double) 0, // placeholder until we get the correct val
515 (double) std::get<0>(*it),
516 (unsigned long) (((unsigned long) owner[QAvailable.front()]) * N + (unsigned long) their_vix[QAvailable.front()] ),
517 (unsigned long) std::get<2>(*it),
518 (unsigned long) std::get<1>(*it));
519 PRINTF_DBG("[PR] About to ask for one node!\n");std::cout<<std::flush;
520 MPI_Ssend(&their_vix[QAvailable.front()],
521 1,
522 MPI_DOUBLE,
523 owner[QAvailable.front()],
524 VERTEXVAL_REQUEST_FLAG, MPI_COMM_WORLD);
525 PRINTF_DBG("[PR] Asked!\n");std::cout<<std::flush;
526
527 PRINTF_DBG("[PR] About to recv!\n");std::cout<<std::flush;
528 MPI_Recv(&vval[QAvailable.front()],
529 1,
530 MPI_DOUBLE,
531 owner[QAvailable.front()],
532 (int) their_vix[QAvailable.front()],
533 MPI_COMM_WORLD,
534 MPI_STATUS_IGNORE);
535 PRINTF_DBG("[PR] Correctly recieved!\n");std::cout<<std::flush;
536
537 // Prepare stuff for the next iteration
538 QPend.push_back(QAvailable.front());
539 QAvailable.pop();
540
541 if ((QAvailable.empty()) || (localcounter == tot_locals)) {
542 int target_size = 0;
543 if (localcounter == tot_locals){
544 target_size = 0;
545 } else {
546 target_size = BATCH-1;
547 }
548 while (QPend.size() != target_size) {
549 auto i = QPend.begin();
550 while (i != QPend.end()) {
551 std::get<0>(results[*i]) = vval[*i];
552 special_index = ix;
553 (*REF.p_IntHelper)[special_index].ResultsPendProcess.push_back(results[*i]);
554 QAvailable.push(*i);
555 QPend.erase(i++);
556 }
557 }
558 }
559 thread.erase(it++);
560 }
561 }
562
563 auto itBegE = REF.p_ParHelper->data[ix].MissingB.begin();
564 auto itEndE = REF.p_ParHelper->data[ix].MissingB.end();
565 tot_locals = 0;
566 sent_locally = 0;
567 total_processed = 0;
568 localcounter = 0;
569
570 for (auto _it= itBegE; _it != itEndE; ++_it) {
571 auto &thread = *_it;
572 for (auto it = thread.begin(); it != thread.end(); it++) {
573 ++tot_locals;
574 }
575 }
576
577
578
579 for (auto _it= itBegE; _it != itEndE; ++_it) {
580 auto &thread = *_it;
581 auto it = thread.begin();
582 while (it != thread.end()) {
583 ++localcounter;
584
585 // Retrieve data
586 owner[QAvailable.front()] = (int) std::get<1>(*it);
587 their_vix2[QAvailable.front()][0] = (double) std::get<0>(*it);
588 their_vix2[QAvailable.front()][1] = (double) std::get<2>(*it);
589 // Build the result
590 results[QAvailable.front()] = std::make_tuple(0.0, // placeholder until we get the correct node val
591 0.0, // placeholder until we get the correct edge val
592 (unsigned long) (((unsigned long) owner[QAvailable.front()]) * N + (unsigned long) their_vix2[QAvailable.front()][1] ),
593 (unsigned long) std::get<2>(*it),
594 (unsigned long) std::get<1>(*it));
595
596
597 PRINTF_DBG("[PR] About to ask for one node and edge!\n");std::cout<<std::flush;
598 PRINTF_DBG("About to send ixs %f and %f\n",their_vix2[QAvailable.front()][0], their_vix2[QAvailable.front()][1]);std::cout<<std::flush;
599 MPI_Ssend(&their_vix2[QAvailable.front()][0],
600 2,
601 MPI_DOUBLE,
602 owner[QAvailable.front()],
603 EDGEVAL_REQUEST_FLAG, MPI_COMM_WORLD);
604 PRINTF_DBG("[PR] Asked!\n");std::cout<<std::flush;
605
606 PRINTF_DBG("[PR] About to recv!\n");std::cout<<std::flush;
607 MPI_Recv(&vval2[QAvailable.front()][0],
608 2,
609 MPI_DOUBLE,
610 owner[QAvailable.front()],
611 (int) (int) (OFFSET + (int) their_vix2[QAvailable.front()][1]),
612 MPI_COMM_WORLD,
613 MPI_STATUS_IGNORE);
614 PRINTF_DBG("[PR] Correctly recieved!\n");std::cout<<std::flush;
615
616 QPend.push_back(QAvailable.front());
617 QAvailable.pop();
618
619 if ((QAvailable.empty()) || (localcounter == tot_locals)) {
620 int target_size = 0;
621 if (localcounter == tot_locals){
622 target_size = 0;
623 } else {
624 target_size = BATCH-1;
625 }
626
627 while (QPend.size() != target_size) {
628 auto i = QPend.begin();
629 while (i != QPend.end()) {
630 std::get<0>(results[*i]) = vval2[*i][0];
631 std::get<1>(results[*i]) = vval2[*i][1];
632 special_index = ix;
633 (*REF.p_IntHelper)[special_index].ResultsPendProcess.push_back(results[*i]);
634 QAvailable.push(*i);
635 QPend.erase(i++);
636 }
637
638 }
639 }
640 thread.erase(it++);
641 }
642 }
643
644
645#pragma critical
646 {
647 (*REF.p_READY_FOR_INTEGRATION).first.push(ix);
648 (*REF.p_READY_FOR_INTEGRATION).second.push(uix);
649 }
650 ++total_processed;
651
652 } else {
653 // *************THIS IS WHAT HAPPENS IF THERE WAS NO INDEX UPDATE****************
654 tot_locals = 0;
655 sent_locally = 0;
656 total_processed = 0;
657 }
658 ix_update = false;
659#pragma omp critical
660 {
661 if (!(*REF.p_CHECKED).first.empty()) { // Assuming they have both the same length
662 ix = (*REF.p_CHECKED).first.front();
663 uix = (*REF.p_CHECKED).second.front();
664 (*REF.p_CHECKED).first.pop();
665 (*REF.p_CHECKED).second.pop();
666 ix_update = true;
667 }
668 }
669 // Add to the number of totals processed the ones from previous lap :-)
670 if (total_processed != 0) {
671#pragma omp atomic update
672 *(REF.p_TOT) += total_processed;
673 }
674
675 // if there was no available index, check if it is still worth looping.
676 if (!ix_update) {
677#pragma omp atomic read
678 atomic_helper = *(REF.p_TOT);
679 globalstatus = (atomic_helper < NNodes); // this should be a strict equality but its buggy :^(
680 }
681 PRINTF_DBG("TOT=%d, NNodes=%d, ix_update=%d, total_processed=%d, ix=%d\n",
682 atomic_helper, NNodes, ix_update, total_processed, ix);std::cout<<std::flush;
683 if (globalstatus && (!ix_update)) mssleep(DT);
684 }
685 PRINTF_DBG("Final termination of perform_requests :-)\n");std::cout<<std::flush;
686
687} // end of function
688
689
690#endif //CPPPROJCT_COMMUNICATIONFUNCTIONS_H