HARM
harm and utilities
diag.c
1 
2 #include "decs.h"
3 
9 #define DIAGREPORT {trifprintf("t=%21.15g to do: tener=%21.15g (dt=%21.15g): dump_cnt=%ld @ t=%21.15g (dt=%21.15g) : avg_cnt=%ld @ t=%21.15g (dt=%21.15g) : debug_cnt=%ld @ t=%21.15g (dt=%21.15g) : image_cnt=%ld @ t=%21.15g (dt=%21.15g): restart=%d @ nstep=%ld (dt=%ld) dtfake=%ld\n",t,tdumpgen[ENERDUMPTYPE],DTdumpgen[ENERDUMPTYPE],dumpcntgen[MAINDUMPTYPE],tdumpgen[MAINDUMPTYPE],DTdumpgen[MAINDUMPTYPE],dumpcntgen[AVG1DUMPTYPE],tdumpgen[AVG1DUMPTYPE],DTdumpgen[AVG1DUMPTYPE],dumpcntgen[DEBUGDUMPTYPE],tdumpgen[DEBUGDUMPTYPE],DTdumpgen[DEBUGDUMPTYPE],dumpcntgen[IMAGEDUMPTYPE],tdumpgen[IMAGEDUMPTYPE],DTdumpgen[IMAGEDUMPTYPE],whichrestart,nrestart,DTr,DTfake);}
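 // DIAGREPORT: one-line trifprintf status report of the next scheduled ener/dump/avg/debug/image dump
 // times and counters, plus the restart state (whichrestart, nrestart, DTr) and DTfake; used by diag()
 // and pre_dump() below.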
10 
11 
12 
13 static int get_dodumps(int call_code, int firsttime, SFTYPE localt, long localnstep, long localrealnstep, FTYPE *tdumpgen, FTYPE *tlastgen, FTYPE tlastareamap
14  ,long long int nlastrestart, long long int nrestart
15  ,long long int nlastfake, long long int nfake
16  ,int *doareamap, int *dodumpgen);
17 static int pre_dump(int whichDT, FTYPE t, FTYPE localt, FTYPE *DTdumpgen, long int *dumpcntgen, long long int *dumpcgen, FTYPE *tdumpgen, FILE **dumpcnt_filegen, FTYPE *tlastgen
18  ,int whichrestart, long long int restartc, long long int localrealnstep, long long int nrestart,long DTr, long long int nlastrestart
19  ,int whichfakevar, long long int fakec, long long int nfake,long DTfakevar, long long int nlastfake
20  );
21 static int post_dump(int whichDT, FTYPE localt, FTYPE *DTdumpgen, long int *dumpcntgen, long long int *dumpcgen, FTYPE *tdumpgen, FILE **dumpcnt_filegen, FTYPE *tlastgen
22  ,long *restartsteps, int *whichrestart, long long int *restartc, long localrealnstep,long long int *nrestart, long DTr, long long int *nlastrestart
23  ,long *fakesteps, int *whichfakevar, long long int *fakec, long long int *nfake, long DTfakevar, long long int *nlastfake
24  );
25 
26 
29 int diag(int call_code, FTYPE localt, long localnstep, long localrealnstep)
30 {
32  //
33  // BEGIN DIAG VARS
34  //
35  static int firsttime = 1;
36  int pl,pliter;
37  FTYPE asym[NPR], norm[NPR], maxasym[NPR];
38 
39  FILE *dumpcnt_filegen[NUMDUMPTYPES];
40  static SFTYPE tlastgen[NUMDUMPTYPES];
41  static SFTYPE tlastareamap;
42  static long long int dumpcgen[NUMDUMPTYPES];
43  static SFTYPE tdumpgen[NUMDUMPTYPES];
44  int dodumpgen[NUMDUMPTYPES];
45  // restart stuff
46  static long long int restartc;
47  static long long int nlastrestart,nrestart;
48  // fake stuff
49  static long long int fakec;
50  static long long int nlastfake,nfake;
51 
52  // other stuff
53  int doareamap;
54  int dtloop;
55 
56  int dir,interpi,enodebugi;
57  int dissloop;
58  int indexfinalstep;
59  int enodebugdump(long dump_cnt);
60  int asym_compute_1(FTYPE (*prim)[NSTORE2][NSTORE3][NPR]);
61  int whichDT;
62  int (*dumpfuncgen[NUMDUMPTYPES])(long dump_cnt);
63 
64 
65 
66 
67 
68  if(DODIAGS==0){
69  // then shouldn't be here, so just return without error
70  return(0);
71  }
72 
73 
74 
75 
77  //
78  // Setup pointers to dump functions
79  //
81 
82  dumpfuncgen[IMAGEDUMPTYPE]=&image_dump;
83  dumpfuncgen[RESTARTDUMPTYPE]=&restart_write;
84  dumpfuncgen[RESTARTUPPERPOLEDUMPTYPE]=&restartupperpole_write;
85  dumpfuncgen[RESTARTMETRICDUMPTYPE]=&restartmetric_write;
86  dumpfuncgen[MAINDUMPTYPE]=&dump;
87  dumpfuncgen[GRIDDUMPTYPE]=&gdump;
88  dumpfuncgen[AVG1DUMPTYPE]=&avgdump;
89  dumpfuncgen[AVG2DUMPTYPE]=&avg2dump;
90  dumpfuncgen[DEBUGDUMPTYPE]=&debugdump;
91  dumpfuncgen[FIELDLINEDUMPTYPE]=&fieldlinedump;
92  dumpfuncgen[ENODEBUGDUMPTYPE]=&enodebugdump;
93  dumpfuncgen[DISSDUMPTYPE]=&dissdump;
94  dumpfuncgen[OTHERDUMPTYPE]=&dumpother;
95  dumpfuncgen[FLUXDUMPTYPE]=&fluxdumpdump;
96  dumpfuncgen[EOSDUMPTYPE]=&eosdump;
97  dumpfuncgen[VPOTDUMPTYPE]=&vpotdump;
98  dumpfuncgen[FAILFLOORDUDUMPTYPE]=&failfloordudump;
99  // ENERDUMPTYPE : not standard spatial dump file and not standard prototype
100  dumpfuncgen[ENERDUMPTYPE]=NULL;
101  dumpfuncgen[RADDUMPTYPE]=&raddump;
102  dumpfuncgen[DISSMEASUREDUMPTYPE]=&dissmeasuredump;
103  dumpfuncgen[FLUXSIMPLEDUMPTYPE]=&fluxsimpledump;
104  if(USEMPI && USEROMIO==1 && MPIVERSION==2){
 105  // then do restart-like dumping of fakedump so that ROMIO non-blocking writes are eventually flushed to disk on a reasonable timestep scale.
106  dumpfuncgen[FAKEDUMPTYPE]=&fakedump;
107  }
108  else{
109  // then no need to call fakedump()
110  dumpfuncgen[FAKEDUMPTYPE]=NULL;
111  }
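 // NOTE: dumpfuncgen[] is a dispatch table: each dump type index maps to a writer with the common
 // prototype  int writer(long dump_cnt).  Entries left NULL (ENERDUMPTYPE always; FAKEDUMPTYPE when the
 // ROMIO-based fakedump is not needed) are simply skipped by the dump-creation loop further below.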
112 
113 
114 
115 
117  //
118  // setup timing of writes to files
119  //
121 
122  if ((call_code == INIT_OUT) || (firsttime == 1)) {
123 
124  // no need to repeat if restarting
125  nlastrestart = (long) (DTr*(SFTYPE)((localrealnstep/DTr))-1);
126  nlastfake = (long) (DTfake*(SFTYPE)((localrealnstep/DTfake))-1);
127  tlastareamap = localt-SMALL;
128 
129 
130  for(dtloop=0;dtloop<NUMDUMPTYPES;dtloop++){
131  tlastgen[dtloop] = DTdumpgen[dtloop]*(SFTYPE)((long long int)(t/DTdumpgen[dtloop]))-SMALL;
132  }
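 // Illustrative example of the tlastgen[] initialization above: with t=37.2 and DTdumpgen[]=10 it gives
 // tlast = 10*floor(3.72) - SMALL = 30 - SMALL, i.e. the most recent period boundary, nudged down by
 // SMALL so that a dump exactly at a boundary is not suppressed by the (localt != tlastgen[]) checks
 // in get_dodumps().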
133 
134  for(dtloop=0;dtloop<NUMDUMPTYPES;dtloop++){
135  dumpcgen[dtloop]=0;
136  }
137 
138  if (RESTARTMODE == 0) {
139 
140  for(dtloop=0;dtloop<NUMDUMPTYPES;dtloop++){
141  tdumpgen[dtloop]=localt;
142  }
143 
144 
145  nrestart = localnstep;
146  // override defaults:
147  tdumpgen[AVG1DUMPTYPE]=localt+DTdumpgen[AVG1DUMPTYPE]; // do next time
148  tdumpgen[AVG2DUMPTYPE]=localt+DTdumpgen[AVG2DUMPTYPE]; // do next time
149 
150 
151  for(dtloop=0;dtloop<NUMDUMPTYPES;dtloop++){
152  dumpcntgen[dtloop]=0;
153  }
154 
155  appendold = 0;
156 
157  }
158  else{
159 
160  setrestart(&appendold);
161 
162  // assuming started at t=0 and localnstep=0 for original run
163  // time to dump NEXT
164  nrestart = (long)(DTr*(SFTYPE)((localrealnstep/DTr)+1));
165  nfake = (long)(DTfake*(SFTYPE)((localrealnstep/DTfake)+1));
166 
167  for(dtloop=0;dtloop<NUMDUMPTYPES;dtloop++){
168  tdumpgen[dtloop]=DTdumpgen[dtloop]*(SFTYPE)((long long int)(localt/DTdumpgen[dtloop])+1);
169  }
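 // Illustrative example of the restart-time scheduling above: with localt=37.2 and DTdumpgen[]=10,
 // tdumpgen[] = 10*(floor(3.72)+1) = 40, i.e. the next period boundary after the restart time;
 // nrestart and nfake are set the same way in units of steps (next multiple of DTr / DTfake).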
170 
171  DIAGREPORT;
172  }
173  }// end if firsttime or diag(INIT_OUT) called
174 
175 
176 
177 
178 
179 
181  //
182  // setup what we will dump this call to diag()
183  //
185  get_dodumps(call_code, firsttime, localt, localnstep, localrealnstep, tdumpgen, tlastgen, tlastareamap, nlastrestart, nrestart, nlastfake, nfake, &doareamap, dodumpgen);
186 
187 
189  //
 190  // check if the caller only wanted to know whether the next time would lead to making a dump file
 191  // used so that expensive diagnostic quantities are only computed during a step if diagnostics will actually be output
192  //
194  if(call_code==FUTURE_OUT){
195  if(dodumpgen[MAINDUMPTYPE]) return(DOINGFUTUREOUT);
196  else return(0);
197  }
198 
199 
200 
201 
202 
203 
205  //
207  //
208  // For non-blocking ROMIO to be effective as written, need to have largest dumps last
209  //
211 
212 
213 #if(ASYMDIAGCHECK)
214  asym_compute_1(GLOBALPOINT(pdump));
215 #endif
216 
217 
218 
219 
220 
221 
223  //
224  // extra bounding for diagnostics
225  //
227  if(
228  // if doing simulbccalc type loop in step_ch.c then need to bound when doing diagnostics since not done yet
229  (SIMULBCCALC>=1 && (dodumpgen[MAINDUMPTYPE]||dodumpgen[RESTARTDUMPTYPE]||dodumpgen[ENERDUMPTYPE]))
 230  // assume if PRODUCTION>0 then the user knows divB won't be computed at MPI boundaries and that's ok. We don't want to compute it, so avoid the bounding that slows things down. Assume it's ok to still compute the dump-file version, just not the ener version, which may be too frequent.
231  || (PRODUCTION==0 && (dodumpgen[MAINDUMPTYPE]||dodumpgen[ENERDUMPTYPE]))
232  || (PRODUCTION>0 && (dodumpgen[MAINDUMPTYPE]))
233  ){
234 
235  // for dump, rdump, and divb in ener
236  int finalstep=0; // only for diagnostics, no accounting.
237  // bound_allprim(STAGEM1,finalstep, localt,GLOBALPOINT(pdump),GLOBALPOINT(pstagdump),GLOBALPOINT(udump), USEMPI); // no idea why was doing this.
238  if(DOENOFLUX != NOENOFLUX){
239  // bound GLOBALPOINT(udump) (unew) so divb can be computed at MPI boundaries (still real boundaries won't be computed correctly for OUTFLOW types)
240  // Notice only need to bound 1 cell layer (BOUNDPRIMSIMPLETYPE) since divb computation only will need 1 extra cell
241  int fakedir=0;
242  //bound_mpi(STAGEM1,finalstep,fakedir,BOUNDPRIMSIMPLETYPE,GLOBALPOINT(udump),NULL,NULL,NULL,NULL);
243  bound_mpi(STAGEM1,finalstep,fakedir,BOUNDPSTAGSIMPLETYPE,GLOBALPOINT(udump),NULL,NULL,NULL,NULL); // for staggered field
244  }
245  }
246 
247 
248 
249 
250 #if(PRODUCTION==0)
251  // DEBUG:
252  // GODMARK: need to disable to see if hurts internal solution since bounding this may help algorithm itself and we don't want that
253  // so far don't see problem with stag + FV or FLUXRECON
254 
255  // bound conserved quantities so divb is computed correctly when doing higher-order methods
256  // note that divb will always be in error at boundary since uses unew
257  // don't need ustag in boundary cells, but do this to check divb on boundary when testing
258 
259  // not generally a good idea
260  // bound_uavg(STAGEM1,localt,GLOBALPOINT(udump), 1, USEMPI);
261 #endif
262 
263 
264 
265 
266 
267 
268 
270  //
271  // Things to compute before creating dumping
272  //
274 
275  if(DOAVGDIAG){
276  // do every time step
277  // assume can't fail, but can apparently
278  if(average_calc(dodumpgen[AVG1DUMPTYPE]||dodumpgen[AVG2DUMPTYPE])>=1) return(1);
279  }
280 
281 
282 
283 
284 
286  //
287  // ener dump (integrated quantities: integrate and possibly dump them too (if dodumpgen[ENERDUMPTYPE]==1))
 288  // need integrated quantities for the restart dump, but don't write them to the ener file.
289  // (not part of standard NUMDUMPTYPES array)
291  if(DOENERDIAG&&(dodumpgen[ENERDUMPTYPE]||dodumpgen[RESTARTDUMPTYPE])){
 292  // get integrated quantities and possibly dump them to files
293  dump_ener(dodumpgen[ENERDUMPTYPE],dodumpgen[RESTARTDUMPTYPE],call_code);
294  }
295 
296 
298  //
299  // output other things not put into restart file AND update time to output to ener files
300  //
301  // (not part of standard NUMDUMPTYPES array)
303 
304  if(DOENERDIAG&&dodumpgen[ENERDUMPTYPE]){
305  if(COMPUTEFRDOT){
306  frdotout(); // need to include all terms and theta fluxes on horizon/outer edge at some point GODMARK
307  }
308 
 309  // only update the next ener dump time if we actually wrote on the ener-type file period
310  whichDT=ENERDUMPTYPE;
311  // below is really floor to nearest integer plus 1
312  dumpcgen[whichDT] = 1 + MAX(0,(long long int)((localt-tdumpgen[whichDT])/DTdumpgen[whichDT]));
313  tdumpgen[whichDT] = (ROUND2LONGLONGINT(tdumpgen[whichDT]/DTdumpgen[whichDT]) + dumpcgen[whichDT])*DTdumpgen[whichDT];
314  tlastgen[ENERDUMPTYPE]=localt;
315  }
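 // Illustrative example of the update above: with tdumpgen[ENERDUMPTYPE]=30, DTdumpgen[ENERDUMPTYPE]=10
 // and localt=47.3, dumpcgen = 1 + floor((47.3-30)/10) = 2 and the next ener time becomes (3+2)*10 = 50,
 // i.e. missed period boundaries are skipped and the next boundary after localt is scheduled.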
316 
317 
318 
319 
321  //
322  // AREA MAP (not part of standard NUMDUMPTYPES array)
323  //
325 
326  if(doareamap){
327  if(area_map(call_code, TIMESERIESAREAMAP, 20, ifail, jfail, kfail, GLOBALPOINT(pdump))>=1) return(1);
328  tlastareamap=t;
329  }
330 
331 
332 
333 
335  // additional types of dumps linked to normal sequence of dump types
337 
338  int dumptypeiter2=MAINDUMPTYPE;
339  if(dodumpgen[dumptypeiter2]){
340 
341  // get main dump count
342  long int dumpcntmain;
343  dumpcntmain=dumpcntgen[dumptypeiter2];
344 
345  // don't want restart write here to change "state" of restart header as post_dump() does.
 346  // the MAINDUMPTYPE-period restart files are written here instead of in the main loop below, since the "dodumpgen[]" condition is set up for the frequent restart dumps and we want these restart dumps to stay in sync with the main dump files,
 347  // so one can restart at a dump time without reconstructing the rdump from a dump.
 348  // Also, if we run out of disk space, the normal rdumps can become corrupted.
349 
 350  // avoid the upperpole restart file here since it is already called inside restart_write() itself, so the two always stay in sync
351  // if(FLUXB==FLUXCTSTAG && special3dspc==1) restartupperpole_write(-(long)dumpcntgen[dumptypeiter]-1);
352  // NOTEMARK: This maindump-type-restart file is done after all other dump types are done so the dump type file numbers are fully updated for this dumping time.
353  restart_write(-(long)dumpcntmain-1);
354  if(DOEVOLVEMETRIC) restartmetric_write(-(long)dumpcntmain-1);
355  }
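 // NOTE: the negative argument (-dumpcntmain-1) passed to restart_write()/restartmetric_write() above
 // appears to label these restart files by the main dump number (rather than by the alternating 0/1
 // restart slot handled in post_dump()), so the restart "state" used for the frequent rdumps is left
 // untouched; see restart_write() for the exact file-naming convention.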
356 
357 
358 
360  //
361  // Loop over dumps creation
362  //
364 
365  long int dumpcntmain;
366  int dumptypeiter;
367  for(dumptypeiter=0;dumptypeiter<NUMDUMPTYPES;dumptypeiter++){
368 
369  if(dodumpgen[dumptypeiter]&&dumpfuncgen[dumptypeiter]!=NULL){
370  // pre_dump:
371  pre_dump(dumptypeiter,t,localt,DTdumpgen,dumpcntgen,dumpcgen,tdumpgen,dumpcnt_filegen,tlastgen
372  ,whichrestart,restartc,localrealnstep,nrestart,DTr,nlastrestart
373  ,whichfake,fakec,nfake,DTfake,nlastfake
374  );
375 
376 
377  if((*(dumpfuncgen[dumptypeiter]))(dumpcntgen[dumptypeiter]) >= 1){
378  dualfprintf(fail_file,"unable to print %s file\n",dumpnamelist[dumptypeiter]);
379  return (1);
380  }
381 
382 
383  // post_dump:
384  post_dump(dumptypeiter,localt,DTdumpgen,dumpcntgen,dumpcgen,tdumpgen,dumpcnt_filegen,tlastgen
385  ,restartsteps, &whichrestart, &restartc, localrealnstep, &nrestart, DTr, &nlastrestart
386  ,fakesteps, &whichfake, &fakec, &nfake, DTfake, &nlastfake
387  );
388  }
389  }
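 // Summary of the loop above: for each enabled dump type with a non-NULL writer, pre_dump() logs what is
 // about to be written, the writer is invoked through dumpfuncgen[] with the current file counter, and
 // post_dump() advances the counter, schedules the next dump time (or next restart/fake step), and records
 // the running file count in dumps/0_num*_dumps.dat.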
390 
391 
392 
393 
394 
396  // special cases
398 
399  // GRIDDUMPTYPE special case
 400  // Only done at t=0, or for 1D in sync with MAINDUMPTYPE
 401  // output grid (probably want both the full grid (to make sure it's ok) and the computational grid to compare with data dumps)
402  if((DOGDUMP)&&(!GAMMIEDUMP)&&(firsttime&&(RESTARTMODE==0))){
403  // -1 means no file number on filename
404  gdump(-1);
 405  // NOTEMARK: if one wants to, e.g., stop right after gdump, then an MPI barrier has to be added. If forcing gdump output, put 1|| in the conditional above.
406  // #if(USEMPI)
407  // MPI_Barrier(MPI_COMM_WORLD);
408  // #endif
409  // myexit(0); // stop after gdump.bin created
410  //
 411  // If one wants to create gdump and continue computing even after having previously restarted...
 412  // Then add a static int firsttimegdump=1; before the conditional and set firsttimegdump=0 at the end of the conditional.
413  }
414 
415 
416 
417 
418 
419 
420 
421 
422 
423 
424 
426  //
427  // DIAG clearings
428  //
429  // finally some clearing based upon what called
430  //
432 
 433  if(DODEBUG){ // shouldn't clear these until after both the ener and debug dumps are done, so both have all timescale data.
434 
435 #pragma omp parallel // need no global non-arrays
436  {
437  int i,j,k,floor;
438  OPENMP3DLOOPVARSDEFINE; OPENMP3DLOOPSETUPZLOOP; // constant loop parameters for entire region, so can be shared
439 
440  if(dodumpgen[ENERDUMPTYPE]){
441  // cleanse the ener time scale for the failure diag
442 #pragma omp for schedule(OPENMPSCHEDULE(),OPENMPCHUNKSIZE(blocksize)) nowait
 443  OPENMP3DLOOPBLOCK{
 444  OPENMP3DLOOPBLOCK2IJK(i,j,k);
446  FINALSTEPLOOP(indexfinalstep) FLOORLOOP(floor) GLOBALMACP0A3(failfloorcount,i,j,k,indexfinalstep,ENERTS,floor) =0;
447  }// end 3D loop
448  }// end if
449 
450 
451  if(dodumpgen[DEBUGDUMPTYPE]){
 452  // cleanse the failure diag
 454 #pragma omp for schedule(OPENMPSCHEDULE(),OPENMPCHUNKSIZE(blocksize)) nowait
 455  OPENMP3DLOOPBLOCK{
 456  OPENMP3DLOOPBLOCK2IJK(i,j,k);
457  FINALSTEPLOOP(indexfinalstep) FLOORLOOP(floor) GLOBALMACP0A3(failfloorcount,i,j,k,indexfinalstep,DEBUGTS,floor) =0;
458  }// end 3D loop
459  }// end if
460 
461  if(dodumpgen[IMAGEDUMPTYPE]){
 462  // cleanse the failure counts for this time scale
 464 #pragma omp for schedule(OPENMPSCHEDULE(),OPENMPCHUNKSIZE(blocksize)) nowait
 465  OPENMP3DLOOPBLOCK{
 466  OPENMP3DLOOPBLOCK2IJK(i,j,k);
467  FINALSTEPLOOP(indexfinalstep) FLOORLOOP(floor) GLOBALMACP0A3(failfloorcount,i,j,k,indexfinalstep,IMAGETS,floor) =0;
468  }// end 3D loop
469  }// end if
470  }// end parallel region (and implied barrier)
471  }// end if DODEBUG
472 
473 
474 
475  if(DOENODEBUG){
476  if(dodumpgen[DEBUGDUMPTYPE]){
477  int i,j,k;
478  FULLLOOP DIMENLOOP(dir) INTERPENOTYPELOOP(interpi) PDIAGLOOP(pl) ENODEBUGLOOP(enodebugi){
479  if(dir<=2 && pl<=U2){
480  GLOBALMACP0A4(enodebugarray,i,j,k,dir-1,interpi,pl,enodebugi)=0;
481  }
482  }
483  }
484  }
485 
486 
487  if(0&&DODISS){
488  // clear diss (I don't see why one should do this and tie results to dump frequency)
489  if(dodumpgen[MAINDUMPTYPE]){
490  int i,j,k;
491  FULLLOOP{
492  for(dissloop=0;dissloop<NUMDISSVERSIONS;dissloop++){
493  // clear failures too
494  GLOBALMACP0A1(dissfunpos,i,j,k,dissloop)=0.0;
495  }
496  }
497  }
498  }
499 
500 
501  /* //symmetry check
502  j = 0;
503  k = 0;
504  PDIAGLOOP(pl) maxasym[pl] = 0.0;
505 
506  PDIAGLOOP(pl) {
507  if( pl >= U1 && pl <= U3 ) {
508  norm[pl] = coordparams.timescalefactor;
509  }
510  else if( pl == UU ) {
511  norm[pl] = coordparams.timescalefactor * coordparams.timescalefactor;
512  }
513  else {
514  norm[pl] = 1.;
515  }
516 
517  norm[pl] = 1 / norm[pl];
518  }
519 
520  for( i = 0; i < N1; i++ ) {
521  PDIAGLOOP( pl ) {
522  if( pl == U1 ) {
523  asym[pl] = fabs( MACP0A1(p,i,j,k,pl) +MACP0A1(p,N1 - 1 - i,j,k,pl) );
524  }
525  else {
526  asym[pl] = fabs(MACP0A1(p,i,j,k,pl) -MACP0A1(p,N1 - 1 - i,j,k,pl) );
527  }
528 
529 
530  asym[pl] /= norm[pl];
531 
532  maxasym[pl] = MAX( asym[pl], maxasym[pl] );
533  }
534  }
535 
536  dualfprintf( fail_file, "asym %ld ", localrealnstep );
537 
538  PDIAGLOOP(pl) if( pl <= U1 ) dualfprintf( fail_file, " %22.16g", maxasym[pl] );
539 
540  dualfprintf( fail_file, "\n" );
541  */
542  firsttime = 0;
543  return (0);
544 }
545 
546 
547 
548 
550 static int pre_dump(
551  int whichDT, FTYPE tt, FTYPE localt, FTYPE *DTdumpgenvar, long int *dumpcntgenvar, long long int *dumpcgen, FTYPE *tdumpgen, FILE **dumpcnt_filegen, FTYPE *tlastgen
552  ,int whichrestartvar, long long int restartc, long long int localrealnstep, long long int nrestart,long DTrvar, long long int nlastrestart
553  ,int whichfakevar, long long int fakec, long long int nfake,long DTfakevar, long long int nlastfake
554  )
555 {
556 
557  DIAGREPORT;
558  if(whichDT==RESTARTDUMPTYPE || whichDT==RESTARTMETRICDUMPTYPE){
559  // integer based period
 560  trifprintf("dumping: restart: %d localnstep: %lld nlastrestart: %lld nrestart: %lld restartc: %lld\n", whichrestartvar,localrealnstep,nlastrestart,nrestart,restartc);
561  }
562  else{
 563  trifprintf("dumping: dump_cnt=%ld : tt=%21.15g tlastdump=%21.15g tdump=%21.15g dumpc=%lld\n", dumpcntgenvar[whichDT],localt,tlastgen[whichDT],tdumpgen[whichDT],dumpcgen[whichDT]);
564  }
565  return(0);
566 }
567 
568 
569 
571 static int post_dump(
572  int whichDT, FTYPE localt, FTYPE *DTdumpgenvar, long int *dumpcntgenvar, long long int *dumpcgen, FTYPE *tdumpgen, FILE **dumpcnt_filegen, FTYPE *tlastgen
573  ,long *restartstepsvar, int *whichrestartvar, long long int *restartc, long localrealnstep,long long int *nrestart, long DTrvar, long long int *nlastrestart
574  ,long *fakestepsvar, int *whichfakevar, long long int *fakec, long long int *nfake, long DTfakevar, long long int *nlastfake
575  )
576 {
577  char temps[MAXFILENAME];
578  char temps2[MAXFILENAME];
579  char temps3[MAXFILENAME];
580 
581 
582  if(whichDT==RESTARTDUMPTYPE || whichDT==RESTARTMETRICDUMPTYPE){
583  // integer based period
584  // 0 1 0 1 0 1 ...
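 // i.e. two restart slots are alternated: restartsteps[0]/[1] record the step at which each slot was
 // last written, whichrestart flips 0,1,0,1,..., and nrestart is pushed to the next multiple of DTr
 // after the current step (same floor-plus-one pattern as the dump-time update in the final branch below).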
585 
586  restartstepsvar[*whichrestartvar] = localrealnstep;
587  *whichrestartvar = !(*whichrestartvar);
588 
589  // set "counter"
590  dumpcntgenvar[whichDT]=(long int)(*whichrestartvar);
591 
592  *restartc = 1 + MAX(0,(long long int)(((FTYPE)localrealnstep-(FTYPE)(*nrestart))/((FTYPE)DTr)));
593  *nrestart = (ROUND2LONGLONGINT((FTYPE)localrealnstep/((FTYPE)DTr)) + (*restartc)) * DTr;
594  *nlastrestart=localrealnstep;
595  }
596  else if(whichDT==FAKEDUMPTYPE){
597  fakestepsvar[*whichfakevar] = localrealnstep;
598  *whichfakevar = !(*whichfakevar); // go ahead and oscillate this, even if not used.
599 
600  // set "counter"
601  dumpcntgenvar[whichDT]=(long int)(*whichfakevar); // counter not currently used, but ok to set this to something.
602 
603  *fakec = 1 + MAX(0,(long long int)(((FTYPE)localrealnstep-(FTYPE)(*nfake))/((FTYPE)DTfakevar)));
604  *nfake = (ROUND2LONGLONGINT((FTYPE)localrealnstep/((FTYPE)DTfakevar)) + (*fakec)) * DTfakevar;
605  *nlastfake=localrealnstep;
606  }
607  else{
608 
609  // iterate counter
610  dumpcntgenvar[whichDT]++;
611  // below is really floor to nearest integer plus 1
612  dumpcgen[whichDT] = 1 + MAX(0,(long long int)((localt-tdumpgen[whichDT])/DTdumpgenvar[whichDT]));
613  tdumpgen[whichDT] = (ROUND2LONGLONGINT(tdumpgen[whichDT]/DTdumpgenvar[whichDT]) + dumpcgen[whichDT])*DTdumpgenvar[whichDT];
614 
615  // output number of dumps
616  sprintf(temps, "dumps/0_num%d_%s_dumps.dat",whichDT,dumpnamelist[whichDT]);
617  sprintf(temps2, "error opening %d %s dump count file\n",whichDT,dumpnamelist[whichDT]);
618  sprintf(temps3, "Couldn't close %d %s dumpcnt_file",whichDT,dumpnamelist[whichDT]);
619 
620  myfopen(temps,"w",temps2,&dumpcnt_filegen[whichDT]);
621  myfprintf(dumpcnt_filegen[whichDT], "# Number of %d %s dumps\n%ld\n",whichDT,dumpnamelist[whichDT],dumpcntgenvar[whichDT]);
622  myfclose(&dumpcnt_filegen[whichDT],temps3);
623  tlastgen[whichDT]=localt;
624  }
625 
626 
627  return(0);
628 }
629 
631 static int get_dodumps(int call_code, int firsttime, SFTYPE localt, long localnstep, long localrealnstep, FTYPE *tdumpgen, FTYPE *tlastgen, FTYPE tlastareamap, long long int nlastrestart, long long int nrestart, long long int nlastfake, long long int nfake, int *doareamap, int *dodumpgen)
632 {
633 
634 
635 
636  if((DOAREAMAPDIAG)&&(localt != tlastareamap)&&(dofailmap)&&(localnstep>=steptofailmap)){
637  *doareamap=1;
638  }
639  else *doareamap=0;
640 
641 
643  //
644  // rest are on NUMDUMPTYPES list
645  //
647 
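 // All of the conditions below follow the same template:
 //   (columns exist for this dump type) && (its diagnostic is enabled) &&
 //   ( dump-every-substep flag || ( t has advanced since the last such dump &&
 //     ( t >= next scheduled time || failure-triggered dump || final call ) ) )
 // Only the restart- and fake-dump types differ, using step counts (nrestart/nfake) instead of time.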
648  // IMAGEDUMPTYPE
649  if((dnumcolumns[IMAGEDUMPTYPE]>0)&&((DOIMAGEDIAG)&&(DODIAGEVERYSUBSTEP||((localt!=tlastgen[IMAGEDUMPTYPE])&&(localt >= tdumpgen[IMAGEDUMPTYPE] || call_code==FINAL_OUT))) )){
650  dodumpgen[IMAGEDUMPTYPE]=1;
651  }
652  else dodumpgen[IMAGEDUMPTYPE]=0;
653 
654 
655  // upperpole restart is (for now) always called inside normal restart dump call, so avoid extra call that this would create
656  // RESTARTUPPERPOLEDUMPTYPE
657  // if((FLUXB==FLUXCTSTAG&&special3dspc==1)&&((DORDUMPDIAG)&&( ((nlastrestart!=nrestart)&&(failed == 0) && (localrealnstep >= nrestart ))||(call_code==FINAL_OUT) ) ) ){
658  // dodumpgen[RESTARTUPPERPOLEDUMPTYPE]=1;
659  // }
660  // else dodumpgen[RESTARTUPPERPOLEDUMPTYPE]=0;
661  dodumpgen[RESTARTUPPERPOLEDUMPTYPE]=0;
662 
663 
664  // RESTARTDUMPTYPE
665  if((DORDUMPDIAG)&&( ((nlastrestart!=nrestart)&&(failed == 0) && (localrealnstep >= nrestart ))||(call_code==FINAL_OUT) ) ){
666  dodumpgen[RESTARTDUMPTYPE]=1;
667  }
668  else dodumpgen[RESTARTDUMPTYPE]=0;
669 
670  // RESTARTMETRICDUMPTYPE
671  if(DOEVOLVEMETRIC&&((DORDUMPDIAG)&&( ((nlastrestart!=nrestart)&&(failed == 0) && (localrealnstep >= nrestart ))||(call_code==FINAL_OUT) ) ) ){
672  dodumpgen[RESTARTMETRICDUMPTYPE]=1;
673  }
674  else dodumpgen[RESTARTMETRICDUMPTYPE]=0;
675 
676 
677  // MAINDUMPTYPE
678  if((dnumcolumns[MAINDUMPTYPE]>0)&&( ((DODUMPDIAG)&&(DODIAGEVERYSUBSTEP||((localt!=tlastgen[MAINDUMPTYPE])&&(localt >= tdumpgen[MAINDUMPTYPE] || (RESTARTMODE&&dofaildump&&(localnstep>=steptofaildump)) || call_code==FINAL_OUT ))) ) )){
679  dodumpgen[MAINDUMPTYPE]=1;
680  }
681  else dodumpgen[MAINDUMPTYPE]=0;
682 
683 
684 
685  // GRIDDUMPTYPE
 686  // output grid (probably want both the full grid (to make sure it's ok) and the computational grid to compare with data dumps)
687  // only reasonable to do if 1-D
688  if((DOGDUMPDIAG)&&(!GAMMIEDUMP)&&(DOEVOLVEMETRIC&&(N2==1)&&(N3==1)&&(DOGDUMPDIAG)&&(!GAMMIEDUMP)&&(RESTARTMODE==0)&&(localt >= tdumpgen[GRIDDUMPTYPE] || call_code==FINAL_OUT))){
689  dodumpgen[GRIDDUMPTYPE]=1;
690  }
691  else dodumpgen[GRIDDUMPTYPE]=0;
692 
693 
694 
695 
696  // AVG1DUMPTYPE
697  if((dnumcolumns[AVG1DUMPTYPE]>0)&&((DOAVGDIAG)&&((localt!=tlastgen[AVG1DUMPTYPE])&&(localt >= tdumpgen[AVG1DUMPTYPE] || call_code==FINAL_OUT)))){
698  dodumpgen[AVG1DUMPTYPE]=1;
699  }
700  else dodumpgen[AVG1DUMPTYPE]=0;
701 
702  // AVG2DUMPTYPE
703  if((DOAVG2&&dnumcolumns[AVG2DUMPTYPE]>0)&&((DOAVGDIAG)&&((localt!=tlastgen[AVG2DUMPTYPE])&&(localt >= tdumpgen[AVG2DUMPTYPE] || call_code==FINAL_OUT)))){
704  dodumpgen[AVG2DUMPTYPE]=1;
705  }
706  else dodumpgen[AVG2DUMPTYPE]=0;
707 
708 
709 
710  // DEBUGDUMPTYPE
711  if((dnumcolumns[DEBUGDUMPTYPE]>0)&&( ((DODEBUG)&&(DOENODEBUGEVERYSUBSTEP||DODIAGEVERYSUBSTEP||((localt!=tlastgen[DEBUGDUMPTYPE])&&(localt >= tdumpgen[DEBUGDUMPTYPE] || call_code==FINAL_OUT))) ) )){
712  dodumpgen[DEBUGDUMPTYPE]=1;
713  }
714  else dodumpgen[DEBUGDUMPTYPE]=0;
715 
716 
717 
718  // FIELDLINEDUMPTYPE
719  if((dnumcolumns[FIELDLINEDUMPTYPE]>0)&&((DOFIELDLINE)&&(DODIAGEVERYSUBSTEP||((localt!=tlastgen[FIELDLINEDUMPTYPE])&&(localt >= tdumpgen[FIELDLINEDUMPTYPE] || call_code==FINAL_OUT))) )){
720  dodumpgen[FIELDLINEDUMPTYPE]=1;
721  }
722  else dodumpgen[FIELDLINEDUMPTYPE]=0;
723 
724 
725  // ENODEBUGDUMPTYPE
726  if((DOENODEBUG&&dnumcolumns[ENODEBUGDUMPTYPE]>0)&&( ((DODEBUG)&&(DOENODEBUGEVERYSUBSTEP||DODIAGEVERYSUBSTEP||((localt!=tlastgen[ENODEBUGDUMPTYPE])&&(localt >= tdumpgen[ENODEBUGDUMPTYPE] || call_code==FINAL_OUT))) ) )){
727  dodumpgen[ENODEBUGDUMPTYPE]=1;
728  }
729  else dodumpgen[ENODEBUGDUMPTYPE]=0;
730 
731 
732  // DISSDUMPTYPE
733  if((dnumcolumns[DISSDUMPTYPE]>0)&&( ((DODUMPDIAG)&&(DODIAGEVERYSUBSTEP||((localt!=tlastgen[DISSDUMPTYPE])&&(localt >= tdumpgen[DISSDUMPTYPE] || (RESTARTMODE&&dofaildump&&(localnstep>=steptofaildump)) || call_code==FINAL_OUT ))) ) )){
734  dodumpgen[DISSDUMPTYPE]=1;
735  }
736  else dodumpgen[DISSDUMPTYPE]=0;
737 
738  // OTHERDUMPTYPE
739  if((dnumcolumns[OTHERDUMPTYPE]>0)&&( ((DODUMPDIAG)&&(DODIAGEVERYSUBSTEP||((localt!=tlastgen[OTHERDUMPTYPE])&&(localt >= tdumpgen[OTHERDUMPTYPE] || (RESTARTMODE&&dofaildump&&(localnstep>=steptofaildump)) || call_code==FINAL_OUT ))) ) )){
740  dodumpgen[OTHERDUMPTYPE]=1;
741  }
742  else dodumpgen[OTHERDUMPTYPE]=0;
743 
744  // FLUXDUMPTYPE
745  if((dnumcolumns[FLUXDUMPTYPE]>0)&&( ((DODUMPDIAG)&&(DODIAGEVERYSUBSTEP||((localt!=tlastgen[FLUXDUMPTYPE])&&(localt >= tdumpgen[FLUXDUMPTYPE] || (RESTARTMODE&&dofaildump&&(localnstep>=steptofaildump)) || call_code==FINAL_OUT ))) ) )){
746  dodumpgen[FLUXDUMPTYPE]=1;
747  }
748  else dodumpgen[FLUXDUMPTYPE]=0;
749 
750 
751 
752  // EOSDUMPTYPE
753  if( (dnumcolumns[EOSDUMPTYPE]>0)&&( ((DODUMPDIAG)&&(DODIAGEVERYSUBSTEP||((localt!=tlastgen[EOSDUMPTYPE])&&(localt >= tdumpgen[EOSDUMPTYPE] || (RESTARTMODE&&dofaildump&&(localnstep>=steptofaildump)) || call_code==FINAL_OUT ))) ) )){
754  dodumpgen[EOSDUMPTYPE]=1;
755  }
756  else dodumpgen[EOSDUMPTYPE]=0;
757 
758  // VPOTDUMPTYPE
759  if((dnumcolumns[VPOTDUMPTYPE]>0)&&( (((DOVPOTDUMP)&&(DODIAGEVERYSUBSTEP||((localt!=tlastgen[VPOTDUMPTYPE])&&(localt >= tdumpgen[VPOTDUMPTYPE] || (RESTARTMODE&&dofaildump&&(localnstep>=steptofaildump)) || call_code==FINAL_OUT ) ) ) ))) ){
760  dodumpgen[VPOTDUMPTYPE]=1;
761  }
762  else dodumpgen[VPOTDUMPTYPE]=0;
763 
764 
765  // FAILFLOORDUDUMPTYPE
766  if((dnumcolumns[FAILFLOORDUDUMPTYPE]>0)&&( ((DODUMPDIAG)&&(DODIAGEVERYSUBSTEP||((localt!=tlastgen[FAILFLOORDUDUMPTYPE])&&(localt >= tdumpgen[FAILFLOORDUDUMPTYPE] || (RESTARTMODE&&dofaildump&&(localnstep>=steptofaildump)) || call_code==FINAL_OUT ))) ) )){
767  dodumpgen[FAILFLOORDUDUMPTYPE]=1;
768  }
769  else dodumpgen[FAILFLOORDUDUMPTYPE]=0;
770 
771 
772  // FLUXSIMPLEDUMPTYPE
773  if((dnumcolumns[FLUXSIMPLEDUMPTYPE]>0)&&( ((DODUMPDIAG)&&(DODIAGEVERYSUBSTEP||((localt!=tlastgen[FLUXSIMPLEDUMPTYPE])&&(localt >= tdumpgen[FLUXSIMPLEDUMPTYPE] || (RESTARTMODE&&dofaildump&&(localnstep>=steptofaildump)) || call_code==FINAL_OUT ))) ) )){
774  dodumpgen[FLUXSIMPLEDUMPTYPE]=1;
775  }
776  else dodumpgen[FLUXSIMPLEDUMPTYPE]=0;
777 
778 
779  // ENERDUMPTYPE (dnumcolumns[ENERDUMPTYPE]==0 is normal)
780  // t!=tlast avoids duplicate entries
781  if(DOENERDIAG&&(DODIAGEVERYSUBSTEP||((localt!=tlastgen[ENERDUMPTYPE])&&( (localt >= tdumpgen[ENERDUMPTYPE])||(call_code==INIT_OUT)||(call_code==FINAL_OUT)||firsttime)))){
782  dodumpgen[ENERDUMPTYPE]=1;
783  }
784  else dodumpgen[ENERDUMPTYPE]=0;
785 
786  // RADDUMPTYPE
787  if( (dnumcolumns[RADDUMPTYPE]>0)&&( ((DODUMPDIAG)&&(DODIAGEVERYSUBSTEP||((localt!=tlastgen[RADDUMPTYPE])&&(localt >= tdumpgen[RADDUMPTYPE] || (RESTARTMODE&&dofaildump&&(localnstep>=steptofaildump)) || call_code==FINAL_OUT ))) ) )){
788  dodumpgen[RADDUMPTYPE]=1;
789  }
790  else dodumpgen[RADDUMPTYPE]=0;
791 
792  // DISSMEASUREDUMPTYPE
793  if((dnumcolumns[DISSMEASUREDUMPTYPE]>0)&&( ((DODUMPDIAG)&&(DODIAGEVERYSUBSTEP||((localt!=tlastgen[DISSMEASUREDUMPTYPE])&&(localt >= tdumpgen[DISSMEASUREDUMPTYPE] || (RESTARTMODE&&dofaildump&&(localnstep>=steptofaildump)) || call_code==FINAL_OUT ))) ) )){
794  dodumpgen[DISSMEASUREDUMPTYPE]=1;
795  }
796  else dodumpgen[DISSMEASUREDUMPTYPE]=0;
797 
798 
799  // FAKEDUMPTYPE
800  if((USEMPI && USEROMIO==1 && MPIVERSION==2)&&( ((nlastfake!=nfake)&&(failed == 0) && (localrealnstep >= nfake ))||(call_code==FINAL_OUT) ) ){
801  dodumpgen[FAKEDUMPTYPE]=1;
802  }
803  else dodumpgen[FAKEDUMPTYPE]=0;
804 
805 
806 
807 
808 
809 
810  // DEBUG:
811  // if(t>2.45E4 || realnstep>28000){
812  // dodumpgen[ENERDUMPTYPE]=1;
813  // dodumpgen[EOSDUMPTYPE]=1;
814  // dodumpgen[DISSDUMPTYPE]=1;
815  // dodumpgen[MAINDUMPTYPE]=1;
816  // dodumpgen[GRIDDUMPTYPE]=1;
817  // dodumpgen[FAILFLOORDUDUMPTYPE]=1;
818  // dodumpgen[DEBUGDUMPTYPE]=1;
819  // }
820 
821 
822 
823 
824 
825  return(0);
826 }
827 
828 
829 
830 
831 
832 
833 
834 
835 
836 
837 
838 
844 
845 int asym_compute_1(FTYPE (*prim)[NSTORE2][NSTORE3][NPR])
846 {
847  int i,j,k;
848  int pl,pliter;
849 
850 #if(0)
851  // for implosion problem
852  FULLLOOP{
853 
854  if(MACP0A1(prim,i,j,k,RHO)!=MACP0A1(prim,j,i,k,RHO)){
855  dualfprintf(fail_file,"ASYM in RHO %d %d %d : %23.16g %23.16g\n",i,j,k,MACP0A1(prim,i,j,k,RHO),MACP0A1(prim,j,i,k,RHO));
856  }
857 
858  if(MACP0A1(prim,i,j,k,UU)!=MACP0A1(prim,j,i,k,UU)){
859  dualfprintf(fail_file,"ASYM in UU %d %d %d : %23.16g %23.16g\n",i,j,k,MACP0A1(prim,i,j,k,UU),MACP0A1(prim,j,i,k,UU));
860  }
861 
862 
863  if(MACP0A1(prim,i,j,k,U1)!=MACP0A1(prim,j,i,k,U2)){
864  dualfprintf(fail_file,"ASYM in U1 %d %d %d : %23.16g %23.16g\n",i,j,k,MACP0A1(prim,i,j,k,U1),MACP0A1(prim,j,i,k,U2));
865  }
866 
867  }
868 #endif
869 #if(1)
870  // for any periodic boundary conditions
871  FULLLOOP{
872  if(i<N1/2 && j<N2/2 && k<N3/2){
873  PLOOP(pliter,pl){
874  if(MACP0A1(prim,i,j,k,pl)!=MACP0A1(prim,i+N1,j,k,pl)){
875  dualfprintf(fail_file,"ASYM nstep=%ld steppart=%d in pl=%d dir=1 :: %d %d %d : %23.16g %23.16g\n",nstep,steppart,pl,i,j,k,MACP0A1(prim,i,j,k,pl),MACP0A1(prim,i+N1,j,k,pl));
876  }
877  if(MACP0A1(prim,i,j,k,pl)!=MACP0A1(prim,i,j+N2,k,pl)){
878  dualfprintf(fail_file,"ASYM nstep=%ld steppart=%d in pl=%d dir=2 :: %d %d %d : %23.16g %23.16g\n",nstep,steppart,pl,i,j,k,MACP0A1(prim,i,j,k,pl),MACP0A1(prim,i,j+N2,k,pl));
879  }
880  }
881  }
882  }
883 
884  // for any periodic boundary conditions
885  FULLLOOP{
886  if(i<N1/2 && j<N2/2 && k<N3/2){
887  PLOOP(pliter,pl){
888  if(GLOBALMACP0A1(udump,i,j,k,pl)!=GLOBALMACP0A1(udump,i+N1,j,k,pl)){
889  dualfprintf(fail_file,"ASYMUDUMP nstep=%ld steppart=%d in pl=%d dir=1 :: %d %d %d : %23.16g %23.16g\n",nstep,steppart,pl,i,j,k,GLOBALMACP0A1(udump,i,j,k,pl),GLOBALMACP0A1(udump,i+N1,j,k,pl));
890  }
891  if(GLOBALMACP0A1(udump,i,j,k,pl)!=GLOBALMACP0A1(udump,i,j+N2,k,pl)){
892  dualfprintf(fail_file,"ASYMUDUMP nstep=%ld steppart=%d in pl=%d dir=2 :: %d %d %d : %23.16g %23.16g\n",nstep,steppart,pl,i,j,k,GLOBALMACP0A1(udump,i,j,k,pl),GLOBALMACP0A1(udump,i,j+N2,k,pl));
893  }
894  }
895  }
896  }
897 
898 #endif
899 
900 
901  return(0);
902 }
903 
906 int asym_compute_2(FTYPE (*prim)[NSTORE2][NSTORE3][NPR])
907 {
908  int i,j,k;
909  int pl,pliter;
910 
911 
912 #if(0)
913  ZLOOP{
914 
915  if(MACP0A1(prim,i,j,k,RHO)!=MACP0A1(prim,j,i,k,RHO)){
916  dualfprintf(fail_file,"ASYM in RHO %d %d %d : %23.16g %23.16g\n",i,j,k,MACP0A1(prim,i,j,k,RHO),MACP0A1(prim,j,i,k,RHO));
917  }
918 
919  if(MACP0A1(prim,i,j,k,UU)!=MACP0A1(prim,j,i,k,UU)){
920  dualfprintf(fail_file,"ASYM in UU %d %d %d : %23.16g %23.16g\n",i,j,k,MACP0A1(prim,i,j,k,UU),MACP0A1(prim,j,i,k,UU));
921  }
922 
923 
924  if(MACP0A1(prim,i,j,k,U1)!=MACP0A1(prim,j,i,k,U2)){
925  dualfprintf(fail_file,"ASYM in U1 %d %d %d : %23.16g %23.16g\n",i,j,k,MACP0A1(prim,i,j,k,U1),MACP0A1(prim,j,i,k,U2));
926  }
927 
928  }
929 #endif
930 #if(1)
931  // for any periodic boundary conditions
932  ZLOOP{
933  if(i<N1/2 && j<N2/2 && k<N3/2){
934  PLOOP(pliter,pl){
935  if(MACP0A1(prim,i,j,k,pl)!=MACP0A1(prim,i+N1,j,k,pl)){
936  dualfprintf(fail_file,"ASYM nstep=%ld steppart=%d in pl=%d dir=1 :: %d %d %d : %23.16g %23.16g\n",nstep,steppart,pl,i,j,k,MACP0A1(prim,i,j,k,pl),MACP0A1(prim,i+N1,j,k,pl));
937  }
938  if(MACP0A1(prim,i,j,k,pl)!=MACP0A1(prim,i,j+N2,k,pl)){
939  dualfprintf(fail_file,"ASYM nstep=%ld steppart=%d in pl=%d dir=2 :: %d %d %d : %23.16g %23.16g\n",nstep,steppart,pl,i,j,k,MACP0A1(prim,i,j,k,pl),MACP0A1(prim,i,j+N2,k,pl));
940  }
941  }
942  }
943  }
944 #endif
945 
946 
947 
948  return(0);
949 }
950 
951 
952 
956 int area_map(int call_code, int type, int size, int i, int j, int k, FTYPE (*prim)[NSTORE2][NSTORE3][NPR])
957 {
958  int pl,pliter;
959  int l,m,ll,mm;
960  FTYPE vmin1, vmax1, vmin2, vmax2;
961  int ignorecourant;
962  struct of_state q;
963  FTYPE X[NDIM],V[NDIM];
964  FTYPE divb;
965  FTYPE tens_em[NDIM][NDIM], tens_matt[NDIM][NDIM], b[NDIM],
966  ucon[NDIM];
967  FTYPE U[NPR];
968  int lowersizex1,uppersizex1;
969  int lowersizex2,uppersizex2;
970  int lowersizex3,uppersizex3;
971 
972  static FILE* fileptr;
973  static int firsttime=1;
974  static int domap=0;
975  static int doclose=0;
976  struct of_geom geomdontuse;
977  struct of_geom *ptrgeom=&geomdontuse;
978  int loc=CENT;
979 
980 
981  trifprintf("\nStart area_map function ... ");
982 
983 
984 
985  if(i-(-N1BND)<size/2) lowersizex1=i-(-N1BND);
986  else lowersizex1=size/2;
987  if((N1-1+N1BND)-i<size/2) uppersizex1=(N1-1+N1BND)-i;
988  else uppersizex1=size/2;
989 
990  if(j-(-N2BND)<size/2) lowersizex2=j-(-N2BND);
991  else lowersizex2=size/2;
992  if((N2-1+N2BND)-j<size/2) uppersizex2=(N2-1+N2BND)-j;
993  else uppersizex2=size/2;
994 
995  if(k-(-N3BND)<size/2) lowersizex3=k-(-N3BND);
996  else lowersizex3=size/2;
997  if((N3-1+N3BND)-k<size/2) uppersizex3=(N3-1+N3BND)-k;
998  else uppersizex3=size/2;
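 // The requested size x size output window centered on (i,j,k) is clamped above so it never extends past
 // the local grid including ghost zones; e.g. with size=20, i=3 and N1BND=3, lowersizex1 = i+N1BND = 6
 // while uppersizex1 stays at size/2 = 10 (assuming the upper edge is far enough away).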
999 
1000 
1001  if(firsttime){
1002  if((type==TIMESERIESAREAMAP)&&(dofailmap)){
1003  if((fileptr=fopen("areamap","wt"))==NULL){
1004  dualfprintf(fail_file,"Cannot open ./areamap on proc=%d\n",myid);
1005  domap=0;
1006  }
1007  else domap=1;
1008  }
1009  }
1010 
1011  if((type==TIMESERIESAREAMAP)&&domap&&(call_code==2)){
1012  doclose=1;
1013  }
1014  else doclose=0;
1015 
1016  if(type==FINALTDUMPAREAMAP){
1017  dualfprintf(fail_file, "area map\n");
1018  dualfprintf(fail_file, "areamap at: i=%d j=%d k=%d\n",i+startpos[1],j+startpos[2],k+startpos[3]);
1019  coord(i,j,k,loc,X);
1020  dualfprintf(fail_file, "areamap at: i=%d j=%d k=%d\n",i+startpos[1],j+startpos[2],k+startpos[3]);
1021 
1022 
1023 
1024  PDIAGLOOP(pl) {// all vars
1025 
1026  dualfprintf(fail_file, "variable %d \n", pl);
1027 
1028  // ONLY 2D map for now
1029  dualfprintf(fail_file, "i = \t ");
1030  for(l=i-lowersizex1;l<=i+uppersizex1;l++){
1031  ll=l+startpos[1];
1032  dualfprintf(fail_file, "%21d", ll);
1033  }
1034  dualfprintf(fail_file, "\n");
1035  for(m=j-lowersizex2;m<=j+uppersizex2;m++){
1036  mm=m+startpos[2];
1037  dualfprintf(fail_file, "j = %d \t ",mm);
1038  for(l=i-lowersizex1;l<=i+uppersizex1;l++){
1039  ll=l+startpos[1];
1040  dualfprintf(fail_file, "%21.15g ",MACP0A1(prim,l,m,k,pl));
1041  }
1042  dualfprintf(fail_file, "\n");
1043  }
1044  }
1045  }
1046  else if((type==TIMESERIESAREAMAP)&&(domap)){
1047  if(firsttime){
1048  // GODMARK: 2D only, not outputting z-related stuff so function remains consistent with SM macro
1049  fprintf(fileptr,"%21.15g %lld %lld %lld %21.15g %21.15g %21.15g %21.15g %21.15g %21.15g %d %d %d %d %d %d %21.15g %21.15g %21.15g %21.15g %21.15g %21.15g\n",
1050  t,totalsize[1],totalsize[2],totalsize[3],startx[1],startx[2],startx[3],dx[1],dx[2],dx[3],lowersizex1,uppersizex1,lowersizex2,uppersizex2,startpos[1]+i,startpos[2]+j,gam,a,R0,Rin,Rout,hslope);
1051  fflush(fileptr);
1052  }
1053  for(m=j-size/2;m<=j+size/2;m++){
1054  if((m<-N2BND)||(m>N2-1+N2BND)) continue;
1055  mm=m+startpos[2];
1056  for(l=i-size/2;l<=i+size/2;l++){
1057  if((l<-N1BND)||(l>N1-1+N1BND)) continue;
1058  ll=l+startpos[1];
1059 
1060 
1061  bl_coord_ijk_2(l, m, k, loc, X,V);
1062  get_geometry(l, m, k, loc, ptrgeom);
1063  if (!failed) {
1064  if (get_state(MAC(prim,l,m,k), ptrgeom, &q) >= 1)
1065  FAILSTATEMENT("diag.c:areamap()", "get_state() dir=0", 1);
1066  if (vchar_all(MAC(prim,l,m,k), &q, 1, ptrgeom, &vmax1, &vmin1,&ignorecourant) >= 1)
1067  FAILSTATEMENT("diag.c:areamap()", "vchar_all() dir=1or2", 1);
1068  if (vchar_all(MAC(prim,l,m,k), &q, 2, ptrgeom, &vmax2, &vmin2,&ignorecourant) >= 1)
1069  FAILSTATEMENT("diag.c:areamap()", "vchar_all() dir=1or2", 2);
1070  // GODMARK: no 3-direction char.
1071  }
1072 
1073  // GODMARK: use of globals
1074  if((l>=-1)&&(l<=N1+1)&&(m>=-1)&&(m<=N2+1)&&(k>=-1)&&(k<=N3+1) ){ setfdivb(&divb, prim, GLOBALPOINT(pstagdump), GLOBALPOINT(udump), GLOBALPOINT(Bhatdump), l, m, k);} // GLOBALPOINT(udump) here must be set externally GODMARK
1075  else divb=0.0;
1076 
1077  // same order as dump.c for first columns (easy sm read)
1078  fprintf(fileptr,
1079  "%d %d "
1080  "%21.15g %21.15g "
1081  "%21.15g %21.15g "
1082  "%21.15g %21.15g %21.15g %21.15g %21.15g %21.15g %21.15g %21.15g "
1083  "%21.15g "
1084  "%21.15g %21.15g %21.15g %21.15g "
1085  "%21.15g %21.15g %21.15g %21.15g "
1086  "%21.15g %21.15g %21.15g %21.15g "
1087  "%21.15g %21.15g %21.15g %21.15g "
1088  "%21.15g %21.15g %21.15g %21.15g "
1089  "%21.15g "
1090  "%21.15g %ld\n",
1091  ll,mm,
1092  X[1],X[2],
1093  V[1],V[2],
1094  MACP0A1(prim,l,m,k,0),
1095  MACP0A1(prim,l,m,k,1),
1096  MACP0A1(prim,l,m,k,2),
1097  MACP0A1(prim,l,m,k,3),
1098  MACP0A1(prim,l,m,k,4),
1099  MACP0A1(prim,l,m,k,5),
1100  MACP0A1(prim,l,m,k,6),
1101  MACP0A1(prim,l,m,k,7),
1102  divb,
1103  q.ucon[0],q.ucon[1],q.ucon[2],q.ucon[3],
1104  q.ucov[0],q.ucov[1],q.ucov[2],q.ucov[3],
1105  q.bcon[0],q.bcon[1],q.bcon[2],q.bcon[3],
1106  q.bcov[0],q.bcov[1],q.bcov[2],q.bcov[3],
1107  vmin1,vmax1,vmin2,vmax2,
1108  ptrgeom->gdet,
1109  t,realnstep);
1110  }
1111  }
1112  fflush(fileptr);
1113  }
1114 
1115  if(doclose) if(fileptr!=NULL) fclose(fileptr);
1116 
1117 
1118  /* print out other diagnostics here */
1119 
1120  firsttime=0;
1121  trifprintf("end area_map function.\n");
1122  return(0);
1123 }
1124 
1125 
1126 
 1127 /* evaluate flux-based diagnostics; put results in global variables */
1128 
1129 #define JETBSQORHO (3.162)
1130 
1131 
1134 int diag_flux_pureflux(FTYPE (*prim)[NSTORE2][NSTORE3][NPR], FTYPE (*F1)[NSTORE2][NSTORE3][NPR+NSPECIAL], FTYPE (*F2)[NSTORE2][NSTORE3][NPR+NSPECIAL], FTYPE (*F3)[NSTORE2][NSTORE3][NPR+NSPECIAL], SFTYPE Dt)
1135 {
1136  int fluxdir;
1137  int i, j, k, pl, pliter, l, dir,fl,enerregion;
1138  SFTYPE surface,mysurface,surf2;
1139  SFTYPE surgdet;
1140  FTYPE (*flux)[NSTORE2][NSTORE3][NPR+NSPECIAL];
1141  int start1,start2,start3,stop1,stop2,stop3;
1142  int gpos;
1143  int ii;
1144  FTYPE ftemp;
1145  FTYPE ftemp0,ftemp1,ftemp2,ftemp3,ftemp4,ftemp5,ftemp6;
1146  FTYPE pgas,bsq,bsqorho;
1147  struct of_state q;
1148  FTYPE X[NDIM],V[NDIM];
1149  FTYPE b[NDIM],ucon[NDIM];
1150  FTYPE U[NPR];
1151  int condjet2;
1152  FTYPE Ftemp[NPR],Ftempdiag[NPR];
1153  int firstinloop;
1154  FTYPE pr[NPR];
1155  int *localdoflux, *localenerpos;
1156  SFTYPE (*localpcum)[NPR];
1157  SFTYPE (*localpdot)[NPR];
1158  SFTYPE (*localpdotterms)[NUMFLUXTERMS][NPR];
1159  struct of_geom geomdontuse;
1160  struct of_geom *ptrgeom=&geomdontuse;
1161  int loc=CENT;
1162 
1163 
1164 
1165  if(DODIAGS==0) return(0);
1166 
1167 
1168  // initialize
1169  ENERREGIONLOOP(enerregion){
1170  localdoflux=dofluxreg[enerregion];
1171  localenerpos=enerposreg[enerregion];
1172  localpcum=pcumreg[enerregion];
1173  localpdot=pdotreg[enerregion];
1174 
1175 
1176  DIRLOOP(dir) PDIAGLOOP(pl){
1177  localpdot[dir][pl]=0;
1178  }
1179  // true outer boundary surface fluxes (per direction, per conserved variable)
1180  DIRLOOP(dir) {
1181  if (localdoflux[dir] >= 0) {// then this cpu is involved in flux BOUNDARY calculation
1182  // otherwise don't add to it
1183 
1184 
1186  // assumes rectangular region
1187  //
1188  if((dir==X1UP)||(dir==X1DN)){
1189 
1190  surface = dx[2]*dx[3];
1191 
1192  start1=stop1=localdoflux[dir];
1193 
1194  start2=localenerpos[X2DN];
1195  stop2=localenerpos[X2UP];
1196 
1197  start3=localenerpos[X3DN];
1198  stop3=localenerpos[X3UP];
1199 
1200  flux=F1;
1201  gpos=FACE1;
1202  fluxdir=1;
1203  }
1204  else if((dir==X2UP)||(dir==X2DN)){
1205 
1206  surface = dx[1]*dx[3];
1207 
1208  start2=stop2=localdoflux[dir];
1209 
1210  start1=localenerpos[X1DN];
1211  stop1=localenerpos[X1UP];
1212 
1213  start3=localenerpos[X3DN];
1214  stop3=localenerpos[X3UP];
1215 
1216  flux=F2;
1217  gpos=FACE2;
1218  fluxdir=2;
1219  }
1220  else if((dir==X3UP)||(dir==X3DN)){
1221 
1222  surface = dx[1]*dx[2];
1223 
1224  start3=stop3=localdoflux[dir];
1225 
1226  start1=localenerpos[X1DN];
1227  stop1=localenerpos[X1UP];
1228 
1229  start2=localenerpos[X2DN];
1230  stop2=localenerpos[X2UP];
1231 
1232  flux=F3;
1233  gpos=FACE3;
1234  fluxdir=3;
1235  }
1236  else{
1237  dualfprintf(fail_file,"no such direction: %d\n",dir);
1238  myexit(1);
1239  }
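 // For each boundary direction the surface element is the product of the two transverse cell widths
 // (dx[2]*dx[3] for an x1-face, etc.), the loop range collapses to the single face index localdoflux[dir]
 // in the flux direction, and the corresponding face-centered flux array (F1/F2/F3) and geometry location
 // (FACE1/FACE2/FACE3) are selected.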
1240 
1241  // zero out summation quantities
1242  PDIAGLOOP(pl){
1243  localpdot[dir][pl]=0;
1244  }
1245  // GENLOOP set by enerregion so set correctly for GRID SECTION
1246  GENLOOP(i,j,k,start1,stop1,start2,stop2,start3,stop3){ // OPENMPOPTMARK: Could parallelize this, but not too expensive since only surface values
1247  // now add up all zones for each conserved quantity (PDIAGLOOP)
1248 
1249 
1250 
1252  //
1253  // GET STATE
1254  //
1256 
1257 
1258 
1260  get_geometry(i, j, k, loc, ptrgeom);
1261  PDIAGLOOP(pl) pr[pl]=MACP0A1(prim,i,j,k,pl);
1262 
1263 
1264  // modify effective position of integral in case adding energy/angular momentum to horizon
1265  // apparently not correct
1266  if(0 && enerregion==OUTSIDEHORIZONENERREGION){
1267  mysurface = surface /(q.ucov[TT]);
1268  }
1269  else mysurface = surface;
1270 
1271 
1272 
1273 
1275  //
1276  // do standard flux for accounting
1277  //
1279 
1280  get_geometry(i, j, k, gpos, ptrgeom);
1281  PDIAGLOOP(pl) Ftemp[pl]=MACP0A1(flux,i,j,k,pl)*mysurface; // in UEVOLVE form
1282  // GODMARK: for finite volume method, below doesn't change the result if eomfunc=gdet.
1283  // Otherwise flux would have to be completely recomputed for gdet case JUST for diagnostic to be consistent at higher order
1284  UtoU_evolve2diag(UEVOLVE,UDIAG,ptrgeom,Ftemp,Ftempdiag); // convert to diag form
1285  PDIAGLOOP(pl){
1286 #if(PRODUCTION>0)
1287  // assume if nan that just box is beyond where flux defined
1288  if(!isfinite(Ftempdiag[pl])){
1289  Ftempdiag[pl]=0.0;
1290  }
1291 #endif
1292  localpdot[dir][pl] += Ftempdiag[pl];
1293  }
1294  if(REMOVERESTMASSFROMUU==2){
1295  // localpdot[dir][UU] += -Ftempdiag[RHO]; // add rest-mass back in (GODMARK: Problem is that non-relativistic problems will have energy term swamped by mass term)
1296  }
1297  else{
1298  // then already correct
1299  // so fix this instead!
1300  localpdot[dir][UU] += Ftempdiag[RHO]; // remove rest-mass term.
1301  }
1302 
1303  /*
1304  // DEBUG
1305  PDIAGLOOP(pl) if(!finite(localpdot[dir][pl])){
1306  dualfprintf(fail_file,"not finite: i=%d j=%d k=%d :: dir=%d pl=%d %g : %g : %g %g :: %g %g\n",i,j,k,dir,pl,localpdot[dir][pl],MACP0A1(flux,i,j,k,pl),ptrgeom->gdet,mysurface,Ftemp[pl],Ftempdiag[pl]);
1307  }
1308  */
1309 
1310 
1311  }// end GENLOOP
1312 
1313  // cumulative only based upon localpdot
1314  // localpdot contains entire sum over grid of relevant surface integral value for this direction and ener-region.
1315  PDIAGLOOP(pl) localpcum[dir][pl]+=localpdot[dir][pl]*Dt; // localpdot is already corrected for REMOVERESTMASSFROMUU
1316 
1317  }// end if localdoflux
1318  }// end DIRLOOP
1319  }// end ENERloop
1320 #if(COMPUTEFRDOT)
1321  // radial flux vs. radius
1322  flux=F1;
1323  surface = dx[2]*dx[3];
1324  for(i=0;i<N1;i++){
1325 
1326  PDIAGLOOP(pl) frdot[i][pl]=0;
1327 
1328  for(j=0;j<N2;j++) for(k=0;k<N3;k++){
1329  get_geometry(i, j, k, FACE1, ptrgeom);
1330  PDIAGLOOP(pl) Ftemp[pl]=MACP0A1(flux,i,j,k,pl)*surface; // UEVOLVE form
1331  // GODMARK: for finite volume method, below doesn't change the result if eomfunc=gdet.
1332  // Otherwise flux would have to be completely recomputed for gdet case JUST for diagnostic to be consistent at higher order
1333  UtoU_evolve2diag(UEVOLVE,UDIAG,ptrgeom,Ftemp,Ftempdiag); // convert to diag form
1334  PDIAGLOOP(pl) frdot[i][pl]+=Ftempdiag[pl];
1335  if(REMOVERESTMASSFROMUU==2){
1336  // frdot[i][UU]+= -Ftempdiag[RHO]; // GODMARK: Problem is rest-mass term will dominate and destroy ability to recover energy flux
1337  }
1338  else{
1339  // already correct
1340  // so fix this instead!
1341  frdot[i][UU] += Ftempdiag[RHO]; // remove rest-mass term.
1342 
1343  }
1344  }
1345 
1346  }
1347 #endif
1348  // GODMARK
1349  // want all fluxes vs theta on horizon
1350 
1351 
1352  return(0);
1353 
1354 }
1355 
 1356 /* evaluate flux-based diagnostics; put results in global variables */
1357 
1358 #define JETBSQORHO (3.162)
1359 
1360 
1363 int diag_flux_general(FTYPE (*prim)[NSTORE2][NSTORE3][NPR], SFTYPE Dt)
1364 {
1365  int fluxdir;
1366  int i, j, k, pl, pliter, l, dir,fl,enerregion;
1367  SFTYPE surface,mysurface,surf2;
1368  SFTYPE surgdet;
1369  int start1,start2,start3,stop1,stop2,stop3;
1370  int gpos;
1371  int ii;
1372  FTYPE ftemp;
1373  FTYPE ftemp0,ftemp1,ftemp2,ftemp3,ftemp4,ftemp5,ftemp6;
1374  FTYPE pgas,bsq,bsqorho;
1375  struct of_state q;
1376  FTYPE X[NDIM],V[NDIM];
1377  FTYPE b[NDIM],ucon[NDIM];
1378  FTYPE U[NPR];
1379  int condjet2;
1380  FTYPE Ftemp[NPR],Ftempdiag[NPR];
1381  int firstinloop;
1382  FTYPE pr[NPR];
1383  int *localdoflux, *localenerpos;
1384  SFTYPE (*localpdotterms)[NUMFLUXTERMS][NPR];
1385  struct of_geom geomdontuse;
1386  struct of_geom *ptrgeom=&geomdontuse;
1387  int loc=CENT;
1388 
1389 
1390  if(DODIAGS==0) return(0);
1391 
1392  if(FLUXDIAGPARTS==0) return(0);
1393 
1394  // initialize
1395  ENERREGIONLOOP(enerregion){
1396  localdoflux=dofluxreg[enerregion];
1397  localenerpos=enerposreg[enerregion];
1398  localpdotterms=pdottermsreg[enerregion];
1399 
1400 
1401  DIRLOOP(dir) PDIAGLOOP(pl){
1402  FLLOOP(fl) localpdotterms[dir][fl][pl]=0;
1403  if(enerregion==0) FLLOOP(fl) pdottermsjet2[dir][fl][pl]=0;
1404  }
1405  // true outer boundary surface fluxes (per direction, per conserved variable)
1406  DIRLOOP(dir) {
1407  if (localdoflux[dir] >= 0) {// then this cpu is involved in flux BOUNDARY calculation
1408  // otherwise don't add to it
1409 
1410 
1412  // assumes rectangular region
1413  //
1414  if((dir==X1UP)||(dir==X1DN)){
1415 
1416  surface = dx[2]*dx[3];
1417 
1418  start1=stop1=localdoflux[dir];
1419 
1420  start2=localenerpos[X2DN];
1421  stop2=localenerpos[X2UP];
1422 
1423  start3=localenerpos[X3DN];
1424  stop3=localenerpos[X3UP];
1425 
1426  gpos=FACE1;
1427  fluxdir=1;
1428  }
1429  else if((dir==X2UP)||(dir==X2DN)){
1430 
1431  surface = dx[1]*dx[3];
1432 
1433  start2=stop2=localdoflux[dir];
1434 
1435  start1=localenerpos[X1DN];
1436  stop1=localenerpos[X1UP];
1437 
1438  start3=localenerpos[X3DN];
1439  stop3=localenerpos[X3UP];
1440 
1441  gpos=FACE2;
1442  fluxdir=2;
1443  }
1444  else if((dir==X3UP)||(dir==X3DN)){
1445 
1446  surface = dx[1]*dx[2];
1447 
1448  start3=stop3=localdoflux[dir];
1449 
1450  start1=localenerpos[X1DN];
1451  stop1=localenerpos[X1UP];
1452 
1453  start2=localenerpos[X2DN];
1454  stop2=localenerpos[X2UP];
1455 
1456  gpos=FACE3;
1457  fluxdir=3;
1458  }
1459  else{
1460  dualfprintf(fail_file,"no such direction: %d\n",dir);
1461  myexit(1);
1462  }
1463 
1464  // zero out summation quantities
1465  PDIAGLOOP(pl){
1466  FLLOOP(fl) localpdotterms[dir][fl][pl]=0;
1467  if(enerregion==0) FLLOOP(fl) pdottermsjet2[dir][fl][pl]=0;
1468  }
1469  // GENLOOP set by enerregion so set correctly for GRID SECTION
1470  GENLOOP(i,j,k,start1,stop1,start2,stop2,start3,stop3){ // OPENMPOPTMARK: Could parallelize this, but not too expensive since only surface values
1471  // OPENMPSUPERMARK: If parallelize this, must make whocalleducon threadprivate again!
1472  // now add up all zones for each conserved quantity (PDIAGLOOP)
1473 
1474 
1475 
1477  //
1478  // GET STATE
1479  //
1481 
1482 
1483 
1485  get_geometry(i, j, k, loc, ptrgeom);
1486  PDIAGLOOP(pl) pr[pl]=MACP0A1(prim,i,j,k,pl);
1487 
1488  //bl_coord_ijk_2(i, j, k, loc, X,V);
1489  // if failed, then data output for below invalid, but columns still must exist
1490  // loc since p at center
1491  if (!failed) {
1492  // OPTMARK: Should avoid non-fundamental flux cumulation except (e.g.) every full timestep since not important to be very accurate.
1493  // OPTMARK: Also, when doing get_state(), use existing STOREFLUXSTATE if created.
1494  if (get_state(pr, ptrgeom, &q) >= 1) FAILSTATEMENT("diag.c:diag_flux()", "get_state() dir=0", 1);
1495  }
1496  else {// do a per zone check, otherwise set to 0
1497  whocalleducon=1; // force no failure mode, just return like failure, and don't return if failure, just set to 0 and continue
1498  if (get_state(pr, ptrgeom, &q) >= 1){
1499  for (l = 0; l < NDIM; l++)
1500  q.ucon[l]=0;
1501  for (l = 0; l < NDIM; l++)
1502  q.ucov[l]=0;
1503  for (l = 0; l < NDIM; l++)
1504  q.bcon[l]=0;
1505  for (l = 0; l < NDIM; l++)
1506  q.bcov[l]=0;
1507  }
1508  whocalleducon=0; // return to normal state
1509  }
1510 
1511  // somewhat like function which averages stress terms
1512  pgas = pressure_rho0_u_simple(i,j,k,loc,pr[RHO],pr[UU]);
1513  bsq=0; for(l=0;l<NDIM;l++) bsq+=(q.bcon[l])*(q.bcov[l]);
1514 
1515 
1516 
1517 
1518  // modify effective position of integral in case adding energy/angular momentum to horizon
1519  // apparently not correct
1520  if(0 && enerregion==OUTSIDEHORIZONENERREGION){
1521  mysurface = surface /(q.ucov[TT]);
1522  }
1523  else mysurface = surface;
1524 
1525 
1526 
1527 
1528 
1530  //
1531  // do term-by-term flux for physics accounting
1532  // somewhat like dumps and somewhat like avg of terms
1533  // GODMARK: For finite volume method this does not differentiate between point and average values!
1534  //
1535 
1536 
1537 
1538 
1539 
1540 
1541 
1542  if(enerregion==0){
1543  bsqorho=bsq/pr[RHO]; // b^2/\rho
1544  // we assume user will check if this condition makes sense for a particular simulation.
 1545  // condition determining whether this zone is recorded as part of the jet2 region
 1546  //
 1547  // a real analysis would trace a field line from the horizon to the outer edge of the funnel-type region and only include the region inside it, where at the outer edge we have (or will eventually have) unbound, outgoing flow.
1548  if(dir==X1DN){
 1549  condjet2=(bsqorho>JETBSQORHO); // assumes the plunging region never develops such large values. It can, but generally does not; raise JETBSQORHO if this causes problems.
1550  }
1551  else if(dir==X1UP){ // assumes in jet2 region, but non-jet2 region could have this property.
1552  condjet2=((q.ucon[1]>0.0)&&(-q.ucov[0]-1.0>0.0)); // outgoing and unbound at outer edge
1553  }
1554  else{
1555  condjet2=0;
1556  }
1557  }
1558  else condjet2=0; // never touches pdottermsjet2 then
1559 
1560 
1561  surgdet=(ptrgeom->gdet)*mysurface;
1562 
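 // The terms accumulated below are the pieces of the ideal-MHD stress tensor and induction flux,
 //   T^mu_nu = (rho + u_g + p_g + b^2) u^mu u_nu + (p_g + b^2/2) delta^mu_nu - b^mu b_nu ,
 //   F^mu(B^j) = b^j u^mu - b^mu u^j ,
 // split term-by-term (pgas, rho, u_g, b^2 advective parts; gas and magnetic pressure; -b^mu b_nu; and
 // the two induction terms), each weighted by gdet times the surface element (surgdet).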
1563  // loop and if's since some calculations are redundantly simple for similar pl
1564  PDIAGLOOP(pl){
1565  if(pl==RHO){
1566  ftemp0=pr[pl]*(q.ucon[fluxdir])*surgdet;
1567  localpdotterms[dir][0][pl]+=ftemp0; // only one part
1568  if(condjet2) pdottermsjet2[dir][0][pl]+=ftemp0; // only one part
1569  // localpdot[dir][pl] += ftemp0 * surgdet;
1570  }
1571  // part0-6
1572  else if((pl>=UU)&&(pl<=U3)){
1573  l=pl-UU;
1574  // we currently DO NOT add flux[RHO] to flux[UU], just assume reader knows this is from the native stress
1575  ftemp0=pgas*(q.ucon[fluxdir])*(q.ucov[l])*surgdet;
1576  localpdotterms[dir][0][pl]+=ftemp0;
1577  if(condjet2) pdottermsjet2[dir][0][pl]+=ftemp0;
1578 
1579  ftemp1=MACP0A1(prim,i,j,k,RHO)*(q.ucon[fluxdir])*(q.ucov[l])*surgdet;
1580  localpdotterms[dir][1][pl]+=ftemp1;
1581  if(condjet2) pdottermsjet2[dir][1][pl]+=ftemp1;
1582 
1583 
1584  ftemp2=MACP0A1(prim,i,j,k,UU)*(q.ucon[fluxdir])*(q.ucov[l])*surgdet;
1585  localpdotterms[dir][2][pl]+=ftemp2;
1586  if(condjet2) pdottermsjet2[dir][2][pl]+=ftemp2;
1587 
1588 
1589  ftemp3=bsq*(q.ucon[fluxdir])*(q.ucov[l])*surgdet;
1590  localpdotterms[dir][3][pl]+=ftemp3;
1591  if(condjet2) pdottermsjet2[dir][3][pl]+=ftemp3;
1592 
1593 
1594  ftemp4=pgas*delta(fluxdir,pl-UU)*surgdet;
1595  localpdotterms[dir][4][pl]+=ftemp4;
1596  if(condjet2) pdottermsjet2[dir][4][pl]+=ftemp4;
1597 
1598 
1599  ftemp5=0.5*bsq*delta(fluxdir,pl-UU)*surgdet;
1600  localpdotterms[dir][5][pl]+=ftemp5;
1601  if(condjet2) pdottermsjet2[dir][5][pl]+=ftemp5;
1602 
1603 
1604  ftemp6=-(q.bcon[fluxdir])*(q.bcov[l])*surgdet;
1605  localpdotterms[dir][6][pl]+=ftemp6;
1606  if(condjet2) pdottermsjet2[dir][6][pl]+=ftemp6;
1607 
1608  }
1609  else if(pl==B1){
1610  ftemp0=(q.bcon[1])*(q.ucon[fluxdir])*surgdet; // flux_b1 term1
1611  localpdotterms[dir][0][pl]+=ftemp0;
1612  if(condjet2) pdottermsjet2[dir][0][pl]+=ftemp0;
1613 
1614 
1615  ftemp1=-(q.bcon[fluxdir])*(q.ucon[1])*surgdet; // flux_b1 term2
1616  localpdotterms[dir][1][pl]+=ftemp1;
1617  if(condjet2) pdottermsjet2[dir][1][pl]+=ftemp1;
1618 
1619  }
1620  else if(pl==B2){
1621  ftemp0=(q.bcon[2])*(q.ucon[fluxdir])*surgdet; // flux_b2 term1
1622  localpdotterms[dir][0][pl]+=ftemp0;
1623  if(condjet2) pdottermsjet2[dir][0][pl]+=ftemp0;
1624 
1625 
1626  ftemp1=-(q.bcon[fluxdir])*(q.ucon[2])*surgdet; // flux_b2 term2
1627  localpdotterms[dir][1][pl]+=ftemp1;
1628  if(condjet2) pdottermsjet2[dir][1][pl]+=ftemp1;
1629 
1630  }
1631  else if(pl==B3){
1632  ftemp0=(q.bcon[3])*(q.ucon[fluxdir])*surgdet; // flux_b3 term1
1633  localpdotterms[dir][0][pl]+=ftemp0;
1634  if(condjet2) pdottermsjet2[dir][0][pl]+=ftemp0;
1635 
1636  ftemp1=-(q.bcon[fluxdir])*(q.ucon[3])*surgdet; // flux_b3 term2
1637  localpdotterms[dir][1][pl]+=ftemp1;
1638  if(condjet2) pdottermsjet2[dir][1][pl]+=ftemp1;
1639 
1640  }
1641 
1642  }// end PDIAGLOOP over term-by-term fluxes
1643 
1644  }// end GENLOOP
1645 
1646  }// end if localdoflux
1647  }// end DIRLOOP
1648  }// end ENERloop
1649 
1650 
1651  return(0);
1652 
1653 }
1654 
1655 
1656 #define DEBUGFRLOOP 1
1657 
1660 void frdotout(void)
1661 {
1662  int i,j,k,pl,pliter,l;
1663  SFTYPE ftemp;
1664 #if(USEMPI)
1665  MPI_Request rrequest;
1666  MPI_Request srequest;
1667 #endif
1668  SFTYPE frdottemp[N1][NPR];
1669  SFTYPE *frtot;
1670  int ospos1;
1671  FILE*frout;
1672  static int firsttime=1;
1673 
1674  if(numprocs==1){
1675  frtot=(SFTYPE (*))(&frdot[0][0]);
1676  }
1677  else{
1678 #if(USEMPI)
1679  if(myid==0){
1680  frtot=(SFTYPE*) malloc(sizeof(SFTYPE)*totalsize[1]*NPR);
1681  if(frtot==NULL){
1682  dualfprintf(fail_file,"Cannot get frtot memory\n");
1683  myexit(1);
1684  }
1685  else{
1686  for(i=0;i<totalsize[1];i++) PDIAGLOOP(pl){
1687  frtot[i*NPR+pl]=0;
1688  }
1689  }
1690  for(l=0;l<numprocs;l++){ // just go over all cpus and assume only added to frdot per cpu for correct cpus.
1691  ospos1=(l%ncpux1)*N1;
1692  if(l==0){ // assumes cpu=0 is main cpu and is on horizon
1693  for(i=0;i<N1;i++) PDIAGLOOP(pl){
1694  frdottemp[i][pl]=frdot[i][pl];
1695  }
1696  }
1697  else{
1698  MPI_Irecv(frdottemp,N1*NPR,MPI_SFTYPE,MPIid[l], TAGSTARTFRDOT + l, MPI_COMM_GRMHD,&rrequest);
1699  MPI_Wait(&rrequest,&mpichstatus);
1700  }
1701  for(i=0;i<N1;i++) PDIAGLOOP(pl){
1702  frtot[(ospos1+i)*NPR+pl]+=frdottemp[i][pl];
1703 #if(DEBUGFRLOOP)
1704  if((ospos1+i)*NPR+pl>=totalsize[1]*NPR){
1705  dualfprintf(fail_file,"outside bounds: %d\n",(ospos1+i)*NPR+pl);
1706  myexit(1);
1707  }
1708 #endif
1709  }
1710  }
1711  }
1712  else{
1713  MPI_Isend(frdot,N1*NPR,MPI_SFTYPE,MPIid[0],TAGSTARTFRDOT + myid, MPI_COMM_GRMHD,&srequest);
1714  MPI_Wait(&srequest,&mpichstatus);
1715  }
1716 #endif
1717  }
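 // Gather pattern above: rank 0 allocates frtot[totalsize[1]*NPR], copies its own frdot, receives each
 // other rank's local frdot[N1][NPR], and accumulates it at radial offset ospos1=(l%ncpux1)*N1, so CPUs
 // sharing the same radial slot (different theta/phi) add into the same radial bins; other ranks just
 // send their frdot.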
1718  // now we have frtot with full fluxes vs. radius (totalsize[1]), so output
1719 
1720  if(myid==0){
1721  frout=fopen("frdot.out","at");
1722  if(frout==NULL){
1723  dualfprintf(fail_file,"Cannot open frdot.out\n");
1724  myexit(1);
1725  }
1726  if(firsttime){
1727  fprintf(frout,"%21.15g %ld %lld %lld %lld %21.15g %21.15g %21.15g %21.15g %21.15g %21.15g\n",
1728  t,realnstep,totalsize[1],totalsize[2],totalsize[3],startx[1],startx[2],startx[3],dx[1],dx[2],dx[3]);
1729  fflush(frout);
1730  }
1731 
1732  for(i=0;i<totalsize[1];i++){
1733  fprintf(frout,"%21.15g %d ",t,i);
1734  PDUMPLOOP(pliter,pl){// output only the dumped primitives
1735  fprintf(frout,"%21.15g ",frtot[i*NPR+pl]);
1736  }
1737  fprintf(frout,"\n");
1738  }
1739 
1740  if(frout!=NULL) fclose(frout);
1741 #if(USEMPI)
1742  if( numprocs != 1 ) free(frtot); // atch corrected: without this check, a single-process MPI run would free frtot even though it then points at frdot rather than malloc'd memory
1743 #endif
1744  }
1745  firsttime=0;
1746 }
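frdotout() gathers each rank's local frdot[N1][NPR] block on rank 0 and adds it into a global-radius array at the offset (rank % ncpux1)*N1. A stripped-down sketch of the same gather-and-offset pattern, using blocking MPI_Send/MPI_Recv instead of Isend/Irecv plus Wait, and illustrative constants (N1, NPR, NCPUX1) in place of the HARM globals:

/* Minimal sketch (not the HARM routine): rank 0 receives each rank's local
   radial-flux block and accumulates it at that rank's global radial offset,
   like frdotout().  All sizes and names here are illustrative. */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define N1     8   /* local radial zones per rank */
#define NPR    5   /* number of primitives */
#define NCPUX1 2   /* ranks along x1 */

static void gather_frdot(double local[N1][NPR], MPI_Comm comm)
{
  int rank, nprocs;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &nprocs);

  if (rank == 0) {
    int ntot1 = NCPUX1 * N1;                      /* totalsize[1] analogue */
    double *tot = calloc((size_t)ntot1 * NPR, sizeof *tot);
    double buf[N1][NPR];
    for (int l = 0; l < nprocs; l++) {
      int ospos1 = (l % NCPUX1) * N1;             /* global radial offset of rank l */
      if (l == 0) memcpy(buf, local, sizeof buf);
      else MPI_Recv(buf, N1 * NPR, MPI_DOUBLE, l, 100 + l, comm, MPI_STATUS_IGNORE);
      for (int i = 0; i < N1; i++)
        for (int pl = 0; pl < NPR; pl++)
          tot[(ospos1 + i) * NPR + pl] += buf[i][pl];
    }
    printf("tot[0] = %g\n", tot[0]);              /* frdotout() would write tot vs. radius here */
    free(tot);
  } else {
    MPI_Send(local, N1 * NPR, MPI_DOUBLE, 0, 100 + rank, comm);
  }
}

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  double local[N1][NPR];
  for (int i = 0; i < N1; i++)
    for (int pl = 0; pl < NPR; pl++)
      local[i][pl] = rank + 0.01 * i;             /* made-up per-rank data */
  gather_frdot(local, MPI_COMM_WORLD);
  MPI_Finalize();
  return 0;
}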
1747 
1748 
1749 
1750 
1752 void init_varstavg(void)
1753 {
1754  int i,j,k,ii;
1755 
1756  ZLOOP{ // OPENMPOPTMARK: Could parallelize this if used
1757  for(ii=0;ii<NUMNORMDUMP;ii++){
1758  GLOBALMACP0A1(normalvarstavg,i,j,k,ii)=0.0;
1759  GLOBALMACP0A1(anormalvarstavg,i,j,k,ii)=0.0;
1760  }
1761  for(ii=0;ii<NDIM;ii++){
1762 #if(CALCFARADAYANDCURRENTS)
1763  GLOBALMACP0A1(jcontavg,i,j,k,ii)=0.0;
1764  GLOBALMACP0A1(jcovtavg,i,j,k,ii)=0.0;
1765  GLOBALMACP0A1(ajcontavg,i,j,k,ii)=0.0;
1766  GLOBALMACP0A1(ajcovtavg,i,j,k,ii)=0.0;
1767 #endif
1768  GLOBALMACP0A1(massfluxtavg,i,j,k,ii)=0.0;
1769  GLOBALMACP0A1(amassfluxtavg,i,j,k,ii)=0.0;
1770  }
1771  for(ii=0;ii<NUMOTHER;ii++){
1772  GLOBALMACP0A1(othertavg,i,j,k,ii)=0.0;
1773  GLOBALMACP0A1(aothertavg,i,j,k,ii)=0.0;
1774  }
1775 #if(CALCFARADAYANDCURRENTS)
1776  for(ii=0;ii<NUMFARADAY;ii++){
1777  GLOBALMACP0A1(fcontavg,i,j,k,ii)=0.0;
1778  GLOBALMACP0A1(fcovtavg,i,j,k,ii)=0.0;
1779  GLOBALMACP0A1(afcontavg,i,j,k,ii)=0.0;
1780  GLOBALMACP0A1(afcovtavg,i,j,k,ii)=0.0;
1781  }
1782 #endif
1783  for(ii=0;ii<NUMSTRESSTERMS;ii++){
1784  GLOBALMACP0A1(tudtavg,i,j,k,ii)=0.0;
1785  GLOBALMACP0A1(atudtavg,i,j,k,ii)=0.0;
1786  }
1787 
1788  }
1789 }
1790 
1792 void final_varstavg(SFTYPE IDT) // IDT = 1/(total averaging interval); normalizes the accumulated sums
1793 {
1794  int i,j,k,ii;
1795 
1796  ZLOOP{ // OPENMPOPTMARK: Could parallelize this if used
1797  for(ii=0;ii<NUMNORMDUMP;ii++){
1798  GLOBALMACP0A1(normalvarstavg,i,j,k,ii)=GLOBALMACP0A1(normalvarstavg,i,j,k,ii)*IDT;
1799  GLOBALMACP0A1(anormalvarstavg,i,j,k,ii)=GLOBALMACP0A1(anormalvarstavg,i,j,k,ii)*IDT;
1800  }
1801  for(ii=0;ii<NDIM;ii++){
1802 #if(CALCFARADAYANDCURRENTS)
1803  GLOBALMACP0A1(jcontavg,i,j,k,ii)=GLOBALMACP0A1(jcontavg,i,j,k,ii)*IDT;
1804  GLOBALMACP0A1(jcovtavg,i,j,k,ii)=GLOBALMACP0A1(jcovtavg,i,j,k,ii)*IDT;
1805  GLOBALMACP0A1(ajcontavg,i,j,k,ii)=GLOBALMACP0A1(ajcontavg,i,j,k,ii)*IDT;
1806  GLOBALMACP0A1(ajcovtavg,i,j,k,ii)=GLOBALMACP0A1(ajcovtavg,i,j,k,ii)*IDT;
1807 #endif
1808  GLOBALMACP0A1(massfluxtavg,i,j,k,ii)*=IDT;
1809  GLOBALMACP0A1(amassfluxtavg,i,j,k,ii)*=IDT;
1810  }
1811  for(ii=0;ii<NUMOTHER;ii++){
1812  GLOBALMACP0A1(othertavg,i,j,k,ii)*=IDT;
1813  GLOBALMACP0A1(aothertavg,i,j,k,ii)*=IDT;
1814  }
1815 #if(CALCFARADAYANDCURRENTS)
1816  for(ii=0;ii<NUMFARADAY;ii++){
1817  GLOBALMACP0A1(fcontavg,i,j,k,ii)=GLOBALMACP0A1(fcontavg,i,j,k,ii)*IDT;
1818  GLOBALMACP0A1(fcovtavg,i,j,k,ii)=GLOBALMACP0A1(fcovtavg,i,j,k,ii)*IDT;
1819  GLOBALMACP0A1(afcontavg,i,j,k,ii)=GLOBALMACP0A1(afcontavg,i,j,k,ii)*IDT;
1820  GLOBALMACP0A1(afcovtavg,i,j,k,ii)=GLOBALMACP0A1(afcovtavg,i,j,k,ii)*IDT;
1821  }
1822 #endif
1823  for(ii=0;ii<NUMSTRESSTERMS;ii++){
1824  GLOBALMACP0A1(tudtavg,i,j,k,ii)=GLOBALMACP0A1(tudtavg,i,j,k,ii)*IDT;
1825  GLOBALMACP0A1(atudtavg,i,j,k,ii)=GLOBALMACP0A1(atudtavg,i,j,k,ii)*IDT;
1826  }
1827  }
1828 }
1829 
1831 int set_varstavg(SFTYPE tfrac) // tfrac = time weight for this accumulation step
1832 {
1833  int i,j,k;
1834  int iii;
1835  int ll;
1836  int l,ii,aii;
1837  FTYPE ftemp;
1838  FTYPE ftemp0,ftemp1,ftemp2,ftemp3,ftemp4,ftemp5,ftemp6;
1839  FTYPE pgas,bsq;
1840  FTYPE jcov[NDIM];
1841  FTYPE fcov[NUMFARADAY];
1842  FTYPE V[NDIM], vmin[NDIM], vmax[NDIM];
1843  int ignorecourant;
1844  struct of_state q;
1845  FTYPE X[NDIM];
1846  FTYPE divb;
1847  FTYPE b[NDIM],ucon[NDIM];
1848  FTYPE U[NPR];
1849  struct of_geom geomdontuse;
1850  struct of_geom *ptrgeom=&geomdontuse;
1851  int loc=CENT;
1852 
1853 
1854 
1855  ZLOOP{ // OPENMPOPTMARK: Could parallelize this if used
1856 
1857  // just like dumps
1858  bl_coord_ijk_2(i, j, k, loc, X,V);
1859  // if failed, the data output below is invalid, but the columns must still exist
1860  get_geometry(i, j, k, loc, ptrgeom);
1861  if (!failed) {
1862  if (get_state(GLOBALMAC(pdump,i,j,k), ptrgeom, &q) >= 1)
1863  FAILSTATEMENT("diag.c:set_varstavg()", "get_state() dir=0", 1);
1864 
1865  if (vchar_all(GLOBALMAC(pdump,i,j,k), &q, 1, ptrgeom, &vmax[1], &vmin[1],&ignorecourant) >= 1)
1866  FAILSTATEMENT("diag.c:set_varstavg()", "vchar_all() dir=1or2or3", 1);
1867  if (vchar_all(GLOBALMAC(pdump,i,j,k), &q, 2, ptrgeom, &vmax[2], &vmin[2],&ignorecourant) >= 1)
1868  FAILSTATEMENT("diag.c:set_varstavg()", "vchar_all() dir=1or2or3", 2);
1869  if (vchar_all(GLOBALMAC(pdump,i,j,k), &q, 3, ptrgeom, &vmax[3], &vmin[3],&ignorecourant) >= 1)
1870  FAILSTATEMENT("diag.c:set_varstavg()", "vchar_all() dir=1or2or3", 3);
1871  }
1872  else {// do a per zone check, otherwise set to 0
1873  whocalleducon=1; // suppress hard-failure mode: let the calls below return an error code, then zero the state and continue
1874  if (get_state(GLOBALMAC(pdump,i,j,k), ptrgeom, &q) >= 1){
1875  for (iii = 0; iii < NDIM; iii++)
1876  q.ucon[iii]=0;
1877  for (iii = 0; iii < NDIM; iii++)
1878  q.ucov[iii]=0;
1879  for (iii = 0; iii < NDIM; iii++)
1880  q.bcon[iii]=0;
1881  for (iii = 0; iii < NDIM; iii++)
1882  q.bcov[iii]=0;
1883  }
1884  if (vchar_all(GLOBALMAC(pdump,i,j,k), &q, 1, ptrgeom, &vmax[1], &vmin[1],&ignorecourant) >= 1){
1885  vmax[1]=vmin[1]=0;
1886  }
1887 
1888  if (vchar_all(GLOBALMAC(pdump,i,j,k), &q, 2, ptrgeom, &vmax[2], &vmin[2],&ignorecourant) >= 1){
1889  vmax[2]=vmin[2]=0;
1890  }
1891 
1892  if (vchar_all(GLOBALMAC(pdump,i,j,k), &q, 3, ptrgeom, &vmax[3], &vmin[3],&ignorecourant) >= 1){
1893  vmax[3]=vmin[3]=0;
1894  }
1895 
1896 
1897  whocalleducon=0; // return to normal state
1898 
1899  }
1900 
1901  // GODMARK: use of globals
1902  setfdivb(&divb, GLOBALPOINT(pdump), GLOBALPOINT(pstagdump), GLOBALPOINT(udump), GLOBALPOINT(Bhatdump), i, j, k); // GLOBALPOINT(pdump),GLOBALPOINT(udump) set externally GODMARK
1903 
1904 
1905  ii=0;
1906  aii=0;
1907 
1908  for(iii=0;iii<NPR;iii++){ // always NPR here
1909  GLOBALMACP0A1(normalvarstavg,i,j,k,ii++)+=GLOBALMACP0A1(pdump,i,j,k,iii)*tfrac;
1910  GLOBALMACP0A1(anormalvarstavg,i,j,k,aii++)+=fabs(GLOBALMACP0A1(pdump,i,j,k,iii))*tfrac;
1911  }
1912  GLOBALMACP0A1(normalvarstavg,i,j,k,ii++)+=divb*tfrac;
1913  GLOBALMACP0A1(anormalvarstavg,i,j,k,aii++)+=fabs(divb)*tfrac;
1914 
1915  for (iii = 0; iii < NDIM; iii++) GLOBALMACP0A1(normalvarstavg,i,j,k,ii++)+=q.ucon[iii]*tfrac;
1916  for (iii = 0; iii < NDIM; iii++) GLOBALMACP0A1(anormalvarstavg,i,j,k,aii++)+=fabs(q.ucon[iii])*tfrac;
1917 
1918  for (iii = 0; iii < NDIM; iii++) GLOBALMACP0A1(normalvarstavg,i,j,k,ii++)+=q.ucov[iii]*tfrac;
1919  for (iii = 0; iii < NDIM; iii++) GLOBALMACP0A1(anormalvarstavg,i,j,k,aii++)+=fabs(q.ucov[iii])*tfrac;
1920 
1921  for (iii = 0; iii < NDIM; iii++) GLOBALMACP0A1(normalvarstavg,i,j,k,ii++)+=q.bcon[iii]*tfrac;
1922  for (iii = 0; iii < NDIM; iii++) GLOBALMACP0A1(anormalvarstavg,i,j,k,aii++)+=fabs(q.bcon[iii])*tfrac;
1923 
1924  for (iii = 0; iii < NDIM; iii++) GLOBALMACP0A1(normalvarstavg,i,j,k,ii++)+=q.bcov[iii]*tfrac;
1925  for (iii = 0; iii < NDIM; iii++) GLOBALMACP0A1(anormalvarstavg,i,j,k,aii++)+=fabs(q.bcov[iii])*tfrac;
1926 
1927  GLOBALMACP0A1(normalvarstavg,i,j,k,ii++)+=vmin[1]*tfrac;
1928  GLOBALMACP0A1(anormalvarstavg,i,j,k,aii++)+=fabs(vmin[1])*tfrac;
1929 
1930  GLOBALMACP0A1(normalvarstavg,i,j,k,ii++)+=vmax[1]*tfrac;
1931  GLOBALMACP0A1(anormalvarstavg,i,j,k,aii++)+=fabs(vmax[1])*tfrac;
1932 
1933  GLOBALMACP0A1(normalvarstavg,i,j,k,ii++)+=vmin[2]*tfrac;
1934  GLOBALMACP0A1(anormalvarstavg,i,j,k,aii++)+=fabs(vmin[2])*tfrac;
1935 
1936  GLOBALMACP0A1(normalvarstavg,i,j,k,ii++)+=vmax[2]*tfrac;
1937  GLOBALMACP0A1(anormalvarstavg,i,j,k,aii++)+=fabs(vmax[2])*tfrac;
1938 
1939  GLOBALMACP0A1(normalvarstavg,i,j,k,ii++)+=vmin[3]*tfrac;
1940  GLOBALMACP0A1(anormalvarstavg,i,j,k,aii++)+=fabs(vmin[3])*tfrac;
1941 
1942  GLOBALMACP0A1(normalvarstavg,i,j,k,ii++)+=vmax[3]*tfrac;
1943  GLOBALMACP0A1(anormalvarstavg,i,j,k,aii++)+=fabs(vmax[3])*tfrac;
1944 
1945 
1946 #if(CALCFARADAYANDCURRENTS)
1947  lower_vec(GLOBALMAC(jcon,i,j,k),ptrgeom,jcov);
1948  for(ii=0;ii<NDIM;ii++){
1949  GLOBALMACP0A1(jcontavg,i,j,k,ii)+=GLOBALMACP0A1(jcon,i,j,k,ii)*tfrac;
1950  GLOBALMACP0A1(jcovtavg,i,j,k,ii)+=jcov[ii]*tfrac;
1951  GLOBALMACP0A1(ajcontavg,i,j,k,ii)+=fabs(GLOBALMACP0A1(jcon,i,j,k,ii))*tfrac;
1952  GLOBALMACP0A1(ajcovtavg,i,j,k,ii)+=fabs(jcov[ii])*tfrac;
1953  }
1954 #endif
1955 
1956  for(ii=0;ii<NDIM;ii++){
1957  ftemp=(ptrgeom->gdet)*GLOBALMACP0A1(pdump,i,j,k,RHO)*(q.ucon[ii]);
1958  GLOBALMACP0A1(massfluxtavg,i,j,k,ii)+=ftemp*tfrac;
1959  GLOBALMACP0A1(amassfluxtavg,i,j,k,ii)+=fabs(ftemp)*tfrac;
1960  }
1961 
1962  ii=0;
1963  aii=0;
1964  ftemp=(q.ucon[3])/(q.ucon[0]);
1965  GLOBALMACP0A1(othertavg,i,j,k,ii++)+=ftemp*tfrac; // accumulate u^\phi/u^t (rotation rate) like the other time-averaged quantities
1966  GLOBALMACP0A1(aothertavg,i,j,k,aii++)+=fabs(ftemp)*tfrac;
1967 
1968 #if(CALCFARADAYANDCURRENTS)
1969  lowerf(GLOBALMAC(fcon,i,j,k),ptrgeom,fcov);
1970  for(ii=0;ii<NUMFARADAY;ii++){
1971  GLOBALMACP0A1(fcontavg,i,j,k,ii)+=GLOBALMACP0A1(fcon,i,j,k,ii)*tfrac;
1972  GLOBALMACP0A1(fcovtavg,i,j,k,ii)+=fcov[ii]*tfrac;
1973  GLOBALMACP0A1(afcontavg,i,j,k,ii)+=fabs(GLOBALMACP0A1(fcon,i,j,k,ii))*tfrac;
1974  GLOBALMACP0A1(afcovtavg,i,j,k,ii)+=fabs(fcov[ii])*tfrac;
1975  }
1976 #endif
1977 
1978  pgas = pressure_rho0_u_simple(i,j,k,loc,GLOBALMACP0A1(pdump,i,j,k,RHO),GLOBALMACP0A1(pdump,i,j,k,UU));
1979  bsq=0; for(iii=0;iii<NDIM;iii++) bsq+=(q.bcon[iii])*(q.bcov[iii]);
1980 
1981  // part0
1982  ii=0;
1983  for(iii=0;iii<NDIM;iii++) for(l=0;l<NDIM;l++){
1984  ftemp0=pgas*(q.ucon[iii])*(q.ucov[l]);
1985  GLOBALMACP0A1(tudtavg,i,j,k,ii)+=ftemp0*tfrac;
1986  GLOBALMACP0A1(atudtavg,i,j,k,ii)+=fabs(ftemp0)*tfrac;
1987  ii++;
1988  }
1989  // part1
1990  for(iii=0;iii<NDIM;iii++) for(l=0;l<NDIM;l++){
1991  ftemp1=GLOBALMACP0A1(pdump,i,j,k,RHO)*(q.ucon[iii])*(q.ucov[l]);
1992  GLOBALMACP0A1(tudtavg,i,j,k,ii)+=ftemp1*tfrac;
1993  GLOBALMACP0A1(atudtavg,i,j,k,ii)+=fabs(ftemp1)*tfrac;
1994  ii++;
1995  }
1996  // part2
1997  for(iii=0;iii<NDIM;iii++) for(l=0;l<NDIM;l++){
1998  ftemp2=GLOBALMACP0A1(pdump,i,j,k,UU)*(q.ucon[iii])*(q.ucov[l]);
1999  GLOBALMACP0A1(tudtavg,i,j,k,ii)+=ftemp2*tfrac;
2000  GLOBALMACP0A1(atudtavg,i,j,k,ii)+=fabs(ftemp2)*tfrac;
2001  ii++;
2002  }
2003  // part3
2004  for(iii=0;iii<NDIM;iii++) for(l=0;l<NDIM;l++){
2005  ftemp3=bsq*(q.ucon[iii])*(q.ucov[l]);
2006  GLOBALMACP0A1(tudtavg,i,j,k,ii)+=ftemp3*tfrac;
2007  GLOBALMACP0A1(atudtavg,i,j,k,ii)+=fabs(ftemp3)*tfrac;
2008  ii++;
2009  }
2010  // part4
2011  for(iii=0;iii<NDIM;iii++) for(l=0;l<NDIM;l++){
2012  ftemp4=pgas*delta(iii,l);
2013  GLOBALMACP0A1(tudtavg,i,j,k,ii)+=ftemp4*tfrac;
2014  GLOBALMACP0A1(atudtavg,i,j,k,ii)+=fabs(ftemp4)*tfrac;
2015  ii++;
2016  }
2017  // part5
2018  for(iii=0;iii<NDIM;iii++) for(l=0;l<NDIM;l++){
2019  ftemp5=0.5*bsq*delta(iii,l);
2020  GLOBALMACP0A1(tudtavg,i,j,k,ii)+=ftemp5*tfrac;
2021  GLOBALMACP0A1(atudtavg,i,j,k,ii)+=fabs(ftemp5)*tfrac;
2022  ii++;
2023  }
2024  // part6
2025  for(iii=0;iii<NDIM;iii++) for(l=0;l<NDIM;l++){
2026  ftemp6=-(q.bcon[iii])*(q.bcov[l]);
2027  GLOBALMACP0A1(tudtavg,i,j,k,ii)+=ftemp6*tfrac;
2028  GLOBALMACP0A1(atudtavg,i,j,k,ii)+=fabs(ftemp6)*tfrac;
2029  ii++;
2030  }
2031 
2032  }
2033 
2034  return(0);
2035 
2036 }
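The seven "part" loops in set_varstavg() accumulate the MHD stress tensor term by term; summed, they give the standard ideal-MHD form T^mu_nu = (rho + u + p + b^2) u^mu u_nu + (p + b^2/2) delta^mu_nu - b^mu b_nu. A small hedged sketch that reassembles one component from those same seven pieces (names are illustrative, not the HARM macros):

/* Minimal sketch (not HARM code): the seven 'part' terms accumulated in
   set_varstavg() add up to the ideal-MHD stress tensor.  All names below
   are illustrative. */
#include <stdio.h>

static double delta(int mu, int nu) { return (mu == nu) ? 1.0 : 0.0; }

static double Tud(int mu, int nu, double rho, double u, double p,
                  const double ucon[4], const double ucov[4],
                  const double bcon[4], const double bcov[4])
{
  double bsq = 0.0;
  for (int i = 0; i < 4; i++) bsq += bcon[i] * bcov[i];

  double part0 = p   * ucon[mu] * ucov[nu];        /* gas pressure advection */
  double part1 = rho * ucon[mu] * ucov[nu];        /* rest-mass */
  double part2 = u   * ucon[mu] * ucov[nu];        /* internal energy */
  double part3 = bsq * ucon[mu] * ucov[nu];        /* magnetic enthalpy */
  double part4 = p         * delta(mu, nu);        /* isotropic gas pressure */
  double part5 = 0.5 * bsq * delta(mu, nu);        /* isotropic magnetic pressure */
  double part6 = -bcon[mu] * bcov[nu];             /* magnetic tension */
  return part0 + part1 + part2 + part3 + part4 + part5 + part6;
}

int main(void)
{
  /* made-up state, for illustration only */
  double ucon[4] = {1.0, 0.1, 0.0, 0.0}, ucov[4] = {-1.0, 0.1, 0.0, 0.0};
  double bcon[4] = {0.0, 0.01, 0.0, 0.0}, bcov[4] = {0.0, 0.01, 0.0, 0.0};
  printf("T^t_t = %g\n", Tud(0, 0, 1.0, 0.1, 0.05, ucon, ucov, bcon, bcov));
  return 0;
}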
2037 
2038 
2040 int average_calc(int doavg)
2041 {
2042  static FTYPE lastdt;
2043  static int calls=0;
2044  static FTYPE tavgi,tavgf;
2045  static int tavgflag=1;
2046 
2047  if(calls>0){ // since need 2 times
2048 
2049  if(tavgflag){
2050  // gets reached on next call after dump call or first time
2051  init_varstavg();
2052  tavgflag=0;
2053  tavgi=t;
2054  }
2055 
2056  // always do
2057  if(set_varstavg(0.5*(lastdt+dt))>=1) return(1);
2058 
2059  if(doavg==1){
2060  tavgflag=1;
2061  tavgf=t;
2062  final_varstavg(1.0/(tavgf-tavgi));
2063  // expect to dump after this function ends and before next call to this function
2064  }
2065  }
2066  calls++;
2067  lastdt=dt;
2068 
2069  return(0);
2070 }
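average_calc() implements a time-weighted running average: each step adds the current fields weighted by 0.5*(lastdt+dt) via set_varstavg(), and when a dump is due the accumulated sums are normalized by 1/(tavgf-tavgi) in final_varstavg(). A minimal standalone sketch of that accumulate-then-normalize pattern (all names here are illustrative):

/* Minimal sketch (not HARM code) of the pattern used by average_calc(),
   set_varstavg() and final_varstavg():  <x> = (sum_i x_i * dt_i) / (t_f - t_i). */
#include <stdio.h>

struct tavg { double sum, tstart; int started; };

static void tavg_add(struct tavg *a, double x, double t, double dt_weight)
{
  if (!a->started) { a->sum = 0.0; a->tstart = t; a->started = 1; }
  a->sum += x * dt_weight;              /* like set_varstavg(0.5*(lastdt+dt)) */
}

static double tavg_finish(struct tavg *a, double t)
{
  double avg = a->sum / (t - a->tstart);  /* like final_varstavg(1.0/(tavgf-tavgi)) */
  a->started = 0;                         /* next add restarts the averaging window */
  return avg;
}

int main(void)
{
  struct tavg a = {0};
  double t = 0.0, dt = 0.1, lastdt = 0.1;
  for (int n = 0; n < 10; n++) {
    double x = (double)n;                 /* stand-in for a zone quantity */
    tavg_add(&a, x, t, 0.5 * (lastdt + dt));
    lastdt = dt; t += dt;
  }
  printf("<x> over window = %g\n", tavg_finish(&a, t));
  return 0;
}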
2071 
2072 
2074 void diag_source_all(struct of_geom *ptrgeom, FTYPE *dU,SFTYPE Dt)
2075 {
2076  int pl,enerregion;
2077  FTYPE ftemp[NPR];
2078  FTYPE ftempdiag[NPR];
2079  SFTYPE *localsourceadd;
2080  int *localenerpos;
2081  SFTYPE (*localsourceaddterms)[NPR];
2082 
2083 
2084  if(DODIAGS==0) return;
2085 
2086 
2087  // does not matter what stage
2088  if(Dt>0.0){
2089 
2090  // dualfprintf(fail_file,"got here: i=%d j=%d t=%21.15g\n",ptrgeom->i,ptrgeom->j,t);
2091 
2092 
2093  ENERREGIONLOOP(enerregion){
2094  localenerpos=enerposreg[enerregion];
2095  localsourceaddterms=sourceaddtermsreg[enerregion];
2096  localsourceadd=sourceaddreg[enerregion];
2097 
2098  if(WITHINENERREGION(localenerpos,ptrgeom->i,ptrgeom->j,ptrgeom->k)){
2099  PDIAGLOOP(pl) ftemp[pl]=Dt*dVF*dU[pl]; // in UEVOLVE form
2100  // GODMARK: for finite volume method, below doesn't change the result if eomfunc=gdet.
2101  // Otherwise source would have to be completely recomputed for gdet case JUST for diagnostic to be consistent at higher order
2102  UtoU_evolve2diag(UEVOLVE,UDIAG,ptrgeom,ftemp,ftempdiag); // convert to diag form
2103 
2104  // now assign diagnostic form of source
2105  PDIAGLOOP(pl){
2106  localsourceadd[pl]+=ftempdiag[pl];
2107  } // end PDIAGLOOP on diag
2108  }
2109  }
2110  }
2111 
2112 }
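diag_source_all() folds a source vector into the energy diagnostics by scaling dU by Dt*dVF, converting it from the evolution form to the diagnostic form, and adding the result to the per-region totals. A hedged sketch of that accumulation, where convert_evolve_to_diag() is only a stand-in for UtoU_evolve2diag() (an identity placeholder here) and NPR is an assumed size:

/* Minimal sketch (not HARM code): scale a source vector by Dt*dV, convert it
   with a placeholder evolve->diag transform, and accumulate into a running
   total, as diag_source_all() does per ener region. */
#include <stdio.h>

#define NPR 8

static void convert_evolve_to_diag(const double in[NPR], double out[NPR])
{
  for (int pl = 0; pl < NPR; pl++) out[pl] = in[pl];   /* placeholder: identity map */
}

static void accumulate_source(double sourceadd[NPR], const double dU[NPR],
                              double Dt, double dV)
{
  double ftemp[NPR], ftempdiag[NPR];
  if (Dt <= 0.0) return;                               /* nothing to add */
  for (int pl = 0; pl < NPR; pl++) ftemp[pl] = Dt * dV * dU[pl];
  convert_evolve_to_diag(ftemp, ftempdiag);
  for (int pl = 0; pl < NPR; pl++) sourceadd[pl] += ftempdiag[pl];
}

int main(void)
{
  double sourceadd[NPR] = {0}, dU[NPR] = {0};
  dU[1] = 1.0e-3;                                      /* made-up energy source density */
  accumulate_source(sourceadd, dU, 0.01, 2.0);
  printf("accumulated source[1] = %g\n", sourceadd[1]);
  return 0;
}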
2113 
2114 
2116 void diag_source_comp(struct of_geom *ptrgeom, FTYPE (*dUcomp)[NPR],SFTYPE Dt)
2117 {
2118  int sc,pl,enerregion;
2119  FTYPE ftemp[NPR];
2120  FTYPE ftempdiag[NPR];
2121  SFTYPE *localsourceadd;
2122  int *localenerpos;
2123  SFTYPE (*localsourceaddterms)[NPR];
2124 
2125 
2126  if(DODIAGS==0) return;
2127 
2128 
2129  // does not matter what stage
2130  if(Dt>0.0){
2131 
2132  ENERREGIONLOOP(enerregion){
2133  localenerpos=enerposreg[enerregion];
2134  localsourceaddterms=sourceaddtermsreg[enerregion];
2135  localsourceadd=sourceaddreg[enerregion];
2136 
2137  if(WITHINENERREGION(localenerpos,ptrgeom->i,ptrgeom->j,ptrgeom->k)){
2138  SCLOOP(sc){
2139  PDIAGLOOP(pl) ftemp[pl]=Dt*dVF*dUcomp[sc][pl]; // in UEVOLVE form
2140  // GODMARK: for finite volume method, below doesn't change the result if eomfunc=gdet.
2141  // Otherwise source would have to be completely recomputed for gdet case JUST for diagnostic to be consistent at higher order
2142  UtoU_evolve2diag(UEVOLVE,UDIAG,ptrgeom,ftemp,ftempdiag); // convert to diag form
2143 
2144  // now assign diagnostic form of source
2145  PDIAGLOOP(pl){
2146  localsourceaddterms[sc][pl]+=ftempdiag[pl];
2147 #if(DOLUMVSR)
2148  // GODMARK: only correct for diagonal coordinate Jacobian in which each i is same radius for all j
2149  // Do NOT account for geometry changes in energy -- only account for non-geometrical changes in U[UU]
2150  // Only for enerregion==0
2151  if(pl==UU && sc!=GEOMSOURCE && enerregion==0) lumvsr[startpos[1]+ptrgeom->i]+=ftempdiag[pl];
2152 #endif
2153 
2154  } // end PDIAGLOOP on diag
2155  } // end SCLOOP
2156  }
2157  }
2158  }
2159 
2160 }
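The DOLUMVSR branch above bins the non-geometric energy source into a luminosity-versus-radius array at the global radial index startpos[1]+i. A minimal sketch of that radial binning, with illustrative sizes and names (N1LOCAL, N1TOTAL, startpos1 are assumptions, not HARM symbols):

/* Minimal sketch (not HARM code): accumulate a per-cell energy source into a
   global-radius bin, as the DOLUMVSR branch does with lumvsr[startpos[1]+i]. */
#include <stdio.h>

#define N1LOCAL 8    /* local radial zones on this rank */
#define N1TOTAL 16   /* global radial zones */

static void bin_lum_vs_r(double lumvsr[N1TOTAL], int startpos1,
                         const double dUenergy[N1LOCAL], double Dt, double dV)
{
  for (int i = 0; i < N1LOCAL; i++)
    lumvsr[startpos1 + i] += Dt * dV * dUenergy[i];  /* accumulate at global radius */
}

int main(void)
{
  double lumvsr[N1TOTAL] = {0}, dU[N1LOCAL] = {0};
  dU[3] = 1.0e-3;                     /* made-up local source term */
  bin_lum_vs_r(lumvsr, 8 /* startpos[1] analogue */, dU, 0.01, 2.0);
  printf("lumvsr[11] = %g\n", lumvsr[11]);
  return 0;
}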
2161 
2162 
2163 /*
2164 
2165  print {Si Sf Sgen Sgendissco Sgendissconomax Sgendisslab1 Sgendisslab1nomax Sgendisslab2 Sgendisslab2nomax}
2166  #
2167  # for Sod shock (test=51, N1=150) I get:
2168  # -143.6 -143.2 0.3631 0.3908
2169  # which is close
2170  #
2171  # at N1=300 I get:
2172  # -134.4 -134 0.3546 0.3789
2173  #
2174  # at N1=600 I get:
2175  # -129.8 -129.4 0.3502 0.3733
2176  #
2177  # where totals change just because we included
2178  # boundary zones that extend to different distances
2179  #
2180  # The results seem to converge, but the dissipation version
2181  # does not converge to the value expected from
2182  # the energy evolution and the discrete change in entropy
2183  #
2184  # Residual offset is likely due to error
2185  # in evolving forward with energy but
2186  # trying to use entropy evolution to estimate entropy
2187  # generation -- wasn't expected to be exact
2188  #
2189  # Residual error may be related to why
2190  # non-conservative schemes don't obtain entropy
2191  # jump correctly even with dissipation
2192  #
2193  # I noticed that the ratio of errors was related to
2194  # how much the shock spreads at its head and tail
2195  # (by a couple of extra zones), and this leakage
2196  # seems to account for the error in entropy generation
2197  # Could just be coincidence
2198 */
2199 
2200 
2201 
2202 // whether to skip the full inversion, since it can be expensive
2203 #define AVOIDFULLINVERSION 1
2204 
2210 int diss_compute(int evolvetype, int inputtype, FTYPE *U, struct of_geom *ptrgeom, FTYPE *prbefore, FTYPE *pr, struct of_newtonstats *newtonstats)
2211 {
2212  FTYPE prother[NUMDISSVERSIONS][NPR];
2213  FTYPE Uother[NUMDISSVERSIONS][NPR];
2214  FTYPE Uenergy[NPR];
2215  PFTYPE otherfail;
2216  int enerregion;
2217  FTYPE dissenergy[NUMDISSVERSIONS];
2218  int pl,pliter;
2219  struct of_state q,qsimple,qfull,qenergy;
2220  FTYPE Unew[NPR];
2221  FTYPE primtoUcons;
2222  FTYPE Ugeomfree[NPR];
2223  int loopinv;
2224  int choseninv;
2225  FTYPE entropy,entropydiss;
2226 
2227 
2228 
2229 #if(DOENTROPY==DONOENTROPY || ENTROPY==-100)
2230  dualfprintf(fail_file,"Should not be doing diss_compute() unless DOENTROPY!=DONOENTROPY\n");
2231  myexit(7626762);
2232 #endif
2233 
2234 
2235 
2236  if(DODISS||DODISSVSR){
2237 
2238 
2239 
2241  //
2242  // Idea is following:
2243  //
2244  // 1) We follow a deformed fluid element that at the new time has the exact shape of the cell
2245  // 2) Now within that cell the conserved entropy and the entropy deduced from the energy equation can be compared
2246  // One compares S=s u^t tracked with S deduced from the energy equation either by:
2247  // a) s = \rho_0 S/D with D=\rho_0 u^t, where \rho_0 comes from the energy evolution (assumes the fluid elements match)
2248  // b) Or do a full inversion of the conserved quantities, comparing the energy version with the entropy version (the full inversion seems unreliable)
2249  // 3) To obtain lab-frame (at infinity) values do:
2250  // a) dU/d\tau u_\mu where u is the extra internal energy added by dissipation (assumes all of the dissipated energy reaches infinity)
2251  // b) One can also compare T^t_t[energy evolution] and T^t_t[entropy version of $u$, otherwise the same]
2252  // This assumes emission is instantaneous (no cooling timescale), so cooling time does not enter the d\tau vs. dt issue (otherwise one would need a physical model of cooling)
2253  //
2254  // 4) The comoving-frame entropy is useful as the source term of the entropy equation:
2255  // d/d\tau(entropy/\rho_0)=0 -> \nabla_\mu(entropy u^\mu)=0
2256  // or \nabla_\mu(entropy u^\mu) = \rho_0 d/d\tau(entropy/\rho_0)
2257  // Like with internal energy, assume all entropy goes away on each timestep (so no issue with d\tau vs. dt)
2258  // Then the source is just \rho_0 \delta(entropy/\rho_0) for the timestep being studied
2259  // So \rho_0 d(s/\rho_0)/dt = ds/dt - (s/\rho_0) d\rho_0/dt, so if ds is the comoving source then the term we need is:
2260  // ds - (s/\rho_0) d\rho_0 , so we need how \rho_0 changed in the fluid element from the previous time
2261  // However, we are not comparing the previous time with the current time; we are instead comparing the same
2262  // fluid element evolved differently. So the only source of variance between the two is assumed to be
2263  // (for SIMPLEINV) the entropy, not density changes. GODMARK: Is this right?
2264  //
2265  // I noticed that the shocks can be entropy-violating in general (i.e. total entropy decreases)
2266  // So we also output versions without MAX, which better match the true conserved entropy
2267  //
2269 
2270 
2271 
2272 
2273 
2274 
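The comment block above boils down to: invert the conserved entropy assuming the same fluid element, compare the resulting internal energy with the energy-evolved one, and project the difference to the lab frame with -u_t. A compact hedged sketch of just those final measures, taking u_energy, u_entropy and u_t as given (all names illustrative; the MAX clip mirrors the no-negative-dissipation choice below, and the NOMAX variants simply omit it):

/* Minimal sketch (not HARM code) of the dissipation measures: compare the
   energy-evolved internal energy with the entropy-derived one for the same
   fluid element, then project to the lab frame with -u_t. */
#include <stdio.h>

#define MAXD(a,b) ((a) > (b) ? (a) : (b))

static void diss_measures(double u_energy, double u_entropy, double ucov_t,
                          double *diss_co, double *diss_lab1)
{
  double d = u_energy - u_entropy;    /* comoving dissipated internal energy */
  *diss_co   = MAXD(d, 0.0);          /* clip entropy-violating (negative) values */
  *diss_lab1 = *diss_co * (-ucov_t);  /* energy at infinity via dU/d\tau * u_t */
}

int main(void)
{
  double co, lab1;
  diss_measures(1.05e-3, 1.00e-3, -1.2, &co, &lab1);  /* made-up numbers */
  printf("comoving diss = %g, lab-frame diss = %g\n", co, lab1);
  return 0;
}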
2276  //
2277  // Obtain simple inversion (all other components (velocity, etc.) assumed same in final state -- this is consistent with fluid element being same)
2278  //
2279  // This is \partial U/\partial\tau or internal energy changed in comoving frame
2280  // Source term to energy-momentum equation is (\nabla T)_\mu = dU/d\tau u_\mu
2281  // so energy-momentum transferred to infinity is: {L_\inf}_\mu = dU/d\tau u_\mu
2282  // So the conserved energy at infinity is {L_\inf}_t = dU/d\tau u_t
2283  //
2284  //
2286 
2287  // set rho,v,B
2288  // ufromentropy_calc needs prother to at least have RHO set for ideal gas EOS
2289  PALLLOOP(pl) prother[DISSSIMPLEINVCO][pl]=pr[pl];
2290 
2291  UtoU(inputtype,UNOTHING,ptrgeom,U,Ugeomfree);
2292  // Ugeomfree[ENTROPY] is \rho_0 u^t (entropy)
2293  // invertentropyflux_calc(ptrgeom,Ugeomfree[ENTROPY],TT, *q, FTYPE*pr); // don't know q, and don't need to, just do below
2294 
2295  // DEBUG:
2296  // dualfprintf(fail_file,"S=%21.15g U[rho]=%21.15g pr[RHO]=%21.15g\n",Ugeomfree[ENTROPY],Ugeomfree[RHO],pr[RHO]);
2297 
2298  // \rho_0 U[ENTROPY]/U[RHO] is entropy density in expected form
2299  entropy=pr[RHO]*Ugeomfree[ENTROPY]/Ugeomfree[RHO];
2300  ufromentropy_calc(ptrgeom,entropy, prother[DISSSIMPLEINVCO]);
2301  prother[DISSSIMPLEINVCO][UU]=prother[DISSSIMPLEINVCO][ENTROPY];
2302  // now prother[DISSSIMPLEINVCO][UU,ENTROPY] contains internal energy from simplified entropy inversion
2303 
2304 
2306  //
2307  // next obtain entropy from energy-evolution
2308  //
2310 
2311  // entropydiss is entropy from energy-evolution
2312  entropy_calc(ptrgeom,pr, &entropydiss);
2313 
2314 
2315 
2316 
2318  //
2319  // next obtain internal energy from full inversion (in general fluid elements being compared are actually different, so this can actually be less accurate)
2320  //
2322  // PALLLOOP(pl) prother[DISSFULLINVCO][pl]=pr[pl]; // guess
2323  PALLLOOP(pl) prother[DISSFULLINVCO][pl]=prbefore[pl]; // guess is better chosen from pre-energy evolution
2324  prother[DISSFULLINVCO][UU]=prother[DISSSIMPLEINVCO][UU]; // guess better with simple entropy version for internal energy
2325  prother[DISSFULLINVCO][ENTROPY]=prother[DISSSIMPLEINVCO][ENTROPY]; // guess much better with simple entropy version for internal energy
2326  // invert with entropy evolution
2327 #if(AVOIDFULLINVERSION==0)
2328  // Noticed this uses too much computational power and may not even have a solution, causing Newton's method to hit a large number of iterations
2329  PFTYPE lpflagrad=0;
2330  int showmessages=0; // don't show messages if diagnostic fails unless debugging.
2331  int allowlocalfailurefixandnoreport=1;
2332  Utoprimdiss(showmessages,allowlocalfailurefixandnoreport, evolvetype, inputtype, U, ptrgeom, prother[DISSFULLINVCO],&otherfail, &newtonstats,newtonstats,&lpflagrad);
2333 #else
2334  PALLLOOP(pl) prother[DISSFULLINVCO][pl] = prother[DISSSIMPLEINVCO][pl];
2335  otherfail=0;
2336 #endif
2337  prother[DISSFULLINVCO][UU]=prother[DISSFULLINVCO][ENTROPY];
2338  // (prother[DISSFULLINVCO][UU,ENTROPY]) now both contain internal energy as derived from full entropy inversion
2339  // notice that all the other quantities could also be different (if doentropy==evolvefullentropy), hence the prother[DISSFULLINVCO] variable for temporary storage.
2340 
2341 
2342 
2343 
2345  //
2346  // Get primitive state so have u_\mu for writing energy-momentum source (used for LAB1)
2347  //
2348  // Also obtain conserved quantities that assume internal energy is one from entropy evolution versions (used for LAB2)
2349  //
2351 
2352  // assume u^\mu and b^\mu from get_state() are the same for all cases, so just get them once
2353  if (get_stateforUdiss(pr, ptrgeom, &qenergy) >= 1) FAILSTATEMENT("utoprim.c:utoprim()", "get_state()", 1);
2354  qfull=qenergy; // copy entire q
2355  qsimple=qenergy; // copy entire q
2356 
2357 
2358  // if (get_stateforUdiss(prother[DISSSIMPLEINVCO], ptrgeom, &qsimple) >= 1) FAILSTATEMENT("utoprim.c:utoprim()", "get_state()", 1);
2359  if (primtoU(UNOTHING, prother[DISSSIMPLEINVCO], &qenergy, ptrgeom, Uother[DISSSIMPLEINVLAB2], NULL) >= 1) FAILSTATEMENT("utoprim.c:utoprim()", "primtoU()", 1);
2360 
2361 
2362  if(IFUTOPRIMNOFAILORFIXED(otherfail) && AVOIDFULLINVERSION==0){
2363  // if didn't fail, then setup qfull and conserved quantity associated with fullinv version of primitive
2364  // if (get_stateforUdiss(prother[DISSFULLINVCO], ptrgeom, &qfull) >= 1) FAILSTATEMENT("utoprim.c:utoprim()", "get_state()", 1);
2365  if (primtoU(UNOTHING, prother[DISSFULLINVCO], &qenergy, ptrgeom, Uother[DISSFULLINVLAB2], NULL) >= 1) FAILSTATEMENT("utoprim.c:utoprim()", "primtoU()", 1);
2366  }
2367  else{
2368  PALLLOOP(pl) Uother[DISSFULLINVLAB2][pl]=Uother[DISSSIMPLEINVLAB2][pl];
2369  }
2370 
2371 
2372 
2373 
2374  // get state for final energy primitive
2375  if (primtoU(UNOTHING, pr, &qenergy, ptrgeom, Uenergy, NULL) >= 1) FAILSTATEMENT("utoprim.c:utoprim()", "primtoU()", 1);
2376 
2378  //
2379  // choose to store entropy version of internal energy in primitive in case wanted
2380  //
2382  // just overwrite entropy primitive, leave rest same as from full energy equation
2383  // pr[ENTROPY]=prother[DISSFULLINVCO][UU];
2384  pr[ENTROPY]=prother[DISSSIMPLEINVCO][ENTROPY]; // don't use this currently
2385 
2386 
2387 
2389  //
2390  // now compare pr[UU] and pr[ENTROPY] with some kind of diagnostic
2391  //
2393  if(evolvetype==EVOLVEUTOPRIM){
2394  // then during evolution and pr[UU]-pr[ENTROPY] is relevant to physical calculation
2395  // store difference
2396 
2397  // report failure to invert
2398  if(DODISS){
2399  if(IFUTOPRIMFAIL(otherfail)) GLOBALMACP0A1(dissfunpos,ptrgeom->i,ptrgeom->j,ptrgeom->k,DISSFAILUREINV)+=1.0;
2400  }
2401 
2402 
2404  //
2405  // obtain various versions of dissipation
2406  //
2408  for(loopinv=0;loopinv<NUMDISSVERSIONS;loopinv++){
2409 
2410 
2412  //
2413  // Choose which prother[] to use
2414  //
2415  // only use if inversion succeeded (otherwise assume entropy evolution wanted negative internal energy and so not a good solution)
2416  //
2418  if(
2419  loopinv==DISSSIMPLEINVCO || loopinv==DISSSIMPLEINVCONOMAX
2420  || loopinv==DISSSIMPLEINVLAB1 || loopinv==DISSSIMPLEINVLAB1NOMAX
2421  || loopinv==DISSSIMPLEINVLAB2 || loopinv==DISSSIMPLEINVLAB2NOMAX
2422  || loopinv==DISSENTROPYCO || loopinv==DISSENTROPYCONOMAX
2423  || loopinv==DISSENTROPYLAB1 || loopinv==DISSENTROPYLAB1NOMAX
2424  || loopinv==DISSENTROPYLAB2 || loopinv==DISSENTROPYLAB2NOMAX
2425  ){
2426  choseninv=DISSSIMPLEINVCO;
2427  }
2428  else if(
2429  loopinv==DISSFULLINVCO || loopinv==DISSFULLINVCONOMAX
2430  || loopinv==DISSFULLINVLAB1 || loopinv==DISSFULLINVLAB1NOMAX
2431  || loopinv==DISSFULLINVLAB2 || loopinv==DISSFULLINVLAB2NOMAX
2432  ){
2433  if(IFUTOPRIMNOFAILORFIXED(otherfail)) choseninv=DISSFULLINVCO;
2434  else choseninv=DISSSIMPLEINVCO; // still want some result
2435  }
2436  else{
2437  dualfprintf(fail_file,"In setting up choseninv: No such loopinv=%d\n",loopinv);
2438  myexit(72698626);
2439  }
2440 
2441 
2442  if(! (choseninv==DISSSIMPLEINVCO||choseninv==DISSFULLINVCO)){
2443  dualfprintf(fail_file,"In setting up choseninv: Chose bad choseninv=%d\n",choseninv);
2444  myexit(1865728326);
2445  }
2446 
2447 
2449  //
2450  // Compute dissipation energy density over timestep
2451  // The final dissipated energy is then obtained by multiplying by $\detg dV$ after the calculation below
2452  //
2453  // GODMARK: The DISSVSR is only correct for diagonal coordinate Jacobian in which each i is same radius for all j
2454  //
2456 
2457  if(
2458  loopinv==DISSSIMPLEINVCO || loopinv==DISSFULLINVCO || loopinv==DISSSIMPLEINVLAB1 || loopinv==DISSFULLINVLAB1
2459  || loopinv==DISSSIMPLEINVCONOMAX || loopinv==DISSFULLINVCONOMAX || loopinv==DISSSIMPLEINVLAB1NOMAX || loopinv==DISSFULLINVLAB1NOMAX
2460  ){
2461  // dissipated energy is the energy-evolution internal energy minus the entropy-evolution internal energy
2462  // so a shock has this as positive
2463  // Notice Sod shock has negative dissipation sometimes at shock front
2464  // So use MAX to NOT ALLOW negative dissipation (assume dissipation is really 0 and assume error in entropy calculation)
2465  dissenergy[loopinv]=pr[UU]-prother[choseninv][UU];
2466 
2467  // MAX versions:
2468  if(loopinv==DISSSIMPLEINVCO || loopinv==DISSFULLINVCO || loopinv==DISSSIMPLEINVLAB1 || loopinv==DISSFULLINVLAB1){
2469  dissenergy[loopinv]=MAX(dissenergy[loopinv],0.0);
2470  }
2471 
2472  // LAB1 multiplications:
2473  if(loopinv==DISSSIMPLEINVLAB1 || loopinv==DISSSIMPLEINVLAB1NOMAX){
2474  // negative sign on u_t because dU/d\tau opposite sign compared to T^t_t
2475  // here we use fluid velocity from simple evolution, which is same fluid velocity as from energy evolution
2476  dissenergy[loopinv]*=(-qsimple.ucov[TT]);
2477  }
2478  else if(loopinv==DISSFULLINVLAB1 || loopinv==DISSFULLINVLAB1NOMAX ){
2479  // here we use fluid velocity from entropy evolution as consistent with the meaning of "full" throughout
2480  dissenergy[loopinv]*=(-qfull.ucov[TT]);
2481  }
2482 
2483  }
2484  else if(loopinv==DISSENTROPYCO || loopinv==DISSENTROPYCONOMAX){
2485  // assume rest are entropy generation quantities
2486  // assume entropy must increase
2487  dissenergy[loopinv]= entropydiss-entropy;
2488 
2489  if(loopinv==DISSENTROPYCO){
2490  dissenergy[loopinv]=MAX(dissenergy[loopinv],0.0);
2491  }
2492  }
2493  else if(loopinv==DISSENTROPYLAB1 || loopinv==DISSENTROPYLAB1NOMAX){
2494  // assume rest are entropy generation quantities
2495  // assume entropy must increase
2496  dissenergy[loopinv]= entropydiss-entropy; // same as comoving version as source term of entropy equation w.r.t. comparing fluid elements (not different times) That is, I set the d\rho_0/dt term to zero. GODMARK?
2497 
2498  if(loopinv==DISSENTROPYLAB1){
2499  dissenergy[loopinv]=MAX(dissenergy[loopinv],0.0);
2500  }
2501 
2502  }
2503  else if(loopinv==DISSSIMPLEINVLAB2 || loopinv==DISSSIMPLEINVLAB2NOMAX){
2504  // negative sign is because -T^t_t>0 for energy>0
2505  // the order of the U's is chosen so the dissipation comes out positive
2506  dissenergy[loopinv] = -(Uenergy[UU] - Uother[DISSSIMPLEINVLAB2][UU]); // could have used Ugeomfree[UU] too
2507 
2508  if(loopinv==DISSSIMPLEINVLAB2){
2509  dissenergy[loopinv]=MAX(dissenergy[loopinv],0.0);
2510  }
2511  }
2512  else if(loopinv==DISSFULLINVLAB2 || loopinv==DISSFULLINVLAB2NOMAX){
2513  // negative sign is because -T^t_t>0 for energy>0
2514  // the order of the U's is chosen so the dissipation comes out positive
2515  // the NOMAX variant leaves off the MAX
2516  // note stored U in Uother for SIMPLEINV for simplicity
2517  dissenergy[loopinv] = -(Uenergy[UU] - Uother[DISSFULLINVLAB2][UU]); // could have used Ugeomfree[UU] too
2518 
2519  if(loopinv==DISSFULLINVLAB2){
2520  dissenergy[loopinv]=MAX(dissenergy[loopinv],0.0);
2521  }
2522  }
2523  else if(loopinv==DISSENTROPYLAB2 || loopinv==DISSENTROPYLAB2NOMAX){
2524  // computing S from U and S from Uother and taking difference
2525  // the order of the U's is chosen so the dissipation comes out positive
2526  // the NOMAX variant leaves off the MAX
2527  // using SIMPLEINV here, just as SIMPLEINV was used for the comoving entropy version of the dissipation
2528  // Here Uenergy[ENTROPY] is conserved entropy as computed from energy-evolved pr's
2529  dissenergy[loopinv] = (Uenergy[ENTROPY] - Uother[DISSSIMPLEINVLAB2][ENTROPY]);
2530 
2531  if(loopinv==DISSENTROPYLAB2){
2532  dissenergy[loopinv]=MAX(dissenergy[loopinv],0.0);
2533  }
2534 
2535  }
2536  else{
2537  dualfprintf(fail_file,"In computing dissenergy: No such loopinv=%d\n",loopinv);
2538  myexit(72698627);
2539  }
2540 
2541 
2542 
2543  // only for enerregion==0
2544  if(DODISSVSR) dissvsr[loopinv][startpos[1]+ptrgeom->i]+=dissenergy[loopinv]*ptrgeom->gdet * dVF;
2545 
2546  if(DODISS){
2547  // local integral
2548  ENERREGIONLOOP(enerregion){
2549  diss=dissreg[enerregion];
2550  if( WITHINENERREGION(enerpos,ptrgeom->i,ptrgeom->j,ptrgeom->k) ){
2551  diss[loopinv]+=dissenergy[loopinv]*ptrgeom->gdet * dVF; // actual energy
2552  }
2553  }
2554 
2555  // function over all space to be written as dump file
2556  // energy density, which can be integrated in SM since grid is given
2557  // multiply by gdet*dV since gdet may change in time!
2558  GLOBALMACP0A1(dissfunpos,ptrgeom->i,ptrgeom->j,ptrgeom->k,loopinv)+=dissenergy[loopinv]*ptrgeom->gdet * dVF;
2559  }
2560 
2561 
2562  // DEBUG:
2563  // dualfprintf(fail_file,"dissenergy[%d][%d][%d][%d]=%21.15g :: simple=%21.15g full=%21.15g energy=%21.15g\n",ptrgeom->i,ptrgeom->j,ptrgeom->k,loopinv,GLOBALMACP0A1(dissfunpos,ptrgeom->i,ptrgeom->j,ptrgeom->k,loopinv),prother[DISSSIMPLEINVCO][ENTROPY],prother[DISSFULLINVCO][ENTROPY],pr[UU]);
2564 
2565 
2566 
2567  }// end loop over versions
2568  }// end if evolving
2569  }// end if DODISS||DODISSVSR
2570 
2571 
2572 
2573  // now must redefine U[ENTROPY] so consistent with p(U[normalgrmhd])
2574  if(DOENOFLUX!=NOENOFLUX){
2575  // primtoUcons=UENTROPY; // not UENTROPY since this doesn't have gdet factor!
2576  if (get_stateforUdiss(pr, ptrgeom, &q) >= 1) FAILSTATEMENT("utoprim.c:utoprim()", "get_state()", 1);
2577  if (primtoU(UEVOLVE, pr, &q, ptrgeom, Unew, NULL) >= 1) FAILSTATEMENT("utoprim.c:utoprim()", "primtoU()", 1);
2578  U[ENTROPY] = Unew[ENTROPY]; // now conserved entropy is consistent with real primitive state
2579  }
2580  // this is done automatically if doing NOENOFLUX since U is obtained again from new primitives.
2581 
2582 
2583  return(0);
2584 
2585 }
2586 
2587