HARM — harm and utilities
dump_ener.c — integrated energy/flux/debug diagnostic output routines
(Doxygen navigation: All Data Structures | Files | Functions | Variables | Typedefs | Macros | Pages)
Go to the documentation of this file.
1 #include "decs.h"
2 
3 #define PROBEFILE (0 &&DOENERDIAG) // whether to do probe file
4 
10 
// Write the integrated ("ener") diagnostics for every energy region:
// conserved totals, surface fluxes, floor/source accounting, dissipation,
// luminosity-vs-radius, self-gravity profiles, metric parameters, per-CPU
// probe data, and debug counters.
//
// Parameters:
//   doener    - nonzero: write a row to the ener/flener/gener (etc.) files
//   dordump   - nonzero: integrations are performed even if doener==0
//   call_code - INIT_OUT (open files / set initial totals),
//               FINAL_OUT (report initial-vs-final conservation and close files),
//               or a normal-step code
// Returns 0 on success, 1 if any integrate()/integratel() call fails.
//
// NOTE(review): only CPU=0 (myid==0) opens and writes the region files;
// integrations are collective over all CPUs.
int dump_ener(int doener, int dordump, int call_code)
{

  // Per-region file handles are static so they persist between calls;
  // the name buffers are rebuilt on every call but only used at INIT time.
  static FILE *flenerreg_file[NUMENERREGIONS];
  static FILE *flener_file;
  char FLENERREGIONNAME[NUMENERREGIONS][MAXFILENAME];
  char *FLENERNAME;

  static FILE *enerreg_file[NUMENERREGIONS];
  static FILE *ener_file;
  char ENERREGIONNAME[NUMENERREGIONS][MAXFILENAME];
  char *ENERNAME;

  static FILE *generreg_file[NUMENERREGIONS];
  static FILE *gener_file;
  char GENERREGIONNAME[NUMENERREGIONS][MAXFILENAME];
  char *GENERNAME;


  static FILE *metricparmsener_file;

  // per cpu file
  static FILE *probe_file;

  // single region files
  static FILE *debug_file;
  static FILE *lumener_file;
  static FILE *dissener_file[NUMDISSVERSIONS];

  int fileiter;
#define NUMSELFGRAV 9  // number of selfgravvsr<N>.out profile files
  static FILE *selfgravener_file[NUMSELFGRAV];


  char dfnam[MAXFILENAME], ifnam[MAXFILENAME];
  int i, j, k, pl, pliter, l, dir,sc,fl,indexfinalstep,floor,tscale;
  int dissloop;
  // FILE *imagecnt_file, *dumpcnt_file,*avgcnt_file,*debugcnt_file,*lumvsrcnt_file,*dissvsrcnt_file;

  // full grid only
  FTYPE divb, divbmax = 0, divbavg =0;
  // special jet2
  SFTYPE pdottermsjet2_tot[COMPDIM*2][NUMFLUXTERMS][NPR];

  // all regions, local quantities
  // these quantities can always be computed, no need in restart file and so in defs.h where many other _tot quantities appear
  SFTYPE Ureg_tot[NUMENERREGIONS][NPR];
  SFTYPE Ureg_final[NUMENERREGIONS][NPR];
  // each region, local quantities
  SFTYPE *U_tot;
  SFTYPE *U_final;

  int enerregion;

  static int firsttime = 1;  // first-call latch: forces file opening/init
  SFTYPE ftemp0,ftemp1;

  int ii;
  FTYPE phibh;
  extern FTYPE phibh_compute(FTYPE M, FTYPE a, FTYPE r, FTYPE th);




  //
  // Open/append some files (first call or explicit INIT_OUT only)
  //

  if ((call_code == INIT_OUT) || (firsttime == 1)) {

    if(PROBEFILE&&DOENERDIAG){ // NOTEMARK: Locking probe output to DOENERDIAG=0/1
      // PER CPU FILE
      sprintf(dfnam,"probe.dat%s",myidtxt);
      if((probe_file=fopen(dfnam,"at"))==NULL){
        dualfprintf(fail_file,"Can't open probe file\n");
        myexit(1);
      }
    }


    if(myid==0) if(DODEBUG){
      // CPU=0 only
      sprintf(dfnam,"debug.out");
      myfopen(dfnam,"a+","error opening debug output file\n",&debug_file);
    }

    if(myid==0) if(DOLUMVSR){
      // CPU=0 only
      sprintf(dfnam,"lumvsr.out");
      myfopen(dfnam,"a+","error opening lumvsr output file\n",&lumener_file);
    }

    if(myid==0) if(DODISSVSR){
      // CPU=0 only: one file per dissipation version
      for(fileiter=0;fileiter<NUMDISSVERSIONS;fileiter++){
        sprintf(dfnam,"dissvsr%d.out",fileiter);
        myfopen(dfnam,"a+","error opening dissvsr output file\n",&dissener_file[fileiter]);
      }
    }

    if(myid==0) if(DOSELFGRAVVSR){
      // CPU=0 only: one file per self-gravity radial profile
      for(fileiter=0;fileiter<NUMSELFGRAV;fileiter++){
        sprintf(dfnam,"selfgravvsr%d.out",fileiter);
        myfopen(dfnam,"a+","error opening selfgravvsr output file\n",&selfgravener_file[fileiter]);
      }
    }

    if(myid==0) if(DOEVOLVEMETRIC){
      // CPU=0 only
      sprintf(dfnam,"metricparms.out");
      myfopen(dfnam,"a+","error opening metricparms output file\n",&metricparmsener_file);
    }


    // CPU=0 only: open the three per-region files (flener/ener/gener)
    if(myid==0) ENERREGIONLOOP(enerregion){
      // set some pointers
      FLENERNAME=FLENERREGIONNAME[enerregion];

      // when setting file pointers, need pointer to pointer or just set pointers directly (we do latter for naming reasons)
      /* flener_file=flenerreg_file[enerregion]; */
      /* ener_file=&enerreg_file[enerregion]; */
      /* gener_file=generreg_file[enerregion]; */
      ENERNAME=ENERREGIONNAME[enerregion];

      GENERNAME=GENERREGIONNAME[enerregion];

      pcum_tot=pcumreg_tot[enerregion];
      fladd_tot=fladdreg_tot[enerregion];
      sourceadd_tot=sourceaddreg_tot[enerregion];

      // flener
      if(enerregion==GLOBALENERREGION) sprintf(FLENERNAME,"flener.out");
      else sprintf(FLENERNAME,"flenerother%d.out",enerregion); // specific naming convention
      myfopen(FLENERNAME,"a+","error opening FLenergy output file\n",&flenerreg_file[enerregion]);

      // ener
      if(enerregion==GLOBALENERREGION) sprintf(ENERNAME,ENERFNAME);
      else sprintf(ENERNAME,"enerother%d.out",enerregion); // specific naming convention

      if (appendold == 0) {
        // a+ in general, or w for overwrites, but let's append for now
        myfopen(ENERNAME,"a+","error opening energy output file\n",&enerreg_file[enerregion]);
      }
      else { // if appendold==1
        trifprintf("Start setup of %s file append\n", ENERNAME);
        myfopen(ENERNAME,"a+","error opening energy output file for append\n",&enerreg_file[enerregion]);
        // NOTE(review): ener_file has not been assigned to
        // enerreg_file[enerregion] at this point (see the commented-out
        // assignments above), so appendener() may receive a stale/NULL
        // handle on the appendold==1 path -- confirm against usage.
        appendener(ener_file,pcum_tot,fladd_tot,sourceadd_tot);
      }

      // gener
      if(enerregion==GLOBALENERREGION) sprintf(GENERNAME,GENERFNAME);
      else sprintf(GENERNAME,"generjet%d.out",enerregion-1); // specific naming convention
      myfopen(GENERNAME,"a+","error opening energy output file\n",&generreg_file[enerregion]);

    }
  }


  //
  // compute divB and output to logs
  //
  divbmaxavg(GLOBALPOINT(pdump),&divbmax,&divbavg);



  //
  // ENER/FLENER/FRDOT/DEBUG output integrated diagnostics
  // for any number of predetermined regions
  //

  ENERREGIONLOOP(enerregion){
    trifprintf(".BE.%d",enerregion);

    //
    // setup some pointers
    //
    //
    // each region, local quantities
    U_tot=Ureg_tot[enerregion];
    U_final=Ureg_final[enerregion];
    pdot_tot=pdotreg_tot[enerregion];
    pdotterms_tot=pdottermsreg_tot[enerregion];
    // used for each region, related to global quantities
    fladd=fladdreg[enerregion];
    fladd_tot=fladdreg_tot[enerregion];
    fladdterms=fladdtermsreg[enerregion];
    fladdterms_tot=fladdtermsreg_tot[enerregion];
    U_init=Ureg_init[enerregion];
    U_init_tot=Ureg_init_tot[enerregion];
    pcum=pcumreg[enerregion];
    pcum_tot=pcumreg_tot[enerregion];
    pdot=pdotreg[enerregion];
    pdotterms=pdottermsreg[enerregion];
    sourceaddterms=sourceaddtermsreg[enerregion];
    sourceadd=sourceaddreg[enerregion];
    sourceadd_tot=sourceaddreg_tot[enerregion];
    diss=dissreg[enerregion];
    diss_tot=dissreg_tot[enerregion];

    if(myid==0){ // only cpu=0 writes to these files, although doesn't matter
      ener_file=enerreg_file[enerregion];
      gener_file=generreg_file[enerregion];
      flener_file=flenerreg_file[enerregion];
    }

    //
    // do some integrations (MPI-collective sums over this region)
    //
    if(dordump||doener){

      trifprintf("BI%d",enerregion);

      //
      // things that are done for EVERY region (can't sum over all regions at once since each region has different WITHINENERREGION() conditional)
      // Below assumes continuous array starting at some address -- otherwise would have to do per quantity as in prior versions with extra loops
      //

      // compute total conserved quantity
      // SUPERGODMARK: Why is this U_tot,U_tot ?
      if(integrate(NPR,U_tot,U_tot,CONSTYPE,enerregion)>=1) return(1);

      // DIRLOOP(dir)
      if(integrate((COMPDIM*2)*NPR,&pdot[0][0],&pdot_tot[0][0],SURFACETYPE,enerregion)>=1) return(1);

      // above should be sum of below, approximately
      // DIRLOOP(dir) FLLOOP(fl)
      if(integrate((COMPDIM*2)*NUMFLUXTERMS*NPR,&pdotterms[0][0][0],&pdotterms_tot[0][0][0],SURFACETYPE,enerregion)>=1) return(1);
      //DIRLOOP(dir)
      if(integrate((COMPDIM*2)*NPR,&pcum[0][0],&pcum_tot[0][0],SURFACETYPE,enerregion)>=1) return(1);

      //FLOORLOOP(floor)
      if(integrate(NUMFAILFLOORFLAGS*NPR,&fladdterms[0][0],&fladdterms_tot[0][0],CUMULATIVETYPE,enerregion)>=1) return(1);

      if(integrate(NPR,&fladd[0],&fladd_tot[0],CUMULATIVETYPE,enerregion)>=1) return(1);

      //SCLOOP(sc)
      if(integrate(NUMSOURCES*NPR,&sourceaddterms[0][0],&sourceaddterms_tot[0][0],CUMULATIVETYPE,enerregion)>=1) return(1);

      if(integrate(NPR,&sourceadd[0],&sourceadd_tot[0],CUMULATIVETYPE,enerregion)>=1) return(1);

      if(DODISS){
        if(integrate(NUMDISSVERSIONS,&diss[0],&diss_tot[0],CUMULATIVETYPE,enerregion)>=1) return(1);
      }

      // below was subsumed into its own full enerregion (should have done so in first place!)
      //
      // OUTSIDEHORIZONENERREGION STUFF (i.e. only done once over that region)
      // if(enerregion==OUTSIDEHORIZONENERREGION){
      // now integrate horizon fluces over all CPUs
      // this is also done during actual evolution in metric.c, but needed here too so diagnostics are correctly updated
      // doesn't hurt evolution at all
      // if(integrate(horizonflux,horizonflux_tot,SURFACETYPE,enerregion)>=1) return(1);
      // if(integrate(horizoncum,horizoncum_tot,SURFACETYPE,enerregion)>=1) return(1);

      // assume DOSELFGRAVVSR has already cumulated dMvsr into dMvsr_tot and computed Mvsr_tot and phivsr_tot

      // }


      //
      // GLOBALENERREGION STUFF (i.e. only done once over that region or just simply done once at all and just keeping within loop for simplicity)
      if(enerregion==GLOBALENERREGION){
        if(DOLUMVSR){
          //for(ii=0;ii<ncpux1*N1;ii++)
          if(integrate(ncpux1*N1,&lumvsr[0],&lumvsr_tot[0],CUMULATIVETYPE,enerregion)>=1) return(1);
        }

        if(DODISSVSR){
          for(dissloop=0;dissloop<NUMDISSVERSIONS;dissloop++){// this loop is over pointers, not a continuous memory space!
            // for(ii=0;ii<ncpux1*N1;ii++)
            if(integrate(ncpux1*N1,&dissvsr[dissloop][0],&dissvsr_tot[dissloop][0],CUMULATIVETYPE,enerregion)>=1) return(1);
          }
        }


        // special jet2 accounting
        //DIRLOOP(dir) FLLOOP(fl)
        if(integrate((COMPDIM*2)*NUMFLUXTERMS*NPR,&pdottermsjet2[0][0][0],&pdottermsjet2_tot[0][0][0],SURFACETYPE,enerregion)>=1) return(1);

        // debug
        if(DODEBUG){
          //TSCALELOOP(tscale)
          // below is special integratel and for CONSTYPE performs integration over GLOBALMACP0A3(failfloorcount,i,j,k,indexfinalstep,tscale,pl) for each enerregion
          if(integratel(2*NUMTSCALES*NUMFAILFLOORFLAGS,&failfloorcountlocal[0][0][0],&failfloorcountlocal_tot[0][0][0],CONSTYPE,enerregion)>=1) return(1);
        }
      }
      trifprintf("AI%d",enerregion);
    }// end if dordump||doener



    //
    // initialize the total conserved counters
    //
    if (call_code == INIT_OUT || firsttime) {
      if(RESTARTMODE==0) PLOOP(pliter,pl) U_init_tot[pl] = U_init[pl] = U_tot[pl];
      // otherwise read from restart file
    }

    //
    // output some interesting diagnostics to stderr/log files.
    //
    if (call_code == FINAL_OUT) {
      PLOOP(pliter,pl) U_final[pl] = U_tot[pl];

      if(GAMMIEENER){
        trifprintf("\n\nEnergy: ini,fin,del: %21.15g %21.15g %21.15g\n",
                   U_init[UU], U_final[UU], (U_final[UU] - U_init[UU]) / U_init[UU]);

        trifprintf("\n\nMass: ini,fin,del: %21.15g %21.15g %21.15g\n",
                   U_init[RHO], U_final[RHO], (U_final[RHO] - U_init[RHO]) / U_init[RHO]);
      }
      else{
        trifprintf("\n");
        PLOOP(pliter,pl){
          // ftemp0: raw change; ftemp1: change minus all accounted
          // sources (floors, source terms, boundary flux cumulants) --
          // should be ~0 if conservation accounting is complete
          ftemp0=(U_final[pl] - U_init[pl]);
          ftemp1=(U_final[pl]-fladd_tot[pl]-sourceadd_tot[pl]-(pcum_tot[X1DN][pl]+pcum_tot[X2DN][pl]+pcum_tot[X3DN][pl]) + (pcum_tot[X1UP][pl]+pcum_tot[X2UP][pl]+pcum_tot[X3UP][pl]) - U_init[pl]);
          trifprintf("U[%d]: ini,fin,fdf,del,tfdf,tdel: %21.15g %21.15g %21.15g %21.15g %21.15g %21.15g\n",
                     pl,
                     U_init[pl],
                     U_final[pl],
                     U_init[pl]!=0.0 ? ftemp0/U_init[pl] : 0.0,
                     ftemp0,
                     U_init[pl]!=0.0 ? ftemp1/U_init[pl] : 0.0,
                     ftemp1 );
        }
      }
    }


    //
    // output some ener diagnostics to ener, flener, debug, and probe(only cpu specific one) files
    //
    if (doener){
      if(1||GAMMIEENER){
        // 6+3=9 terms
        myfprintf(gener_file, "%21.15g %21.15g %21.15g %21.15g %21.15g %21.15g ", t, U_tot[RHO], U_tot[U3], U_tot[UU], GLOBALMACP0A1(pdump,N1 / 2,N2 / 2,N3 / 2,UU) * pow(GLOBALMACP0A1(pdump,N1 / 2,N2 / 2,N3 / 2,RHO), -gam), GLOBALMACP0A1(pdump,N1 / 2,N2 / 2,N3 / 2,UU));
        myfprintf(gener_file, "%21.15g %21.15g %21.15g ", pdot_tot[X1DN][RHO], pdot_tot[X1DN][RHO]-pdot_tot[X1DN][UU], pdot_tot[X1DN][U3]);
      }
      if(1){
        // SM use gammie.m macro, jrdpener, gammieener's


        // ENER FILE (only dir and pl)
        //

        // 2+NPR+COMPDIM*2*NPR+NPR+(2) terms
        // see jrdp3dener in gammie.m
        myfprintf(ener_file,"%21.15g %ld ",t ,realnstep);
        PLOOP(pliter,pl) myfprintf(ener_file, "%21.15g ", U_tot[pl]);
        DIRLOOP(dir) PLOOP(pliter,pl) myfprintf(ener_file, "%21.15g ", pdot_tot[dir][pl]);
        // COMPDIM*2*NPRDUMP more terms
        DIRLOOP(dir) PLOOP(pliter,pl) myfprintf(ener_file, "%21.15g ", pcum_tot[dir][pl]);
        PLOOP(pliter,pl) myfprintf(ener_file, "%21.15g ", fladd_tot[pl]);
        PLOOP(pliter,pl) myfprintf(ener_file, "%21.15g ", sourceadd_tot[pl]);
        myfprintf(ener_file, "%21.15g %21.15g ", divbmax,divbavg);
        if(DODISS) for(dissloop=0;dissloop<NUMDISSVERSIONS;dissloop++) myfprintf(ener_file, "%21.15g ", diss_tot[dissloop]);
        else for(dissloop=0;dissloop<NUMDISSVERSIONS;dissloop++) myfprintf(ener_file, "%21.15g ", -1.0); // dummy space holder
      }


      // FLENER FILE (dir, pl, and linear summed terms for flux, floor, and source quantities)
      // (note the change in ordering for external file read in macros/functions)
      myfprintf(flener_file,"%21.15g %ld ",t,realnstep);
      DIRLOOP(dir) PLOOP(pliter,pl) FLLOOP(fl) myfprintf(flener_file, "%21.15g ", pdotterms_tot[dir][fl][pl]);
      PLOOP(pliter,pl) FLOORLOOP(floor) myfprintf(flener_file, "%21.15g ", fladdterms_tot[floor][pl]);
      PLOOP(pliter,pl) SCLOOP(sc) myfprintf(flener_file, "%21.15g ", sourceaddterms_tot[sc][pl]);


      if(enerregion==GLOBALENERREGION){ // only for total region for now
        // only care about inner and outer radial part
        for(dir=0;dir<=1;dir++) PLOOP(pliter,pl) FLLOOP(fl) myfprintf(flener_file, "%21.15g ", pdottermsjet2_tot[dir][fl][pl]); // jet/pole values only

        if(DOLUMVSR){
          // luminosity vs radius
          myfprintf(lumener_file,"%21.15g %ld ",t,realnstep);
          for(ii=0;ii<ncpux1*N1;ii++) myfprintf(lumener_file, "%21.15g ", lumvsr_tot[ii]);
        }

        if(DODISSVSR){
          for(dissloop=0;dissloop<NUMDISSVERSIONS;dissloop++){
            // dissipation vs radius
            myfprintf(dissener_file[dissloop],"%21.15g %ld ",t,realnstep);
            for(ii=0;ii<ncpux1*N1;ii++) myfprintf(dissener_file[dissloop], "%21.15g ", dissvsr_tot[dissloop][ii]);
          }
        }

      }



      if(enerregion==OUTSIDEHORIZONENERREGION){ // only for horizon region for now


        if(DOEVOLVEMETRIC){
          myfprintf(metricparmsener_file,"%21.15g %ld ",t,realnstep);

          // below 2 have been subsumed into its own full ener region (should have done in first place!)
          //PLOOP(pliter,pl) myfprintf(metricparmsener_file, "%21.15g ",horizonflux_tot[pl]);
          //PLOOP(pliter,pl) myfprintf(metricparmsener_file, "%21.15g ",horizoncum_tot[pl]);

          // first 2 are cumulative black hole mass and J in black hole metric units
          // next 1 is what black hole mass would be if adding so-far cumulated mass
          // next 1 is a if adding so-far mass and angular momentum
          // next 1 is j=a/M if adding so-far mass and angular momentum
          // next several items are original and present actual mass, a, j, Q, and q

          // by this time of the diagnostics, dE, dJ include horizon flux but MBH and a aren't yet updated.
          // however, horizoncum_tot IS updated so that for this enerregion u0-horizoncum_tot[0] is conserved exactly
          myfprintf(metricparmsener_file,"%21.15g %21.15g %21.15g %21.15g %21.15g %21.15g %21.15g %21.15g %21.15g %21.15g %21.15g %21.15g %21.15g %21.15g %21.15g %21.15g %21.15g %21.15g %21.15g %21.15g %21.15g %21.15g %21.15g %21.15g %21.15g %d ",dE,dJ,MBH0+dE,a0+dabh,(a0+dabh)/(SMALL+MBH0+dE),MBH0,MBH,a0,a0/(SMALL+MBH0),a,a/(SMALL+MBH),QBH0,QBH0/(SMALL+MBH0),QBH,QBH/(SMALL+MBH),EP30,EP30/(SMALL+MBH0),EP3,EP3/(SMALL+MBH),THETAROT0,THETAROT0/(SMALL+MBH0),THETAROT,THETAROT/(SMALL+MBH),Rhor,Risco,horizoni+N1*horizoncpupos1);
        }

        if(DOSELFGRAVVSR){
          // self-gravitational potential vs radius
          fileiter=0;
          myfprintf(selfgravener_file[fileiter],"%21.15g %ld ",t,realnstep);
          DUMPGRAVLOOP(ii) myfprintf(selfgravener_file[fileiter], "%21.15g ", rcent_tot[ii]);

          // just self-gravitating part of Mvsr
          fileiter=1;
          myfprintf(selfgravener_file[fileiter],"%21.15g %ld ",t,realnstep);
          DUMPGRAVLOOP(ii) myfprintf(selfgravener_file[fileiter], "%21.15g ", Mvsr_tot[ii]);

          // true Mvsr with "point" mass included
          fileiter=2;
          myfprintf(selfgravener_file[fileiter],"%21.15g %ld ",t,realnstep);
          DUMPGRAVLOOP(ii) myfprintf(selfgravener_file[fileiter], "%21.15g ", MBH+Mvsr_tot[ii]);

          // self-gravitating part of phivsr
          fileiter=3;
          myfprintf(selfgravener_file[fileiter],"%21.15g %ld ",t,realnstep);
          DUMPGRAVLOOP(ii) myfprintf(selfgravener_file[fileiter], "%21.15g ", phivsr_tot[ii]);

          // true phivsr that comes into g_{tt} = -exp(2\phi)
          fileiter=4;
          myfprintf(selfgravener_file[fileiter],"%21.15g %ld ",t,realnstep);
          DUMPGRAVLOOP(ii){
            phibh = phibh_compute(MBH,a,rcent_tot[ii], M_PI*0.5);
            if(!isfinite(phibh)) phibh=-BIG; // in reality g_{tt} stays well-defined
            myfprintf(selfgravener_file[fileiter], "%21.15g ", phibh+phivsr_tot[ii]);
          }
          fileiter=5;
          myfprintf(selfgravener_file[fileiter],"%21.15g %ld ",t,realnstep);
          DUMPGRAVLOOP(ii) myfprintf(selfgravener_file[fileiter], "%21.15g ",dTrrvsr_tot[ii]);

          fileiter=6;
          myfprintf(selfgravener_file[fileiter],"%21.15g %ld ",t,realnstep);
          DUMPGRAVLOOP(ii) myfprintf(selfgravener_file[fileiter], "%21.15g ", dVvsr_tot[ii]);

          fileiter=7;
          myfprintf(selfgravener_file[fileiter],"%21.15g %ld ",t,realnstep);
          DUMPGRAVLOOP(ii) myfprintf(selfgravener_file[fileiter], "%21.15g ", vrsqvsr_tot[ii]);

          fileiter=8;
          myfprintf(selfgravener_file[fileiter],"%21.15g %ld ",t,realnstep);
          DUMPGRAVLOOP(ii) myfprintf(selfgravener_file[fileiter], "%21.15g ", Jvsr_tot[ii]);

        }

      }




      if(enerregion==GLOBALENERREGION){ // only for total region for now

        // PROBE FILE
        if(PROBEFILE&&DOENERDIAG){
          // !!per CPU!! probe file NPRDUMP*3 terms
          //
          // sample every quarter of the grid in each direction (at least 1)
#define ITER1 MAX(N1/4,1)
#define ITER2 MAX(N2/4,1)
#define ITER3 MAX(N3/4,1)
          // 2D probe
          // k = N3/2+1; for(i=0;i<N1;i+=ITER1) for(j=0;j<N2;j+=ITER2){
          for(i=0;i<N1;i+=ITER1) for(j=0;j<N2;j+=ITER2) for(k=0;k<N3;k+=ITER3){
            // 2D probe (consistent with original SM macro)
            // PDUMPLOOP(pliter,pl) fprintf(probe_file, "%d %d %d %ld %21.15g %21.15g\n",startpos[1]+i,startpos[2]+j,pl,realnstep, t, GLOBALMACP0A1(p,i,j,k,pl));
            // 3d probe
            PDUMPLOOP(pliter,pl) fprintf(probe_file, "%d %d %d %d %ld %21.15g %21.15g\n",startpos[1]+i,startpos[2]+j,startpos[3]+k,pl,realnstep, t, GLOBALMACP0A1(pdump,i,j,k,pl));
          }
          fflush(probe_file);
        }


        // DEBUG FILE
        if(DODEBUG){
          myfprintf(debug_file,"%21.15g %ld ",t,realnstep);
          myfprintf(debug_file,"%21.15g %21.15g ",dt, 1.0*nstroke/(2.0*totalzones));
          myfprintf(debug_file,"%21.15g %21.15g %21.15g ",wavedtglobal,sourcedtglobal,gravitydtglobal);
          myfprintf(debug_file,"%d %d %d ",waveglobaldti[1],waveglobaldtj[1],waveglobaldtk[1]);
          myfprintf(debug_file,"%d %d %d ",waveglobaldti[2],waveglobaldtj[2],waveglobaldtk[2]);
          myfprintf(debug_file,"%d %d %d ",waveglobaldti[3],waveglobaldtj[3],waveglobaldtk[3]);
          myfprintf(debug_file,"%d %d ",horizoni,horizoncpupos1);
          FAILFLOORLOOP(indexfinalstep,tscale,floor){
            // counter type (integer vs float) is a compile-time choice
#if(COUNTTYPE==LONGLONGINTTYPE)
            myfprintf(debug_file, "%lld ", failfloorcountlocal_tot[indexfinalstep][tscale][floor]);
#elif(COUNTTYPE==DOUBLETYPE)
            myfprintf(debug_file, "%21.15g ", failfloorcountlocal_tot[indexfinalstep][tscale][floor]);
#endif
          }
        }
      }



      // terminate this step's row in each file
      myfprintf(flener_file,"\n");
      myfprintf(ener_file,"\n");
      myfprintf(gener_file,"\n");
      if(enerregion==GLOBALENERREGION){
        if(DOLUMVSR) myfprintf(lumener_file,"\n");
        if(DODISSVSR) for(dissloop=0;dissloop<NUMDISSVERSIONS;dissloop++) myfprintf(dissener_file[dissloop],"\n");
        if(DODEBUG) myfprintf(debug_file,"\n");
      }
      if(enerregion==OUTSIDEHORIZONENERREGION){

        if(DOEVOLVEMETRIC) myfprintf(metricparmsener_file,"\n");
        if(DOSELFGRAVVSR){
          for(fileiter=0;fileiter<NUMSELFGRAV;fileiter++) myfprintf(selfgravener_file[fileiter],"\n");
        }
      }

      trifprintf("W%d",enerregion);

    }// end if doener // done if writing to file

    //
    // close the files at end of simulation
    // (inside the enerregion loop: per-region handles close each pass,
    // single-region handles close only when their region comes up)
    //
    if(call_code==FINAL_OUT){
      myfclose(&flener_file,"Couldn't close flener_file\n");
      if(1) myfclose(&ener_file,"Couldn't close ener_file\n");
      if(1||GAMMIEENER) myfclose(&gener_file,"Couldn't close gener_file\n");


      if(enerregion==OUTSIDEHORIZONENERREGION){
        if(DOEVOLVEMETRIC) myfclose(&metricparmsener_file,"Couldn't close metricparmsener_file\n");
        if(DOSELFGRAVVSR){
          for(fileiter=0;fileiter<NUMSELFGRAV;fileiter++) myfclose(&selfgravener_file[fileiter],"Couldn't close selfgravener_file\n");
        }
      }

      if(enerregion==GLOBALENERREGION){
        if(DOLUMVSR) myfclose(&lumener_file,"Couldn't close lumener_file\n");
        if(DODISSVSR) for(dissloop=0;dissloop<NUMDISSVERSIONS;dissloop++) myfclose(&dissener_file[dissloop],"Couldn't close dissener_file\n");
        if(DODEBUG) myfclose(&debug_file,"Couldn't close debug_file\n");
        if(PROBEFILE&&DOENERDIAG) fclose(probe_file);
      }
    }
  }// end enerregion loop



  trifprintf("E");
  firsttime = 0;
  return (0);




}
596 
597 
// Re-synchronize the ener file after a restart: scan it for the line whose
// time matches the current restart time t, load the cumulative counters
// (pcum/fladd/sourceadd) from that line, then truncate the file just past it
// (via a temp-file copy and rename) so subsequent appends continue cleanly.
// Exits the program (myexit) if no matching time is found or files can't be
// opened. Only CPU=0 does any work.
//
// ener_file          - open handle to the existing ener file (read/append)
// pcum_totvar        - [COMPDIM*2][NPR] cumulative boundary-flux counters (filled here)
// fladd_totvar       - [NPR] cumulative floor-addition counters (filled here)
// sourceadd_totvar   - [NPR] cumulative source-term counters (filled here)
//
// NOTE(review): fscanf uses "%lf", which matches double* -- assumes SFTYPE
// is double; confirm against the SFTYPE typedef. fscanf return values are
// unchecked, so a malformed line silently leaves stale values.
void appendener(FILE* ener_file,SFTYPE (*pcum_totvar)[NPR],SFTYPE*fladd_totvar,SFTYPE *sourceadd_totvar)
{
  int gotit;
  SFTYPE tcheck;
  int l,pl,pliter,dir;
  long fpos0;
  FILE *ener_file_temp;
  char dfnam[MAXFILENAME],dfnamback[MAXFILENAME], dfnamtemp[MAXFILENAME];

  // only CPU=0 does anything here
  if(myid==0){

    rewind(ener_file); // go to start

    gotit = 0;
    while ((!feof(ener_file)) && (gotit == 0)) {

      // first column of each line is the output time
      fscanf(ener_file, "%lf", &tcheck);

      // match within half an ener-dump interval
      if (fabs(tcheck - t) < 0.5 *DTdumpgen[ENERDUMPTYPE]) {
        gotit = 1;
        // walk the NUMENERVAR columns of the matched line; only the
        // cumulative ones (pcum/fladd/sourceadd block) are read back
        for (l = 1; l <= NUMENERVAR; l++) {
          if ((l > 3+NPR+COMPDIM*2*NPR) && (l < 3+NPR+2*COMPDIM*2*NPR+NPR)) {
            DIRLOOP(dir) PLOOP(pliter,pl) {
              fscanf(ener_file, "%lf", &pcum_totvar[dir][pl]);
              l++;
            }
            PLOOP(pliter,pl) {
              fscanf(ener_file, "%lf", &fladd_totvar[pl]);
              l++;
            }
            PLOOP(pliter,pl) {
              fscanf(ener_file, "%lf", &sourceadd_totvar[pl]);
              l++;
            }
          }
        }
      } else {
        // skip this bad line
        while ((fgetc(ener_file) != '\n') && (!feof(ener_file)));
      }
      // continue after successful get since successful get is good
      // data and should keep since corresponds to dump one is
      // keeping
      fpos0 = ftell(ener_file); // position to continue
      // writting at if successful get
    }
    if (gotit == 0) {
      dualfprintf(fail_file,
                  "Never found right time in loss file when appending: looking for t=%21.15g lastt=%21.15g\n",
                  t, tcheck);
      myexit(1);
    } else {
      dualfprintf(logfull_file,
                  "found goodtime t=%21.15g (wanted %21.15g) to restart ener file\n",
                  tcheck, t);
      sprintf(dfnamtemp, "%s0_ener%s.temp", DATADIR, ".out");
      sprintf(dfnam, "%sener%s", DATADIR, ".out");
      sprintf(dfnamback, "%s0_ener%s.back", DATADIR, ".out");

      // now that done, fix up file:
      // copy everything up to fpos0 into a temp file, keep the original
      // as a backup, and promote the temp file to the normal name
      if ((ener_file_temp = fopen(dfnamtemp, "wt")) == NULL) {
        dualfprintf(fail_file,
                    "Cannot open temp ener file for appending: %s\n",
                    dfnamtemp);
        myexit(1);
      } else {
        rewind(ener_file);
        while (ftell(ener_file) < fpos0 + 1) { // +1 is for
          // '\n' at end
          // of line
          fputc(fgetc(ener_file), ener_file_temp);
        }
        fclose(ener_file_temp);
        fclose(ener_file);
        rename(dfnam, dfnamback); // move old to backup location
        rename(dfnamtemp, dfnam); // move new to old name(normal
        // name)
        // reopen loss_file (now normal name)

        if ((ener_file = fopen(dfnam, "at")) == NULL) {
          dualfprintf(fail_file,
                      "2: error opening ener output file %s\n", dfnam);
          myexit(1);
        }
        trifprintf("End setup of ener file append\n");
      } // end else if can open temp file
    } // end else if gotit==1
  }
}
690 
// Compute the maximum and average of |div B| over the diagnostic loop,
// reduced across all CPUs (MPI_MAX / MPI_SUM to CPU=0), and log both the
// per-CPU and global values.
//
// prim        - primitive-variable array used by setfdivb()
// ptrdivbmax  - out: global maximum divB (valid on CPU=0 after MPI_Reduce)
// ptrdivbavg  - out: global average divB (sum over all zones / totalzones)
//
// NOTE(review): the per-CPU log divides by N1*N2*N3, but the final average
// divides by totalzones -- the two averages use different normalizations
// when zones are skipped by the continue below.
void divbmaxavg(FTYPE (*prim)[NSTORE2][NSTORE3][NPR],FTYPE*ptrdivbmax,FTYPE*ptrdivbavg)
{
  int i,j,k;
  int imax=0,jmax=0,kmax=0;  // location of the max (local to this CPU)
  FTYPE divb;
  FTYPE divbmax=0,divbavg=0;
  FTYPE divbmaxsend,divbavgsend;



  LOOPDIVB { // diagonostic loop // OPENMPOPTMARK: Could optimize this, but not frequently done

    // avoid measuring divb=0 where don't treat fluxes
    if(ISSPCMCOORD(MCOORD) && (startpos[1]+i>=totalsize[1]-1 || startpos[1]+i<0) ) continue;


    // doesn't need geom, just use global gdet
    // GODMARK: use of globals (for diagnostic this is probably ok)
    setfdivb(&divb, prim, GLOBALPOINT(pstagdump), GLOBALPOINT(udump), GLOBALPOINT(Bhatdump), i, j, k); // udump set externally GODMARK
    if (divb > divbmax) {
      imax = i;
      jmax = j;
      kmax = k;
      divbmax = divb;
    }
    divbavg += divb;
  }

  // PER CPU
  logfprintf(" proc: %04d : divbmax: %d %d %d : %21.15g divbavg: %21.15g\n", myid, imax, jmax, kmax, divbmax, divbavg / ((FTYPE) (N1*N2*N3)));

#if(USEMPI) // give CPU=0 total
  // MPI forbids aliased send/recv buffers, hence the *send copies
  divbmaxsend = divbmax;
  divbavgsend = divbavg;

#if USINGORANGE == 1
  MPI_Barrier(MPI_COMM_GRMHD); // GODMARK: OpenMP on Orange crashes without this
#endif
  MPI_Reduce(&divbmaxsend, &divbmax, 1, MPI_FTYPE, MPI_MAX, MPIid[0], MPI_COMM_GRMHD);
#if USINGORANGE == 1
  MPI_Barrier(MPI_COMM_GRMHD); // GODMARK: OpenMP on Orange crashes without this
#endif
  MPI_Reduce(&divbavgsend, &divbavg, 1, MPI_FTYPE, MPI_SUM, MPIid[0], MPI_COMM_GRMHD);
#if USINGORANGE == 1
  MPI_Barrier(MPI_COMM_GRMHD); // GODMARK: OpenMP on Orange crashes without this
#endif

#endif
  divbavg /= (FTYPE) (totalzones);

  // Total over all CPUs
  myfprintf(logfull_file," divbmax: %21.15g divbavg: %21.15g\n",divbmax, divbavg);
  *ptrdivbmax=divbmax;
  *ptrdivbavg=divbavg;
}
747 
748 
749 
750 
751 
// Select the ener-file restart policy and report it through appendoldvar:
//   0 -> ener.out is reconciled manually on restart (current policy)
//   1 -> append automatically (historical option, no longer used)
void setrestart(int*appendoldvar)
{
  // Manual handling is the only supported mode now.
  *appendoldvar = 0;
}
759 
760 
769 void gettotal(int doall, int numvars, SFTYPE* vars[],int*sizes,SFTYPE*vars_tot[])
770 {
771  int j,k;
772  SFTYPE *ptrsend;
773  SFTYPE send;
774  int didmalloc;
775 
776  // for 1 CPU
777  if (numprocs == 1) {
778  // must use _tot since can't overwrite normal global integrator
779  // variable
780  for(k=0;k<numvars;k++){
781  for(j=0;j<sizes[k];j++){
782  vars_tot[k][j] = vars[k][j];
783  }
784  }
785  } else {
786  // give CPU=0 the totals
787 #if(USEMPI)
788 
789  for(k=0;k<numvars;k++){
790 
791  didmalloc=0;
792  // at least assume each pointer is a continuous array of memory and avoid extra MPI_Reduce()'s
793  // send and receive can't be same address, hence "ptrsend" variable
794  if(&vars[k][0] == &vars_tot[k][0]){
795  // then need to make scratch space since send and recieve can't be same address
796  // assume not too large
797  ptrsend = (SFTYPE*) malloc(sizes[k]*sizeof(SFTYPE));
798  if(ptrsend==NULL){
799  dualfprintf(fail_file,"Couldn't get memory for ptrsend in gettotal with k=%d numvars=%d sizes=%d\n",k,numvars,sizes[k]);
800  myexit(496365763);
801  }
802  didmalloc=1;
803  // now copy over to temporary space
804  for(j=0;j<sizes[k];j++){
805  ptrsend[j] = vars[k][j];
806  }
807  }
808  else{
809  // input and output not same space so can use vars directly as input
810  ptrsend = &vars[k][0];
811  }
812 
813  if(doall==0) MPI_Reduce(ptrsend, &vars_tot[k][0], sizes[k], MPI_SFTYPE, MPI_SUM, MPIid[0], MPI_COMM_GRMHD);
814  else MPI_Allreduce(ptrsend, &vars_tot[k][0], sizes[k], MPI_SFTYPE, MPI_SUM, MPI_COMM_GRMHD);
815 
816  // free malloc'ed memory
817  if(didmalloc) free(ptrsend);
818 
819  // OLD WAY:
820  // for(j=0;j<sizes[k];j++){
821  // send=vars[k][j];
822  // // send and receive can't be same address, hence "send" variable
823  // MPI_Reduce(&send, &vars_tot[k][j], 1, MPI_SFTYPE, MPI_SUM, MPIid[0], MPI_COMM_GRMHD);
824  // }
825 
826 
827  } // end loop over pointers [k]
828 
829 
830 
831 #endif
832  }
833 }
834 
835 
836 
838 void getalltotal(int numvars, SFTYPE* vars[],int*sizes,SFTYPE*vars_tot[])
839 {
840  int j,k;
841  SFTYPE send;
842 
843  // for 1 CPU
844  if (numprocs == 1) {
845  // must use _tot since can't overwrite normal global integrator
846  // variable
847  for(k=0;k<numvars;k++){
848  for(j=0;j<sizes[k];j++){
849  vars_tot[k][j] = vars[k][j];
850  }
851  }
852  } else {
853  // give all CPUs the totals
854 #if(USEMPI)
855  for(k=0;k<numvars;k++){
856  for(j=0;j<sizes[k];j++){
857  send=vars[k][j];
858  // send and receive can't be same address, hence "send" variable
859  // GODMARK: This could be optimized like gettotal, but not necessary for now since only used with CUMULATIVETYPE3 and not used right now
860  MPI_Allreduce(&send, &vars_tot[k][j], 1, MPI_SFTYPE, MPI_SUM, MPI_COMM_GRMHD);
861  }
862  }
863 #endif
864  }
865 }
866 
867 
869 void gettotall(int numvars, CTYPE* vars[],int*sizes,CTYPE *vars_tot[])
870 {
871  int j,k;
872  CTYPE send;
873 
874  // for 1 CPU
875  if (numprocs == 1) {
876  // must use _tot since can't overwrite normal global integrator
877  // variable
878  for(k=0;k<numvars;k++){
879  for(j=0;j<sizes[k];j++){
880  vars_tot[k][j] = vars[k][j];
881  }
882  }
883  } else {
884  // give CPU=0 the totals
885 #if(USEMPI)
886  for(k=0;k<numvars;k++){
887  for(j=0;j<sizes[k];j++){
888  send=vars[k][j];
889  // send and receive can't be same address, hence "send" variable
890  MPI_Reduce(&send, &vars_tot[k][j], 1, MPI_CTYPE, MPI_SUM, MPIid[0], MPI_COMM_GRMHD);
891  }
892  }
893 #endif
894  }
895 }
896 
897 
// Accumulate the NPR conserved quantities over one energy region into
// vars[0..NPR-1] (this CPU's local contribution; cross-CPU summation is
// done elsewhere via integrate()/gettotal()).
//
// enerregion - which energy region's bounds to use (sets global enerpos)
// vars       - out: per-quantity sums, zeroed here first
// Returns 0 on success, 1 if get_state/primtoU fails.
int constotal(int enerregion, SFTYPE *vars)
{
  int i,j,k,pl,pliter;
  FTYPE U[NPR];           // conserved quantities recomputed from primitives
  struct of_state q;
  FTYPE ftemp[NPR];
  struct of_geom geomdontuse;
  struct of_geom *ptrgeom=&geomdontuse;


  PLOOP(pliter,pl) vars[pl]= 0.0;

  // select the active region bounds (global consumed by WITHINENERREGION)
  enerpos=enerposreg[enerregion];

  ZLOOP{ // diagonostic loop // OPENMPOPTMARK: Could optimize this, but not frequently done
    if(WITHINENERREGION(enerpos,i,j,k)){

      if(DOENOFLUX != NOENOFLUX){
        // GODMARK
        // then need to use stored averaged conserved quantity
        // assume done with all timesteps when here, so using unew
        // for substep access one would need to know which substep and then use the correct version (ulast) on all substeps except in between full steps when unew is the correct answer
        PLOOP(pliter,pl){
          ftemp[pl]=GLOBALMACP0A1(udump,i,j,k,pl)*dVF;
          vars[pl] += ftemp[pl];
        }
      }
      else{
        get_geometry(i,j,k,CENT,ptrgeom) ;
        if(!failed){
          if(get_state(GLOBALMAC(pdump,i,j,k),ptrgeom,&q)>=1) return(1);
          if(primtoU(UDIAG,GLOBALMAC(pdump,i,j,k),&q,ptrgeom,U, NULL)>=1) return(1);
        }
        // NOTE(review): if `failed` is set, U is used uninitialized below --
        // presumably `failed` never holds on this path; confirm.

        // must use centered primitive for non-field stuff if staggered (or all if non-staggered) so *true* primitive-based conservative that we are really tracking
        PLOOP(pliter,pl){
          // precedence note: && binds tighter than ||, so this reads
          // (non-field quantity AND staggered) OR (not staggered at all)
          if(BPL(pl)==0 && FLUXB==FLUXCTSTAG || FLUXB!=FLUXCTSTAG){
            ftemp[pl]=U[pl]*dVF;
            vars[pl] += ftemp[pl];
          }
        }

        // must use staggered field because using true emf fluxes
        if(FLUXB==FLUXCTSTAG){
          PLOOP(pliter,pl){
            if(BPL(pl)==1){
              ftemp[pl]=GLOBALMACP0A1(unewglobal,i,j,k,pl)*dVF;
              vars[pl] += ftemp[pl];
            }
          }
        }

      }
    }
  }
  return(0);
}
956 
957 
960 int constotal2(int enerregion, SFTYPE *vars)
961 {
962  int i,j,k,pl,pliter;
963  FTYPE U[NPR];
964  struct of_geom geom;
965  struct of_state q;
966  FTYPE ftemp[NPR];
967  // int dissloop;
968 
969  vars[0]= 0.0;
970 
971  enerpos=enerposreg[enerregion];
972 
973  ZLOOP{ // diagonostic loop // OPENMPOPTMARK: Could optimize this, but not frequently done
974  if(WITHINENERREGION(enerpos,i,j,k) ){
975  // for(dissloop=0;dissloop<NUMDISSVERSIONS;dissloop++){
976  vars[0] += GLOBALMACP0A1(dissfunpos,i,j,k,0);
977  // }
978  }
979  }
980  return(0);
981 }
982 
983 
984 
985 
/// Sum the failure/floor event counters over one energy region on this CPU.
///
/// enerregion : which region (selects enerposreg[] bounds)
/// vars       : output array of 'num' accumulated counts
/// num        : number of counter slots to total
/// Returns 0 (no failure paths).
int counttotal(int enerregion, CTYPE *vars, int num)
{
  int i,j,k,variter;

  // clear accumulators before summing
  for(variter=0;variter<num;variter++) vars[variter]= 0;

  enerpos=enerposreg[enerregion];

  // looping over [0][variter] is equivalent to original FINALSTEPLOOP TSCALELOOP FLLOOP since at least last two entries in failfloorcount array are continuous in memory

  // ZLOOP: diagonostic loop // OPENMPOPTMARK: Could optimize this, but not frequently done

  // NOTE: ZLOOP expands over the locals i,j,k declared above (macro
  // variable capture) — do not rename them.
  ZLOOP {
    if(WITHINENERREGION(enerpos,i,j,k) ){
      for(variter=0;variter<num;variter++) vars[variter] += GLOBALMACP0A3(failfloorcount,i,j,k,0,0,variter) ;
    }
  }

  // below for restarting with counters in case no spatial counters
  // see restart.c and restart_read_defs_new()
  // Deliberate negative indices: reads one ghost cell outside the active
  // grid where restart-carried counters are stashed — presumably valid
  // because failfloorcount includes boundary storage; confirm against
  // restart.c if modifying.
  i=-N1NOT1;
  j=-N2NOT1;
  k=-N3NOT1;
  for(variter=0;variter<num;variter++) vars[variter] += GLOBALMACP0A3(failfloorcount,i,j,k,0,0,variter) ;


  return(0);
}
1015 
1016 
1017 
1018 #define MAXPTRS 10
1019 
1021 int integrate(int numelements, SFTYPE * var,SFTYPE *var_tot,int type, int enerregion)
1022 {
1023  SFTYPE *totalptrs[MAXPTRS],*totaloptrs[MAXPTRS];
1024  int totalsizes[MAXPTRS],numptrs;
1025 
1026  switch(type){
1027  case CONSTYPE:
1028  if(constotal(enerregion,var)>=1) return(1);
1029  totalsizes[0]=numelements;
1030  gettotal(0,1,&var,totalsizes,&var_tot); // if only 1 object, then just send address
1031  break;
1032  // below not used or deprecated
1033  // case CONSTYPE2:
1034  // if(constotal2(enerregion,var)>=1) return(1);
1035  // totalsizes[0]=1;
1036  // gettotal(0,1,&var,totalsizes,&var_tot); // if only 1 object, then just send address
1037  // break;
1038  case SURFACETYPE:
1039  case CUMULATIVETYPE:
1040  totalsizes[0]=numelements;
1041  gettotal(0,1,&var,totalsizes,&var_tot);
1042  break;
1043  case CUMULATIVETYPE2:
1044  // CUMULATIVETYPE2 for 1 data object per ii
1045  totalsizes[0]=1; // GODMARK: don't see point in this since just ignore numelements
1046  gettotal(0,1,&var,totalsizes,&var_tot);
1047  // this case makes no sense currently, so fail
1048  myexit(4983463);
1049  break;
1050  case CUMULATIVETYPE3:
1051  // Like CUMULATIVETYPE, but put result in all cores.
1052  totalsizes[0]=numelements;
1053  // getalltotal(1,&var,totalsizes,&var_tot);
1054  gettotal(1,1,&var,totalsizes,&var_tot);
1055  break;
1056  default:
1057  dualfprintf(fail_file,"No defined type=%d in integrate\n",type);
1058  myexit(1);
1059  }
1060  return(0);
1061 }
1062 
1063 
1065 int integratel(int numelements, CTYPE * var, CTYPE *var_tot,int type, int enerregion)
1066 {
1067  CTYPE *totalptrs[MAXPTRS],*totaloptrs[MAXPTRS];
1068  int totalsizes[MAXPTRS],numptrs;
1069 
1070  switch(type){
1071  case CONSTYPE:
1072  // first add up over entire grid for each CPU
1073  if(counttotal(enerregion,var,numelements)>=1) return(1);
1074  totalsizes[0]=numelements;
1075  gettotall(1,&var,totalsizes,&var_tot);
1076  break;
1077  default:
1078  dualfprintf(fail_file,"No defined type=%d in integratel\n",type);
1079  myexit(1);
1080  }
1081  return(0);
1082 }
1083