
LiebLin_DSF_over_Ensemble_par.cc 7.3KB

/**********************************************************
This software is part of J.-S. Caux's ABACUS library.
Copyright (c) J.-S. Caux.
-----------------------------------------------------------
File: LiebLin_DSF_over_Ensemble_par.cc
Purpose: main function for ABACUS for LiebLin gas, averaging over an Ensemble, parallel implementation.
***********************************************************/

#include "ABACUS.h"
#include "mpi.h"

using namespace std;
using namespace ABACUS;

int main(int argc, char* argv[])
{
    if (argc != 10) { // provide some info
        cout << endl << "Welcome to ABACUS\t(copyright J.-S. Caux)." << endl;
        cout << endl << "Usage of LiebLin_DSF_over_Ensemble_par executable: " << endl;
        cout << endl << "Provide the following arguments:" << endl << endl;
        cout << "char whichDSF \t\t Which structure factor should be calculated ? Options are: d for rho rho, g for psi psi{dagger}, o for psi{dagger} psi" << endl;
        cout << "DP c_int \t\t Value of the interaction parameter: use positive real values only" << endl;
        cout << "DP L \t\t\t Length of the system: use positive real values only" << endl;
        cout << "int N \t\t\t Number of particles: use positive integer values only" << endl;
        cout << "int iKmin" << endl << "int iKmax \t\t Min and max momentum integers to scan over: recommended values: -2*N and 2*N" << endl;
        cout << "DP kBT \t\t Temperature (positive only of course)" << endl;
        //cout << "int nstates \t\t\t Number of states to be considered in the ensemble" << endl;
        cout << "int Max_Secs \t\t Allowed computational time (in seconds)" << endl;
        //cout << "DP target_sumrule \t sumrule saturation you're satisfied with" << endl;
        cout << "bool refine \t\t Is this a refinement of earlier calculations ? (0 == false, 1 == true)" << endl;
        cout << endl << "EXAMPLE: " << endl << endl;
        cout << "LiebLin_DSF_over_Ensemble_par d 1.0 100.0 100 0 200 0.56 36000 1" << endl << endl;
    }
    else { // (argc == 10), correct nr of arguments

        char whichDSF = *argv[1];
        DP c_int = atof(argv[2]);
        DP L = atof(argv[3]);
        int N = atoi(argv[4]);
        int iKmin = atoi(argv[5]);
        int iKmax = atoi(argv[6]);
        DP kBT = atof(argv[7]);
        //int nstates_req = atoi(argv[8]);
        int Max_Secs = atoi(argv[8]);
        bool refine = (atoi(argv[9]) == 1);

        if (refine == false) ABACUSerror("Please run the serial version of LiebLin_DSF_over_Ensemble first.");
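        // Note: this parallel executable only refines an existing calculation; the ensemble
        // file produced by a prior run of the serial LiebLin_DSF_over_Ensemble is loaded below.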
        MPI::Init(argc, argv);

        DP tstart = MPI::Wtime();

        int rank = MPI::COMM_WORLD.Get_rank();
        int nr_processors = MPI::COMM_WORLD.Get_size();

        if (nr_processors < 2) ABACUSerror("Give at least 2 processors to ABACUS parallel !");

        // Start by constructing (or loading) the state ensemble.
        LiebLin_Diagonal_State_Ensemble ensemble;

        stringstream ensfilestrstream;
        //ensfilestrstream << "LiebLin_c_int_" << c_int << "_L_" << L << "_N_" << N << "_kBT_" << kBT << "_ns_" << nstates_req << ".ens";
        ensfilestrstream << "LiebLin_c_int_" << c_int << "_L_" << L << "_N_" << N << "_kBT_" << kBT << ".ens";
        string ensfilestr = ensfilestrstream.str();
        const char* ensfile_Cstr = ensfilestr.c_str();

        if (!refine) { // Construct the state ensemble
            //ensemble = LiebLin_Thermal_Saddle_Point_Ensemble (c_int, L, N, kBT, nstates_req);
            ensemble = LiebLin_Thermal_Saddle_Point_Ensemble (c_int, L, N, kBT);
            ensemble.Save(ensfile_Cstr); // Save the ensemble
        }
        else { // load the ensemble data
            ensemble.Load(c_int, L, N, ensfile_Cstr);
        }

        MPI_Barrier (MPI::COMM_WORLD);

        // Now perform the DSF calculation over each state in the ensemble.

        /* Original implementation: Scan always called serially. Superseded by the version below,
           which uses successive parallel scans on each state in the ensemble.

        int nDSFperproc = ensemble.nstates/nr_processors + 1;
        //if (ensemble.nstates % nr_processors) ABACUSerror("Use nr_processors * integer multiple == ensemble.nstates in LiebLin_DSF_over_Ensemble_par.");
        // Processor with rank r does all states ns = r + ir * nr_processors:
        int ns;
        int Max_Secs_used = Max_Secs/nDSFperproc;
        for (int ir = 0; ir < nDSFperproc; ++ir) {
            ns = rank + ir * nr_processors;
            //void Scan_LiebLin (char whichDSF, LiebLin_Bethe_State AveragingState, string defaultScanStatename, int iKmin, int iKmax,
            //                   int Max_Secs, DP target_sumrule, bool refine, int rank, int nr_processors)
            if (ns < ensemble.nstates) {
                //cout << "Processor rank " << rank << " going for ns = " << ns << " out of " << ensemble.nstates << endl;
                Scan_LiebLin (whichDSF, ensemble.state[ns], ensemble.state[ns].label, iKmin, iKmax, Max_Secs_used, 1.0e+6, refine, 0, 1);
            }
        }
        */
        // Version 2013 04 24:
        // Makes use of a parallel scan for each state in the ensemble, in succession.
        // The code is a simple adaptation of the LiebLin_DSF_par executable code.

        int Max_Secs_used = Max_Secs/ensemble.nstates;
        DP supercycle_time = 600.0; // allotted time per supercycle

        if (Max_Secs_used <= supercycle_time) ABACUSerror("Please allow more time in LiebLin_DSF_over_Ensemble_par.");
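        // The total allowed time Max_Secs is divided evenly over the ensemble states; each state
        // is then scanned in supercycles of supercycle_time seconds (prepare chunks, scan in
        // parallel, wrap up), until less than one full supercycle fits in that state's budget.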
        // Main loop over ensemble:
        for (int ns = 0; ns < ensemble.nstates; ++ns) {

            tstart = MPI::Wtime();
            DP tnow = MPI::Wtime();

            string defaultScanStatename = ensemble.state[ns].label;

            while (tnow - tstart < Max_Secs_used - supercycle_time) { // space for one more supercycle

                // Split up the thread list into chunks, one per processor
                if (rank == 0)
                    //Prepare_Parallel_Scan_LiebLin (whichDSF, c_int, L, N, iK_UL, fixed_iK, iKneeded, nr_processors);
                    Prepare_Parallel_Scan_LiebLin (whichDSF, c_int, L, N, iKmin, iKmax, kBT, defaultScanStatename, nr_processors);

                // Barrier synchronization, to make sure other processes wait for the process of rank 0
                // to have finished splitting up the thr file into pieces before starting:
                MPI_Barrier (MPI::COMM_WORLD);

                // then everybody gets going on their own chunk !
                //Scan_LiebLin (whichDSF, c_int, L, N, iK_UL, fixed_iK, iKneeded,
                //Scan_LiebLin (whichDSF, c_int, L, N, iKmin, iKmax, kBT,
                //              supercycle_time, target_sumrule, refine, rank, nr_processors);
                Scan_LiebLin (whichDSF, ensemble.state[ns], ensemble.state[ns].label, iKmin, iKmax, supercycle_time, 1.0e+6, refine, rank, nr_processors);

                // Another barrier synchronization
                MPI_Barrier (MPI::COMM_WORLD);

                // Now that everybody is done, digest data into unique files
                if (rank == 0)
                    //Wrapup_Parallel_Scan_LiebLin (whichDSF, c_int, L, N, iK_UL, fixed_iK, iKneeded, nr_processors);
                    Wrapup_Parallel_Scan_LiebLin (whichDSF, c_int, L, N, iKmin, iKmax, kBT, defaultScanStatename, nr_processors);

                // Another barrier synchronization
                MPI_Barrier (MPI::COMM_WORLD);

                tnow = MPI::Wtime();

            } // while (tnow - tstart...

        } // for ns
        MPI_Barrier (MPI::COMM_WORLD);

        // Final wrapup of the data
        if (rank == 0) {
            // Evaluate the f-sumrule
            stringstream FSR_stringstream;
            string FSR_string;
            Data_File_Name (FSR_stringstream, whichDSF, c_int, L, N, iKmin, iKmax, kBT, 0.0, "");
            FSR_stringstream << "_ns_" << ensemble.nstates << ".fsr";
            FSR_string = FSR_stringstream.str();
            const char* FSR_Cstr = FSR_string.c_str();

            DP Chem_Pot = 0.0;
            Evaluate_F_Sumrule (whichDSF, c_int, L, N, kBT, ensemble.nstates, Chem_Pot, iKmin, iKmax, FSR_Cstr);
        }

        MPI_Barrier (MPI::COMM_WORLD);

    } // correct nr of arguments
    MPI::Finalize();

    return(0);
}
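
Usage sketch: assuming a prior serial LiebLin_DSF_over_Ensemble run has produced the ensemble file, a refinement run on, say, 8 processors could be launched roughly as follows; the argument values are illustrative and the exact launcher syntax depends on the local MPI installation:

    mpirun -np 8 LiebLin_DSF_over_Ensemble_par d 1.0 100.0 100 -200 200 0.56 36000 1

The nine arguments follow the order parsed above: whichDSF, c_int, L, N, iKmin, iKmax, kBT, Max_Secs, refine.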