
/**********************************************************
This software is part of J.-S. Caux's ABACUS library.
Copyright (c) J.-S. Caux.
-----------------------------------------------------------
File: LiebLin_DSF_over_Ensemble_par.cc
Purpose: main function for the ABACUS LiebLin gas DSF, averaged over a state ensemble; parallel implementation.
***********************************************************/
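
// Example launch (illustrative sketch only: the launcher name depends on your MPI
// installation, and the argument values are the ones from the usage message below):
//
//   mpirun -np 8 LiebLin_DSF_over_Ensemble_par d 1.0 100.0 100 0 200 0.56 3600 1
//
// i.e. 8 MPI processes refining the density-density DSF for c_int = 1.0, L = 100.0,
// N = 100, momentum integers 0..200, kBT = 0.56, a one-hour time budget, refine == 1.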
#include "ABACUS.h"
#include "mpi.h"
using namespace std;
using namespace ABACUS;
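
// DP is ABACUS's double-precision floating-point alias (presumably declared in the ABACUS headers).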
int main(int argc, char* argv[])
{
    if (argc != 10) { // provide some info
        cout << endl << "Welcome to ABACUS\t(copyright J.-S. Caux)." << endl;
        cout << endl << "Usage of LiebLin_DSF_over_Ensemble_par executable: " << endl;
        cout << endl << "Provide the following arguments:" << endl << endl;
        cout << "char whichDSF \t\t Which structure factor should be calculated ? Options are: "
            "d for rho rho, g for psi psi{dagger}, o for psi{dagger} psi" << endl;
        cout << "DP c_int \t\t Value of the interaction parameter: use positive real values only" << endl;
        cout << "DP L \t\t\t Length of the system: use positive real values only" << endl;
        cout << "int N \t\t\t Number of particles: use positive integer values only" << endl;
        cout << "int iKmin" << endl << "int iKmax \t\t Min and max momentum integers to scan over: "
            "recommended values: -2*N and 2*N" << endl;
        cout << "DP kBT \t\t Temperature (positive values only)" << endl;
        cout << "int Max_Secs \t\t Allowed computational time (in seconds)" << endl;
        cout << "bool refine \t\t Is this a refinement of an earlier calculation ? (0 == false, 1 == true)" << endl;
        cout << endl << "EXAMPLE: " << endl << endl;
        cout << "LiebLin_DSF_over_Ensemble_par d 1.0 100.0 100 0 200 0.56 600 1" << endl << endl;
    }
    else { // (argc == 10), correct nr of arguments

        char whichDSF = *argv[1];
        DP c_int = atof(argv[2]);
        DP L = atof(argv[3]);
        int N = atoi(argv[4]);
        int iKmin = atoi(argv[5]);
        int iKmax = atoi(argv[6]);
        DP kBT = atof(argv[7]);
        int Max_Secs = atoi(argv[8]);
        bool refine = (atoi(argv[9]) == 1);

        if (!refine) ABACUSerror("Please run the serial version of LiebLin_DSF_over_Ensemble first.");
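        // The serial executable constructs the thermal ensemble and performs the initial
        // scan; this parallel version presumably only refines those existing data files,
        // which is why refine == true is required here.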
        MPI::Init(argc, argv);

        DP tstart = MPI::Wtime();

        int rank = MPI::COMM_WORLD.Get_rank();
        int nr_processors = MPI::COMM_WORLD.Get_size();

        if (nr_processors < 2) ABACUSerror("Give at least 2 processors to ABACUS parallel!");
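        // Note: the MPI C++ bindings used here (MPI::Init, MPI::COMM_WORLD, ...) were
        // deprecated in MPI-2.2 and removed in MPI-3.0, consistent with this file living
        // under deprec/. The MPI_Barrier(MPI::COMM_WORLD) calls below rely on the
        // implicit conversion from MPI::Intracomm to MPI_Comm.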
        // Start by constructing (or loading) the state ensemble.
        LiebLin_Diagonal_State_Ensemble ensemble;

        stringstream ensfilestrstream;
        ensfilestrstream << "LiebLin_c_int_" << c_int << "_L_" << L << "_N_" << N << "_kBT_" << kBT << ".ens";
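        // With default ostream formatting, the example parameters above would give
        // the file name LiebLin_c_int_1_L_100_N_100_kBT_0.56.ens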
        string ensfilestr = ensfilestrstream.str();
        const char* ensfile_Cstr = ensfilestr.c_str();
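        // Since refine == true is enforced above, the construction branch below is never
        // reached in this parallel executable; it presumably mirrors the logic of the
        // serial version, which builds and saves the ensemble.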
        if (!refine) { // Construct the state ensemble
            ensemble = LiebLin_Thermal_Saddle_Point_Ensemble (c_int, L, N, kBT);
            ensemble.Save(ensfile_Cstr); // Save the ensemble
        }
        else { // load the ensemble data
            ensemble.Load(c_int, L, N, ensfile_Cstr);
        }
        MPI_Barrier (MPI::COMM_WORLD);

        // Now perform the DSF calculation over each state in the ensemble
        int Max_Secs_used = Max_Secs/ensemble.nstates; // integer division: equal time budget per state
        DP supercycle_time = 600.0; // allotted time per supercycle

        if (Max_Secs_used <= supercycle_time) ABACUSerror("Please allow more time in LiebLin_DSF_over_Ensemble_par.");
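        // Each ensemble state thus gets Max_Secs/nstates seconds and must fit at least
        // one full supercycle, so Max_Secs should comfortably exceed 600 * nstates.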
        // Main loop over ensemble:
        for (int ns = 0; ns < ensemble.nstates; ++ns) {

            tstart = MPI::Wtime();
            DP tnow = MPI::Wtime();

            string defaultScanStatename = ensemble.state[ns].label;

            while (tnow - tstart < Max_Secs_used - supercycle_time) { // space for one more supercycle

                if (rank == 0)
                    // Split up thread list into chunks, one per processor
                    Prepare_Parallel_Scan_LiebLin (whichDSF, c_int, L, N, iKmin, iKmax, kBT,
                                                   defaultScanStatename, nr_processors);

                // Barrier synchronization, to make sure other processes wait for process of rank 0
                // to have finished splitting up the thr file into pieces before starting:
                MPI_Barrier (MPI::COMM_WORLD);

                // then everybody gets going on their own chunk!
                Scan_LiebLin (whichDSF, ensemble.state[ns], ensemble.state[ns].label, iKmin, iKmax,
                              supercycle_time, 1.0e+6, refine, rank, nr_processors);
                // Another barrier synchronization
                MPI_Barrier (MPI::COMM_WORLD);

                // Now that everybody is done, digest data into unique files
                if (rank == 0)
                    Wrapup_Parallel_Scan_LiebLin (whichDSF, c_int, L, N, iKmin, iKmax, kBT,
                                                  defaultScanStatename, nr_processors);

                // Another barrier synchronization
                MPI_Barrier (MPI::COMM_WORLD);

                tnow = MPI::Wtime();

            } // while (tnow - tstart...

        } // for ns
        MPI_Barrier (MPI::COMM_WORLD);

        // Final wrapup of the data
        if (rank == 0) {
            // Evaluate the f-sumrule
            stringstream FSR_stringstream;
            string FSR_string;
            Data_File_Name (FSR_stringstream, whichDSF, c_int, L, N, iKmin, iKmax, kBT, 0.0, "");
            FSR_stringstream << "_ns_" << ensemble.nstates << ".fsr";
            FSR_string = FSR_stringstream.str();
            const char* FSR_Cstr = FSR_string.c_str();

            DP Chem_Pot = 0.0;

            Evaluate_F_Sumrule (whichDSF, c_int, L, N, kBT, ensemble.nstates, Chem_Pot, iKmin, iKmax, FSR_Cstr);
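            // The f-sumrule provides a saturation check on the computed DSF: the ratio of
            // the first frequency moment of the accumulated data to its exact value
            // measures how much of the total spectral weight the scan has captured.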
}
        MPI_Barrier (MPI::COMM_WORLD);

        // Finalize must only be called if MPI was actually initialized, i.e. within this branch:
        MPI::Finalize();

    } // correct nr of arguments

    return(0);
}