/**********************************************************

This software is part of J.-S. Caux's ABACUS library.

Copyright (c).

-----------------------------------------------------------

File: LiebLin_DSF_over_Ensemble_par.cc

Purpose: main function for ABACUS for LiebLin gas, averaging over an Ensemble, parallel implementation.

***********************************************************/

#include "JSC.h"
#include "mpi.h"

using namespace std;
using namespace JSC;

int main(int argc, char* argv[])
{
  if (argc != 10) { // provide some info

    cout << endl << "Welcome to ABACUS++\t(copyright J.-S. Caux)." << endl;
cout << endl << "Usage of LiebLin_DSF_Tgt0 executable: " << endl;
|
|
cout << endl << "Provide the following arguments:" << endl << endl;
|
|
cout << "char whichDSF \t\t Which structure factor should be calculated ? Options are: d for rho rho, g for psi psi{dagger}, o for psi{dagger} psi" << endl;
|
|
cout << "DP c_int \t\t Value of the interaction parameter: use positive real values only" << endl;
|
|
cout << "DP L \t\t\t Length of the system: use positive real values only" << endl;
|
|
cout << "int N \t\t\t Number of particles: use positive integer values only" << endl;
|
|
cout << "int iKmin" << endl << "int iKmax \t\t Min and max momentum integers to scan over: recommended values: -2*N and 2*N" << endl;
|
|
cout << "DP kBT \t\t Temperature (positive only of course)" << endl;
|
|
//cout << "int nstates \t\t\t Number of states to be considered in the ensemble" << endl;
|
|
cout << "int Max_Secs \t\t Allowed computational time: (in seconds)" << endl;
|
|
//cout << "DP target_sumrule \t sumrule saturation you're satisfied with" << endl;
|
|
cout << "bool refine \t\t Is this a refinement of earlier calculations ? (0 == false, 1 == true)" << endl;
|
|
cout << endl << "EXAMPLE: " << endl << endl;
|
|
cout << "LiebLin_DSF_over_Ensemble d 1.0 100.0 100 0 200 0.56 10 600 0" << endl << endl;
|
|
}
|
|
|
|
  else { // (argc == 10), correct nr of arguments

    char whichDSF = *argv[1];
    DP c_int = atof(argv[2]);
    DP L = atof(argv[3]);
    int N = atoi(argv[4]);
    int iKmin = atoi(argv[5]);
    int iKmax = atoi(argv[6]);
    DP kBT = atof(argv[7]);
    //int nstates_req = atoi(argv[8]);
    int Max_Secs = atoi(argv[8]);
    bool refine = (atoi(argv[9]) == 1);

    if (refine == false) JSCerror("Please run the serial version of LiebLin_DSF_over_Ensemble first.");
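
    // Example launch (the exact command depends on the local MPI installation, e.g. mpirun or mpiexec):
    //   mpirun -np 8 LiebLin_DSF_over_Ensemble_par d 1.0 100.0 100 0 200 0.56 600 1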

    MPI::Init(argc, argv);

    DP tstart = MPI::Wtime();

    int rank = MPI::COMM_WORLD.Get_rank();
    int nr_processors = MPI::COMM_WORLD.Get_size();

    if (nr_processors < 2) JSCerror("Give at least 2 processors to ABACUS++ parallel !");


    // Start by constructing (or loading) the state ensemble.

    LiebLin_Diagonal_State_Ensemble ensemble;

    stringstream ensfilestrstream;
    //ensfilestrstream << "LiebLin_c_int_" << c_int << "_L_" << L << "_N_" << N << "_kBT_" << kBT << "_ns_" << nstates_req << ".ens";
    ensfilestrstream << "LiebLin_c_int_" << c_int << "_L_" << L << "_N_" << N << "_kBT_" << kBT << ".ens";
    string ensfilestr = ensfilestrstream.str();
    const char* ensfile_Cstr = ensfilestr.c_str();
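    // With default stream formatting, the example parameters above give e.g.
    // LiebLin_c_int_1_L_100_N_100_kBT_0.56.ens; this must match the ensemble file written by the serial run.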

    if (!refine) { // Construct the state ensemble
      //ensemble = LiebLin_Thermal_Saddle_Point_Ensemble (c_int, L, N, kBT, nstates_req);
      ensemble = LiebLin_Thermal_Saddle_Point_Ensemble (c_int, L, N, kBT);
      ensemble.Save(ensfile_Cstr); // Save the ensemble
    }

    else { // load the ensemble data
      ensemble.Load(c_int, L, N, ensfile_Cstr);
    }

    MPI_Barrier (MPI::COMM_WORLD);

    // Now perform the DSF calculation over each state in the ensemble

    /* Original implementation: Scan always called serially. Superseded by version below, using successive parallel scans on each state in the ensemble.

    int nDSFperproc = ensemble.nstates/nr_processors + 1;
    //if (ensemble.nstates % nr_processors) JSCerror("Use nr_processors * integer multiple == ensemble.nstates in LiebLin_DSF_over_Ensemble_par.");

    // Processor with rank r does all

    int ns;
    int Max_Secs_used = Max_Secs/nDSFperproc;

    for (int ir = 0; ir < nDSFperproc; ++ir) {
      ns = rank + ir * nr_processors;
      //void Scan_LiebLin (char whichDSF, LiebLin_Bethe_State AveragingState, string defaultScanStatename, int iKmin, int iKmax,
      //int Max_Secs, DP target_sumrule, bool refine, int rank, int nr_processors)
      if (ns < ensemble.nstates) {
        //cout << "Processor rank " << rank << " going for ns = " << ns << " out of " << ensemble.nstates << endl;
        Scan_LiebLin (whichDSF, ensemble.state[ns], ensemble.state[ns].label, iKmin, iKmax, Max_Secs_used, 1.0e+6, refine, 0, 1);
      }
    }
    */

    // Version 2013 04 24:
    // Makes use of a parallel scan for each state in the ensemble, in succession.
    // Code is simple adaptation of LiebLin_DSF_par executable code.

    int Max_Secs_used = Max_Secs/ensemble.nstates;
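    // Integer division: the total allowed time is split evenly over the ensemble states.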

    DP supercycle_time = 600.0; // allotted time per supercycle

    if (Max_Secs_used <= supercycle_time) JSCerror("Please allow more time in LiebLin_DSF_over_Ensemble_par.");
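    // i.e. Max_Secs must exceed roughly 600 seconds per ensemble state for the run to proceed.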

    // Main loop over ensemble:
    for (int ns = 0; ns < ensemble.nstates; ++ns) {

      tstart = MPI::Wtime();
      DP tnow = MPI::Wtime();

      string defaultScanStatename = ensemble.state[ns].label;
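
      // Each pass through the loop below is one supercycle: rank 0 splits the scan into one chunk
      // per processor, all ranks then scan their own chunk in parallel, and rank 0 finally digests
      // the partial results, with barrier synchronizations separating the stages.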

      while (tnow - tstart < Max_Secs_used - supercycle_time) { // space for one more supercycle

        if (rank == 0)
          // Split up thread list into chunks, one per processor
          //Prepare_Parallel_Scan_LiebLin (whichDSF, c_int, L, N, iK_UL, fixed_iK, iKneeded, nr_processors);
          Prepare_Parallel_Scan_LiebLin (whichDSF, c_int, L, N, iKmin, iKmax, kBT, defaultScanStatename, nr_processors);

        // Barrier synchronization, to make sure other processes wait for process of rank 0
        // to have finished splitting up the thr file into pieces before starting:
        MPI_Barrier (MPI::COMM_WORLD);

        // then everybody gets going on their own chunk !
        //Scan_LiebLin (whichDSF, c_int, L, N, iK_UL, fixed_iK, iKneeded,
        //Scan_LiebLin (whichDSF, c_int, L, N, iKmin, iKmax, kBT,
        //supercycle_time, target_sumrule, refine, rank, nr_processors);
        Scan_LiebLin (whichDSF, ensemble.state[ns], ensemble.state[ns].label, iKmin, iKmax, supercycle_time, 1.0e+6, refine, rank, nr_processors);

        // Another barrier synchronization
        MPI_Barrier (MPI::COMM_WORLD);

        // Now that everybody is done, digest data into unique files
        if (rank == 0)
          //Wrapup_Parallel_Scan_LiebLin (whichDSF, c_int, L, N, iK_UL, fixed_iK, iKneeded, nr_processors);
          Wrapup_Parallel_Scan_LiebLin (whichDSF, c_int, L, N, iKmin, iKmax, kBT, defaultScanStatename, nr_processors);

        // Another barrier synchronization
        MPI_Barrier (MPI::COMM_WORLD);

        tnow = MPI::Wtime();

      } // while (tnow - tstart...

    } // for ns

    MPI_Barrier (MPI::COMM_WORLD);


    // Final wrapup of the data
    if (rank == 0) {

      // Evaluate the f-sumrule
      stringstream FSR_stringstream; string FSR_string;
      Data_File_Name (FSR_stringstream, whichDSF, c_int, L, N, iKmin, iKmax, kBT, 0.0, "");
      FSR_stringstream << "_ns_" << ensemble.nstates << ".fsr";
      FSR_string = FSR_stringstream.str(); const char* FSR_Cstr = FSR_string.c_str();

      DP Chem_Pot = 0.0;

      Evaluate_F_Sumrule (whichDSF, c_int, L, N, kBT, ensemble.nstates, Chem_Pot, iKmin, iKmax, FSR_Cstr);
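      // The f-sumrule evaluation uses the *.fsr filename constructed above; the chemical potential is simply set to zero here.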
    }

    MPI_Barrier (MPI::COMM_WORLD);

    MPI::Finalize();

  } // correct nr of arguments

  return(0);
}