/**********************************************************

This software is part of J.-S. Caux's ABACUS library.
Copyright (c) J.-S. Caux.

-----------------------------------------------------------

File:    LiebLin_DSF_MosesState_par.cc

Purpose: Parallel version of ABACUS using MPICH.

***********************************************************/

#include "ABACUS.h"
#include "mpi.h"

using namespace ABACUS;

int main(int argc, char *argv[])
{
    char whichDSF;
    DP c_int, L;
    int N, Nl, DIl, DIr, iKmin, iKmax, Max_Secs, supercycle_time;
    DP target_sumrule = 1.0e+6;  // effectively deactivated here
    bool refine = true;          // always true for parallel mode
    DP kBT = 0.0;                // dummy

    if (argc != 12) {  // provide some info
        cout << endl << "Welcome to ABACUS\t(copyright J.-S. Caux)." << endl;
        cout << endl << "Usage of LiebLin_DSF_MosesState_par executable: " << endl;
        cout << endl << "This function runs ABACUS in parallel mode, starting from a preexisting "
            "serial run (obtained using the LiebLin_DSF executable) with the same model parameters." << endl;
        cout << endl << "Provide the following arguments:" << endl << endl;
        cout << "char whichDSF \t\t Which structure factor should be calculated? Options are: "
            "d for rho rho, g for psi psi{dagger}, o for psi{dagger} psi" << endl;
        cout << "DP c_int \t\t Value of the interaction parameter: use positive real values only" << endl;
        cout << "DP L \t\t\t Length of the system: use positive real values only" << endl;
        cout << "int N \t\t\t Number of particles: use positive integer values only" << endl;
        cout << "int Nl \t\t\t Number of particles in the left Fermi sea (Nr is then N - Nl)" << endl;
        cout << "int DIl \t\t Shift of the left sea compared to its ground state position" << endl;
        cout << "int DIr \t\t Shift of the right sea compared to its ground state position" << endl;
        cout << "int iKmin" << endl << "int iKmax \t\t Min and max momentum integers to scan over: "
            "recommended values: -2*N and 2*N" << endl;
        cout << "int Max_Secs \t\t Allowed computational time (in seconds)" << endl;
        cout << "int supercycle_time \t\t Time for one supercycle (in seconds)" << endl;
        cout << endl << "EXAMPLE: " << endl << endl;
        cout << "mpiexec -np 8 LiebLin_DSF_MosesState_par d 1.0 100.0 100 50 -30 20 -400 400 3600 600"
             << endl << endl;
        return(0);
    }
    else {  // argc == 12: correct number of arguments
        whichDSF = *argv[1];
        c_int = atof(argv[2]);
        L = atof(argv[3]);
        N = atoi(argv[4]);
        Nl = atoi(argv[5]);
        DIl = atoi(argv[6]);
        DIr = atoi(argv[7]);
        iKmin = atoi(argv[8]);
        iKmax = atoi(argv[9]);
        Max_Secs = atoi(argv[10]);
        supercycle_time = atoi(argv[11]);
    }

    if (Max_Secs <= supercycle_time) ABACUSerror("Please allow more time in LiebLin_DSF_MosesState_par.");

    MPI::Init(argc, argv);

    DP tstart = MPI::Wtime();

    int rank = MPI::COMM_WORLD.Get_rank();
    int nr_processors = MPI::COMM_WORLD.Get_size();

    if (nr_processors < 2) ABACUSerror("Give at least 2 processors to ABACUS parallel!");

    refine = true;  // ASSUMPTION: preexisting files (raw, thr, ...) exist for the run.
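    // Illustrative note (not in the original source): the "Moses state" constructed below is a
    // split Fermi sea. Starting from the ground-state quantum numbers Ix2 of the N-particle
    // state, the first Nl entries (left sea) are shifted by 2*DIl and the remaining N - Nl
    // entries (right sea) by 2*DIr. With the example arguments above (N = 100, Nl = 50,
    // DIl = -30, DIr = 20), the left 50 quantum numbers shift down by 60 and the right 50
    // shift up by 40, yielding two separated seas.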
    DP tnow = MPI::Wtime();

    // Define the Moses state:
    LiebLin_Bethe_State MosesState (c_int, L, N);

    // Split the sea:
    for (int i = 0; i < Nl; ++i) MosesState.Ix2[i] += 2 * DIl;
    for (int i = Nl; i < N; ++i) MosesState.Ix2[i] += 2 * DIr;

    MosesState.Compute_All (true);

    // Handy default name:
    stringstream defaultScanStatename_strstream;
    defaultScanStatename_strstream << "Moses_Nl_" << Nl << "_DIl_" << DIl << "_DIr_" << DIr;
    string defaultScanStatename = defaultScanStatename_strstream.str();

    MPI_Barrier (MPI::COMM_WORLD);

    while (tnow - tstart < Max_Secs - supercycle_time - 120) {  // space for one more supercycle, plus 2 minutes safety

        if (rank == 0)
            // Split up the thread list into chunks, one per processor:
            Prepare_Parallel_Scan_LiebLin (whichDSF, c_int, L, N, iKmin, iKmax, kBT,
                                           defaultScanStatename, nr_processors);

        // Barrier synchronization, to make sure the other processes wait for the rank 0 process
        // to have finished splitting up the thr file into pieces before starting:
        MPI_Barrier (MPI::COMM_WORLD);

        // Then everybody gets going on their own chunk!
        Scan_LiebLin (whichDSF, MosesState, defaultScanStatename, iKmin, iKmax,
                      supercycle_time, target_sumrule, refine, rank, nr_processors);

        // Another barrier synchronization:
        MPI_Barrier (MPI::COMM_WORLD);

        // Now that everybody is done, digest the data into unique files:
        if (rank == 0)
            Wrapup_Parallel_Scan_LiebLin (whichDSF, c_int, L, N, iKmin, iKmax, kBT,
                                          defaultScanStatename, nr_processors);

        // Another barrier synchronization:
        MPI_Barrier (MPI::COMM_WORLD);

        tnow = MPI::Wtime();

    } // while (tnow - tstart ...

    MPI::Finalize();

    return(0);
}
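// Illustrative timing note (not in the original source): with the example arguments above
// (Max_Secs = 3600, supercycle_time = 600), the supercycle loop keeps launching as long as the
// elapsed time stays below 3600 - 600 - 120 = 2880 seconds, i.e. roughly five ~600-second
// supercycles, leaving room for the last supercycle to complete plus the two-minute safety margin.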