You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

Heis_DSF_par.cc 3.8KB

(Line-number gutter from the repository file viewer; the file spans lines 1–113 in the original listing.)
  1. /**********************************************************
  2. This software is part of J.-S. Caux's ABACUS library.
  3. Copyright (c) J.-S. Caux.
  4. -----------------------------------------------------------
  5. File: Heis_DSF_par.cc
  6. Purpose: Parallel version of ABACUS using MPICH.
  7. ***********************************************************/
  8. #include "ABACUS.h"
  9. #include "mpi.h"
  10. using namespace ABACUS;
  11. int main(int argc, char *argv[])
  12. {
  13. char whichDSF;
  14. DP Delta;
  15. int N, M, iKneeded, iKmin, iKmax, Max_Secs;
  16. DP target_sumrule = 1.0e+6; // effectively deactivated here
  17. bool refine;
  18. if (argc != 8) { // provide some info
  19. cout << endl << "Welcome to ABACUS\t(copyright J.-S. Caux)." << endl;
  20. cout << endl << "Usage of Heis_DSF_par executable: " << endl;
  21. cout << endl << "This function runs ABACUS in parallel mode, starting from a preexisting "
  22. "serial run (obtained using the Heis_DSF executable) using the same model parameters." << endl;
  23. cout << endl << "Provide the following arguments:" << endl << endl;
  24. cout << "char whichDSF \t\t Which structure factor should be calculated ? Options are: "
  25. "m for S- S+, z for Sz Sz, p for S+ S-." << endl;
  26. cout << "DP Delta \t\t Value of the anisotropy: use positive real values only" << endl;
  27. cout << "int N \t\t\t Length (number of sites) of the system: use positive even integer values only" << endl;
  28. cout << "int M \t\t\t Number of down spins: use positive integer values between 1 and N/2" << endl;
  29. cout << "int iKmin" << endl << "int iKmax \t\t Min and max momentum integers to scan over: "
  30. "recommended values: 0 and N" << endl;
  31. cout << "int Max_Secs \t\t Allowed computational time: (in seconds)" << endl;
  32. cout << endl << "EXAMPLE: " << endl << endl;
  33. cout << "mpiexec -np 8 Heis_DSF_par z 1.0 100 40 0 100 600" << endl << endl;
  34. return(0);
  35. }
  36. else { // (argc == 8) correct nr of arguments
  37. whichDSF = *argv[1];
  38. Delta = atof(argv[2]);
  39. N = atoi(argv[3]);
  40. M = atoi(argv[4]);
  41. iKmin = atoi(argv[5]);
  42. iKmax = atoi(argv[6]);
  43. Max_Secs = atoi(argv[7]);
  44. }
  45. DP supercycle_time = 600.0; // allotted time per supercycle
  46. if (Max_Secs <= supercycle_time + 300) ABACUSerror("Please allow more time in Heis_DSF_par.");
  47. MPI::Init(argc, argv);
  48. DP tstart = MPI::Wtime();
  49. int rank = MPI::COMM_WORLD.Get_rank();
  50. int nr_processors = MPI::COMM_WORLD.Get_size();
  51. if (nr_processors < 2) ABACUSerror("Give at least 2 processors to ABACUS parallel !");
  52. refine = true;
  53. // ASSUMPTION: preexisting files (raw, thr, ...) exist for the run.
  54. // IMPORTANT PRECONDITION: no flags are being raised in General_Scan in parallel mode, so
  55. // the preinitializing serial run must be extensive enough to have flagged all base/type s necessary.
  56. DP tnow = MPI::Wtime();
  57. while (tnow - tstart < Max_Secs - supercycle_time - 300) { // space for one more supercycle, + 5 minutes safety
  58. if (rank == 0)
  59. // Split up thread list into chunks, one per processor
  60. Prepare_Parallel_Scan_Heis (whichDSF, Delta, N, M, iKmin, iKmax, nr_processors);
  61. // Barrier synchronization, to make sure other processes wait for process of rank 0
  62. // to have finished splitting up the thr file into pieces before starting:
  63. MPI_Barrier (MPI::COMM_WORLD);
  64. // then everybody gets going on their own chunk !
  65. Scan_Heis (whichDSF, Delta, N, M, iKmin, iKmax,
  66. supercycle_time, target_sumrule, refine, rank, nr_processors);
  67. // Another barrier synchronization
  68. MPI_Barrier (MPI::COMM_WORLD);
  69. // Now that everybody is done, digest data into unique files
  70. if (rank == 0)
  71. Wrapup_Parallel_Scan_Heis (whichDSF, Delta, N, M, iKmin, iKmax, nr_processors);
  72. // Another barrier synchronization
  73. MPI_Barrier (MPI::COMM_WORLD);
  74. tnow = MPI::Wtime();
  75. } // while (tnow - tstart...
  76. MPI::Finalize();
  77. return(0);
  78. }