From 305335c2f745e6f24ef216c0f9354f7d8372a187 Mon Sep 17 00:00:00 2001
From: Marcin Kirsz <mkirsz@ed.ac.uk>
Date: Tue, 8 Oct 2024 17:00:54 +0100
Subject: [PATCH] Fold init() into the MPI_Trainer constructor

Move the body of init() into the constructor so a trainer is fully
initialized on construction and a separate init() call cannot be forgotten.
---
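Reviewer note: the sizing logic that moved into the constructor leans on
ScaLAPACK's numroc_. As a sanity check, the block-cyclic counting rule can be
reproduced in a few lines of plain C++. The sketch below (local_count is a
hypothetical helper, with made-up PHI_rows/MBLOCK/grid values) only
illustrates that rule for source process 0; it is not the ScaLAPACK routine:

    #include <iostream>

    // How many of n rows (or columns), dealt out in blocks of nb, land on
    // process iproc of nprocs when block 0 starts on process 0.
    int local_count(int n, int nb, int iproc, int nprocs) {
      int nblocks = n / nb;                      // complete blocks
      int count = (nblocks / nprocs) * nb;       // full rounds of dealing
      int extra = nblocks % nprocs;              // leftover complete blocks
      if (iproc < extra)       count += nb;      // one extra complete block
      else if (iproc == extra) count += n % nb;  // the trailing partial block
      return count;
    }

    int main() {
      int PHI_rows = 10, MBLOCK = 2, nprow = 2;  // made-up example values
      for (int p = 0; p < nprow; ++p)
        std::cout << "process row " << p << ": "
                  << local_count(PHI_rows, MBLOCK, p, nprow) << " rows\n";
      // prints 6 rows for process row 0 and 4 rows for process row 1
    }
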
 trainer.h | 164 +++++++++++++++++++++++++++---------------------------
 1 file changed, 81 insertions(+), 83 deletions(-)

diff --git a/trainer.h b/trainer.h
index d8c4f29..950aa80 100644
--- a/trainer.h
+++ b/trainer.h
@@ -113,93 +113,91 @@ class MPI_Trainer: public Trainer {
       Trainer(c),
       rank(rank),
       ncpu(ncpu)
-  {}
-    void init() {
-
-      if (rank==0) {
-        int nstruct_tot = StructureDB::count(config).first;
-        int natoms_tot = StructureDB::count(config).second;
-        PHI_cols = fb->get_phi_cols(config);
-        PHI_rows = DesignMatrixBase::phi_rows_num(config, nstruct_tot, natoms_tot);
-      }
-      MPI_Bcast(&PHI_rows, 1, MPI_INT, 0, MPI_COMM_WORLD);
-      MPI_Bcast(&PHI_cols, 1, MPI_INT, 0, MPI_COMM_WORLD);
-
-      // Initialize BLACS
-      // We create two contexts.
-      // context1 is used for the computation of phi matrices
-      // context2 is used for distribution of local phi to "block cyclic phi"
-      blacs_pinfo_(&b_rank, &ncpu) ; // BLACS rank and world size
-
-      rnb1 = ceil(PHI_rows/ncpu);
-      rnb2 = config.get<int>("MBLOCK");   // Row block size
-      cnb1 = PHI_cols;
-      cnb2 = config.get<int>("NBLOCK");   // Column block size
-
-      b_ncols1 = 1;      //  b_ncols2 = 2;
-      b_nrows1 = ncpu;   //  b_nrows2 = ncpu/b_ncols2;
-
-      // make as sqaure grid as possible
-      int sr = sqrt(ncpu);
-      if (sr*sr==ncpu) {
-        b_nrows2 = sr;
-        b_ncols2 = sr;
-      }
-      else {
-        // loop over all possible divisors
-        int before, /*sqrt(ncpu),*/ after;
-        for (int i = 1; i <= ncpu; ++i){
-          if (ncpu % i == 0) {
-            if (i>sqrt(ncpu)) {
-              after=i; 
-              break;
-            }
-            before=i;
+  {
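+    // Rank 0 alone reads the database to determine the global dimensions of
+    // PHI; the broadcasts below share them so every rank can size its part.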
+    if (rank==0) {
+      const auto counts = StructureDB::count(config);   // count once, reuse
+      int nstruct_tot = counts.first;
+      int natoms_tot = counts.second;
+      PHI_cols = fb->get_phi_cols(config);
+      PHI_rows = DesignMatrixBase::phi_rows_num(config, nstruct_tot, natoms_tot);
+    }
+    MPI_Bcast(&PHI_rows, 1, MPI_INT, 0, MPI_COMM_WORLD);
+    MPI_Bcast(&PHI_cols, 1, MPI_INT, 0, MPI_COMM_WORLD);
+
+    // Initialize BLACS. Two contexts are created:
+    //   context1 - used to compute the local phi matrices
+    //   context2 - used to redistribute local phi into the block-cyclic layout
+    blacs_pinfo_(&b_rank, &ncpu);  // BLACS rank and world size
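+    // (b_rank and ncpu are overwritten with the BLACS process rank and
+    // process count, which match the MPI rank and size when BLACS runs on
+    // top of MPI.)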
+
+    rnb1 = (PHI_rows + ncpu - 1) / ncpu;  // integer ceil(PHI_rows / ncpu)
+    rnb2 = config.get<int>("MBLOCK");   // Row block size
+    cnb1 = PHI_cols;
+    cnb2 = config.get<int>("NBLOCK");   // Column block size
+
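+    // context1 is a 1D grid (ncpu x 1): with cnb1 = PHI_cols each rank owns
+    // up to rnb1 complete rows of PHI, which suits row-wise assembly.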
+    b_ncols1 = 1;
+    b_nrows1 = ncpu;
+
+    // make the process grid as square as possible
+    int sr = sqrt(ncpu);
+    if (sr*sr==ncpu) {
+      b_nrows2 = sr;
+      b_ncols2 = sr;
+    }
+    else {
+      // loop over all possible divisors
+      int before = 1, after = ncpu;  // divisors of ncpu bracketing sqrt(ncpu)
+      for (int i = 1; i <= ncpu; ++i){
+        if (ncpu % i == 0) {
+          if (i>sqrt(ncpu)) {
+            after=i; 
+            break;
           }
+          before=i;
         }
-        b_nrows2 = after;
-        b_ncols2 = before;
       }
-
-      assert(b_nrows2 * b_ncols2 == ncpu);
-      assert(b_nrows1 * b_ncols1 == ncpu);
-
-
-      // Create first context
-      blacs_get_(&izero,&izero, &context1 ); // -> Create context1
-      blacs_gridinit_(&context1, &layout, &b_nrows1, &b_ncols1 ); // context1 -> Initialize the grid
-      blacs_gridinfo_(&context1, &b_nrows1, &b_ncols1, &b_row1, &b_col1 );
-
-      // Create second context
-      blacs_get_(&izero,&izero, &context2 ); // -> Create context2
-      blacs_gridinit_(&context2, &layout, &b_nrows2, &b_ncols2 ); // context2 -> Initialize the grid
-      blacs_gridinfo_(&context2, &b_nrows2, &b_ncols2, &b_row2, &b_col2 );
-
-      // Compute the size of the local phi matrices
-      phi_rows1 = numroc_( &PHI_rows, &rnb1, &b_row1, &izero, &b_nrows1 );
-      phi_cols1 = numroc_( &PHI_cols, &cnb1, &b_col1, &izero, &b_ncols1 );
-      phi_rows2 = numroc_( &PHI_rows, &rnb2, &b_row2, &izero, &b_nrows2 );
-      phi_cols2 = numroc_( &PHI_cols, &cnb2, &b_col2, &izero, &b_ncols2 );
-
-      // Define MPI datatype to send rows from the PHI matrix with column-major order
-      // used only in context1
-      MPI_Type_vector( phi_cols1, 1, phi_rows1, MPI_DOUBLE, &rowvec); 
-      MPI_Type_commit(&rowvec);
-      MPI_Type_create_resized(rowvec, 0, sizeof(double), &rowvecs);
-      MPI_Type_commit(&rowvecs);
-
-      // COUNTERS
-      rows_available=phi_rows1;  // number of available rows in the local phi array
-
-      // once we know the size of local phi, we can allocate memory to it
-      // including host as well. The host will collect excees computations from
-      // workers.
-      //DesignMatrix<DM_Function_Base&> dm(*fb, config);
-      dm.Phi.resize(phi_rows1,phi_cols1);
-      //int lda1 = phi_rows1 > phi_cols1 ? phi_rows1 : phi_cols1;
-      dm.T.resize(phi_rows1);
-      dm.Tlabels.resize(phi_rows1);
+      b_nrows2 = after;
+      b_ncols2 = before;
     }
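+    // Example: ncpu = 12 has divisors 1,2,3,4,6,12 and sqrt(12) ~ 3.46,
+    // so the loop above yields before = 3, after = 4: a 4 x 3 process grid.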
+
+    assert(b_nrows2 * b_ncols2 == ncpu);
+    assert(b_nrows1 * b_ncols1 == ncpu);
+
+    // Create first context
+    blacs_get_(&izero,&izero, &context1 ); // -> Create context1
+    blacs_gridinit_(&context1, &layout, &b_nrows1, &b_ncols1 ); // context1 -> Initialize the grid
+    blacs_gridinfo_(&context1, &b_nrows1, &b_ncols1, &b_row1, &b_col1 );
+
+    // Create second context
+    blacs_get_(&izero,&izero, &context2 ); // -> Create context2
+    blacs_gridinit_(&context2, &layout, &b_nrows2, &b_ncols2 ); // context2 -> Initialize the grid
+    blacs_gridinfo_(&context2, &b_nrows2, &b_ncols2, &b_row2, &b_col2 );
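+    // blacs_gridinfo_ also fills in this process's (row, col) coordinates
+    // in each grid; they are the inputs to the numroc_ calls below.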
+
+    // Compute the size of the local phi matrices
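+    // numroc_ counts how many rows/columns of the global PHI this process
+    // owns under the block-cyclic distribution. For example, PHI_rows = 10
+    // with rnb2 = 2 on two process rows gives row 0 the blocks 0,2,4
+    // (6 rows) and row 1 the blocks 1,3 (4 rows).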
+    phi_rows1 = numroc_( &PHI_rows, &rnb1, &b_row1, &izero, &b_nrows1 );
+    phi_cols1 = numroc_( &PHI_cols, &cnb1, &b_col1, &izero, &b_ncols1 );
+    phi_rows2 = numroc_( &PHI_rows, &rnb2, &b_row2, &izero, &b_nrows2 );
+    phi_cols2 = numroc_( &PHI_cols, &cnb2, &b_col2, &izero, &b_ncols2 );
+
+    // Define MPI datatype to send rows from the PHI matrix with column-major order
+    // used only in context1
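+    // PHI is column-major, so consecutive elements of one row sit
+    // phi_rows1 doubles apart (hence the vector type). Resizing the extent
+    // to one double makes n consecutive rows addressable as n rowvecs.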
+    MPI_Type_vector( phi_cols1, 1, phi_rows1, MPI_DOUBLE, &rowvec); 
+    MPI_Type_commit(&rowvec);
+    MPI_Type_create_resized(rowvec, 0, sizeof(double), &rowvecs);
+    MPI_Type_commit(&rowvecs);
+
+    // COUNTERS
+    rows_available = phi_rows1;  // number of available rows in the local phi array
+
+    // Once the size of the local phi is known we can allocate it, on the
+    // host as well; the host collects excess computations from the workers.
+    dm.Phi.resize(phi_rows1, phi_cols1);
+    dm.T.resize(phi_rows1);
+    dm.Tlabels.resize(phi_rows1);
+  }
     void probe() {
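+      // Block until a message from any worker arrives; status then
+      // identifies which worker to service next.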
       MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
       worker = status.MPI_SOURCE;
-- 
GitLab