Xmipp  v3.23.11-Nereus
mpi_reconstruct_art.cpp
1 /***************************************************************************
2  * Authors: Joaquin Oton (joton@cnb.csic.es)
3  *
4  *
5  * Unidad de Bioinformatica of Centro Nacional de Biotecnologia , CSIC
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
20  * 02111-1307 USA
21  *
22  * All comments concerning this program package may be sent to the
23  * e-mail address 'xmipp@cnb.csic.es'
24  ***************************************************************************/
25 
26 #include <fstream>
27 #include <sys/resource.h>
28 #include "mpi_reconstruct_art.h"
30 
32 ProgMPIReconsArt::ProgMPIReconsArt()
33 {
34  isMpi = true;
35 }
36 
37 /* constructor ------------------------------------------------------- */
38 ProgMPIReconsArt::ProgMPIReconsArt(int argc, char **argv)
39 {
40  this->read(argc, argv);
41 }
42 
43 /* constructor providing an MpiNode
44  * this is useful for using this program from others
45  */
46 ProgMPIReconsArt::ProgMPIReconsArt(const std::shared_ptr<MpiNode> &node)
47 {
48  this->setNode(node);
49 }
50 
51 
52 /* Run --------------------------------------------------------------------- */
53 void ProgMPIReconsArt::run()
54 {
56 
57  Image<double> vol_voxels, vol_voxels_aux; // Volume to reconstruct
58  GridVolume vol_basis;
59 
60  int num_img_tot; // The total number of images
61  int num_img_node; // The number of images each node deals with
62  size_t remaining; // Remainder after distributing the projections evenly among nodes
63  int Npart; // Number of projections per node (before distributing the remainder)
64  int myFirst; // Index of the first projection this node processes
65  double comms_t=0., aux_comm_t; // Communications time
66  double it_t; // iteration time
67  double cav_t; // time for CAV weights calculation
68  double cavk_it_t; // BiCAV weights calculation time (1 iter.)
69  double cavk_total_t; // Sum( cavk_it_t )
70  USWtime_t recons_t; // Reconstruction time
71  double total_t=0.; // Program execution time
72  double comms_t_it, aux_t; // Communications time in one iteration
73  GridVolumeT<int> GVNeq_aux; // This is a buffer for the communication
74  Matrix1D<int> Ordered_aux;
75 
76 
77  /*************************** PARAMETERS INITIALIZATION ***************************/
78  cavk_total_t = 0.0;
79 
80  if (node->isMaster()) // Master code
81  {
82  total_t = MPI_Wtime(); // to perform overall execution time
83  comms_t = 0.0; // Initializes time
84 
85  show();
86  artRecons->preProcess(vol_basis, FULL, node->rank);
87  if (verbose > 0)
88  {
89  std::cout << " ---------------------------------------------------------------------" << std::endl;
90  std::cout << " Projections : " << artRecons->artPrm.numIMG << std::endl;
91  }
92  //Initialize history
93  artRecons->initHistory(vol_basis);
94 
95  // ordered list must be the same in all nodes
96  aux_comm_t = MPI_Wtime();
97  MPI_Bcast(MULTIDIM_ARRAY(artPrm.ordered_list), MULTIDIM_SIZE(artPrm.ordered_list), MPI_INT, 0, MPI_COMM_WORLD);
98  comms_t += MPI_Wtime() - aux_comm_t;
99  }
100  else
101  {
102  // each process can handle its own history file
103  // so we add the id number to the root filename
104  FileName aux = artPrm.fn_root;
105 
106  artPrm.fn_root = artPrm.fn_root + integerToString(node->rank);
107  artRecons->preProcess(vol_basis, FULL, node->rank);
108 
109  // Restore original filename.
110  artPrm.fn_root = aux;
111 
112  // ordered list must be the same in all nodes
113  MPI_Bcast(MULTIDIM_ARRAY(artPrm.ordered_list),
114  MULTIDIM_SIZE(artPrm.ordered_list), MPI_INT, 0, MPI_COMM_WORLD);
115  }
116 
117  // How many images each node processes. The projections are split so that every
118  // node receives almost the same number of them
119 
120  num_img_tot = artPrm.numIMG;
121 
122  Npart = (int)((float)num_img_tot / (float)nProcs);
123 
124  num_img_node = Npart;
125 
126  remaining = num_img_tot % nProcs;
127 
128  // each process will only see the images it is interested in.
129  if (node->rank < remaining)
130  {
131  num_img_node++;
132  myFirst = node->rank * Npart + node->rank;
133  }
134  else
135  myFirst = Npart * node->rank + remaining;
136 
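 // e.g. with num_img_tot = 10 and nProcs = 4: Npart = 2 and remaining = 2, so
 // ranks 0 and 1 process 3 images each (myFirst = 0 and 3) while ranks 2 and 3
 // process 2 images each (myFirst = 6 and 8)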
137  // Shift the starting position for each node
138 // myLast = myFirst + num_img_node - 1; // Actually it isn't used
139 
140  STARTINGX(artPrm.ordered_list) = -myFirst;
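 // Shifting the logical origin of ordered_list makes index 0, as seen by the
 // sequential ART routine, point to this node's first projection, so each node
 // only walks through its own share of the ordered projection list.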
141 
142  GridVolume vol_basis_aux = vol_basis;
143  GridVolume vol_aux2 = vol_basis;
144 
145  if (artPrm.parallel_mode == BasicARTParameters::pSART ||
146  artPrm.parallel_mode == BasicARTParameters::pBiCAV)
147  {
148  if (artPrm.block_size < 1)
149  artPrm.block_size = 1; // Each processor will have at least one projection
150  else if (artPrm.block_size > num_img_node)
151  artPrm.block_size = num_img_node; // block_size is at most num_img_node
152  }
153 
154  // Print some data
155  if (verbose > 0)
156  {
157  std::cout << " Parallel mode : " ;
158  if (artPrm.parallel_mode == BasicARTParameters::pSART)
159  std::cout << "pSART " << "BlockSize = " << artPrm.block_size << std::endl;
160  else if (artPrm.parallel_mode == BasicARTParameters::pSIRT)
161  std::cout << "pSIRT" << std::endl;
162  else if (artPrm.parallel_mode == BasicARTParameters::pfSIRT)
163  std::cout << "pfSIRT" << std::endl;
164  else if (artPrm.parallel_mode == BasicARTParameters::pBiCAV)
165  std::cout << "pBiCAV " << "BlockSize = " << artPrm.block_size << std::endl;
166  else if (artPrm.parallel_mode == BasicARTParameters::pAVSP)
167  std::cout << "pAVSP" << std::endl;
168  else if (artPrm.parallel_mode == BasicARTParameters::pCAV)
169  std::cout << "pCAV" << std::endl;
170  else if (artPrm.parallel_mode == BasicARTParameters::ART)
171  std::cout << "ART" << std::endl;
172  else if (artPrm.parallel_mode == BasicARTParameters::SIRT)
173  std::cout << "SIRT" << std::endl;
174  std::cout << " Number of processors : " << nProcs << std::endl;
175  std::cout << " ---------------------------------------------------------------------" << std::endl;
176  }
177 
178 
179  /*************************** CAV weights precalculation *************************/
180  if (artPrm.parallel_mode == BasicARTParameters::pCAV)
181  {
182  // Creates and initializes special variables needed for CAV weights computation.
183 
184  /* EXTRA CALCULATIONS FOR CAV WEIGHTS: Each node computes its own part related to its images
185  and after that they send each other their results and sum them up. This part of the code has
186  been taken and modified from Basic_art.cc produceSideInfo().*/
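 // GVNeq stores, for each basis element, the number of equations (projection
 // pixels) that involve it; the MPI_Allreduce below sums the per-node counts so
 // that every node ends up with the global CAV weights.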
187 
188  cav_t = MPI_Wtime();
189  artPrm.computeCAVWeights(vol_basis, num_img_node, verbose-1);
190  GVNeq_aux = *(artPrm.GVNeq);
191 
192  MPI_Barrier(MPI_COMM_WORLD); // Actually, this isn't necessary.
193  for (size_t n = 0 ; n < (artPrm.GVNeq)->VolumesNo(); n++)
194  {
195  aux_comm_t = MPI_Wtime();
196  MPI_Allreduce(MULTIDIM_ARRAY((*(artPrm.GVNeq))(n)()),
197  MULTIDIM_ARRAY(GVNeq_aux(n)()),
198  MULTIDIM_SIZE((*(artPrm.GVNeq))(n)()), MPI_INT, MPI_SUM, MPI_COMM_WORLD);
199  comms_t += MPI_Wtime() - aux_comm_t;
200  (*(artPrm.GVNeq))(n)() = GVNeq_aux(n)();
201  }
202  if (verbose > 0)
203  std::cout << "Elapsed time for pCAV weights computation : " << MPI_Wtime() - cav_t << std::endl;
204  }
205 
206  /*************************** PARALLEL ART ALGORITHM ***************************/
207  // A bit tricky. This allows us to reuse the sequential Basic_ART_iterations routine,
208  // since in parallel it internally runs only one iteration at a time.
209  int num_iter = artPrm.no_it;
210  artPrm.no_it = 1;
211 
212  for (int i = 0; i < num_iter; i++)
213  {
214  comms_t_it = 0.0;
215  cavk_it_t = 0.0;
216 
217  it_t = MPI_Wtime();
218 
219  artPrm.lambda_list(0) = artPrm.lambda(i);
220 
221  if (artPrm.parallel_mode == BasicARTParameters::pSART)
222  {
223  int numsteps = num_img_node / artPrm.block_size;
224 
225  // another step could be necessary for the remaining projections
226  int stepR;
227  bool oneMoreStep = (stepR = num_img_node % artPrm.block_size) != 0;
228  if (oneMoreStep)
229  numsteps++;
230 
231  artPrm.numIMG = artPrm.block_size;
232  int blockSizeTot = artPrm.block_size * nProcs;
233 
234  // STARTINGX(artPrm.ordered_list) = -myFirst; //Already assigned
235  for (int ns = 0; ns < numsteps ; ns++)
236  {
237  // Calculation of blockSizeTot used for normalization
238  if (ns == numsteps - 2 && oneMoreStep)
239  blockSizeTot = (artPrm.block_size - 1) * nProcs + stepR;
240  else if (ns == numsteps - 1)
241  {
242  artPrm.numIMG = num_img_node - artPrm.block_size*ns;
243  if (oneMoreStep)
244  blockSizeTot = artPrm.numIMG * stepR;
245  else
246  blockSizeTot = artPrm.block_size*stepR + artPrm.numIMG*(nProcs - stepR);
247  }
248 
249  for (size_t j = 0 ; j < vol_basis.VolumesNo() ; j++)
250  vol_aux2(j)() = vol_basis(j)();
251 
252  artRecons->iterations(vol_basis, node->rank);
253 
254  STARTINGX(artPrm.ordered_list) -= artPrm.numIMG;
255 
256 // node->updateComm();//Update communicator to avoid already finished nodes
257 
258  // All processors send their result and get the others' so all of them
259  // have the same volume for the next step.
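 // Each node only contributes the correction it computed for this block
 // (vol_basis - vol_aux2); the corrections are summed with MPI_Allreduce,
 // divided by blockSizeTot and added back to the previous volume, i.e. the
 // update is the average correction over the whole parallel block.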
260  for (size_t j = 0 ; j < vol_basis.VolumesNo() ; j++)
261  {
262  vol_basis(j)() = vol_basis(j)() - vol_aux2(j)(); // Adapt result to parallel environment from sequential routine
263  aux_comm_t = MPI_Wtime();
264  MPI_Allreduce(MULTIDIM_ARRAY(vol_basis(j)()),
265  MULTIDIM_ARRAY(vol_basis_aux(j)()),
266  MULTIDIM_SIZE(vol_basis(j)()),
267  MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
268  aux_t = MPI_Wtime() - aux_comm_t;
269  comms_t += aux_t;
270  comms_t_it += aux_t;
271  vol_basis(j)() = vol_aux2(j)() + (vol_basis_aux(j)() / (double) blockSizeTot);
272  }
273  }
274  }
275  else if (artPrm.parallel_mode == BasicARTParameters::pBiCAV)
276  {
277  // Creates and initializes special variables needed for BiCAV weights computation.
278  GridVolumeT<int> GVNeq_aux; // This is a buffer for communications
279 
280  int numsteps = num_img_node / artPrm.block_size;
281 
282  int processed = 0;
283 
284  if ((num_img_node % artPrm.block_size) != 0)
285  numsteps++;
286 
287  artPrm.numIMG = artPrm.block_size;
288 
289  artPrm.parallel_mode = BasicARTParameters::pCAV; // Another trick
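 // pBiCAV is block-iterative CAV: every block is processed as a CAV step, so the
 // sequential routine is temporarily switched to pCAV mode and the original mode
 // is restored after the block loop ("trick undone" below).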
290 
291  STARTINGX(artPrm.ordered_list) = -myFirst;
292 
293  for (int ns = 0; ns < numsteps ; ns++)
294  {
295  if (ns == numsteps - 1)
296  artPrm.numIMG = num_img_node - processed;
297 
298  /*
299  EXTRA CALCULATIONS FOR BICAV WEIGHTS: Each node computes its own part related to its images
300  and after that they send each other their results and sum them up. This part of the code has been
301  taken and modified from Basic_art.cc produceSideInfo().
302  */
303 
304  cav_t = MPI_Wtime();
305 
306  artPrm.computeCAVWeights(vol_basis, artPrm.numIMG, verbose-1);
307  GVNeq_aux = *(artPrm.GVNeq);
308 
309 // node->updateComm();
310  // All processors send their result and get the others' so all of them
311  // have the weights.
312 
313  for (size_t n = 0 ; n < (artPrm.GVNeq)->VolumesNo(); n++)
314  {
315  aux_comm_t = MPI_Wtime();
316  MPI_Allreduce(MULTIDIM_ARRAY((*(artPrm.GVNeq))(n)()),
317  MULTIDIM_ARRAY(GVNeq_aux(n)()),
318  MULTIDIM_SIZE((*(artPrm.GVNeq))(n)()), MPI_INT, MPI_SUM, MPI_COMM_WORLD);
319  aux_t = MPI_Wtime() - aux_comm_t;
320  comms_t += aux_t;
321  comms_t_it += aux_t;
322  (*(artPrm.GVNeq))(n)() = GVNeq_aux(n)();
323  }
324  cavk_it_t += MPI_Wtime() - cav_t;
325 
326  for (size_t jj = 0 ; jj < vol_basis.VolumesNo() ; jj++)
327  vol_aux2(jj)() = vol_basis(jj)();
328 
329  artRecons->iterations(vol_basis, node->rank);
330 
331  if (ns < (numsteps - 1))
332  {
333  STARTINGX(artPrm.ordered_list) -= artPrm.numIMG;
334  processed += artPrm.numIMG;
335  }
336 
337  // All processors send their result and get the others' so all of them
338  // have the same volume for the next step.
339 
340  for (size_t jj = 0 ; jj < vol_basis.VolumesNo() ; jj++)
341  {
342  vol_basis(jj)() = vol_basis(jj)() - vol_aux2(jj)(); // Adapt result to parallel environment from sequential routine
343  aux_comm_t = MPI_Wtime();
344  MPI_Allreduce(MULTIDIM_ARRAY(vol_basis(jj)()),
345  MULTIDIM_ARRAY(vol_basis_aux(jj)()),
346  MULTIDIM_SIZE(vol_basis(jj)()),
347  MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
348  aux_t = MPI_Wtime() - aux_comm_t;
349  comms_t += aux_t;
350  comms_t_it += aux_t;
351 
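 // Component averaging: the summed correction of each basis element is divided
 // by the number of equations that involve it (GVNeq_aux) and added to the
 // previous value; elements that take part in no equation are simply set to zero.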
352  FOR_ALL_ELEMENTS_IN_ARRAY3D(vol_basis(jj)())
353  if (A3D_ELEM(GVNeq_aux(jj)(), k, i, j) == 0)
354  {
355  if (A3D_ELEM(vol_basis_aux(jj)(), k, i, j) == 0)
356  A3D_ELEM(vol_basis(jj)(), k, i, j) = 0;
357  else // This case should not happen as this element is not affected by actual projections
358  REPORT_ERROR(ERR_VALUE_INCORRECT,"Error with weights, contact developers!");
359  }
360  else
361  {
362  A3D_ELEM(vol_basis(jj)(), k, i, j) =
363  A3D_ELEM(vol_aux2(jj)(), k, i, j) +
364  A3D_ELEM(vol_basis_aux(jj)(), k, i, j) /
365  A3D_ELEM(GVNeq_aux(jj)(), k, i, j);
366  }
367  }
368  }
369  artPrm.parallel_mode = BasicARTParameters::pBiCAV; // trick undone
370  }
371  else if (artPrm.parallel_mode == BasicARTParameters::pCAV)
372  {
373  // CAV weights calculations have been done before the iterations begin in order to avoid recalculating them
374  for (size_t j = 0 ; j < vol_basis.VolumesNo() ; j++)
375  vol_aux2(j)() = vol_basis(j)();
376 
377  artPrm.numIMG = num_img_node;
378 
379  STARTINGX(artPrm.ordered_list) = -myFirst;
380 
381  artRecons->iterations(vol_basis, node->rank);
382 
383  // All processors send their result and get the others' so all of them
384  // have the same volume for the next step.
385  MPI_Barrier(MPI_COMM_WORLD);
386  for (size_t jj = 0 ; jj < vol_basis.VolumesNo() ; jj++)
387  {
388  vol_basis(jj)() = vol_basis(jj)() - vol_aux2(jj)(); // Adapt result to parallel environment from sequential routine
389  aux_comm_t = MPI_Wtime();
390  MPI_Allreduce(MULTIDIM_ARRAY(vol_basis(jj)()),
391  MULTIDIM_ARRAY(vol_basis_aux(jj)()),
392  MULTIDIM_SIZE(vol_basis(jj)()),
393  MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
394  aux_t = MPI_Wtime() - aux_comm_t;
395  comms_t += aux_t;
396  comms_t_it += aux_t;
397 
398  FOR_ALL_ELEMENTS_IN_ARRAY3D(vol_basis(jj)())
399  if (A3D_ELEM(GVNeq_aux(jj)(), k, i, j) == 0) // Division by 0
400  {
401  if (A3D_ELEM(vol_basis_aux(jj)(), k, i, j) == 0)
402  A3D_ELEM(vol_basis(jj)(), k, i, j) = 0;
403  else // This case should not happen as this element is not affected by actual projections
404  REPORT_ERROR(ERR_VALUE_INCORRECT,"Error with weights, contact developers!");
405  }
406  else
407  {
408  A3D_ELEM(vol_basis(jj)(), k, i, j) =
409  A3D_ELEM(vol_aux2(jj)(), k, i, j) +
410  A3D_ELEM(vol_basis_aux(jj)(), k, i, j) /
411  A3D_ELEM(GVNeq_aux(jj)(), k, i, j);
412  }
413  }
414  }
415  else
416  { // SIRT AND ASS
417 
418  artPrm.numIMG = num_img_node;
419 
420  STARTINGX(artPrm.ordered_list) = -myFirst;
421 
422  if (artPrm.parallel_mode == BasicARTParameters::pSIRT ||
423  artPrm.parallel_mode == BasicARTParameters::pfSIRT)
424  {
425  for (size_t jj = 0 ; jj < vol_basis.VolumesNo() ; jj++)
426  vol_aux2(jj)() = vol_basis(jj)();
427  }
428 
429  artRecons->iterations(vol_basis, node->rank);
430 
431  // All processors send their result and get the others' so all of them
432  // have the same volume for the next step.
433  MPI_Barrier(MPI_COMM_WORLD);
434  for (size_t jj = 0 ; jj < vol_basis.VolumesNo() ; jj++)
435  {
436  // SIRT Alg. needs to store previous results but AVSP doesn't
437  if (artPrm.parallel_mode == BasicARTParameters::pSIRT ||
438  artPrm.parallel_mode == BasicARTParameters::pfSIRT)
439  vol_basis(jj)() = vol_basis(jj)() - vol_aux2(jj)(); // Adapt result to parallel environment from sequential routine
440 
441  aux_comm_t = MPI_Wtime();
442 
443  MPI_Allreduce(MULTIDIM_ARRAY(vol_basis(jj)()),
444  MULTIDIM_ARRAY(vol_basis_aux(jj)()),
445  MULTIDIM_SIZE(vol_basis(jj)()),
446  MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
447 
448  aux_t = MPI_Wtime() - aux_comm_t;
449  comms_t += aux_t;
450  comms_t_it += aux_t;
451 
452  if (artPrm.parallel_mode == BasicARTParameters::pfSIRT)
453  {
454  auto norm_value = (double) num_img_tot;
455  vol_basis(jj)() = vol_aux2(jj)() + (vol_basis_aux(jj)() / norm_value);
456  }
457  else if (artPrm.parallel_mode == BasicARTParameters::pSIRT)
458  {
459  double norm_value = (double) num_img_tot * (double)(artPrm.ProjXdim() * artPrm.ProjYdim());
460  vol_basis(jj)() = vol_aux2(jj)() + (vol_basis_aux(jj)() / norm_value);
461  }
462  else // ASS
463  {
464  vol_basis(jj)() = vol_basis_aux(jj)();
465  vol_basis(jj)() /= nProcs; // Non-SIRT Normalization
466  }
467  }
468  }
469  cavk_total_t += cavk_it_t;
470 
471  // Only the Master process will output the results (it is the only one with verbose != 0)
472  if (verbose)
473  {
474  std::cout << "\nIteration " << i << std::endl;
475  std::cout << "Time: " << MPI_Wtime() - it_t << " secs." << std::endl;
476  std::cout << "Comms. time: " << comms_t_it << " secs." << std::endl;
477  if (artPrm.parallel_mode == BasicARTParameters::pBiCAV)
478  std::cout << "BiCAV weighting time: " << cavk_it_t << std::endl;
479  }
480  if (node->isMaster() && (artPrm.tell&TELL_SAVE_BASIS) && (i < num_iter - 1))
481  vol_basis.write(artPrm.fn_root + "_it" + integerToString(i + 1) + ".basis");
482  }
483 
484  /*************************** FINISHING AND STORING VALUES ***************************/
485 
486  if (node->isMaster())
487  {
488  int Xoutput_volume_size=(artPrm.Xoutput_volume_size==0) ?
489  artPrm.projXdim:artPrm.Xoutput_volume_size;
490  int Youtput_volume_size=(artPrm.Youtput_volume_size==0) ?
491  artPrm.projYdim:artPrm.Youtput_volume_size;
492  int Zoutput_volume_size=(artPrm.Zoutput_volume_size==0) ?
493  artPrm.projXdim:artPrm.Zoutput_volume_size;
494  artPrm.basis.changeToVoxels(vol_basis, &(vol_voxels()),
495  Zoutput_volume_size, Youtput_volume_size, Xoutput_volume_size);
496  vol_voxels.write(artPrm.fn_root+".vol");
497 
498  if (artPrm.tell&TELL_SAVE_BASIS)
499  vol_basis.write(artPrm.fn_root + ".basis");
500 
501  uswtime(&recons_t);
502  if (verbose)
503  {
504  std::cout << "\n\n------ FINAL STATISTICS ------" << std::endl;
505  std::cout << "\nTOTAL EXECUTION TIME: " << recons_t.wall - total_t << std::endl;
506  std::cout << "Communications time: " << comms_t << " secs." << std::endl;
507  std::cout << "CPU time: " << recons_t.user + recons_t.sys << " secs." << std::endl;
508  std::cout << "USER: " << recons_t.user << " SYSTEM: " << recons_t.sys << std::endl;
509  if (artPrm.parallel_mode == BasicARTParameters::pBiCAV)
510  std::cout << "\nTotal pBiCAV Weighting time: " << cavk_total_t << std::endl;
511  }
512  }
513  artPrm.fh_hist->close();
514 }
515 
516 
517 /* ------------------------------------------------------------------------- */
518 /* Time managing stuff */
519 /* ------------------------------------------------------------------------- */
520 // Gets User and System times for use with MPI
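// Wall-clock time is taken from MPI_Wtime(), while user and system CPU times
// come from getrusage(RUSAGE_SELF), so all three refer to the calling process.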
521 void uswtime(USWtime_t *tm)
522 {
523  struct rusage buffer;
524 
525  tm->wall = MPI_Wtime();
526  getrusage(RUSAGE_SELF, &buffer);
527  tm->user = (double) buffer.ru_utime.tv_sec + 1.0e-6 * buffer.ru_utime.tv_usec;
528  tm->sys = (double) buffer.ru_stime.tv_sec + 1.0e-6 * buffer.ru_stime.tv_usec;
529 }
530 
531 
int numIMG
Total number of images to process (taking symmetries into account)
Definition: basic_art.h:348
size_t projXdim
Projection X dimension.
Definition: basic_art.h:333
Basis basis
Basis function. By default, blobs.
Definition: basic_art.h:97
#define REPORT_ERROR(nerr, ErrormMsg)
Definition: xmipp_error.h:211
void initHistory(const GridVolume &vol_basis0)
#define MULTIDIM_SIZE(v)
#define FULL
Definition: basic_art.h:406
void write(const FileName &name="", size_t select_img=ALL_IMAGES, bool isStack=false, int mode=WRITE_OVERWRITE, CastWriteMode castMode=CW_CAST, int _swapWrite=0)
#define MULTIDIM_ARRAY(v)
#define TELL_SAVE_BASIS
Definition: basic_art.h:276
String integerToString(int I, int _width, char fill_with)
std::ofstream * fh_hist
File handler for the history file.
Definition: basic_art.h:339
size_t projYdim
Projection Y dimension.
Definition: basic_art.h:336
#define STARTINGX(v)
void changeToVoxels(GridVolume &vol_basis, MultidimArray< double > *vol_voxels, int Zdim, int Ydim, int Xdim, int threads=1) const
Definition: basis.cpp:261
#define i
int block_size
Number of projections for each parallel block.
Definition: basic_art.h:112
#define A3D_ELEM(V, k, i, j)
int argc
Original command line arguments.
Definition: xmipp_program.h:86
#define FOR_ALL_ELEMENTS_IN_ARRAY3D(V)
void uswtime(USWtime_t *tm)
size_t VolumesNo() const
Definition: grids.h:1003
GridVolumeT< int > * GVNeq
Definition: basic_art.h:381
ARTReconsBase * artRecons
virtual void preProcess(GridVolume &vol_basis0, int level=FULL, int rank=-1)
Produce Plain side information from the Class parameters.
std::shared_ptr< MpiNode > node
Definition: xmipp_mpi.h:164
const char ** argv
Definition: xmipp_program.h:87
ARTParallelMode parallel_mode
Definition: basic_art.h:109
int verbose
Verbosity level.
FileName fn_root
Definition: basic_art.h:217
#define j
void computeCAVWeights(GridVolume &vol_basis0, int numProjs_node, int debug_level=0)
Definition: basic_art.cpp:692
int no_it
Number of iterations.
Definition: basic_art.h:100
void write(const FileName &fn) const
Definition: grids.h:1196
MultidimArray< int > ordered_list
Order in which projections will be presented to algorithm.
Definition: basic_art.h:345
void setNode(const std::shared_ptr< MpiNode > &node)
Definition: xmipp_mpi.cpp:256
void iterations(GridVolume &vol_basis, int rank=-1)
BasicARTParameters artPrm
double lambda(int n)
Definition: basic_art.h:438
ERR_VALUE_INCORRECT
Incorrect value received.
Definition: xmipp_error.h:195
int * n
void read(int argc, char **argv)
Definition: xmipp_mpi.cpp:240
Matrix1D< double > lambda_list
Relaxation parameter.
Definition: basic_art.h:103