CHROMA
syssolver_mdagm_clover_quda_multigrid_w.cc
/*! \file
 *  \brief QUDA MULTIGRID MdagM Clover solver.
 */
#include "io/aniso_io.h"

#include "handle.h"
#include "meas/glue/mesplq.h"

// QUDA Headers
#include <quda.h>
// #include <util_quda.h>

#include <string>
#include <iomanip>
#include <ctime>
#include <cstring>
namespace Chroma
{
  namespace MdagMSysSolverQUDAMULTIGRIDCloverEnv
  {

    //! Anonymous namespace
    namespace
    {
      //! Name to be used
      const std::string name("QUDA_MULTIGRID_CLOVER_INVERTER");

      //! Local registration flag
      bool registered = false;
    }
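
    // The name above is the key under which this solver is registered with the
    // MdagM system-solver factory (see registerAll() below); input that selects
    // this inverter by that name is routed to the createFerm() callback.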

    MdagMSystemSolver<LatticeFermion>* createFerm(XMLReader& xml_in,
                                                  const std::string& path,
                                                  Handle< FermState< LatticeFermion, multi1d<LatticeColorMatrix>, multi1d<LatticeColorMatrix> > > state,
                                                  Handle< LinearOperator<LatticeFermion> > A)
    {
      return new MdagMSysSolverQUDAMULTIGRIDClover(A, state, SysSolverQUDAMULTIGRIDCloverParams(xml_in, path));
    }

    //! Register all the factories
    bool registerAll()
    {
      bool success = true;
      if (! registered)
      {
        success &= Chroma::TheMdagMFermSystemSolverFactory::Instance().registerObject(name, createFerm);
        registered = true;
      }
      return success;
    }
  }

  SystemSolverResults_t
  MdagMSysSolverQUDAMULTIGRIDClover::qudaInvert(const CloverTermT<T, U>& clover,
                                                const CloverTermT<T, U>& invclov,
                                                const T& chi_s,
                                                T& psi_s) const
  {
    SystemSolverResults_t ret;

    T mod_chi;

    // mod_chi holds the (possibly transformed) source: zero its off-parity
    // (even) checkerboard here; the odd checkerboard is filled in below.
    mod_chi[rb[0]] = zero;
76 
    // This solver always solves with the SYMMETRIC preconditioned
    // operator.  If we are working with ASYMMETRIC preconditioning
    // then we must first apply a clover inverse to the source.
    if( invParam.asymmetricP ) {
      //
      // QUDA solves with the symmetric operator
      //
      //    M_symm = 1 - A^{-1}_oo D A^{-1}_ee D
      //
      // whereas the Chroma (asymmetric) operator is M = A_oo M_symm.
      //
      // So M x = b  =>  A_oo M_symm x = b
      //             =>  M_symm x = A^{-1}_oo b = mod_chi
      //
      invclov.apply(mod_chi, chi_s, PLUS, 1);
    }
    else {
      // With symmetric preconditioning nothing else needs to be done:
      // just copy the source onto the odd checkerboard.
      mod_chi[rb[1]] = chi_s;
    }
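
    // At this point the odd checkerboard (rb[1]) of mod_chi holds either
    // A^{-1}_oo chi_s (asymmetric case) or chi_s itself (symmetric case), and
    // its even checkerboard is zero.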
96 
#ifndef BUILD_QUDA_DEVIFACE_SPINOR
    void* spinorIn  = (void *)&(mod_chi.elem(rb[1].start()).elem(0).elem(0).real());
    void* spinorOut = (void *)&(psi_s.elem(rb[1].start()).elem(0).elem(0).real());
#else
    // void* spinorIn  = GetMemoryPtr( mod_chi.getId() );
    // void* spinorOut = GetMemoryPtr( psi_s.getId() );
    void* spinorIn;
    void* spinorOut;
    GetMemoryPtr2(spinorIn, spinorOut, mod_chi.getId(), psi_s.getId());
#endif
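
    // spinorIn points at the start of the odd-checkerboard (rb[1]) data of the
    // prepared source and spinorOut at the corresponding data of psi_s: QUDA
    // reads the source through spinorIn and writes the solution back through
    // spinorOut.  In the device-interface build the raw pointers are obtained
    // via GetMemoryPtr2() from the ids of the two QDP objects instead.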

    // Do the solve here
    StopWatch swatch1;
    swatch1.reset();
    swatch1.start();
    invertQuda(spinorOut, spinorIn, (QudaInvertParam*)&quda_inv_param);
    swatch1.stop();
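
    // invertQuda() fills in quda_inv_param.secs, .gflops and .iter; these are
    // reported below and the iteration count is passed back to the caller.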
    QDPIO::cout << solver_string << "time=" << quda_inv_param.secs << " s";
    QDPIO::cout << "\tPerformance=" << quda_inv_param.gflops / quda_inv_param.secs << " GFLOPS";
    QDPIO::cout << "\tTotal Time (incl. load gauge)=" << swatch1.getTimeInSeconds() << " s" << std::endl;

    ret.n_count = quda_inv_param.iter;

    return ret;
  }


  void MdagMSysSolverQUDAMULTIGRIDClover::dumpYSolver(const LatticeFermion& chi, const LatticeFermion& Y) const
  {
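    // Post-mortem dump for a failed Y solve: write the gauge links, the
    // original source chi and the Y vector to LIME files and, when
    // QUDA_MG_DUMP_ENABLED is defined, the current multigrid subspace, so the
    // failing case can be examined offline.
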
    // Grab the time - took this C++ way from stackoverflow
    auto time_value = std::time(nullptr);
    auto local_time = std::localtime(&time_value);
    std::ostringstream time_strstream;
    time_strstream << "./failed_Y_solve_" << std::put_time(local_time, "%d-%m-%Y-%H-%M-%S");

    std::string file_prefix( time_strstream.str() );

    std::string gauge_filename = file_prefix + "_gauge_field.lime";
    std::string chi_filename   = file_prefix + "_chi.lime";
    std::string Y_filename     = file_prefix + "_Y.lime";

    int foo = 5; // Some rubbish for the XML Files
    // Dump gauge field
    {
      XMLBufferWriter filebuf;
      XMLBufferWriter recbuf;
      push( filebuf, "GaugeFile" );
      write( filebuf, "FilePrefix", file_prefix );
      pop( filebuf );

      push( recbuf, "GaugeRecord" );
      write( recbuf, "FilePrefix", file_prefix );
      pop( recbuf );

      QDPIO::cout << "Dumping gauge links to " << gauge_filename << std::endl;

      QDPFileWriter writer(filebuf, gauge_filename, QDPIO_SINGLEFILE, QDPIO_PARALLEL);
      write(writer, recbuf, gstate->getLinks());
      writer.close();
    }

    // Dump chi
    {
      XMLBufferWriter filebuf;
      XMLBufferWriter recbuf;
      push( filebuf, "ChiFile" );
      write( filebuf, "FilePrefix", file_prefix );
      pop( filebuf );

      push( recbuf, "ChiRecord" );
      write( recbuf, "FilePrefix", file_prefix );
      pop( recbuf );

      QDPIO::cout << "Dumping chi (original source) vector to " << chi_filename << std::endl;

      QDPFileWriter writer(filebuf, chi_filename, QDPIO_SINGLEFILE, QDPIO_PARALLEL);
      write(writer, recbuf, chi);
      writer.close();
    }

    // Dump Y
    {
      XMLBufferWriter filebuf;
      XMLBufferWriter recbuf;
      push( filebuf, "YFile" );
      write( filebuf, "FilePrefix", file_prefix );
      pop( filebuf );

      push( recbuf, "YRecord" );
      write( recbuf, "FilePrefix", file_prefix );
      pop( recbuf );

      QDPIO::cout << "Dumping Y (source) vector to " << Y_filename << std::endl;

      QDPFileWriter writer(filebuf, Y_filename, QDPIO_SINGLEFILE, QDPIO_PARALLEL);
      write(writer, recbuf, Y);
      writer.close();
    }

    // Dump MG state
    {
      auto mg_params = *(invParam.MULTIGRIDParams);
      for(int l = 0; l < mg_params.mg_levels; ++l) {
        std::ostringstream subspace_prefix;
        subspace_prefix << file_prefix << "_subspace_l" << l;

        // Copy up to the length of the buffer (256), padded with zeros
        std::strncpy((subspace_pointers->mg_param).vec_outfile[l], (subspace_prefix.str()).c_str(), 256);
        // If the source string is too long it will be truncated and not null terminated, so null terminate
        if( subspace_prefix.str().size() > 255 ) { (subspace_pointers->mg_param).vec_outfile[l][255] = '\0'; }
      }
      // Make sure everyone has their variables set before calling dumpMultigridQuda.
      // Strictly speaking I am using a global sum as a barrier here.
      double i = 10;
      QDPInternal::globalSum(i);

#ifdef QUDA_MG_DUMP_ENABLED
      dumpMultigridQuda(subspace_pointers->preconditioner, &(subspace_pointers->mg_param));
#endif
224 
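      // Reset the subspace output filenames to empty strings so that the
      // subspace vectors are not written out again later.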
      for(int l = 0; l < mg_params.mg_levels; ++l) {
        (subspace_pointers->mg_param).vec_outfile[l][0] = '\0';
      }
      QDPInternal::globalSum(i);
    }
  }


  unsigned long MdagMSysSolverQUDAMULTIGRIDClover::seqno = 0;

  void MdagMSysSolverQUDAMULTIGRIDClover::dumpXSolver(const LatticeFermion& chi,
                                                      const LatticeFermion& Y,
                                                      const LatticeFermion& X) const
  {
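    // Post-mortem dump for a failed X solve: as in dumpYSolver() above, write
    // the gauge links, chi and Y, plus the solution vector X, and, when
    // QUDA_MG_DUMP_ENABLED is defined, the current multigrid subspace.
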
    // Grab the time - took this C++ way from stackoverflow
    auto time_value = std::time(nullptr);
    auto local_time = std::localtime(&time_value);
    std::ostringstream time_strstream;
    time_strstream << "./failed_X_solve_" << std::put_time(local_time, "%d-%m-%Y-%H-%M-%S");

    std::string file_prefix( time_strstream.str() );

    std::string gauge_filename = file_prefix + "_gauge_field.lime";
    std::string chi_filename   = file_prefix + "_chi.lime";
    std::string Y_filename     = file_prefix + "_Y.lime";
    std::string X_filename     = file_prefix + "_X.lime";

    int foo = 5; // Some rubbish for the XML Files
    // Dump gauge field
    {
      XMLBufferWriter filebuf;
      XMLBufferWriter recbuf;
      push( filebuf, "GaugeFile" );
      write( filebuf, "FilePrefix", file_prefix );
      pop( filebuf );

      push( recbuf, "GaugeRecord" );
      write( recbuf, "FilePrefix", file_prefix );
      pop( recbuf );

      QDPIO::cout << "Dumping gauge links to " << gauge_filename << std::endl;

      QDPFileWriter writer(filebuf, gauge_filename, QDPIO_SINGLEFILE, QDPIO_PARALLEL);
      write(writer, recbuf, gstate->getLinks());
      writer.close();
    }

    // Dump chi
    {
      XMLBufferWriter filebuf;
      XMLBufferWriter recbuf;
      push( filebuf, "ChiFile" );
      write( filebuf, "FilePrefix", file_prefix );
      pop( filebuf );

      push( recbuf, "ChiRecord" );
      write( recbuf, "FilePrefix", file_prefix );
      pop( recbuf );

      QDPIO::cout << "Dumping chi (original source) vector to " << chi_filename << std::endl;

      QDPFileWriter writer(filebuf, chi_filename, QDPIO_SINGLEFILE, QDPIO_PARALLEL);
      write(writer, recbuf, chi);
      writer.close();
    }

    // Dump Y
    {
      XMLBufferWriter filebuf;
      XMLBufferWriter recbuf;
      push( filebuf, "YFile" );
      write( filebuf, "FilePrefix", file_prefix );
      pop( filebuf );

      push( recbuf, "YRecord" );
      write( recbuf, "FilePrefix", file_prefix );
      pop( recbuf );

      QDPIO::cout << "Dumping Y (source) vector to " << Y_filename << std::endl;

      QDPFileWriter writer(filebuf, Y_filename, QDPIO_SINGLEFILE, QDPIO_PARALLEL);
      write(writer, recbuf, Y);
      writer.close();
    }

    // Dump final X
    {
      XMLBufferWriter filebuf;
      XMLBufferWriter recbuf;
      push( filebuf, "XFile" );
      write( filebuf, "FilePrefix", file_prefix );
      pop( filebuf );

      push( recbuf, "XRecord" );
      write( recbuf, "FilePrefix", file_prefix );
      pop( recbuf );

      QDPIO::cout << "Dumping X (solution) vector to " << X_filename << std::endl;

      QDPFileWriter writer(filebuf, X_filename, QDPIO_SINGLEFILE, QDPIO_PARALLEL);
      write(writer, recbuf, X);
      writer.close();
    }

    // Dump MG state
    {
      auto mg_params = *(invParam.MULTIGRIDParams);
      for(int l = 0; l < mg_params.mg_levels; ++l) {
        std::ostringstream subspace_prefix;
        subspace_prefix << file_prefix << "_subspace_l" << l;

        // Copy up to the length of the buffer (256), padded with zeros
        std::strncpy((subspace_pointers->mg_param).vec_outfile[l], (subspace_prefix.str()).c_str(), 256);
        // If the source string is too long it will be truncated and not null terminated, so null terminate
        if( subspace_prefix.str().size() > 255 ) { (subspace_pointers->mg_param).vec_outfile[l][255] = '\0'; }
      }
      // Make sure everyone has their variables set before calling dumpMultigridQuda.
      // I use a global sum as a barrier here.
      double i = 10;
      QDPInternal::globalSum(i);

#ifdef QUDA_MG_DUMP_ENABLED
      dumpMultigridQuda(subspace_pointers->preconditioner, &(subspace_pointers->mg_param));
#endif
356 
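      // Reset the subspace output filenames to empty strings so that the
      // subspace vectors are not written out again later.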
      for(int l = 0; l < mg_params.mg_levels; ++l) {
        (subspace_pointers->mg_param).vec_outfile[l][0] = '\0';
      }
      // I use a global sum as a weak barrier here.
      QDPInternal::globalSum(i); // Make sure everyone is done
    }
  }

} // namespace