GLAC  1.0
lattice.h
1 
13 #ifndef LATTICE_H
14 #define LATTICE_H
15 
16 #include <vector>
17 #include <mpi.h>
18 #include "parallelization/index.h"
19 #include "parallelization/neighbours.h"
20 #include "parallelization/parallelparameters.h"
21 #include "functions.h"
22 
23 template <class T>
24 class Lattice
25 {
26 public:
27  std::vector<T> m_sites;
28  std::vector<unsigned int> m_dim; // Lattice dimensions
29  unsigned long m_latticeSize;
30 
31  // Default constructors
32  Lattice() { }
33  Lattice(std::vector<unsigned int> latticeDimensions) {
34  allocate(latticeDimensions);
35  }
36 
37  // Destructor
38  ~Lattice() {}
39 
40  // Copy constructor
45  Lattice(const Lattice<T>& other) :
46  m_dim(other.m_dim),
47  m_latticeSize(other.m_latticeSize)
48  {
49  m_sites = other.m_sites;
50  }
51 
52  // Move constructor
57  Lattice(Lattice<T> && other) noexcept :
58  m_sites(std::move(other.m_sites)),
59  m_dim(other.m_dim),
60  m_latticeSize(other.m_latticeSize)
61  {
62  }
63 
64  // Copy assignment operator
70  Lattice &operator =(const Lattice& other) {
71  Lattice tmp(other);
72  *this = std::move(tmp);
73  return *this;
74  }
75 
76  // Move assignment operator
82  Lattice &operator= (Lattice<T>&& other) noexcept {
83  m_latticeSize = other.m_latticeSize;
84  m_dim = other.m_dim;
85  m_sites = std::move(other.m_sites);
86  return *this;
87  }
88 
93  void allocate(std::vector<unsigned int> dim);
94 
95  // Overloading lattice position getter
101  T &operator[](unsigned long i) {
102  return m_sites[i];
103  }
104 
110  T &operator()(unsigned long i) {
111  if (i >= m_latticeSize) {
112  printf("OUT OF BOUNDS WITH INDEX %lu of %lu\n", i, m_latticeSize);
113  exit(1);
114  }
115  return m_sites[i];
116  }
117 
118  // Overloading lattice operations
119  Lattice<T> &operator+=(Lattice<T>& B);
120  Lattice<T> &operator+=(Lattice<T>&& B);
121  Lattice<T> &operator-=(Lattice<T>& B);
122  Lattice<T> &operator-=(Lattice<T>&& B);
123  Lattice<T> &operator*=(Lattice<T>& B);
124  Lattice<T> &operator*=(Lattice<T>&& B);
125  // Operators using doubles; operations affect the whole lattice
126  Lattice<T> &operator*=(double B);
127  Lattice<T> &operator/=(double B);
128  // Operations involving complex numbers
129  Lattice<T> &operator+=(complex b);
130  Lattice<T> &operator-=(complex b);
131  Lattice<T> &operator*=(complex b);
132  Lattice<T> &operator/=(complex b);
133  // Lattice value setters
137  void identity();
141  void zeros();
147  Lattice<T> copy(Lattice<T> B);
148 };
149 
153 // External lattice operator overloading
154 template <class T>
155 inline Lattice<T> operator+(Lattice<T> A, Lattice<T> &B) {
156  A += B;
157  return A;
158 }
159 
160 template <class T>
161 inline Lattice<T> operator+(Lattice<T> A, Lattice<T> &&B) {
162  A += B;
163  return A;
164 }
165 
166 template <class T>
167 inline Lattice<T> operator-(Lattice<T> A, Lattice<T> &B) {
168  A -= B;
169  return A;
170 }
171 
172 template <class T>
173 inline Lattice<T> operator-(Lattice<T> A, Lattice<T> &&B) {
174  A -= B;
175  return A;
176 }
177 
178 template <class T>
179 inline Lattice<T> operator*(Lattice<T> A, Lattice<T> &B) {
180  A *= B;
181  return A;
182 }
183 
184 template <class T>
185 inline Lattice<T> operator*(Lattice<T> A, Lattice<T> &&B) {
186  A *= B;
187  return A;
188 }
189 
190 // External double operator overloading
191 template <class T>
192 inline Lattice<T> operator*(Lattice<T> A, double b) {
193  A *= b;
194  return A;
195 }
196 
197 template <class T>
198 inline Lattice<T> operator/(Lattice<T> A, double b) {
199  A /= b;
200  return A;
201 }
202 
203 // External complex operator overloading
204 template <class T>
205 inline Lattice<T> operator+(Lattice<T> A, complex b) {
206  A += b;
207  return A;
208 }
209 
210 template <class T>
211 inline Lattice<T> operator-(Lattice<T> A, complex b) {
212  A -= b;
213  return A;
214 }
215 
216 template <class T>
217 inline Lattice<T> operator*(Lattice<T> A, complex b) {
218  A *= b;
219  return A;
220 }
221 
222 template <class T>
223 inline Lattice<T> operator/(Lattice<T> A, complex b) {
224  A /= b;
225  return A;
226 }
227 
231 // Lattice operator overloading
232 template <class T>
233 inline Lattice<T> &Lattice<T>::operator+=(Lattice<T>& B) {
234  for (unsigned long int iSite = 0; iSite < m_latticeSize; iSite++) {
235  m_sites[iSite] += B[iSite];
236  }
237  return *this;
238 }
239 
240 template <class T>
241 inline Lattice<T> &Lattice<T>::operator+=(Lattice<T>&& B) {
242  for (unsigned long int iSite = 0; iSite < m_latticeSize; iSite++) {
243  m_sites[iSite] += B[iSite];
244  }
245  return *this;
246 }
247 
248 template <class T>
249 inline Lattice<T> &Lattice<T>::operator-=(Lattice<T>& B) {
250  for (unsigned long int iSite = 0; iSite < m_latticeSize; iSite++) {
251  m_sites[iSite] -= B[iSite];
252  }
253  return *this;
254 }
255 
256 template <class T>
257 inline Lattice<T> &Lattice<T>::operator-=(Lattice<T>&& B) {
258  for (unsigned long int iSite = 0; iSite < m_latticeSize; iSite++) {
259  m_sites[iSite] -= B[iSite];
260  }
261  return *this;
262 }
263 
264 template <class T>
265 inline Lattice<T> &Lattice<T>::operator*=(Lattice<T>& B) {
266  for (unsigned long int iSite = 0; iSite < m_latticeSize; iSite++) {
267  m_sites[iSite] *= B[iSite];
268  }
269  return *this;
270 }
271 template <class T>
272 inline Lattice<T> &Lattice<T>::operator*=(Lattice<T>&& B) {
273  for (unsigned long int iSite = 0; iSite < m_latticeSize; iSite++) {
274  m_sites[iSite] *= B[iSite];
275  }
276  return *this;
277 }
278 // Double operator overloading
279 template <class T>
280 inline Lattice<T> &Lattice<T>::operator*=(double b) {
281  for (unsigned long int iSite = 0; iSite < m_latticeSize; iSite++) {
282  m_sites[iSite] *= b;
283  }
284  return *this;
285 }
286 
287 template <class T>
288 inline Lattice<T> &Lattice<T>::operator/=(double b) {
289  for (unsigned long int iSite = 0; iSite < m_latticeSize; iSite++) {
290  m_sites[iSite] /= b;
291  }
292  return *this;
293 }
294 
295 // Complex operator overloading
296 template <class T>
297 inline Lattice<T> &Lattice<T>::operator+=(complex b) {
298  for (unsigned long int iSite = 0; iSite < m_latticeSize; iSite++) {
299  m_sites[iSite] += b;
300  }
301  return *this;
302 }
303 
304 template <class T>
305 inline Lattice<T> &Lattice<T>::operator-=(complex b) {
306  for (unsigned long int iSite = 0; iSite < m_latticeSize; iSite++) {
307  m_sites[iSite] -= b;
308  }
309  return *this;
310 }
311 
312 template <class T>
313 inline Lattice<T> &Lattice<T>::operator*=(complex b) {
314  for (unsigned long int iSite = 0; iSite < m_latticeSize; iSite++) {
315  m_sites[iSite] *= b;
316  }
317  return *this;
318 }
319 
320 template <class T>
321 inline Lattice<T> &Lattice<T>::operator/=(complex b) {
322  for (unsigned long int iSite = 0; iSite < m_latticeSize; iSite++) {
323  m_sites[iSite] /= b;
324  }
325  return *this;
326 }
327 
331 // Value setters of the lattice
332 template <class T>
333 inline void Lattice<T>::identity() {
334  for (unsigned long int iSite = 0; iSite < m_latticeSize; iSite++) {
335  m_sites[iSite].identity();
336  }
337 }
338 
339 template <>
340 inline void Lattice<complex>::identity() {
341  for (unsigned long int iSite = 0; iSite < m_latticeSize; iSite++) {
342  m_sites[iSite].z[0] = 1.0;
343  m_sites[iSite].z[1] = 0.0;
344  }
345 }
346 
347 template <>
348 inline void Lattice<double>::identity() {
349  for (unsigned long int iSite = 0; iSite < m_latticeSize; iSite++) {
350  m_sites[iSite] = 1.0;
351  }
352 }
353 
354 template <class T>
355 inline void Lattice<T>::zeros() {
356  for (unsigned long int iSite = 0; iSite < m_latticeSize; iSite++) {
357  m_sites[iSite].zeros();
358  }
359 }
360 
361 template <>
362 inline void Lattice<complex>::zeros() {
363  for (unsigned long int iSite = 0; iSite < m_latticeSize; iSite++) {
364  m_sites[iSite].z[0] = 0;
365  m_sites[iSite].z[1] = 0;
366  }
367 }
368 
369 template <>
370 inline void Lattice<double>::zeros() {
371  for (unsigned long int iSite = 0; iSite < m_latticeSize; iSite++) {
372  m_sites[iSite] = 0;
373  }
374 }
375 
376 // Allocates memory for the lattice sites. Must be called before the lattice is used, unless the lattice is copy- or move-constructed.
377 template <class T>
378 inline void Lattice<T>::allocate(std::vector<unsigned int> dim) {
379  m_latticeSize = dim[0] * dim[1] * dim[2] * dim[3];
380  m_dim = dim;
381  m_sites.resize(m_latticeSize);
382 }
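/*!
 * A short sketch of the allocation requirement stated above, assuming a hypothetical 4^4
 * sub-lattice: a default-constructed lattice owns no sites until allocate() is called.
 * \code{.cpp}
 * Lattice<double> L;                                   // no memory allocated yet
 * L.allocate(std::vector<unsigned int>{4, 4, 4, 4});   // sets m_dim, m_latticeSize and resizes m_sites
 * L.zeros();                                           // now safe to touch every site
 * L[0] = 1.0;                                          // unchecked access
 * double x = L(0);                                     // bounds-checked access
 * \endcode
 */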
383 
384 template <class T> // TEMP TEST!!
385 inline Lattice<T> Lattice<T>::copy(Lattice<T> B) {
386  m_dim = B.m_dim;
387  m_latticeSize = B.m_latticeSize;
388  m_sites.resize(m_latticeSize);
389  for (unsigned long int iSite = 0; iSite < m_latticeSize; iSite++) {
390  m_sites[iSite] = B[iSite];
391  }
392  return *this;
393 }
394 
398 template <class SU3>
399 inline Lattice<double> realTrace(Lattice<SU3> L)
400 {
401  Lattice<double> tempTraceSum(L.m_dim);
402  for (unsigned long int iSite = 0; iSite < L.m_latticeSize; iSite++) {
403  tempTraceSum[iSite] = (L[iSite][0] + L[iSite][8] + L[iSite][16]);
404  }
405  return tempTraceSum;
406 }
407 
408 template <class SU3>
409 inline Lattice<double> imagTrace(Lattice<SU3> L)
410 {
411  Lattice<double> tempTraceSum(L.m_dim);
412  for (unsigned long int iSite = 0; iSite < L.m_latticeSize; iSite++) {
413  tempTraceSum[iSite] = (L[iSite][1] + L[iSite][9] + L[iSite][17]);
414  }
415  return tempTraceSum;
416 }
417 
418 template <class SU3>
419 inline Lattice<complex> trace(Lattice<SU3> L)
420 {
421  Lattice<complex> tempTraceSum(L.m_dim);
422  for (unsigned long int iSite = 0; iSite < L.m_latticeSize; iSite++) {
423  tempTraceSum[iSite] = complex(L[iSite][0] + L[iSite][8] + L[iSite][16],
424  L[iSite][1] + L[iSite][9] + L[iSite][17]);
425  }
426  return tempTraceSum;
427 }
428 
429 template <class SU3>
430 inline Lattice<SU3> subtractImag(Lattice<SU3> &L, const Lattice<double> &other)
431 {
432  for (unsigned long int iSite = 0; iSite < L.m_latticeSize; iSite++) {
433  L[iSite][1] -= other.m_sites[iSite];
434  L[iSite][9] -= other.m_sites[iSite];
435  L[iSite][17] -= other.m_sites[iSite];
436  }
437  return std::move(L);
438 }
439 
440 template <class SU3>
441 inline Lattice<SU3> subtractImag(Lattice<SU3> &&L, Lattice<double> &&other)
442 {
443  for (unsigned long int iSite = 0; iSite < L.m_latticeSize; iSite++) {
444  L[iSite][1] -= other[iSite];
445  L[iSite][9] -= other[iSite];
446  L[iSite][17] -= other[iSite];
447  }
448  return std::move(L);
449 }
450 
451 template <class SU3>
452 inline Lattice<SU3> subtractReal(Lattice<SU3> &L, const Lattice<double> &other)
453 {
454  for (unsigned long int iSite = 0; iSite < L.m_latticeSize; iSite++) {
455  L[iSite][0] -= other.m_sites[iSite];
456  L[iSite][8] -= other.m_sites[iSite];
457  L[iSite][16] -= other.m_sites[iSite];
458  }
459  return std::move(L);
460 }
461 
462 template <class SU3>
463 inline Lattice<SU3> subtractReal(Lattice<SU3> &&L, Lattice<double> &&other)
464 {
465  for (unsigned long int iSite = 0; iSite < L.m_latticeSize; iSite++) {
466  L[iSite][0] -= other[iSite];
467  L[iSite][8] -= other[iSite];
468  L[iSite][16] -= other[iSite];
469  }
470  return std::move(L);
471 }
472 
473 template <class T>
474 inline T sum(Lattice<T> L)
475 {
476  T latticeSum;
477  latticeSum = 0.0;
478  for (unsigned long int iSite = 0; iSite < L.m_latticeSize; iSite++) {
479  latticeSum += L[iSite];
480  }
481  return latticeSum;
482 }
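/*!
 * sum() only accumulates over the local sub-lattice of the calling process. A sketch of how a
 * global value could be obtained, assuming an already-filled Lattice<double> named observable
 * and the communicator Parallel::ParallelParameters::ACTIVE_COMM:
 * \code{.cpp}
 * double local = sum(observable);
 * double global = 0.0;
 * MPI_Allreduce(&local, &global, 1, MPI_DOUBLE, MPI_SUM, Parallel::ParallelParameters::ACTIVE_COMM);
 * \endcode
 */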
483 
484 //template <>
485 inline std::vector<double> sumSpatial(Lattice<double> L)
486 {
487  /*
488  * For summing an observable, e.g. the topological charge density, over the x, y and z directions into the time axis.
489  */
490 
491  // Creates empty vector for time axis points
492  std::vector<double> latticeSpatialSum(L.m_dim[3], 0);
493 
494  // Sums the xyz directions into the time axis
495  for (unsigned int it = 0; it < L.m_dim[3]; it++) {
496  for (unsigned int iy = 0; iy < L.m_dim[1]; iy++) {
497  for (unsigned int ix = 0; ix < L.m_dim[0]; ix++) {
498  for (unsigned int iz = 0; iz < L.m_dim[2]; iz++) {
499  latticeSpatialSum[it] += L[Parallel::Index::getIndex(ix,iy,iz,it)];
500  }
501  }
502  }
503  }
504 
505  return latticeSpatialSum;
506 }
507 
508 inline Lattice<double> realTraceMultiplication(Lattice<SU3> L1, Lattice<SU3> L2)
509 {
510  /*
511  * Multiplies the two lattices site by site and takes the real part of the trace, without summing over the lattice sites.
512  */
513  unsigned long site = 0;
514  Lattice<double> latticeSum;
515  latticeSum.allocate(L1.m_dim);
516  latticeSum.zeros();
517 
518  for (unsigned int ix = 0; ix < L1.m_dim[0]; ix++) {
519  for (unsigned int iy = 0; iy < L1.m_dim[1]; iy++) {
520  for (unsigned int iz = 0; iz < L1.m_dim[2]; iz++) {
521  for (unsigned int it = 0; it < L1.m_dim[3]; it++) {
522  site = Parallel::Index::getIndex(ix,iy,iz,it);
523  latticeSum[site] += traceRealMultiplication(L1[site],L2[site]);
524  }
525  }
526  }
527  }
528 
529  return latticeSum;
530 }
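/*!
 * realTraceMultiplication() composes naturally with sumSpatial(): a per-site real trace of a
 * product can be reduced to a per-time-slice profile. A sketch, with U and V as assumed
 * Lattice<SU3> fields of equal local dimensions:
 * \code{.cpp}
 * Lattice<double> perSite = realTraceMultiplication(U, V);  // real part of tr(U_x V_x) at every site
 * std::vector<double> perSlice = sumSpatial(perSite);       // summed over x, y, z for each local t
 * \endcode
 */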
531 
532 
533 inline Lattice<double> imagTraceMultiplication(Lattice<SU3> L1, Lattice<SU3> L2)
534 {
535  /*
536  * Multiplies the two lattices site by site and takes the imaginary part of the trace, without summing over the lattice sites.
537  */
538  unsigned long site = 0;
539  Lattice<double> latticeSum;
540  latticeSum.allocate(L1.m_dim);
541  latticeSum.zeros();
542 
543  for (unsigned int ix = 0; ix < L1.m_dim[0]; ix++) {
544  for (unsigned int iy = 0; iy < L1.m_dim[1]; iy++) {
545  for (unsigned int iz = 0; iz < L1.m_dim[2]; iz++) {
546  for (unsigned int it = 0; it < L1.m_dim[3]; it++) {
547  site = Parallel::Index::getIndex(ix,iy,iz,it);
548  latticeSum[site] += traceImagMultiplication(L1[site],L2[site]);
549  }
550  }
551  }
552  }
553 
554  return latticeSum;
555 }
556 
557 
558 template <class SU3>
559 inline double sumRealTrace(Lattice<SU3> L)
560 {
561  double latticeSum;
562  latticeSum = 0.0;
563  for (unsigned long int iSite = 0; iSite < L.m_latticeSize; iSite++) {
564  latticeSum += L[iSite][0] + L[iSite][8] + L[iSite][16];
565  }
566  return latticeSum;
567 }
568 
569 template <class SU3>
570 inline double sumRealTraceMultiplication(Lattice<SU3> L1, Lattice<SU3> L2)
571 {
572  double latticeSum;
573  latticeSum = 0.0;
574  for (unsigned long int iSite = 0; iSite < L1.m_latticeSize; iSite++) {
575  latticeSum += traceRealMultiplication(L1[iSite],L2[iSite]);
576  }
577  return latticeSum;
578 }
579 
580 template <class SU3>
581 inline Lattice<SU3> inv(Lattice<SU3> &B)
582 {
583  Lattice<SU3> _L;
584  _L.allocate(B.m_dim);
585  for (unsigned long int iSite = 0; iSite < B.m_latticeSize; iSite++) {
586  _L.m_sites[iSite] = B.m_sites[iSite].inv();
587  }
588  return std::move(_L);
589 }
590 
591 template <class SU3>
592 inline Lattice<SU3> inv(Lattice<SU3> &&B)
593 {
594  Lattice<SU3> _L;
595  _L.allocate(B.m_dim);
596  for (unsigned long int iSite = 0; iSite < B.m_latticeSize; iSite++) {
597  _L.m_sites[iSite] = B.m_sites[iSite].inv();
598  }
599  return std::move(_L);
600 }
601 
602 template <class SU3>
603 inline Lattice<SU3> transpose(Lattice<SU3> &B)
604 {
605  Lattice<SU3> _L;
606  _L.allocate(B.m_dim);
607  for (unsigned long int iSite = 0; iSite < B.m_latticeSize; iSite++) {
608  _L.m_sites[iSite] = B.m_sites[iSite].transpose();
609  }
610  return std::move(_L);
611 }
612 
613 template <class SU3>
614 inline Lattice<SU3> transpose(Lattice<SU3> &&B)
615 {
616  Lattice<SU3> _L;
617  _L.allocate(B.m_dim);
618  for (unsigned long int iSite = 0; iSite < B.m_latticeSize; iSite++) {
619  _L.m_sites[iSite] = B.m_sites[iSite].transpose();
620  }
621  return std::move(_L);
622 }
623 
624 template <class SU3>
625 inline Lattice<SU3> conjugate(Lattice<SU3> &B)
626 {
627  Lattice<SU3> _L;
628  _L.allocate(B.m_dim);
629  for (unsigned long int iSite = 0; iSite < B.m_latticeSize; iSite++) {
630  _L.m_sites[iSite] = B.m_sites[iSite].conjugate();
631  }
632  return std::move(_L);
633 }
634 
635 template <class SU3>
636 inline Lattice<SU3> conjugate(Lattice<SU3> &&B)
637 {
638  Lattice<SU3> _L;
639  _L.allocate(B.m_dim);
640  for (unsigned long int iSite = 0; iSite < B.m_latticeSize; iSite++) {
641  _L.m_sites[iSite] = B.m_sites[iSite].conjugate();
642  }
643  return std::move(_L);
644 }
645 
646 template <class SU3>
647 inline Lattice<SU3> makeAntiHermitian(Lattice<SU3> &B)
648 {
649  /*
650  * Multiplies by (i).
651  */
652  Lattice<SU3> _L;
653  _L.allocate(B.m_dim);
654  for (unsigned long int iSite = 0; iSite < B.m_latticeSize; iSite++) {
655  _L.m_sites[iSite] = B.m_sites[iSite].makeAntiHermitian();
656  }
657  return std::move(_L);
658 }
659 
660 template <class SU3>
661 inline Lattice<SU3> makeAntiHermitian(Lattice<SU3> &&B)
662 {
663  /*
664  * Multiplies by (i).
665  */
666  Lattice<SU3> _L;
667  _L.allocate(B.m_dim);
668  for (unsigned long int iSite = 0; iSite < B.m_latticeSize; iSite++) {
669  _L.m_sites[iSite] = B.m_sites[iSite].makeAntiHermitian();
670  }
671  return std::move(_L);
672 }
673 
674 template <class SU3>
675 inline Lattice<SU3> makeHermitian(Lattice<SU3> &B)
676 {
677  /*
678  * Multiplies by (-i).
679  */
680  Lattice<SU3> _L;
681  _L.allocate(B.m_dim);
682  for (unsigned long int iSite = 0; iSite < B.m_latticeSize; iSite++) {
683  _L.m_sites[iSite] = B.m_sites[iSite].makeHermitian();
684  }
685  return std::move(_L);
686 }
687 
688 template <class SU3>
689 inline Lattice<SU3> makeHermitian(Lattice<SU3> &&B)
690 {
691  /*
692  * Multiplies by (-i).
693  */
694  Lattice<SU3> _L;
695  _L.allocate(B.m_dim);
696  for (unsigned long int iSite = 0; iSite < B.m_latticeSize; iSite++) {
697  _L.m_sites[iSite] = B.m_sites[iSite].makeHermitian();
698  }
699  return std::move(_L);
700 }
701 
705 // Enumerator container for direction
711 enum DIR {
712  BACKWARDS,
713  FORWARDS
714 };
715 
728 inline Lattice<SU3> shift(Lattice<SU3> L, DIR direction, unsigned int lorentzVector)
729 {
730  /*
731  * Shifts the lattice one step backwards or forwards along the given Lorentz direction.
732  * Which link (mu) direction the matrices belong to is determined by the lattice that is passed in.
733  */
734  Lattice<SU3> _L;
735  _L.allocate(L.m_dim); // TODO: move this allocation to initialization/the header so it is not repeated on every call
736  std::vector<SU3> sendCube; // TODO: move the index computation into Index to avoid two integer multiplications
737  std::vector<SU3> recvCube; // TODO: move these buffers to the header so they are not allocated on every call
738  // TODO: instead of a cube index, just use the ordinary index with the shifted dimension set to zero
739  MPI_Request sendReq,recvReq;
740  switch(direction) {
741  case BACKWARDS: {
742  switch(lorentzVector) {
743  case 0: {
744  sendCube.resize(L.m_dim[1]*L.m_dim[2]*L.m_dim[3]); // Four of these can actually be stored globally
745  recvCube.resize(L.m_dim[1]*L.m_dim[2]*L.m_dim[3]);
746  /* Max memory usage: a 48*48*48*96 lattice on 512 processes gives a 12^4 sub-lattice, so one face cube
747  * holds 12^3 = 1728 sites of 18 doubles (144 bytes) each --> roughly 0.24 MB per processor.
748  * At most 4 such volumes are in use, one per direction (assuming the spatial directionality may vary) --> roughly 0.95 MB in total for this part.
749  */
750  // Populates package to send
751  for (unsigned int iy = 0; iy < L.m_dim[1]; iy++) {
752  for (unsigned int iz = 0; iz < L.m_dim[2]; iz++) {
753  for (unsigned int it = 0; it < L.m_dim[3]; it++) { // Ensure cube indexes match before and after in order to map correctly!!
754  sendCube[Parallel::Index::cubeIndex(iy,iz,it,L.m_dim[1],L.m_dim[2])] = L.m_sites[Parallel::Index::getIndex(L.m_dim[0]-1,iy,iz,it)];
755  }
756  }
757  }
758  // Sends and receives packages
759  MPI_Isend(&sendCube.front(),18*(L.m_dim[1]*L.m_dim[2]*L.m_dim[3]),MPI_DOUBLE,Parallel::Neighbours::get(1),0,Parallel::ParallelParameters::ACTIVE_COMM,&sendReq);
760  MPI_Irecv(&recvCube.front(),18*(L.m_dim[1]*L.m_dim[2]*L.m_dim[3]),MPI_DOUBLE,Parallel::Neighbours::get(0),0,Parallel::ParallelParameters::ACTIVE_COMM,&recvReq);
761  // Populates shifted lattice by elements not required to share
762  for (unsigned int ix = 1; ix < L.m_dim[0]; ix++) {
763  for (unsigned int iy = 0; iy < L.m_dim[1]; iy++) {
764  for (unsigned int iz = 0; iz < L.m_dim[2]; iz++) {
765  for (unsigned int it = 0; it < L.m_dim[3]; it++) {
766  _L.m_sites[Parallel::Index::getIndex(ix,iy,iz,it)] = L.m_sites[Parallel::Index::getIndex(ix-1,iy,iz,it)];
767  }
768  }
769  }
770  }
771  // Ensures all results have been sent and received, then populates face cube with remaining results.
772  MPI_Wait(&sendReq,MPI_STATUS_IGNORE);
773  MPI_Wait(&recvReq,MPI_STATUS_IGNORE);
774  // Repopulates the lattice with missing cube
775  for (unsigned int iy = 0; iy < L.m_dim[1]; iy++) {
776  for (unsigned int iz = 0; iz < L.m_dim[2]; iz++) {
777  for (unsigned int it = 0; it < L.m_dim[3]; it++) {
778  _L.m_sites[Parallel::Index::getIndex(0,iy,iz,it)] = recvCube[Parallel::Index::cubeIndex(iy,iz,it,L.m_dim[1],L.m_dim[2])];
779  }
780  }
781  }
782  return _L;
783  }
784  case 1: {
785  sendCube.resize(L.m_dim[0]*L.m_dim[2]*L.m_dim[3]);
786  recvCube.resize(L.m_dim[0]*L.m_dim[2]*L.m_dim[3]);
787  // Populates package to send
788  for (unsigned int ix = 0; ix < L.m_dim[0]; ix++) {
789  for (unsigned int iz = 0; iz < L.m_dim[2]; iz++) {
790  for (unsigned int it = 0; it < L.m_dim[3]; it++) {
791  sendCube[Parallel::Index::cubeIndex(ix,iz,it,L.m_dim[0],L.m_dim[2])] = L.m_sites[Parallel::Index::getIndex(ix,L.m_dim[1]-1,iz,it)];
792  }
793  }
794  }
795  MPI_Isend(&sendCube.front(),18*(L.m_dim[0]*L.m_dim[2]*L.m_dim[3]),MPI_DOUBLE,Parallel::Neighbours::get(3),0,Parallel::ParallelParameters::ACTIVE_COMM,&sendReq);
796  MPI_Irecv(&recvCube.front(),18*(L.m_dim[0]*L.m_dim[2]*L.m_dim[3]),MPI_DOUBLE,Parallel::Neighbours::get(2),0,Parallel::ParallelParameters::ACTIVE_COMM,&recvReq);
797  // Populates shifted lattice by elements not required to share
798  for (unsigned int ix = 0; ix < L.m_dim[0]; ix++) {
799  for (unsigned int iy = 1; iy < L.m_dim[1]; iy++) {
800  for (unsigned int iz = 0; iz < L.m_dim[2]; iz++) {
801  for (unsigned int it = 0; it < L.m_dim[3]; it++) {
802  _L.m_sites[Parallel::Index::getIndex(ix,iy,iz,it)] = L.m_sites[Parallel::Index::getIndex(ix,iy-1,iz,it)];
803  }
804  }
805  }
806  }
807  // Ensures all results have been sent and received
808  MPI_Wait(&sendReq,MPI_STATUS_IGNORE);
809  MPI_Wait(&recvReq,MPI_STATUS_IGNORE);
810  for (unsigned int ix = 0; ix < L.m_dim[0]; ix++) {
811  for (unsigned int iz = 0; iz < L.m_dim[2]; iz++) {
812  for (unsigned int it = 0; it < L.m_dim[3]; it++) {
813  _L.m_sites[Parallel::Index::getIndex(ix,0,iz,it)] = recvCube[Parallel::Index::cubeIndex(ix,iz,it,L.m_dim[0],L.m_dim[2])];
814  }
815  }
816  }
817  return _L;
818  }
819  case 2: {
820  sendCube.resize(L.m_dim[0]*L.m_dim[1]*L.m_dim[3]);
821  recvCube.resize(L.m_dim[0]*L.m_dim[1]*L.m_dim[3]);
822  // Populates package to send
823  for (unsigned int ix = 0; ix < L.m_dim[0]; ix++) {
824  for (unsigned int iy = 0; iy < L.m_dim[1]; iy++) {
825  for (unsigned int it = 0; it < L.m_dim[3]; it++) {
826  sendCube[Parallel::Index::cubeIndex(ix,iy,it,L.m_dim[0],L.m_dim[1])] = L.m_sites[Parallel::Index::getIndex(ix,iy,L.m_dim[2]-1,it)];
827  }
828  }
829  }
830  MPI_Isend(&sendCube.front(),18*(L.m_dim[0]*L.m_dim[1]*L.m_dim[3]),MPI_DOUBLE,Parallel::Neighbours::get(5),0,Parallel::ParallelParameters::ACTIVE_COMM,&sendReq);
831  MPI_Irecv(&recvCube.front(),18*(L.m_dim[0]*L.m_dim[1]*L.m_dim[3]),MPI_DOUBLE,Parallel::Neighbours::get(4),0,Parallel::ParallelParameters::ACTIVE_COMM,&recvReq);
832  // Populates shifted lattice by elements not required to share
833  for (unsigned int ix = 0; ix < L.m_dim[0]; ix++) {
834  for (unsigned int iy = 0; iy < L.m_dim[1]; iy++) {
835  for (unsigned int iz = 1; iz < L.m_dim[2]; iz++) {
836  for (unsigned int it = 0; it < L.m_dim[3]; it++) {
837  _L.m_sites[Parallel::Index::getIndex(ix,iy,iz,it)] = L.m_sites[Parallel::Index::getIndex(ix,iy,iz-1,it)];
838  }
839  }
840  }
841  }
842  // Ensures all results have been sent and received
843  MPI_Wait(&recvReq,MPI_STATUS_IGNORE);
844  MPI_Wait(&sendReq,MPI_STATUS_IGNORE);
845  for (unsigned int ix = 0; ix < L.m_dim[0]; ix++) {
846  for (unsigned int iy = 0; iy < L.m_dim[1]; iy++) {
847  for (unsigned int it = 0; it < L.m_dim[3]; it++) {
848  _L.m_sites[Parallel::Index::getIndex(ix,iy,0,it)] = recvCube[Parallel::Index::cubeIndex(ix,iy,it,L.m_dim[0],L.m_dim[1])];
849  }
850  }
851  }
852  return _L;
853  }
854  case 3: {
855  sendCube.resize(L.m_dim[0]*L.m_dim[1]*L.m_dim[2]);
856  recvCube.resize(L.m_dim[0]*L.m_dim[1]*L.m_dim[2]);
857  // Populates package to send
858  for (unsigned int ix = 0; ix < L.m_dim[0]; ix++) {
859  for (unsigned int iy = 0; iy < L.m_dim[1]; iy++) {
860  for (unsigned int iz = 0; iz < L.m_dim[2]; iz++) {
861  sendCube[Parallel::Index::cubeIndex(ix,iy,iz,L.m_dim[0],L.m_dim[1])] = L.m_sites[Parallel::Index::getIndex(ix,iy,iz,L.m_dim[3]-1)];
862  }
863  }
864  }
865  MPI_Isend(&sendCube.front(),18*(L.m_dim[0]*L.m_dim[1]*L.m_dim[2]),MPI_DOUBLE,Parallel::Neighbours::get(7),0,Parallel::ParallelParameters::ACTIVE_COMM,&sendReq);
866  MPI_Irecv(&recvCube.front(),18*(L.m_dim[0]*L.m_dim[1]*L.m_dim[2]),MPI_DOUBLE,Parallel::Neighbours::get(6),0,Parallel::ParallelParameters::ACTIVE_COMM,&recvReq);
867  // Populates shifted lattice by elements not required to share
868  for (unsigned int ix = 0; ix < L.m_dim[0]; ix++) {
869  for (unsigned int iy = 0; iy < L.m_dim[1]; iy++) {
870  for (unsigned int iz = 0; iz < L.m_dim[2]; iz++) {
871  for (unsigned int it = 1; it < L.m_dim[3]; it++) {
872  _L.m_sites[Parallel::Index::getIndex(ix,iy,iz,it)] = L.m_sites[Parallel::Index::getIndex(ix,iy,iz,it-1)];
873  }
874  }
875  }
876  }
877  // Ensures all results have been sent and received
878  MPI_Wait(&recvReq,MPI_STATUS_IGNORE);
879  MPI_Wait(&sendReq,MPI_STATUS_IGNORE);
880  for (unsigned int ix = 0; ix < L.m_dim[0]; ix++) {
881  for (unsigned int iy = 0; iy < L.m_dim[1]; iy++) {
882  for (unsigned int iz = 0; iz < L.m_dim[2]; iz++) {
883  _L.m_sites[Parallel::Index::getIndex(ix,iy,iz,0)] = recvCube[Parallel::Index::cubeIndex(ix,iy,iz,L.m_dim[0],L.m_dim[1])];
884  }
885  }
886  }
887  return _L;
888  }
889  }
890  break;
891  }
892  case FORWARDS: {
893  switch(lorentzVector) {
894  case 0: { // x direction
895  sendCube.resize(L.m_dim[1]*L.m_dim[2]*L.m_dim[3]);
896  recvCube.resize(L.m_dim[1]*L.m_dim[2]*L.m_dim[3]);
897  // Populates package to send
898  for (unsigned int iy = 0; iy < L.m_dim[1]; iy++) {
899  for (unsigned int iz = 0; iz < L.m_dim[2]; iz++) {
900  for (unsigned int it = 0; it < L.m_dim[3]; it++) {
901  sendCube[Parallel::Index::cubeIndex(iy,iz,it,L.m_dim[1],L.m_dim[2])] = L.m_sites[Parallel::Index::getIndex(0,iy,iz,it)];
902  }
903  }
904  }
905  // Sends and receives packages
906  MPI_Isend(&sendCube.front(),18*(L.m_dim[1]*L.m_dim[2]*L.m_dim[3]),MPI_DOUBLE,Parallel::Neighbours::get(0),0,Parallel::ParallelParameters::ACTIVE_COMM,&sendReq);
907  MPI_Irecv(&recvCube.front(),18*(L.m_dim[1]*L.m_dim[2]*L.m_dim[3]),MPI_DOUBLE,Parallel::Neighbours::get(1),0,Parallel::ParallelParameters::ACTIVE_COMM,&recvReq);
908  // Populates shifted lattice by elements not required to share
909  for (unsigned int ix = 0; ix < L.m_dim[0] - 1; ix++) {
910  for (unsigned int iy = 0; iy < L.m_dim[1]; iy++) {
911  for (unsigned int iz = 0; iz < L.m_dim[2]; iz++) {
912  for (unsigned int it = 0; it < L.m_dim[3]; it++) {
913  _L.m_sites[Parallel::Index::getIndex(ix,iy,iz,it)] = L.m_sites[Parallel::Index::getIndex(ix+1,iy,iz,it)];
914  }
915  }
916  }
917  }
918  // Ensures all results have been sent and received, then populates face cube with remaining results.
919  MPI_Wait(&sendReq,MPI_STATUS_IGNORE);
920  MPI_Wait(&recvReq,MPI_STATUS_IGNORE);
921  for (unsigned int iy = 0; iy < L.m_dim[1]; iy++) {
922  for (unsigned int iz = 0; iz < L.m_dim[2]; iz++) {
923  for (unsigned int it = 0; it < L.m_dim[3]; it++) {
924  _L.m_sites[Parallel::Index::getIndex(L.m_dim[0]-1,iy,iz,it)] = recvCube[Parallel::Index::cubeIndex(iy,iz,it,L.m_dim[1],L.m_dim[2])];
925  }
926  }
927  }
928  return _L;
929  }
930  case 1: { // y direction
931  sendCube.resize(L.m_dim[0]*L.m_dim[2]*L.m_dim[3]);
932  recvCube.resize(L.m_dim[0]*L.m_dim[2]*L.m_dim[3]);
933  // Populates package to send
934  for (unsigned int ix = 0; ix < L.m_dim[0]; ix++) {
935  for (unsigned int iz = 0; iz < L.m_dim[2]; iz++) {
936  for (unsigned int it = 0; it < L.m_dim[3]; it++) {
937  sendCube[Parallel::Index::cubeIndex(ix,iz,it,L.m_dim[0],L.m_dim[2])] = L.m_sites[Parallel::Index::getIndex(ix,0,iz,it)];
938  }
939  }
940  }
941  MPI_Isend(&sendCube.front(),18*(L.m_dim[0]*L.m_dim[2]*L.m_dim[3]),MPI_DOUBLE,Parallel::Neighbours::get(2),0,Parallel::ParallelParameters::ACTIVE_COMM,&sendReq);
942  MPI_Irecv(&recvCube.front(),18*(L.m_dim[0]*L.m_dim[2]*L.m_dim[3]),MPI_DOUBLE,Parallel::Neighbours::get(3),0,Parallel::ParallelParameters::ACTIVE_COMM,&recvReq);
943  // Populates shifted lattice by elements not required to share
944  for (unsigned int ix = 0; ix < L.m_dim[0]; ix++) {
945  for (unsigned int iy = 0; iy < L.m_dim[1] - 1; iy++) {
946  for (unsigned int iz = 0; iz < L.m_dim[2]; iz++) {
947  for (unsigned int it = 0; it < L.m_dim[3]; it++) {
948  _L.m_sites[Parallel::Index::getIndex(ix,iy,iz,it)] = L.m_sites[Parallel::Index::getIndex(ix,iy+1,iz,it)];
949  }
950  }
951  }
952  }
953  // Ensures all results have been sent and received
954  MPI_Wait(&sendReq,MPI_STATUS_IGNORE);
955  MPI_Wait(&recvReq,MPI_STATUS_IGNORE);
956  for (unsigned int ix = 0; ix < L.m_dim[0]; ix++) {
957  for (unsigned int iz = 0; iz < L.m_dim[2]; iz++) {
958  for (unsigned int it = 0; it < L.m_dim[3]; it++) {
959  _L.m_sites[Parallel::Index::getIndex(ix,L.m_dim[1]-1,iz,it)] = recvCube[Parallel::Index::cubeIndex(ix,iz,it,L.m_dim[0],L.m_dim[2])];
960  }
961  }
962  }
963  return _L;
964  }
965  case 2: { // z direction
966  sendCube.resize(L.m_dim[0]*L.m_dim[1]*L.m_dim[3]);
967  recvCube.resize(L.m_dim[0]*L.m_dim[1]*L.m_dim[3]);
968  // Populates package to send
969  for (unsigned int ix = 0; ix < L.m_dim[0]; ix++) {
970  for (unsigned int iy = 0; iy < L.m_dim[1]; iy++) {
971  for (unsigned int it = 0; it < L.m_dim[3]; it++) {
972  sendCube[Parallel::Index::cubeIndex(ix,iy,it,L.m_dim[0],L.m_dim[1])] = L.m_sites[Parallel::Index::getIndex(ix,iy,0,it)];
973  }
974  }
975  }
976  MPI_Isend(&sendCube.front(),18*(L.m_dim[0]*L.m_dim[1]*L.m_dim[3]),MPI_DOUBLE,Parallel::Neighbours::get(4),0,Parallel::ParallelParameters::ACTIVE_COMM,&sendReq);
977  MPI_Irecv(&recvCube.front(),18*(L.m_dim[0]*L.m_dim[1]*L.m_dim[3]),MPI_DOUBLE,Parallel::Neighbours::get(5),0,Parallel::ParallelParameters::ACTIVE_COMM,&recvReq);
978  // Populates shifted lattice by elements not required to share
979  for (unsigned int ix = 0; ix < L.m_dim[0]; ix++) {
980  for (unsigned int iy = 0; iy < L.m_dim[1]; iy++) {
981  for (unsigned int iz = 0; iz < L.m_dim[2] - 1; iz++) {
982  for (unsigned int it = 0; it < L.m_dim[3]; it++) {
983  _L.m_sites[Parallel::Index::getIndex(ix,iy,iz,it)] = L.m_sites[Parallel::Index::getIndex(ix,iy,iz+1,it)];
984  }
985  }
986  }
987  }
988  // Ensures all results have been sent and received
989  MPI_Wait(&recvReq,MPI_STATUS_IGNORE);
990  MPI_Wait(&sendReq,MPI_STATUS_IGNORE);
991  for (unsigned int ix = 0; ix < L.m_dim[0]; ix++) {
992  for (unsigned int iy = 0; iy < L.m_dim[1]; iy++) {
993  for (unsigned int it = 0; it < L.m_dim[3]; it++) {
994  _L.m_sites[Parallel::Index::getIndex(ix,iy,L.m_dim[2]-1,it)] = recvCube[Parallel::Index::cubeIndex(ix,iy,it,L.m_dim[0],L.m_dim[1])];
995  }
996  }
997  }
998  return _L;
999  }
1000  case 3: { // t direction
1001  sendCube.resize(L.m_dim[0]*L.m_dim[1]*L.m_dim[2]);
1002  recvCube.resize(L.m_dim[0]*L.m_dim[1]*L.m_dim[2]);
1003  // Populates package to send
1004  for (unsigned int ix = 0; ix < L.m_dim[0]; ix++) {
1005  for (unsigned int iy = 0; iy < L.m_dim[1]; iy++) {
1006  for (unsigned int iz = 0; iz < L.m_dim[2]; iz++) {
1007  sendCube[Parallel::Index::cubeIndex(ix,iy,iz,L.m_dim[0],L.m_dim[1])] = L.m_sites[Parallel::Index::getIndex(ix,iy,iz,0)];
1008  }
1009  }
1010  }
1011  MPI_Isend(&sendCube.front(),18*(L.m_dim[0]*L.m_dim[1]*L.m_dim[2]),MPI_DOUBLE,Parallel::Neighbours::get(6),0,Parallel::ParallelParameters::ACTIVE_COMM,&sendReq);
1012  MPI_Irecv(&recvCube.front(),18*(L.m_dim[0]*L.m_dim[1]*L.m_dim[2]),MPI_DOUBLE,Parallel::Neighbours::get(7),0,Parallel::ParallelParameters::ACTIVE_COMM,&recvReq);
1013  // Populates shifted lattice by elements not required to share
1014  for (unsigned int ix = 0; ix < L.m_dim[0]; ix++) {
1015  for (unsigned int iy = 0; iy < L.m_dim[1]; iy++) {
1016  for (unsigned int iz = 0; iz < L.m_dim[2]; iz++) {
1017  for (unsigned int it = 0; it < L.m_dim[3] - 1; it++) {
1018  _L.m_sites[Parallel::Index::getIndex(ix,iy,iz,it)] = L.m_sites[Parallel::Index::getIndex(ix,iy,iz,it+1)];
1019  }
1020  }
1021  }
1022  }
1023  // Ensures all results have been sent and received
1024  MPI_Wait(&recvReq,MPI_STATUS_IGNORE);
1025  MPI_Wait(&sendReq,MPI_STATUS_IGNORE);
1026  for (unsigned int ix = 0; ix < L.m_dim[0]; ix++) {
1027  for (unsigned int iy = 0; iy < L.m_dim[1]; iy++) {
1028  for (unsigned int iz = 0; iz < L.m_dim[2]; iz++) {
1029  _L.m_sites[Parallel::Index::getIndex(ix,iy,iz,L.m_dim[3]-1)] = recvCube[Parallel::Index::cubeIndex(ix,iy,iz,L.m_dim[0],L.m_dim[1])];
1030  }
1031  }
1032  }
1033  return _L;
1034  }
1035  }
1036  break;
1037  }
1038  }
1039  return _L;
1040 }
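/*!
 * A brief sketch of how shift() is typically combined with the lattice operators: the whole
 * lattice is copied and communicated one step along a Lorentz direction (including the MPI face
 * exchange), so neighbouring links can be combined site by site. U is an assumed Lattice<SU3>
 * link field on the local sub-lattice.
 * \code{.cpp}
 * Lattice<SU3> UxFwd = shift(U, FORWARDS, 0);  // value from the site one step forward in x
 * Lattice<SU3> prod  = U * UxFwd;              // site-wise SU3 product, e.g. part of a plaquette
 * double localReTr   = sumRealTrace(prod);     // real trace summed over the local sub-lattice
 * \endcode
 */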
1041 
1042 #endif // LATTICE_H