Xmipp  v3.23.11-Nereus
CudaCorrelationComputer< T > Class Template Reference

#include <cuda_correlation_computer.h>

Inheritance diagram for CudaCorrelationComputer< T >:
Collaboration diagram for CudaCorrelationComputer< T >:

Public Member Functions

 CudaCorrelationComputer ()
 
 ~CudaCorrelationComputer ()
 
void loadReference (const T *ref) override
 
void compute (T *others) override
 
- Public Member Functions inherited from AMeritComputer< T >
 AMeritComputer ()
 
virtual ~AMeritComputer ()=default
 
void init (const MeritSettings &s, bool reuse)
 
const MeritSettings & getSettings () const
 
const std::vector< float > & getFiguresOfMerit () const
 

Additional Inherited Members

- Protected Member Functions inherited from AMeritComputer< T >
constexpr bool isInitialized () const
 
constexpr bool isRefLoaded () const
 
void setIsRefLoaded (bool status)
 
std::vector< float > & getFiguresOfMerit ()
 

Detailed Description

template<typename T>
class CudaCorrelationComputer< T >

Definition at line 37 of file cuda_correlation_computer.h.
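
CudaCorrelationComputer< T > is the GPU implementation of the AMeritComputer< T > interface: after init() and loadReference(), each call to compute() fills getFiguresOfMerit() with one value per candidate signal. The member names (normalizeResult, computeAvgStddevForRef()) suggest that the figure of merit is a (normalized) cross-correlation against the reference; the standalone CPU sketch below illustrates that interpretation. It is only an illustration under that assumption, not the kernel actually launched by this class.

    // Hypothetical CPU reference for a one-to-N (normalized) correlation figure of merit.
    // The layout (n candidate signals of ref.size() samples each, concatenated) and the
    // normalization are assumptions; the CUDA kernels remain the authoritative implementation.
    #include <cmath>
    #include <cstddef>
    #include <vector>

    std::vector<float> oneToNCorrelation(const std::vector<float> &ref,
                                         const std::vector<float> &others,
                                         size_t n, bool normalizeResult) {
        const size_t len = ref.size();
        // statistics of the reference (what computeAvgStddevForRef() presumably caches)
        double refAvg = 0, refSq = 0;
        for (float v : ref) { refAvg += v; refSq += double(v) * v; }
        refAvg /= len;
        const double refStd = std::sqrt(refSq / len - refAvg * refAvg);

        std::vector<float> figures(n);
        for (size_t i = 0; i < n; ++i) {
            const float *sig = others.data() + i * len;
            double sigAvg = 0, sigSq = 0, dot = 0;
            for (size_t j = 0; j < len; ++j) {
                sigAvg += sig[j];
                sigSq += double(sig[j]) * sig[j];
                dot += double(sig[j]) * ref[j];
            }
            sigAvg /= len;
            const double sigStd = std::sqrt(sigSq / len - sigAvg * sigAvg);
            figures[i] = normalizeResult
                    ? float((dot / len - refAvg * sigAvg) / (refStd * sigStd)) // normalized cross-correlation
                    : float(dot / len); // plain correlation; avg = 0 and stddev = 1 assumed
        }
        return figures;
    }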

Constructor & Destructor Documentation

◆ CudaCorrelationComputer()

template<typename T>
CudaCorrelationComputer< T >::CudaCorrelationComputer ( )
inline

Definition at line 39 of file cuda_correlation_computer.h.

{
    setDefault();
}

◆ ~CudaCorrelationComputer()

template<typename T>
CudaCorrelationComputer< T >::~CudaCorrelationComputer ( )
inline

Definition at line 43 of file cuda_correlation_computer.h.

{
    release();
}

Member Function Documentation

◆ compute()

template<typename T >
void CudaCorrelationComputer< T >::compute ( T *  others)
override virtual

Implements AMeritComputer< T >.

Definition at line 75 of file cuda_correlation_computer.cpp.

{
    bool isReady = this->isInitialized() && this->isRefLoaded();
    if ( ! isReady) {
        REPORT_ERROR(ERR_LOGIC_ERROR, "Not ready to execute. Call init() and load reference");
    }
    if ( ! m_stream->isGpuPointer(others)) {
        REPORT_ERROR(ERR_NOT_IMPLEMENTED, "Processing data from host is not yet supported");
    } else {
        m_d_others = others;
    }

    const auto &s = this->getSettings();
    this->getFiguresOfMerit().resize(s.otherDims.n());
    switch (s.type) {
        case MeritType::OneToN: {
            if (s.normalizeResult) {
                computeOneToN<true>();
            } else {
                computeOneToN<false>();
            }
            break;
        }
        default:
            REPORT_ERROR(ERR_NOT_IMPLEMENTED, "This case is not implemented");
    }
}
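compute() accepts only device pointers (host data triggers the ERR_NOT_IMPLEMENTED branch above), so the caller has to stage the candidate signals on the GPU beforehand. A minimal sketch using only the CUDA runtime is given below; the buffer size, the d_others name, and the assumption that init() and loadReference() were already called are illustrative and not taken from the source.

    // Sketch: staging candidate signals in device memory before calling compute().
    // Sizes and layout (n signals of `len` floats each) are assumptions for illustration.
    #include <cstddef>
    #include <cuda_runtime.h>
    #include <vector>

    int main() {
        const size_t n = 100, len = 128 * 128;
        std::vector<float> h_others(n * len, 0.f); // host-side candidate signals

        float *d_others = nullptr;
        cudaMalloc(&d_others, n * len * sizeof(float));
        cudaMemcpy(d_others, h_others.data(), n * len * sizeof(float),
                   cudaMemcpyHostToDevice);

        // d_others is now a GPU pointer, so m_stream->isGpuPointer(others) holds and it
        // could be passed to CudaCorrelationComputer<float>::compute(d_others),
        // assuming init() and loadReference() were called first.

        cudaFree(d_others);
        return 0;
    }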

◆ loadReference()

template<typename T >
void CudaCorrelationComputer< T >::loadReference ( const T *  ref)
override virtual

Implements AMeritComputer< T >.

Definition at line 103 of file cuda_correlation_computer.cpp.

{
    const auto &s = this->getSettings();
    this->setIsRefLoaded(nullptr != ref);

    if (m_stream->isGpuPointer(ref)) {
        REPORT_ERROR(ERR_NOT_IMPLEMENTED, "Only reference data on CPU are currently supported");
    } else {
        size_t bytes = s.refDims.size() * sizeof(T);
        bool hasToPin = ! m_stream->isMemoryPinned(ref);
        if (hasToPin) {
            m_stream->pinMemory(ref, bytes);
        }
        auto stream = *(cudaStream_t*)m_stream->stream();
        // copy data to GPU
        gpuErrchk(cudaMemcpyAsync(
                m_d_ref,
                ref,
                bytes,
                cudaMemcpyHostToDevice, stream));
        if (hasToPin) {
            m_stream->unpinMemory(ref);
        }
    }
    if (s.normalizeResult) {
        computeAvgStddevForRef();
    } else {
        // nothing to do, assume that stddev = 1 and avg = 0 for each signal
    }
}
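loadReference() therefore pins the host buffer if it is not pinned already, issues an asynchronous host-to-device copy on the computer's stream, unpins the buffer again, and computes the reference average and standard deviation only when normalizeResult is set. The same pin / async-copy / unpin pattern can be written with the plain CUDA runtime as in the sketch below, which substitutes cudaHostRegister/cudaHostUnregister for the GPU wrapper's pinMemory/unpinMemory and synchronizes the stream before unpinning; buffer sizes are illustrative.

    // Sketch of the pin / async-copy / unpin pattern used by loadReference(),
    // expressed with raw CUDA runtime calls instead of Xmipp's GPU wrapper.
    #include <cstddef>
    #include <cuda_runtime.h>
    #include <vector>

    int main() {
        const size_t len = 128 * 128;
        std::vector<float> ref(len, 1.f);   // host-side reference signal
        const size_t bytes = len * sizeof(float);

        float *d_ref = nullptr;
        cudaMalloc(&d_ref, bytes);

        cudaStream_t stream;
        cudaStreamCreate(&stream);

        // pin the host memory so the copy can run asynchronously
        cudaHostRegister(ref.data(), bytes, cudaHostRegisterDefault);
        cudaMemcpyAsync(d_ref, ref.data(), bytes, cudaMemcpyHostToDevice, stream);
        cudaStreamSynchronize(stream);      // wait for the copy before touching the buffer again
        cudaHostUnregister(ref.data());     // undo the temporary pinning

        cudaStreamDestroy(stream);
        cudaFree(d_ref);
        return 0;
    }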

The documentation for this class was generated from the following files:
cuda_correlation_computer.h
cuda_correlation_computer.cpp