32 const auto &s = this->getSettings();
34 for (
auto &hw : s.hw) {
35 if ( ! dynamic_cast<CPU*>(hw)) {
44 m_dest = std::unique_ptr<T[]>(
new T[s.dims.size()]);
50 const auto &s = this->getSettings();
65 bool isReady = this->isInitialized()
72 this->getSettings().dims.size() *
sizeof(T));
85 m_threadPool.resize(1);
91 if ( ! this->isInitialized()) {
94 auto &sOrig = this->getSettings();
95 result &= sOrig.dims.size() >= s.
dims.
size();
96 result &= !(( ! sOrig.keepSrcCopy) && s.
keepSrcCopy);
108 bool isReady = this->isInitialized()
113 const Dimensions dims = this->getSettings().dims;
114 const size_t n = dims.
n();
115 const size_t z = dims.
z();
116 const size_t y = dims.
y();
117 const size_t x = dims.
x();
119 auto futures = std::vector<std::future<void>>();
121 auto workload = [&](
int id,
size_t signalId){
126 out.setXmippOrigin();
129 const float *
f = matrices.data() + (9 * signalId);
130 for (
int i = 0;
i < 9; ++
i) {
136 for (
size_t i = 0;
i <
n; ++
i) {
137 futures.emplace_back(m_threadPool.push(workload,
i));
139 for (
auto &
f : futures) {
Case or algorithm not implemented yet.
#define REPORT_ERROR(nerr, ErrorMsg)
CUDA_HD constexpr size_t z() const
T norm(const std::vector< T > &v)
static unsigned findCores()
CUDA_HD constexpr size_t x() const
CUDA_HD constexpr size_t sizeSingle() const
CUDA_HD constexpr size_t y() const
CUDA_HD constexpr size_t n() const
constexpr size_t size() const
Some logical error in the pipeline.