18# include <cuda_runtime.h>
22#include <boost/fiber/cuda/waitfor.hpp>
23#include <boost/fiber/mutex.hpp>
24#include <boost/fiber/recursive_mutex.hpp>
36 using Mutex_t = std::recursive_mutex;
37 using Stream_t = cudaStream_t;
40 std::deque<Stream_t> queue;
45 void push(
const Stream_t& s ) {
46 std::unique_lock lck( queue_mtx );
52 bool pop( Stream_t& s ) {
53 std::unique_lock lck( queue_mtx );
54 if ( queue.empty() ) {
return false; }
63 cudaStreamDestroy( s );
68 StreamList available_streams{};
69 std::string err_fmt( cudaError_t err, std::string file,
int line ) {
70 const char* errname = cudaGetErrorName( err );
71 const char* errstr = cudaGetErrorString( err );
73 std::format(
"Encountered CUDA error {} [{}]: {} on {}:{}", errname,
int( err ), errstr, file, line );
79 if ( !available_streams.pop(
m_stream ) ) {
80 cudaError_t err = cudaStreamCreate( &m_stream );
81 if ( err != cudaSuccess ) {
83 throw GaudiException( err_fmt( err, __FILE__, __LINE__ ),
"CUDAStreamException", StatusCode::FAILURE );
85 err = cudaStreamSynchronize(
m_stream );
86 if ( err != cudaSuccess ) {
95 m_parent->error() << std::format(
"Stream destroyed before all its dependents ({} remaining)",
m_dependents )
103 auto res = boost::fibers::cuda::waitfor_all(
m_stream );
104 cudaError_t temp_error = std::get<1>( res );
105 if ( ( temp_error ) != cudaSuccess ) {
107 std::string errmsg = err_fmt( temp_error, __FILE__, __LINE__ );
111 return m_parent->restoreAfterSuspend();
MsgStream & endmsg(MsgStream &s)
MsgStream Modifier: endmsg. Calls the output method of the MsgStream.
Base class for asynchronous algorithms.
const Gaudi::AsynchronousAlgorithm * parent()
Access the parent algorithm.
StatusCode await()
Yield fiber until stream is done.
Stream(const Gaudi::AsynchronousAlgorithm *parent)
Create a new Stream. Should happen once per algorithm.
const Gaudi::AsynchronousAlgorithm * m_parent
Defines the general base class for Gaudi exceptions.
This class is used to return status codes from routines.
constexpr static const auto FAILURE