This commit is contained in:
HailoRT-Automation
2024-07-09 23:47:13 +03:00
committed by GitHub
parent e2190aeda8
commit 01e4c7f5a7
314 changed files with 16021 additions and 5389 deletions

View File

@@ -9,6 +9,16 @@ elseif(CLCACHE_PROGRAM)
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "${CLCACHE_PROGRAM}")
endif()
if(WIN32)
find_program(SCCACHE sccache)
if(SCCACHE)
set(CMAKE_C_COMPILER_LAUNCHER ${SCCACHE})
set(CMAKE_CXX_COMPILER_LAUNCHER ${SCCACHE})
set(CMAKE_MSVC_DEBUG_INFORMATION_FORMAT Embedded)
cmake_policy(SET CMP0141 NEW)
endif()
endif()
project(HailoRT)
# Prevent in-tree building

View File

@@ -122,6 +122,9 @@ typedef enum __attribute__((packed)) {
CONTEXT_SWITCH_DEFS__ACTION_TYPE_CHANGE_BOUNDARY_INPUT_BATCH,
CONTEXT_SWITCH_DEFS__ACTION_TYPE_PAUSE_VDMA_CHANNEL,
CONTEXT_SWITCH_DEFS__ACTION_TYPE_RESUME_VDMA_CHANNEL,
CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_CACHE_INPUT,
CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_CACHE_OUTPUT,
CONTEXT_SWITCH_DEFS__ACTION_TYPE_WAIT_FOR_CACHE_UPDATED,
/* Must be last */
CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT
@@ -343,6 +346,15 @@ typedef struct {
uint8_t connected_d2h_packed_vdma_channel_id;
} CONTEXT_SWITCH_DEFS__activate_ddr_buffer_input_data_t;
/* Parameters for the CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_CACHE_INPUT action. */
typedef struct {
/* vDMA channel carrying the cache input stream. NOTE(review): packing layout of the id is not visible here - confirm. */
uint8_t packed_vdma_channel_id;
uint8_t stream_index;
uint8_t network_index;
CONTEXT_SWITCH_DEFS__stream_reg_info_t stream_reg_info;
CONTROL_PROTOCOL__host_buffer_info_t host_buffer_info;
/* Initial credits granted to the channel on activation. NOTE(review): units (bytes vs. descriptors) not visible here - confirm. */
uint32_t initial_credit_size;
} CONTEXT_SWITCH_DEFS__activate_cache_input_data_t;
typedef struct {
uint8_t packed_vdma_channel_id;
uint8_t stream_index;
@@ -367,6 +379,14 @@ typedef struct {
uint32_t buffered_rows_count;
} CONTEXT_SWITCH_DEFS__activate_ddr_buffer_output_data_t;
/* Parameters for the CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_CACHE_OUTPUT action.
 * Mirrors the cache-input variant above, minus initial_credit_size. */
typedef struct {
uint8_t packed_vdma_channel_id;
uint8_t stream_index;
uint8_t network_index;
CONTEXT_SWITCH_DEFS__stream_reg_info_t stream_reg_info;
CONTROL_PROTOCOL__host_buffer_info_t host_buffer_info;
} CONTEXT_SWITCH_DEFS__activate_cache_output_data_t;
typedef struct {
uint8_t packed_vdma_channel_id;
CONTROL_PROTOCOL__host_buffer_info_t host_buffer_info;

View File

@@ -81,7 +81,7 @@ extern "C" {
/* Value to represent an operation should be performed on all streams. */
#define CONTROL_PROTOCOL__ALL_DATAFLOW_MANAGERS (0xFF)
#define CONTROL_PROTOCOL__MAX_CONTEXT_SIZE (3072)
#define CONTROL_PROTOCOL__MAX_CONTEXT_SIZE (4096)
#define CONTROL_PROTOCOL__OPCODES_VARIABLES \
CONTROL_PROTOCOL__OPCODE_X(HAILO_CONTROL_OPCODE_IDENTIFY, true, CPU_ID_APP_CPU)\
@@ -160,6 +160,10 @@ extern "C" {
CONTROL_PROTOCOL__OPCODE_X(HAILO_CONTROL_OPCODE_SET_SLEEP_STATE, false, CPU_ID_APP_CPU)\
CONTROL_PROTOCOL__OPCODE_X(HAILO_CONTROL_OPCODE_CHANGE_HW_INFER_STATUS, false, CPU_ID_CORE_CPU)\
CONTROL_PROTOCOL__OPCODE_X(HAILO_CONTROL_OPCODE_SIGNAL_DRIVER_DOWN, false, CPU_ID_CORE_CPU)\
CONTROL_PROTOCOL__OPCODE_X(HAILO_CONTROL_OPCODE_CONTEXT_SWITCH_INIT_CACHE_INFO, false, CPU_ID_CORE_CPU)\
CONTROL_PROTOCOL__OPCODE_X(HAILO_CONTROL_OPCODE_CONTEXT_SWITCH_GET_CACHE_INFO, false, CPU_ID_CORE_CPU)\
CONTROL_PROTOCOL__OPCODE_X(HAILO_CONTROL_OPCODE_CONTEXT_SWITCH_UPDATE_CACHE_READ_OFFSET, false, CPU_ID_CORE_CPU)\
CONTROL_PROTOCOL__OPCODE_X(HAILO_CONTROL_OPCODE_CONTEXT_SWITCH_SIGNAL_CACHE_UPDATED, false, CPU_ID_CORE_CPU)\
typedef enum {
#define CONTROL_PROTOCOL__OPCODE_X(name, is_critical, cpu_id) name,
@@ -971,6 +975,26 @@ typedef struct {
#pragma warning(pop)
#endif
/* Cache state exchanged between host and firmware over the control protocol. */
typedef struct {
uint32_t cache_size;
uint32_t current_read_offset;
/* Signed delta of the write offset; may be negative. NOTE(review): exact reference point not visible here - confirm against firmware handler. */
int32_t write_offset_delta;
} CONTROL_PROTOCOL__context_switch_cache_info_t;
/* Request payload for HAILO_CONTROL_OPCODE_CONTEXT_SWITCH_INIT_CACHE_INFO. */
typedef struct {
/* Length prefix for cache_info. NOTE(review): presumably sizeof(cache_info) - confirm against the request validator. */
uint32_t cache_info_length;
CONTROL_PROTOCOL__context_switch_cache_info_t cache_info;
} CONTROL_PROTOCOL__context_switch_init_cache_info_request_t;
/* Response payload for HAILO_CONTROL_OPCODE_CONTEXT_SWITCH_GET_CACHE_INFO. */
typedef struct {
/* Length prefix for cache_info. NOTE(review): presumably sizeof(cache_info) - confirm against the response builder. */
uint32_t cache_info_length;
CONTROL_PROTOCOL__context_switch_cache_info_t cache_info;
} CONTROL_PROTOCOL__context_switch_get_cache_info_response_t;
/* Request payload for HAILO_CONTROL_OPCODE_CONTEXT_SWITCH_UPDATE_CACHE_READ_OFFSET. */
typedef struct {
/* Length prefix for read_offset_delta. */
uint32_t read_offset_delta_length;
/* Signed adjustment applied to the cache read offset; may be negative. */
int32_t read_offset_delta;
} CONTROL_PROTOCOL__context_switch_update_cache_read_offset_request_t;
typedef CONTROL_PROTOCOL__read_memory_request_t CONTROL_PROTOCOL__read_user_config_request_t;
typedef CONTROL_PROTOCOL__read_memory_response_t CONTROL_PROTOCOL__read_user_config_response_t;
@@ -1357,6 +1381,7 @@ typedef union {
CONTROL_PROTOCOL__get_overcurrent_state_response_t get_overcurrent_state_response;
CONTROL_PROTOCOL__get_hw_consts_response_t get_hw_consts_response;
CONTROL_PROTOCOL__change_hw_infer_status_response_t change_hw_infer_status_response;
CONTROL_PROTOCOL__context_switch_get_cache_info_response_t context_switch_get_cache_info_response;
// Note: This array is larger than any legal request:
// * Functions in this module won't write more than CONTROL_PROTOCOL__MAX_CONTROL_LENGTH bytes
@@ -1392,6 +1417,8 @@ typedef union {
CONTROL_PROTOCOL__sensor_set_generic_i2c_slave_request_t sensor_set_generic_i2c_slave_request;
CONTROL_PROTOCOL__context_switch_set_network_group_header_request_t context_switch_set_network_group_header_request;
CONTROL_PROTOCOL__context_switch_set_context_info_request_t context_switch_set_context_info_request;
CONTROL_PROTOCOL__context_switch_init_cache_info_request_t context_switch_init_cache_info_request;
CONTROL_PROTOCOL__context_switch_update_cache_read_offset_request_t context_switch_update_cache_read_offset_request;
CONTROL_PROTOCOL__idle_time_set_measurement_request_t idle_time_set_measurement_request;
CONTROL_PROTOCOL__download_context_action_list_request_t download_context_action_list_request;
CONTROL_PROTOCOL__change_context_switch_status_request_t change_context_switch_status_request;

View File

@@ -15,6 +15,8 @@ extern "C" {
#include "status.h"
#include "stdfloat.h"
#pragma pack(push, 1)
/**
* @brief The d2h event manager structures relevant in the host
*/
@@ -59,6 +61,7 @@ typedef enum {
HEALTH_MONITOR_CLOCK_CHANGED_EVENT_ID,
HW_INFER_MANAGER_INFER_DONE,
CONTEXT_SWITCH_RUN_TIME_ERROR,
START_UPDATE_CACHE_OFFSET_ID,
D2H_EVENT_ID_COUNT /* Must be last*/
} D2H_EVENT_ID_t;
@@ -157,6 +160,12 @@ typedef struct {
#define D2H_EVENT_CONTEXT_SWITCH_RUN_TIME_ERROR_EVENT_PARAMETER_COUNT (5)
/* Payload of the START_UPDATE_CACHE_OFFSET_ID device-to-host event. */
typedef struct {
/* Bitmask of cache ids whose read offsets should be updated. NOTE(review): bit-to-cache-id mapping not visible here - confirm. */
uint64_t cache_id_bitmask;
} D2H_EVENT_start_update_cache_offset_message_t;
#define D2H_EVENT_START_UPDATE_CACHE_OFFSET_PARAMETER_COUNT (1)
/* D2H_EVENT__message_parameters_t should be in the same order as hailo_notification_message_parameters_t */
typedef union {
D2H_EVENT_rx_error_event_message_t rx_error_event;
@@ -170,6 +179,7 @@ typedef union {
D2H_EVENT_health_monitor_clock_changed_event_message_t health_monitor_clock_changed_event;
D2H_EVENT_hw_infer_mamager_infer_done_message_t hw_infer_manager_infer_done_event;
D2H_EVENT_context_switch_run_time_error_event_message_t context_switch_run_time_error_event;
D2H_EVENT_start_update_cache_offset_message_t start_update_cache_offset_event;
} D2H_EVENT__message_parameters_t;
typedef struct {
@@ -187,6 +197,8 @@ typedef struct {
uint8_t buffer[D2H_EVENT_MAX_SIZE];
} D2H_event_buffer_t;
#pragma pack(pop)
/**********************************************************************
* Public Functions
**********************************************************************/

View File

@@ -19,8 +19,7 @@ extern "C" {
#define FIRMWARE_HEADER_MAGIC_HAILO8 (0x1DD89DE0)
#define FIRMWARE_HEADER_MAGIC_HAILO15 (0xE905DAAB)
// TODO - HRT-11344 : change fw magic to pluto specific
#define FIRMWARE_HEADER_MAGIC_PLUTO (0xE905DAAB)
#define FIRMWARE_HEADER_MAGIC_PLUTO (0xF94739AB)
typedef enum {
FIRMWARE_HEADER_VERSION_INITIAL = 0,

View File

@@ -413,6 +413,8 @@ Updating rules:
FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_HW_INFER_STATE_LENGTH)\
FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_CHANNELS_INFO_LENGTH)\
FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_BATCH_COUNT_LENGTH)\
FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_CACHE_INFO_LENGTH)\
FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_READ_OFFSET_DELTA_LENGTH)\
\
FIRMWARE_MODULE__X(FIRMWARE_MODULE__POWER_MEASUREMENT)\
FIRMWARE_STATUS__X(HAILO_POWER_MEASUREMENT_STATUS_POWER_INIT_ERROR)\
@@ -765,6 +767,8 @@ Updating rules:
FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_WRITE_DATA_BY_TYPE_ACTION_INVALID_MEMORY_SPACE)\
FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_REACHED_TIMEOUT_WHILE_WAITING_FOR_BATCH_SWITCH_CONTEXT_TO_END)\
FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_INVALID_EXTERNAL_ACTION_LIST_ADDRESS)\
FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_INVALID_CACHE_SIZE)\
FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_INVALID_READ_OFFSET_SIZE)\
\
FIRMWARE_MODULE__X(FIRMWARE_MODULE__D2H_EVENT_MANAGER)\
FIRMWARE_STATUS__X(HAILO_D2H_EVENT_MANAGER_STATUS_MESSAGE_HIGH_PRIORITY_QUEUE_CREATE_FAILED)\

1
hailort/.gitignore vendored
View File

@@ -1,4 +1,3 @@
build/
dist/
/external/
cmake/external/*/

View File

@@ -3,7 +3,6 @@ cmake_minimum_required(VERSION 3.0.0)
option(HAILO_BUILD_PYBIND "Build Python binding" OFF)
option(HAILO_BUILD_EMULATOR "Build hailort for emulator" OFF)
option(HAILO_BUILD_UT "Build Unit Tests" OFF)
option(HAILO_BUILD_DMABUF_TESTS "Build DMA buffer tests. Relevant only if HAILO_BUILD_UT is ON" OFF)
option(HAILO_BUILD_HW_DEBUG_TOOL "Build hw debug tool" OFF)
option(HAILO_BUILD_GSTREAMER "Compile gstreamer plugins" OFF)
option(HAILO_BUILD_EXAMPLES "Build examples" OFF)
@@ -12,6 +11,7 @@ option(HAILO_BUILD_SERVICE "Build hailort service" OFF)
option(HAILO_BUILD_PROFILER "Build hailort profiler" ON)
option(HAILO_COMPILE_WARNING_AS_ERROR "Add compilation flag for treating compilation warnings as errors" OFF)
option(HAILO_SUPPORT_PACKAGING "Create HailoRT package (internal)" OFF)
option(HAILO_BUILD_DOC "Build doc" OFF)
if (HAILO_COMPILE_WARNING_AS_ERROR)
if(WIN32)
@@ -31,8 +31,8 @@ endif()
# Set firmware version
add_definitions( -DFIRMWARE_VERSION_MAJOR=4 )
add_definitions( -DFIRMWARE_VERSION_MINOR=17 )
add_definitions( -DFIRMWARE_VERSION_REVISION=1 )
add_definitions( -DFIRMWARE_VERSION_MINOR=18 )
add_definitions( -DFIRMWARE_VERSION_REVISION=0 )
if(HAILO_BUILD_SERVICE)
add_definitions( -DHAILO_SUPPORT_MULTI_PROCESS )
endif()
@@ -57,6 +57,10 @@ set(HAILORT_COMMON_DIR ${PROJECT_SOURCE_DIR}/hailort/)
set(COMMON_INC_DIR ${PROJECT_SOURCE_DIR}/common/include)
set(DRIVER_INC_DIR ${PROJECT_SOURCE_DIR}/hailort/drivers/common)
set(RPC_DIR ${PROJECT_SOURCE_DIR}/hailort/rpc)
set(HRPC_DIR ${PROJECT_SOURCE_DIR}/hailort/hrpc)
set(HRPC_PROTOCOL_DIR ${PROJECT_SOURCE_DIR}/hailort/hrpc_protocol)
set(HAILORT_SERVICE_DIR ${PROJECT_SOURCE_DIR}/hailort/hailort_service)
set(HAILORT_SERVER_DIR ${PROJECT_SOURCE_DIR}/hailort/hailort_server)
if(CMAKE_SYSTEM_NAME STREQUAL QNX)
include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/pevents.cmake)
@@ -67,6 +71,8 @@ if(HAILO_BUILD_SERVICE)
endif()
add_subdirectory(common)
add_subdirectory(hrpc)
add_subdirectory(hrpc_protocol)
add_subdirectory(libhailort)
add_subdirectory(hailortcli)
if(HAILO_BUILD_HW_DEBUG_TOOL)
@@ -89,3 +95,5 @@ endif()
if(CMAKE_SYSTEM_NAME STREQUAL QNX)
add_subdirectory(drivers/qnx)
endif()
add_subdirectory(hailort_server)

View File

@@ -20,18 +20,6 @@
namespace hailort
{
typedef struct {
volatile int head;
volatile int tail;
int size;
int size_mask;
} circbuf_t;
//TODO: Do not change the behavior of this module. see PLDA descs impl..
//TODO: optimize macros
#ifndef MIN
#define MIN(x,y) (((x) < (y)) ? (x) : (y))
#endif
#ifdef _WIN32
#define _CB_FETCH(x) (InterlockedOr((LONG volatile*)(&x), (LONG)0))
#define _CB_SET(x, value) (InterlockedExchange((LONG volatile*)(&x), (LONG)(value)))
@@ -40,25 +28,114 @@ typedef struct {
#define _CB_SET(x, value) ((void)__sync_lock_test_and_set(&(x), value))
#endif
#define CB_INIT(circbuf, s) \
(circbuf).head = 0; \
(circbuf).tail = 0; \
(circbuf).size = static_cast<int>(s); \
(circbuf).size_mask = static_cast<int>((s) - 1)
#define CB_RESET(circbuf) \
(circbuf).head = 0; \
(circbuf).tail = 0
#define CB_HEAD(x) _CB_FETCH((x).head)
#define CB_TAIL(x) _CB_FETCH((x).tail)
#define CB_SIZE(x) _CB_FETCH((x).size)
#define CB_ENQUEUE(circbuf, value) _CB_SET((circbuf).head, ((circbuf).head + (value)) & ((circbuf).size_mask))
#define CB_DEQUEUE(circbuf, value) _CB_SET((circbuf).tail, ((circbuf).tail + (value)) & ((circbuf).size_mask))
#define CB_AVAIL(circbuf, head, tail) ((((circbuf).size)-1+(tail)-(head)) & ((circbuf).size_mask))
#define CB_AVAIL_CONT(circbuf, head, tail) \
MIN(CB_AVAIL((circbuf), (head), (tail)), (circbuf).size - (head))
#define CB_PROG(circbuf, head, tail) ((((circbuf).size)+(head)-(tail)) & ((circbuf).size_mask))
#define CB_PROG_CONT(circbuf, head, tail) \
MIN(CB_PROG((circbuf), (head), (tail)), (circbuf).size - (tail))
// Note: We use tag dispatching to select the right implementation for power of 2 size
// There's a minor performance gain for power of 2 size, as we can use a mask instead of modulo
// * If a CircularBuffer/Array with the IsPow2Tag, then the size must be a power of 2.
// * If a CircularBuffer/Array with the IsNotPow2Tag, then the size may be any positive integer (we simply won't
// use the mask optimization for modulo operation, even if the size is a power of 2).
struct IsPow2Tag {};
struct IsNotPow2Tag {};
// Head/tail index pair for a circular buffer. Index updates go through the
// _CB_SET/_CB_FETCH macros (Interlocked* on Windows, __sync builtins elsewhere).
// Pow2Tag selects mask-based wrap-around (IsPow2Tag) vs. plain modulo (IsNotPow2Tag).
template <typename Pow2Tag>
struct CircularBuffer
{
public:
// s is the capacity; with IsPow2Tag it must be a power of 2 (asserted in debug builds).
CircularBuffer(int s) :
m_head(0),
m_tail(0),
m_size(s),
m_size_mask(s - 1)
{
check_size(s, Pow2Tag());
}
// Resets both indices with plain stores.
// NOTE(review): unlike enqueue/dequeue this bypasses _CB_SET - confirm callers
// guarantee no concurrent access during reset.
void reset()
{
m_head = 0;
m_tail = 0;
}
// Advances head by `value` slots (producer side), wrapping at capacity.
void enqueue(int value)
{
_CB_SET(m_head, modulo(m_head + value, Pow2Tag()));
}
// Overwrites head with an absolute index; no wrap-around is applied.
void set_head(int value)
{
_CB_SET(m_head, value);
}
// Advances tail by `value` slots (consumer side), wrapping at capacity.
void dequeue(int value)
{
_CB_SET(m_tail, modulo(m_tail + value, Pow2Tag()));
}
// Overwrites tail with an absolute index; no wrap-around is applied.
void set_tail(int value)
{
_CB_SET(m_tail, value);
}
// Free slots for a given head/tail snapshot (one slot is always kept empty).
int avail(int head, int tail) const
{
return modulo(m_size - 1 + tail - head, Pow2Tag());
}
// Occupied slots for a given head/tail snapshot.
int prog(int head, int tail) const
{
return modulo(m_size + head - tail, Pow2Tag());
}
int head() const
{
return _CB_FETCH(m_head);
}
int tail() const
{
return _CB_FETCH(m_tail);
}
int size() const
{
return m_size;
}
int size_mask() const
{
return m_size_mask;
}
private:
// Mask-based wrap: valid only because m_size is a power of 2 for this tag.
int modulo(int val, IsPow2Tag) const
{
return val & m_size_mask;
}
// General wrap for arbitrary positive sizes.
int modulo(int val, IsNotPow2Tag) const
{
return val % m_size;
}
void check_size(size_t size, IsPow2Tag)
{
assert(0 != size);
assert(is_powerof2(size));
(void)size; // For release
}
void check_size(size_t size, IsNotPow2Tag)
{
assert(0 != size);
(void)size; // For release
}
volatile int m_head;
volatile int m_tail;
const int m_size;
// For power of 2 size, we can use a mask instead of modulo
const int m_size_mask;
};
template<typename T>
@@ -69,11 +146,10 @@ struct is_std_array<std::array<T, N>> : public std::true_type {};
// TODO: implement more functionalities, better move semantic handle
// TODO: support consts methods (front(), empty()), right now CB_* macros requires non const pointer to head+tail
template<typename T, typename Container = std::vector<T>>
template<typename T, typename Pow2Tag = IsPow2Tag, typename Container = std::vector<T>>
class CircularArray final
{
public:
static_assert(std::is_default_constructible<T>::value, "CircularArray object must be default constructible");
// Based on https://en.cppreference.com/w/cpp/iterator/iterator
@@ -85,12 +161,16 @@ public:
{
public:
explicit iterator(int index, CircularArray &array) : m_array(array), m_index(index) {}
iterator& operator++() { m_index = ((m_index + 1) & m_array.m_circ.size_mask); return *this; }
iterator& operator++() { increment(Pow2Tag()); return *this; }
iterator operator++(int) { iterator retval = *this; ++(*this); return retval; }
bool operator==(iterator other) const { return m_index == other.m_index; }
bool operator!=(iterator other) const { return !(*this == other); }
T &operator*() const { return m_array.m_array[m_index]; }
private:
void increment(IsPow2Tag) { m_index = (m_index + 1) & m_array.m_circ.size_mask(); }
void increment(IsNotPow2Tag) { m_index = (m_index + 1) % m_array.m_circ.size(); }
CircularArray &m_array;
int m_index;
};
@@ -98,51 +178,47 @@ public:
// Ctor for Container=std::vector
template <typename C=Container,
class = typename std::enable_if_t<std::is_same<C, std::vector<T>>::value>>
CircularArray(size_t storage_size)
CircularArray(size_t storage_size) :
m_circ(static_cast<int>(storage_size))
{
// storage size must be a power of 2
assert(is_powerof2(storage_size));
CB_INIT(m_circ, storage_size);
m_array.resize(storage_size);
}
// Ctor for Container=std::array
template <typename C=Container,
class = typename std::enable_if_t<is_std_array<C>::value>>
CircularArray(size_t storage_size, int = 0)
CircularArray(size_t storage_size, int = 0) :
m_circ(static_cast<int>(storage_size))
{
// storage size must be a power of 2
assert(is_powerof2(storage_size));
assert(storage_size <= std::tuple_size<C>::value);
CB_INIT(m_circ, storage_size);
}
void push_back(T &&element)
{
assert(!full());
m_array[CB_HEAD(m_circ)] = std::move(element);
CB_ENQUEUE(m_circ, 1);
m_array[m_circ.head()] = std::move(element);
m_circ.enqueue(1);
}
void push_back(const T& element)
{
assert(!full());
m_array[CB_HEAD(m_circ)] = element;
CB_ENQUEUE(m_circ, 1);
m_array[m_circ.head()] = element;
m_circ.enqueue(1);
}
void pop_front()
{
assert(!empty());
// Clear previous front
m_array[CB_TAIL(m_circ)] = T();
CB_DEQUEUE(m_circ, 1);
m_array[m_circ.tail()] = T();
m_circ.dequeue(1);
}
T &front()
{
assert(!empty());
return m_array[CB_TAIL(m_circ)];
return m_array[m_circ.tail()];
}
void reset()
@@ -157,36 +233,36 @@ public:
bool empty() const
{
return CB_HEAD(m_circ) == CB_TAIL(m_circ);
return m_circ.head() == m_circ.tail();
}
bool full() const
{
return 0 == CB_AVAIL(m_circ, CB_HEAD(m_circ), CB_TAIL(m_circ));
return 0 == m_circ.avail(m_circ.head(), m_circ.tail());
}
size_t size() const
{
return CB_PROG(m_circ, CB_HEAD(m_circ), CB_TAIL(m_circ));
return m_circ.prog(m_circ.head(), m_circ.tail());
}
size_t capacity() const
{
return CB_SIZE(m_circ) - 1;
return m_circ.size() - 1;
}
iterator begin()
{
return iterator(CB_TAIL(m_circ), *this);
return iterator(m_circ.tail(), *this);
}
iterator end()
{
return iterator(CB_HEAD(m_circ), *this);
return iterator(m_circ.head(), *this);
}
private:
circbuf_t m_circ;
CircularBuffer<Pow2Tag> m_circ;
Container m_array;
};

View File

@@ -46,18 +46,15 @@ WaitOrShutdown::WaitOrShutdown(WaitablePtr waitable, EventPtr shutdown_event) :
hailo_status WaitOrShutdown::wait(std::chrono::milliseconds timeout)
{
auto index = m_waitable_group.wait_any(timeout);
if (index.status() == HAILO_TIMEOUT) {
return index.status();
}
CHECK_EXPECTED_AS_STATUS(index);
assert(index.value() <= WAITABLE_INDEX);
return (index.value() == SHUTDOWN_INDEX) ? HAILO_SHUTDOWN_EVENT_SIGNALED : HAILO_SUCCESS;
TRY_WITH_ACCEPTABLE_STATUS(HAILO_TIMEOUT, const auto index, m_waitable_group.wait_any(timeout));
assert(index <= WAITABLE_INDEX);
return (index == SHUTDOWN_INDEX) ? HAILO_SHUTDOWN_EVENT_SIGNALED : HAILO_SUCCESS;
}
hailo_status WaitOrShutdown::signal()
{
// Cannot signal a WaitOrShutdown which has only shutdown event
CHECK_NOT_NULL(m_waitable, HAILO_INVALID_OPERATION);
return m_waitable->signal();
}
@@ -71,7 +68,11 @@ WaitableGroup WaitOrShutdown::create_waitable_group(WaitablePtr waitable, EventP
// Note the order - consistent with SHUTDOWN_INDEX, WAITABLE_INDEX.
std::vector<std::reference_wrapper<Waitable>> waitables;
waitables.emplace_back(std::ref(*shutdown_event));
waitables.emplace_back(std::ref(*waitable));
if (nullptr != waitable) {
waitables.emplace_back(std::ref(*waitable));
}
return waitables;
}

View File

@@ -28,7 +28,7 @@ Expected<size_t> get_istream_size(std::ifstream &s)
s.seekg(beg_pos, s.beg);
CHECK_AS_EXPECTED(s.good(), HAILO_FILE_OPERATION_FAILURE, "ifstream::seekg() failed");
auto total_size = static_cast<uint64_t>(size - beg_pos);
auto total_size = static_cast<size_t>(size - beg_pos);
CHECK_AS_EXPECTED(total_size <= std::numeric_limits<size_t>::max(), HAILO_FILE_OPERATION_FAILURE,
"File size {} is too big", total_size);
return Expected<size_t>(static_cast<size_t>(total_size));
@@ -39,16 +39,193 @@ Expected<Buffer> read_binary_file(const std::string &file_path, const BufferStor
std::ifstream file(file_path, std::ios::in | std::ios::binary);
CHECK_AS_EXPECTED(file.good(), HAILO_OPEN_FILE_FAILURE, "Error opening file {}", file_path);
auto file_size = get_istream_size(file);
CHECK_EXPECTED(file_size, "Failed to get file size");
auto buffer = Buffer::create(file_size.value(), output_buffer_params);
CHECK_EXPECTED(buffer, "Failed to allocate file buffer ({} bytes}", file_size.value());
TRY(const auto file_size, get_istream_size(file), "Failed to get file size");
TRY(auto buffer, Buffer::create(file_size, output_buffer_params),
"Failed to allocate file buffer ({} bytes}", file_size);
// Read the data
file.read(reinterpret_cast<char*>(buffer->data()), buffer->size());
file.read(reinterpret_cast<char*>(buffer.data()), buffer.size());
CHECK_AS_EXPECTED(file.good(), HAILO_FILE_OPERATION_FAILURE, "Failed reading file {}", file_path);
return buffer.release();
return buffer;
}
// Creates a FileReader over file_path. Returns HAILO_OUT_OF_HOST_MEMORY on
// allocation failure instead of throwing. The file itself is not opened here.
Expected<std::shared_ptr<FileReader>> SeekableBytesReader::create_reader(const std::string &file_path)
{
auto ptr = make_shared_nothrow<FileReader>(file_path);
CHECK_NOT_NULL_AS_EXPECTED(ptr, HAILO_OUT_OF_HOST_MEMORY);
return ptr;
}
// Creates a BufferReader over an in-memory view. Returns HAILO_OUT_OF_HOST_MEMORY
// on allocation failure instead of throwing.
Expected<std::shared_ptr<BufferReader>> SeekableBytesReader::create_reader(const MemoryView &memview)
{
auto ptr = make_shared_nothrow<BufferReader>(memview);
CHECK_NOT_NULL_AS_EXPECTED(ptr, HAILO_OUT_OF_HOST_MEMORY);
return ptr;
}
// Stores the path only; the underlying stream is created lazily by the first open().
FileReader::FileReader(const std::string &file_path) : m_file_path(file_path) {}
// Reads n bytes at the current position into buffer, advancing the position.
// A short read (including EOF) leaves the stream non-good and is reported as failure.
hailo_status FileReader::read(uint8_t *buffer, size_t n)
{
assert(nullptr != m_fstream);
(void)m_fstream->read(reinterpret_cast<char*>(buffer), n);
return m_fstream->good() ? HAILO_SUCCESS : HAILO_FILE_OPERATION_FAILURE;
}
// Reads `size` bytes starting at absolute `offset` into dst, then restores the
// previous stream position so the current read cursor is unaffected.
hailo_status FileReader::read_from_offset(size_t offset, MemoryView &dst, size_t size)
{
assert(nullptr != m_fstream);
// Remember where we were so we can seek back after the out-of-band read.
auto beg_pos = m_fstream->tellg();
(void)m_fstream->seekg(offset);
CHECK(m_fstream->good(), HAILO_FILE_OPERATION_FAILURE, "ifstream::seekg() failed");
(void)m_fstream->read(reinterpret_cast<char*>(dst.data()), size);
CHECK(m_fstream->good(), HAILO_FILE_OPERATION_FAILURE, "ifstream::read() failed");
(void)m_fstream->seekg(beg_pos);
CHECK(m_fstream->good(), HAILO_FILE_OPERATION_FAILURE, "ifstream::seekg() failed");
return HAILO_SUCCESS;
}
// Opens the file for binary reading. The ifstream object is constructed lazily
// on the first call; later calls re-open the same stream object.
hailo_status FileReader::open()
{
if (nullptr == m_fstream) {
// First call: constructing the stream with the path also opens it.
m_fstream = std::make_shared<std::ifstream>(m_file_path, std::ios::in | std::ios::binary);
} else {
m_fstream->open(m_file_path, std::ios::in | std::ios::binary);
}
return m_fstream->good() ? HAILO_SUCCESS : HAILO_OPEN_FILE_FAILURE;
}
// Returns whether the underlying stream has an open file.
// NOTE(review): unlike the other methods this does not assert m_fstream != nullptr;
// calling it before the first open() dereferences null - confirm callers.
bool FileReader::is_open() const
{
return m_fstream->is_open();
}
// Moves the read position to `position` bytes from the beginning of the file.
hailo_status FileReader::seek(size_t position)
{
assert(nullptr != m_fstream);
(void)m_fstream->seekg(position, m_fstream->beg);
return m_fstream->good() ? HAILO_SUCCESS : HAILO_FILE_OPERATION_FAILURE;
}
// Returns the current read position, or HAILO_FILE_OPERATION_FAILURE if the
// stream is in a failed state.
Expected<size_t> FileReader::tell()
{
assert(nullptr != m_fstream);
auto offset = m_fstream->tellg();
return m_fstream->good() ? Expected<size_t>(static_cast<size_t>(offset)) : make_unexpected(HAILO_FILE_OPERATION_FAILURE);
}
// Closes the underlying stream; a failed close sets the stream's failbit,
// which is reported here as HAILO_CLOSE_FAILURE.
hailo_status FileReader::close()
{
assert(nullptr != m_fstream);
m_fstream->close();
return m_fstream->good() ? HAILO_SUCCESS : HAILO_CLOSE_FAILURE;
}
// Returns the total file size in bytes. The current read position is saved and
// restored, so this call does not affect subsequent read()s.
Expected<size_t> FileReader::get_size()
{
assert(nullptr != m_fstream);
auto beg_pos = m_fstream->tellg();
CHECK_AS_EXPECTED(-1 != beg_pos, HAILO_FILE_OPERATION_FAILURE, "ifstream::tellg() failed");
// Size is measured by seeking to the end and reading the resulting position.
(void)m_fstream->seekg(0, m_fstream->end);
CHECK_AS_EXPECTED(m_fstream->good(), HAILO_FILE_OPERATION_FAILURE, "ifstream::seekg() failed");
auto file_size = m_fstream->tellg();
CHECK_AS_EXPECTED(-1 != file_size, HAILO_FILE_OPERATION_FAILURE, "ifstream::tellg() failed");
(void)m_fstream->seekg(beg_pos, m_fstream->beg);
CHECK_AS_EXPECTED(m_fstream->good(), HAILO_FILE_OPERATION_FAILURE, "ifstream::seekg() failed");
return static_cast<size_t>(file_size);
}
// Exposes the underlying stream; may be nullptr before the first open().
std::shared_ptr<std::ifstream> FileReader::get_fstream() const
{
return m_fstream;
}
// Returns the number of bytes between the current position and end-of-file,
// delegating to get_istream_size() which preserves the stream position.
Expected<size_t> FileReader::calculate_remaining_size()
{
assert(nullptr != m_fstream);
auto remaining_size = get_istream_size(*m_fstream);
CHECK_AS_EXPECTED(m_fstream->good(), HAILO_FILE_OPERATION_FAILURE, "FileReader::calculate_remaining_size() failed");
return remaining_size;
}
// Wraps std::ifstream::good(); always returns a value, never an error status.
Expected<bool> FileReader::good() const
{
assert(nullptr != m_fstream);
return m_fstream->good();
}
// Wraps an in-memory view; the cursor (m_seek_offset) starts at 0.
BufferReader::BufferReader(const MemoryView &memview) : m_memview(memview) {}
// Copies n bytes from the current cursor into buffer and advances the cursor.
// Bounds are asserted in debug builds only; callers own the contract.
hailo_status BufferReader::read(uint8_t *buffer, size_t n)
{
const size_t start = m_seek_offset;
assert(start + n <= m_memview.size());
memcpy(buffer, m_memview.data() + start, n);
m_seek_offset = start + n;
return HAILO_SUCCESS;
}
// Copies `size` bytes from absolute `offset` into dst; the cursor is not moved.
// NOTE(review): no bounds assert here, unlike read() - confirm this is intended.
hailo_status BufferReader::read_from_offset(size_t offset, MemoryView &dst, size_t size)
{
memcpy(dst.data(), m_memview.data() + offset, size);
return HAILO_SUCCESS;
}
// No-op: an in-memory buffer has nothing to open.
hailo_status BufferReader::open()
{
// In case we use the buffer, we don't need to check if the file is open
return HAILO_SUCCESS;
}
// Always true: an in-memory buffer is always "open".
bool BufferReader::is_open() const
{
// In case we use the buffer, we don't need to check if the file is open
return true;
}
// Sets the cursor to an absolute position inside the buffer.
// NOTE(review): the assert rejects position == size(), so seeking exactly to the
// end is disallowed in debug builds - confirm this asymmetry with FileReader::seek.
hailo_status BufferReader::seek(size_t position)
{
assert(position < m_memview.size());
m_seek_offset = position;
return HAILO_SUCCESS;
}
// Returns the current cursor position; cannot fail.
Expected<size_t> BufferReader::tell()
{
return Expected<size_t>(m_seek_offset);
}
// No-op: nothing to release for an in-memory buffer.
hailo_status BufferReader::close()
{
return HAILO_SUCCESS;
}
// Returns the total size of the wrapped buffer in bytes.
Expected<size_t> BufferReader::get_size()
{
return Expected<size_t>(m_memview.size());
}
// Bytes between the cursor and the end of the buffer.
Expected<size_t> BufferReader::calculate_remaining_size()
{
return m_memview.size() - m_seek_offset;
}
// Always true: an in-memory buffer has no failure state.
Expected<bool> BufferReader::good() const
{
return true;
}
// Returns the wrapped view by value; the underlying memory is not copied.
const MemoryView BufferReader::get_memview() const
{
return m_memview;
}
} /* namespace hailort */

View File

@@ -27,6 +27,74 @@ Expected<size_t> get_istream_size(std::ifstream &s);
Expected<Buffer> read_binary_file(const std::string &file_path,
const BufferStorageParams &output_buffer_params = {});
class FileReader;
class BufferReader;
// Abstract reader over a seekable byte source. Concrete implementations are
// FileReader (file on disk) and BufferReader (in-memory view), obtained via
// the create_reader() factory overloads.
class SeekableBytesReader
{
public:
virtual ~SeekableBytesReader() = default;
// Reads n bytes into buffer, advancing the current position.
virtual hailo_status read(uint8_t *buffer, size_t n) = 0;
// Reads n bytes starting at absolute `offset` into dst without moving the current position.
virtual hailo_status read_from_offset(size_t offset, MemoryView &dst, size_t n) = 0;
virtual hailo_status open() = 0;
virtual bool is_open() const = 0;
// Moves the current position to `position` bytes from the beginning.
virtual hailo_status seek(size_t position) = 0;
// Returns the current position.
virtual Expected<size_t> tell() = 0;
virtual hailo_status close() = 0;
// Returns the total size of the underlying source in bytes.
virtual Expected<size_t> get_size() = 0;
virtual Expected<bool> good() const = 0;
// Returns the number of bytes between the current position and the end.
virtual Expected<size_t> calculate_remaining_size() = 0;
// Factory helpers; return HAILO_OUT_OF_HOST_MEMORY on allocation failure.
static Expected<std::shared_ptr<FileReader>> create_reader(const std::string &file_path);
static Expected<std::shared_ptr<BufferReader>> create_reader(const MemoryView &memview);
};
// SeekableBytesReader backed by a file on disk. The underlying std::ifstream
// is created lazily by the first open() call.
class FileReader : public SeekableBytesReader
{
public:
FileReader(const std::string &file_path);
virtual hailo_status read(uint8_t *buffer, size_t n);
virtual hailo_status read_from_offset(size_t offset, MemoryView &dst, size_t n);
virtual hailo_status open();
virtual bool is_open() const;
virtual hailo_status seek(size_t position);
virtual Expected<size_t> tell();
virtual hailo_status close();
virtual Expected<size_t> get_size();
virtual Expected<bool> good() const;
virtual Expected<size_t> calculate_remaining_size();
// Exposes the underlying stream; may be nullptr before the first open().
std::shared_ptr<std::ifstream> get_fstream() const;
private:
// nullptr until open() is called for the first time.
std::shared_ptr<std::ifstream> m_fstream = nullptr;
std::string m_file_path;
};
// SeekableBytesReader backed by an in-memory buffer. open()/close() are no-ops;
// read() advances an internal cursor.
class BufferReader : public SeekableBytesReader
{
public:
BufferReader(const MemoryView &memview);
virtual hailo_status read(uint8_t *buffer, size_t n);
virtual hailo_status read_from_offset(size_t offset, MemoryView &dst, size_t n);
virtual hailo_status open();
virtual bool is_open() const;
virtual hailo_status seek(size_t position);
virtual Expected<size_t> tell();
virtual hailo_status close();
virtual Expected<size_t> get_size();
virtual Expected<bool> good() const;
virtual Expected<size_t> calculate_remaining_size();
const MemoryView get_memview() const;
private:
// NOTE(review): presumably a non-owning view - the caller must keep the
// underlying memory alive for the reader's lifetime; confirm.
MemoryView m_memview;
// Current read position, advanced by read() and set by seek().
size_t m_seek_offset = 0;
};
} /* namespace hailort */
#endif /* _HAILO_FILE_UTILS_HPP_ */

View File

@@ -88,7 +88,7 @@ private:
FindFile &operator=(FindFile &&other) = delete;
FindFile(FindFile &&other);
Filesystem::FileInfo get_cur_file_info();
Filesystem::FileInfo get_cur_file_info() const;
// Will return HAILO_INVALID_OPERATION when the iteration is complete or HAILO_FILE_OPERATION_FAILURE upon failure
hailo_status next_file();

View File

@@ -24,8 +24,15 @@
#endif
#endif
#if defined(__linux__) && !defined(__ANDROID__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstringop-overflow"
#endif
#include <spdlog/spdlog.h>
#include <spdlog/fmt/ostr.h>
#if defined(__linux__) && !defined(__ANDROID__)
#pragma GCC diagnostic pop
#endif
inline std::ostream& operator<<(std::ostream& os, const hailo_status& status)
{

View File

@@ -75,13 +75,12 @@ Expected<std::string> EthernetUtils::get_ip_from_interface(const std::string &in
struct ifreq ifr = {};
/* Create socket */
auto socket = Socket::create(AF_INET, SOCK_DGRAM, 0);
CHECK_EXPECTED(socket);
TRY(const auto socket, Socket::create(AF_INET, SOCK_DGRAM, 0));
/* Convert interface name to ip address */
ifr.ifr_addr.sa_family = AF_INET;
(void)strncpy(ifr.ifr_name, interface_name.c_str(), IFNAMSIZ-1);
auto posix_rc = ioctl(socket->get_fd(), SIOCGIFADDR, &ifr);
auto posix_rc = ioctl(socket.get_fd(), SIOCGIFADDR, &ifr);
CHECK_AS_EXPECTED(posix_rc >= 0, HAILO_ETH_INTERFACE_NOT_FOUND,
"Interface was not found. ioctl with SIOCGIFADDR has failed. errno: {:#x}", errno);

View File

@@ -62,12 +62,11 @@ Expected<std::vector<std::string>> Filesystem::get_files_in_dir_flat(const std::
{
const std::string dir_path_with_sep = has_suffix(dir_path, SEPARATOR) ? dir_path : dir_path + SEPARATOR;
auto dir = DirWalker::create(dir_path_with_sep);
CHECK_EXPECTED(dir);
TRY(auto dir, DirWalker::create(dir_path_with_sep));
std::vector<std::string> files;
struct dirent *entry = nullptr;
while ((entry = dir->next_file()) != nullptr) {
while ((entry = dir.next_file()) != nullptr) {
if (entry->d_type != DT_REG) {
continue;
}
@@ -106,21 +105,19 @@ Expected<std::vector<std::string>> Filesystem::get_latest_files_in_dir_flat(cons
std::time_t curr_time = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());
const std::string dir_path_with_sep = has_suffix(dir_path, SEPARATOR) ? dir_path : dir_path + SEPARATOR;
auto dir = DirWalker::create(dir_path_with_sep);
CHECK_EXPECTED(dir);
TRY(auto dir, DirWalker::create(dir_path_with_sep));
std::vector<std::string> files;
struct dirent *entry = nullptr;
while ((entry = dir->next_file()) != nullptr) {
while ((entry = dir.next_file()) != nullptr) {
if (entry->d_type != DT_REG) {
continue;
}
const std::string file_path = dir_path_with_sep + std::string(entry->d_name);
auto file_modified_time = get_file_modified_time(file_path);
CHECK_EXPECTED(file_modified_time);
TRY(const auto file_modified_time, get_file_modified_time(file_path));
auto time_diff_sec = std::difftime(curr_time, file_modified_time.value());
auto time_diff_sec = std::difftime(curr_time, file_modified_time);
auto time_diff_millisec = time_diff_sec * 1000;
if (time_diff_millisec <= static_cast<double>(time_interval.count())) {
files.emplace_back(file_path);

View File

@@ -17,12 +17,10 @@ namespace hailort
Expected<std::pair<int32_t, std::string>> Process::create_and_wait_for_output(const std::string &command_line, uint32_t max_output_size)
{
auto popen_expected = PopenWrapper::create(command_line);
CHECK_EXPECTED(popen_expected);
const auto output_expected = popen_expected->read_stdout(max_output_size);
CHECK_EXPECTED(output_expected);
const auto process_exit_code = popen_expected->close();
return std::make_pair(process_exit_code, output_expected.value());
TRY(auto popen, PopenWrapper::create(command_line));
TRY(const auto output, popen.read_stdout(max_output_size));
const auto process_exit_code = popen.close();
return std::make_pair(process_exit_code, output);
}
Expected<Process::PopenWrapper> Process::PopenWrapper::create(const std::string &command_line)
@@ -61,15 +59,14 @@ Expected<std::string> Process::PopenWrapper::read_stdout(uint32_t max_output_siz
{
assert (nullptr != m_pipe);
// We zero out the bufer so that output won't contain junk from the heap
auto output = Buffer::create(max_output_size, 0);
CHECK_EXPECTED(output);
// We zero out the buffer so that output won't contain junk from the heap
TRY(auto output, Buffer::create(max_output_size, 0));
const auto num_read = fread(reinterpret_cast<char*>(output->data()), sizeof(uint8_t), output->size(), m_pipe);
if (num_read != output->size()) {
const auto num_read = fread(reinterpret_cast<char*>(output.data()), sizeof(uint8_t), output.size(), m_pipe);
if (num_read != output.size()) {
if (feof(m_pipe)) {
// We remove the trailing newline we get from fread
const auto output_as_str = output->to_string();
const auto output_as_str = output.to_string();
if (output_as_str[output_as_str.length() - 1] == '\n') {
return output_as_str.substr(0, num_read - 1);
}
@@ -81,7 +78,7 @@ Expected<std::string> Process::PopenWrapper::read_stdout(uint32_t max_output_siz
} else {
// Truncate output
LOGGER__TRACE("Truncating output to {} chars long", max_output_size);
return output->to_string();
return output.to_string();
}
}

View File

@@ -34,13 +34,10 @@ hailo_status Socket::SocketModuleWrapper::free_module()
Expected<Socket> Socket::create(int af, int type, int protocol)
{
auto module_wrapper = SocketModuleWrapper::create();
CHECK_EXPECTED(module_wrapper);
TRY(auto module_wrapper, SocketModuleWrapper::create());
TRY(const auto socket_fd, create_socket_fd(af, type, protocol));
auto socket_fd = create_socket_fd(af, type, protocol);
CHECK_EXPECTED(socket_fd);
auto obj = Socket(module_wrapper.release(), socket_fd.release());
auto obj = Socket(std::move(module_wrapper), socket_fd);
return obj;
}

View File

@@ -22,17 +22,12 @@ namespace hailort
Expected<TrafficControlUtil> TrafficControlUtil::create(const std::string &ip, uint16_t port, uint32_t rate_bytes_per_sec)
{
auto interface_name = EthernetUtils::get_interface_from_board_ip(ip);
CHECK_EXPECTED(interface_name, "get_interface_name failed with status {}", interface_name.status());
TRY(const auto interface_name, EthernetUtils::get_interface_from_board_ip(ip), "get_interface_name failed");
TRY(const auto board_id, ip_to_board_id(ip), "ip_to_board_id failed");
TRY(const auto is_sudo_needed, check_is_sudo_needed(), "check_is_sudo_needed failed");
auto board_id = ip_to_board_id(ip);
CHECK_EXPECTED(board_id, "ip_to_board_id failed with status {}", board_id.status());
auto is_sudo_needed = check_is_sudo_needed();
CHECK_EXPECTED(is_sudo_needed, "check_is_sudo_needed failed with status {}", is_sudo_needed.status());
return TrafficControlUtil(ip, interface_name.release(), board_id.release(), port, port_to_port_id(port),
rate_bytes_per_sec, is_sudo_needed.release());
return TrafficControlUtil(ip, interface_name, board_id, port, port_to_port_id(port),
rate_bytes_per_sec, is_sudo_needed);
}
TrafficControlUtil::TrafficControlUtil(const std::string& board_address, const std::string& interface_name,
@@ -185,28 +180,26 @@ uint16_t TrafficControlUtil::port_to_port_id(uint16_t port)
Expected<bool> TrafficControlUtil::check_is_sudo_needed()
{
const auto result = Process::create_and_wait_for_output("id -u", MAX_COMMAND_OUTPUT_LENGTH);
CHECK_EXPECTED(result);
TRY(const auto result, Process::create_and_wait_for_output("id -u", MAX_COMMAND_OUTPUT_LENGTH));
// If the user id is zero then we don't need to add `sudo` to our commands
return std::move(result->second != "0");
return std::move(result.second != "0");
}
hailo_status TrafficControlUtil::run_command(const std::string &commnad, bool add_sudo,
const std::vector<std::string> &allowed_errors, bool ignore_fails)
{
// Note: we redirect stderr to stdout
const auto result = Process::create_and_wait_for_output(
TRY(const auto result, Process::create_and_wait_for_output(
add_sudo ? "sudo " + commnad + " 2>&1" : commnad + " 2>&1",
MAX_COMMAND_OUTPUT_LENGTH);
CHECK_EXPECTED_AS_STATUS(result);
MAX_COMMAND_OUTPUT_LENGTH));
const uint32_t exit_code = result->first;
const uint32_t exit_code = result.first;
if (0 == exit_code) {
return HAILO_SUCCESS;
}
std::string cmd_output = result->second;
std::string cmd_output = result.second;
// No output = everything was OK
bool is_output_valid = cmd_output.empty();
if ((!is_output_valid) && (!allowed_errors.empty())) {
@@ -225,11 +218,10 @@ hailo_status TrafficControlUtil::run_command(const std::string &commnad, bool ad
Expected<TrafficControl> TrafficControl::create(const std::string &ip, uint16_t port, uint32_t rate_bytes_per_sec)
{
auto tc_util = TrafficControlUtil::create(ip, port, rate_bytes_per_sec);
CHECK_EXPECTED(tc_util);
TRY(auto tc_util, TrafficControlUtil::create(ip, port, rate_bytes_per_sec));
hailo_status rate_set_status = HAILO_UNINITIALIZED;
TrafficControl tc(tc_util.release(), rate_set_status);
TrafficControl tc(std::move(tc_util), rate_set_status);
CHECK_SUCCESS_AS_EXPECTED(rate_set_status, "Failed setting rate limit with status {}", rate_set_status);
return tc;

View File

@@ -54,10 +54,9 @@ Expected<NetworkInterfaces> NetworkInterface::get_all_interfaces()
return make_unexpected(HAILO_UNEXPECTED_INTERFACE_INFO_FAILURE);
}
auto interface_info_buffer = Buffer::create(required_size, 0);
CHECK_EXPECTED(interface_info_buffer);
TRY(auto interface_info_buffer, Buffer::create(required_size, 0));
ret_value = GetAdaptersAddresses(IPV4, UNICAST_ONLY, RESERVED,
interface_info_buffer->as_pointer<IP_ADAPTER_ADDRESSES>(), &required_size);
interface_info_buffer.as_pointer<IP_ADAPTER_ADDRESSES>(), &required_size);
if (ret_value == ERROR_NO_DATA) {
LOGGER__ERROR("No IPv4 interfaces found");
return make_unexpected(HAILO_NO_IPV4_INTERFACES_FOUND);
@@ -67,7 +66,7 @@ Expected<NetworkInterfaces> NetworkInterface::get_all_interfaces()
}
NetworkInterfaces interfaces;
PIP_ADAPTER_ADDRESSES interface_info = interface_info_buffer->as_pointer<IP_ADAPTER_ADDRESSES>();
PIP_ADAPTER_ADDRESSES interface_info = interface_info_buffer.as_pointer<IP_ADAPTER_ADDRESSES>();
while (interface_info != nullptr) {
PIP_ADAPTER_UNICAST_ADDRESS first_unicast_address = interface_info->FirstUnicastAddress;
@@ -83,10 +82,9 @@ Expected<NetworkInterfaces> NetworkInterface::get_all_interfaces()
continue;
}
auto ip = Buffer::create(IPV4_STRING_MAX_LENGTH);
CHECK_EXPECTED(ip);
TRY(auto ip, Buffer::create(IPV4_STRING_MAX_LENGTH));
const auto result = Socket::ntop(AF_INET, &(reinterpret_cast<sockaddr_in *>(address_struct)->sin_addr),
ip->as_pointer<char>(), EthernetUtils::MAX_INTERFACE_SIZE);
ip.as_pointer<char>(), EthernetUtils::MAX_INTERFACE_SIZE);
if (result != HAILO_SUCCESS) {
LOGGER__DEBUG("Failed converting unicast address to string (result={}). Skipping.", result);
continue;
@@ -98,7 +96,7 @@ Expected<NetworkInterfaces> NetworkInterface::get_all_interfaces()
continue;
}
interfaces.emplace_back(interface_info->IfIndex, interface_info->AdapterName,
friendly_name_ansi.value(), ip->to_string());
friendly_name_ansi.value(), ip.to_string());
interface_info = interface_info->Next;
}
@@ -130,9 +128,8 @@ Expected<ArpTable> ArpTable::create(uint32_t interface_index)
return make_unexpected(HAILO_UNEXPECTED_ARP_TABLE_FAILURE);
}
auto ip_net_table_buffer = Buffer::create(required_size, 0);
CHECK_EXPECTED(ip_net_table_buffer);
ret_value = GetIpNetTable(ip_net_table_buffer->as_pointer<MIB_IPNETTABLE>(), &required_size, SORTED);
TRY(auto ip_net_table_buffer, Buffer::create(required_size, 0));
ret_value = GetIpNetTable(ip_net_table_buffer.as_pointer<MIB_IPNETTABLE>(), &required_size, SORTED);
if (ret_value == ERROR_NO_DATA) {
LOGGER__ERROR("No IPv4 interfaces found");
return make_unexpected(HAILO_NO_IPV4_INTERFACES_FOUND);
@@ -142,7 +139,7 @@ Expected<ArpTable> ArpTable::create(uint32_t interface_index)
}
std::unordered_map<uint32_t, MacAddress> result;
const PMIB_IPNETTABLE ip_net_table = ip_net_table_buffer->as_pointer<MIB_IPNETTABLE>();
const PMIB_IPNETTABLE ip_net_table = ip_net_table_buffer.as_pointer<MIB_IPNETTABLE>();
for (uint32_t i = 0; i < ip_net_table->dwNumEntries; i++) {
if (ip_net_table->table[i].dwIndex != interface_index) {
continue;
@@ -162,18 +159,15 @@ Expected<ArpTable> ArpTable::create(uint32_t interface_index)
Expected<std::string> EthernetUtils::get_interface_from_board_ip(const std::string &board_ip)
{
auto network_interfaces = NetworkInterface::get_all_interfaces();
CHECK_EXPECTED(network_interfaces);
TRY(const auto network_interfaces, NetworkInterface::get_all_interfaces());
struct in_addr board_ip_struct{};
auto status = Socket::pton(AF_INET, board_ip.c_str(), &board_ip_struct);
CHECK_SUCCESS_AS_EXPECTED(status, "Invalid board ip address {}", board_ip);
for (const auto& network_interface : network_interfaces.value()) {
auto arp_table = ArpTable::create(network_interface.index());
CHECK_EXPECTED(arp_table);
const auto mac_address = arp_table->get_mac_address(static_cast<uint32_t>(board_ip_struct.S_un.S_addr));
for (const auto &network_interface : network_interfaces) {
TRY(const auto arp_table, ArpTable::create(network_interface.index()));
const auto mac_address = arp_table.get_mac_address(static_cast<uint32_t>(board_ip_struct.S_un.S_addr));
if (mac_address) {
return network_interface.friendly_name();
}
@@ -184,10 +178,9 @@ Expected<std::string> EthernetUtils::get_interface_from_board_ip(const std::stri
Expected<std::string> EthernetUtils::get_ip_from_interface(const std::string &interface_name)
{
auto network_interfaces = NetworkInterface::get_all_interfaces();
CHECK_EXPECTED(network_interfaces);
TRY(const auto network_interfaces, NetworkInterface::get_all_interfaces());
for (const auto& network_interface : network_interfaces.value()) {
for (const auto &network_interface : network_interfaces) {
if (network_interface.friendly_name() == interface_name) {
return network_interface.ip();
}

View File

@@ -63,7 +63,7 @@ Filesystem::FindFile::FindFile(FindFile &&other) :
m_find_data(other.m_find_data)
{}
Filesystem::FileInfo Filesystem::FindFile::get_cur_file_info()
Filesystem::FileInfo Filesystem::FindFile::get_cur_file_info() const
{
return {m_find_data.cFileName, m_find_data.dwFileAttributes};
}
@@ -93,18 +93,17 @@ Expected<std::vector<std::string>> Filesystem::get_files_in_dir_flat(const std::
{
const std::string dir_path_with_sep = has_suffix(dir_path, SEPARATOR) ? dir_path : dir_path + SEPARATOR;
auto dir = FindFile::create(dir_path_with_sep);
CHECK_EXPECTED(dir);
TRY(auto dir, FindFile::create(dir_path_with_sep));
std::vector<std::string> files;
auto file_info = dir->get_cur_file_info();
auto file_info = dir.get_cur_file_info();
if (is_regular_or_readonly_file(file_info.attrs)) {
files.emplace_back(file_info.path);
}
hailo_status status = HAILO_UNINITIALIZED;
while (true) {
status = dir->next_file();
status = dir.next_file();
if (HAILO_INVALID_OPERATION == status) {
// We're done
break;
@@ -115,7 +114,7 @@ Expected<std::vector<std::string>> Filesystem::get_files_in_dir_flat(const std::
continue;
}
file_info = dir->get_cur_file_info();
file_info = dir.get_cur_file_info();
if (is_regular_or_readonly_file(file_info.attrs)) {
files.emplace_back(dir_path_with_sep + file_info.path);
}

View File

@@ -40,13 +40,10 @@ hailo_status Socket::SocketModuleWrapper::free_module()
Expected<Socket> Socket::create(int af, int type, int protocol)
{
auto module_wrapper = SocketModuleWrapper::create();
CHECK_EXPECTED(module_wrapper);
TRY(auto module_wrapper, SocketModuleWrapper::create());
TRY(const auto socket_fd, create_socket_fd(af, type, protocol));
auto socket_fd = create_socket_fd(af, type, protocol);
CHECK_EXPECTED(socket_fd);
auto obj = Socket(module_wrapper.release(), socket_fd.release());
auto obj = Socket(std::move(module_wrapper), socket_fd);
return std::move(obj);
}
@@ -118,8 +115,7 @@ hailo_status Socket::ntop(int af, const void *src, char *dst, socklen_t size)
CHECK_ARG_NOT_NULL(src);
CHECK_ARG_NOT_NULL(dst);
auto module_wrapper = SocketModuleWrapper::create();
CHECK_EXPECTED_AS_STATUS(module_wrapper);
TRY(const auto module_wrapper, SocketModuleWrapper::create());
const char *inet_result = inet_ntop(af, src, dst, size);
CHECK(nullptr != inet_result, HAILO_ETH_FAILURE, "Failed inet_ntop. WSALE={}", WSAGetLastError());
@@ -134,8 +130,7 @@ hailo_status Socket::pton(int af, const char *src, void *dst)
CHECK_ARG_NOT_NULL(src);
CHECK_ARG_NOT_NULL(dst);
auto module_wrapper = SocketModuleWrapper::create();
CHECK_EXPECTED_AS_STATUS(module_wrapper);
TRY(const auto module_wrapper, SocketModuleWrapper::create());
inet_result = inet_pton(af, src, dst);
if (1 != inet_result) {

View File

@@ -42,7 +42,7 @@ public:
m_module_wrapper(std::move(other.m_module_wrapper)), m_socket_fd(std::exchange(other.m_socket_fd, INVALID_SOCKET))
{};
socket_t get_fd() { return m_socket_fd; }
socket_t get_fd() const { return m_socket_fd; }
static hailo_status ntop(int af, const void *src, char *dst, socklen_t size);
static hailo_status pton(int af, const char *src, void *dst);

View File

@@ -63,13 +63,12 @@ Expected<int32_t> StringUtils::to_int32(const std::string &str, int base)
Expected<uint8_t> StringUtils::to_uint8(const std::string &str, int base)
{
auto number = to_uint32(str, base);
CHECK_EXPECTED(number);
TRY(const auto number, to_uint32(str, base));
CHECK_AS_EXPECTED(((number.value() >= std::numeric_limits<uint8_t>::min()) && (number.value() <= std::numeric_limits<uint8_t>::max())),
CHECK_AS_EXPECTED(((number >= std::numeric_limits<uint8_t>::min()) && (number <= std::numeric_limits<uint8_t>::max())),
HAILO_INVALID_ARGUMENT, "Failed to convert string {} to uint8_t.", str);
return static_cast<uint8_t>(number.value());
return static_cast<uint8_t>(number);
}
std::string StringUtils::to_hex_string(const uint8_t *array, size_t size, bool uppercase, const std::string &delimiter)

View File

@@ -14,6 +14,7 @@
#include "hailo/hailort.h"
#include "hailo/expected.hpp"
#include "hailo/buffer.hpp"
#include "common/logger_macros.hpp"
#include <spdlog/fmt/bundled/core.h>
@@ -22,6 +23,9 @@
#include <map>
#include <set>
#include <unordered_set>
#include <cstdint>
#include <cstddef>
#include <fstream>
namespace hailort
@@ -31,6 +35,10 @@ namespace hailort
#define IS_FIT_IN_UINT16(number) ((std::numeric_limits<uint16_t>::max() >= ((int32_t)(number))) && (std::numeric_limits<uint16_t>::min() <= ((int32_t)(number))))
#define IS_FIT_IN_UINT32(number) ((std::numeric_limits<uint32_t>::max() >= ((int64_t)(number))) && (std::numeric_limits<uint32_t>::min() <= ((int64_t)(number))))
static const uint32_t POLYNOMIAL = 0xEDB88320;
static const size_t MB = 1024 * 1024;
template <typename T>
static inline bool contains(const std::vector<T> &container, const T &value)
{
@@ -264,6 +272,10 @@ inline hailo_status get_status(const Expected<T> &exp)
#define CHECK_GRPC_STATUS_AS_EXPECTED(status) _CHECK_GRPC_STATUS(status, make_unexpected(HAILO_RPC_FAILED), SERVICE_WARNING_MSG)
#endif
// Macros that check status. If status is 'valid_error', return without printing error to the prompt.
#define CHECK_EXPECTED_WITH_ACCEPTABLE_STATUS(valid_error, exp, ...) if (valid_error == (exp).status()) {return make_unexpected(valid_error);} CHECK_SUCCESS(exp, __VA_ARGS__);
#define __HAILO_CONCAT(x, y) x ## y
#define _HAILO_CONCAT(x, y) __HAILO_CONCAT(x, y)
@@ -272,6 +284,11 @@ inline hailo_status get_status(const Expected<T> &exp)
CHECK_EXPECTED(expected_var_name, __VA_ARGS__); \
var_decl = expected_var_name.release()
#define _TRY_V(expected_var_name, var_decl, expr, ...) \
auto expected_var_name = (expr); \
CHECK_EXPECTED(expected_var_name, __VA_ARGS__); \
var_decl = expected_var_name.value()
/**
* The TRY macro is used to allow easier validation and access for variables returned as Expected<T>.
* If the expression returns an Expected<T> with status HAILO_SUCCESS, the macro will release the expected and assign
@@ -289,6 +306,19 @@ inline hailo_status get_status(const Expected<T> &exp)
*/
#define TRY(var_decl, expr, ...) _TRY(_HAILO_CONCAT(__expected, __COUNTER__), var_decl, expr, __VA_ARGS__)
/**
* Same us TRY macro but instead of returning released value, it will return the value itself.
*/
// TODO: HRT-13624: Remove after 'expected' implementation is fixed
#define TRY_V(var_decl, expr, ...) _TRY_V(_HAILO_CONCAT(__expected, __COUNTER__), var_decl, expr, __VA_ARGS__)
#define _TRY_WITH_ACCEPTABLE_STATUS(valid_error, expected_var_name, var_decl, expr, ...) \
auto expected_var_name = (expr); \
CHECK_EXPECTED_WITH_ACCEPTABLE_STATUS(valid_error, expected_var_name, __VA_ARGS__); \
var_decl = expected_var_name.release()
#define TRY_WITH_ACCEPTABLE_STATUS(valid_error, var_decl, expr, ...) _TRY_WITH_ACCEPTABLE_STATUS(valid_error, _HAILO_CONCAT(__expected, __COUNTER__), var_decl, expr, __VA_ARGS__)
#ifndef _MSC_VER
#define IGNORE_DEPRECATION_WARNINGS_BEGIN _Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
@@ -343,6 +373,124 @@ static inline bool is_env_variable_on(const char* env_var_name, const std::strin
return ((nullptr != env_var) && (strncmp(env_var, required_value.c_str(), required_value.size()) == 0));
}
static inline Expected<std::string> get_env_variable(const std::string &env_var_name)
{
const auto env_var = std::getenv(env_var_name.c_str());
// Using ifs instead of CHECKs to avoid printing the error message
if (nullptr == env_var) {
return make_unexpected(HAILO_NOT_FOUND);
}
const auto result = std::string(env_var);
if (result.empty()) {
return make_unexpected(HAILO_NOT_FOUND);
}
return Expected<std::string>(result);
}
class CRC32 {
public:
CRC32() {
generate_table();
}
uint32_t calculate(std::ifstream &s, size_t buffer_size) const {
auto beg_pos = s.tellg(); // Saves current position
uint32_t crc = 0xFFFFFFFF;
std::vector<char> buffer(MB);
size_t total_bytes_read = 0;
while (total_bytes_read < buffer_size) {
size_t bytes_to_read = std::min(buffer_size - total_bytes_read, MB);
s.read(buffer.data(), bytes_to_read);
size_t bytes_read = s.gcount();
total_bytes_read += bytes_read;
for (size_t i = 0; i < bytes_read; ++i) {
crc = (crc >> 8) ^ table[(crc ^ static_cast<uint8_t>(buffer[i])) & 0xFF];
}
}
s.seekg(beg_pos, std::ios::beg); // Return to the original position
return crc ^ 0xFFFFFFFF;
}
uint32_t calculate(const MemoryView &buffer) const {
uint32_t crc = 0xFFFFFFFF;
auto data = buffer.data();
for (size_t i = 0; i < buffer.size(); ++i) {
crc = (crc >> 8) ^ table[(crc ^ data[i]) & 0xFF];
}
return crc ^ 0xFFFFFFFF;
}
static Expected<uint32_t> calc_crc_on_buffer(const MemoryView &buffer)
{
CRC32 crcCalculator;
return crcCalculator.calculate(buffer);
}
static Expected<uint32_t> calc_crc_on_stream(std::ifstream &s, size_t size)
{
CRC32 crcCalculator;
return crcCalculator.calculate(s, size);
}
private:
uint32_t table[256];
void generate_table() {
for (uint32_t i = 0; i < 256; ++i) {
uint32_t crc = i;
for (uint32_t j = 0; j < 8; ++j) {
crc = (crc & 1) ? (crc >> 1) ^ POLYNOMIAL : (crc >> 1);
}
table[i] = crc;
}
}
};
class BufferUtils final
{
public:
BufferUtils() = delete;
static void summarize_buffer(const Buffer& buffer, std::ostream& os)
{
os << "Buffer addr = " << static_cast<const void *>(buffer.data()) << ", size = " << buffer.size() << std::endl;
if (buffer.size() == 0) {
os << "Buffer is empty" << std::endl;
return;
}
size_t range_start = 0;
uint8_t current_value = buffer[0];
for (size_t i = 1; i < buffer.size(); ++i) {
if (buffer[i] != current_value) {
print_range(range_start, i, current_value, os);
current_value = buffer[i];
range_start = i;
}
}
// Print the last range
print_range(range_start, buffer.size(), current_value, os);
}
private:
static void print_range(size_t range_start, size_t range_end_exclusive, uint8_t value, std::ostream& os)
{
const auto message = fmt::format("[0x{:08X}:0x{:08X}] - 0x{:02X} ({} bytes)",
range_start, range_end_exclusive - 1, static_cast<int>(value), range_end_exclusive - range_start);
os << message << std::endl;
}
};
} /* namespace hailort */
#endif /* HAILO_UTILS_H_ */

View File

@@ -6,6 +6,14 @@
#ifndef _HAILO_IOCTL_COMMON_H_
#define _HAILO_IOCTL_COMMON_H_
#define HAILO_DRV_VER_MAJOR 4
#define HAILO_DRV_VER_MINOR 18
#define HAILO_DRV_VER_REVISION 0
#define _STRINGIFY_EXPANDED( x ) #x
#define _STRINGIFY_NUMBER( x ) _STRINGIFY_EXPANDED(x)
#define HAILO_DRV_VER _STRINGIFY_NUMBER(HAILO_DRV_VER_MAJOR) "." _STRINGIFY_NUMBER(HAILO_DRV_VER_MINOR) "." _STRINGIFY_NUMBER(HAILO_DRV_VER_REVISION)
// This value is not easily changeable.
// For example: the channel interrupts ioctls assume we have up to 32 channels
@@ -23,14 +31,17 @@
#define INVALID_DRIVER_HANDLE_VALUE ((uintptr_t)-1)
// Used by windows and unix driver to raise the right CPU control handle to the FW. The same as in pcie_service FW
#define FW_ACCESS_CORE_CPU_CONTROL_SHIFT (1)
#define FW_ACCESS_CORE_CPU_CONTROL_MASK (1 << FW_ACCESS_CORE_CPU_CONTROL_SHIFT)
#define FW_ACCESS_CONTROL_INTERRUPT_SHIFT (0)
#define FW_ACCESS_APP_CPU_CONTROL_MASK (1 << FW_ACCESS_CONTROL_INTERRUPT_SHIFT)
#define FW_ACCESS_DRIVER_SHUTDOWN_SHIFT (2)
#define FW_ACCESS_DRIVER_SHUTDOWN_MASK (1 << FW_ACCESS_DRIVER_SHUTDOWN_SHIFT)
#define FW_ACCESS_CORE_CPU_CONTROL_SHIFT (1)
#define FW_ACCESS_CORE_CPU_CONTROL_MASK (1 << FW_ACCESS_CORE_CPU_CONTROL_SHIFT)
#define FW_ACCESS_CONTROL_INTERRUPT_SHIFT (0)
#define FW_ACCESS_APP_CPU_CONTROL_MASK (1 << FW_ACCESS_CONTROL_INTERRUPT_SHIFT)
#define FW_ACCESS_DRIVER_SHUTDOWN_SHIFT (2)
#define FW_ACCESS_DRIVER_SHUTDOWN_MASK (1 << FW_ACCESS_DRIVER_SHUTDOWN_SHIFT)
#define FW_ACCESS_SOC_CONNECT_SHIFT (3)
#define FW_ACCESS_SOC_CONNECT_MASK (1 << FW_ACCESS_SOC_CONNECT_SHIFT)
#define INVALID_VDMA_CHANNEL (0xff)
#define INVALID_VDMA_CHANNEL (0xff)
#if !defined(__cplusplus) && defined(NTDDI_VERSION)
#include <wdm.h>
@@ -53,14 +64,23 @@ typedef uint8_t bool;
#define INT_MAX 0x7FFFFFFF
#endif // !defined(INT_MAX)
#if !defined(ECONNRESET)
#define ECONNRESET 104 /* Connection reset by peer */
#endif // !defined(ECONNRESET)
// {d88d31f1-fede-4e71-ac2a-6ce0018c1501}
DEFINE_GUID (GUID_DEVINTERFACE_HailoKM,
DEFINE_GUID (GUID_DEVINTERFACE_HailoKM_NNC,
0xd88d31f1,0xfede,0x4e71,0xac,0x2a,0x6c,0xe0,0x01,0x8c,0x15,0x01);
#define HAILO_GENERAL_IOCTL_MAGIC 0
#define HAILO_VDMA_IOCTL_MAGIC 1
#define HAILO_NON_LINUX_IOCTL_MAGIC 2
// {7f16047d-64b8-207a-0092-e970893970a2}
DEFINE_GUID (GUID_DEVINTERFACE_HailoKM_SOC,
0x7f16047d,0x64b8,0x207a,0x00,0x92,0xe9,0x70,0x89,0x39,0x70,0xa2);
#define HAILO_GENERAL_IOCTL_MAGIC 0
#define HAILO_VDMA_IOCTL_MAGIC 1
#define HAILO_SOC_IOCTL_MAGIC 2
#define HAILO_PCI_EP_IOCTL_MAGIC 3
#define HAILO_NNC_IOCTL_MAGIC 4
#define HAILO_IOCTL_COMPATIBLE CTL_CODE(FILE_DEVICE_UNKNOWN, 0x802, METHOD_BUFFERED, FILE_ANY_ACCESS)
@@ -114,9 +134,11 @@ static ULONG FORCEINLINE _IOC_(ULONG nr, ULONG type, ULONG size, bool read, bool
#define _IOWR_ _IOWR
#define _IO_ _IO
#define HAILO_GENERAL_IOCTL_MAGIC 'g'
#define HAILO_VDMA_IOCTL_MAGIC 'v'
#define HAILO_NON_LINUX_IOCTL_MAGIC 'w'
#define HAILO_GENERAL_IOCTL_MAGIC 'g'
#define HAILO_VDMA_IOCTL_MAGIC 'v'
#define HAILO_SOC_IOCTL_MAGIC 's'
#define HAILO_NNC_IOCTL_MAGIC 'n'
#define HAILO_PCI_EP_IOCTL_MAGIC 'p'
#elif defined(__QNX__) // #ifdef _MSC_VER
#include <devctl.h>
@@ -132,7 +154,6 @@ static ULONG FORCEINLINE _IOC_(ULONG nr, ULONG type, ULONG size, bool read, bool
#define _IO_ __DION
#define HAILO_GENERAL_IOCTL_MAGIC _DCMD_ALL
#define HAILO_VDMA_IOCTL_MAGIC _DCMD_MISC
#define HAILO_NON_LINUX_IOCTL_MAGIC _DCMD_PROC
#else // #ifdef _MSC_VER
#error "unsupported platform!"
@@ -161,6 +182,16 @@ enum hailo_dma_data_direction {
HAILO_DMA_MAX_ENUM = INT_MAX,
};
// Enum that states what type of buffer we are working with in the driver
// TODO: HRT-13580 - Add specific type for user allocated and for driver allocated
enum hailo_dma_buffer_type {
HAILO_DMA_USER_PTR_BUFFER = 0,
HAILO_DMA_DMABUF_BUFFER = 1,
/** Max enum value to maintain ABI Integrity */
HAILO_DMA_BUFFER_MAX_ENUM = INT_MAX,
};
// Enum that determines if buffer should be allocated from user space or from driver
enum hailo_allocation_mode {
HAILO_ALLOCATION_MODE_USERSPACE = 0,
@@ -170,10 +201,19 @@ enum hailo_allocation_mode {
HAILO_ALLOCATION_MODE_MAX_ENUM = INT_MAX,
};
enum hailo_vdma_interrupts_domain {
HAILO_VDMA_INTERRUPTS_DOMAIN_NONE = 0,
HAILO_VDMA_INTERRUPTS_DOMAIN_DEVICE = (1 << 0),
HAILO_VDMA_INTERRUPTS_DOMAIN_HOST = (1 << 1),
/** Max enum value to maintain ABI Integrity */
HAILO_VDMA_INTERRUPTS_DOMAIN_MAX_ENUM = INT_MAX,
};
/* structure used in ioctl HAILO_VDMA_BUFFER_MAP */
struct hailo_vdma_buffer_map_params {
#if defined(__linux__) || defined(_MSC_VER)
void* user_address; // in
uintptr_t user_address; // in
#elif defined(__QNX__)
shm_handle_t shared_memory_handle; // in
#else
@@ -181,6 +221,7 @@ struct hailo_vdma_buffer_map_params {
#endif // __linux__
size_t size; // in
enum hailo_dma_data_direction data_direction; // in
enum hailo_dma_buffer_type buffer_type; // in
uintptr_t allocated_buffer_handle; // in
size_t mapped_handle; // out
};
@@ -204,31 +245,27 @@ struct hailo_desc_list_release_params {
uintptr_t desc_handle; // in
};
/* structure used in ioctl HAILO_NON_LINUX_DESC_LIST_MMAP */
struct hailo_non_linux_desc_list_mmap_params {
uintptr_t desc_handle; // in
size_t size; // in
void* user_address; // out
};
/* structure used in ioctl HAILO_DESC_LIST_BIND_VDMA_BUFFER */
struct hailo_desc_list_bind_vdma_buffer_params {
struct hailo_desc_list_program_params {
size_t buffer_handle; // in
size_t buffer_size; // in
size_t buffer_offset; // in
uintptr_t desc_handle; // in
uint8_t channel_index; // in
uint32_t starting_desc; // in
bool should_bind; // in
enum hailo_vdma_interrupts_domain last_interrupts_domain; // in
bool is_debug; // in
};
/* structure used in ioctl HAILO_VDMA_INTERRUPTS_ENABLE */
struct hailo_vdma_interrupts_enable_params {
/* structure used in ioctl HAILO_VDMA_ENABLE_CHANNELS */
struct hailo_vdma_enable_channels_params {
uint32_t channels_bitmap_per_engine[MAX_VDMA_ENGINES]; // in
bool enable_timestamps_measure; // in
};
/* structure used in ioctl HAILO_VDMA_INTERRUPTS_DISABLE */
struct hailo_vdma_interrupts_disable_params {
/* structure used in ioctl HAILO_VDMA_DISABLE_CHANNELS */
struct hailo_vdma_disable_channels_params {
uint32_t channels_bitmap_per_engine[MAX_VDMA_ENGINES]; // in
};
@@ -237,7 +274,7 @@ struct hailo_vdma_interrupts_channel_data {
uint8_t engine_index;
uint8_t channel_index;
bool is_active; // If not activate, num_processed is ignored.
uint16_t host_num_processed;
uint8_t transfers_completed; // Number of transfers completed.
uint8_t host_error; // Channel errors bits on source side
uint8_t device_error; // Channel errors bits on dest side
bool validation_success; // If the validation of the channel was successful
@@ -312,6 +349,10 @@ enum hailo_transfer_memory_type {
HAILO_TRANSFER_MEMORY_DMA_ENGINE1,
HAILO_TRANSFER_MEMORY_DMA_ENGINE2,
// PCIe EP driver memories
HAILO_TRANSFER_MEMORY_PCIE_EP_CONFIG = 0x400,
HAILO_TRANSFER_MEMORY_PCIE_EP_BRIDGE,
/** Max enum value to maintain ABI Integrity */
HAILO_TRANSFER_MEMORY_MAX_ENUM = INT_MAX,
};
@@ -352,15 +393,26 @@ enum hailo_board_type {
HAILO_BOARD_TYPE_HAILO8 = 0,
HAILO_BOARD_TYPE_HAILO15,
HAILO_BOARD_TYPE_PLUTO,
HAILO_BOARD_TYPE_HAILO10H,
HAILO_BOARD_TYPE_HAILO10H_LEGACY,
HAILO_BOARD_TYPE_COUNT,
/** Max enum value to maintain ABI Integrity */
HAILO_BOARD_TYPE_MAX_ENUM = INT_MAX
};
enum hailo_accelerator_type {
HAILO_ACCELERATOR_TYPE_NNC,
HAILO_ACCELERATOR_TYPE_SOC,
/** Max enum value to maintain ABI Integrity */
HAILO_ACCELERATOR_TYPE_MAX_ENUM = INT_MAX
};
enum hailo_dma_type {
HAILO_DMA_TYPE_PCIE,
HAILO_DMA_TYPE_DRAM,
HAILO_DMA_TYPE_PCI_EP,
/** Max enum value to maintain ABI Integrity */
HAILO_DMA_TYPE_MAX_ENUM = INT_MAX,
@@ -428,15 +480,6 @@ struct hailo_vdma_transfer_buffer {
uint32_t size; // in
};
enum hailo_vdma_interrupts_domain {
HAILO_VDMA_INTERRUPTS_DOMAIN_NONE = 0,
HAILO_VDMA_INTERRUPTS_DOMAIN_DEVICE = (1 << 0),
HAILO_VDMA_INTERRUPTS_DOMAIN_HOST = (1 << 1),
/** Max enum value to maintain ABI Integrity */
HAILO_VDMA_INTERRUPTS_DOMAIN_MAX_ENUM = INT_MAX,
};
// We allow maximum 2 buffers per transfer since we may have an extra buffer
// to make sure each buffer is aligned to page size.
#define HAILO_MAX_BUFFERS_PER_SINGLE_TRANSFER (2)
@@ -460,6 +503,35 @@ struct hailo_vdma_launch_transfer_params {
// more info (e.g desc complete status)
uint32_t descs_programed; // out, amount of descriptors programed.
int launch_transfer_status; // out, status of the launch transfer call. (only used in case of error)
};
/* structure used in ioctl HAILO_SOC_CONNECT */
struct hailo_soc_connect_params {
uint8_t input_channel_index; // out
uint8_t output_channel_index; // out
uintptr_t input_desc_handle; // in
uintptr_t output_desc_handle; // in
};
/* structure used in ioctl HAILO_SOC_CLOSE */
struct hailo_soc_close_params {
uint8_t input_channel_index; // in
uint8_t output_channel_index; // in
};
/* structure used in ioctl HAILO_PCI_EP_ACCEPT */
struct hailo_pci_ep_accept_params {
uint8_t input_channel_index; // out
uint8_t output_channel_index; // out
uintptr_t input_desc_handle; // in
uintptr_t output_desc_handle; // in
};
/* structure used in ioctl HAILO_PCI_EP_CLOSE */
struct hailo_pci_ep_close_params {
uint8_t input_channel_index; // in
uint8_t output_channel_index; // in
};
#ifdef _MSC_VER
@@ -469,8 +541,8 @@ struct tCompatibleHailoIoctlData
ULONG_PTR Value;
union {
struct hailo_memory_transfer_params MemoryTransfer;
struct hailo_vdma_interrupts_enable_params VdmaInterruptsEnable;
struct hailo_vdma_interrupts_disable_params VdmaInterruptsDisable;
struct hailo_vdma_enable_channels_params VdmaEnableChannels;
struct hailo_vdma_disable_channels_params VdmaDisableChannels;
struct hailo_vdma_interrupts_read_timestamp_params VdmaInterruptsReadTimestamps;
struct hailo_vdma_interrupts_wait_params VdmaInterruptsWait;
struct hailo_vdma_buffer_sync_params VdmaBufferSync;
@@ -479,14 +551,17 @@ struct tCompatibleHailoIoctlData
struct hailo_vdma_buffer_unmap_params VdmaBufferUnmap;
struct hailo_desc_list_create_params DescListCreate;
struct hailo_desc_list_release_params DescListReleaseParam;
struct hailo_desc_list_bind_vdma_buffer_params DescListBind;
struct hailo_desc_list_program_params DescListProgram;
struct hailo_d2h_notification D2HNotification;
struct hailo_device_properties DeviceProperties;
struct hailo_driver_info DriverInfo;
struct hailo_non_linux_desc_list_mmap_params DescListMmap;
struct hailo_read_log_params ReadLog;
struct hailo_mark_as_in_use_params MarkAsInUse;
struct hailo_vdma_launch_transfer_params LaunchTransfer;
struct hailo_soc_connect_params ConnectParams;
struct hailo_soc_close_params SocCloseParams;
struct hailo_pci_ep_accept_params AcceptParams;
struct hailo_pci_ep_close_params PciEpCloseParams;
} Buffer;
};
#endif // _MSC_VER
@@ -495,30 +570,20 @@ struct tCompatibleHailoIoctlData
enum hailo_general_ioctl_code {
HAILO_MEMORY_TRANSFER_CODE,
HAILO_FW_CONTROL_CODE,
HAILO_READ_NOTIFICATION_CODE,
HAILO_DISABLE_NOTIFICATION_CODE,
HAILO_QUERY_DEVICE_PROPERTIES_CODE,
HAILO_QUERY_DRIVER_INFO_CODE,
HAILO_READ_LOG_CODE,
HAILO_RESET_NN_CORE_CODE,
// Must be last
HAILO_GENERAL_IOCTL_MAX_NR,
};
#define HAILO_MEMORY_TRANSFER _IOWR_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_MEMORY_TRANSFER_CODE, struct hailo_memory_transfer_params)
#define HAILO_FW_CONTROL _IOWR_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_FW_CONTROL_CODE, struct hailo_fw_control)
#define HAILO_READ_NOTIFICATION _IOW_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_READ_NOTIFICATION_CODE, struct hailo_d2h_notification)
#define HAILO_DISABLE_NOTIFICATION _IO_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_DISABLE_NOTIFICATION_CODE)
#define HAILO_QUERY_DEVICE_PROPERTIES _IOW_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_QUERY_DEVICE_PROPERTIES_CODE, struct hailo_device_properties)
#define HAILO_QUERY_DRIVER_INFO _IOW_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_QUERY_DRIVER_INFO_CODE, struct hailo_driver_info)
#define HAILO_READ_LOG _IOWR_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_READ_LOG_CODE, struct hailo_read_log_params)
#define HAILO_RESET_NN_CORE _IO_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_RESET_NN_CORE_CODE)
enum hailo_vdma_ioctl_code {
HAILO_VDMA_INTERRUPTS_ENABLE_CODE,
HAILO_VDMA_INTERRUPTS_DISABLE_CODE,
HAILO_VDMA_ENABLE_CHANNELS_CODE,
HAILO_VDMA_DISABLE_CHANNELS_CODE,
HAILO_VDMA_INTERRUPTS_WAIT_CODE,
HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS_CODE,
HAILO_VDMA_BUFFER_MAP_CODE,
@@ -526,7 +591,7 @@ enum hailo_vdma_ioctl_code {
HAILO_VDMA_BUFFER_SYNC_CODE,
HAILO_DESC_LIST_CREATE_CODE,
HAILO_DESC_LIST_RELEASE_CODE,
HAILO_DESC_LIST_BIND_VDMA_BUFFER_CODE,
HAILO_DESC_LIST_PROGRAM_CODE,
HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC_CODE,
HAILO_VDMA_LOW_MEMORY_BUFFER_FREE_CODE,
HAILO_MARK_AS_IN_USE_CODE,
@@ -538,38 +603,67 @@ enum hailo_vdma_ioctl_code {
HAILO_VDMA_IOCTL_MAX_NR,
};
#define HAILO_VDMA_INTERRUPTS_ENABLE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_INTERRUPTS_ENABLE_CODE, struct hailo_vdma_interrupts_enable_params)
#define HAILO_VDMA_INTERRUPTS_DISABLE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_INTERRUPTS_DISABLE_CODE, struct hailo_vdma_interrupts_disable_params)
#define HAILO_VDMA_ENABLE_CHANNELS _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_ENABLE_CHANNELS_CODE, struct hailo_vdma_enable_channels_params)
#define HAILO_VDMA_DISABLE_CHANNELS _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_DISABLE_CHANNELS_CODE, struct hailo_vdma_disable_channels_params)
#define HAILO_VDMA_INTERRUPTS_WAIT _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_INTERRUPTS_WAIT_CODE, struct hailo_vdma_interrupts_wait_params)
#define HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS_CODE, struct hailo_vdma_interrupts_read_timestamp_params)
#define HAILO_VDMA_BUFFER_MAP _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_MAP_CODE, struct hailo_vdma_buffer_map_params)
#define HAILO_VDMA_BUFFER_UNMAP _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_UNMAP_CODE, struct hailo_vdma_buffer_unmap_params)
#define HAILO_VDMA_BUFFER_SYNC _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_SYNC_CODE, struct hailo_vdma_buffer_sync_params)
#define HAILO_VDMA_BUFFER_MAP _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_MAP_CODE, struct hailo_vdma_buffer_map_params)
#define HAILO_VDMA_BUFFER_UNMAP _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_UNMAP_CODE, struct hailo_vdma_buffer_unmap_params)
#define HAILO_VDMA_BUFFER_SYNC _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_SYNC_CODE, struct hailo_vdma_buffer_sync_params)
#define HAILO_DESC_LIST_CREATE _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_CREATE_CODE, struct hailo_desc_list_create_params)
#define HAILO_DESC_LIST_RELEASE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_RELEASE_CODE, struct hailo_desc_list_release_params)
#define HAILO_DESC_LIST_BIND_VDMA_BUFFER _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_BIND_VDMA_BUFFER_CODE, struct hailo_desc_list_bind_vdma_buffer_params)
#define HAILO_DESC_LIST_CREATE _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_CREATE_CODE, struct hailo_desc_list_create_params)
#define HAILO_DESC_LIST_RELEASE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_RELEASE_CODE, struct hailo_desc_list_release_params)
#define HAILO_DESC_LIST_PROGRAM _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_PROGRAM_CODE, struct hailo_desc_list_program_params)
#define HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC_CODE, struct hailo_allocate_low_memory_buffer_params)
#define HAILO_VDMA_LOW_MEMORY_BUFFER_FREE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_LOW_MEMORY_BUFFER_FREE_CODE, struct hailo_free_low_memory_buffer_params)
#define HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC_CODE, struct hailo_allocate_low_memory_buffer_params)
#define HAILO_VDMA_LOW_MEMORY_BUFFER_FREE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_LOW_MEMORY_BUFFER_FREE_CODE, struct hailo_free_low_memory_buffer_params)
#define HAILO_MARK_AS_IN_USE _IOW_(HAILO_VDMA_IOCTL_MAGIC, HAILO_MARK_AS_IN_USE_CODE, struct hailo_mark_as_in_use_params)
#define HAILO_MARK_AS_IN_USE _IOW_(HAILO_VDMA_IOCTL_MAGIC, HAILO_MARK_AS_IN_USE_CODE, struct hailo_mark_as_in_use_params)
#define HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC_CODE, struct hailo_allocate_continuous_buffer_params)
#define HAILO_VDMA_CONTINUOUS_BUFFER_FREE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CONTINUOUS_BUFFER_FREE_CODE, struct hailo_free_continuous_buffer_params)
#define HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC_CODE, struct hailo_allocate_continuous_buffer_params)
#define HAILO_VDMA_CONTINUOUS_BUFFER_FREE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CONTINUOUS_BUFFER_FREE_CODE, struct hailo_free_continuous_buffer_params)
#define HAILO_VDMA_LAUNCH_TRANSFER _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_LAUNCH_TRANSFER_CODE, struct hailo_vdma_launch_transfer_params)
#define HAILO_VDMA_LAUNCH_TRANSFER _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_LAUNCH_TRANSFER_CODE, struct hailo_vdma_launch_transfer_params)
enum hailo_non_linux_ioctl_code {
HAILO_NON_LINUX_DESC_LIST_MMAP_CODE,
enum hailo_nnc_ioctl_code {
HAILO_FW_CONTROL_CODE,
HAILO_READ_NOTIFICATION_CODE,
HAILO_DISABLE_NOTIFICATION_CODE,
HAILO_READ_LOG_CODE,
HAILO_RESET_NN_CORE_CODE,
// Must be last
HAILO_NON_LINUX_IOCTL_MAX_NR,
HAILO_NNC_IOCTL_MAX_NR
};
#define HAILO_NON_LINUX_DESC_LIST_MMAP _IOWR_(HAILO_NON_LINUX_IOCTL_MAGIC, HAILO_NON_LINUX_DESC_LIST_MMAP_CODE, struct hailo_non_linux_desc_list_mmap_params)
#define HAILO_FW_CONTROL _IOWR_(HAILO_NNC_IOCTL_MAGIC, HAILO_FW_CONTROL_CODE, struct hailo_fw_control)
#define HAILO_READ_NOTIFICATION _IOW_(HAILO_NNC_IOCTL_MAGIC, HAILO_READ_NOTIFICATION_CODE, struct hailo_d2h_notification)
#define HAILO_DISABLE_NOTIFICATION _IO_(HAILO_NNC_IOCTL_MAGIC, HAILO_DISABLE_NOTIFICATION_CODE)
#define HAILO_READ_LOG _IOWR_(HAILO_NNC_IOCTL_MAGIC, HAILO_READ_LOG_CODE, struct hailo_read_log_params)
#define HAILO_RESET_NN_CORE _IO_(HAILO_NNC_IOCTL_MAGIC, HAILO_RESET_NN_CORE_CODE)
/* IOCTL codes served on the SoC side of a PCIe EP connection. */
enum hailo_soc_ioctl_code {
    HAILO_SOC_IOCTL_CONNECT_CODE,
    HAILO_SOC_IOCTL_CLOSE_CODE,

    // Must be last
    HAILO_SOC_IOCTL_MAX_NR,
};

/* Establish a connection towards the PCI endpoint (in/out params). */
#define HAILO_SOC_CONNECT _IOWR_(HAILO_SOC_IOCTL_MAGIC, HAILO_SOC_IOCTL_CONNECT_CODE, struct hailo_soc_connect_params)
/* Close an existing SoC connection (input params only). */
#define HAILO_SOC_CLOSE   _IOR_(HAILO_SOC_IOCTL_MAGIC, HAILO_SOC_IOCTL_CLOSE_CODE, struct hailo_soc_close_params)

/* IOCTL codes served on the PCI endpoint side. */
enum hailo_pci_ep_ioctl_code {
    HAILO_PCI_EP_ACCEPT_CODE,
    HAILO_PCI_EP_CLOSE_CODE,

    // Must be last
    HAILO_PCI_EP_IOCTL_MAX_NR,
};

/* Accept an incoming connection from the SoC side (in/out params). */
#define HAILO_PCI_EP_ACCEPT _IOWR_(HAILO_PCI_EP_IOCTL_MAGIC, HAILO_PCI_EP_ACCEPT_CODE, struct hailo_pci_ep_accept_params)
/* Close an accepted endpoint connection (input params only). */
#define HAILO_PCI_EP_CLOSE  _IOR_(HAILO_PCI_EP_IOCTL_MAGIC, HAILO_PCI_EP_CLOSE_CODE, struct hailo_pci_ep_close_params)
#endif /* _HAILO_IOCTL_COMMON_H_ */

View File

@@ -1,10 +0,0 @@
#ifndef _HAILO_PCIE_VERSION_H_
#define _HAILO_PCIE_VERSION_H_

/* Pulls in the numeric version components (HAILO_DRV_VER_MAJOR/MINOR/REVISION).
 * NOTE(review): backslash path separators make this include Windows-only — confirm intended. */
#include "..\..\common\hailo_pcie_version.h"

/* Two-level stringification: STRINGIFY_NUMBER expands its macro argument first,
 * then STRINGIFY_EXPANDED turns the expansion into a string literal. */
#define STRINGIFY_EXPANDED( x ) #x
#define STRINGIFY_NUMBER( x ) STRINGIFY_EXPANDED(x)

/* Full driver version as a single "MAJOR.MINOR.REVISION" string literal. */
#define HAILO_DRV_VER STRINGIFY_NUMBER(HAILO_DRV_VER_MAJOR) "." STRINGIFY_NUMBER(HAILO_DRV_VER_MINOR) "." STRINGIFY_NUMBER(HAILO_DRV_VER_REVISION)

#endif /* _HAILO_PCIE_VERSION_H_ */

View File

@@ -0,0 +1,12 @@
Copyright 2024 (C) Hailo Technologies Ltd. ("Hailo")
All rights reserved.
By downloading, installing, copying, or otherwise using the software,
you agree to be bound by the terms of this license.
Hailo hereby grants You a limited, non-exclusive, non-assignable, non-transferable, revocable, non-sublicensable, license to (a) use the Licensed Software, (b) Redistribution of this software in its binary form only, both (a) and (b) solely: (i) according to the Documentation and this license; (ii) for the purpose of developing applications for use in Your system(s) ("End-User Product(s)"), and in conjunction with Hailo's proprietary products previously purchased by You from Hailo ("Products") which are utilizing the Licensed Software;
Restrictions. You agree that except as expressly permitted by Hailo under this license, you will not, nor allow any third party on Your behalf to (a) license, rent, lease, sublicense, loan, sell or otherwise allow any access the Licensed Software; (b) modify, alter, copy, transfer, emulate or create any derivative works of the Licensed Software or of any part thereof; (c) reverse engineer, decompile, decode, decrypt, disassemble, or in any way attempt to derive source code, designs, or otherwise discover the underlying Intellectual Property or technology of the Licensed Software or any part thereof; (d) remove, alter or obscure any copyright, trademark or other proprietary rights notice, on or in, the Licensed Software and/or the Documentation; (e) use the Licensed Software for any benchmarking to be publicly published or for competing development activities, (f) distribute without this license and/or not in the Licensed software binary form (g) publish or disclose to any third party any technical features, quality, performance or benchmark test, or comparative analyses relating to the Licensed Software ("Technical Data"), or (h) utilize the Licensed Software with any product(s) other than End User Products in conjunction with the Product(s) as described hereunder and under the Documentation, or (i) use the Licensed Software in any manner that would cause the Licensed Software and/or the Product to become subject to an Open Source License. "Open Source License" includes, without limitation, a software license that requires as a condition of use, modification, and/or distribution, of such software that the Licensed Software be (1) disclosed or distributed in source code form; (2) be licensed under other and/or terms other than this license;
Hailo Technologies Ltd. disclaims any warranties, including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose.
This software is provided on an "AS IS" basis, and Hailo has no obligation to provide maintenance, support, updates, enhancements, or modifications

View File

@@ -0,0 +1,55 @@
cmake_minimum_required(VERSION 3.0.0)

include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/spdlog.cmake)
include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/readerwriterqueue.cmake)

# Prefer the -pthread compile/link flag over -lpthread where the toolchain supports it.
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)

# Sources for the hailort_server executable. Besides the server entry point, the
# server links a subset of libhailort internals (vdma/driver/stream plumbing)
# directly, since it talks to the driver itself.
set(HAILORT_SERVER_SOURCES
    hailort_server.cpp
    ${HRPC_CPP_SOURCES}
    ${HRPC_PROTOCOL_CPP_SOURCES}
    ${HAILORT_COMMON_OS_DIR}/os_utils.cpp
    ${HAILORT_SERVICE_DIR}/cng_buffer_pool.cpp
    ${HAILORT_COMMON_DIR}/common/event_internal.cpp
    ${HAILO_FULL_OS_DIR}/event.cpp # TODO HRT-10681: move to common
    ${DRIVER_OS_DIR}/driver_os_specific.cpp
    ${HAILO_OS_DIR}/file_descriptor.cpp
    ${HAILO_OS_DIR}/mmap_buffer.cpp
    ${HAILORT_SRC_DIR}/vdma/pcie_session.cpp
    ${HAILORT_SRC_DIR}/vdma/memory/descriptor_list.cpp
    ${HAILORT_SRC_DIR}/vdma/memory/mapped_buffer.cpp
    ${HAILORT_SRC_DIR}/vdma/memory/dma_able_buffer.cpp
    ${HAILORT_SRC_DIR}/vdma/memory/vdma_edge_layer.cpp
    ${HAILORT_SRC_DIR}/vdma/driver/hailort_driver.cpp
    ${HAILORT_SRC_DIR}/vdma/channel/interrupts_dispatcher.cpp
    ${HAILORT_SRC_DIR}/vdma/channel/transfer_launcher.cpp
    ${HAILORT_SRC_DIR}/vdma/channel/boundary_channel.cpp
    ${HAILORT_SRC_DIR}/vdma/channel/channels_group.cpp
    ${HAILORT_SRC_DIR}/stream_common/transfer_common.cpp
)

if(WIN32)
    # hailort_driver.cpp in windows depends on string_conversion
    # dma_able_buffer.cpp in windows depends on virtual_alloc_guard
    list(APPEND HAILORT_SERVER_SOURCES
        ${HAILORT_COMMON_OS_DIR}/string_conversion.cpp
        ${HAILO_FULL_OS_DIR}/virtual_alloc_guard.cpp)
endif()

add_executable(hailort_server ${HAILORT_SERVER_SOURCES})
target_include_directories(hailort_server PRIVATE
    ${HAILORT_SRC_DIR}
    ${COMMON_INC_DIR}
    ${DRIVER_INC_DIR}
)
target_compile_options(hailort_server PRIVATE ${HAILORT_COMPILE_OPTIONS})
set_property(TARGET hailort_server PROPERTY CXX_STANDARD 14)
set_property(TARGET hailort_server PROPERTY INSTALL_RPATH "$ORIGIN" "../lib/") # Link with a relative libhailort
target_link_libraries(hailort_server PRIVATE
    libhailort
    Threads::Threads
    rpc_proto
    spdlog::spdlog
    readerwriterqueue
)

View File

@@ -0,0 +1,539 @@
/**
* Copyright (c) 2024 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file hailo_server.cpp
* @brief Hailo Server
**/
#include "hailort_server.hpp"
#include "hrpc/server.hpp"
#include "hailo/vdevice.hpp"
#include "hailo/infer_model.hpp"
#include "hrpc_protocol/serializer.hpp"
#include "net_flow/ops/nms_post_process.hpp"
#include "hailort_service/service_resource_manager.hpp"
#include <spdlog/spdlog.h>
#include <spdlog/sinks/stdout_color_sinks.h>
using namespace hailort;
// TODO: These macros should be merged with the grpc macros, also change them to TRY
// Checks an Expected<T>; on error, serializes an error reply with the given
// serializer type and returns it to the caller. If serializing the reply
// itself fails, bails out with HAILO_INTERNAL_FAILURE.
#define CHECK_EXPECTED_AS_HRPC_STATUS(_exepcted, T) \
    do { \
        if (!_exepcted) { \
            LOGGER__ERROR("CHECK_EXPECTED_AS_HRPC_STATUS failed, status: {}", _exepcted.status()); \
            auto reply = T::serialize_reply(_exepcted.status()); \
            if (reply) return reply; \
            LOGGER__CRITICAL("Failed to create reply with status: {}", reply.status()); \
            return make_unexpected(HAILO_INTERNAL_FAILURE); \
        } \
    } while (0)

// Same as CHECK_EXPECTED_AS_HRPC_STATUS but for a plain hailo_status value.
#define CHECK_SUCCESS_AS_HRPC_STATUS(_status, T) \
    do { \
        if (_status != HAILO_SUCCESS) { \
            LOGGER__ERROR("CHECK_SUCCESS_AS_HRPC_STATUS failed, status: {}", _status); \
            auto reply = T::serialize_reply(_status); \
            if (reply) return reply; \
            LOGGER__CRITICAL("Failed to create reply with status: {}", reply.status()); \
            return make_unexpected(HAILO_INTERNAL_FAILURE); \
        } \
    } while (0)

// Token-pasting helpers used to generate a unique temporary name per TRY site
// (via __COUNTER__ below).
#define __HAILO_CONCAT(x, y) x ## y
#define _HAILO_CONCAT(x, y) __HAILO_CONCAT(x, y)

// TRY-style unwrap: evaluates `expr` into a uniquely-named Expected, returns an
// HRPC error reply on failure, otherwise releases the value into `var_decl`.
#define _TRY_AS_HRPC_STATUS(expected_var_name, var_decl, expr, ...) \
    auto expected_var_name = (expr); \
    CHECK_EXPECTED_AS_HRPC_STATUS(expected_var_name, __VA_ARGS__); \
    var_decl = expected_var_name.release()
#define TRY_AS_HRPC_STATUS(var_decl, expr, ...) _TRY_AS_HRPC_STATUS(_HAILO_CONCAT(__expected, __COUNTER__), var_decl, expr, __VA_ARGS__)

// Log line format: terse in release builds, full timestamp/pid/tid/source
// location in debug builds.
#ifdef NDEBUG
#define LOGGER_PATTERN ("[%n] [%^%l%$] %v")
#else
#define LOGGER_PATTERN ("[%Y-%m-%d %X.%e] [%P] [%t] [%n] [%^%l%$] [%s:%#] [%!] %v")
#endif

#define BUFFER_POOL_SIZE (10) // TODO: this may hurt performance, should be configurable
// Metadata snapshot of an InferModel's I/O layout, captured when the model is
// configured so later RPC handlers can size buffer pools without re-querying
// the model.
struct InferModelInfo
{
    std::unordered_map<std::string, size_t> input_streams_sizes;  // per-input frame size (from get_frame_size())
    std::unordered_map<std::string, size_t> output_streams_sizes; // per-output frame size (from get_frame_size())
    std::vector<std::string> inputs_names;                        // input names, in the model's enumeration order
    std::vector<std::string> outputs_names;                       // output names, in the model's enumeration order
};
// Installs a colorized stderr logger named `name` as spdlog's default logger.
// Messages below info level are filtered out at the sink.
void init_logger(const std::string &name)
{
    auto stderr_sink = make_shared_nothrow<spdlog::sinks::stderr_color_sink_mt>();
    stderr_sink->set_pattern(LOGGER_PATTERN);
    stderr_sink->set_level(spdlog::level::info);
    spdlog::set_default_logger(make_shared_nothrow<spdlog::logger>(name, stderr_sink));
}
// Drops the cached HEF buffer of every given infer model handle.
// (The buffers are kept alive from model creation until configure/destruction.)
void hrpc::HailoRTServer::cleanup_infer_model_hef_buffers(const std::vector<uint32_t> &infer_model_handles)
{
    for (const auto &infer_model_handle : infer_model_handles) {
        // erase(key) is a no-op when the key is absent, so the prior
        // find-then-erase dance is unnecessary.
        m_hef_buffers_per_infer_model.erase(infer_model_handle);
    }
}
// Drops the buffer pool of every given configured-infer-model handle.
void hrpc::HailoRTServer::cleanup_cim_buffer_pools(const std::vector<uint32_t> &cim_handles)
{
    for (const auto &cim_handle : cim_handles) {
        // erase(key) is a no-op when the key is absent, so the prior
        // find-then-erase dance is unnecessary.
        m_buffer_pool_per_cim.erase(cim_handle);
    }
}
// Releases everything the (single) client registered and closes its connection.
// Teardown is ordered: configured infer models and their buffer pools first,
// then infer models (with their cached HEF buffers and info records), and the
// VDevice last — NOTE(review): this ordering appears deliberate (resources
// reference each other top-down); confirm before reordering.
hailo_status hrpc::HailoRTServer::cleanup_client_resources(RpcConnection client_connection)
{
    // Single-client server: all resources are registered under SINGLE_CLIENT_PID.
    std::set<uint32_t> pids = {SINGLE_CLIENT_PID};

    // Collect handles before releasing, since the buffer-pool cleanup needs them.
    auto cim_handles = ServiceResourceManager<ConfiguredInferModel>::get_instance().resources_handles_by_pids(pids);
    (void)ServiceResourceManager<ConfiguredInferModel>::get_instance().release_by_pid(SINGLE_CLIENT_PID);
    cleanup_cim_buffer_pools(cim_handles);

    auto infer_model_handles = ServiceResourceManager<InferModel>::get_instance().resources_handles_by_pids(pids);
    (void)ServiceResourceManager<InferModelInfo>::get_instance().release_by_pid(SINGLE_CLIENT_PID);
    (void)ServiceResourceManager<InferModel>::get_instance().release_by_pid(SINGLE_CLIENT_PID);
    cleanup_infer_model_hef_buffers(infer_model_handles);
    m_infer_model_to_info_id.clear();

    (void)ServiceResourceManager<VDevice>::get_instance().release_by_pid(SINGLE_CLIENT_PID);

    CHECK_SUCCESS(client_connection.close());
    return HAILO_SUCCESS;
}
// Factory for the server instance. Creates the shared connection context the
// server will accept clients on, then allocates the server object.
// Returns HAILO_OUT_OF_HOST_MEMORY if the allocation fails.
Expected<std::unique_ptr<hrpc::HailoRTServer>> hrpc::HailoRTServer::create_unique()
{
    // `true` presumably selects the server side of the context — TODO confirm
    // against ConnectionContext::create_shared's signature.
    TRY(auto connection_context, ConnectionContext::create_shared(true));
    auto res = make_unique_nothrow<HailoRTServer>(connection_context);
    CHECK_NOT_NULL(res, HAILO_OUT_OF_HOST_MEMORY);
    return res;
}
int main()
{
init_logger("HailoRT-Server");
TRY(auto server, hrpc::HailoRTServer::create_unique());
hrpc::Dispatcher dispatcher;
// TODO: add a server implementation class, with resources heiracrhy and more
auto &infer_model_to_info_id = server->get_infer_model_to_info_id();
auto &buffer_pool_per_cim = server->get_buffer_pool_per_cim();
// Because the infer model is created with a hef buffer, we need to keep the buffer until the configure stage.
// Here I keep it until the infer model is destroyed
auto &hef_buffers = server->get_hef_buffers();
dispatcher.register_action(HailoRpcActionID::VDEVICE__CREATE,
[] (const MemoryView &request, hrpc::ServerContextPtr /*server_context*/) -> Expected<Buffer> {
TRY_AS_HRPC_STATUS(auto vdevice_params, CreateVDeviceSerializer::deserialize_request(request), CreateVDeviceSerializer);
TRY_AS_HRPC_STATUS(auto vdevice, VDevice::create(vdevice_params), CreateVDeviceSerializer);
auto &manager = ServiceResourceManager<VDevice>::get_instance();
auto id = manager.register_resource(SINGLE_CLIENT_PID, std::move(vdevice));
auto reply = CreateVDeviceSerializer::serialize_reply(HAILO_SUCCESS, id);
return reply;
});
dispatcher.register_action(HailoRpcActionID::VDEVICE__DESTROY,
[] (const MemoryView &request, hrpc::ServerContextPtr /*server_context*/) -> Expected<Buffer> {
auto &manager = ServiceResourceManager<VDevice>::get_instance();
TRY_AS_HRPC_STATUS(auto vdevice_handle, DestroyVDeviceSerializer::deserialize_request(request), DestroyVDeviceSerializer);
(void)manager.release_resource(vdevice_handle, SINGLE_CLIENT_PID);
TRY_AS_HRPC_STATUS(auto reply, DestroyVDeviceSerializer::serialize_reply(HAILO_SUCCESS), DestroyVDeviceSerializer);
return reply;
});
dispatcher.register_action(HailoRpcActionID::VDEVICE__CREATE_INFER_MODEL,
[&hef_buffers] (const MemoryView &request, hrpc::ServerContextPtr server_context) -> Expected<Buffer> {
TRY_AS_HRPC_STATUS(auto tuple, CreateInferModelSerializer::deserialize_request(request), CreateInferModelSerializer);
auto vdevice_handle = std::get<0>(tuple);
uint64_t hef_size = std::get<1>(tuple);
assert(hef_size <= SIZE_MAX);
TRY_AS_HRPC_STATUS(auto hef_buffer, Buffer::create(static_cast<size_t>(hef_size), BufferStorageParams::create_dma()), CreateInferModelSerializer);
auto status = server_context->connection().read_buffer(MemoryView(hef_buffer));
CHECK_SUCCESS_AS_HRPC_STATUS(status, CreateInferModelSerializer);
auto &vdevice_manager = ServiceResourceManager<VDevice>::get_instance();
auto lambda = [view = MemoryView(hef_buffer)] (std::shared_ptr<VDevice> vdevice) {
return vdevice->create_infer_model(view);
};
auto infer_model = vdevice_manager.execute<Expected<std::shared_ptr<InferModel>>>(vdevice_handle, lambda);
CHECK_EXPECTED_AS_HRPC_STATUS(infer_model, CreateInferModelSerializer);
auto &infer_model_manager = ServiceResourceManager<InferModel>::get_instance();
auto infer_model_id = infer_model_manager.register_resource(SINGLE_CLIENT_PID, std::move(infer_model.release()));
hef_buffers.emplace(infer_model_id, std::move(hef_buffer));
TRY_AS_HRPC_STATUS(auto reply, CreateInferModelSerializer::serialize_reply(HAILO_SUCCESS, infer_model_id), CreateInferModelSerializer);
return reply;
});
dispatcher.register_action(HailoRpcActionID::INFER_MODEL__DESTROY,
[&hef_buffers] (const MemoryView &request, hrpc::ServerContextPtr /*server_context*/) -> Expected<Buffer> {
auto &manager = ServiceResourceManager<InferModel>::get_instance();
TRY_AS_HRPC_STATUS(auto infer_model_handle, DestroyInferModelSerializer::deserialize_request(request), DestroyInferModelSerializer);
hef_buffers.erase(infer_model_handle);
(void)manager.release_resource(infer_model_handle, SINGLE_CLIENT_PID);
TRY_AS_HRPC_STATUS(auto reply, DestroyInferModelSerializer::serialize_reply(HAILO_SUCCESS), DestroyInferModelSerializer);
return reply;
});
dispatcher.register_action(HailoRpcActionID::INFER_MODEL__CREATE_CONFIGURED_INFER_MODEL,
[&buffer_pool_per_cim, &infer_model_to_info_id]
(const MemoryView &request, hrpc::ServerContextPtr /*server_context*/) -> Expected<Buffer> {
auto &infer_model_manager = ServiceResourceManager<InferModel>::get_instance();
TRY_AS_HRPC_STATUS(auto request_params, CreateConfiguredInferModelSerializer::deserialize_request(request), CreateConfiguredInferModelSerializer);
const auto &infer_model_handle = request_params.infer_model_handle;
const auto &vdevice_handle = request_params.vdevice_handle;
auto lambda = [&request_params] (std::shared_ptr<InferModel> infer_model) -> Expected<ConfiguredInferModel> {
const auto &input_streams_formats = request_params.input_streams_params;
const auto &output_streams_formats = request_params.output_streams_params;
for (const auto &input_stream_format : input_streams_formats) {
TRY(auto input, infer_model->input(input_stream_format.first));
input.set_format_order(static_cast<hailo_format_order_t>(input_stream_format.second.format_order));
input.set_format_type(static_cast<hailo_format_type_t>(input_stream_format.second.format_type));
if (INVALID_NMS_CONFIG != input_stream_format.second.nms_score_threshold) {
input.set_nms_score_threshold(input_stream_format.second.nms_score_threshold);
}
if (INVALID_NMS_CONFIG != input_stream_format.second.nms_iou_threshold) {
input.set_nms_iou_threshold(input_stream_format.second.nms_iou_threshold);
}
if (static_cast<uint32_t>(INVALID_NMS_CONFIG) != input_stream_format.second.nms_max_proposals_per_class) {
input.set_nms_max_proposals_per_class(input_stream_format.second.nms_max_proposals_per_class);
}
if (static_cast<uint32_t>(INVALID_NMS_CONFIG) != input_stream_format.second.nms_max_accumulated_mask_size) {
input.set_nms_max_accumulated_mask_size(input_stream_format.second.nms_max_accumulated_mask_size);
}
}
for (const auto &output_stream_format : output_streams_formats) {
TRY(auto output, infer_model->output(output_stream_format.first));
output.set_format_order(static_cast<hailo_format_order_t>(output_stream_format.second.format_order));
output.set_format_type(static_cast<hailo_format_type_t>(output_stream_format.second.format_type));
if (INVALID_NMS_CONFIG != output_stream_format.second.nms_score_threshold) {
output.set_nms_score_threshold(output_stream_format.second.nms_score_threshold);
}
if (INVALID_NMS_CONFIG != output_stream_format.second.nms_iou_threshold) {
output.set_nms_iou_threshold(output_stream_format.second.nms_iou_threshold);
}
if (static_cast<uint32_t>(INVALID_NMS_CONFIG) != output_stream_format.second.nms_max_proposals_per_class) {
output.set_nms_max_proposals_per_class(output_stream_format.second.nms_max_proposals_per_class);
}
if (static_cast<uint32_t>(INVALID_NMS_CONFIG) != output_stream_format.second.nms_max_accumulated_mask_size) {
output.set_nms_max_accumulated_mask_size(output_stream_format.second.nms_max_accumulated_mask_size);
}
}
infer_model->set_batch_size(request_params.batch_size);
infer_model->set_power_mode(request_params.power_mode);
infer_model->set_hw_latency_measurement_flags(request_params.latency_flag);
return infer_model->configure();
};
auto configured_infer_model = infer_model_manager.execute<Expected<ConfiguredInferModel>>(infer_model_handle, lambda);
CHECK_EXPECTED_AS_HRPC_STATUS(configured_infer_model, CreateConfiguredInferModelSerializer);
TRY_AS_HRPC_STATUS(auto async_queue_size, configured_infer_model->get_async_queue_size(), CreateConfiguredInferModelSerializer);
auto set_model_info_lambda = [] (std::shared_ptr<InferModel> infer_model) -> Expected<std::shared_ptr<InferModelInfo>> {
auto infer_model_info = make_shared_nothrow<InferModelInfo>();
CHECK_NOT_NULL_AS_EXPECTED(infer_model_info, HAILO_OUT_OF_HOST_MEMORY);
for (const auto &input : infer_model->inputs()) {
infer_model_info->input_streams_sizes.emplace(input.name(), input.get_frame_size());
infer_model_info->inputs_names.push_back(input.name());
}
for (const auto &output : infer_model->outputs()) {
infer_model_info->output_streams_sizes.emplace(output.name(), output.get_frame_size());
infer_model_info->outputs_names.push_back(output.name());
}
return infer_model_info;
};
auto model_info = infer_model_manager.execute<Expected<std::shared_ptr<InferModelInfo>>>(infer_model_handle, set_model_info_lambda);
CHECK_EXPECTED_AS_HRPC_STATUS(model_info, CreateConfiguredInferModelSerializer);
auto &infer_model_infos_manager = ServiceResourceManager<InferModelInfo>::get_instance();
auto infer_model_info_id = infer_model_infos_manager.register_resource(SINGLE_CLIENT_PID, std::move(model_info.release()));
auto &cim_manager = ServiceResourceManager<ConfiguredInferModel>::get_instance();
auto cim_id = cim_manager.register_resource(SINGLE_CLIENT_PID,
std::move(make_shared_nothrow<ConfiguredInferModel>(configured_infer_model.release())));
auto buffer_pool = ServiceNetworkGroupBufferPool::create(vdevice_handle);
CHECK_EXPECTED_AS_HRPC_STATUS(buffer_pool, CreateConfiguredInferModelSerializer);
auto buffer_pool_ptr = buffer_pool.release();
auto get_infer_model_info_lambda = [] (std::shared_ptr<InferModelInfo> infer_model_info) {
return *infer_model_info;
};
auto infer_model_info = infer_model_infos_manager.execute<Expected<InferModelInfo>>(infer_model_info_id, get_infer_model_info_lambda);
CHECK_EXPECTED_AS_HRPC_STATUS(infer_model_info, CreateConfiguredInferModelSerializer);
for (const auto &input_name : infer_model_info->inputs_names) {
auto status = buffer_pool_ptr->allocate_pool(input_name, HAILO_DMA_BUFFER_DIRECTION_D2H,
infer_model_info->input_streams_sizes[input_name], BUFFER_POOL_SIZE);
CHECK_SUCCESS_AS_HRPC_STATUS(status, CreateConfiguredInferModelSerializer);
}
for (const auto &output_name : infer_model_info->outputs_names) {
auto status = buffer_pool_ptr->allocate_pool(output_name, HAILO_DMA_BUFFER_DIRECTION_H2D,
infer_model_info->output_streams_sizes[output_name], BUFFER_POOL_SIZE);
CHECK_SUCCESS_AS_HRPC_STATUS(status, CreateConfiguredInferModelSerializer);
}
buffer_pool_per_cim.emplace(cim_id, buffer_pool_ptr);
infer_model_to_info_id[infer_model_handle] = infer_model_info_id;
TRY_AS_HRPC_STATUS(auto reply,
CreateConfiguredInferModelSerializer::serialize_reply(HAILO_SUCCESS, cim_id, static_cast<uint32_t>(async_queue_size)),
CreateConfiguredInferModelSerializer);
return reply;
});
dispatcher.register_action(HailoRpcActionID::CONFIGURED_INFER_MODEL__DESTROY,
[&buffer_pool_per_cim] (const MemoryView &request, hrpc::ServerContextPtr /*server_context*/) -> Expected<Buffer> {
auto &manager = ServiceResourceManager<ConfiguredInferModel>::get_instance();
TRY_AS_HRPC_STATUS(auto configured_infer_model_handle, DestroyConfiguredInferModelSerializer::deserialize_request(request), DestroyInferModelSerializer);
auto shutdown_lambda = [] (std::shared_ptr<ConfiguredInferModel> configured_infer_model) {
configured_infer_model->shutdown();
return HAILO_SUCCESS;
};
manager.execute<hailo_status>(configured_infer_model_handle, shutdown_lambda);
buffer_pool_per_cim.erase(configured_infer_model_handle);
(void)manager.release_resource(configured_infer_model_handle, SINGLE_CLIENT_PID);
TRY_AS_HRPC_STATUS(auto reply, DestroyConfiguredInferModelSerializer::serialize_reply(HAILO_SUCCESS), DestroyInferModelSerializer);
return reply;
});
dispatcher.register_action(HailoRpcActionID::CONFIGURED_INFER_MODEL__SET_SCHEDULER_TIMEOUT,
[] (const MemoryView &request, hrpc::ServerContextPtr /*server_context*/) -> Expected<Buffer> {
auto &cim_manager = ServiceResourceManager<ConfiguredInferModel>::get_instance();
TRY_AS_HRPC_STATUS(auto tuple, SetSchedulerTimeoutSerializer::deserialize_request(request), SetSchedulerTimeoutSerializer);
const auto &configured_infer_model_handle = std::get<0>(tuple);
const auto &timeout = std::get<1>(tuple);
auto lambda = [timeout] (std::shared_ptr<ConfiguredInferModel> configured_infer_model) {
return configured_infer_model->set_scheduler_timeout(timeout);
};
auto status = cim_manager.execute<hailo_status>(configured_infer_model_handle, lambda);
TRY_AS_HRPC_STATUS(auto reply, SetSchedulerTimeoutSerializer::serialize_reply(status), SetSchedulerTimeoutSerializer);
return reply;
});
dispatcher.register_action(HailoRpcActionID::CONFIGURED_INFER_MODEL__SET_SCHEDULER_THRESHOLD,
[] (const MemoryView &request, hrpc::ServerContextPtr /*server_context*/) -> Expected<Buffer> {
auto &cim_manager = ServiceResourceManager<ConfiguredInferModel>::get_instance();
TRY_AS_HRPC_STATUS(auto tuple, SetSchedulerThresholdSerializer::deserialize_request(request), SetSchedulerThresholdSerializer);
const auto &configured_infer_model_handle = std::get<0>(tuple);
const auto &threshold = std::get<1>(tuple);
auto lambda = [threshold] (std::shared_ptr<ConfiguredInferModel> configured_infer_model) {
return configured_infer_model->set_scheduler_threshold(threshold);
};
auto status = cim_manager.execute<hailo_status>(configured_infer_model_handle, lambda);
TRY_AS_HRPC_STATUS(auto reply, SetSchedulerThresholdSerializer::serialize_reply(status), SetSchedulerThresholdSerializer);
return reply;
});
dispatcher.register_action(HailoRpcActionID::CONFIGURED_INFER_MODEL__SET_SCHEDULER_PRIORITY,
[] (const MemoryView &request, hrpc::ServerContextPtr /*server_context*/) -> Expected<Buffer> {
auto &cim_manager = ServiceResourceManager<ConfiguredInferModel>::get_instance();
TRY_AS_HRPC_STATUS(auto tuple, SetSchedulerPrioritySerializer::deserialize_request(request), SetSchedulerPrioritySerializer);
const auto &configured_infer_model_handle = std::get<0>(tuple);
const auto &priority = std::get<1>(tuple);
auto lambda = [priority] (std::shared_ptr<ConfiguredInferModel> configured_infer_model) {
return configured_infer_model->set_scheduler_priority(static_cast<uint8_t>(priority));
};
auto status = cim_manager.execute<hailo_status>(configured_infer_model_handle, lambda);
TRY_AS_HRPC_STATUS(auto reply, SetSchedulerPrioritySerializer::serialize_reply(status), SetSchedulerPrioritySerializer);
return reply;
});
dispatcher.register_action(HailoRpcActionID::CONFIGURED_INFER_MODEL__GET_HW_LATENCY_MEASUREMENT,
[] (const MemoryView &request, hrpc::ServerContextPtr /*server_context*/) -> Expected<Buffer> {
auto &cim_manager = ServiceResourceManager<ConfiguredInferModel>::get_instance();
auto configured_infer_model_handle = GetHwLatencyMeasurementSerializer::deserialize_request(request);
CHECK_EXPECTED_AS_HRPC_STATUS(configured_infer_model_handle, GetHwLatencyMeasurementSerializer);
auto lambda = [] (std::shared_ptr<ConfiguredInferModel> configured_infer_model) {
return configured_infer_model->get_hw_latency_measurement();
};
auto latency_measurement_result = cim_manager.execute<Expected<LatencyMeasurementResult>>(configured_infer_model_handle.value(), lambda);
if (HAILO_NOT_AVAILABLE == latency_measurement_result.status()) {
return GetHwLatencyMeasurementSerializer::serialize_reply(HAILO_NOT_AVAILABLE);
}
CHECK_EXPECTED_AS_HRPC_STATUS(latency_measurement_result, GetHwLatencyMeasurementSerializer);
uint32_t avg_hw_latency = static_cast<uint32_t>(latency_measurement_result.value().avg_hw_latency.count());
TRY_AS_HRPC_STATUS(auto reply, GetHwLatencyMeasurementSerializer::serialize_reply(latency_measurement_result.status(), avg_hw_latency), GetHwLatencyMeasurementSerializer);
return reply;
});
// RPC action: CONFIGURED_INFER_MODEL__ACTIVATE.
// Activates the configured infer model identified by the request handle and replies
// with the resulting hailo_status.
dispatcher.register_action(HailoRpcActionID::CONFIGURED_INFER_MODEL__ACTIVATE,
[] (const MemoryView &request, hrpc::ServerContextPtr /*server_context*/) -> Expected<Buffer> {
auto &cim_manager = ServiceResourceManager<ConfiguredInferModel>::get_instance();
auto configured_infer_model_handle = ActivateSerializer::deserialize_request(request);
CHECK_EXPECTED_AS_HRPC_STATUS(configured_infer_model_handle, ActivateSerializer);
auto lambda = [] (std::shared_ptr<ConfiguredInferModel> configured_infer_model) {
return configured_infer_model->activate();
};
auto status = cim_manager.execute<hailo_status>(configured_infer_model_handle.value(), lambda);
TRY_AS_HRPC_STATUS(auto reply, ActivateSerializer::serialize_reply(status), ActivateSerializer);
return reply;
});
// RPC action: CONFIGURED_INFER_MODEL__DEACTIVATE.
// Deactivates the configured infer model identified by the request handle and replies
// with the resulting hailo_status.
dispatcher.register_action(HailoRpcActionID::CONFIGURED_INFER_MODEL__DEACTIVATE,
[] (const MemoryView &request, hrpc::ServerContextPtr /*server_context*/) -> Expected<Buffer> {
auto &cim_manager = ServiceResourceManager<ConfiguredInferModel>::get_instance();
auto configured_infer_model_handle = DeactivateSerializer::deserialize_request(request);
CHECK_EXPECTED_AS_HRPC_STATUS(configured_infer_model_handle, DeactivateSerializer);
auto lambda = [] (std::shared_ptr<ConfiguredInferModel> configured_infer_model) {
return configured_infer_model->deactivate();
};
auto status = cim_manager.execute<hailo_status>(configured_infer_model_handle.value(), lambda);
TRY_AS_HRPC_STATUS(auto reply, DeactivateSerializer::serialize_reply(status), DeactivateSerializer);
return reply;
});
// RPC action: CONFIGURED_INFER_MODEL__SHUTDOWN.
// Shuts down the configured infer model identified by the request handle and replies
// with the resulting hailo_status.
dispatcher.register_action(HailoRpcActionID::CONFIGURED_INFER_MODEL__SHUTDOWN,
[] (const MemoryView &request, hrpc::ServerContextPtr /*server_context*/) -> Expected<Buffer> {
auto &cim_manager = ServiceResourceManager<ConfiguredInferModel>::get_instance();
auto configured_infer_model_handle = ShutdownSerializer::deserialize_request(request);
CHECK_EXPECTED_AS_HRPC_STATUS(configured_infer_model_handle, ShutdownSerializer);
auto lambda = [] (std::shared_ptr<ConfiguredInferModel> configured_infer_model) {
return configured_infer_model->shutdown();
};
auto status = cim_manager.execute<hailo_status>(configured_infer_model_handle.value(), lambda);
TRY_AS_HRPC_STATUS(auto reply, ShutdownSerializer::serialize_reply(status), ShutdownSerializer);
return reply;
});
// RPC action: CONFIGURED_INFER_MODEL__RUN_ASYNC.
// Runs one asynchronous inference: reads input frames from the client connection
// into pooled buffers, binds pooled output buffers, launches run_async() and
// detaches the job. Completion is delivered through server_context->trigger_callback(),
// which also streams the output buffers back to the client on success and returns
// all buffers to their pools.
dispatcher.register_action(HailoRpcActionID::CONFIGURED_INFER_MODEL__RUN_ASYNC,
[&infer_model_to_info_id, &buffer_pool_per_cim]
(const MemoryView &request, hrpc::ServerContextPtr server_context) -> Expected<Buffer> {
auto &cim_manager = ServiceResourceManager<ConfiguredInferModel>::get_instance();
auto bindings_lambda = [] (std::shared_ptr<ConfiguredInferModel> configured_infer_model) {
return configured_infer_model->create_bindings();
};
TRY_AS_HRPC_STATUS(auto request_tuple, RunAsyncSerializer::deserialize_request(request), RunAsyncSerializer);
auto configured_infer_model_handle = std::get<0>(request_tuple);
auto infer_model_handle = std::get<1>(request_tuple);
auto callback_id = std::get<2>(request_tuple);
auto bindings = cim_manager.execute<Expected<ConfiguredInferModel::Bindings>>(configured_infer_model_handle, bindings_lambda);
CHECK_EXPECTED_AS_HRPC_STATUS(bindings, RunAsyncSerializer);
// Fetch the cached input/output stream names for this infer model (copied by value).
auto infer_model_info_lambda = [] (std::shared_ptr<InferModelInfo> infer_model_info) {
return *infer_model_info;
};
auto &infer_model_infos_manager = ServiceResourceManager<InferModelInfo>::get_instance();
auto infer_model_info = infer_model_infos_manager.execute<Expected<InferModelInfo>>(infer_model_to_info_id[infer_model_handle],
infer_model_info_lambda);
CHECK_EXPECTED_AS_HRPC_STATUS(infer_model_info, RunAsyncSerializer);
// Inputs: acquire one pooled buffer per input stream and fill it directly from the connection.
std::vector<BufferPtr> inputs; // TODO: add infer vector pool
inputs.reserve(infer_model_info->inputs_names.size());
for (const auto &input_name : infer_model_info->inputs_names) {
TRY_AS_HRPC_STATUS(auto input, bindings->input(input_name), RunAsyncSerializer);
TRY_AS_HRPC_STATUS(auto buffer_ptr, buffer_pool_per_cim[configured_infer_model_handle]->acquire_buffer(input_name),
RunAsyncSerializer);
auto status = server_context->connection().read_buffer(MemoryView(*buffer_ptr));
CHECK_SUCCESS_AS_HRPC_STATUS(status, RunAsyncSerializer);
inputs.emplace_back(buffer_ptr);
status = input.set_buffer(MemoryView(*buffer_ptr));
CHECK_SUCCESS_AS_HRPC_STATUS(status, RunAsyncSerializer);
}
// Outputs: pooled buffers bound now; written back to the client in the completion callback.
std::vector<BufferPtr> outputs; // TODO: add infer vector pool
outputs.reserve(infer_model_info->outputs_names.size());
for (const auto &output_name : infer_model_info->outputs_names) {
TRY_AS_HRPC_STATUS(auto buffer_ptr, buffer_pool_per_cim[configured_infer_model_handle]->acquire_buffer(output_name),
RunAsyncSerializer);
auto output = bindings->output(output_name);
CHECK_EXPECTED_AS_HRPC_STATUS(output, RunAsyncSerializer);
auto status = output->set_buffer(MemoryView(buffer_ptr->data(), buffer_ptr->size()));
CHECK_SUCCESS_AS_HRPC_STATUS(status, RunAsyncSerializer);
outputs.emplace_back(buffer_ptr);
}
// NOTE(review): buffer_pool_per_cim is captured by reference into a callback that
// outlives this handler (the job is detached) - assumes the pool map outlives every
// in-flight job; confirm against the enclosing server's lifetime.
auto infer_lambda =
[bindings = bindings.release(), callback_id, server_context, inputs, outputs, &buffer_pool_per_cim, configured_infer_model_handle,
infer_model_info]
(std::shared_ptr<ConfiguredInferModel> configured_infer_model) {
return configured_infer_model->run_async(bindings,
[callback_id, server_context, inputs, outputs, &buffer_pool_per_cim, configured_infer_model_handle, infer_model_info]
(const AsyncInferCompletionInfo &completion_info) {
// Notify the client; on success also stream every output buffer over the connection.
auto status = server_context->trigger_callback(callback_id, completion_info.status, [outputs, completion_info] (hrpc::RpcConnection connection) -> hailo_status {
if (HAILO_SUCCESS == completion_info.status) {
for (auto output : outputs) {
auto status = connection.write_buffer(MemoryView(*output));
CHECK_SUCCESS(status);
}
}
return HAILO_SUCCESS;
});
// HAILO_COMMUNICATION_CLOSED means the client disconnected. Server doesn't need to restart in this case.
if ((status != HAILO_SUCCESS) && (status != HAILO_COMMUNICATION_CLOSED)) {
LOGGER__CRITICAL("Error {} returned from connection.write(). Server Should restart!", status);
}
// Return every buffer to its pool; a failure here leaks pool capacity, hence the
// restart-level log. inputs/outputs are ordered the same as the cached name lists.
for (uint32_t i = 0; i < inputs.size(); i++) {
status = buffer_pool_per_cim[configured_infer_model_handle]->return_to_pool(infer_model_info->inputs_names[i], inputs[i]);
if (status != HAILO_SUCCESS) {
LOGGER__CRITICAL("return_to_pool failed for input {}, status = {}. Server should restart!", infer_model_info->inputs_names[i], status);
return;
}
}
for (uint32_t i = 0; i < outputs.size(); i++) {
status = buffer_pool_per_cim[configured_infer_model_handle]->return_to_pool(infer_model_info->outputs_names[i], outputs[i]);
if (status != HAILO_SUCCESS) {
LOGGER__CRITICAL("return_to_pool failed for output {}, status = {}. Server should restart!", infer_model_info->outputs_names[i], status);
return;
}
}
});
};
auto job = cim_manager.execute<Expected<AsyncInferJob>>(configured_infer_model_handle, infer_lambda);
CHECK_EXPECTED_AS_HRPC_STATUS(job, RunAsyncSerializer);
// Detach: completion is reported via the callback above, never by joining the job.
job->detach();
TRY_AS_HRPC_STATUS(auto reply, RunAsyncSerializer::serialize_reply(HAILO_SUCCESS), RunAsyncSerializer);
return reply;
});
// Install the fully-populated dispatcher, then block serving RPC requests until the
// server stops; a non-success status from serve() becomes the process exit code.
server->set_dispatcher(dispatcher);
auto status = server->serve();
if (status != HAILO_SUCCESS) {
LOGGER__ERROR("Error in serve, status = {}", status);
return status;
}
// Normal termination.
return 0;
}

View File

@@ -0,0 +1,44 @@
#ifndef HAILORT_SERVER_HPP_
/**
* Copyright (c) 2024 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file hailort_server.hpp
* @brief RPC Hailort Server Header
**/
#define HAILORT_SERVER_HPP_
#include "hrpc/server.hpp"
#include "hailort_service/cng_buffer_pool.hpp"
namespace hrpc
{
using infer_model_handle_t = uint32_t;
class Server;
// Concrete RPC server for HailoRT. Extends the generic hrpc::Server with the
// per-client state this service tracks: the InferModelInfo resource id of each
// infer model, a buffer pool per configured infer model, and the HEF buffer kept
// alive for each infer model handle.
class HailoRTServer : public Server {
public:
static Expected<std::unique_ptr<HailoRTServer>> create_unique();
explicit HailoRTServer(std::shared_ptr<ConnectionContext> connection_context) : Server(connection_context) {};
// Maps infer-model handle -> InferModelInfo resource id.
std::unordered_map<uint32_t, uint32_t> &get_infer_model_to_info_id() { return m_infer_model_to_info_id; };
// Maps configured-infer-model handle -> its pre-allocated stream buffer pool.
std::unordered_map<uint32_t, std::shared_ptr<ServiceNetworkGroupBufferPool>> &get_buffer_pool_per_cim() { return m_buffer_pool_per_cim; };
// Maps infer-model handle -> the HEF data backing it.
std::unordered_map<infer_model_handle_t, Buffer> &get_hef_buffers() { return m_hef_buffers_per_infer_model; };
private:
std::unordered_map<uint32_t, uint32_t> m_infer_model_to_info_id;
std::unordered_map<uint32_t, std::shared_ptr<ServiceNetworkGroupBufferPool>> m_buffer_pool_per_cim;
std::unordered_map<infer_model_handle_t, Buffer> m_hef_buffers_per_infer_model;
// Releases every resource owned by a disconnecting client.
virtual hailo_status cleanup_client_resources(RpcConnection client_connection) override;
void cleanup_cim_buffer_pools(const std::vector<uint32_t> &cim_handles);
void cleanup_infer_model_hef_buffers(const std::vector<uint32_t> &infer_model_handles);
};
} // namespace hrpc
#endif // HAILORT_SERVER_HPP_

View File

@@ -0,0 +1,39 @@
#! /bin/bash
### BEGIN INIT INFO
# Provides: hailort_server
# Required-Start: $local_fs $network
# Required-Stop: $local_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: hailort_server service
# Description: Run hailort_server daemon
### END INIT INFO

# TODO: Remove this file once the hailort_server will use systemd

# Launch the daemon in the background, running from /usr/bin.
do_start() {
    echo "Starting hailort_server"
    bash -c "cd /usr/bin && hailort_server &"
}

# Terminate every running hailort_server process by name.
do_stop() {
    echo "Stopping hailort_server..."
    bash -c "killall hailort_server"
}

# Dispatch on the action requested by the init system.
case "$1" in
    start)
        do_start
        ;;
    stop)
        do_stop
        ;;
    restart)
        # Re-invoke this script so stop/start share one code path.
        $0 stop
        sleep 1
        $0 start
        ;;
    *)
        echo "Usage: /etc/init.d/hailort_server {start|stop|restart}"
        exit 1
        ;;
esac

exit 0

View File

@@ -23,26 +23,25 @@ Expected<std::shared_ptr<ServiceStreamBufferPool>> ServiceStreamBufferPool::crea
};
auto &vdevice_manager = ServiceResourceManager<VDevice>::get_instance();
auto free_buffers_queue = SpscQueue<BufferPtr>::create(buffer_count, shutdown_event, DEFAULT_TRANSFER_TIMEOUT);
CHECK_EXPECTED(free_buffers_queue);
TRY(auto free_buffers_queue,
SpscQueue<BufferPtr>::create(buffer_count, shutdown_event, DEFAULT_TRANSFER_TIMEOUT));
std::vector<AllocatedMappedBuffer> buffers;
buffers.reserve(buffer_count);
for (size_t i = 0; i < buffer_count; i++) {
auto buffer = Buffer::create_shared(buffer_size, BufferStorageParams::create_dma());
CHECK_EXPECTED(buffer);
TRY(auto buffer, Buffer::create_shared(buffer_size, BufferStorageParams::create_dma()));
auto mapped_buffer = vdevice_manager.execute<Expected<DmaMappedBuffer>>(vdevice_handle, map_buffer_lambda, buffer.value());
CHECK_EXPECTED(mapped_buffer);
TRY(auto mapped_buffer,
vdevice_manager.execute<Expected<DmaMappedBuffer>>(vdevice_handle, map_buffer_lambda, buffer));
auto status = free_buffers_queue->enqueue(buffer.value());
auto status = free_buffers_queue.enqueue(buffer);
CHECK_SUCCESS(status);
buffers.emplace_back(AllocatedMappedBuffer{ buffer.release(), mapped_buffer.release()});
buffers.emplace_back(AllocatedMappedBuffer{ buffer, std::move(mapped_buffer)});
}
auto buffer_pool_ptr = make_shared_nothrow<ServiceStreamBufferPool>(buffer_size, std::move(buffers),
free_buffers_queue.release(), buffer_count);
std::move(free_buffers_queue), buffer_count);
CHECK_NOT_NULL_AS_EXPECTED(buffer_pool_ptr, HAILO_OUT_OF_HOST_MEMORY);
return buffer_pool_ptr;
@@ -58,18 +57,9 @@ ServiceStreamBufferPool::ServiceStreamBufferPool(size_t buffer_size, std::vector
Expected<BufferPtr> ServiceStreamBufferPool::acquire_buffer()
{
auto buffer = m_free_buffers_queue.dequeue(DEFAULT_TRANSFER_TIMEOUT);
if (HAILO_SHUTDOWN_EVENT_SIGNALED == buffer.status()) {
return make_unexpected(buffer.status());
}
else if (HAILO_TIMEOUT == buffer.status()) {
LOGGER__WARNING(
"Failed to acquire buffer because the buffer pool is empty. This could be caused by uneven reading and writing speeds");
return make_unexpected(buffer.status());
}
CHECK_EXPECTED(buffer);
return buffer.release();
TRY_WITH_ACCEPTABLE_STATUS(HAILO_SHUTDOWN_EVENT_SIGNALED, auto buffer,
m_free_buffers_queue.dequeue(DEFAULT_TRANSFER_TIMEOUT));
return buffer;
}
hailo_status ServiceStreamBufferPool::return_to_pool(BufferPtr buffer)
@@ -91,9 +81,7 @@ size_t ServiceStreamBufferPool::buffers_count()
Expected<std::shared_ptr<ServiceNetworkGroupBufferPool>> ServiceNetworkGroupBufferPool::create(uint32_t vdevice_handle)
{
auto shutdown_event_exp = Event::create_shared(Event::State::not_signalled);
CHECK_EXPECTED(shutdown_event_exp);
auto shutdown_event = shutdown_event_exp.release();
TRY(auto shutdown_event, Event::create_shared(Event::State::not_signalled));
auto cng_buffer_pool_ptr = make_shared_nothrow<ServiceNetworkGroupBufferPool>(shutdown_event, vdevice_handle);
CHECK_NOT_NULL_AS_EXPECTED(cng_buffer_pool_ptr, HAILO_OUT_OF_HOST_MEMORY);
@@ -102,54 +90,53 @@ Expected<std::shared_ptr<ServiceNetworkGroupBufferPool>> ServiceNetworkGroupBuff
}
ServiceNetworkGroupBufferPool::ServiceNetworkGroupBufferPool(EventPtr shutdown_event, uint32_t vdevice_handle) :
m_output_name_to_buffer_pool(), m_shutdown_event(shutdown_event), m_vdevice_handle(vdevice_handle)
m_stream_name_to_buffer_pool(), m_shutdown_event(shutdown_event), m_vdevice_handle(vdevice_handle)
{}
hailo_status ServiceNetworkGroupBufferPool::allocate_pool(const std::string &name, size_t frame_size, size_t pool_size)
hailo_status ServiceNetworkGroupBufferPool::allocate_pool(const std::string &name,
hailo_dma_buffer_direction_t direction, size_t frame_size, size_t pool_size)
{
auto buffer_pool = ServiceStreamBufferPool::create(m_vdevice_handle, frame_size,
pool_size, HAILO_DMA_BUFFER_DIRECTION_D2H, m_shutdown_event);
CHECK_EXPECTED(buffer_pool);
TRY(auto buffer_pool, ServiceStreamBufferPool::create(m_vdevice_handle, frame_size,
pool_size, direction, m_shutdown_event));
std::unique_lock<std::mutex> lock(m_mutex);
m_output_name_to_buffer_pool[name] = buffer_pool.release();
m_stream_name_to_buffer_pool[name] = buffer_pool;
return HAILO_SUCCESS;
}
hailo_status ServiceNetworkGroupBufferPool::reallocate_pool(const std::string &name, size_t frame_size)
hailo_status ServiceNetworkGroupBufferPool::reallocate_pool(const std::string &name,
hailo_dma_buffer_direction_t direction, size_t frame_size)
{
std::unique_lock<std::mutex> lock(m_mutex);
auto pool_size = m_output_name_to_buffer_pool[name]->buffers_count();
m_output_name_to_buffer_pool[name].reset();
auto pool_size = m_stream_name_to_buffer_pool[name]->buffers_count();
m_stream_name_to_buffer_pool[name].reset();
auto buffer_pool = ServiceStreamBufferPool::create(m_vdevice_handle, frame_size,
pool_size, HAILO_DMA_BUFFER_DIRECTION_D2H, m_shutdown_event);
CHECK_EXPECTED(buffer_pool);
m_output_name_to_buffer_pool[name] = buffer_pool.release();
TRY(auto buffer_pool, ServiceStreamBufferPool::create(m_vdevice_handle, frame_size,
pool_size, direction, m_shutdown_event));
m_stream_name_to_buffer_pool[name] = buffer_pool;
return HAILO_SUCCESS;
}
Expected<BufferPtr> ServiceNetworkGroupBufferPool::acquire_buffer(const std::string &output_name)
Expected<BufferPtr> ServiceNetworkGroupBufferPool::acquire_buffer(const std::string &stream_name)
{
CHECK_AS_EXPECTED(contains(m_output_name_to_buffer_pool, output_name), HAILO_INTERNAL_FAILURE,
"acquire_buffer() for output {} failed, output name does not exist in buffer pool", output_name);
CHECK_AS_EXPECTED(contains(m_stream_name_to_buffer_pool, stream_name), HAILO_INTERNAL_FAILURE,
"acquire_buffer() for stream {} failed, stream name does not exist in buffer pool", stream_name);
std::unique_lock<std::mutex> lock(m_mutex);
auto buffer = m_output_name_to_buffer_pool.at(output_name)->acquire_buffer();
CHECK_EXPECTED(buffer);
TRY(auto buffer, m_stream_name_to_buffer_pool.at(stream_name)->acquire_buffer());
return buffer.release();
return buffer;
}
hailo_status ServiceNetworkGroupBufferPool::return_to_pool(const std::string &output_name, BufferPtr buffer)
hailo_status ServiceNetworkGroupBufferPool::return_to_pool(const std::string &stream_name, BufferPtr buffer)
{
CHECK(contains(m_output_name_to_buffer_pool, output_name), HAILO_INTERNAL_FAILURE,
"acquire_buffer() for output {} failed, output name does not exist in buffer pool", output_name);
CHECK(contains(m_stream_name_to_buffer_pool, stream_name), HAILO_INTERNAL_FAILURE,
"acquire_buffer() for stream {} failed, stream name does not exist in buffer pool", stream_name);
std::unique_lock<std::mutex> lock(m_mutex);
auto status = m_output_name_to_buffer_pool.at(output_name)->return_to_pool(buffer);
auto status = m_stream_name_to_buffer_pool.at(stream_name)->return_to_pool(buffer);
CHECK_SUCCESS(status);
return HAILO_SUCCESS;

View File

@@ -4,7 +4,7 @@
**/
/**
* @file cng_buffer_pool.hpp
* @brief This model represents the buffer pools for the output reads for each network group. Used in async API
* @brief This model represents the buffer pools for the streams of each network group. Used in async API
**/
#ifndef _HAILO_CNG_BUFFER_POOL_HPP_
@@ -49,9 +49,9 @@ private:
};
using BufferPoolPtr = std::shared_ptr<ServiceStreamBufferPool>;
using output_name_t = std::string;
using stream_name_t = std::string;
// This object holds a buffer pool for each output streams of the network group.
// This object holds a buffer pool for each stream of the network group.
// It is used to pre-allocate all the buffers necessary for the reads from the device.
// The buffers are reuseable, which also prevents allocation during inference.
// The buffers are mapped to the device during their creation, which prevent lazy mapping each frame inference.
@@ -61,9 +61,9 @@ class ServiceNetworkGroupBufferPool
public:
static Expected<std::shared_ptr<ServiceNetworkGroupBufferPool>> create(uint32_t vdevice_handle);
hailo_status allocate_pool(const std::string &name, size_t frame_size, size_t pool_size);
hailo_status allocate_pool(const std::string &name, hailo_dma_buffer_direction_t direction, size_t frame_size, size_t pool_size);
// Used in order to reallocate the pool buffers with different frame_size
hailo_status reallocate_pool(const std::string &name, size_t frame_size);
hailo_status reallocate_pool(const std::string &name, hailo_dma_buffer_direction_t direction, size_t frame_size);
ServiceNetworkGroupBufferPool(ServiceNetworkGroupBufferPool &&) = delete;
ServiceNetworkGroupBufferPool(const ServiceNetworkGroupBufferPool &) = delete;
@@ -72,12 +72,12 @@ public:
virtual ~ServiceNetworkGroupBufferPool() = default;
ServiceNetworkGroupBufferPool(EventPtr shutdown_event, uint32_t vdevice_handle);
Expected<BufferPtr> acquire_buffer(const std::string &output_name);
hailo_status return_to_pool(const std::string &output_name, BufferPtr buffer);
Expected<BufferPtr> acquire_buffer(const std::string &stream_name);
hailo_status return_to_pool(const std::string &stream_name, BufferPtr buffer);
hailo_status shutdown();
private:
std::unordered_map<output_name_t, BufferPoolPtr> m_output_name_to_buffer_pool;
std::unordered_map<stream_name_t, BufferPoolPtr> m_stream_name_to_buffer_pool;
EventPtr m_shutdown_event;
uint32_t m_vdevice_handle;
std::mutex m_mutex;

View File

@@ -307,26 +307,23 @@ grpc::Status HailoRtRpcService::VDevice_configure(grpc::ServerContext*, const VD
hailo_status HailoRtRpcService::create_buffer_pools_for_ng(uint32_t vdevice_handle, uint32_t ng_handle, uint32_t request_pid,
bool allocate_for_raw_streams)
{
auto cng_buffer_pool = ServiceNetworkGroupBufferPool::create(vdevice_handle);
CHECK_EXPECTED_AS_STATUS(cng_buffer_pool);
TRY(auto cng_buffer_pool, ServiceNetworkGroupBufferPool::create(vdevice_handle));
auto &cng_buffer_pool_manager = ServiceResourceManager<ServiceNetworkGroupBufferPool>::get_instance();
auto cng_buffer_pool_handle = cng_buffer_pool_manager.register_resource(request_pid, cng_buffer_pool.release());
auto cng_buffer_pool_handle = cng_buffer_pool_manager.register_resource(request_pid, cng_buffer_pool);
CHECK(cng_buffer_pool_handle == ng_handle, HAILO_INTERNAL_FAILURE,
"cng_buffer_pool_handle = {} must be equal to network_group_handle ={}", cng_buffer_pool_handle, ng_handle);
if (allocate_for_raw_streams) {
// For Async API - The buffer size in the pool will be the stream's hw frame size as used in the infer_model pipeline
auto min_buffer_pool_size = get_min_buffer_pool_size(ng_handle);
CHECK_EXPECTED_AS_STATUS(min_buffer_pool_size);
TRY(const auto min_buffer_pool_size, get_min_buffer_pool_size(ng_handle));
TRY(const auto streams_infos, get_all_stream_infos(ng_handle));
auto streams_infos = get_all_stream_infos(ng_handle);
CHECK_EXPECTED_AS_STATUS(streams_infos);
for (const auto &stream_info : streams_infos.value()) {
for (const auto &stream_info : streams_infos) {
if (stream_info.direction == HAILO_D2H_STREAM) {
auto allocate_lambda = [&](std::shared_ptr<ServiceNetworkGroupBufferPool> cng_buffer_pool) {
return cng_buffer_pool->allocate_pool(stream_info.name, stream_info.hw_frame_size, min_buffer_pool_size.value());
return cng_buffer_pool->allocate_pool(stream_info.name, HAILO_DMA_BUFFER_DIRECTION_D2H,
stream_info.hw_frame_size, min_buffer_pool_size);
};
CHECK_SUCCESS(cng_buffer_pool_manager.execute(ng_handle, allocate_lambda));
}
@@ -465,10 +462,8 @@ hailo_status HailoRtRpcService::add_input_named_buffer(const ProtoTransferReques
mem_view = MemoryView::create_const(data, proto_stream_transfer_request.data().size());
} else {
// The memory is not aligned to 8, therefore we need to copy the data into a buffer
auto buffer_exp = Buffer::create_shared(data, proto_stream_transfer_request.data().size(),
BufferStorageParams::create_dma());
CHECK_EXPECTED(buffer_exp);
buffer = buffer_exp.release();
TRY(buffer, Buffer::create_shared(data, proto_stream_transfer_request.data().size(),
BufferStorageParams::create_dma()));
mem_view = MemoryView(*buffer);
}
@@ -487,7 +482,11 @@ hailo_status HailoRtRpcService::add_input_named_buffer(const ProtoTransferReques
enqueue_cb_identifier(vdevice_handle, std::move(cb_identifier));
};
named_buffers_callbacks.emplace(stream_name, std::make_pair(mem_view, transfer_done));
BufferRepresentation buffer_representation {};
buffer_representation.buffer_type = BufferType::VIEW;
buffer_representation.view = mem_view;
named_buffers_callbacks.emplace(stream_name, std::make_pair(buffer_representation, transfer_done));
return HAILO_SUCCESS;
}
@@ -496,9 +495,7 @@ hailo_status HailoRtRpcService::add_output_named_buffer(const ProtoTransferReque
{
// Prepare output buffer
auto &stream_name = proto_stream_transfer_request.stream_name();
auto buffer_exp = acquire_buffer_from_cng_pool(ng_handle, stream_name);
CHECK_EXPECTED(buffer_exp);
auto buffer = buffer_exp.release();
TRY(auto buffer, acquire_buffer_from_cng_pool(ng_handle, stream_name));
// Prepare callback
auto cb_idx = proto_stream_transfer_request.cb_idx();
@@ -511,7 +508,11 @@ hailo_status HailoRtRpcService::add_output_named_buffer(const ProtoTransferReque
enqueue_cb_identifier(vdevice_handle, std::move(cb_identifier));
};
named_buffers_callbacks.emplace(stream_name, std::make_pair(MemoryView(*buffer), transfer_done));
BufferRepresentation buffer_representation {};
buffer_representation.buffer_type = BufferType::VIEW;
buffer_representation.view = MemoryView(*buffer);
named_buffers_callbacks.emplace(stream_name, std::make_pair(buffer_representation, transfer_done));
return HAILO_SUCCESS;
}
@@ -566,9 +567,12 @@ Expected<BufferPtr> HailoRtRpcService::acquire_buffer_from_cng_pool(uint32_t ng_
auto lambda_acquire_buffer = [](std::shared_ptr<ServiceNetworkGroupBufferPool> cng_buffer_pool, const std::string &output_name) {
return cng_buffer_pool->acquire_buffer(output_name);
};
auto buffer = cng_buffer_pool_manager.execute<Expected<BufferPtr>>(ng_handle, lambda_acquire_buffer, output_name);
CHECK_EXPECTED(buffer);
return buffer.release();
TRY(auto buffer,
cng_buffer_pool_manager.execute<Expected<BufferPtr>>(
ng_handle, lambda_acquire_buffer, output_name)
);
return buffer;
}
grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_infer_async(grpc::ServerContext*,
@@ -1091,6 +1095,7 @@ void serialize_op_matadata(hailort::net_flow::OpMetadata &op_metadata, ProtoOpMe
nms_config_proto->set_background_removal(nms_config.background_removal);
nms_config_proto->set_background_removal_index(nms_config.background_removal_index);
nms_config_proto->set_cross_classes(nms_config.cross_classes);
nms_config_proto->set_bbox_only(nms_config.bbox_only);
}
switch (op_metadata.type()) {
@@ -1379,7 +1384,8 @@ grpc::Status HailoRtRpcService::OutputVStreams_create(grpc::ServerContext *, con
auto &vstream_manager = ServiceResourceManager<OutputVStream>::get_instance();
for (size_t i = 0; i < vstreams.size(); i++) {
auto allocate_lambda = [&](std::shared_ptr<ServiceNetworkGroupBufferPool> cng_buffer_pool) {
return cng_buffer_pool->allocate_pool(vstreams[i].name(), vstreams[i].get_frame_size(), output_params.at(vstreams[i].name()).queue_size);
return cng_buffer_pool->allocate_pool(vstreams[i].name(), HAILO_DMA_BUFFER_DIRECTION_D2H,
vstreams[i].get_frame_size(), output_params.at(vstreams[i].name()).queue_size);
};
CHECK_SUCCESS_AS_RPC_STATUS(cng_buffer_pool_manager.execute(network_group_handle, allocate_lambda), reply);
@@ -1513,7 +1519,7 @@ grpc::Status HailoRtRpcService::OutputVStream_read(grpc::ServerContext*, const O
auto buffer_exp = acquire_buffer_from_cng_pool(ng_handle, vstream_name.value());
CHECK_EXPECTED_AS_RPC_STATUS(buffer_exp, reply);
auto buffer = buffer_exp.release();
auto buffer = buffer_exp.value();
auto lambda = [](std::shared_ptr<OutputVStream> output_vstream, MemoryView &buffer) {
return output_vstream->read(std::move(buffer));
@@ -1549,10 +1555,10 @@ Expected<std::vector<hailo_stream_info_t>> HailoRtRpcService::get_all_stream_inf
return cng->get_all_stream_infos();
};
auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto expected_stream_infos = manager.execute<Expected<std::vector<hailo_stream_info_t>>>(ng_handle, lambda);
CHECK_EXPECTED(expected_stream_infos);
TRY(auto stream_infos,
manager.execute<Expected<std::vector<hailo_stream_info_t>>>(ng_handle, lambda));
return expected_stream_infos.release();
return stream_infos;
}
Expected<std::vector<hailo_vstream_info_t>> HailoRtRpcService::get_all_vstream_infos(uint32_t ng_handle)
@@ -1561,10 +1567,10 @@ Expected<std::vector<hailo_vstream_info_t>> HailoRtRpcService::get_all_vstream_i
return cng->get_all_vstream_infos();
};
auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto expected_vstream_infos = manager.execute<Expected<std::vector<hailo_vstream_info_t>>>(ng_handle, lambda);
CHECK_EXPECTED(expected_vstream_infos);
TRY(auto vstream_infos,
manager.execute<Expected<std::vector<hailo_vstream_info_t>>>(ng_handle, lambda));
return expected_vstream_infos.release();
return vstream_infos;
}
grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_all_stream_infos(grpc::ServerContext*,
@@ -1683,10 +1689,9 @@ Expected<size_t> HailoRtRpcService::get_min_buffer_pool_size(uint32_t ng_handle)
return cng->get_min_buffer_pool_size();
};
auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto min_buffer_pool_size = manager.execute<Expected<size_t>>(ng_handle, lambda);
CHECK_EXPECTED(min_buffer_pool_size);
TRY(auto min_buffer_pool_size, manager.execute<Expected<size_t>>(ng_handle, lambda));
return min_buffer_pool_size.release();
return min_buffer_pool_size;
}
grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_min_buffer_pool_size(grpc::ServerContext*,
@@ -1899,10 +1904,9 @@ Expected<std::string> HailoRtRpcService::output_vstream_name(uint32_t vstream_ha
return output_vstream->name();
};
auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto name = manager.execute<Expected<std::string>>(vstream_handle, lambda);
CHECK_EXPECTED(name);
TRY(auto name, manager.execute<Expected<std::string>>(vstream_handle, lambda));
return name.release();
return name;
}
Expected<size_t> HailoRtRpcService::output_vstream_frame_size(uint32_t vstream_handle)
@@ -1911,10 +1915,9 @@ Expected<size_t> HailoRtRpcService::output_vstream_frame_size(uint32_t vstream_h
return output_vstream->get_frame_size();
};
auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto frame_size = manager.execute<Expected<size_t>>(vstream_handle, lambda);
CHECK_EXPECTED(frame_size);
TRY(auto frame_size, manager.execute<Expected<size_t>>(vstream_handle, lambda));
return frame_size.release();
return frame_size;
}
grpc::Status HailoRtRpcService::OutputVStream_name(grpc::ServerContext*, const VStream_name_Request *request,
@@ -2194,15 +2197,12 @@ grpc::Status HailoRtRpcService::OutputVStream_set_nms_iou_threshold(grpc::Server
hailo_status HailoRtRpcService::update_buffer_size_in_pool(uint32_t vstream_handle, uint32_t network_group_handle)
{
auto vstream_name = output_vstream_name(vstream_handle);
CHECK_EXPECTED(vstream_name);
auto frame_size = output_vstream_frame_size(vstream_handle);
CHECK_EXPECTED(frame_size);
TRY(const auto vstream_name, output_vstream_name(vstream_handle));
TRY(const auto frame_size, output_vstream_frame_size(vstream_handle));
auto &cng_buffer_pool_manager = ServiceResourceManager<ServiceNetworkGroupBufferPool>::get_instance();
auto allocate_lambda = [&](std::shared_ptr<ServiceNetworkGroupBufferPool> cng_buffer_pool) {
return cng_buffer_pool->reallocate_pool(vstream_name.release(), frame_size.release());
return cng_buffer_pool->reallocate_pool(vstream_name, HAILO_DMA_BUFFER_DIRECTION_D2H, frame_size);
};
CHECK_SUCCESS(cng_buffer_pool_manager.execute(network_group_handle, allocate_lambda));

View File

@@ -19,6 +19,8 @@
#include <shared_mutex>
#include <unordered_set>
#define SINGLE_CLIENT_PID (0)
namespace hailort
{
@@ -48,10 +50,7 @@ public:
K execute(uint32_t handle, Func &lambda, Args... args)
{
std::unique_lock<std::mutex> lock(m_mutex);
auto resource_expected = resource_lookup(handle);
CHECK_EXPECTED(resource_expected);
auto resource = resource_expected.release();
TRY(auto resource, resource_lookup(handle));
assert(contains(m_resources_mutexes, handle));
std::shared_lock<std::shared_timed_mutex> resource_lock(m_resources_mutexes[handle]);
lock.unlock();
@@ -64,10 +63,7 @@ public:
hailo_status execute(uint32_t handle, Func &lambda, Args... args)
{
std::unique_lock<std::mutex> lock(m_mutex);
auto resource_expected = resource_lookup(handle);
CHECK_EXPECTED_AS_STATUS(resource_expected);
auto resource = resource_expected.release();
TRY(auto resource, resource_lookup(handle));
assert(contains(m_resources_mutexes, handle));
std::shared_lock<std::shared_timed_mutex> resource_lock(m_resources_mutexes[handle]);
lock.unlock();
@@ -90,10 +86,7 @@ public:
Expected<uint32_t> dup_handle(uint32_t handle, uint32_t pid)
{
std::unique_lock<std::mutex> lock(m_mutex);
auto resource_expected = resource_lookup(handle);
CHECK_EXPECTED(resource_expected);
auto resource = resource_expected.release();
TRY(auto resource, resource_lookup(handle));
assert(contains(m_resources_mutexes, handle));
std::unique_lock<std::shared_timed_mutex> resource_lock(m_resources_mutexes[handle]);
resource->pids.insert(pid);
@@ -118,7 +111,7 @@ public:
{
std::unique_lock<std::shared_timed_mutex> resource_lock(m_resources_mutexes[handle]);
resource->pids.erase(pid);
if (all_pids_dead(resource)) {
if ((SINGLE_CLIENT_PID == pid) || all_pids_dead(resource)) {
release_resource = true;
res = resource->resource;
m_resources.erase(handle);

View File

@@ -29,14 +29,12 @@ class VDeviceCallbacksQueue final
public:
static Expected<std::unique_ptr<VDeviceCallbacksQueue>> create(uint32_t max_queue_size)
{
auto shutdown_event_exp = Event::create_shared(Event::State::not_signalled);
CHECK_EXPECTED(shutdown_event_exp);
auto shutdown_event = shutdown_event_exp.release();
TRY(auto shutdown_event, Event::create_shared(Event::State::not_signalled));
auto cb_ids_queue = SpscQueue<ProtoCallbackIdentifier>::create(max_queue_size, shutdown_event, HAILO_INFINITE_TIMEOUT);
CHECK_EXPECTED(cb_ids_queue);
TRY(auto cb_ids_queue,
SpscQueue<ProtoCallbackIdentifier>::create(max_queue_size, shutdown_event, HAILO_INFINITE_TIMEOUT));
auto queue_ptr = make_unique_nothrow<VDeviceCallbacksQueue>(cb_ids_queue.release(), shutdown_event);
auto queue_ptr = make_unique_nothrow<VDeviceCallbacksQueue>(std::move(cb_ids_queue), shutdown_event);
CHECK_AS_EXPECTED(nullptr != queue_ptr, HAILO_OUT_OF_HOST_MEMORY);
return queue_ptr;
@@ -57,16 +55,8 @@ public:
Expected<ProtoCallbackIdentifier> dequeue()
{
auto callback_id = m_callbacks_ids_queue.dequeue();
if (HAILO_SHUTDOWN_EVENT_SIGNALED == callback_id.status()) {
return make_unexpected(callback_id.status());
}
else if (HAILO_TIMEOUT == callback_id.status()) {
LOGGER__WARNING("Failed to dequeue callback_id because the queue is empty, status={}", HAILO_TIMEOUT);
return make_unexpected(callback_id.status());
}
CHECK_EXPECTED(callback_id);
TRY_WITH_ACCEPTABLE_STATUS(HAILO_SHUTDOWN_EVENT_SIGNALED, auto callback_id,
m_callbacks_ids_queue.dequeue());
return callback_id;
}

View File

@@ -65,35 +65,32 @@ hailo_status BenchmarkCommand::execute()
std::cout << "Starting Measurements..." << std::endl;
std::cout << "Measuring FPS in hw_only mode" << std::endl;
auto hw_only_mode_info = hw_only_mode();
CHECK_EXPECTED_AS_STATUS(hw_only_mode_info, "hw_only measuring failed");
TRY(auto hw_only_mode_info, hw_only_mode(), "hw_only measuring failed");
std::cout << "Measuring FPS " << (!m_not_measure_power ? "and Power " : "") << "in streaming mode" << std::endl;
auto streaming_mode_info = fps_streaming_mode();
CHECK_EXPECTED_AS_STATUS(streaming_mode_info, "FPS in streaming mode failed");
TRY(auto streaming_mode_info, fps_streaming_mode(), "FPS in streaming mode failed");
// TODO - HRT-6931 - measure latency only in the case of single device.
std::cout << "Measuring HW Latency" << std::endl;
auto latency_info = latency();
CHECK_EXPECTED_AS_STATUS(latency_info, "Latency measuring failed");
TRY(auto latency_info, latency(), "Latency measuring failed");
assert(hw_only_mode_info->network_group_results().size() == streaming_mode_info->network_group_results().size());
assert(latency_info->network_group_results().size() == streaming_mode_info->network_group_results().size());
assert(hw_only_mode_info.network_group_results().size() == streaming_mode_info.network_group_results().size());
assert(latency_info.network_group_results().size() == streaming_mode_info.network_group_results().size());
std::cout << std::endl;
std::cout << "=======" << std::endl;
std::cout << "Summary" << std::endl;
std::cout << "=======" << std::endl;
for (auto &hw_only_res : hw_only_mode_info->network_group_results()) {
for (auto &hw_only_res : hw_only_mode_info.network_group_results()) {
auto network_group_name = hw_only_res.network_group_name();
auto streaming_res = std::find_if(streaming_mode_info->network_group_results().begin(), streaming_mode_info->network_group_results().end(),
auto streaming_res = std::find_if(streaming_mode_info.network_group_results().begin(), streaming_mode_info.network_group_results().end(),
[network_group_name] (NetworkGroupInferResult &infer_results) { return (infer_results.network_group_name() == network_group_name); });
CHECK(streaming_mode_info->network_group_results().end() != streaming_res, HAILO_INTERNAL_FAILURE, "Failed to fun streaming results for network group {}", network_group_name);
CHECK(streaming_mode_info.network_group_results().end() != streaming_res, HAILO_INTERNAL_FAILURE, "Failed to fun streaming results for network group {}", network_group_name);
auto latency_res = std::find_if(latency_info->network_group_results().begin(), latency_info->network_group_results().end(),
auto latency_res = std::find_if(latency_info.network_group_results().begin(), latency_info.network_group_results().end(),
[network_group_name] (NetworkGroupInferResult &infer_results) { return (infer_results.network_group_name() == network_group_name); });
CHECK(latency_info->network_group_results().end() != latency_res, HAILO_INTERNAL_FAILURE, "Failed to fun latency results for network group {}", network_group_name);
CHECK(latency_info.network_group_results().end() != latency_res, HAILO_INTERNAL_FAILURE, "Failed to fun latency results for network group {}", network_group_name);
std::cout << "FPS (hw_only) = " << hw_only_res.fps().value() <<std::endl;
std::cout << " (streaming) = " << streaming_res->fps().value() <<std::endl;
@@ -105,7 +102,7 @@ hailo_status BenchmarkCommand::execute()
}
}
if (!m_not_measure_power) {
for (const auto &pair : streaming_mode_info->m_power_measurements) {
for (const auto &pair : streaming_mode_info.m_power_measurements) {
std::cout << "Device " << pair.first << ":" << std::endl;
const auto &data = pair.second->data();
const auto &power_units = pair.second->power_units();
@@ -116,11 +113,9 @@ hailo_status BenchmarkCommand::execute()
if (!m_csv_file_path.empty()){
m_params.csv_output = m_csv_file_path;
auto printer = InferStatsPrinter::create(m_params, false);
CHECK_EXPECTED_AS_STATUS(printer, "Failed to initialize infer stats printer");
printer->print_benchmark_csv_header();
printer->print_benchmark_csv(hw_only_mode_info.value(),
streaming_mode_info.value(), latency_info.value());
TRY(auto printer, InferStatsPrinter::create(m_params, false), "Failed to initialize infer stats printer");
printer.print_benchmark_csv_header();
printer.print_benchmark_csv(hw_only_mode_info, streaming_mode_info, latency_info);
}
return HAILO_SUCCESS;
}

View File

@@ -35,13 +35,12 @@ hailo_status BoardConfigReadSubcommand::execute_on_device(Device &device)
CHECK_SUCCESS(status,
"'board-config read' command should get a specific device-id.");
auto buffer = device.read_board_config();
CHECK_EXPECTED_AS_STATUS(buffer, "Failed reading board config from device");
TRY(auto buffer, device.read_board_config(), "Failed reading board config from device");
auto output_file = std::ofstream(m_output_file_path, std::ios::out | std::ios::binary);
CHECK(output_file.is_open(), HAILO_OPEN_FILE_FAILURE, "Failed opening output file {} with errno: {}", m_output_file_path, errno);
output_file.write(reinterpret_cast<char*>(buffer->data()), buffer->size());
output_file.write(reinterpret_cast<char*>(buffer.data()), buffer.size());
CHECK(output_file.good(), HAILO_FILE_OPERATION_FAILURE, "Failed writing board config into file {}.", m_output_file_path);
return HAILO_SUCCESS;
@@ -57,10 +56,8 @@ BoardConfigWriteSubcommand::BoardConfigWriteSubcommand(CLI::App &parent_app) :
hailo_status BoardConfigWriteSubcommand::execute_on_device(Device &device)
{
auto buffer = read_binary_file(m_input_file_path);
CHECK_EXPECTED_AS_STATUS(buffer);
hailo_status status = device.write_board_config(MemoryView(buffer.value()));
TRY(auto buffer, read_binary_file(m_input_file_path));
hailo_status status = device.write_board_config(MemoryView(buffer));
CHECK_SUCCESS(status, "Failed writing board config to device.");
return HAILO_SUCCESS;

View File

@@ -34,13 +34,21 @@ hailo_status ContainerCommand::execute()
}
DeviceCommand::DeviceCommand(CLI::App *app) :
Command(app)
Command(app),
m_show_stdout(true)
{
add_device_options(m_app, m_device_params);
}
void DeviceCommand::pre_execute()
{
// Do nothing by default
}
hailo_status DeviceCommand::execute()
{
pre_execute();
auto devices = create_devices(m_device_params);
if (!devices) {
return devices.status();
@@ -52,7 +60,9 @@ hailo_status DeviceCommand::execute_on_devices(std::vector<std::unique_ptr<Devic
{
auto status = HAILO_SUCCESS; // Best effort
for (auto &device : devices) {
std::cout << "Executing on device: " << device->get_dev_id() << std::endl;
if (m_show_stdout) {
std::cout << "Executing on device: " << device->get_dev_id() << std::endl;
}
auto execute_status = execute_on_device(*device);
if (HAILO_SUCCESS != execute_status) {
std::cerr << "Failed to execute on device: " << device->get_dev_id() << ". status= " << execute_status << std::endl;
@@ -66,9 +76,8 @@ hailo_status DeviceCommand::validate_specific_device_is_given()
{
if ((1 != m_device_params.device_ids.size()) || contains(m_device_params.device_ids, std::string("*"))) {
// No specific device-id given, make sure there is only 1 device on the machine.
auto scan_res = Device::scan();
CHECK_EXPECTED_AS_STATUS(scan_res, "Failed to scan for devices");
if (1 != scan_res->size()) {
TRY(auto scan_res, Device::scan(), "Failed to scan for devices");
if (1 != scan_res.size()) {
return HAILO_INVALID_OPERATION;
}
}

View File

@@ -65,7 +65,9 @@ public:
protected:
hailo_device_params m_device_params;
bool m_show_stdout; // Set to false in subclasses to disable this class' prints to stdout
virtual void pre_execute(); // Override this function to do any pre-execution setup
virtual hailo_status execute_on_device(Device &device) = 0;
hailo_status execute_on_devices(std::vector<std::unique_ptr<Device>> &devices);
hailo_status validate_specific_device_is_given();

View File

@@ -36,13 +36,8 @@ DownloadActionListCommand::DownloadActionListCommand(CLI::App &parent_app) :
hailo_status DownloadActionListCommand::execute(Device &device, const std::string &output_file_path,
const ConfiguredNetworkGroupVector &network_groups, const std::string &hef_file_path)
{
auto expected_action_list_json = init_json_object(device, hef_file_path);
CHECK_EXPECTED_AS_STATUS(expected_action_list_json);
auto action_list_json = expected_action_list_json.value();
auto network_groups_list_json = parse_network_groups(device, network_groups);
CHECK_EXPECTED_AS_STATUS(network_groups_list_json);
action_list_json["network_groups"] = network_groups_list_json.release();
TRY(auto action_list_json, init_json_object(device, hef_file_path));
TRY(action_list_json["network_groups"], parse_network_groups(device, network_groups));
return write_to_json(action_list_json, output_file_path);
}
@@ -50,9 +45,7 @@ hailo_status DownloadActionListCommand::execute(Device &device, const std::strin
hailo_status DownloadActionListCommand::execute(Device &device, std::shared_ptr<ConfiguredNetworkGroup> network_group,
uint16_t batch_size, ordered_json &action_list_json_param, double fps, uint32_t network_group_index)
{
auto expected_network_groups_list_json = parse_network_group(device, network_group, network_group_index);
CHECK_EXPECTED_AS_STATUS(expected_network_groups_list_json);
auto network_groups_list_json = expected_network_groups_list_json.release();
TRY(auto network_groups_list_json, parse_network_group(device, network_group, network_group_index));
network_groups_list_json[0]["batch_size"] = batch_size;
network_groups_list_json[0]["fps"] = fps;
action_list_json_param["runs"] += network_groups_list_json[0];
@@ -73,30 +66,25 @@ hailo_status DownloadActionListCommand::write_to_json(ordered_json &action_list_
Expected<ordered_json> DownloadActionListCommand::init_json_object(Device &device, const std::string &hef_file_path)
{
ordered_json action_list_json = {};
auto curr_time = CliCommon::current_time_to_string();
CHECK_EXPECTED(curr_time);
TRY(auto curr_time, CliCommon::current_time_to_string());
TRY(auto chip_arch, device.get_architecture());
auto chip_arch = device.get_architecture();
CHECK_EXPECTED(chip_arch);
unsigned int clock_cycle = 0;
// TODO - HRT-8046 Implement extended device info for hailo15
if (HAILO_ARCH_HAILO15H == chip_arch.value()) {
if (HAILO_ARCH_HAILO15H == chip_arch) {
clock_cycle = HAILO15_VPU_CORE_CPU_DEFAULT_FREQ_MHZ;
} else {
auto extended_info = device.get_extended_device_information();
CHECK_EXPECTED(extended_info);
clock_cycle = (extended_info->neural_network_core_clock_rate / NN_CORE_TO_TIMER_FREQ_FACTOR) / MHz;
TRY(auto extended_info, device.get_extended_device_information());
clock_cycle = (extended_info.neural_network_core_clock_rate / NN_CORE_TO_TIMER_FREQ_FACTOR) / MHz;
}
action_list_json["version"] = ACTION_LIST_FORMAT_VERSION();
action_list_json["creation_time"] = curr_time.release();
action_list_json["creation_time"] = curr_time;
action_list_json["clock_cycle_MHz"] = clock_cycle;
action_list_json["hef"] = json({});
if (!hef_file_path.empty()) {
auto hef_info = parse_hef_metadata(hef_file_path);
CHECK_EXPECTED(hef_info);
action_list_json["hef"] = hef_info.release();
TRY(action_list_json["hef"], parse_hef_metadata(hef_file_path));
}
action_list_json["runs"] = ordered_json::array();
@@ -123,12 +111,11 @@ Expected<ordered_json> DownloadActionListCommand::parse_hef_metadata(const std::
CHECK_AS_EXPECTED(is_valid_hef(hef_file_path), HAILO_INTERNAL_FAILURE,
"Hef '{}' is not valid", hef_file_path);
auto hef_md5 = calc_md5_hexdigest(hef_file_path);
CHECK_EXPECTED(hef_md5);
TRY(auto hef_md5, calc_md5_hexdigest(hef_file_path));
ordered_json hef_info_json = {
{"path", hef_file_path},
{"file_hash", hef_md5.release()}
{"file_hash", hef_md5}
};
return hef_info_json;
@@ -144,13 +131,12 @@ bool DownloadActionListCommand::is_valid_hef(const std::string &hef_file_path)
Expected<std::string> DownloadActionListCommand::calc_md5_hexdigest(const std::string &hef_file_path)
{
auto hef_bin = read_binary_file(hef_file_path);
CHECK_EXPECTED(hef_bin);
TRY(auto hef_bin, read_binary_file(hef_file_path));
MD5_CTX md5_ctx{};
MD5_SUM_t md5_sum{};
MD5_Init(&md5_ctx);
MD5_Update(&md5_ctx, hef_bin->data(), hef_bin->size());
MD5_Update(&md5_ctx, hef_bin.data(), hef_bin.size());
MD5_Final(md5_sum, &md5_ctx);
const bool LOWERCASE = false;
@@ -300,6 +286,18 @@ Expected<ordered_json> DownloadActionListCommand::parse_action_data(uint32_t bas
data_json = *reinterpret_cast<CONTEXT_SWITCH_DEFS__activate_ddr_buffer_output_data_t *>(action);
action_length_local = sizeof(CONTEXT_SWITCH_DEFS__activate_ddr_buffer_output_data_t);
break;
case CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_CACHE_INPUT:
data_json = *reinterpret_cast<CONTEXT_SWITCH_DEFS__activate_cache_input_data_t *>(action);
action_length_local = sizeof(CONTEXT_SWITCH_DEFS__activate_cache_input_data_t);
break;
case CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_CACHE_OUTPUT:
data_json = *reinterpret_cast<CONTEXT_SWITCH_DEFS__activate_cache_output_data_t *>(action);
action_length_local = sizeof(CONTEXT_SWITCH_DEFS__activate_cache_output_data_t);
break;
case CONTEXT_SWITCH_DEFS__ACTION_TYPE_WAIT_FOR_CACHE_UPDATED:
data_json = json({});
action_length_local = 0;
break;
case CONTEXT_SWITCH_DEFS__ACTION_TYPE_CHANGE_VDMA_TO_STREAM_MAPPING:
data_json = *reinterpret_cast<CONTEXT_SWITCH_DEFS__change_vdma_to_stream_mapping_data_t *>(action);
action_length_local = sizeof(CONTEXT_SWITCH_DEFS__change_vdma_to_stream_mapping_data_t);
@@ -402,12 +400,11 @@ Expected<ordered_json> DownloadActionListCommand::parse_single_action(uint32_t b
static const bool DONT_SET_SUB_ACTION_INDEX = false;
uint32_t action_data_length = 0;
auto json = parse_action_data(base_address, &context_action_list[current_buffer_offset], current_buffer_offset, &action_data_length,
action_header->action_type, time_stamp_local, 0, DONT_SET_SUB_ACTION_INDEX, is_repeated, num_repeated, sub_action_type);
CHECK_EXPECTED(json);
TRY(auto json, parse_action_data(base_address, &context_action_list[current_buffer_offset], current_buffer_offset, &action_data_length,
action_header->action_type, time_stamp_local, 0, DONT_SET_SUB_ACTION_INDEX, is_repeated, num_repeated, sub_action_type));
*action_length = static_cast<uint32_t>(action_length_local + action_data_length);
*time_stamp = time_stamp_local;
return json.release();
return json;
}
Expected<ordered_json> DownloadActionListCommand::parse_context(Device &device, uint32_t network_group_id,
@@ -417,47 +414,44 @@ Expected<ordered_json> DownloadActionListCommand::parse_context(Device &device,
uint32_t action_list_base_address = 0;
uint32_t batch_counter = 0;
auto action_list = device.download_context_action_list(network_group_id, converted_context_type, context_index,
&action_list_base_address, &batch_counter);
CHECK_EXPECTED(action_list);
TRY(auto action_list, device.download_context_action_list(network_group_id, converted_context_type, context_index,
&action_list_base_address, &batch_counter));
// Needs to fit in 2 bytes due to firmware limitation of action list size
CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(action_list->size()), HAILO_INTERNAL_FAILURE,
"Action list size is expected to fit in 2B. actual size is {}", action_list->size());
CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(action_list.size()), HAILO_INTERNAL_FAILURE,
"Action list size is expected to fit in 2B. actual size is {}", action_list.size());
ordered_json context_json {
{"action_list_base_address", action_list_base_address},
{"action_list_size", action_list->size() },
{"action_list_size", action_list.size() },
{"batch_counter", batch_counter},
{"context_name", context_name},
};
ordered_json action_list_json;
uint16_t current_buffer_offset = 0;
while (current_buffer_offset < action_list->size()) {
while (current_buffer_offset < action_list.size()) {
bool is_repeated = false;
uint8_t num_repeated = 0;
CONTEXT_SWITCH_DEFS__ACTION_TYPE_t sub_action_type = CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT;
uint32_t single_action_length = 0;
uint32_t timestamp = 0;
auto action_json = parse_single_action(action_list_base_address, action_list->data(),
current_buffer_offset, &single_action_length, &is_repeated, &num_repeated, &sub_action_type, &timestamp);
CHECK_EXPECTED(action_json);
TRY(auto action_json, parse_single_action(action_list_base_address, action_list.data(),
current_buffer_offset, &single_action_length, &is_repeated, &num_repeated, &sub_action_type, &timestamp));
current_buffer_offset = (uint16_t)(current_buffer_offset + single_action_length);
action_list_json.emplace_back(action_json.release());
action_list_json.emplace_back(std::move(action_json));
if (is_repeated) {
for (uint8_t index_in_repeated_block = 0; index_in_repeated_block < num_repeated; index_in_repeated_block++) {
uint32_t sub_action_length = 0;
auto repeated_action_json = parse_single_repeated_action(action_list_base_address,
action_list->data() + current_buffer_offset, current_buffer_offset, &sub_action_length,
sub_action_type, timestamp, index_in_repeated_block);
CHECK_EXPECTED(repeated_action_json);
TRY(auto repeated_action_json, parse_single_repeated_action(action_list_base_address,
action_list.data() + current_buffer_offset, current_buffer_offset, &sub_action_length,
sub_action_type, timestamp, index_in_repeated_block));
current_buffer_offset = (uint16_t)(current_buffer_offset + sub_action_length);
action_list_json.emplace_back(repeated_action_json.release());
action_list_json.emplace_back(std::move(repeated_action_json));
}
}
}
CHECK_AS_EXPECTED(current_buffer_offset == action_list->size(), HAILO_INTERNAL_FAILURE,
CHECK_AS_EXPECTED(current_buffer_offset == action_list.size(), HAILO_INTERNAL_FAILURE,
"PARSING ERROR ! Reached forbidden memory space");
context_json["actions"] = action_list_json;
@@ -473,24 +467,21 @@ double DownloadActionListCommand::get_accumulator_mean_value(const AccumulatorPt
Expected<ordered_json> DownloadActionListCommand::parse_network_groups(Device &device, const ConfiguredNetworkGroupVector &network_groups)
{
const auto number_of_dynamic_contexts_per_network_group = device.get_number_of_dynamic_contexts_per_network_group();
CHECK_EXPECTED(number_of_dynamic_contexts_per_network_group);
TRY(const auto number_of_dynamic_contexts_per_network_group, device.get_number_of_dynamic_contexts_per_network_group());
auto number_of_network_groups = (uint32_t)number_of_dynamic_contexts_per_network_group->size();
auto number_of_network_groups = (uint32_t)number_of_dynamic_contexts_per_network_group.size();
ordered_json network_group_list_json;
for (uint32_t network_group_index = 0; network_group_index < number_of_network_groups; network_group_index++) {
auto &network_group = (network_group_index < network_groups.size()) ? network_groups[network_group_index] : nullptr;
auto expected_json_file = parse_network_group(device, network_group, network_group_index);
CHECK_EXPECTED(expected_json_file);
network_group_list_json.emplace_back(expected_json_file.value());
TRY(auto json_file, parse_network_group(device, network_group, network_group_index));
network_group_list_json.emplace_back(std::move(json_file));
}
return network_group_list_json;
}
Expected<ordered_json> DownloadActionListCommand::parse_network_group(Device &device, const std::shared_ptr<ConfiguredNetworkGroup> network_group, uint32_t network_group_id)
{
const auto number_of_dynamic_contexts_per_network_group = device.get_number_of_dynamic_contexts_per_network_group();
CHECK_EXPECTED(number_of_dynamic_contexts_per_network_group);
TRY(const auto number_of_dynamic_contexts_per_network_group, device.get_number_of_dynamic_contexts_per_network_group());
ordered_json network_group_list_json;
// TODO: network_group_name via Hef::get_network_groups_names (HRT-5997)
@@ -510,30 +501,26 @@ Expected<ordered_json> DownloadActionListCommand::parse_network_group(Device &de
network_group->get_deactivation_time_accumulator());
}
auto activation_context_json = parse_context(device, network_group_id,
CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_ACTIVATION, 0, "activation");
CHECK_EXPECTED(activation_context_json);
network_group_json["contexts"].emplace_back(activation_context_json.release());
TRY(auto activation_context_json, parse_context(device, network_group_id,
CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_ACTIVATION, 0, "activation"));
network_group_json["contexts"].emplace_back(std::move(activation_context_json));
auto preliminary_context_json = parse_context(device, network_group_id,
CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_PRELIMINARY, 0, "preliminary");
CHECK_EXPECTED(preliminary_context_json);
network_group_json["contexts"].emplace_back(preliminary_context_json.release());
TRY(auto preliminary_context_json, parse_context(device, network_group_id,
CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_PRELIMINARY, 0, "preliminary"));
network_group_json["contexts"].emplace_back(std::move(preliminary_context_json));
const auto dynamic_contexts_count = number_of_dynamic_contexts_per_network_group.value()[network_group_id];
const auto dynamic_contexts_count = number_of_dynamic_contexts_per_network_group[network_group_id];
for (uint16_t context_index = 0; context_index < dynamic_contexts_count; context_index++) {
auto context_json = parse_context(device, network_group_id,
TRY(auto context_json, parse_context(device, network_group_id,
CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_DYNAMIC, context_index,
fmt::format("dynamic_{}", context_index));
CHECK_EXPECTED(context_json);
fmt::format("dynamic_{}", context_index)));
network_group_json["contexts"].emplace_back(context_json.release());
network_group_json["contexts"].emplace_back(std::move(context_json));
}
auto batch_switching_context_json = parse_context(device, network_group_id,
CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_BATCH_SWITCHING, 0, "batch_switching");
CHECK_EXPECTED(batch_switching_context_json);
network_group_json["contexts"].emplace_back(batch_switching_context_json.release());
TRY(auto batch_switching_context_json, parse_context(device, network_group_id,
CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_BATCH_SWITCHING, 0, "batch_switching"));
network_group_json["contexts"].emplace_back(std::move(batch_switching_context_json));
network_group_list_json.emplace_back(network_group_json);
@@ -595,6 +582,19 @@ void to_json(json &j, const CONTEXT_SWITCH_DEFS__activate_ddr_buffer_output_data
j["stream_index"] = data.stream_index;
}
void to_json(json &j, const CONTEXT_SWITCH_DEFS__activate_cache_input_data_t &data)
{
j = unpack_vdma_channel_id(data);
j["stream_index"] = data.stream_index;
}
void to_json(json &j, const CONTEXT_SWITCH_DEFS__activate_cache_output_data_t &data)
{
j = unpack_vdma_channel_id(data);
j["stream_index"] = data.stream_index;
}
// Needs to be backwards compatible, so we use "channel_index" instead of "vdma_channel_index".
void to_json(json& j, const CONTEXT_SWITCH_DEFS__fetch_cfg_channel_descriptors_action_data_t& data) {
uint8_t engine_index = 0;

View File

@@ -84,6 +84,9 @@ static std::pair<CONTEXT_SWITCH_DEFS__ACTION_TYPE_t, std::string> mapping[] = {
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_INTER_CONTEXT_OUTPUT, "activate_inter_context_output"},
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_DDR_BUFFER_INPUT, "activate_ddr_buffer_input"},
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_DDR_BUFFER_OUTPUT, "activate_ddr_buffer_output"},
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_CACHE_INPUT, "activate_cache_input"},
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_CACHE_OUTPUT, "activate_cache_output"},
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_WAIT_FOR_CACHE_UPDATED, "wait_for_cache_updated"},
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_DEACTIVATE_VDMA_CHANNEL, "deactivate_vdma_channel"},
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_VALIDATE_VDMA_CHANNEL, "validate_vdma_channel"},
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_CHANGE_VDMA_TO_STREAM_MAPPING, "change_vdma_to_stream_mapping"},
@@ -137,6 +140,8 @@ void to_json(json &j, const CONTEXT_SWITCH_DEFS__activate_ddr_buffer_input_data_
void to_json(json &j, const CONTEXT_SWITCH_DEFS__activate_boundary_output_data_t &data);
void to_json(json &j, const CONTEXT_SWITCH_DEFS__activate_inter_context_output_data_t &data);
void to_json(json &j, const CONTEXT_SWITCH_DEFS__activate_ddr_buffer_output_data_t &data);
void to_json(json &j, const CONTEXT_SWITCH_DEFS__activate_cache_input_data_t &data);
void to_json(json &j, const CONTEXT_SWITCH_DEFS__activate_cache_output_data_t &data);
void to_json(json &j, const CONTEXT_SWITCH_DEFS__enable_lcu_action_default_data_t &data);
void to_json(json &j, const CONTEXT_SWITCH_DEFS__enable_lcu_action_non_default_data_t &data);
void to_json(json &j, const CONTEXT_SWITCH_DEFS__disable_lcu_action_data_t &data);

View File

@@ -22,12 +22,11 @@ hailo_status FwConfigReadSubcommand::execute_on_device(Device &device)
CHECK_SUCCESS(status,
"'fw-config read' command should get a specific device-id.");
auto user_config_buffer = device.read_user_config();
CHECK_EXPECTED_AS_STATUS(user_config_buffer, "Failed reading user config from device");
TRY(auto user_config_buffer, device.read_user_config(), "Failed reading user config from device");
status = FwConfigJsonSerializer::deserialize_config(
*reinterpret_cast<USER_CONFIG_header_t*>(user_config_buffer->data()),
user_config_buffer->size(), m_output_file);
*reinterpret_cast<USER_CONFIG_header_t*>(user_config_buffer.data()),
user_config_buffer.size(), m_output_file);
CHECK_SUCCESS(status);
return HAILO_SUCCESS;
@@ -43,20 +42,16 @@ FwConfigWriteSubcommand::FwConfigWriteSubcommand(CLI::App &parent_app) :
hailo_status FwConfigWriteSubcommand::execute_on_device(Device &device)
{
auto config_buffer = Buffer::create(FLASH_USER_CONFIG_SECTION_SIZE);
CHECK_EXPECTED_AS_STATUS(config_buffer);
TRY(auto config_buffer, Buffer::create(FLASH_USER_CONFIG_SECTION_SIZE));
TRY(auto config_size, FwConfigJsonSerializer::serialize_config(
*reinterpret_cast<USER_CONFIG_header_t*>(config_buffer.data()), config_buffer.size(), m_input_file));
auto config_size = FwConfigJsonSerializer::serialize_config(
*reinterpret_cast<USER_CONFIG_header_t*>(config_buffer->data()), config_buffer->size(), m_input_file);
CHECK_EXPECTED_AS_STATUS(config_size);
// We only need to write 'config_size' bytes from config_buffer, so we "resize" the buffer
CHECK(config_buffer.size() >= config_size, HAILO_INTERNAL_FAILURE,
"Unexpected config size {} (max_size={})", config_size, config_buffer.size());
TRY(auto resized_config_buffer, Buffer::create(config_buffer.data(), config_size));
// We only need to write config_size.value() bytes from config_buffer, so we "resize" the buffer
CHECK(config_buffer->size() >= config_size.value(), HAILO_INTERNAL_FAILURE,
"Unexpected config size {} (max_size={})", config_size.value(), config_buffer->size());
auto resized_config_buffer = Buffer::create(config_buffer->data(), config_size.value());
CHECK_EXPECTED_AS_STATUS(resized_config_buffer);
hailo_status status = device.write_user_config(MemoryView(resized_config_buffer.value()));
hailo_status status = device.write_user_config(MemoryView(resized_config_buffer));
CHECK_SUCCESS(status, "Failed writing user firmware configuration to device");
return HAILO_SUCCESS;
@@ -74,17 +69,15 @@ FwConfigSerializeSubcommand::FwConfigSerializeSubcommand(CLI::App &parent_app) :
hailo_status FwConfigSerializeSubcommand::execute()
{
auto config_buffer = Buffer::create(FLASH_USER_CONFIG_SECTION_SIZE);
CHECK_EXPECTED_AS_STATUS(config_buffer);
TRY(auto config_buffer, Buffer::create(FLASH_USER_CONFIG_SECTION_SIZE));
USER_CONFIG_header_t *config_header = reinterpret_cast<USER_CONFIG_header_t*>(config_buffer->data());
auto config_size = FwConfigJsonSerializer::serialize_config(*config_header, config_buffer->size(), m_input_file);
CHECK_EXPECTED_AS_STATUS(config_size);
USER_CONFIG_header_t *config_header = reinterpret_cast<USER_CONFIG_header_t*>(config_buffer.data());
TRY(auto config_size, FwConfigJsonSerializer::serialize_config(*config_header, config_buffer.size(), m_input_file));
std::ofstream ofs(m_output_file, std::ios::out | std::ios::binary);
CHECK(ofs.good(), HAILO_OPEN_FILE_FAILURE, "Failed opening file: {}, with errno: {}", m_output_file, errno);
ofs.write(reinterpret_cast<char*>(config_header), config_size.value());
ofs.write(reinterpret_cast<char*>(config_header), config_size);
CHECK(ofs.good(), HAILO_FILE_OPERATION_FAILURE,
"Failed writing binary firmware configuration to file: {}, with errno: {}", m_output_file, errno);

View File

@@ -110,18 +110,17 @@ hailo_status FwConfigJsonSerializer::dump_config(const ordered_json &config_json
hailo_status FwConfigJsonSerializer::deserialize_config(const USER_CONFIG_header_t &user_config_header, size_t config_size, const std::string &file_path)
{
try {
auto categories = get_deserialize_vector();
CHECK_EXPECTED_AS_STATUS(categories);
TRY(const auto categories, get_deserialize_vector());
ordered_json config_json;
size_t current_deserialized_data_size = 0;
uintptr_t current_entry_offset = (uintptr_t)(&(user_config_header.entries));
for (size_t i = 0; i < user_config_header.entry_count; i++) {
USER_CONFIG_ENTRY_t *config_entry = reinterpret_cast<USER_CONFIG_ENTRY_t*>(current_entry_offset);
CHECK(config_entry->category < categories->size(), HAILO_INTERNAL_FAILURE,
"Category id is out of bounds. Category id = {}, Max category id = {}", config_entry->category, (categories->size()-1));
CHECK(config_entry->category < categories.size(), HAILO_INTERNAL_FAILURE,
"Category id is out of bounds. Category id = {}, Max category id = {}", config_entry->category, (categories.size()-1));
auto category = categories.value()[config_entry->category];
auto category = categories[config_entry->category];
CHECK(config_entry->entry_id < category.size(), HAILO_INTERNAL_FAILURE,
"Entry id is out of bounds. Entry id = {}, Max entry id = {}", config_entry->entry_id, (category.size() - 1));
@@ -156,74 +155,51 @@ hailo_status FwConfigJsonSerializer::deserialize_entry(ordered_json &config_json
entry_definition["size"].get<uint32_t>();
if (deserialize_as == "str") {
auto str_val = deserialize_str(entry_value, size);
CHECK_EXPECTED_AS_STATUS(str_val);
config_json[category_name][entry_name] = str_val.value();
TRY(config_json[category_name][entry_name], deserialize_str(entry_value, size));
}
else if (deserialize_as == "bool") {
auto bool_val = deserialize_bool(entry_value, size);
CHECK_EXPECTED_AS_STATUS(bool_val);
config_json[category_name][entry_name] = bool_val.value();
TRY(config_json[category_name][entry_name], deserialize_bool(entry_value, size));
}
else if (deserialize_as == "int") {
auto int_val = deserialize_int(entry_value, size);
CHECK_EXPECTED_AS_STATUS(int_val);
config_json[category_name][entry_name] = int_val.value();
TRY(config_json[category_name][entry_name], deserialize_int(entry_value, size));
}
else if (deserialize_as == "i2c_speed") {
auto i2c_speed_val = deserialize_i2c_speed(entry_value, size);
CHECK_EXPECTED_AS_STATUS(i2c_speed_val);
config_json[category_name][entry_name]["value"] = i2c_speed_val.value();
TRY(config_json[category_name][entry_name]["value"], deserialize_i2c_speed(entry_value, size));
}
else if (deserialize_as == "supported_aspm_states") {
auto supported_aspm_states_val = deserialize_supported_aspm_states(entry_value, size);
CHECK_EXPECTED_AS_STATUS(supported_aspm_states_val);
config_json[category_name][entry_name]["value"] = supported_aspm_states_val.value();
TRY(config_json[category_name][entry_name]["value"], deserialize_supported_aspm_states(entry_value, size));
}
else if (deserialize_as == "supported_aspm_l1_substates") {
auto supported_aspm_l1_substates_val = deserialize_supported_aspm_l1_substates(entry_value, size);
CHECK_EXPECTED_AS_STATUS(supported_aspm_l1_substates_val);
config_json[category_name][entry_name]["value"] = supported_aspm_l1_substates_val.value();
TRY(config_json[category_name][entry_name]["value"],
deserialize_supported_aspm_l1_substates(entry_value, size));
}
else if (deserialize_as == "ipv4") {
auto ipv4_val = deserialize_ipv4(entry_value, size);
CHECK_EXPECTED_AS_STATUS(ipv4_val);
config_json[category_name][entry_name]["value"] = ipv4_val.value();
TRY(config_json[category_name][entry_name]["value"], deserialize_ipv4(entry_value, size));
}
else if (deserialize_as == "mac_address") {
auto mac_address_val = deserialize_mac_address(entry_value, size);
CHECK_EXPECTED_AS_STATUS(mac_address_val);
config_json[category_name][entry_name]["value"] = mac_address_val.value();
TRY(config_json[category_name][entry_name]["value"], deserialize_mac_address(entry_value, size));
}
else if (deserialize_as == "clock_frequency") {
auto clock_frequency_val = deserialize_clock_frequency(entry_value, size);
CHECK_EXPECTED_AS_STATUS(clock_frequency_val);
config_json[category_name][entry_name]["value"] = clock_frequency_val.value();
TRY(config_json[category_name][entry_name]["value"],
deserialize_clock_frequency(entry_value, size));
}
else if (deserialize_as == "logger_level") {
auto logger_level = deserialize_logger_level(entry_value, size);
CHECK_EXPECTED_AS_STATUS(logger_level);
config_json[category_name][entry_name]["value"] = logger_level.value();
TRY(config_json[category_name][entry_name]["value"], deserialize_logger_level(entry_value, size));
}
else if (deserialize_as == "watchdog_mode") {
auto watchdog_mode_val = deserialize_watchdog_mode(entry_value, size);
CHECK_EXPECTED_AS_STATUS(watchdog_mode_val);
config_json[category_name][entry_name]["value"] = watchdog_mode_val.value();
TRY(config_json[category_name][entry_name]["value"], deserialize_watchdog_mode(entry_value, size));
}
else if (deserialize_as == "overcurrent_parameters_source") {
auto overcurrent_parameters_source_val = deserialize_overcurrent_parameters_source(entry_value, size);
CHECK_EXPECTED_AS_STATUS(overcurrent_parameters_source_val);
config_json[category_name][entry_name]["value"] = overcurrent_parameters_source_val.value();
TRY(config_json[category_name][entry_name]["value"],
deserialize_overcurrent_parameters_source(entry_value, size));
}
else if (deserialize_as == "temperature_parameters_source") {
auto temperature_parameters_source_val = deserialize_temperature_parameters_source(entry_value, size);
CHECK_EXPECTED_AS_STATUS(temperature_parameters_source_val);
config_json[category_name][entry_name]["value"] = temperature_parameters_source_val.value();
TRY(config_json[category_name][entry_name]["value"],
deserialize_temperature_parameters_source(entry_value, size));
}
else if (deserialize_as == "conversion_time") {
auto conversion_time_val = deserialize_conversion_time(entry_value, size);
CHECK_EXPECTED_AS_STATUS(conversion_time_val);
config_json[category_name][entry_name]["value"] = conversion_time_val.value();
TRY(config_json[category_name][entry_name]["value"],
deserialize_conversion_time(entry_value, size));
}
else {
LOGGER__ERROR("Failed deserializing entry. Serialization format {} not found", deserialize_as);
@@ -240,9 +216,8 @@ Expected<json> FwConfigJsonSerializer::deserialize_str(uint8_t *entry_value, uin
Expected<json> FwConfigJsonSerializer::deserialize_bool(uint8_t *entry_value, uint32_t size)
{
auto bool_val = get_int_value<uint8_t>(entry_value, size);
CHECK_EXPECTED(bool_val);
json bool_str = bool_val.value() ? true : false;
TRY(const auto bool_val, get_int_value<uint8_t>(entry_value, size));
json bool_str = bool_val ? true : false;
return bool_str;
}
@@ -273,10 +248,9 @@ Expected<json> FwConfigJsonSerializer::deserialize_mac_address(uint8_t *entry_va
Expected<json> FwConfigJsonSerializer::deserialize_supported_aspm_states(uint8_t *entry_value, uint32_t size)
{
auto aspm_state = get_int_value<uint8_t>(entry_value, size);
CHECK_EXPECTED(aspm_state);
TRY(const auto aspm_state, get_int_value<uint8_t>(entry_value, size));
switch (static_cast<PCIE_CONFIG_SUPPOPRTED_ASPM_STATES_t>(aspm_state.value())) {
switch (static_cast<PCIE_CONFIG_SUPPOPRTED_ASPM_STATES_t>(aspm_state)) {
case ASPM_DISABLED:
return json("ASPM DISABLED");
case ASPM_L1_ONLY:
@@ -291,10 +265,9 @@ Expected<json> FwConfigJsonSerializer::deserialize_supported_aspm_states(uint8_t
Expected<json> FwConfigJsonSerializer::deserialize_supported_aspm_l1_substates(uint8_t *entry_value, uint32_t size)
{
auto aspm_l1_substate = get_int_value<uint8_t>(entry_value, size);
CHECK_EXPECTED(aspm_l1_substate);
TRY(const auto aspm_l1_substate, get_int_value<uint8_t>(entry_value, size));
switch (static_cast<PCIE_CONFIG_SUPPOPRTED_L1_ASPM_SUBSTATES_t>(aspm_l1_substate.value())) {
switch (static_cast<PCIE_CONFIG_SUPPOPRTED_L1_ASPM_SUBSTATES_t>(aspm_l1_substate)) {
case ASPM_L1_SUBSTATES_DISABLED:
return json("ASPM L1 SUBSTATES DISABLED");
case ASPM_L1_SUBSTATES_L11_ONLY:
@@ -309,10 +282,9 @@ Expected<json> FwConfigJsonSerializer::deserialize_supported_aspm_l1_substates(u
Expected<json> FwConfigJsonSerializer::deserialize_clock_frequency(uint8_t *entry_value, uint32_t size)
{
auto clock_frequency = get_int_value<uint32_t>(entry_value, size);
CHECK_EXPECTED(clock_frequency);
TRY(const auto clock_frequency, get_int_value<uint32_t>(entry_value, size));
switch (clock_frequency.value()) {
switch (clock_frequency) {
case SOC__NN_CLOCK_400MHz:
return json("400MHZ");
case SOC__NN_CLOCK_375MHz:
@@ -341,10 +313,9 @@ Expected<json> FwConfigJsonSerializer::deserialize_clock_frequency(uint8_t *entr
Expected<json> FwConfigJsonSerializer::deserialize_watchdog_mode(uint8_t *entry_value, uint32_t size)
{
auto watchdog_mode = get_int_value<uint8_t>(entry_value, size);
CHECK_EXPECTED(watchdog_mode);
TRY(const auto watchdog_mode, get_int_value<uint8_t>(entry_value, size));
switch (static_cast<WD_SERVICE_wd_mode_t>(watchdog_mode.value())) {
switch (static_cast<WD_SERVICE_wd_mode_t>(watchdog_mode)) {
case WD_SERVICE_MODE_HW_SW:
return json("WD MODE HW SW");
case WD_SERVICE_MODE_HW_ONLY:
@@ -357,10 +328,9 @@ Expected<json> FwConfigJsonSerializer::deserialize_watchdog_mode(uint8_t *entry_
Expected<json> FwConfigJsonSerializer::deserialize_i2c_speed(uint8_t *entry_value, uint32_t size)
{
auto i2c_speed = get_int_value<uint8_t>(entry_value, size);
CHECK_EXPECTED(i2c_speed);
TRY(const auto i2c_speed, get_int_value<uint8_t>(entry_value, size));
switch (static_cast<i2c_speed_mode_t>(i2c_speed.value())) {
switch (static_cast<i2c_speed_mode_t>(i2c_speed)) {
case I2C_SPEED_STANDARD:
return json("I2C SPEED STANDARD");
case I2C_SPEED_FAST:
@@ -373,10 +343,9 @@ Expected<json> FwConfigJsonSerializer::deserialize_i2c_speed(uint8_t *entry_valu
Expected<json> FwConfigJsonSerializer::deserialize_logger_level(uint8_t *entry_value, uint32_t size)
{
auto logger_level = get_int_value<uint8_t>(entry_value, size);
CHECK_EXPECTED(logger_level);
TRY(const auto logger_level, get_int_value<uint8_t>(entry_value, size));
switch (static_cast<FW_LOGGER_LEVEL_t>(logger_level.value())) {
switch (static_cast<FW_LOGGER_LEVEL_t>(logger_level)) {
case FW_LOGGER_LEVEL_TRACE:
return json("TRACE");
case FW_LOGGER_LEVEL_DEBUG:
@@ -397,10 +366,9 @@ Expected<json> FwConfigJsonSerializer::deserialize_logger_level(uint8_t *entry_v
Expected<json> FwConfigJsonSerializer::deserialize_overcurrent_parameters_source(uint8_t *entry_value, uint32_t size)
{
auto overcurrent_parameters_source = get_int_value<uint8_t>(entry_value, size);
CHECK_EXPECTED(overcurrent_parameters_source);
TRY(const auto overcurrent_parameters_source, get_int_value<uint8_t>(entry_value, size));
switch (static_cast<OVERCURRENT_parameters_source_t>(overcurrent_parameters_source.value())) {
switch (static_cast<OVERCURRENT_parameters_source_t>(overcurrent_parameters_source)) {
case OVERCURRENT_PARAMETERS_SOURCE_FW_VALUES:
return json("FW VALUES");
case OVERCURRENT_PARAMETERS_SOURCE_USER_CONFIG_VALUES:
@@ -417,10 +385,9 @@ Expected<json> FwConfigJsonSerializer::deserialize_overcurrent_parameters_source
Expected<json> FwConfigJsonSerializer::deserialize_temperature_parameters_source(uint8_t *entry_value, uint32_t size)
{
auto temperature_parameters_source = get_int_value<uint8_t>(entry_value, size);
CHECK_EXPECTED(temperature_parameters_source);
TRY(const auto temperature_parameters_source, get_int_value<uint8_t>(entry_value, size));
switch (static_cast<TEMPERATURE_PROTECTION_parameters_source_t>(temperature_parameters_source.value())) {
switch (static_cast<TEMPERATURE_PROTECTION_parameters_source_t>(temperature_parameters_source)) {
case TEMPERATURE_PROTECTION_PARAMETERS_SOURCE_FW_VALUES:
return json("FW VALUES");
case TEMPERATURE_PROTECTION_PARAMETERS_SOURCE_USER_CONFIG_VALUES:
@@ -433,9 +400,8 @@ Expected<json> FwConfigJsonSerializer::deserialize_temperature_parameters_source
Expected<json> FwConfigJsonSerializer::deserialize_conversion_time(uint8_t *entry_value, uint32_t size)
{
auto conversion_time = get_int_value<uint32_t>(entry_value, size);
CHECK_EXPECTED(conversion_time);
auto conversion_time_value = static_cast<OVERCURRENT_conversion_time_us_t>(conversion_time.value());
TRY(const auto conversion_time, get_int_value<uint32_t>(entry_value, size));
auto conversion_time_value = static_cast<OVERCURRENT_conversion_time_us_t>(conversion_time);
if (conversion_time_value == OVERCURRENT_CONVERSION_PERIOD_140US ||
conversion_time_value == OVERCURRENT_CONVERSION_PERIOD_204US ||
@@ -459,21 +425,18 @@ Expected<json> FwConfigJsonSerializer::deserialize_int(uint8_t *entry_value, uin
switch (size) {
case sizeof(uint8_t):
{
auto uint8_val = get_int_value<uint8_t>(entry_value, size);
CHECK_EXPECTED(uint8_val);
return json(uint8_val.value());
TRY(const auto uint8_val, get_int_value<uint8_t>(entry_value, size));
return json(uint8_val);
}
case sizeof(uint16_t):
{
auto uint16_val = get_int_value<uint16_t>(entry_value, size);
CHECK_EXPECTED(uint16_val);
return json(uint16_val.value());
TRY(const auto uint16_val, get_int_value<uint16_t>(entry_value, size));
return json(uint16_val);
}
case sizeof(uint32_t):
{
auto uint32_val = get_int_value<uint32_t>(entry_value, size);
CHECK_EXPECTED(uint32_val);
return json(uint32_val.value());
TRY(const auto uint32_val, get_int_value<uint32_t>(entry_value, size));
return json(uint32_val);
}
default:
LOGGER__ERROR("Failed deserializing int value");
@@ -487,20 +450,17 @@ Expected<uint32_t> FwConfigJsonSerializer::serialize_config(USER_CONFIG_header_t
size_t data_size = sizeof(USER_CONFIG_header_t);
try {
auto config_json = FwConfigJsonSerializer::read_json_file(file_path);
CHECK_EXPECTED(config_json);
TRY_V(const auto config_json, FwConfigJsonSerializer::read_json_file(file_path));
TRY(auto definitions, FwConfigJsonSerializer::get_serialize_map());
auto definitions = FwConfigJsonSerializer::get_serialize_map();
CHECK_EXPECTED(definitions);
user_config_header.version = definitions.value()["version"]["value"].get<uint32_t>();
user_config_header.version = definitions["version"]["value"].get<uint32_t>();
user_config_header.magic = USER_CONFIG_MAGIC;
user_config_header.entry_count = 0;
uintptr_t current_entry_offset = (uintptr_t)(&(user_config_header.entries));
for (auto &config_category : config_json->items()) {
for (auto &config_category : config_json.items()) {
for (auto &config_entry : config_category.value().items()) {
ordered_json entry_definition = definitions.value()[config_category.key()][config_entry.key()];
ordered_json entry_definition = definitions[config_category.key()][config_entry.key()];
USER_CONFIG_ENTRY_t *curr_entry = (USER_CONFIG_ENTRY_t *)current_entry_offset;
curr_entry->entry_size = entry_definition.contains("length") ?
(entry_definition["length"].get<uint32_t>() * entry_definition["size"].get<uint32_t>()) :
@@ -603,6 +563,7 @@ hailo_status FwConfigJsonSerializer::serialize_str(USER_CONFIG_ENTRY_t &entry, c
CHECK(entry.entry_size >= str.length(), HAILO_INVALID_ARGUMENT,
"Failed serializing string value {}. String length must be equal or shorter than {}", str, entry.entry_size);
memset(&(entry.value), 0, entry.entry_size);
memcpy(&(entry.value), str.c_str(), str.length());
return HAILO_SUCCESS;

View File

@@ -73,9 +73,7 @@ static bool extended_device_information_is_array_not_empty(uint8_t *array_for_pr
static hailo_status print_extended_device_information(Device &device)
{
auto extended_info_expected = device.get_extended_device_information();
CHECK_EXPECTED_AS_STATUS(extended_info_expected, "Failed identify");
auto device_info = extended_info_expected.release();
TRY(auto device_info, device.get_extended_device_information());
// Print Board Extended information
std::cout << "Boot source: " << extended_device_information_boot_string(device_info.boot_source) << std::endl;
@@ -144,6 +142,8 @@ static std::string identity_arch_string(const hailo_device_identity_t &identity)
return "PLUTO";
case HAILO_ARCH_HAILO15M:
return "HAILO15M";
case HAILO_ARCH_HAILO10H:
return "HAILO10H";
default:
return "Unknown";
}
@@ -167,9 +167,7 @@ FwControlIdentifyCommand::FwControlIdentifyCommand(CLI::App &parent_app) :
hailo_status FwControlIdentifyCommand::execute_on_device(Device &device)
{
auto identity_expected = device.identify();
CHECK_EXPECTED_AS_STATUS(identity_expected, "Failed identify");
auto identity = identity_expected.release();
TRY(const auto identity, device.identify());
// Print board information
std::cout << "Identifying board" << std::endl;

View File

@@ -14,67 +14,81 @@
FwLoggerCommand::FwLoggerCommand(CLI::App &parent_app) :
DeviceCommand(parent_app.add_subcommand("fw-logger", "Download fw logs to a file")),
m_should_overwrite(false)
m_should_overwrite(false),
m_stdout(false),
m_continuos(false)
{
m_app->add_option("output_file", m_output_file, "File path to write binary firmware log into")
->required();
m_app->add_flag("--overwrite", m_should_overwrite, "Should overwrite the file or not");
m_app->add_flag("--stdout", m_stdout, "Write the output to stdout instead of a file");
m_app->add_flag("--continuos", m_continuos, "Write to file/stdout, until the process is killed");
}
hailo_status write_logs_to_file(Device &device, std::ofstream &ofs, hailo_cpu_id_t cpu_id){
hailo_status FwLoggerCommand::write_logs(Device &device, std::ostream *os, hailo_cpu_id_t cpu_id)
{
auto still_has_logs = true;
static const auto buffer_size = AMOUNT_OF_BYTES_TO_READ;
auto expected_buffer = Buffer::create(buffer_size);
CHECK_EXPECTED_AS_STATUS(expected_buffer);
Buffer buffer = expected_buffer.release();
TRY(auto buffer, Buffer::create(buffer_size));
while(still_has_logs) {
while (still_has_logs || m_continuos) {
MemoryView response_view(buffer);
auto response_size_expected = device.read_log(response_view, cpu_id);
CHECK_EXPECTED_AS_STATUS(response_size_expected);
auto response_size = response_size_expected.release();
TRY(const auto response_size, device.read_log(response_view, cpu_id));
if (response_size == 0) {
still_has_logs = false;
}
else {
ofs.write((char *)buffer.data(), response_size);
CHECK(ofs.good(), HAILO_FILE_OPERATION_FAILURE,
} else {
os->write((char *)buffer.data(), response_size);
CHECK(os->good(), HAILO_FILE_OPERATION_FAILURE,
"Failed writing firmware logger to output file, with errno: {}", errno);
os->flush();
}
}
return HAILO_SUCCESS;
}
void FwLoggerCommand::pre_execute()
{
if (m_stdout) {
// We want only the binary data from the logger to be written to stdout
DeviceCommand::m_show_stdout = false;
}
}
hailo_status FwLoggerCommand::execute_on_device(Device &device)
{
auto status = validate_specific_device_is_given();
CHECK_SUCCESS(status,
"'fw-logger' command should get a specific device-id");
auto ofs_flags = std::ios::out | std::ios::binary;
if (!m_should_overwrite){
ofs_flags |= std::ios::app;
// Initialization dependency
std::ofstream ofs;
std::ostream *os = nullptr;
if (m_stdout) {
os = &std::cout;
} else {
auto ofs_flags = std::ios::out | std::ios::binary;
if (!m_should_overwrite){
ofs_flags |= std::ios::app;
}
ofs.open(m_output_file, ofs_flags);
CHECK(ofs.good(), HAILO_OPEN_FILE_FAILURE, "Failed opening file: {}, with errno: {}", m_output_file, errno);
os = &ofs;
}
std::ofstream ofs(m_output_file, ofs_flags);
CHECK(ofs.good(), HAILO_OPEN_FILE_FAILURE, "Failed opening file: {}, with errno: {}", m_output_file, errno);
if (Device::Type::ETH == device.get_type()) {
LOGGER__ERROR("Read FW log is not supported over Eth device");
return HAILO_INVALID_OPERATION;
}
if (Device::Type::INTEGRATED != device.get_type()) {
status = write_logs_to_file(device, ofs, HAILO_CPU_ID_0);
status = write_logs(device, os, HAILO_CPU_ID_0);
if (status != HAILO_SUCCESS){
return status;
}
}
status = write_logs_to_file(device, ofs, HAILO_CPU_ID_1);
status = write_logs(device, os, HAILO_CPU_ID_1);
if (status != HAILO_SUCCESS){
return status;
}

View File

@@ -24,11 +24,16 @@ public:
explicit FwLoggerCommand(CLI::App &parent_app);
protected:
virtual void pre_execute() override;
virtual hailo_status execute_on_device(Device &device) override;
private:
std::string m_output_file;
bool m_should_overwrite;
bool m_stdout;
bool m_continuos;
hailo_status write_logs(Device &device, std::ostream *os, hailo_cpu_id_t cpu_id);
};
#endif /* _HAILO_FW_LOGGER_COMMAND_COMMAND_HPP_ */

View File

@@ -61,13 +61,10 @@ Expected<std::vector<std::unique_ptr<Device>>> create_devices(const hailo_device
{
std::vector<std::unique_ptr<Device>> res;
auto device_ids = get_device_ids(device_params);
CHECK_EXPECTED(device_ids);
for (auto device_id : device_ids.value()) {
auto device = Device::create(device_id);
CHECK_EXPECTED(device);
res.emplace_back(device.release());
TRY(const auto device_ids, get_device_ids(device_params));
for (auto device_id : device_ids) {
TRY(auto device, Device::create(device_id));
res.emplace_back(std::move(device));
}
return res;

View File

@@ -67,49 +67,44 @@ Expected<std::map<std::string, ConfigureNetworkParams>> get_configure_params(con
hailo_status HwInferEstimatorCommand::execute()
{
auto devices = create_devices(m_params.vdevice_params.device_params);
CHECK_EXPECTED_AS_STATUS(devices, "Failed creating device");
TRY(auto devices, create_devices(m_params.vdevice_params.device_params), "Failed creating device");
/* This function supports controls for multiple devices.
We validate there is only 1 device generated as we are on a single device flow */
CHECK(1 == devices->size(), HAILO_INTERNAL_FAILURE, "Hw infer command support only one physical device");
auto &device = devices.value()[0];
CHECK(1 == devices.size(), HAILO_INTERNAL_FAILURE, "Hw infer command support only one physical device");
auto &device = devices[0];
auto hef = Hef::create(m_params.hef_path.c_str());
CHECK_EXPECTED_AS_STATUS(hef, "Failed reading hef file {}", m_params.hef_path);
TRY(auto hef,
Hef::create(m_params.hef_path.c_str()), "Failed reading hef file {}", m_params.hef_path);
auto interface = device->get_default_streams_interface();
CHECK_EXPECTED_AS_STATUS(interface, "Failed to get default streams interface");
TRY(const auto interface, device->get_default_streams_interface(), "Failed to get default streams interface");
auto configure_params = get_configure_params(m_params, hef.value(), interface.value());
CHECK_EXPECTED_AS_STATUS(configure_params);
TRY(auto configure_params, get_configure_params(m_params, hef, interface));
/* Use Env var to configure all desc list with max depth */
setenv("HAILO_CONFIGURE_FOR_HW_INFER","Y",1);
auto network_group_list = device->configure(hef.value(), configure_params.value());
CHECK_EXPECTED_AS_STATUS(network_group_list, "Failed configure device from hef");
TRY(auto network_group_list,
device->configure(hef, configure_params), "Failed configure device from hef");
unsetenv("HAILO_CONFIGURE_FOR_HW_INFER");
CHECK(1 == network_group_list->size(), HAILO_INVALID_OPERATION,
CHECK(1 == network_group_list.size(), HAILO_INVALID_OPERATION,
"HW Inference is not supported on HEFs with multiple network groups");
auto network_group_ptr = network_group_list.value()[0];
auto network_group_ptr = network_group_list[0];
std::cout << "Starting HW infer Estimator..." << std::endl;
auto results = network_group_ptr->run_hw_infer_estimator();
CHECK_EXPECTED_AS_STATUS(results);
TRY(const auto results, network_group_ptr->run_hw_infer_estimator());
std::cout << std::endl;
std::cout << "======================" << std::endl;
std::cout << " Summary" << std::endl;
std::cout << "======================" << std::endl;
std::cout << "Batch count: " << results->batch_count << std::endl;
std::cout << "Total transfer size [KB]: " << (results->total_transfer_size / BYTES_TO_KILOBYTES) << std::endl;
std::cout << "Total frames passed: " << results->total_frames_passed << std::endl;
std::cout << "Total time [s]: " << results->time_sec << std::endl;
std::cout << "Total FPS [1/s]: " << results->fps << std::endl;
std::cout << "BW [Gbps]: " << results->BW_Gbps << std::endl;
std::cout << "Batch count: " << results.batch_count << std::endl;
std::cout << "Total transfer size [KB]: " << (results.total_transfer_size / BYTES_TO_KILOBYTES) << std::endl;
std::cout << "Total frames passed: " << results.total_frames_passed << std::endl;
std::cout << "Total time [s]: " << results.time_sec << std::endl;
std::cout << "Total FPS [1/s]: " << results.fps << std::endl;
std::cout << "BW [Gbps]: " << results.BW_Gbps << std::endl;
std::cout << "======================" << std::endl;
std::cout << " End of report" << std::endl;

View File

@@ -230,25 +230,20 @@ hailo_status MonCommand::run_monitor()
signal(SIGINT, signit_handler);
std::chrono::milliseconds time_interval = DEFAULT_SCHEDULER_MON_INTERVAL + EPSILON_TIME;
auto terminal_line_width_expected = get_terminal_line_width();
CHECK_EXPECTED_AS_STATUS(terminal_line_width_expected);
auto terminal_line_width = terminal_line_width_expected.release();
TRY(const auto terminal_line_width, get_terminal_line_width());
AlternativeTerminal alt_terminal;
while (keep_running) {
bool print_warning_msg = true; // Will change to false only if mon directory is valid and there are updated files in it.
auto mon_dir_valid = Filesystem::is_directory(SCHEDULER_MON_TMP_DIR);
CHECK_EXPECTED_AS_STATUS(mon_dir_valid);
TRY(const auto mon_dir_valid, Filesystem::is_directory(SCHEDULER_MON_TMP_DIR));
std::vector<ProtoMon> mon_messages;
if (mon_dir_valid.value()) {
auto scheduler_mon_files = Filesystem::get_latest_files_in_dir_flat(SCHEDULER_MON_TMP_DIR, time_interval);
CHECK_EXPECTED_AS_STATUS(scheduler_mon_files);
print_warning_msg = scheduler_mon_files->empty();
if (mon_dir_valid) {
TRY(auto scheduler_mon_files, Filesystem::get_latest_files_in_dir_flat(SCHEDULER_MON_TMP_DIR, time_interval));
print_warning_msg = scheduler_mon_files.empty();
mon_messages.reserve(scheduler_mon_files->size());
for (const auto &mon_file : scheduler_mon_files.value()) {
mon_messages.reserve(scheduler_mon_files.size());
for (const auto &mon_file : scheduler_mon_files) {
auto file = LockedFile::create(mon_file, "r");
if (HAILO_SUCCESS != file.status()) {
LOGGER__ERROR("Failed to open and lock file {}, with status: {}", mon_file, file.status());

View File

@@ -24,10 +24,8 @@ ParseHefCommand::ParseHefCommand(CLI::App &parent_app) :
hailo_status ParseHefCommand::execute()
{
auto is_dir = Filesystem::is_directory(m_hef_path.c_str());
CHECK_EXPECTED_AS_STATUS(is_dir, "Failed checking if path is directory");
if (is_dir.value()){
TRY(const auto is_dir, Filesystem::is_directory(m_hef_path.c_str()), "Failed checking if path is directory");
if (is_dir) {
return ParseHefCommand::parse_hefs_infos_dir(m_hef_path, m_parse_streams, m_parse_vstreams);
} else {
return ParseHefCommand::parse_hefs_info(m_hef_path, m_parse_streams, m_parse_vstreams);
@@ -36,13 +34,9 @@ hailo_status ParseHefCommand::execute()
hailo_status ParseHefCommand::parse_hefs_info(const std::string &hef_path, bool stream_infos, bool vstream_infos)
{
auto hef_exp = Hef::create(hef_path);
CHECK_EXPECTED_AS_STATUS(hef_exp, "Failed to parse HEF");
auto hef = hef_exp.release();
auto hef_info = hef.get_description(stream_infos, vstream_infos);
CHECK_EXPECTED_AS_STATUS(hef_info, "Failed to parse HEF");
std::cout << hef_info.release();
TRY(const auto hef, Hef::create(hef_path));
TRY(const auto hef_info, hef.get_description(stream_infos, vstream_infos));
std::cout << hef_info;
return HAILO_SUCCESS;
}
@@ -50,10 +44,9 @@ hailo_status ParseHefCommand::parse_hefs_infos_dir(const std::string &hef_path,
{
bool contains_hef = false;
std::string hef_dir = hef_path;
const auto files = Filesystem::get_files_in_dir_flat(hef_dir);
CHECK_EXPECTED_AS_STATUS(files);
TRY(const auto files, Filesystem::get_files_in_dir_flat(hef_dir));
for (const auto &full_path : files.value()) {
for (const auto &full_path : files) {
if (Filesystem::has_suffix(full_path, ".hef")) {
contains_hef = true;
std::cout << std::string(80, '*') << std::endl << "Parsing " << full_path << ":"<< std::endl;

View File

@@ -153,28 +153,27 @@ private:
{
const uint8_t const_byte = 0xAB;
auto constant_buffer = Buffer::create_shared(frame_size, const_byte, BufferStorageParams::create_dma());
CHECK_EXPECTED(constant_buffer);
TRY(auto constant_buffer,
Buffer::create_shared(frame_size, const_byte, BufferStorageParams::create_dma()));
return std::vector<BufferPtr>{constant_buffer.release()};
return std::vector<BufferPtr>{ constant_buffer };
}
static Expected<std::vector<BufferPtr>> create_dataset_from_input_file(const std::string &file_path, size_t frame_size)
{
auto buffer = read_binary_file(file_path);
CHECK_EXPECTED(buffer);
CHECK_AS_EXPECTED(0 == (buffer->size() % frame_size), HAILO_INVALID_ARGUMENT,
TRY(auto buffer, read_binary_file(file_path));
CHECK_AS_EXPECTED(0 == (buffer.size() % frame_size), HAILO_INVALID_ARGUMENT,
"Input file ({}) size {} must be a multiple of the frame size {}",
file_path, buffer->size(), frame_size);
file_path, buffer.size(), frame_size);
std::vector<BufferPtr> dataset;
const size_t frames_count = buffer->size() / frame_size;
const size_t frames_count = buffer.size() / frame_size;
dataset.reserve(frames_count);
for (size_t i = 0; i < frames_count; i++) {
const auto offset = frame_size * i;
auto frame_buffer = Buffer::create_shared(buffer->data() + offset, frame_size, BufferStorageParams::create_dma());
CHECK_EXPECTED(frame_buffer);
dataset.emplace_back(frame_buffer.release());
TRY(auto frame_buffer,
Buffer::create_shared(buffer.data() + offset, frame_size, BufferStorageParams::create_dma()));
dataset.emplace_back(frame_buffer);
}
return dataset;
@@ -183,9 +182,9 @@ private:
static Expected<std::vector<DmaMappedBuffer>> dma_map_dataset(const std::vector<BufferPtr> &dataset, VDevice &vdevice) {
std::vector<DmaMappedBuffer> dataset_mapped_buffers;
for (const auto &buffer : dataset) {
auto mapped_buffer = DmaMappedBuffer::create(vdevice, buffer->data(), buffer->size(), HAILO_DMA_BUFFER_DIRECTION_H2D);
CHECK_EXPECTED(mapped_buffer);
dataset_mapped_buffers.emplace_back(mapped_buffer.release());
TRY(auto mapped_buffer,
DmaMappedBuffer::create(vdevice, buffer->data(), buffer->size(), HAILO_DMA_BUFFER_DIRECTION_H2D));
dataset_mapped_buffers.emplace_back(std::move(mapped_buffer));
}
return dataset_mapped_buffers;
}

View File

@@ -132,9 +132,9 @@ Expected<std::vector<double>> LiveStats::get_last_measured_fps_per_network_group
CHECK_AS_EXPECTED(contains(m_tracks, NETWORK_STATS_LEVEL), HAILO_NOT_AVAILABLE);
for (size_t network_stats_track_index = 0; network_stats_track_index < m_tracks[NETWORK_STATS_LEVEL].size(); network_stats_track_index++) {
auto expected_fps = m_tracks[NETWORK_STATS_LEVEL][network_stats_track_index]->get_last_measured_fps();
CHECK_EXPECTED(expected_fps);
last_measured_fpss.emplace_back(expected_fps.release());
TRY(auto fps,
m_tracks[NETWORK_STATS_LEVEL][network_stats_track_index]->get_last_measured_fps());
last_measured_fpss.emplace_back(fps);
}
return last_measured_fpss;

View File

@@ -24,23 +24,19 @@ Expected<std::shared_ptr<MeasurementLiveTrack>> MeasurementLiveTrack::create_sha
{
std::shared_ptr<PowerMeasurement> power_measurement = nullptr;
if (measure_power) {
auto power_measurement_exp = PowerMeasurement::create_shared(device, HAILO_POWER_MEASUREMENT_TYPES__POWER);
CHECK_EXPECTED(power_measurement_exp);
power_measurement = power_measurement_exp.release();
TRY(power_measurement,
PowerMeasurement::create_shared(device, HAILO_POWER_MEASUREMENT_TYPES__POWER));
}
std::shared_ptr<PowerMeasurement> current_measurement = nullptr;
if (measure_current) {
auto current_measurement_exp = PowerMeasurement::create_shared(device, HAILO_POWER_MEASUREMENT_TYPES__CURRENT);
CHECK_EXPECTED(current_measurement_exp);
current_measurement = current_measurement_exp.release();
TRY(current_measurement,
PowerMeasurement::create_shared(device, HAILO_POWER_MEASUREMENT_TYPES__CURRENT));
}
std::shared_ptr<TemperatureMeasurement> temp_measurement = nullptr;
if (measure_temp) {
auto temp_measurement_exp = TemperatureMeasurement::create_shared(device);
CHECK_EXPECTED(temp_measurement_exp);
temp_measurement = temp_measurement_exp.release();
TRY(temp_measurement, TemperatureMeasurement::create_shared(device));
}
auto ptr = make_shared_nothrow<MeasurementLiveTrack>(power_measurement, current_measurement, temp_measurement, device.get_dev_id());

View File

@@ -138,12 +138,8 @@ Expected<std::string> NetworkRunner::get_network_group_name(const NetworkParams
Expected<std::shared_ptr<FullAsyncNetworkRunner>> FullAsyncNetworkRunner::create_shared(VDevice &vdevice,
NetworkParams params)
{
auto infer_model = vdevice.create_infer_model(params.hef_path);
CHECK_EXPECTED(infer_model);
auto infer_model_ptr = infer_model.release();
auto expected_net_group_name = get_network_group_name(params, infer_model_ptr->hef());
CHECK_EXPECTED(expected_net_group_name);
TRY(auto infer_model_ptr, vdevice.create_infer_model(params.hef_path));
TRY(auto net_group_name, get_network_group_name(params, infer_model_ptr->hef()));
/* Configure Params */
infer_model_ptr->set_batch_size(params.batch_size);
@@ -163,10 +159,9 @@ Expected<std::shared_ptr<FullAsyncNetworkRunner>> FullAsyncNetworkRunner::create
});
auto input_params = (input_params_it == params.vstream_params.end()) ? VStreamParams() : *input_params_it;
auto input_config = infer_model_ptr->input(input_name);
CHECK_EXPECTED(input_config);
input_config->set_format_order(input_params.params.user_buffer_format.order);
input_config->set_format_type(input_params.params.user_buffer_format.type);
TRY(auto input_config, infer_model_ptr->input(input_name));
input_config.set_format_order(input_params.params.user_buffer_format.order);
input_config.set_format_type(input_params.params.user_buffer_format.type);
}
for (const auto &output_name : infer_model_ptr->get_output_names()) {
auto output_params_it = std::find_if(params.vstream_params.begin(), params.vstream_params.end(),
@@ -175,18 +170,16 @@ Expected<std::shared_ptr<FullAsyncNetworkRunner>> FullAsyncNetworkRunner::create
});
auto output_params = (output_params_it == params.vstream_params.end()) ? VStreamParams() : *output_params_it;
auto output_config = infer_model_ptr->output(output_name);
CHECK_EXPECTED(output_config);
output_config->set_format_order(output_params.params.user_buffer_format.order);
output_config->set_format_type(output_params.params.user_buffer_format.type);
TRY(auto output_config, infer_model_ptr->output(output_name));
output_config.set_format_order(output_params.params.user_buffer_format.order);
output_config.set_format_type(output_params.params.user_buffer_format.type);
}
auto configured_model = infer_model_ptr->configure();
CHECK_EXPECTED(configured_model);
auto configured_infer_model_ptr = make_shared_nothrow<ConfiguredInferModel>(configured_model.release());
TRY(auto configured_model, infer_model_ptr->configure());
auto configured_infer_model_ptr = make_shared_nothrow<ConfiguredInferModel>(std::move(configured_model));
CHECK_NOT_NULL_AS_EXPECTED(configured_infer_model_ptr, HAILO_OUT_OF_HOST_MEMORY);
auto res = make_shared_nothrow<FullAsyncNetworkRunner>(params, expected_net_group_name.value(), vdevice,
auto res = make_shared_nothrow<FullAsyncNetworkRunner>(params, net_group_name, vdevice,
infer_model_ptr, configured_infer_model_ptr);
CHECK_NOT_NULL_AS_EXPECTED(res, HAILO_OUT_OF_HOST_MEMORY);
@@ -216,35 +209,27 @@ Expected<std::shared_ptr<NetworkRunner>> NetworkRunner::create_shared(VDevice &v
std::shared_ptr<NetworkRunner> net_runner_ptr = nullptr;
if (InferenceMode::FULL_ASYNC == final_net_params.mode) {
auto runner_exp = FullAsyncNetworkRunner::create_shared(vdevice, final_net_params);
CHECK_EXPECTED(runner_exp);
net_runner_ptr = runner_exp.release();
TRY(net_runner_ptr, FullAsyncNetworkRunner::create_shared(vdevice, final_net_params));
} else {
auto hef = Hef::create(final_net_params.hef_path);
CHECK_EXPECTED(hef);
auto expected_net_group_name = get_network_group_name(final_net_params, hef.value());
CHECK_EXPECTED(expected_net_group_name);
auto cfg_params = vdevice.create_configure_params(hef.value(), expected_net_group_name.value());
CHECK_EXPECTED(cfg_params);
cfg_params->batch_size = final_net_params.batch_size;
TRY(auto hef, Hef::create(final_net_params.hef_path));
TRY(auto net_group_name, get_network_group_name(final_net_params, hef));
TRY(auto cfg_params, vdevice.create_configure_params(hef, net_group_name));
cfg_params.batch_size = final_net_params.batch_size;
if (final_net_params.batch_size == HAILO_DEFAULT_BATCH_SIZE) {
// Changing batch_size to 1 (after configuring the vdevice) - as we iterate over 'final_net_params.batch_size' in latency measurements scenarios
final_net_params.batch_size = 1;
}
if (final_net_params.measure_hw_latency) {
cfg_params->latency |= HAILO_LATENCY_MEASURE;
cfg_params.latency |= HAILO_LATENCY_MEASURE;
}
if (final_net_params.is_async()) {
for (auto &stream_name_params_pair : cfg_params->stream_params_by_name) {
for (auto &stream_name_params_pair : cfg_params.stream_params_by_name) {
stream_name_params_pair.second.flags = HAILO_STREAM_FLAGS_ASYNC;
}
}
auto cfgr_net_groups = vdevice.configure(hef.value(), {{expected_net_group_name.value(), cfg_params.value()}});
CHECK_EXPECTED(cfgr_net_groups);
assert(1 == cfgr_net_groups->size());
auto cfgr_net_group = cfgr_net_groups.value()[0];
TRY(auto cfgr_net_groups, vdevice.configure(hef, {{ net_group_name, cfg_params }}));
assert(1 == cfgr_net_groups.size());
auto cfgr_net_group = cfgr_net_groups[0];
if (HAILO_SCHEDULING_ALGORITHM_NONE != final_net_params.scheduling_algorithm) {
CHECK_SUCCESS_AS_EXPECTED(cfgr_net_group->set_scheduler_threshold(final_net_params.scheduler_threshold));
@@ -260,11 +245,10 @@ Expected<std::shared_ptr<NetworkRunner>> NetworkRunner::create_shared(VDevice &v
for (auto &vstream_params : final_net_params.vstream_params) {
vstreams_params.emplace(vstream_params.name, vstream_params.params);
}
auto vstreams = create_vstreams(*cfgr_net_group, vstreams_params);
CHECK_EXPECTED(vstreams);
TRY(auto vstreams, create_vstreams(*cfgr_net_group, vstreams_params));
auto net_runner = make_shared_nothrow<FullSyncNetworkRunner>(final_net_params, expected_net_group_name.value(), vdevice,
std::move(vstreams->first), std::move(vstreams->second), cfgr_net_group);
auto net_runner = make_shared_nothrow<FullSyncNetworkRunner>(final_net_params, net_group_name, vdevice,
std::move(vstreams.first), std::move(vstreams.second), cfgr_net_group);
CHECK_NOT_NULL_AS_EXPECTED(net_runner, HAILO_OUT_OF_HOST_MEMORY);
net_runner_ptr = std::static_pointer_cast<NetworkRunner>(net_runner);
break;
@@ -279,7 +263,7 @@ Expected<std::shared_ptr<NetworkRunner>> NetworkRunner::create_shared(VDevice &v
auto output_streams = cfgr_net_group->get_output_streams();
CHECK_AS_EXPECTED(output_streams.size() > 0, HAILO_INTERNAL_FAILURE);
auto net_runner = make_shared_nothrow<RawNetworkRunner>(final_net_params, expected_net_group_name.value(), vdevice,
auto net_runner = make_shared_nothrow<RawNetworkRunner>(final_net_params, net_group_name, vdevice,
std::move(input_streams), std::move(output_streams), cfgr_net_group);
CHECK_NOT_NULL_AS_EXPECTED(net_runner, HAILO_OUT_OF_HOST_MEMORY);
net_runner_ptr = std::static_pointer_cast<NetworkRunner>(net_runner);
@@ -331,7 +315,7 @@ hailo_status NetworkRunner::run(EventPtr shutdown_event, LiveStats &live_stats,
if (!ang_exp) {
activation_barrier.terminate();
}
CHECK_EXPECTED_AS_STATUS(ang_exp);
CHECK_EXPECTED_AS_STATUS(ang_exp); // TODO (HRT-13278): Figure out how to remove CHECK_EXPECTED here
ang = ang_exp.release();
}
}
@@ -350,12 +334,11 @@ hailo_status NetworkRunner::run(EventPtr shutdown_event, LiveStats &live_stats,
if ((InferenceMode::RAW_ASYNC_SINGLE_THREAD == m_params.mode) || (InferenceMode::FULL_ASYNC == m_params.mode)) {
return run_single_thread_async_infer(shutdown_event, net_live_track);
} else {
auto threads = start_inference_threads(shutdown_event, net_live_track);
CHECK_EXPECTED_AS_STATUS(threads);
TRY(auto threads, start_inference_threads(shutdown_event, net_live_track));
CHECK_SUCCESS(shutdown_event->wait(HAILO_INFINITE_TIMEOUT));
stop();
return wait_for_threads(threads.value());
return wait_for_threads(threads);
}
}
@@ -390,9 +373,8 @@ Expected<std::pair<std::vector<InputVStream>, std::vector<OutputVStream>>> Netwo
size_t match_count = 0;
std::map<std::string, hailo_vstream_params_t> input_vstreams_params;
auto input_vstreams_info = net_group.get_input_vstream_infos();
CHECK_EXPECTED(input_vstreams_info);
for (auto &input_vstream_info : input_vstreams_info.value()) {
TRY(auto input_vstreams_info, net_group.get_input_vstream_infos());
for (auto &input_vstream_info : input_vstreams_info) {
if (params.end() != params.find(input_vstream_info.name)) {
match_count++;
input_vstreams_params.emplace(input_vstream_info.name, params.at(input_vstream_info.name));
@@ -402,9 +384,8 @@ Expected<std::pair<std::vector<InputVStream>, std::vector<OutputVStream>>> Netwo
}
std::map<std::string, hailo_vstream_params_t> output_vstreams_params;
auto output_vstreams_info = net_group.get_output_vstream_infos();
CHECK_EXPECTED(output_vstreams_info);
for (auto &output_vstream_info : output_vstreams_info.value()) {
TRY(auto output_vstreams_info, net_group.get_output_vstream_infos());
for (auto &output_vstream_info : output_vstreams_info) {
if (params.end() != params.find(output_vstream_info.name)) {
match_count++;
output_vstreams_params.emplace(output_vstream_info.name, params.at(output_vstream_info.name));
@@ -415,13 +396,10 @@ Expected<std::pair<std::vector<InputVStream>, std::vector<OutputVStream>>> Netwo
CHECK(match_count == params.size(), make_unexpected(HAILO_INVALID_ARGUMENT), "One of the params has an invalid vStream name");
auto input_vstreams = VStreamsBuilder::create_input_vstreams(net_group, input_vstreams_params);
CHECK_EXPECTED(input_vstreams);
TRY(auto input_vstreams, VStreamsBuilder::create_input_vstreams(net_group, input_vstreams_params));
TRY(auto output_vstreams, VStreamsBuilder::create_output_vstreams(net_group, output_vstreams_params));
auto output_vstreams = VStreamsBuilder::create_output_vstreams(net_group, output_vstreams_params);
CHECK_EXPECTED(output_vstreams);
return {{input_vstreams.release(), output_vstreams.release()}};//TODO: move? copy elision?
return std::make_pair(std::move(input_vstreams), std::move(output_vstreams));
}
const std::vector<hailo_status> NetworkRunner::ALLOWED_INFERENCE_RETURN_VALUES{
@@ -444,24 +422,22 @@ Expected<std::vector<AsyncThreadPtr<hailo_status>>> FullSyncNetworkRunner::start
std::vector<AsyncThreadPtr<hailo_status>> threads;
for (auto &input_vstream : m_input_vstreams) {
const auto vstream_params = get_params(input_vstream.name());
auto writer = WriterWrapper<InputVStream>::create(input_vstream, vstream_params, m_vdevice,
m_overall_latency_meter, m_params.framerate, SYNC_API);
CHECK_EXPECTED(writer);
TRY(auto writer, WriterWrapper<InputVStream>::create(input_vstream, vstream_params, m_vdevice,
m_overall_latency_meter, m_params.framerate, SYNC_API));
threads.emplace_back(std::make_unique<AsyncThread<hailo_status>>("WRITE",
[this, writer = writer.release(), shutdown_event]() mutable {
[this, writer, shutdown_event]() mutable {
return run_write(writer, shutdown_event, m_latency_barrier);
}));
}
bool first = true; //TODO: check with multiple outputs
for (auto &output_vstream : m_output_vstreams) {
auto reader = ReaderWrapper<OutputVStream>::create(output_vstream, m_vdevice,
m_overall_latency_meter, first ? net_live_track : nullptr, SYNC_API);
CHECK_EXPECTED(reader);
TRY(auto reader, ReaderWrapper<OutputVStream>::create(output_vstream, m_vdevice,
m_overall_latency_meter, first ? net_live_track : nullptr, SYNC_API));
threads.emplace_back(std::make_unique<AsyncThread<hailo_status>>("READ",
[this, reader=reader.release(), shutdown_event]() mutable {
[this, reader, shutdown_event]() mutable {
return run_read(reader, shutdown_event, m_latency_barrier);
}));
first = false;
@@ -552,8 +528,8 @@ Expected<AsyncInferJob> FullAsyncNetworkRunner::create_infer_job(const Configure
if (m_overall_latency_meter) {
m_overall_latency_meter->add_start_sample(std::chrono::steady_clock::now().time_since_epoch());
}
auto job = m_configured_infer_model->run_async(bindings, [=, &inference_status] (const AsyncInferCompletionInfo &completion_info) {
TRY(auto job, m_configured_infer_model->run_async(bindings, [=, &inference_status] (const AsyncInferCompletionInfo &completion_info) {
if (HAILO_SUCCESS != completion_info.status) {
inference_status = completion_info.status;
if (HAILO_STREAM_ABORT != completion_info.status) {
@@ -569,9 +545,8 @@ Expected<AsyncInferJob> FullAsyncNetworkRunner::create_infer_job(const Configure
so there's a circular dependency */
net_live_track->progress();
}
});
CHECK_EXPECTED(job);
return job.release();
}));
return job;
}
hailo_status FullAsyncNetworkRunner::run_single_thread_async_infer(EventPtr shutdown_event,
@@ -590,13 +565,10 @@ hailo_status FullAsyncNetworkRunner::run_single_thread_async_infer(EventPtr shut
status = m_configured_infer_model->set_scheduler_priority(m_params.scheduler_priority);
CHECK_SUCCESS(status);
} else {
auto guard_exp = ConfiguredInferModelActivationGuard::create(m_configured_infer_model);
CHECK_EXPECTED_AS_STATUS(guard_exp);
guard = guard_exp.release();
TRY(guard, ConfiguredInferModelActivationGuard::create(m_configured_infer_model));
}
auto bindings = m_configured_infer_model->create_bindings();
CHECK_EXPECTED_AS_STATUS(bindings);
TRY(auto bindings, m_configured_infer_model->create_bindings());
std::unordered_map<std::string, Buffer> input_buffers; // Keys are inputs names
std::vector<Buffer> output_buffers;
@@ -604,40 +576,36 @@ hailo_status FullAsyncNetworkRunner::run_single_thread_async_infer(EventPtr shut
const uint8_t const_byte = 0xAB;
for (const auto &name : get_input_names()) {
auto input_config = m_infer_model->input(name);
CHECK_EXPECTED_AS_STATUS(input_config);
TRY(auto input_config, m_infer_model->input(name));
auto params = get_params(name);
auto buffer = params.input_file_path.empty() ?
Buffer::create(input_config->get_frame_size(), const_byte, BufferStorageParams::create_dma()) :
read_binary_file(params.input_file_path, BufferStorageParams::create_dma());
CHECK_EXPECTED_AS_STATUS(buffer);
CHECK(0 == (buffer->size() % input_config->get_frame_size()), HAILO_INVALID_ARGUMENT,
"Size of data for input '{}' must be a multiple of the frame size {}. Received - {}", name, input_config->get_frame_size(), buffer->size());
input_buffers.emplace(name, buffer.release());
Buffer buffer {};
if (params.input_file_path.empty()) {
TRY(buffer, Buffer::create(input_config.get_frame_size(), const_byte, BufferStorageParams::create_dma()));
} else {
TRY(buffer, read_binary_file(params.input_file_path, BufferStorageParams::create_dma()));
}
CHECK(0 == (buffer.size() % input_config.get_frame_size()), HAILO_INVALID_ARGUMENT,
"Size of data for input '{}' must be a multiple of the frame size {}. Received - {}", name, input_config.get_frame_size(), buffer.size());
input_buffers.emplace(name, std::move(buffer));
for (uint32_t i = 0; i < (input_buffers.at(name).size() % input_config->get_frame_size()); i++) {
auto mapped_buffer = DmaMappedBuffer::create(m_vdevice, input_buffers.at(name).data() + (i * input_config->get_frame_size()),
input_config->get_frame_size(), HAILO_DMA_BUFFER_DIRECTION_H2D);
CHECK_EXPECTED_AS_STATUS(mapped_buffer);
dma_mapped_buffers.emplace_back(mapped_buffer.release());
for (uint32_t i = 0; i < (input_buffers.at(name).size() % input_config.get_frame_size()); i++) {
TRY(auto mapped_buffer, DmaMappedBuffer::create(m_vdevice, input_buffers.at(name).data() + (i * input_config.get_frame_size()),
input_config.get_frame_size(), HAILO_DMA_BUFFER_DIRECTION_H2D));
dma_mapped_buffers.emplace_back(std::move(mapped_buffer));
}
}
for (const auto &name : get_output_names()) {
auto output_config = m_infer_model->output(name);
CHECK_EXPECTED_AS_STATUS(output_config);
TRY(auto output_config, m_infer_model->output(name));
TRY(auto buffer, Buffer::create(output_config.get_frame_size(), 0, BufferStorageParams::create_dma()));
output_buffers.emplace_back(std::move(buffer));
auto buffer = Buffer::create(output_config->get_frame_size(), 0, BufferStorageParams::create_dma());
CHECK_EXPECTED_AS_STATUS(buffer);
output_buffers.emplace_back(buffer.release());
TRY(auto mapped_buffer, DmaMappedBuffer::create(m_vdevice, output_buffers.back().data(), output_buffers.back().size(),
HAILO_DMA_BUFFER_DIRECTION_D2H));
dma_mapped_buffers.emplace_back(std::move(mapped_buffer));
auto mapped_buffer = DmaMappedBuffer::create(m_vdevice, output_buffers.back().data(), output_buffers.back().size(),
HAILO_DMA_BUFFER_DIRECTION_D2H);
CHECK_EXPECTED_AS_STATUS(mapped_buffer);
dma_mapped_buffers.emplace_back(mapped_buffer.release());
CHECK_SUCCESS(bindings->output(name)->set_buffer(MemoryView(output_buffers.back())));
CHECK_SUCCESS(bindings.output(name)->set_buffer(MemoryView(output_buffers.back())));
}
FramerateThrottle frame_rate_throttle(m_params.framerate);
@@ -648,17 +616,14 @@ hailo_status FullAsyncNetworkRunner::run_single_thread_async_infer(EventPtr shut
while (HAILO_TIMEOUT == shutdown_event->wait(std::chrono::milliseconds(0)) && (HAILO_SUCCESS == inference_status)) {
for (uint32_t frames_in_cycle = 0; frames_in_cycle < m_params.batch_size; frames_in_cycle++) {
for (const auto &name : get_input_names()) {
auto input_config = m_infer_model->input(name);
CHECK_EXPECTED_AS_STATUS(input_config);
auto offset = (frame_id % (input_buffers.at(name).size() / input_config->get_frame_size())) * input_config->get_frame_size();
CHECK_SUCCESS(bindings->input(name)->set_buffer(MemoryView(input_buffers.at(name).data() + offset,
input_config->get_frame_size())));
TRY(auto input_config, m_infer_model->input(name));
auto offset = (frame_id % (input_buffers.at(name).size() / input_config.get_frame_size())) * input_config.get_frame_size();
CHECK_SUCCESS(bindings.input(name)->set_buffer(MemoryView(input_buffers.at(name).data() + offset,
input_config.get_frame_size())));
}
frame_id++;
if (HAILO_SUCCESS == m_configured_infer_model->wait_for_async_ready(DEFAULT_TRANSFER_TIMEOUT)) {
auto job_exp = create_infer_job(*bindings, net_live_track, frame_rate_throttle, inference_status);
CHECK_EXPECTED_AS_STATUS(job_exp);
last_job = job_exp.release();
TRY(last_job, create_infer_job(bindings, net_live_track, frame_rate_throttle, inference_status));
last_job.detach();
}
}
@@ -689,18 +654,17 @@ Expected<std::vector<AsyncThreadPtr<hailo_status>>> RawNetworkRunner::start_infe
std::vector<AsyncThreadPtr<hailo_status>> threads;
for (auto &input_stream : m_input_streams) {
const auto stream_params = get_params(input_stream.get().name());
auto writer = WriterWrapper<InputStream>::create(input_stream.get(), stream_params, m_vdevice,
m_overall_latency_meter, m_params.framerate, async_streams);
CHECK_EXPECTED(writer);
TRY(auto writer, WriterWrapper<InputStream>::create(input_stream.get(), stream_params, m_vdevice,
m_overall_latency_meter, m_params.framerate, async_streams));
if (async_streams) {
threads.emplace_back(std::make_unique<AsyncThread<hailo_status>>("WRITE_ASYNC",
[this, writer = writer.release(), shutdown_event]() mutable {
[this, writer, shutdown_event]() mutable {
return run_write_async(writer, shutdown_event, m_latency_barrier);
}));
} else {
threads.emplace_back(std::make_unique<AsyncThread<hailo_status>>("WRITE",
[this, writer = writer.release(), shutdown_event]() mutable {
[this, writer, shutdown_event]() mutable {
return run_write(writer, shutdown_event, m_latency_barrier);
}));
}
@@ -708,18 +672,17 @@ Expected<std::vector<AsyncThreadPtr<hailo_status>>> RawNetworkRunner::start_infe
bool first = true; //TODO: check with multiple outputs
for (auto &output_stream : m_output_streams) {
auto reader = ReaderWrapper<OutputStream>::create(output_stream.get(), m_vdevice,
m_overall_latency_meter, first ? net_live_track : nullptr, async_streams);
CHECK_EXPECTED(reader);
TRY(auto reader, ReaderWrapper<OutputStream>::create(output_stream.get(), m_vdevice,
m_overall_latency_meter, first ? net_live_track : nullptr, async_streams));
if (async_streams) {
threads.emplace_back(std::make_unique<AsyncThread<hailo_status>>("READ_ASYNC",
[this, reader=reader.release(), shutdown_event]() mutable {
[this, reader, shutdown_event]() mutable {
return run_read_async(reader, shutdown_event, m_latency_barrier);
}));
} else {
threads.emplace_back(std::make_unique<AsyncThread<hailo_status>>("READ",
[this, reader=reader.release(), shutdown_event]() mutable {
[this, reader, shutdown_event]() mutable {
return run_read(reader, shutdown_event, m_latency_barrier);
}));
}
@@ -739,37 +702,29 @@ hailo_status RawNetworkRunner::run_single_thread_async_infer(EventPtr shutdown_e
std::vector<SemaphorePtr> output_semaphores;
bool is_first_output = true;
for (auto &output_stream : m_output_streams) {
auto reader_wrapper = ReaderWrapper<OutputStream>::create(output_stream.get(), m_vdevice,
m_overall_latency_meter, is_first_output ? net_live_track : nullptr, ASYNC_API);
CHECK_EXPECTED_AS_STATUS(reader_wrapper);
TRY(auto reader_wrapper, ReaderWrapper<OutputStream>::create(output_stream.get(), m_vdevice,
m_overall_latency_meter, is_first_output ? net_live_track : nullptr, ASYNC_API));
is_first_output = false;
auto max_queue_size = reader_wrapper.value()->get().get_async_max_queue_size();
CHECK_EXPECTED_AS_STATUS(max_queue_size);
TRY(auto max_queue_size, reader_wrapper->get().get_async_max_queue_size());
TRY(auto semaphore, Semaphore::create_shared(static_cast<uint32_t>(max_queue_size)));
auto semaphore = Semaphore::create_shared(static_cast<uint32_t>(*max_queue_size));
CHECK_EXPECTED_AS_STATUS(semaphore);
output_semaphores.emplace_back(semaphore.release());
reader_wrappers.emplace_back(reader_wrapper.release());
output_semaphores.emplace_back(semaphore);
reader_wrappers.emplace_back(reader_wrapper);
}
// Build input wrappers
std::vector<WriterWrapperPtr<InputStream>> writer_wrappers;
std::vector<SemaphorePtr> input_semaphores;
for (auto &input_stream : m_input_streams) {
auto writer_wrapper = WriterWrapper<InputStream>::create(input_stream.get(),
get_params(input_stream.get().name()), m_vdevice, m_overall_latency_meter, m_params.framerate, ASYNC_API);
CHECK_EXPECTED_AS_STATUS(writer_wrapper);
TRY(auto writer_wrapper, WriterWrapper<InputStream>::create(input_stream.get(),
get_params(input_stream.get().name()), m_vdevice, m_overall_latency_meter, m_params.framerate, ASYNC_API));
auto max_queue_size = writer_wrapper.value()->get().get_async_max_queue_size();
CHECK_EXPECTED_AS_STATUS(max_queue_size);
TRY(auto max_queue_size, writer_wrapper->get().get_async_max_queue_size());
TRY(auto semaphore, Semaphore::create_shared(static_cast<uint32_t>(max_queue_size)));
auto semaphore = Semaphore::create_shared(static_cast<uint32_t>(*max_queue_size));
CHECK_EXPECTED_AS_STATUS(semaphore);
input_semaphores.emplace_back(semaphore.release());
writer_wrappers.emplace_back(writer_wrapper.release());
input_semaphores.emplace_back(semaphore);
writer_wrappers.emplace_back(writer_wrapper);
}
// Build waitables list with reference to previous input/output semaphores.
@@ -788,17 +743,16 @@ hailo_status RawNetworkRunner::run_single_thread_async_infer(EventPtr shutdown_e
// Inference
while (true) {
auto wait_index = wait_group.wait_any(HAILORTCLI_DEFAULT_TIMEOUT);
CHECK_EXPECTED_AS_STATUS(wait_index);
TRY(auto wait_index, wait_group.wait_any(HAILORTCLI_DEFAULT_TIMEOUT));
if (*wait_index == shutdown_index) {
if (wait_index == shutdown_index) {
// Stopping the network so we won't get timeout on the flush. The async operations may still be active
// (until network deactivation).
stop();
break;
} else if ((*wait_index >= output_index_start) && (*wait_index < input_index_start)) {
} else if ((wait_index >= output_index_start) && (wait_index < input_index_start)) {
// output is ready
const size_t output_index = *wait_index - output_index_start;
const size_t output_index = wait_index - output_index_start;
auto status = reader_wrappers[output_index]->read_async(
[semaphore=output_semaphores[output_index]](const OutputStream::CompletionInfo &) {
(void)semaphore->signal();
@@ -807,7 +761,7 @@ hailo_status RawNetworkRunner::run_single_thread_async_infer(EventPtr shutdown_e
CHECK_SUCCESS(status);
} else {
// input is ready
const size_t input_index = *wait_index - input_index_start;
const size_t input_index = wait_index - input_index_start;
auto status = writer_wrappers[input_index]->write_async(
[semaphore=input_semaphores[input_index]](const InputStream::CompletionInfo &) {
(void)semaphore->signal();

View File

@@ -186,9 +186,7 @@ protected:
// sync_event will be used to send one frame at a time
EventPtr sync_event = nullptr;
if (m_params.measure_hw_latency || m_params.measure_overall_latency) {
auto sync_event_exp = Event::create_shared(Event::State::not_signalled);
CHECK_EXPECTED_AS_STATUS(sync_event_exp);
sync_event = sync_event_exp.release();
TRY(sync_event, Event::create_shared(Event::State::not_signalled));
}
while (true) {
@@ -263,9 +261,7 @@ protected:
// sync_event will be used to send one frame at a time
EventPtr sync_event = nullptr;
if (m_params.measure_hw_latency || m_params.measure_overall_latency) {
auto sync_event_exp = Event::create_shared(Event::State::not_signalled);
CHECK_EXPECTED_AS_STATUS(sync_event_exp);
sync_event = sync_event_exp.release();
TRY(sync_event, Event::create_shared(Event::State::not_signalled));
}
while (true) {

View File

@@ -670,10 +670,10 @@ std::string format_measure_fw_actions_output_path(const std::string &base_output
Expected<std::reference_wrapper<Device>> get_single_physical_device(VDevice &vdevice)
{
auto expected_physical_devices = vdevice.get_physical_devices();
CHECK_EXPECTED(expected_physical_devices);
CHECK_AS_EXPECTED(1 == expected_physical_devices->size(), HAILO_INVALID_OPERATION, "Operation not allowed for multi-device");
auto &res = expected_physical_devices->at(0);
TRY(auto physical_devices, vdevice.get_physical_devices());
CHECK_AS_EXPECTED(1 == physical_devices.size(), HAILO_INVALID_OPERATION,
"Operation not allowed for multi-device");
auto &res = physical_devices.at(0);
return std::move(res);
}
@@ -712,16 +712,11 @@ Expected<std::unique_ptr<VDevice>> Run2::create_vdevice()
Expected<std::vector<std::shared_ptr<NetworkRunner>>> Run2::init_and_run_net_runners(VDevice *vdevice)
{
std::vector<std::shared_ptr<NetworkRunner>> net_runners;
auto shutdown_event_exp = Event::create_shared(Event::State::not_signalled);
CHECK_EXPECTED(shutdown_event_exp);
auto shutdown_event = shutdown_event_exp.release();
TRY(auto shutdown_event, Event::create_shared(Event::State::not_signalled));
// create network runners
for (auto &net_params : get_network_params()) {
auto expected_net_runner = NetworkRunner::create_shared(*vdevice, net_params);
CHECK_EXPECTED(expected_net_runner);
auto net_runner = expected_net_runner.release();
TRY(auto net_runner, NetworkRunner::create_shared(*vdevice, net_params));
net_runners.emplace_back(net_runner);
}
@@ -743,15 +738,24 @@ Expected<std::vector<std::shared_ptr<NetworkRunner>>> Run2::init_and_run_net_run
activation_barrier.arrive_and_wait();
if (get_measure_power() || get_measure_current() || get_measure_temp()) {
auto physical_devices = vdevice->get_physical_devices();
CHECK_EXPECTED(physical_devices);
TRY(auto physical_devices, vdevice->get_physical_devices());
for (auto &device : physical_devices.value()) {
auto measurement_live_track = MeasurementLiveTrack::create_shared(device.get(), get_measure_power(),
get_measure_current(), get_measure_temp());
CHECK_EXPECTED(measurement_live_track);
for (auto &device : physical_devices) {
TRY(const auto identity, device.get().identify());
CHECK_AS_EXPECTED(HailoRTCommon::is_power_measurement_supported(identity.device_architecture) || !(get_measure_power()),
HAILO_INVALID_OPERATION, "HW arch {} does not support power measurement. Disable the power-measure option",
HailoRTCommon::get_device_arch_str(identity.device_architecture));
CHECK_AS_EXPECTED(HailoRTCommon::is_current_measurement_supported(identity.device_architecture) || !(get_measure_current()),
HAILO_INVALID_OPERATION, "HW arch {} does not support current measurement. Disable the current-measure option",
HailoRTCommon::get_device_arch_str(identity.device_architecture));
CHECK_AS_EXPECTED(HailoRTCommon::is_temp_measurement_supported(identity.device_architecture) || !(get_measure_temp()),
HAILO_INVALID_OPERATION, "HW arch {} does not support temperature measurement. Disable the temp-measure option",
HailoRTCommon::get_device_arch_str(identity.device_architecture));
live_stats->add(measurement_live_track.release(), 2);
TRY(auto measurement_live_track, MeasurementLiveTrack::create_shared(device.get(),
get_measure_power(), get_measure_current(), get_measure_temp()));
live_stats->add(measurement_live_track, 2);
}
}
@@ -764,9 +768,7 @@ Expected<std::vector<std::shared_ptr<NetworkRunner>>> Run2::init_and_run_net_run
if (!get_output_json_path().empty()){
live_stats->dump_stats(get_output_json_path(), get_str_infer_mode(get_mode()));
}
auto expected_fps_per_network = live_stats->get_last_measured_fps_per_network_group();
CHECK_EXPECTED(expected_fps_per_network);
auto fps_per_network = expected_fps_per_network.release();
TRY(auto fps_per_network, live_stats->get_last_measured_fps_per_network_group());
for (size_t network_runner_index = 0; network_runner_index < fps_per_network.size(); network_runner_index++) {
net_runners[network_runner_index]->set_last_measured_fps(fps_per_network[network_runner_index]);
}
@@ -793,10 +795,7 @@ hailo_status Run2Command::execute()
LOGGER__WARNING("\"hailortcli run2\" is not optimized for single model usage. It is recommended to use \"hailortcli run\" command for a single model");
}
auto expected_vdevice = app->create_vdevice();
CHECK_EXPECTED_AS_STATUS(expected_vdevice);
auto vdevice = expected_vdevice.release();
TRY(auto vdevice, app->create_vdevice());
std::vector<uint16_t> batch_sizes_to_run = { app->get_network_params()[0].batch_size };
if(app->get_measure_fw_actions() && app->get_network_params()[0].batch_size == HAILO_DEFAULT_BATCH_SIZE) {
// In case measure-fw-actions is enabled and no batch size was provided - we want to run with batch sizes 1,2,4,8,16
@@ -807,12 +806,9 @@ hailo_status Run2Command::execute()
ordered_json action_list_json;
if (app->get_measure_fw_actions()) {
auto device = get_single_physical_device(*vdevice);
CHECK_EXPECTED_AS_STATUS(device);
auto expected_action_list_json = DownloadActionListCommand::init_json_object(device.release(), app->get_network_params()[0].hef_path);
CHECK_EXPECTED_AS_STATUS(expected_action_list_json);
action_list_json = expected_action_list_json.release();
TRY(auto device, get_single_physical_device(*vdevice));
TRY(action_list_json,
DownloadActionListCommand::init_json_object(device, app->get_network_params()[0].hef_path));
runtime_data_output_path = format_measure_fw_actions_output_path(
app->get_measure_fw_actions_output_path(), app->get_network_params()[0].hef_path);
}
@@ -821,23 +817,16 @@ hailo_status Run2Command::execute()
for (auto batch_size : batch_sizes_to_run) {
if(app->get_measure_fw_actions()) {
app->set_batch_size(batch_size);
auto device = get_single_physical_device(*vdevice);
CHECK_EXPECTED_AS_STATUS(device);
auto status = DownloadActionListCommand::set_batch_to_measure(device.release(), RUNTIME_DATA_BATCH_INDEX_TO_MEASURE_DEFAULT);
TRY(auto device, get_single_physical_device(*vdevice));
auto status = DownloadActionListCommand::set_batch_to_measure(device, RUNTIME_DATA_BATCH_INDEX_TO_MEASURE_DEFAULT);
CHECK_SUCCESS(status);
}
auto expected_net_runners = app->init_and_run_net_runners(vdevice.get());
CHECK_EXPECTED_AS_STATUS(expected_net_runners);
auto net_runners = expected_net_runners.release();
TRY(auto net_runners, app->init_and_run_net_runners(vdevice.get()));
if(app->get_measure_fw_actions()) { // Collecting runtime data
auto device = get_single_physical_device(*vdevice);
CHECK_EXPECTED_AS_STATUS(device);
auto status = DownloadActionListCommand::execute(device.release(), net_runners[0]->get_configured_network_group(), batch_size, action_list_json, net_runners[0]->get_last_measured_fps(), network_group_index);
TRY(auto device, get_single_physical_device(*vdevice));
auto status = DownloadActionListCommand::execute(device, net_runners[0]->get_configured_network_group(),
batch_size, action_list_json, net_runners[0]->get_last_measured_fps(), network_group_index);
CHECK_SUCCESS(status);
network_group_index++;

View File

@@ -44,10 +44,8 @@ std::condition_variable wait_for_exit_cv;
constexpr uint32_t DEFAULT_TIME_TO_RUN_SECONDS = 5;
#ifndef HAILO_EMULATOR
constexpr std::chrono::milliseconds TIME_TO_WAIT_FOR_CONFIG(300);
#define HAILORTCLI_DEFAULT_VSTREAM_TIMEOUT_MS (HAILO_DEFAULT_VSTREAM_TIMEOUT_MS)
#else /* ifndef HAILO_EMULATOR */
constexpr std::chrono::milliseconds TIME_TO_WAIT_FOR_CONFIG(30000);
#define HAILORTCLI_DEFAULT_VSTREAM_TIMEOUT_MS (HAILO_DEFAULT_VSTREAM_TIMEOUT_MS * 100)
#endif /* ifndef HAILO_EMULATOR */
static const char *RUNTIME_DATA_OUTPUT_PATH_HEF_PLACE_HOLDER = "<hef>";
@@ -449,21 +447,18 @@ Expected<std::map<std::string, std::vector<InputVStream>>> create_input_vstreams
const inference_runner_params &params)
{
std::map<std::string, std::vector<InputVStream>> res;
auto network_infos = configured_net_group.get_network_infos();
CHECK_EXPECTED(network_infos);
for (auto &network_info : network_infos.value()) {
TRY(const auto network_infos, configured_net_group.get_network_infos());
for (const auto &network_info : network_infos) {
auto quantized = (params.transform.format_type != HAILO_FORMAT_TYPE_FLOAT32);
auto input_vstreams_params = configured_net_group.make_input_vstream_params(quantized,
params.transform.format_type, HAILORTCLI_DEFAULT_VSTREAM_TIMEOUT_MS, HAILO_DEFAULT_VSTREAM_QUEUE_SIZE, network_info.name);
CHECK_EXPECTED(input_vstreams_params);
TRY(auto input_vstreams_params, configured_net_group.make_input_vstream_params(quantized,
params.transform.format_type, HAILORTCLI_DEFAULT_VSTREAM_TIMEOUT_MS, HAILO_DEFAULT_VSTREAM_QUEUE_SIZE, network_info.name));
for (auto &vstream_params : input_vstreams_params.value()) {
for (auto &vstream_params : input_vstreams_params) {
vstream_params.second.pipeline_elements_stats_flags = inference_runner_params_to_pipeline_elem_stats_flags(params.pipeline_stats);
vstream_params.second.vstream_stats_flags = inference_runner_params_to_vstream_stats_flags(params.pipeline_stats);
}
auto input_vstreams = VStreamsBuilder::create_input_vstreams(configured_net_group, input_vstreams_params.value());
CHECK_EXPECTED(input_vstreams);
res.emplace(network_info.name, input_vstreams.release());
TRY(auto input_vstreams, VStreamsBuilder::create_input_vstreams(configured_net_group, input_vstreams_params));
res.emplace(network_info.name, std::move(input_vstreams));
}
return res;
}
@@ -472,28 +467,24 @@ Expected<std::map<std::string, std::vector<OutputVStream>>> create_output_vstrea
const inference_runner_params &params)
{
std::map<std::string, std::vector<OutputVStream>> res;
auto network_infos = configured_net_group.get_network_infos();
CHECK_EXPECTED(network_infos);
for (auto &network_info : network_infos.value()) {
TRY(const auto network_infos, configured_net_group.get_network_infos());
for (const auto &network_info : network_infos) {
// Data is not quantized if format_type is explicitly float32, or if an output is NMS (which also enforces float32 output)
// We don't cover a case of multiple outputs where only some of them are NMS (no such model currently), and anyway it is handled in run2
auto vstream_infos = configured_net_group.get_output_vstream_infos();
CHECK_EXPECTED(vstream_infos);
auto nms_output = std::any_of(vstream_infos->begin(), vstream_infos->end(), [] (const hailo_vstream_info_t &output_info) {
TRY(const auto vstream_infos, configured_net_group.get_output_vstream_infos());
auto nms_output = std::any_of(vstream_infos.begin(), vstream_infos.end(), [] (const hailo_vstream_info_t &output_info) {
return HailoRTCommon::is_nms(output_info);
});
auto quantized = ((params.transform.format_type != HAILO_FORMAT_TYPE_FLOAT32) && !nms_output);
auto output_vstreams_params = configured_net_group.make_output_vstream_params(quantized,
params.transform.format_type, HAILORTCLI_DEFAULT_VSTREAM_TIMEOUT_MS, HAILO_DEFAULT_VSTREAM_QUEUE_SIZE, network_info.name);
CHECK_EXPECTED(output_vstreams_params);
TRY(auto output_vstreams_params, configured_net_group.make_output_vstream_params(quantized,
params.transform.format_type, HAILORTCLI_DEFAULT_VSTREAM_TIMEOUT_MS, HAILO_DEFAULT_VSTREAM_QUEUE_SIZE, network_info.name));
for (auto &vstream_params : output_vstreams_params.value()) {
for (auto &vstream_params : output_vstreams_params) {
vstream_params.second.pipeline_elements_stats_flags = inference_runner_params_to_pipeline_elem_stats_flags(params.pipeline_stats);
vstream_params.second.vstream_stats_flags = inference_runner_params_to_vstream_stats_flags(params.pipeline_stats);
}
auto output_vstreams = VStreamsBuilder::create_output_vstreams(configured_net_group, output_vstreams_params.value());
CHECK_EXPECTED(output_vstreams);
res.emplace(network_info.name, output_vstreams.release());
TRY(auto output_vstreams, VStreamsBuilder::create_output_vstreams(configured_net_group, output_vstreams_params));
res.emplace(network_info.name, std::move(output_vstreams));
}
return res;
}
@@ -501,12 +492,10 @@ Expected<std::map<std::string, std::vector<OutputVStream>>> create_output_vstrea
Expected<std::map<std::string, std::vector<std::reference_wrapper<InputStream>>>> create_input_streams(ConfiguredNetworkGroup &configured_net_group)
{
std::map<std::string, std::vector<std::reference_wrapper<InputStream>>> res;
auto network_infos = configured_net_group.get_network_infos();
CHECK_EXPECTED(network_infos);
for (auto &network_info : network_infos.value()) {
auto input_streams = configured_net_group.get_input_streams_by_network(network_info.name);
CHECK_EXPECTED(input_streams);
res.emplace(network_info.name, input_streams.release());
TRY(const auto network_infos, configured_net_group.get_network_infos());
for (const auto &network_info : network_infos) {
TRY(auto input_streams, configured_net_group.get_input_streams_by_network(network_info.name));
res.emplace(network_info.name, std::move(input_streams));
}
return res;
}
@@ -514,12 +503,10 @@ Expected<std::map<std::string, std::vector<std::reference_wrapper<InputStream>>>
Expected<std::map<std::string, std::vector<std::reference_wrapper<OutputStream>>>> create_output_streams(ConfiguredNetworkGroup &configured_net_group)
{
std::map<std::string, std::vector<std::reference_wrapper<OutputStream>>> res;
auto network_infos = configured_net_group.get_network_infos();
CHECK_EXPECTED(network_infos);
for (auto &network_info : network_infos.value()) {
auto output_streams = configured_net_group.get_output_streams_by_network(network_info.name);
CHECK_EXPECTED(output_streams);
res.emplace(network_info.name, output_streams.release());
TRY(const auto network_infos, configured_net_group.get_network_infos());
for (const auto &network_info : network_infos) {
TRY(auto output_streams, configured_net_group.get_output_streams_by_network(network_info.name));
res.emplace(network_info.name, std::move(output_streams));
}
return res;
}
@@ -532,9 +519,8 @@ Expected<std::map<std::string, BufferPtr>> create_output_buffers(
std::map<std::string, BufferPtr> dst_data;
for (auto &recv_objects : recv_objects_per_network) {
for (auto &recv_object : recv_objects.second) {
auto buffer = Buffer::create_shared(recv_object.get().get_frame_size());
CHECK_EXPECTED(buffer);
dst_data[recv_object.get().name()] = buffer.release();
TRY(dst_data[recv_object.get().name()],
Buffer::create_shared(recv_object.get().get_frame_size()));
}
}
@@ -643,9 +629,8 @@ static hailo_status run_streaming_impl(std::shared_ptr<ConfiguredNetworkGroup> c
std::vector<AsyncThreadPtr<hailo_status>> results;
auto network_progress_bar_exp = progress_bar.create_network_progress_bar(configured_net_group, network_name);
CHECK_EXPECTED_AS_STATUS(network_progress_bar_exp);
auto network_progress_bar = network_progress_bar_exp.release();
TRY(auto network_progress_bar,
progress_bar.create_network_progress_bar(configured_net_group, network_name));
const auto start = std::chrono::high_resolution_clock::now();
// Launch async read/writes
@@ -712,12 +697,10 @@ static hailo_status run_streaming_impl(std::shared_ptr<ConfiguredNetworkGroup> c
// TODO: HRT-7798
if (!params.vdevice_params.multi_process_service) {
auto network_input_streams = configured_net_group->get_input_streams_by_network(network_name);
CHECK_EXPECTED_AS_STATUS(network_input_streams);
inference_result.m_total_send_frame_size = total_send_frame_size(network_input_streams.value());
auto network_output_streams = configured_net_group->get_output_streams_by_network(network_name);
CHECK_EXPECTED_AS_STATUS(network_output_streams);
inference_result.m_total_recv_frame_size = total_recv_frame_size(network_output_streams.value());
TRY(auto network_input_streams, configured_net_group->get_input_streams_by_network(network_name));
inference_result.m_total_send_frame_size = total_send_frame_size(network_input_streams);
TRY(auto network_output_streams, configured_net_group->get_output_streams_by_network(network_name));
inference_result.m_total_recv_frame_size = total_recv_frame_size(network_output_streams);
}
if (params.measure_latency) {
@@ -731,9 +714,8 @@ static hailo_status run_streaming_impl(std::shared_ptr<ConfiguredNetworkGroup> c
}
if (params.measure_overall_latency) {
auto overall_latency = overall_latency_meter.get_latency(true);
CHECK_EXPECTED_AS_STATUS(overall_latency);
inference_result.m_overall_latency = std::make_unique<std::chrono::nanoseconds>(*overall_latency);
TRY(auto overall_latency, overall_latency_meter.get_latency(true));
inference_result.m_overall_latency = std::make_unique<std::chrono::nanoseconds>(std::move(overall_latency));
}
return HAILO_SUCCESS;
@@ -759,10 +741,7 @@ static Expected<InferResult> run_streaming(const std::vector<std::shared_ptr<Con
std::vector<std::map<std::string, NetworkInferResult>> networks_results; // Map of networks results for each network group
networks_results.reserve(configured_net_groups.size());
auto progress_bar_exp = InferProgress::create(params, std::chrono::seconds(1));
CHECK_EXPECTED(progress_bar_exp);
auto progress_bar = progress_bar_exp.release();
TRY(auto progress_bar, InferProgress::create(params, std::chrono::seconds(1)));
for (size_t network_group_index = 0; network_group_index < configured_net_groups.size(); network_group_index++) {
networks_threads_status.emplace_back();
networks_results.emplace_back();
@@ -851,15 +830,13 @@ static Expected<InferResult> run_inference(const std::vector<std::shared_ptr<Con
for (size_t network_group_index = 0; network_group_index < configured_net_groups.size(); network_group_index++) {
input_vstreams.emplace_back();
output_vstreams.emplace_back();
auto in_vstreams = create_input_vstreams(*configured_net_groups[network_group_index], params);
CHECK_EXPECTED(in_vstreams);
auto in_vstreams_ptr = make_shared_nothrow<std::map<std::string, std::vector<InputVStream>>>(in_vstreams.release());
TRY(auto in_vstreams, create_input_vstreams(*configured_net_groups[network_group_index], params));
auto in_vstreams_ptr = make_shared_nothrow<std::map<std::string, std::vector<InputVStream>>>(std::move(in_vstreams));
CHECK_NOT_NULL_AS_EXPECTED(in_vstreams_ptr, HAILO_OUT_OF_HOST_MEMORY);
input_vstreams[network_group_index] = in_vstreams_ptr;
auto out_vstreams = create_output_vstreams(*configured_net_groups[network_group_index], params);
CHECK_EXPECTED(out_vstreams);
auto out_vstreams_ptr = make_shared_nothrow<std::map<std::string, std::vector<OutputVStream>>>(out_vstreams.release());
TRY(auto out_vstreams, create_output_vstreams(*configured_net_groups[network_group_index], params));
auto out_vstreams_ptr = make_shared_nothrow<std::map<std::string, std::vector<OutputVStream>>>(std::move(out_vstreams));
CHECK_NOT_NULL_AS_EXPECTED(out_vstreams_ptr, HAILO_OUT_OF_HOST_MEMORY);
output_vstreams[network_group_index] = out_vstreams_ptr;
@@ -879,9 +856,7 @@ static Expected<InferResult> run_inference(const std::vector<std::shared_ptr<Con
output_vstreams_refs[network_group_index].emplace(output_vstreams_per_network.first, output_refs);
}
auto network_group_output_buffers = create_output_buffers(output_vstreams_refs[network_group_index]);
CHECK_EXPECTED(network_group_output_buffers);
output_buffers[network_group_index] = network_group_output_buffers.release();
TRY(output_buffers[network_group_index], create_output_buffers(output_vstreams_refs[network_group_index]));
}
auto res = run_streaming<InputVStream, OutputVStream>(configured_net_groups, input_datasets,
@@ -896,7 +871,7 @@ static Expected<InferResult> run_inference(const std::vector<std::shared_ptr<Con
input_vstreams.clear();
output_vstreams.clear();
CHECK_EXPECTED(res);
CHECK_EXPECTED(res); // TODO (HRT-13278): Figure out how to remove CHECK_EXPECTED here
return res;
}
case InferMode::HW_ONLY:
@@ -910,15 +885,9 @@ static Expected<InferResult> run_inference(const std::vector<std::shared_ptr<Con
std::map<std::string, hailort::BufferPtr>());
for (size_t network_group_index = 0; network_group_index < configured_net_groups.size(); network_group_index++) {
auto input_streams = create_input_streams(*configured_net_groups[network_group_index]);
CHECK_EXPECTED(input_streams);
input_streams_refs[network_group_index] = input_streams.release();
auto output_streams = create_output_streams(*configured_net_groups[network_group_index]);
output_streams_refs[network_group_index] = output_streams.release();
auto network_group_output_buffers = create_output_buffers(output_streams_refs[network_group_index]);
CHECK_EXPECTED(network_group_output_buffers);
output_buffers[network_group_index] = network_group_output_buffers.release();
TRY(input_streams_refs[network_group_index], create_input_streams(*configured_net_groups[network_group_index]));
TRY(output_streams_refs[network_group_index], create_output_streams(*configured_net_groups[network_group_index]));
TRY(output_buffers[network_group_index], create_output_buffers(output_streams_refs[network_group_index]));
}
return run_streaming<InputStream, OutputStream>(configured_net_groups, input_datasets, output_buffers,
@@ -929,19 +898,6 @@ static Expected<InferResult> run_inference(const std::vector<std::shared_ptr<Con
}
}
static Expected<std::unique_ptr<ActivatedNetworkGroup>> activate_network_group(ConfiguredNetworkGroup &network_group)
{
hailo_activate_network_group_params_t network_group_params = {};
auto activated_network_group = network_group.activate(network_group_params);
CHECK_EXPECTED(activated_network_group, "Failed activating network group");
// Wait for configuration
// TODO: HRT-2492 wait for config in a normal way
std::this_thread::sleep_for(TIME_TO_WAIT_FOR_CONFIG);
return activated_network_group;
}
static Expected<std::map<std::string, BufferPtr>> create_constant_dataset(
const std::pair<std::vector<hailo_stream_info_t>, std::vector<hailo_vstream_info_t>> &input_infos, const hailo_transform_params_t &trans_params,
InferMode mode)
@@ -1000,35 +956,31 @@ static Expected<std::map<std::string, BufferPtr>> create_dataset_from_files(
const auto file_path_it = file_paths.find(stream_name);
CHECK_AS_EXPECTED(file_paths.end() != file_path_it, HAILO_INVALID_ARGUMENT, "Missing input file for input: {}", stream_name);
auto host_buffer = read_binary_file(file_path_it->second);
CHECK_EXPECTED(host_buffer, "Failed reading file {}", file_path_it->second);
CHECK_AS_EXPECTED((host_buffer->size() % host_frame_size) == 0, HAILO_INVALID_ARGUMENT,
"Input file ({}) size {} must be a multiple of the frame size {} ({})", file_path_it->second, host_buffer->size(), host_frame_size, stream_name);
TRY(auto host_buffer, read_binary_file(file_path_it->second));
CHECK_AS_EXPECTED((host_buffer.size() % host_frame_size) == 0, HAILO_INVALID_ARGUMENT,
"Input file ({}) size {} must be a multiple of the frame size {} ({})", file_path_it->second, host_buffer.size(), host_frame_size, stream_name);
if (InferMode::HW_ONLY == mode) {
auto matching_stream_info = std::find_if(input_infos.first.begin(), input_infos.first.end(), [&stream_name] (const auto &stream_info) {
return std::string(stream_info.name) == stream_name;
});
CHECK_AS_EXPECTED(matching_stream_info != input_infos.first.end(), HAILO_INVALID_OPERATION, "Failed to find raw-stream with name {}.", stream_name);
const size_t frames_count = (host_buffer->size() / host_frame_size);
const size_t frames_count = (host_buffer.size() / host_frame_size);
const size_t hw_frame_size = matching_stream_info->hw_frame_size;
const size_t hw_buffer_size = frames_count * hw_frame_size;
auto hw_buffer = Buffer::create_shared(hw_buffer_size);
CHECK_EXPECTED(hw_buffer);
auto transform_context = InputTransformContext::create(*matching_stream_info, trans_params);
CHECK_EXPECTED(transform_context);
TRY(auto hw_buffer, Buffer::create_shared(hw_buffer_size));
TRY(auto transform_context, InputTransformContext::create(*matching_stream_info, trans_params));
for (size_t i = 0; i < frames_count; i++) {
MemoryView host_data(static_cast<uint8_t*>(host_buffer->data() + (i*host_frame_size)), host_frame_size);
MemoryView hw_data(static_cast<uint8_t*>(hw_buffer.value()->data() + (i*hw_frame_size)), hw_frame_size);
MemoryView host_data(static_cast<uint8_t*>(host_buffer.data() + (i*host_frame_size)), host_frame_size);
MemoryView hw_data(static_cast<uint8_t*>(hw_buffer->data() + (i*hw_frame_size)), hw_frame_size);
auto status = transform_context.value()->transform(host_data, hw_data);
auto status = transform_context->transform(host_data, hw_data);
CHECK_SUCCESS_AS_EXPECTED(status);
}
dataset[stream_name] = hw_buffer.release();
dataset[stream_name] = std::move(hw_buffer);
} else {
auto host_buffer_shared = make_shared_nothrow<Buffer>(host_buffer.release());
auto host_buffer_shared = make_shared_nothrow<Buffer>(std::move(host_buffer));
CHECK_NOT_NULL_AS_EXPECTED(host_buffer_shared, HAILO_OUT_OF_HOST_MEMORY);
dataset[stream_name] = host_buffer_shared;
}
@@ -1052,30 +1004,25 @@ static Expected<std::vector<std::map<std::string, BufferPtr>>> create_dataset(
// Vector of len(ng.conut), each element is pair of all input_stream_infos, and all input_vstream_infos
std::vector<std::pair<std::vector<hailo_stream_info_t>, std::vector<hailo_vstream_info_t>>> input_infos;
for (auto &network_group : network_groups) {
auto expected_all_streams_infos = network_group->get_all_stream_infos();
CHECK_EXPECTED(expected_all_streams_infos);
auto &all_stream_infos = expected_all_streams_infos.value();
TRY(const auto all_streams_infos, network_group->get_all_stream_infos());
std::vector<hailo_stream_info_t> group_input_stream_infos;
std::copy_if(all_stream_infos.begin(), all_stream_infos.end(), std::back_inserter(group_input_stream_infos), [](const auto &info) {
std::copy_if(all_streams_infos.begin(), all_streams_infos.end(), std::back_inserter(group_input_stream_infos), [](const auto &info) {
return info.direction == HAILO_H2D_STREAM;
});
auto expected_input_vstreams_infos = network_group->get_input_vstream_infos();
CHECK_EXPECTED(expected_input_vstreams_infos);
input_infos.push_back({group_input_stream_infos, expected_input_vstreams_infos.release()});
TRY(const auto input_vstreams_infos, network_group->get_input_vstream_infos());
input_infos.push_back({group_input_stream_infos, std::move(input_vstreams_infos)});
}
if (!params.inputs_name_and_file_path.empty()) {
for (auto &group_input_infos : input_infos) {
auto network_group_dataset = create_dataset_from_files(group_input_infos, params.inputs_name_and_file_path,
trans_params, params.mode);
CHECK_EXPECTED(network_group_dataset);
results.emplace_back(network_group_dataset.release());
TRY(auto network_group_dataset, create_dataset_from_files(group_input_infos, params.inputs_name_and_file_path,
trans_params, params.mode));
results.emplace_back(std::move(network_group_dataset));
}
} else {
for (auto &group_input_infos : input_infos) {
auto network_group_dataset = create_constant_dataset(group_input_infos, trans_params, params.mode);
CHECK_EXPECTED(network_group_dataset);
results.emplace_back(network_group_dataset.release());
TRY(auto network_group_dataset, create_constant_dataset(group_input_infos, trans_params, params.mode));
results.emplace_back(std::move(network_group_dataset));
}
}
return results;
@@ -1087,11 +1034,8 @@ Expected<InferResult> activate_and_run_single_device(
const inference_runner_params &params)
{
CHECK_AS_EXPECTED(1 == network_groups.size(), HAILO_INVALID_OPERATION, "Inference is not supported on HEFs with multiple network groups");
auto activated_net_group = activate_network_group(*network_groups[0]);
CHECK_EXPECTED(activated_net_group, "Failed activate network_group");
auto input_dataset = create_dataset(network_groups, params);
CHECK_EXPECTED(input_dataset, "Failed creating input dataset");
TRY(auto activated_net_group, network_groups[0]->activate(), "Failed activate network_group");
TRY(auto input_dataset, create_dataset(network_groups, params));
hailo_power_measurement_types_t measurement_type = HAILO_POWER_MEASUREMENT_TYPES__MAX_ENUM;
bool should_measure_power = false;
@@ -1103,48 +1047,43 @@ Expected<InferResult> activate_and_run_single_device(
should_measure_power = true;
}
std::shared_ptr<LongPowerMeasurement> long_power_measurement = nullptr;
std::shared_ptr<LongPowerMeasurement> long_power_measurement_ptr = nullptr;
if (should_measure_power) {
auto long_power_measurement_exp = PowerMeasurementSubcommand::start_power_measurement(device,
HAILO_DVM_OPTIONS_AUTO,
measurement_type, params.power_measurement.sampling_period, params.power_measurement.averaging_factor);
CHECK_EXPECTED(long_power_measurement_exp);
long_power_measurement = make_shared_nothrow<LongPowerMeasurement>(long_power_measurement_exp.release());
CHECK_NOT_NULL_AS_EXPECTED(long_power_measurement, HAILO_OUT_OF_HOST_MEMORY);
TRY(auto long_power_measurement, PowerMeasurementSubcommand::start_power_measurement(device,
HAILO_DVM_OPTIONS_AUTO, measurement_type, params.power_measurement.sampling_period, params.power_measurement.averaging_factor));
long_power_measurement_ptr = make_shared_nothrow<LongPowerMeasurement>(std::move(long_power_measurement));
CHECK_NOT_NULL_AS_EXPECTED(long_power_measurement_ptr, HAILO_OUT_OF_HOST_MEMORY);
}
bool should_measure_temp = params.measure_temp;
auto temp_measure = TemperatureMeasurement::create_shared(device);
CHECK_EXPECTED(temp_measure);
TRY(auto temp_measure, TemperatureMeasurement::create_shared(device));
if (should_measure_temp) {
auto status = temp_measure.value()->start_measurement();
auto status = temp_measure->start_measurement();
CHECK_SUCCESS_AS_EXPECTED(status, "Failed to get chip's temperature");
}
auto infer_result = run_inference(network_groups, input_dataset.value(), params);
CHECK_EXPECTED(infer_result, "Error failed running inference");
TRY(auto inference_result, run_inference(network_groups, input_dataset, params), "Error while running inference");
InferResult inference_result(infer_result.release());
std::vector<std::reference_wrapper<Device>> device_refs;
device_refs.push_back(device);
inference_result.initialize_measurements(device_refs);
if (should_measure_power) {
auto status = long_power_measurement->stop();
auto status = long_power_measurement_ptr->stop();
CHECK_SUCCESS_AS_EXPECTED(status);
if (params.power_measurement.measure_current) {
status = inference_result.set_current_measurement(device.get_dev_id(), std::move(long_power_measurement));
status = inference_result.set_current_measurement(device.get_dev_id(), std::move(long_power_measurement_ptr));
CHECK_SUCCESS_AS_EXPECTED(status);
} else {
status = inference_result.set_power_measurement(device.get_dev_id(), std::move(long_power_measurement));
status = inference_result.set_power_measurement(device.get_dev_id(), std::move(long_power_measurement_ptr));
CHECK_SUCCESS_AS_EXPECTED(status);
}
}
if (should_measure_temp) {
temp_measure.value()->stop_measurement();
auto temp_measure_p = make_shared_nothrow<AccumulatorResults>(temp_measure.value()->get_data());
temp_measure->stop_measurement();
auto temp_measure_p = make_shared_nothrow<AccumulatorResults>(temp_measure->get_data());
CHECK_NOT_NULL_AS_EXPECTED(temp_measure_p, HAILO_OUT_OF_HOST_MEMORY);
auto status = inference_result.set_temp_measurement(device.get_dev_id(), std::move(temp_measure_p));
CHECK_SUCCESS_AS_EXPECTED(status);
@@ -1158,9 +1097,8 @@ Expected<size_t> get_min_inferred_frames_count(InferResult &inference_result)
size_t min_frames_count = UINT32_MAX;
for (auto &network_group_results : inference_result.network_group_results()) {
for (const auto &network_results_pair : network_group_results.results_per_network()) {
auto frames_count = network_group_results.frames_count(network_results_pair.first);
CHECK_EXPECTED(frames_count);
min_frames_count = std::min(frames_count.value(), min_frames_count);
TRY(const auto frames_count, network_group_results.frames_count(network_results_pair.first));
min_frames_count = std::min(frames_count, min_frames_count);
}
}
return min_frames_count;
@@ -1168,38 +1106,29 @@ Expected<size_t> get_min_inferred_frames_count(InferResult &inference_result)
Expected<InferResult> run_command_hef_single_device(const inference_runner_params &params)
{
auto devices = create_devices(params.vdevice_params.device_params);
CHECK_EXPECTED(devices, "Failed creating device");
TRY(auto devices, create_devices(params.vdevice_params.device_params), "Failed creating device");
/* This function supports controls for multiple devices.
We validate there is only 1 device generated as we are on a single device flow */
CHECK_AS_EXPECTED(1 == devices->size(), HAILO_INTERNAL_FAILURE);
auto &device = devices.value()[0];
CHECK_AS_EXPECTED(1 == devices.size(), HAILO_INTERNAL_FAILURE);
auto &device = devices[0];
auto hef = Hef::create(params.hef_path.c_str());
CHECK_EXPECTED(hef, "Failed reading hef file {}", params.hef_path);
auto interface = device->get_default_streams_interface();
CHECK_EXPECTED(interface, "Failed to get default streams interface");
auto configure_params = get_configure_params(params, hef.value(), interface.value());
CHECK_EXPECTED(configure_params);
auto network_group_list = device->configure(hef.value(), configure_params.value());
CHECK_EXPECTED(network_group_list, "Failed configure device from hef");
TRY(auto hef, Hef::create(params.hef_path.c_str()), "Failed reading hef file {}", params.hef_path);
TRY(const auto interface, device->get_default_streams_interface(), "Failed to get default streams interface");
TRY(auto configure_params, get_configure_params(params, hef, interface));
TRY(auto network_group_list, device->configure(hef, configure_params), "Failed configure device from hef");
if (use_batch_to_measure_opt(params)) {
auto status = DownloadActionListCommand::set_batch_to_measure(*device, params.runtime_data.batch_to_measure);
CHECK_SUCCESS_AS_EXPECTED(status);
}
auto inference_result = activate_and_run_single_device(*device, network_group_list.value(), params);
auto inference_result = activate_and_run_single_device(*device, network_group_list, params);
if (use_batch_to_measure_opt(params) && (0 == params.frames_count) && inference_result) {
auto min_frames_count = get_min_inferred_frames_count(inference_result.value());
CHECK_EXPECTED(min_frames_count);
if (min_frames_count.value() < params.runtime_data.batch_to_measure) {
TRY(auto min_frames_count, get_min_inferred_frames_count(inference_result.value()));
if (min_frames_count < params.runtime_data.batch_to_measure) {
LOGGER__WARNING("Number of frames sent ({}) is smaller than --batch-to-measure ({}), "
"hence timestamps will not be updated in runtime data", min_frames_count.value() ,
"hence timestamps will not be updated in runtime data", min_frames_count,
params.runtime_data.batch_to_measure);
}
}
@@ -1207,11 +1136,11 @@ Expected<InferResult> run_command_hef_single_device(const inference_runner_param
if (params.runtime_data.collect_runtime_data) {
const auto runtime_data_output_path = format_runtime_data_output_path(
params.runtime_data.runtime_data_output_path, params.hef_path);
DownloadActionListCommand::execute(*device, runtime_data_output_path, network_group_list.value(),
DownloadActionListCommand::execute(*device, runtime_data_output_path, network_group_list,
params.hef_path);
}
CHECK_EXPECTED(inference_result);
CHECK_EXPECTED(inference_result); // TODO (HRT-13278): Figure out how to remove CHECK_EXPECTED here
return inference_result;
}
@@ -1224,13 +1153,10 @@ Expected<InferResult> activate_and_run_vdevice(
{
std::unique_ptr<ActivatedNetworkGroup> activated_network_group;
if (!scheduler_is_used) {
auto activated_net_group_exp = activate_network_group(*network_groups[0]);
CHECK_EXPECTED(activated_net_group_exp, "Failed activate network_group");
activated_network_group = activated_net_group_exp.release();
TRY(activated_network_group, network_groups[0]->activate());
}
auto input_dataset = create_dataset(network_groups, params);
CHECK_EXPECTED(input_dataset, "Failed creating input dataset");
TRY(const auto input_dataset, create_dataset(network_groups, params), "Failed creating input dataset");
hailo_power_measurement_types_t measurement_type = HAILO_POWER_MEASUREMENT_TYPES__MAX_ENUM;
bool should_measure_power = false;
@@ -1245,11 +1171,10 @@ Expected<InferResult> activate_and_run_vdevice(
std::map<std::string, std::shared_ptr<LongPowerMeasurement>> power_measurements;
if (should_measure_power) {
for (auto &device : physical_devices) {
auto long_power_measurement_exp = PowerMeasurementSubcommand::start_power_measurement(device,
HAILO_DVM_OPTIONS_AUTO,
measurement_type, params.power_measurement.sampling_period, params.power_measurement.averaging_factor);
CHECK_EXPECTED(long_power_measurement_exp, "Failed starting power measurement on device {}", device.get().get_dev_id());
auto long_power_measurement_p = make_shared_nothrow<LongPowerMeasurement>(long_power_measurement_exp.release());
TRY(auto long_power_measurement, PowerMeasurementSubcommand::start_power_measurement(device,
HAILO_DVM_OPTIONS_AUTO, measurement_type, params.power_measurement.sampling_period, params.power_measurement.averaging_factor),
"Failed starting power measurement on device {}", device.get().get_dev_id());
auto long_power_measurement_p = make_shared_nothrow<LongPowerMeasurement>(std::move(long_power_measurement));
CHECK_NOT_NULL_AS_EXPECTED(long_power_measurement_p, HAILO_OUT_OF_HOST_MEMORY);
power_measurements.emplace(device.get().get_dev_id(), std::move(long_power_measurement_p));
}
@@ -1258,18 +1183,14 @@ Expected<InferResult> activate_and_run_vdevice(
std::map<std::string, std::shared_ptr<TemperatureMeasurement>> temp_measurements;
if (params.measure_temp) {
for (auto &device : physical_devices) {
auto temp_measure = TemperatureMeasurement::create_shared(device);
CHECK_EXPECTED(temp_measure);
auto status = temp_measure.value()->start_measurement();
TRY(auto temp_measure, TemperatureMeasurement::create_shared(device));
auto status = temp_measure->start_measurement();
CHECK_SUCCESS_AS_EXPECTED(status, "Failed starting temperature measurement on device {}", device.get().get_dev_id());
temp_measurements.emplace(device.get().get_dev_id(), temp_measure.release());
temp_measurements.emplace(device.get().get_dev_id(), temp_measure);
}
}
auto infer_result = run_inference(network_groups, input_dataset.value(), params);
CHECK_EXPECTED(infer_result, "Error failed running inference");
InferResult inference_result(infer_result.release());
TRY(auto inference_result, run_inference(network_groups, input_dataset, params), "Error failed running inference");
inference_result.initialize_measurements(physical_devices);
if (should_measure_power) {
@@ -1311,12 +1232,9 @@ Expected<InferResult> activate_and_run_vdevice(
Expected<InferResult> run_command_hef_vdevice(const inference_runner_params &params)
{
auto hef = Hef::create(params.hef_path.c_str());
CHECK_EXPECTED(hef, "Failed reading hef file {}", params.hef_path);
auto network_groups_infos = hef->get_network_groups_infos();
CHECK_EXPECTED(network_groups_infos);
bool scheduler_is_used = (1 < network_groups_infos->size()) || params.vdevice_params.multi_process_service;
TRY(auto hef, Hef::create(params.hef_path.c_str()), "Failed reading hef file {}", params.hef_path);
TRY(auto network_groups_infos, hef.get_network_groups_infos());
bool scheduler_is_used = (1 < network_groups_infos.size()) || params.vdevice_params.multi_process_service;
hailo_vdevice_params_t vdevice_params = {};
auto status = hailo_init_vdevice_params(&vdevice_params);
@@ -1326,12 +1244,8 @@ Expected<InferResult> run_command_hef_vdevice(const inference_runner_params &par
}
std::vector<hailo_device_id_t> dev_ids;
if (!params.vdevice_params.device_params.device_ids.empty()) {
auto dev_ids_exp = get_device_ids(params.vdevice_params.device_params);
CHECK_EXPECTED(dev_ids_exp);
auto dev_ids_struct_exp = HailoRTCommon::to_device_ids_vector(dev_ids_exp.value());
CHECK_EXPECTED(dev_ids_struct_exp);
dev_ids = dev_ids_struct_exp.release();
TRY(auto dev_ids_strs, get_device_ids(params.vdevice_params.device_params));
TRY(dev_ids, HailoRTCommon::to_device_ids_vector(dev_ids_strs));
vdevice_params.device_ids = dev_ids.data();
vdevice_params.device_count = static_cast<uint32_t>(dev_ids.size());
@@ -1339,42 +1253,47 @@ Expected<InferResult> run_command_hef_vdevice(const inference_runner_params &par
vdevice_params.scheduling_algorithm = (scheduler_is_used) ? HAILO_SCHEDULING_ALGORITHM_ROUND_ROBIN : HAILO_SCHEDULING_ALGORITHM_NONE;
vdevice_params.group_id = params.vdevice_params.group_id.c_str();
vdevice_params.multi_process_service = params.vdevice_params.multi_process_service;
auto vdevice = VDevice::create(vdevice_params);
CHECK_EXPECTED(vdevice, "Failed creating vdevice");
TRY(auto vdevice, VDevice::create(vdevice_params), "Failed creating vdevice");
std::vector<std::reference_wrapper<Device>> physical_devices;
if (!params.vdevice_params.multi_process_service) {
auto expected_physical_devices = vdevice.value()->get_physical_devices();
CHECK_EXPECTED(expected_physical_devices);
physical_devices = expected_physical_devices.value();
TRY(physical_devices, vdevice->get_physical_devices());
}
auto interface = vdevice.value()->get_default_streams_interface();
CHECK_EXPECTED(interface, "Failed to get default streams interface");
auto configure_params = get_configure_params(params, hef.value(), *interface);
CHECK_EXPECTED(configure_params);
auto network_group_list = vdevice.value()->configure(hef.value(), configure_params.value());
CHECK_EXPECTED(network_group_list, "Failed configure vdevice from hef");
TRY(const auto interface, vdevice->get_default_streams_interface(), "Failed to get default streams interface");
TRY(auto configure_params, get_configure_params(params, hef, interface));
TRY(auto network_group_list, vdevice->configure(hef, configure_params), "Failed configure vdevice from hef");
for (auto &device : physical_devices) {
TRY(const auto identity, device.get().identify());
CHECK_AS_EXPECTED((HailoRTCommon::is_power_measurement_supported(identity.device_architecture) ||
!(params.power_measurement.measure_power)), HAILO_INVALID_OPERATION,
"HW arch {} does not support power measurement. Disable the power-measure option",
HailoRTCommon::get_device_arch_str(identity.device_architecture));
CHECK_AS_EXPECTED((HailoRTCommon::is_current_measurement_supported(identity.device_architecture) ||
!(params.power_measurement.measure_current)), HAILO_INVALID_OPERATION,
"HW arch {} does not support current measurement. Disable the current-measure option",
HailoRTCommon::get_device_arch_str(identity.device_architecture));
CHECK_AS_EXPECTED((HailoRTCommon::is_temp_measurement_supported(identity.device_architecture) ||
!(params.measure_temp)), HAILO_INVALID_OPERATION,
"HW arch {} does not support temperature measurement. Disable the temp-measure option",
HailoRTCommon::get_device_arch_str(identity.device_architecture));
if (use_batch_to_measure_opt(params)) {
status = DownloadActionListCommand::set_batch_to_measure(device.get(), params.runtime_data.batch_to_measure);
CHECK_SUCCESS_AS_EXPECTED(status);
}
}
auto infer_result = activate_and_run_vdevice(physical_devices, scheduler_is_used, network_group_list.value(), params);
CHECK_EXPECTED(infer_result, "Error failed running inference");
TRY(auto infer_result, activate_and_run_vdevice(physical_devices, scheduler_is_used, network_group_list, params),
"Error while running inference");
for (auto &device : physical_devices) {
if (use_batch_to_measure_opt(params) && (0 == params.frames_count) && infer_result) {
auto min_frames_count = get_min_inferred_frames_count(infer_result.value());
CHECK_EXPECTED(min_frames_count);
if (min_frames_count.value() < params.runtime_data.batch_to_measure) {
if (use_batch_to_measure_opt(params) && (0 == params.frames_count)) {
TRY(const auto min_frames_count, get_min_inferred_frames_count(infer_result));
if (min_frames_count < params.runtime_data.batch_to_measure) {
LOGGER__WARNING("Number of frames sent ({}) is smaller than --batch-to-measure ({}), "
"hence timestamps will not be updated in runtime data", min_frames_count.value() ,
"hence timestamps will not be updated in runtime data", min_frames_count,
params.runtime_data.batch_to_measure);
}
}
@@ -1383,7 +1302,7 @@ Expected<InferResult> run_command_hef_vdevice(const inference_runner_params &par
auto output_path = (1 == physical_devices.size()) ? params.runtime_data.runtime_data_output_path :
(std::string(device.get().get_dev_id()) + "_" + params.runtime_data.runtime_data_output_path);
const auto runtime_data_output_path = format_runtime_data_output_path(output_path, params.hef_path);
DownloadActionListCommand::execute(device.get(), runtime_data_output_path, network_group_list.value(),
DownloadActionListCommand::execute(device.get(), runtime_data_output_path, network_group_list,
params.hef_path);
}
}
@@ -1402,18 +1321,16 @@ Expected<bool> use_vdevice(const hailo_vdevice_params &params)
return true;
}
auto device_type = Device::get_device_type(params.device_params.device_ids[0]);
CHECK_EXPECTED(device_type);
TRY(const auto device_type, Device::get_device_type(params.device_params.device_ids[0]));
return device_type.value() != Device::Type::ETH;
return device_type != Device::Type::ETH;
}
Expected<InferResult> run_command_hef(const inference_runner_params &params)
{
auto use_vdevice_expected = use_vdevice(params.vdevice_params);
CHECK_EXPECTED(use_vdevice_expected);
TRY(auto use_vdevice, use_vdevice(params.vdevice_params));
if (use_vdevice_expected.value()) {
if (use_vdevice) {
return run_command_hef_vdevice(params);
}
else {
@@ -1428,17 +1345,15 @@ static hailo_status run_command_hefs_dir(const inference_runner_params &params,
std::string hef_dir = params.hef_path;
inference_runner_params curr_params = params;
const auto files = Filesystem::get_files_in_dir_flat(hef_dir);
CHECK_EXPECTED_AS_STATUS(files);
TRY(const auto files, Filesystem::get_files_in_dir_flat(hef_dir));
for (const auto &full_path : files.value()) {
for (const auto &full_path : files) {
if (Filesystem::has_suffix(full_path, ".hef")) {
contains_hef = true;
curr_params.hef_path = full_path;
std::cout << std::string(80, '*') << std::endl << "Inferring " << full_path << ":"<< std::endl;
auto hef = Hef::create(full_path);
CHECK_EXPECTED_AS_STATUS(hef);
auto network_groups_names = hef->get_network_groups_names();
TRY(auto hef, Hef::create(full_path));
auto network_groups_names = hef.get_network_groups_names();
auto infer_stats = run_command_hef(curr_params);
printer.print(network_groups_names , infer_stats);
@@ -1458,23 +1373,20 @@ static hailo_status run_command_hefs_dir(const inference_runner_params &params,
hailo_status run_command(const inference_runner_params &params)
{
auto printer = InferStatsPrinter::create(params);
CHECK_EXPECTED_AS_STATUS(printer, "Failed to initialize infer stats printer");
TRY(auto printer, InferStatsPrinter::create(params), "Failed to initialize infer stats printer");
if (!params.csv_output.empty()) {
printer->print_csv_header();
printer.print_csv_header();
}
auto is_dir = Filesystem::is_directory(params.hef_path.c_str());
CHECK_EXPECTED_AS_STATUS(is_dir, "Failed checking if path is directory");
TRY(const auto is_dir, Filesystem::is_directory(params.hef_path.c_str()), "Failed checking if path is directory");
if (is_dir.value()){
return run_command_hefs_dir(params, printer.value());
if (is_dir){
return run_command_hefs_dir(params, printer);
} else {
auto infer_stats = run_command_hef(params);
auto hef = Hef::create(params.hef_path.c_str());
CHECK_EXPECTED_AS_STATUS(hef);
auto network_groups_names = hef->get_network_groups_names();
printer->print(network_groups_names, infer_stats);
TRY(auto hef, Hef::create(params.hef_path.c_str()));
auto network_groups_names = hef.get_network_groups_names();
printer.print(network_groups_names, infer_stats);
return infer_stats.status();
}
}

View File

@@ -36,23 +36,20 @@ hailo_status ScanSubcommand::execute()
return scan();
}
else {
auto res = scan_ethernet(m_interface_ip_addr, m_interface_name);
CHECK_EXPECTED_AS_STATUS(res);
TRY(const auto res, scan_ethernet(m_interface_ip_addr, m_interface_name));
return HAILO_SUCCESS;
}
}
hailo_status ScanSubcommand::scan()
{
auto device_ids = Device::scan();
CHECK_EXPECTED_AS_STATUS(device_ids);
if (device_ids->size() == 0) {
TRY(const auto device_ids, Device::scan());
if (device_ids.size() == 0) {
std::cout << "Hailo devices not found" << std::endl;
}
else {
std::cout << "Hailo Devices:" << std::endl;
for (const auto& device_id : device_ids.value()) {
for (const auto &device_id : device_ids) {
std::cout << "[-] Device: " << device_id << std::endl;
}
}

View File

@@ -108,9 +108,8 @@ SensorSectionsInfoSubcommand::SensorSectionsInfoSubcommand(CLI::App &parent_app)
hailo_status SensorSectionsInfoSubcommand::execute_on_device(Device &device)
{
auto sections_info = device.sensor_get_sections_info();
CHECK_EXPECTED_AS_STATUS(sections_info);
return print_sections_info((SENSOR_CONFIG__section_info_t*)sections_info->data());
TRY(auto sections_info, device.sensor_get_sections_info());
return print_sections_info((SENSOR_CONFIG__section_info_t*)sections_info.data());
}
hailo_status SensorSectionsInfoSubcommand::print_sections_info(SENSOR_CONFIG__section_info_t *operation_cfg)
@@ -125,11 +124,11 @@ hailo_status SensorSectionsInfoSubcommand::print_sections_info(SENSOR_CONFIG__se
}
else {
std::string reset_config = section_info->no_reset_offset ? "not valid" : "valid";
auto sensor_type_expected = convert_sensor_type_to_string(section_info->sensor_type);
CHECK_EXPECTED_AS_STATUS(sensor_type_expected, "Failed convert sensor type to string");
TRY( const auto sensor_type, convert_sensor_type_to_string(section_info->sensor_type),
"Failed convert sensor type to string");
std::cout << "Configuration Name: " << section_info->config_name << "\n";
std::cout << "Sensor Type: " << sensor_type_expected.value() << "\n";
std::cout << "Sensor Type: " << sensor_type << "\n";
std::cout << "Configuration lines number: " << (section_info->config_size / sizeof(SENSOR_CONFIG__operation_cfg_t)) << "\n";
std::cout << "Configuration size in bytes: " << section_info->config_size << "\n";
std::cout << "Reset configuration: " << reset_config << "\n";

View File

@@ -140,11 +140,10 @@ std::map<uint16_t, hailo_status> UdpRateLimiterCommand::reset_commnad(const std:
hailo_status UdpRateLimiterCommand::autoset_commnad(const std::vector<uint16_t> &board_ports)
{
const auto rates_from_hef = calc_rate_from_hef(m_hef_path, m_network_group_name, m_fps);
CHECK_EXPECTED_AS_STATUS(rates_from_hef);
TRY(const auto rates_from_hef, calc_rate_from_hef(m_hef_path, m_network_group_name, m_fps));
// On auto set, we use min rate for all input ports
auto min_rate_pair = *std::min_element(rates_from_hef.value().begin(), rates_from_hef.value().end(),
auto min_rate_pair = *std::min_element(rates_from_hef.begin(), rates_from_hef.end(),
[](const auto& lhs, const auto& rhs) { return lhs.second < rhs.second; });
return set_command(board_ports, static_cast<uint32_t>(min_rate_pair.second));
@@ -197,16 +196,11 @@ uint32_t UdpRateLimiterCommand::bit_rate_kbit_sec_to_bytes_sec(uint32_t rate_kbi
Expected<std::map<std::string, uint32_t>> UdpRateLimiterCommand::calc_rate_from_hef(const std::string &hef_path,
const std::string &network_group_name, uint32_t fps)
{
auto hef = Hef::create(hef_path.c_str());
CHECK_EXPECTED(hef, "Failed reading hef file {}", hef_path.c_str());
TRY(auto hef, Hef::create(hef_path.c_str()), "Failed reading hef file {}", hef_path.c_str());
TRY(auto rate_calc, NetworkUdpRateCalculator::create(&(hef), network_group_name));
TRY(auto calculated_rates, rate_calc.calculate_inputs_bandwith(fps));
auto rate_calc = NetworkUdpRateCalculator::create(&(hef.value()), network_group_name);
CHECK_EXPECTED(rate_calc);
auto calculated_rates = rate_calc->calculate_inputs_bandwith(fps);
CHECK_EXPECTED(calculated_rates);
return calculated_rates.release();
return calculated_rates;
}
std::vector<uint16_t> UdpRateLimiterCommand::get_dports()

View File

@@ -0,0 +1,22 @@
cmake_minimum_required(VERSION 3.0.0)

# Select the OS-specific raw-connection implementation directory.
set(HRPC_IMPL_DIR "${CMAKE_CURRENT_SOURCE_DIR}/os")
if(WIN32)
    set(HRPC_OS_DIR "${HRPC_IMPL_DIR}/windows")
elseif(UNIX)
    set(HRPC_OS_DIR "${HRPC_IMPL_DIR}/posix")
else()
    message(FATAL_ERROR "Unexpected host, stopping build")
endif()

# hRPC sources: common code, the PCIe transport, and the OS-specific socket transport.
set(SRC_FILES
    ${CMAKE_CURRENT_SOURCE_DIR}/rpc_connection.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/raw_connection.cpp
    ${HRPC_IMPL_DIR}/pcie/raw_connection_internal.cpp
    ${HRPC_OS_DIR}/raw_connection_internal.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/client.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/server.cpp
)

# Expose the source list to the parent scope (consumed by the enclosing build).
set(HRPC_CPP_SOURCES ${SRC_FILES} PARENT_SCOPE)

125
hailort/hrpc/client.cpp Normal file
View File

@@ -0,0 +1,125 @@
/**
* Copyright (c) 2024 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
 * @file client.cpp
* @brief RPC Client
**/
#include "client.hpp"
using namespace hrpc;
// Creates a ResultEvent backed by a non-signalled hailort::Event.
// Returns HAILO_OUT_OF_HOST_MEMORY on allocation failure.
Expected<std::shared_ptr<ResultEvent>> ResultEvent::create_shared()
{
    TRY(auto event, hailort::Event::create_shared(hailort::Event::State::not_signalled));
    auto ptr = make_shared_nothrow<ResultEvent>(event);
    CHECK_NOT_NULL(ptr, HAILO_OUT_OF_HOST_MEMORY);
    return ptr;
}
// Wraps the event used to hand a reply buffer from the reader thread to a waiter.
ResultEvent::ResultEvent(EventPtr event) :
    m_event(event)
{
}

// Moves the stored reply buffer out; meaningful only after wait() returned HAILO_SUCCESS.
Buffer &&ResultEvent::release()
{
    return std::move(m_value);
}

// Stores the reply buffer and wakes the waiting thread.
hailo_status ResultEvent::signal(Buffer &&value)
{
    m_value = std::move(value);
    return m_event->signal();
}

// Blocks until signal() is called or the timeout expires.
hailo_status ResultEvent::wait(std::chrono::milliseconds timeout)
{
    return m_event->wait(timeout);
}
// Stops the reader thread: closing the connection unblocks the pending
// read_message() in message_loop(), then the thread is joined.
Client::~Client()
{
    is_running = false;
    (void)m_connection.close();
    if (m_thread.joinable()) {
        m_thread.join();
    }
}
// Establishes the transport connection (PCIe or socket, chosen by the
// connection context) and starts the background reply-reader thread.
hailo_status Client::connect()
{
    TRY(m_conn_context, ConnectionContext::create_shared(false)); // false => client side
    TRY(auto conn, RawConnection::create_shared(m_conn_context));
    auto status = conn->connect();
    CHECK_SUCCESS(status);
    m_connection = RpcConnection(conn);
    // Reader thread lives until the destructor closes the connection.
    m_thread = std::thread([this] {
        auto status = message_loop();
        if ((status != HAILO_SUCCESS) && (status != HAILO_COMMUNICATION_CLOSED)) { // TODO: Use this to prevent future requests
            LOGGER__ERROR("Error in message loop - {}", status);
        }
    });
    return HAILO_SUCCESS;
}
// Reader loop: runs on m_thread for the client's lifetime. Each iteration reads
// one reply and routes it either to a registered custom callback (matched by
// action id) or to the ResultEvent of the pending request (matched by message id).
// Returns HAILO_SUCCESS when is_running is cleared, or an error/closed status.
hailo_status Client::message_loop()
{
    while (is_running) {
        rpc_message_header_t header;
        TRY_WITH_ACCEPTABLE_STATUS(HAILO_COMMUNICATION_CLOSED, auto message, m_connection.read_message(header));
        assert(header.action_id < static_cast<uint32_t>(HailoRpcActionID::MAX_VALUE));
        auto action_id_enum = static_cast<HailoRpcActionID>(header.action_id);
        if (m_custom_callbacks.find(action_id_enum) != m_custom_callbacks.end()) {
            auto status = m_custom_callbacks[action_id_enum](MemoryView(message), m_connection);
            CHECK_SUCCESS(status);
            continue;
        }

        std::unique_lock<std::mutex> lock(m_message_mutex);
        auto event_it = m_events.find(header.message_id);
        if (m_events.end() == event_it) {
            // No waiter for this message id (e.g. the request already timed out and
            // was erased). The previous operator[] lookup would default-insert a null
            // shared_ptr and crash on signal() - drop the reply instead.
            LOGGER__ERROR("Got reply for unknown message id {} - dropping", header.message_id);
            continue;
        }
        auto event = event_it->second;
        lock.unlock();

        auto status = event->signal(std::move(message));
        CHECK_SUCCESS(status);
    }
    return HAILO_SUCCESS;
}
// Blocking request/response round-trip: writes the request (plus optional extra
// payload buffers via write_buffers_callback), registers a waiter keyed by a
// fresh message id, and waits up to REQUEST_TIMEOUT for message_loop() to
// deliver the matching reply.
Expected<Buffer> Client::execute_request(HailoRpcActionID action_id, const MemoryView &request,
    std::function<hailo_status(RpcConnection)> write_buffers_callback)
{
    std::unique_lock<std::mutex> lock(m_message_mutex);
    rpc_message_header_t header;
    header.size = static_cast<uint32_t>(request.size());
    header.message_id = m_messages_sent++;
    header.action_id = static_cast<uint32_t>(action_id);
    auto status = m_connection.write_message(header, request);
    CHECK_SUCCESS_AS_EXPECTED(status);
    if (write_buffers_callback) {
        status = write_buffers_callback(m_connection);
        CHECK_SUCCESS_AS_EXPECTED(status);
    }
    TRY(auto event, ResultEvent::create_shared());
    m_events[header.message_id] = event;
    lock.unlock();

    status = event->wait(REQUEST_TIMEOUT);

    // Always remove our entry - even on timeout - otherwise stale waiters accumulate.
    // The erase must happen under the lock: message_loop() reads m_events concurrently,
    // and the original unlocked erase was a data race.
    lock.lock();
    m_events.erase(header.message_id);
    lock.unlock();
    CHECK_SUCCESS_AS_EXPECTED(status);

    return event->release();
}
// Registers a callback that consumes every reply carrying the given action id,
// bypassing the per-request ResultEvent routing in message_loop().
// NOTE(review): not synchronized with message_loop() - assumes registration
// happens before concurrent traffic on this action id; confirm with callers.
void Client::register_custom_reply(HailoRpcActionID action_id,
    std::function<hailo_status(const MemoryView&, RpcConnection connection)> callback)
{
    m_custom_callbacks[action_id] = callback;
}

68
hailort/hrpc/client.hpp Normal file
View File

@@ -0,0 +1,68 @@
/**
* Copyright (c) 2024 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file client.hpp
* @brief RPC Client Header
**/
#ifndef _CLIENT_HPP_
#define _CLIENT_HPP_
#include <hailo/event.hpp>
#include <fcntl.h>
#include <functional>
#include <thread>
#include "rpc_connection.hpp"
#include "hrpc_protocol/serializer.hpp"
namespace hrpc
{
#define REQUEST_TIMEOUT std::chrono::milliseconds(10000)
// Single-use hand-off object: one thread waits for a reply buffer that another
// thread delivers via signal().
class ResultEvent
{
public:
    static Expected<std::shared_ptr<ResultEvent>> create_shared();

    ResultEvent(EventPtr event);
    // Moves the delivered buffer out; call only after wait() succeeded.
    Buffer &&release();
    // Stores the reply and wakes the waiter.
    hailo_status signal(Buffer &&value);
    hailo_status wait(std::chrono::milliseconds timeout);

private:
    Buffer m_value;  // reply payload, set by signal()
    EventPtr m_event;
};
// RPC client: sends serialized requests over a RawConnection and matches
// replies to pending requests by message id on a background reader thread.
class Client
{
public:
    Client() = default;
    ~Client();

    hailo_status connect();
    // Blocking request/response round-trip. The optional write_buffers_callback
    // writes extra payload buffers right after the request message.
    Expected<Buffer> execute_request(HailoRpcActionID action_id, const MemoryView &request,
        std::function<hailo_status(RpcConnection)> write_buffers_callback = nullptr);

    // Routes all replies with 'action_id' to 'callback' instead of a pending request.
    void register_custom_reply(HailoRpcActionID action_id, std::function<hailo_status(const MemoryView&, RpcConnection connection)> callback);

protected:
    hailo_status message_loop();

    bool is_running = true;  // cleared by the destructor to stop message_loop()
    std::shared_ptr<ConnectionContext> m_conn_context;
    RpcConnection m_connection;
    std::thread m_thread;  // runs message_loop()
    std::unordered_map<uint32_t, std::shared_ptr<ResultEvent>> m_events;  // message id -> waiter; guarded by m_message_mutex
    std::unordered_map<HailoRpcActionID, std::function<hailo_status(const MemoryView&, RpcConnection)>> m_custom_callbacks;
    uint32_t m_messages_sent = 0;  // next message id; guarded by m_message_mutex
    std::mutex m_message_mutex;
};
} // namespace hrpc
#endif // _CLIENT_HPP_

View File

@@ -0,0 +1,196 @@
/**
* Copyright (c) 2024 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file raw_connection_internal.cpp
* @brief PCIE Raw Connection
**/
#include "hrpc/os/pcie/raw_connection_internal.hpp"
#include "common/logger_macros.hpp"
#include "common/utils.hpp"
#include "hailo/hailort.h"
#include "vdma/driver/hailort_driver.hpp"
// TODO: Remove this after we can choose ports in the driver
#define PCIE_PORT (1213355091)
using namespace hrpc;
// Creates a PCIe connection context with DMA-able bounce buffers (sized to the
// session's max transfer size) for transfers whose user buffer is unaligned.
// Server side (is_accepting): opens the PCIe endpoint driver.
// Client side: scans for the first SOC_ACCELERATOR device and opens its driver.
Expected<std::shared_ptr<ConnectionContext>> PcieConnectionContext::create_shared(bool is_accepting)
{
    const auto max_size = PcieSession::max_transfer_size();
    TRY(auto write_buffer, Buffer::create(static_cast<size_t>(max_size), BufferStorageParams::create_dma()));
    TRY(auto read_buffer, Buffer::create(static_cast<size_t>(max_size), BufferStorageParams::create_dma()));
    std::shared_ptr<PcieConnectionContext> ptr = nullptr;
    if (is_accepting) {
        // Server side
        TRY(auto driver, HailoRTDriver::create_pcie_ep());
        ptr = make_shared_nothrow<PcieConnectionContext>(std::move(driver), is_accepting,
            std::move(write_buffer), std::move(read_buffer));
        CHECK_NOT_NULL(ptr, HAILO_OUT_OF_HOST_MEMORY);
        return std::dynamic_pointer_cast<ConnectionContext>(ptr);
    } else {
        // Client side - use the first SOC accelerator found
        TRY(auto device_infos, HailoRTDriver::scan_devices());
        CHECK(device_infos.size() > 0, HAILO_NOT_FOUND, "No devices found");
        for (auto &device_info : device_infos) {
            if (HailoRTDriver::AcceleratorType::SOC_ACCELERATOR == device_info.accelerator_type) {
                TRY(auto driver, HailoRTDriver::create(device_info.device_id, device_info.dev_path));
                ptr = make_shared_nothrow<PcieConnectionContext>(std::move(driver), is_accepting,
                    std::move(write_buffer), std::move(read_buffer));
                CHECK_NOT_NULL(ptr, HAILO_OUT_OF_HOST_MEMORY);
                return std::dynamic_pointer_cast<ConnectionContext>(ptr);
            }
        }
    }
    // Reached only on the client path when no SOC accelerator was found
    LOGGER__ERROR("No suitable device found");
    return make_unexpected(HAILO_NOT_FOUND);
}
// Blocks until no connection is active (m_conn_count == 0), then claims the
// slot. Only one PCIe connection is tracked at a time by this context.
hailo_status PcieConnectionContext::wait_for_available_connection()
{
    std::unique_lock<std::mutex> lock(m_mutex);
    // HAILO_INFINITE milliseconds - effectively waits forever for the slot
    bool was_successful = m_cv.wait_for(lock, std::chrono::milliseconds(HAILO_INFINITE), [this] () -> bool {
        return (m_conn_count == 0);
    });
    CHECK(was_successful, HAILO_TIMEOUT, "Got timeout in accept");
    m_conn_count++;
    return HAILO_SUCCESS;
}
// Releases the single-connection slot and wakes a pending accept() waiter.
void PcieConnectionContext::mark_connection_closed()
{
    {
        std::lock_guard<std::mutex> guard(m_mutex);
        m_conn_count--;
    }
    // Notify outside the critical section so the woken thread can take the lock
    m_cv.notify_one();
}
// Wraps the context in a new (not yet connected) PCIe raw connection.
Expected<std::shared_ptr<RawConnection>> PcieRawConnection::create_shared(std::shared_ptr<PcieConnectionContext> context)
{
    auto ptr = make_shared_nothrow<PcieRawConnection>(context);
    CHECK_NOT_NULL_AS_EXPECTED(ptr, HAILO_OUT_OF_HOST_MEMORY);
    return std::dynamic_pointer_cast<RawConnection>(ptr);
}
// Server side: waits for the context's connection slot to be free, then blocks
// in PcieSession::accept() until a client connects on the fixed PCIE_PORT.
Expected<std::shared_ptr<RawConnection>> PcieRawConnection::accept()
{
    auto status = m_context->wait_for_available_connection();
    CHECK_SUCCESS(status);
    auto new_conn = make_shared_nothrow<PcieRawConnection>(m_context);
    CHECK_NOT_NULL_AS_EXPECTED(new_conn, HAILO_OUT_OF_HOST_MEMORY);
    TRY(auto session, PcieSession::accept(m_context->driver(), PCIE_PORT));
    status = new_conn->set_session(std::move(session));
    CHECK_SUCCESS(status);
    return std::dynamic_pointer_cast<RawConnection>(new_conn);
}
// Moves the established session onto the heap so this connection owns it.
hailo_status PcieRawConnection::set_session(PcieSession &&session)
{
    m_session = make_shared_nothrow<PcieSession>(std::move(session));
    CHECK_NOT_NULL(m_session, HAILO_OUT_OF_HOST_MEMORY);
    return HAILO_SUCCESS;
}
// Client side: establishes a PCIe session on the fixed PCIE_PORT.
hailo_status PcieRawConnection::connect()
{
    TRY(auto session, PcieSession::connect(m_context->driver(), PCIE_PORT));
    auto status = set_session(std::move(session));
    CHECK_SUCCESS(status);
    return HAILO_SUCCESS;
}
// Writes 'size' bytes over the PCIe session, chunked to at most the session's
// max transfer size. Buffers not meeting the DMA alignment are staged through
// the context's pre-allocated DMA-able write buffer. HAILO_STREAM_ABORT from
// the session is translated to HAILO_COMMUNICATION_CLOSED.
hailo_status PcieRawConnection::write(const uint8_t *buffer, size_t size)
{
    if (0 == size) {
        return HAILO_SUCCESS;
    }
    const auto alignment = OsUtils::get_dma_able_alignment();
    const auto max_size = PcieSession::max_transfer_size();
    // Alignment is checked once on the base pointer; chunk offsets are assumed
    // to preserve it (NOTE(review): holds when max_size is a multiple of alignment - confirm)
    bool is_aligned = ((reinterpret_cast<uintptr_t>(buffer) % alignment) == 0);
    size_t bytes_written = 0;
    while (bytes_written < size) {
        size_t amount_to_write = 0;
        auto size_left = size - bytes_written;
        if (is_aligned) {
            // Zero-copy path - write directly from the caller's buffer
            amount_to_write = std::min(static_cast<size_t>(size_left), static_cast<size_t>(max_size));
            auto status = m_session->write(buffer + bytes_written, amount_to_write, m_timeout);
            if (HAILO_STREAM_ABORT == status) {
                return HAILO_COMMUNICATION_CLOSED;
            }
            CHECK_SUCCESS(status);
        } else {
            // Bounce-buffer path - copy into the DMA-able staging buffer first
            amount_to_write = std::min(static_cast<size_t>(size_left), m_context->write_buffer().size());
            memcpy(m_context->write_buffer().data(), buffer + bytes_written, amount_to_write);
            auto status = m_session->write(m_context->write_buffer().data(), amount_to_write, m_timeout);
            if (HAILO_STREAM_ABORT == status) {
                return HAILO_COMMUNICATION_CLOSED;
            }
            CHECK_SUCCESS(status);
        }
        bytes_written += amount_to_write;
    }
    return HAILO_SUCCESS;
}
// Reads 'size' bytes from the PCIe session, chunked to at most the session's
// max transfer size. Unaligned destination buffers are filled via the context's
// DMA-able read buffer and then copied out. HAILO_STREAM_ABORT from the
// session is translated to HAILO_COMMUNICATION_CLOSED.
hailo_status PcieRawConnection::read(uint8_t *buffer, size_t size)
{
    if (0 == size) {
        return HAILO_SUCCESS;
    }
    const auto alignment = OsUtils::get_dma_able_alignment();
    const auto max_size = PcieSession::max_transfer_size();
    bool is_aligned = ((reinterpret_cast<uintptr_t>(buffer) % alignment) == 0);
    size_t bytes_read = 0;
    while (bytes_read < size) {
        size_t amount_to_read = 0;
        auto size_left = size - bytes_read;
        if (is_aligned) {
            // Zero-copy path - read directly into the caller's buffer
            amount_to_read = std::min(static_cast<size_t>(size_left), static_cast<size_t>(max_size));
            auto status = m_session->read(buffer + bytes_read, amount_to_read, m_timeout);
            if (HAILO_STREAM_ABORT == status) {
                return HAILO_COMMUNICATION_CLOSED;
            }
            CHECK_SUCCESS(status);
        } else {
            // Bounce-buffer path - read into the DMA-able staging buffer, then copy out
            amount_to_read = std::min(static_cast<size_t>(size_left), m_context->read_buffer().size());
            auto status = m_session->read(m_context->read_buffer().data(), amount_to_read, m_timeout);
            if (HAILO_STREAM_ABORT == status) {
                return HAILO_COMMUNICATION_CLOSED;
            }
            CHECK_SUCCESS(status);
            memcpy(buffer + bytes_read, m_context->read_buffer().data(), amount_to_read);
        }
        bytes_read += amount_to_read;
    }
    return HAILO_SUCCESS;
}
// Closes the PCIe session and frees the context's connection slot so a
// subsequent accept() can proceed.
hailo_status PcieRawConnection::close()
{
    auto status = m_session->close();
    CHECK_SUCCESS(status);
    m_context->mark_connection_closed();
    return HAILO_SUCCESS;
}

View File

@@ -0,0 +1,78 @@
/**
* Copyright (c) 2024 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file raw_connection_internal.hpp
 * @brief Raw Connection Header for pcie based communication
**/
#ifndef _PCIE_RAW_CONNECTION_INTERNAL_HPP_
#define _PCIE_RAW_CONNECTION_INTERNAL_HPP_
#include "hailo/expected.hpp"
#include "vdma/pcie_session.hpp"
#include "hrpc/raw_connection.hpp"
#include <memory>
#include <condition_variable>
using namespace hailort;
namespace hrpc
{
// Connection context for the PCIe transport: owns the driver handle and a pair
// of DMA-able staging buffers, and serializes access to the single connection slot.
class PcieConnectionContext : public ConnectionContext
{
public:
    static Expected<std::shared_ptr<ConnectionContext>> create_shared(bool is_accepting);

    PcieConnectionContext(std::shared_ptr<HailoRTDriver> &&driver, bool is_accepting,
        Buffer &&write_buffer, Buffer &&read_buffer)
        : ConnectionContext(is_accepting), m_driver(std::move(driver)),
        m_write_buffer(std::move(write_buffer)), m_read_buffer(std::move(read_buffer)),
        m_conn_count(0) {}

    virtual ~PcieConnectionContext() = default;

    std::shared_ptr<HailoRTDriver> driver() { return m_driver; }
    // Staging buffers used when the caller's buffer is not DMA-aligned
    Buffer &write_buffer() { return m_write_buffer; }
    Buffer &read_buffer() { return m_read_buffer; }

    // Blocks until the single connection slot is free, then claims it
    hailo_status wait_for_available_connection();
    void mark_connection_closed();

private:
    std::shared_ptr<HailoRTDriver> m_driver;
    Buffer m_write_buffer;
    Buffer m_read_buffer;
    uint32_t m_conn_count;  // active connections (0 or 1); guarded by m_mutex
    std::mutex m_mutex;
    std::condition_variable m_cv;
};
// RawConnection implementation over a PcieSession.
class PcieRawConnection : public RawConnection
{
public:
    static Expected<std::shared_ptr<RawConnection>> create_shared(std::shared_ptr<PcieConnectionContext> context);

    PcieRawConnection() = default;
    virtual ~PcieRawConnection() = default;

    virtual Expected<std::shared_ptr<RawConnection>> accept() override;
    virtual hailo_status connect() override;
    virtual hailo_status write(const uint8_t *buffer, size_t size) override;
    virtual hailo_status read(uint8_t *buffer, size_t size) override;
    virtual hailo_status close() override;

    explicit PcieRawConnection(std::shared_ptr<PcieConnectionContext> context) : m_context(context) {}

private:
    // Stores an established session (called from accept()/connect())
    hailo_status set_session(PcieSession &&session);

    std::shared_ptr<PcieConnectionContext> m_context;
    std::shared_ptr<PcieSession> m_session;
};
} // namespace hrpc
#endif // _PCIE_RAW_CONNECTION_INTERNAL_HPP_

View File

@@ -0,0 +1,122 @@
/**
* Copyright (c) 2024 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file raw_connection_internal.cpp
* @brief Linux Sockets Raw Connection
**/
#include "hrpc/os/posix/raw_connection_internal.hpp"
#include <sys/socket.h>
#include <sys/un.h>
#include <string>
#include <unistd.h>
#include <common/logger_macros.hpp>
#include <common/utils.hpp>
#include <hailo/hailort.h>
using namespace hrpc;
// Creates a socket-based connection context (holds no OS resources itself).
Expected<std::shared_ptr<ConnectionContext>> OsConnectionContext::create_shared(bool is_accepting)
{
    auto ptr = make_shared_nothrow<OsConnectionContext>(is_accepting);
    CHECK_NOT_NULL(ptr, HAILO_OUT_OF_HOST_MEMORY);
    return std::dynamic_pointer_cast<ConnectionContext>(ptr);
}
// Creates a unix-domain socket connection.
// Accepting (server) side: creates, binds and listens on /tmp/unix_socket,
// unlinking a stale socket file from a previous run first.
// Client side: only creates the socket; connect() performs the handshake.
Expected<std::shared_ptr<RawConnection>> OsRawConnection::create_shared(std::shared_ptr<OsConnectionContext> context)
{
    std::shared_ptr<RawConnection> ptr;
    if (context->is_accepting()) {
        int fd = ::socket(AF_UNIX, SOCK_STREAM, 0);
        CHECK_AS_EXPECTED(fd >= 0, HAILO_OPEN_FILE_FAILURE, "Socket creation error, errno = {}", errno);
        struct sockaddr_un server_addr;
        memset(&server_addr, 0, sizeof(server_addr)); // zeroing also guarantees sun_path stays null-terminated
        server_addr.sun_family = AF_UNIX;
        std::string addr = "/tmp/unix_socket";
        strncpy(server_addr.sun_path, addr.c_str(), addr.size());
        unlink(addr.c_str()); // remove a stale socket file so bind() does not fail
        int result = ::bind(fd, (struct sockaddr*)&server_addr, sizeof(server_addr));
        CHECK_AS_EXPECTED(result >= 0, HAILO_FILE_OPERATION_FAILURE, "Bind error, errno = {}", errno);
        result = ::listen(fd, 5);
        CHECK_AS_EXPECTED(result >= 0, HAILO_FILE_OPERATION_FAILURE, "Listen error, errno = {}", errno);
        ptr = make_shared_nothrow<OsRawConnection>(fd, context);
    } else {
        int fd = ::socket(AF_UNIX, SOCK_STREAM, 0);
        CHECK_AS_EXPECTED(fd >= 0, HAILO_OPEN_FILE_FAILURE, "Socket creation error, errno = {}", errno);
        ptr = make_shared_nothrow<OsRawConnection>(fd, context);
    }
    CHECK_NOT_NULL_AS_EXPECTED(ptr, HAILO_OUT_OF_HOST_MEMORY);
    return ptr;
}
// Blocks until a client connects to the listening socket; returns a new
// connection object wrapping the accepted peer fd.
Expected<std::shared_ptr<RawConnection>> OsRawConnection::accept()
{
    int fd = ::accept(m_fd, nullptr, nullptr);
    CHECK_AS_EXPECTED(fd >= 0, HAILO_FILE_OPERATION_FAILURE, "Accept error, errno = {}", errno);
    std::shared_ptr<RawConnection> ptr = make_shared_nothrow<OsRawConnection>(fd, m_context);
    CHECK_NOT_NULL_AS_EXPECTED(ptr, HAILO_OUT_OF_HOST_MEMORY);
    return ptr;
}
// Client side: connects to the server's unix socket at /tmp/unix_socket.
// NOTE(review): the socket path literal is duplicated in create_shared() - keep in sync.
hailo_status OsRawConnection::connect()
{
    struct sockaddr_un server_addr;
    std::string addr = "/tmp/unix_socket";
    memset(&server_addr, 0, sizeof(server_addr));
    server_addr.sun_family = AF_UNIX;
    strncpy(server_addr.sun_path, addr.c_str(), addr.size());
    int result = ::connect(m_fd, (struct sockaddr*)&server_addr, sizeof(server_addr));
    CHECK(result >= 0, HAILO_FILE_OPERATION_FAILURE, "Connect error, errno = {}", errno);
    return HAILO_SUCCESS;
}
// Sends exactly 'size' bytes, looping over partial sends.
// MSG_NOSIGNAL suppresses SIGPIPE if the peer has closed the socket.
hailo_status OsRawConnection::write(const uint8_t *buffer, size_t size)
{
    for (size_t offset = 0; offset < size; ) {
        const ssize_t sent = ::send(m_fd, buffer + offset, size - offset, MSG_NOSIGNAL);
        CHECK(sent >= 0, HAILO_FILE_OPERATION_FAILURE, "Write error, errno = {}", errno);
        offset += sent;
    }
    return HAILO_SUCCESS;
}
// Reads exactly 'size' bytes, looping over partial reads.
// Returns HAILO_COMMUNICATION_CLOSED when the peer closes the connection (EOF).
hailo_status OsRawConnection::read(uint8_t *buffer, size_t size)
{
    size_t bytes_read = 0;
    while (bytes_read < size) {
        ssize_t result = ::read(m_fd, buffer + bytes_read, size - bytes_read);
        if (0 == result) {
            return HAILO_COMMUNICATION_CLOSED; // 0 means the communication is closed
        }
        CHECK(result >= 0, HAILO_FILE_OPERATION_FAILURE, "Read error, errno = {}", errno);
        bytes_read += result;
    }
    return HAILO_SUCCESS;
}
// Shuts down both directions (unblocking any pending read/write on this
// socket), then closes the file descriptor.
hailo_status OsRawConnection::close()
{
    int result = ::shutdown(m_fd, SHUT_RDWR);
    CHECK(0 == result, HAILO_CLOSE_FAILURE, "Socket shutdown failed, errno = {}", errno);
    result = ::close(m_fd);
    CHECK(0 == result, HAILO_CLOSE_FAILURE, "Socket close failed, errno = {}", errno);
    return HAILO_SUCCESS;
}

View File

@@ -0,0 +1,54 @@
/**
* Copyright (c) 2024 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file raw_connection_internal.hpp
 * @brief Raw Connection Header for sockets based communication
**/
#ifndef _POSIX_RAW_CONNECTION_INTERNAL_HPP_
#define _POSIX_RAW_CONNECTION_INTERNAL_HPP_
#include "hailo/expected.hpp"
#include "hrpc/raw_connection.hpp"
#include <memory>
using namespace hailort;
namespace hrpc
{
// Socket-based connection context - no extra state beyond the base class.
class OsConnectionContext : public ConnectionContext
{
public:
    static Expected<std::shared_ptr<ConnectionContext>> create_shared(bool is_accepting);
    OsConnectionContext(bool is_accepting) : ConnectionContext(is_accepting) {}
    virtual ~OsConnectionContext() = default;
};

// RawConnection implementation over a unix-domain socket.
class OsRawConnection : public RawConnection
{
public:
    static Expected<std::shared_ptr<RawConnection>> create_shared(std::shared_ptr<OsConnectionContext> context);
    virtual ~OsRawConnection() = default;

    virtual Expected<std::shared_ptr<RawConnection>> accept() override;
    virtual hailo_status connect() override;
    virtual hailo_status write(const uint8_t *buffer, size_t size) override;
    virtual hailo_status read(uint8_t *buffer, size_t size) override;
    virtual hailo_status close() override;

    OsRawConnection(int fd, std::shared_ptr<OsConnectionContext> context) : m_fd(fd), m_context(context) {}

private:
    int m_fd;  // socket fd (the listening socket on the accepting side)
    std::shared_ptr<OsConnectionContext> m_context;
};
} // namespace hrpc
#endif // _POSIX_RAW_CONNECTION_INTERNAL_HPP_

View File

@@ -0,0 +1,57 @@
/**
* Copyright (c) 2024 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file raw_connection_internal.cpp
* @brief Windows Sockets Raw Connection
**/
#include "hrpc/os/windows/raw_connection_internal.hpp"
#include "common/logger_macros.hpp"
#include "common/utils.hpp"
#include "hailo/hailort.h"
using namespace hrpc;
// Windows socket transport is not implemented yet - every entry point below is
// a stub returning HAILO_NOT_IMPLEMENTED.
Expected<std::shared_ptr<ConnectionContext>> OsConnectionContext::create_shared(bool is_accepting)
{
    (void)is_accepting;
    return make_unexpected(HAILO_NOT_IMPLEMENTED);
}

Expected<std::shared_ptr<RawConnection>> OsRawConnection::create_shared(std::shared_ptr<OsConnectionContext> context)
{
    (void)context;
    return make_unexpected(HAILO_NOT_IMPLEMENTED);
}

Expected<std::shared_ptr<RawConnection>> OsRawConnection::accept()
{
    return make_unexpected(HAILO_NOT_IMPLEMENTED);
}

hailo_status OsRawConnection::connect()
{
    return HAILO_NOT_IMPLEMENTED;
}

hailo_status OsRawConnection::write(const uint8_t *buffer, size_t size)
{
    (void)buffer;
    (void)size;
    return HAILO_NOT_IMPLEMENTED;
}

hailo_status OsRawConnection::read(uint8_t *buffer, size_t size)
{
    (void)buffer;
    (void)size;
    return HAILO_NOT_IMPLEMENTED;
}

hailo_status OsRawConnection::close()
{
    return HAILO_NOT_IMPLEMENTED;
}

View File

@@ -0,0 +1,48 @@
/**
* Copyright (c) 2024 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file raw_connection_internal.hpp
 * @brief Raw Connection Header for sockets based communication
**/
#ifndef _WINDOWS_RAW_CONNECTION_INTERNAL_HPP_
#define _WINDOWS_RAW_CONNECTION_INTERNAL_HPP_
#include "hailo/expected.hpp"
#include "hrpc/raw_connection.hpp"
#include <memory>
using namespace hailort;
namespace hrpc
{
// Placeholder socket context for Windows; create_shared always fails (not implemented).
class OsConnectionContext : public ConnectionContext
{
public:
    static Expected<std::shared_ptr<ConnectionContext>> create_shared(bool is_accepting);
};

// Placeholder socket connection for Windows - all operations return HAILO_NOT_IMPLEMENTED.
class OsRawConnection : public RawConnection
{
public:
    static Expected<std::shared_ptr<RawConnection>> create_shared(std::shared_ptr<OsConnectionContext> context);
    OsRawConnection() = default;
    virtual ~OsRawConnection() = default;

    virtual Expected<std::shared_ptr<RawConnection>> accept() override;
    virtual hailo_status connect() override;
    virtual hailo_status write(const uint8_t *buffer, size_t size) override;
    virtual hailo_status read(uint8_t *buffer, size_t size) override;
    virtual hailo_status close() override;

    explicit OsRawConnection(std::shared_ptr<OsConnectionContext> /*context*/) {}
};
} // namespace hrpc
#endif // _WINDOWS_RAW_CONNECTION_INTERNAL_HPP_

View File

@@ -0,0 +1,47 @@
/**
* Copyright (c) 2024 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file raw_connection.cpp
* @brief Raw Connection
**/
#include "hailo/vdevice.hpp"
#include "hrpc/raw_connection.hpp"
#include "hrpc/os/pcie/raw_connection_internal.hpp"
#ifdef _WIN32
#include "hrpc/os/windows/raw_connection_internal.hpp"
#else
#include "hrpc/os/posix/raw_connection_internal.hpp"
#endif
#define HAILO_FORCE_SOCKET_COM_ENV_VAR "HAILO_FORCE_SOCKET_COM"
using namespace hrpc;
// Chooses the transport for this process: unix sockets when forced via the
// HAILO_FORCE_SOCKET_COM env var (debug aid) or when VDevice::force_hrpc_client()
// reports there is no PCIe EP driver; PCIe otherwise.
Expected<std::shared_ptr<ConnectionContext>> ConnectionContext::create_shared(bool is_accepting)
{
    // The env var HAILO_FORCE_SOCKET_COM is supported for debug purposes
    char *socket_com = std::getenv(HAILO_FORCE_SOCKET_COM_ENV_VAR); // TODO: Remove duplication
    auto force_socket_com = (nullptr != socket_com) && ("1" == std::string(socket_com));
    if (force_socket_com || VDevice::force_hrpc_client()) { // If forcing hrpc service, its because we work without EP driver -> use sockets
        return OsConnectionContext::create_shared(is_accepting);
    } else {
        return PcieConnectionContext::create_shared(is_accepting);
    }
}
// Factory dispatching on the concrete ConnectionContext type:
// an OsConnectionContext yields a socket connection, anything else is PCIe.
Expected<std::shared_ptr<RawConnection>> RawConnection::create_shared(std::shared_ptr<ConnectionContext> context)
{
    if (auto os_context = std::dynamic_pointer_cast<OsConnectionContext>(context)) {
        return OsRawConnection::create_shared(os_context);
    }
    return PcieRawConnection::create_shared(std::dynamic_pointer_cast<PcieConnectionContext>(context));
}

View File

@@ -0,0 +1,58 @@
/**
* Copyright (c) 2024 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file raw_connection.hpp
* @brief Raw Connection Header
**/
#ifndef _RAW_CONNECTION_HPP_
#define _RAW_CONNECTION_HPP_
#include "hailo/expected.hpp"
#include "vdma/pcie_session.hpp"
#include <memory>
using namespace hailort;
namespace hrpc
{
// Base class describing which side of the connection we are on; concrete
// subclasses (OS sockets / PCIe) carry the transport-specific state.
class ConnectionContext
{
public:
// Selects the concrete context type (sockets vs. PCIe) - see raw_connection.cpp.
static Expected<std::shared_ptr<ConnectionContext>> create_shared(bool is_accepting);
// True on the server (accepting) side.
bool is_accepting() const { return m_is_accepting; }
ConnectionContext(bool is_accepting) : m_is_accepting(is_accepting) {}
virtual ~ConnectionContext() = default;
protected:
bool m_is_accepting;
};
// Abstract byte-stream transport used by the RPC layer. Implementations must
// provide blocking read/write of exactly 'size' bytes, plus accept/connect/close.
class RawConnection
{
public:
// Creates the transport matching the given context's concrete type.
static Expected<std::shared_ptr<RawConnection>> create_shared(std::shared_ptr<ConnectionContext> context);
RawConnection() = default;
virtual ~RawConnection() = default;
virtual Expected<std::shared_ptr<RawConnection>> accept() = 0;
virtual hailo_status connect() = 0;
virtual hailo_status write(const uint8_t *buffer, size_t size) = 0;
virtual hailo_status read(uint8_t *buffer, size_t size) = 0;
virtual hailo_status close() = 0;
protected:
// Default: block forever on I/O.
std::chrono::milliseconds m_timeout = std::chrono::milliseconds(HAILO_INFINITE);
};
} // namespace hrpc
#endif // _RAW_CONNECTION_HPP_

View File

@@ -0,0 +1,82 @@
/**
* Copyright (c) 2024 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file rpc_connection.cpp
* @brief RPC connection implementation
**/
#include "rpc_connection.hpp"
namespace hrpc
{
// Writes an RPC message: header (stamped with the protocol magic) followed by
// 'header.size' payload bytes. Returns HAILO_COMMUNICATION_CLOSED quietly (no
// CHECK logging) when the peer disconnected.
hailo_status RpcConnection::write_message(const rpc_message_header_t &header, const MemoryView &buffer) {
    // Stamp the magic on a local copy so the caller's header is left untouched.
    auto header_with_magic = header;
    header_with_magic.magic = RPC_MESSAGE_MAGIC;

    auto status = m_raw->write(reinterpret_cast<const uint8_t*>(&header_with_magic), sizeof(header_with_magic));
    if (HAILO_COMMUNICATION_CLOSED == status) {
        // Bug fix: was 'return make_unexpected(status)' - this function returns a
        // plain hailo_status, not an Expected<>.
        return status;
    }
    CHECK_SUCCESS(status);

    status = m_raw->write(buffer.data(), header.size);
    if (HAILO_COMMUNICATION_CLOSED == status) {
        return status;
    }
    CHECK_SUCCESS(status);

    return HAILO_SUCCESS;
}
// Reads one RPC message: fills 'header' (out-param), validates the magic, then
// reads 'header.size' payload bytes into a freshly allocated DMA-able buffer.
Expected<Buffer> RpcConnection::read_message(rpc_message_header_t &header) {
auto status = m_raw->read(reinterpret_cast<uint8_t*>(&header), sizeof(header));
if (HAILO_COMMUNICATION_CLOSED == status) {
// Peer disconnected - propagate without CHECK logging
return make_unexpected(status);
}
CHECK_SUCCESS_AS_EXPECTED(status);
CHECK_AS_EXPECTED(RPC_MESSAGE_MAGIC == header.magic, HAILO_INTERNAL_FAILURE, "Invalid magic! {} != {}",
header.magic, RPC_MESSAGE_MAGIC);
TRY(auto buffer, Buffer::create(header.size, BufferStorageParams::create_dma()));
status = m_raw->read(buffer.data(), header.size);
if (HAILO_COMMUNICATION_CLOSED == status) {
return make_unexpected(status);
}
CHECK_SUCCESS_AS_EXPECTED(status);
return buffer;
}
// Writes a raw payload buffer (no header). HAILO_COMMUNICATION_CLOSED is
// returned quietly when the peer disconnected.
hailo_status RpcConnection::write_buffer(const MemoryView &buffer)
{
    auto status = m_raw->write(buffer.data(), buffer.size());
    if (HAILO_COMMUNICATION_CLOSED == status) {
        // Bug fix: was 'return make_unexpected(status)' in a hailo_status function.
        return status;
    }
    CHECK_SUCCESS(status);
    return HAILO_SUCCESS;
}
// Reads exactly buffer.size() bytes into the caller-provided view.
// HAILO_COMMUNICATION_CLOSED is returned quietly when the peer disconnected.
hailo_status RpcConnection::read_buffer(MemoryView buffer)
{
    auto status = m_raw->read(buffer.data(), buffer.size());
    if (HAILO_COMMUNICATION_CLOSED == status) {
        // Bug fix: was 'return make_unexpected(status)' in a hailo_status function.
        return status;
    }
    CHECK_SUCCESS(status);
    return HAILO_SUCCESS;
}
// Closes the underlying transport; a default-constructed (raw-less) connection
// closes successfully as a no-op.
hailo_status RpcConnection::close()
{
    return (m_raw ? m_raw->close() : HAILO_SUCCESS);
}
} // namespace hrpc

View File

@@ -0,0 +1,54 @@
/**
* Copyright (c) 2024 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file rpc_connection.hpp
* @brief RPC Connection Header
**/
#ifndef _RPC_CONNECTION_HPP_
#define _RPC_CONNECTION_HPP_
#include "raw_connection.hpp"
#include "hailo/buffer.hpp"
#include "common/utils.hpp"
#define RPC_MESSAGE_MAGIC (0x8A554432)
namespace hrpc
{
// Wire header preceding every RPC message. Packed to 1-byte alignment so the
// struct layout matches the on-wire format exactly (16 bytes).
#pragma pack(push, 1)
struct rpc_message_header_t
{
uint32_t magic; // TODO: consider removing. check if hurts performance
uint32_t size; // Payload size in bytes (follows the header on the wire)
uint32_t message_id; // Correlates replies with requests / callbacks
uint32_t action_id; // HailoRpcActionID of the request/reply
};
#pragma pack(pop)
// Thin message-framing layer over a RawConnection: header + payload in, header
// + payload out. Copyable; copies share the same underlying raw connection.
class RpcConnection
{
public:
RpcConnection() = default;
explicit RpcConnection(std::shared_ptr<RawConnection> raw) : m_raw(raw) {}
// Writes 'header' (magic stamped internally) followed by 'header.size' bytes of 'buffer'.
hailo_status write_message(const rpc_message_header_t &header, const MemoryView &buffer);
// Fills 'header' and returns the payload; validates the magic.
Expected<Buffer> read_message(rpc_message_header_t &header);
hailo_status write_buffer(const MemoryView &buffer);
hailo_status read_buffer(MemoryView buffer);
hailo_status close();
private:
std::shared_ptr<RawConnection> m_raw;
};
} // namespace hrpc
#endif // _RPC_CONNECTION_HPP_

123
hailort/hrpc/server.cpp Normal file
View File

@@ -0,0 +1,123 @@
/**
* Copyright (c) 2024 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file server.cpp
* @brief RPC Server
**/
#include "server.hpp"
namespace hrpc
{
// Per-client context handed to dispatched actions; holds the owning server and
// the client's connection.
ServerContext::ServerContext(Server &server, RpcConnection connection) :
m_server(server), m_connection(connection) {}
// Forwards a callback notification to the server, which serializes and writes
// it on this context's connection (under the server's write lock).
hailo_status ServerContext::trigger_callback(uint32_t callback_id, hailo_status callback_status, std::function<hailo_status(RpcConnection)> write_buffers_callback)
{
return m_server.trigger_callback(callback_id, m_connection, callback_status, write_buffers_callback);
}
RpcConnection &ServerContext::connection()
{
return m_connection;
}
// Registers (or replaces) the handler for 'action_id'.
void Dispatcher::register_action(HailoRpcActionID action_id,
std::function<Expected<Buffer>(const MemoryView&, ServerContextPtr)> action)
{
m_actions[action_id] = action;
}
// Invokes the handler registered for 'action_id' and returns its serialized
// reply; fails with HAILO_RPC_FAILED when no handler is registered.
Expected<Buffer> Dispatcher::call_action(HailoRpcActionID action_id, const MemoryView &request, ServerContextPtr server_context)
{
    // Single lookup instead of find() followed by operator[] (the original did
    // two hash lookups for every dispatched action).
    auto action_it = m_actions.find(action_id);
    if (m_actions.end() != action_it) {
        return action_it->second(request, server_context);
    }
    LOGGER__ERROR("Failed to find RPC action {}", action_id);
    return make_unexpected(HAILO_RPC_FAILED);
}
// Accept loop: blocks forever, spawning one detached thread per client.
// Returns only if create_client_connection() fails (via TRY).
// NOTE(review): detached threads capture 'this' - the Server must outlive all
// client threads; verify shutdown ordering at the call site.
hailo_status Server::serve()
{
while (true) {
TRY(auto client_connection, create_client_connection());
auto th = std::thread([this, client_connection]() { serve_client(client_connection); });
th.detach();
}
return HAILO_SUCCESS;
}
// Installs the action dispatcher (copied by value). Expected to be called
// once, before serve(); the dispatcher is read concurrently by client threads.
void Server::set_dispatcher(Dispatcher dispatcher)
{
m_dispatcher = dispatcher;
}
// Blocks until a client connects, then wraps the accepted raw connection in an
// RpcConnection.
Expected<RpcConnection> Server::create_client_connection()
{
TRY(auto server_connection, RawConnection::create_shared(m_connection_context));
TRY(auto conn, server_connection->accept());
return RpcConnection(conn);
}
// Per-client request loop: read message -> dispatch -> write reply, until the
// client disconnects. Replies are written under m_write_mutex because
// trigger_callback() may write on the same connection from other threads.
hailo_status Server::serve_client(RpcConnection client_connection)
{
auto server_context = make_shared_nothrow<ServerContext>(*this, client_connection);
CHECK_NOT_NULL(server_context, HAILO_OUT_OF_HOST_MEMORY);
while (true) {
rpc_message_header_t header;
auto request = client_connection.read_message(header);
if (HAILO_COMMUNICATION_CLOSED == request.status()) {
cleanup_client_resources(client_connection);
break; // Client EP is disconnected, exit this loop
}
CHECK_EXPECTED_AS_STATUS(request);
assert(header.action_id < static_cast<uint32_t>(HailoRpcActionID::MAX_VALUE));
TRY(auto reply, m_dispatcher.call_action(static_cast<HailoRpcActionID>(header.action_id), MemoryView(*request), server_context));
{
std::unique_lock<std::mutex> lock(m_write_mutex);
// Reply reuses the request header (same message_id/action_id), only the size changes.
header.size = static_cast<uint32_t>(reply.size());
auto status = client_connection.write_message(header, MemoryView(reply));
if ((HAILO_COMMUNICATION_CLOSED == status) || (HAILO_FILE_OPERATION_FAILURE == status)) {
lock.unlock(); // We need to acquire this lock when releasing the client resources (trigger cb)
cleanup_client_resources(client_connection);
break; // Client EP is disconnected, exit this loop
}
CHECK_SUCCESS(status);
}
}
return HAILO_SUCCESS;
}
// Pushes an async CALLBACK_CALLED notification to the client, optionally
// followed by output buffers (written by 'write_buffers_callback').
// Holds m_write_mutex for the whole write so it cannot interleave with replies
// written by serve_client().
hailo_status Server::trigger_callback(uint32_t callback_id, RpcConnection connection, hailo_status callback_status,
std::function<hailo_status(RpcConnection)> write_buffers_callback)
{
TRY(auto reply, CallbackCalledSerializer::serialize_reply(callback_status, callback_id));
std::unique_lock<std::mutex> lock(m_write_mutex);
rpc_message_header_t header;
header.action_id = static_cast<uint32_t>(HailoRpcActionID::CALLBACK_CALLED);
header.message_id = callback_id;
header.size = static_cast<uint32_t>(reply.size());
auto status = connection.write_message(header, MemoryView(reply));
if ((HAILO_COMMUNICATION_CLOSED == status) || (HAILO_FILE_OPERATION_FAILURE == status)) {
// Client disconnected - return quietly, without CHECK logging
return status;
}
CHECK_SUCCESS(status);
if (write_buffers_callback) {
status = write_buffers_callback(connection);
CHECK_SUCCESS(status);
}
return HAILO_SUCCESS;
}
} // namespace hrpc

78
hailort/hrpc/server.hpp Normal file
View File

@@ -0,0 +1,78 @@
#ifndef _SERVER_HPP_
/**
* Copyright (c) 2024 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file server.hpp
* @brief RPC Server Header
**/
#define _SERVER_HPP_
#include <functional>
#include <thread>
#include "rpc_connection.hpp"
#include "hailort_service/service_resource_manager.hpp"
#include "hrpc_protocol/serializer.hpp"
namespace hrpc
{
class Server;
// Per-client context passed to dispatched actions; lets an action trigger
// asynchronous callbacks on its client's connection via the owning server.
class ServerContext
{
public:
ServerContext(Server &server, RpcConnection connection);
// Sends a CALLBACK_CALLED notification; 'write_buffers_callback' (optional)
// writes any output buffers immediately after the notification.
hailo_status trigger_callback(uint32_t callback_id, hailo_status callback_status,
std::function<hailo_status(RpcConnection)> write_buffers_callback = nullptr);
RpcConnection &connection();
private:
Server &m_server;
RpcConnection m_connection;
};
using ServerContextPtr = std::shared_ptr<ServerContext>;
// Maps RPC action IDs to handler functions. Populated once via
// register_action(), then queried by server threads via call_action().
class Dispatcher
{
public:
Dispatcher() = default;
void register_action(HailoRpcActionID action_id,
std::function<Expected<Buffer>(const MemoryView&, ServerContextPtr)> action);
Expected<Buffer> call_action(HailoRpcActionID action_id, const MemoryView &request, ServerContextPtr server_context);
private:
std::unordered_map<HailoRpcActionID, std::function<Expected<Buffer>(const MemoryView&, ServerContextPtr)>> m_actions;
};
// Abstract RPC server: accepts clients and serves each on its own detached
// thread. Subclasses implement cleanup_client_resources() to release per-client
// state on disconnect.
class Server
{
public:
Server(std::shared_ptr<ConnectionContext> connection_context) : m_connection_context(connection_context) {};
virtual ~Server() = default;
// Blocking accept loop; returns only on accept failure.
hailo_status serve();
void set_dispatcher(Dispatcher dispatcher);
friend class ServerContext;
protected:
std::shared_ptr<ConnectionContext> m_connection_context;
private:
Expected<RpcConnection> create_client_connection();
hailo_status serve_client(RpcConnection client_connection);
hailo_status trigger_callback(uint32_t callback_id, RpcConnection connection, hailo_status callback_status,
std::function<hailo_status(RpcConnection)> write_buffers_callback = nullptr);
virtual hailo_status cleanup_client_resources(RpcConnection client_connection) = 0;
Dispatcher m_dispatcher;
// Serializes all writes on client connections (replies + async callbacks).
std::mutex m_write_mutex;
};
} // namespace hrpc
#endif // _SERVER_HPP_

View File

@@ -0,0 +1,24 @@
cmake_minimum_required(VERSION 3.0.0)

protobuf_generate_cpp(PROTO_RPC_SRC PROTO_RPC_HEADER rpc.proto)
get_filename_component(PROTO_HEADER_DIRECTORY ${PROTO_RPC_HEADER} DIRECTORY)

add_library(rpc_proto STATIC EXCLUDE_FROM_ALL ${PROTO_RPC_SRC} ${PROTO_RPC_HEADER})
# PUBLIC: consumers include the generated headers, which require protobuf-lite
target_link_libraries(rpc_proto PUBLIC libprotobuf-lite)
set_target_properties(rpc_proto PROPERTIES CXX_STANDARD 14 GENERATED TRUE POSITION_INDEPENDENT_CODE ON)
if(CMAKE_HOST_WIN32)
    # https://github.com/protocolbuffers/protobuf/tree/master/cmake#notes-on-compiler-warnings
    target_compile_options(rpc_proto PRIVATE /wd4244)
endif()

# Fixes vs. original: removed the duplicated get_filename_component() call, and
# removed the stray space inside the generator expressions ("$<BUILD_INTERFACE: x>"
# would leak a leading space into the include path).
target_include_directories(rpc_proto
    PUBLIC
    $<BUILD_INTERFACE:${PROTO_HEADER_DIRECTORY}>
    $<BUILD_INTERFACE:${Protobuf_INCLUDE_DIRS}>
)

set(SRC_FILES
    ${CMAKE_CURRENT_SOURCE_DIR}/serializer.cpp
)
set(HRPC_PROTOCOL_CPP_SOURCES ${SRC_FILES} PARENT_SCOPE)

View File

@@ -0,0 +1,207 @@
syntax = "proto3";
option optimize_for = LITE_RUNTIME;
// Top-level request envelope: exactly one concrete request per RPC message.
// Field numbers mirror the matching replies in RpcReply.
message RpcRequest {
oneof request {
VDevice_Create_Request create_vdevice_request = 1;
VDevice_Destroy_Request destroy_vdevice_request = 2;
VDevice_CreateInferModel_Request create_infer_model_request = 3;
InferModel_Destroy_Request destroy_infer_model_request = 4;
InferModel_CreateConfiguredInferModel_Request create_configured_infer_model_request = 5;
ConfiguredInferModel_Destroy_Request destroy_configured_infer_model_request = 6;
ConfiguredInferModel_SetSchedulerTimeout_Request set_scheduler_timeout_request = 7;
ConfiguredInferModel_SetSchedulerThreshold_Request set_scheduler_threshold_request = 8;
ConfiguredInferModel_SetSchedulerPriority_Request set_scheduler_priority_request = 9;
ConfiguredInferModel_GetHwLatencyMeasurement_Request get_hw_latency_measurement_request = 10;
ConfiguredInferModel_Activate_Request activate_request = 11;
ConfiguredInferModel_Deactivate_Request deactivate_request = 12;
ConfiguredInferModel_Shutdown_Request shutdown_request = 13;
ConfiguredInferModel_AsyncInfer_Request async_infer_request = 14;
}
}
// Top-level reply envelope. Fields 1-14 pair with RpcRequest; field 15 is the
// server-initiated callback notification (no matching request).
message RpcReply {
oneof reply {
VDevice_Create_Reply create_vdevice_reply = 1;
VDevice_Destroy_Reply destroy_vdevice_reply = 2;
VDevice_CreateInferModel_Reply create_infer_model_reply = 3;
InferModel_Destroy_Reply destroy_infer_model_reply = 4;
InferModel_CreateConfiguredInferModel_Reply create_configured_infer_model_reply = 5;
ConfiguredInferModel_Destroy_Reply destroy_configured_infer_model_reply = 6;
ConfiguredInferModel_SetSchedulerTimeout_Reply set_scheduler_timeout_reply = 7;
ConfiguredInferModel_SetSchedulerThreshold_Reply set_scheduler_threshold_reply = 8;
ConfiguredInferModel_SetSchedulerPriority_Reply set_scheduler_priority_reply = 9;
ConfiguredInferModel_GetHwLatencyMeasurement_Reply get_hw_latency_measurement_reply = 10;
ConfiguredInferModel_Activate_Reply activate_reply = 11;
ConfiguredInferModel_Deactivate_Reply deactivate_reply = 12;
ConfiguredInferModel_Shutdown_Reply shutdown_reply = 13;
ConfiguredInferModel_AsyncInfer_Reply async_infer_reply = 14;
CallbackCalled_Reply callback_called_reply = 15;
}
}
// Opaque server-side object identifier (vdevice / infer model / configured model).
message HailoObjectHandle {
uint32 id = 1;
}
// Opaque identifier correlating async-infer requests with their callbacks.
message HailoCallbackHandle {
uint32 id = 1;
}
// 'status' fields below carry a hailo_status value.
message VDeviceParamsProto {
uint32 scheduling_algorithm = 1;
string group_id = 2;
}
message VDevice_Create_Request {
VDeviceParamsProto params = 1;
}
message VDevice_Create_Reply {
uint32 status = 1;
HailoObjectHandle vdevice_handle = 2;
}
message VDevice_Destroy_Request {
HailoObjectHandle vdevice_handle = 1;
}
message VDevice_Destroy_Reply {
uint32 status = 1;
}
message VDevice_CreateInferModel_Request {
HailoObjectHandle vdevice_handle = 1;
uint64 hef_size = 2;
// Protocol note: After this message, server expects to get HEF data (buffer of size 'hef_size')
}
message VDevice_CreateInferModel_Reply {
uint32 status = 1;
HailoObjectHandle infer_model_handle = 2;
}
message InferModel_Destroy_Request {
HailoObjectHandle infer_model_handle = 1;
}
message InferModel_Destroy_Reply {
uint32 status = 1;
}
// Per-stream configuration; format/nms fields mirror hailort's infer stream params.
message InferStreamParamsProto {
string name = 1;
uint32 format_order = 2;
uint32 format_type = 3;
float nms_score_threshold = 4;
float nms_iou_threshold = 5;
uint32 nms_max_proposals_per_class = 6;
uint32 nms_max_accumulated_mask_size = 7;
};
message InferModel_CreateConfiguredInferModel_Request {
HailoObjectHandle infer_model_handle = 1;
HailoObjectHandle vdevice_handle = 2;
repeated InferStreamParamsProto input_infer_streams = 3;
repeated InferStreamParamsProto output_infer_streams = 4;
uint32 batch_size = 5;
uint32 power_mode = 6;
uint32 latency_flag = 7;
}
message InferModel_CreateConfiguredInferModel_Reply {
uint32 status = 1;
HailoObjectHandle configured_infer_model_handle = 2;
uint32 async_queue_size = 3;
}
message ConfiguredInferModel_Destroy_Request {
HailoObjectHandle configured_infer_model_handle = 1;
}
message ConfiguredInferModel_Destroy_Reply {
uint32 status = 1;
}
message ConfiguredInferModel_SetSchedulerTimeout_Request {
HailoObjectHandle configured_infer_model_handle = 1;
// Timeout in milliseconds - presumably; verify against the serializer. TODO confirm
uint32 timeout = 2;
}
message ConfiguredInferModel_SetSchedulerTimeout_Reply {
uint32 status = 1;
}
message ConfiguredInferModel_SetSchedulerThreshold_Request {
HailoObjectHandle configured_infer_model_handle = 1;
uint32 threshold = 2;
}
message ConfiguredInferModel_SetSchedulerThreshold_Reply {
uint32 status = 1;
}
message ConfiguredInferModel_SetSchedulerPriority_Request {
HailoObjectHandle configured_infer_model_handle = 1;
uint32 priority = 2;
}
message ConfiguredInferModel_SetSchedulerPriority_Reply {
uint32 status = 1;
}
message ConfiguredInferModel_GetHwLatencyMeasurement_Request {
HailoObjectHandle configured_infer_model_handle = 1;
}
message ConfiguredInferModel_GetHwLatencyMeasurement_Reply {
uint32 status = 1;
uint32 avg_hw_latency = 2;
}
message ConfiguredInferModel_Activate_Request {
HailoObjectHandle configured_infer_model_handle = 1;
}
message ConfiguredInferModel_Activate_Reply {
uint32 status = 1;
}
message ConfiguredInferModel_Deactivate_Request {
HailoObjectHandle configured_infer_model_handle = 1;
}
message ConfiguredInferModel_Deactivate_Reply {
uint32 status = 1;
}
message ConfiguredInferModel_Shutdown_Request {
HailoObjectHandle configured_infer_model_handle = 1;
}
message ConfiguredInferModel_Shutdown_Reply {
uint32 status = 1;
}
message ConfiguredInferModel_AsyncInfer_Request {
HailoObjectHandle configured_infer_model_handle = 1;
HailoObjectHandle infer_model_handle = 2;
HailoCallbackHandle callback_handle = 3;
// Protocol note: After this message, server expects to get the input buffers, one after the other, in order
}
message ConfiguredInferModel_AsyncInfer_Reply {
uint32 status = 1;
}
// Server-initiated notification that an async-infer callback fired.
message CallbackCalled_Reply {
uint32 status = 1;
HailoCallbackHandle callback_handle = 2;
// Protocol note: After this message, and only if status is HAILO_SUCCESS, server expects to get the output buffers, one after the other, in order
}

View File

@@ -0,0 +1,834 @@
/**
* Copyright (c) 2024 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file serializer.cpp
* @brief HRPC Serialization implementation
**/
#include "serializer.hpp"
#include "hailo/hailort.h"
#include "hailo/hailort_common.hpp"
#include "hailo/hailort_defaults.hpp"
#include "common/utils.hpp"
// https://github.com/protocolbuffers/protobuf/tree/master/cmake#notes-on-compiler-warnings
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable: 4244 4267 4127)
#else
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
#include "rpc.pb.h"
#if defined(_MSC_VER)
#pragma warning(pop)
#else
#pragma GCC diagnostic pop
#endif
namespace hailort
{
// Serializes a 'CreateVDevice' request (scheduling algorithm + group id).
Expected<Buffer> CreateVDeviceSerializer::serialize_request(const hailo_vdevice_params_t &params)
{
VDevice_Create_Request request;
auto proto_params = request.mutable_params();
proto_params->set_scheduling_algorithm(params.scheduling_algorithm);
proto_params->set_group_id(params.group_id == nullptr ? "" : std::string(params.group_id));
// TODO (HRT-13983) - check if we can use GetCachedSize
TRY(auto serialized_request, Buffer::create(request.ByteSizeLong(), BufferStorageParams::create_dma()));
CHECK_AS_EXPECTED(request.SerializeToArray(serialized_request.data(), static_cast<int>(serialized_request.size())),
HAILO_RPC_FAILED, "Failed to serialize 'CreateVDevice'");
return serialized_request;
}
// Deserializes a 'CreateVDevice' request into hailo_vdevice_params_t.
// NOTE(review): the returned 'group_id' pointer comes from c_str() of a string
// owned by the local 'request' object, which is destroyed on return - verify
// callers copy the string before the dangling pointer is used.
Expected<hailo_vdevice_params_t> CreateVDeviceSerializer::deserialize_request(const MemoryView &serialized_request)
{
VDevice_Create_Request request;
CHECK_AS_EXPECTED(request.ParseFromArray(serialized_request.data(), static_cast<int>(serialized_request.size())),
HAILO_RPC_FAILED, "Failed to de-serialize 'CreateVDevice'");
bool multi_process_service_flag = false;
hailo_vdevice_params_t res = {
1,
nullptr,
static_cast<hailo_scheduling_algorithm_e>(request.params().scheduling_algorithm()),
request.params().group_id().c_str(),
multi_process_service_flag
};
return res;
}
// Serializes a 'CreateVDevice' reply (status + new vdevice handle).
Expected<Buffer> CreateVDeviceSerializer::serialize_reply(hailo_status status, rpc_object_handle_t vdevice_handle)
{
VDevice_Create_Reply reply;
reply.set_status(status);
auto proto_vdevice_handle = reply.mutable_vdevice_handle();
proto_vdevice_handle->set_id(vdevice_handle);
TRY(auto serialized_reply, Buffer::create(reply.ByteSizeLong(), BufferStorageParams::create_dma()));
CHECK_AS_EXPECTED(reply.SerializeToArray(serialized_reply.data(), static_cast<int>(serialized_reply.size())),
HAILO_RPC_FAILED, "Failed to serialize 'CreateVDevice'");
return serialized_reply;
}
// Deserializes a 'CreateVDevice' reply into (status, vdevice handle).
Expected<std::tuple<hailo_status, rpc_object_handle_t>> CreateVDeviceSerializer::deserialize_reply(const MemoryView &serialized_reply)
{
VDevice_Create_Reply reply;
CHECK_AS_EXPECTED(reply.ParseFromArray(serialized_reply.data(), static_cast<int>(serialized_reply.size())),
HAILO_RPC_FAILED, "Failed to de-serialize 'CreateVDevice'");
return std::make_tuple(static_cast<hailo_status>(reply.status()), reply.vdevice_handle().id());
}
// Serializes a 'DestroyVDevice' request (handle only).
Expected<Buffer> DestroyVDeviceSerializer::serialize_request(rpc_object_handle_t vdevice_handle)
{
VDevice_Destroy_Request request;
auto proto_vdevice_handle= request.mutable_vdevice_handle();
proto_vdevice_handle->set_id(vdevice_handle);
// TODO (HRT-13983) - check if we can use GetCachedSize
TRY(auto serialized_request, Buffer::create(request.ByteSizeLong(), BufferStorageParams::create_dma()));
CHECK_AS_EXPECTED(request.SerializeToArray(serialized_request.data(), static_cast<int>(serialized_request.size())),
HAILO_RPC_FAILED, "Failed to serialize 'DestroyVDevice'");
return serialized_request;
}
// Deserializes a 'DestroyVDevice' request, returning the vdevice handle.
Expected<rpc_object_handle_t> DestroyVDeviceSerializer::deserialize_request(const MemoryView &serialized_request)
{
VDevice_Destroy_Request request;
CHECK_AS_EXPECTED(request.ParseFromArray(serialized_request.data(), static_cast<int>(serialized_request.size())),
HAILO_RPC_FAILED, "Failed to de-serialize 'DestroyVDevice'");
return request.vdevice_handle().id();
}
// Serializes a 'DestroyVDevice' reply (status only).
Expected<Buffer> DestroyVDeviceSerializer::serialize_reply(hailo_status status)
{
VDevice_Destroy_Reply reply;
reply.set_status(status);
TRY(auto serialized_reply, Buffer::create(reply.ByteSizeLong(), BufferStorageParams::create_dma()));
CHECK_AS_EXPECTED(reply.SerializeToArray(serialized_reply.data(), static_cast<int>(serialized_reply.size())),
HAILO_RPC_FAILED, "Failed to serialize 'DestroyVDevice'");
return serialized_reply;
}
// Deserializes a 'DestroyVDevice' reply and returns the embedded status.
hailo_status DestroyVDeviceSerializer::deserialize_reply(const MemoryView &serialized_reply)
{
    VDevice_Destroy_Reply reply;
    // CHECK (not CHECK_AS_EXPECTED): this function returns a plain hailo_status,
    // not an Expected<>, so the *_AS_EXPECTED variant's make_unexpected return is wrong here.
    CHECK(reply.ParseFromArray(serialized_reply.data(), static_cast<int>(serialized_reply.size())),
        HAILO_RPC_FAILED, "Failed to de-serialize 'DestroyVDevice'");
    return static_cast<hailo_status>(reply.status());
}
// Serializes a 'CreateInferModel' request; the HEF itself is streamed
// separately right after this message (see proto comment).
// NOTE(review): the error strings below say 'CreateVInferModel' - looks like a
// typo for 'CreateInferModel'; left as-is since they are runtime strings.
Expected<Buffer> CreateInferModelSerializer::serialize_request(rpc_object_handle_t vdevice_handle, uint64_t hef_size)
{
VDevice_CreateInferModel_Request request;
auto proto_vdevice_handle = request.mutable_vdevice_handle();
proto_vdevice_handle->set_id(vdevice_handle);
request.set_hef_size(hef_size);
// TODO (HRT-13983) - check if we can use GetCachedSize
TRY(auto serialized_request, Buffer::create(request.ByteSizeLong(), BufferStorageParams::create_dma()));
CHECK_AS_EXPECTED(request.SerializeToArray(serialized_request.data(), static_cast<int>(serialized_request.size())),
HAILO_RPC_FAILED, "Failed to serialize 'CreateVInferModel'");
return serialized_request;
}
// Deserializes a 'CreateInferModel' request into (vdevice handle, hef size).
Expected<std::tuple<rpc_object_handle_t, uint64_t>> CreateInferModelSerializer::deserialize_request(const MemoryView &serialized_request)
{
VDevice_CreateInferModel_Request request;
CHECK_AS_EXPECTED(request.ParseFromArray(serialized_request.data(), static_cast<int>(serialized_request.size())),
HAILO_RPC_FAILED, "Failed to de-serialize 'CreateVInferModel'");
return std::make_tuple(request.vdevice_handle().id(), request.hef_size());
}
// Serializes a 'CreateInferModel' reply (status + new infer model handle).
Expected<Buffer> CreateInferModelSerializer::serialize_reply(hailo_status status, rpc_object_handle_t infer_model_handle)
{
VDevice_CreateInferModel_Reply reply;
reply.set_status(status);
auto proto_infer_model_handle = reply.mutable_infer_model_handle();
proto_infer_model_handle->set_id(infer_model_handle);
TRY(auto serialized_reply, Buffer::create(reply.ByteSizeLong(), BufferStorageParams::create_dma()));
CHECK_AS_EXPECTED(reply.SerializeToArray(serialized_reply.data(), static_cast<int>(serialized_reply.size())),
HAILO_RPC_FAILED, "Failed to serialize 'CreateVInferModel'");
return serialized_reply;
}
// Deserializes a 'CreateInferModel' reply into (status, infer model handle).
Expected<std::tuple<hailo_status, rpc_object_handle_t>> CreateInferModelSerializer::deserialize_reply(const MemoryView &serialized_reply)
{
VDevice_CreateInferModel_Reply reply;
CHECK_AS_EXPECTED(reply.ParseFromArray(serialized_reply.data(), static_cast<int>(serialized_reply.size())),
HAILO_RPC_FAILED, "Failed to de-serialize 'CreateVInferModel'");
return std::make_tuple(static_cast<hailo_status>(reply.status()), reply.infer_model_handle().id());
}
// Serializes a 'DestroyInferModel' request (handle only).
Expected<Buffer> DestroyInferModelSerializer::serialize_request(rpc_object_handle_t infer_model_handle)
{
InferModel_Destroy_Request request;
auto proto_infer_model_handle = request.mutable_infer_model_handle();
proto_infer_model_handle->set_id(infer_model_handle);
// TODO (HRT-13983) - check if we can use GetCachedSize
TRY(auto serialized_request, Buffer::create(request.ByteSizeLong(), BufferStorageParams::create_dma()));
CHECK_AS_EXPECTED(request.SerializeToArray(serialized_request.data(), static_cast<int>(serialized_request.size())),
HAILO_RPC_FAILED, "Failed to serialize 'DestroyInferModel'");
return serialized_request;
}
// Deserializes a 'DestroyInferModel' request, returning the model handle.
Expected<rpc_object_handle_t> DestroyInferModelSerializer::deserialize_request(const MemoryView &serialized_request)
{
InferModel_Destroy_Request request;
CHECK_AS_EXPECTED(request.ParseFromArray(serialized_request.data(), static_cast<int>(serialized_request.size())),
HAILO_RPC_FAILED, "Failed to de-serialize 'DestroyInferModel'");
return request.infer_model_handle().id();
}
// Serializes a 'DestroyInferModel' reply (status only).
Expected<Buffer> DestroyInferModelSerializer::serialize_reply(hailo_status status)
{
InferModel_Destroy_Reply reply;
reply.set_status(status);
TRY(auto serialized_reply, Buffer::create(reply.ByteSizeLong(), BufferStorageParams::create_dma()));
CHECK_AS_EXPECTED(reply.SerializeToArray(serialized_reply.data(), static_cast<int>(serialized_reply.size())),
HAILO_RPC_FAILED, "Failed to serialize 'DestroyInferModel'");
return serialized_reply;
}
// Deserializes a 'DestroyInferModel' reply and returns the embedded status.
hailo_status DestroyInferModelSerializer::deserialize_reply(const MemoryView &serialized_reply)
{
    InferModel_Destroy_Reply reply;
    // CHECK (not CHECK_AS_EXPECTED): this function returns a plain hailo_status,
    // not an Expected<>, so the *_AS_EXPECTED variant's make_unexpected return is wrong here.
    CHECK(reply.ParseFromArray(serialized_reply.data(), static_cast<int>(serialized_reply.size())),
        HAILO_RPC_FAILED, "Failed to de-serialize 'DestroyInferModel'");
    return static_cast<hailo_status>(reply.status());
}
// Serializes a 'CreateConfiguredInferModel' request: handles, per-stream
// input/output params, batch size, power mode and latency flag.
Expected<Buffer> CreateConfiguredInferModelSerializer::serialize_request(rpc_create_configured_infer_model_request_params_t params)
{
InferModel_CreateConfiguredInferModel_Request request;
auto proto_infer_model_handle = request.mutable_infer_model_handle();
proto_infer_model_handle->set_id(params.infer_model_handle);
auto proto_vdevide_handle = request.mutable_vdevice_handle();
proto_vdevide_handle->set_id(params.vdevice_handle);
// Each map entry is (stream name -> stream params)
for (auto &input_stream_params : params.input_streams_params) {
auto proto_input_stream = request.add_input_infer_streams();
proto_input_stream->set_name(input_stream_params.first);
proto_input_stream->set_format_order(input_stream_params.second.format_order);
proto_input_stream->set_format_type(input_stream_params.second.format_type);
proto_input_stream->set_nms_score_threshold(input_stream_params.second.nms_score_threshold);
proto_input_stream->set_nms_iou_threshold(input_stream_params.second.nms_iou_threshold);
proto_input_stream->set_nms_max_proposals_per_class(input_stream_params.second.nms_max_proposals_per_class);
proto_input_stream->set_nms_max_accumulated_mask_size(input_stream_params.second.nms_max_accumulated_mask_size);
}
for (auto &output_stream_params : params.output_streams_params) {
auto proto_output_stream = request.add_output_infer_streams();
proto_output_stream->set_name(output_stream_params.first);
proto_output_stream->set_format_order(output_stream_params.second.format_order);
proto_output_stream->set_format_type(output_stream_params.second.format_type);
proto_output_stream->set_nms_score_threshold(output_stream_params.second.nms_score_threshold);
proto_output_stream->set_nms_iou_threshold(output_stream_params.second.nms_iou_threshold);
proto_output_stream->set_nms_max_proposals_per_class(output_stream_params.second.nms_max_proposals_per_class);
proto_output_stream->set_nms_max_accumulated_mask_size(output_stream_params.second.nms_max_accumulated_mask_size);
}
request.set_batch_size(static_cast<uint32_t>(params.batch_size));
request.set_power_mode(static_cast<uint32_t>(params.power_mode));
request.set_latency_flag(static_cast<uint32_t>(params.latency_flag));
// TODO (HRT-13983) - check if we can use GetCachedSize
TRY(auto serialized_request, Buffer::create(request.ByteSizeLong(), BufferStorageParams::create_dma()));
CHECK_AS_EXPECTED(request.SerializeToArray(serialized_request.data(), static_cast<int>(serialized_request.size())),
HAILO_RPC_FAILED, "Failed to serialize 'CreateConfiguredInferModel'");
return serialized_request;
}
// Deserializes a 'CreateConfiguredInferModel' request into the params struct,
// rebuilding the per-stream maps keyed by stream name.
Expected<rpc_create_configured_infer_model_request_params_t> CreateConfiguredInferModelSerializer::deserialize_request(const MemoryView &serialized_request)
{
rpc_create_configured_infer_model_request_params_t request_params;
InferModel_CreateConfiguredInferModel_Request request;
CHECK_AS_EXPECTED(request.ParseFromArray(serialized_request.data(), static_cast<int>(serialized_request.size())),
HAILO_RPC_FAILED, "Failed to de-serialize 'CreateConfiguredInferModel'");
request_params.infer_model_handle = request.infer_model_handle().id();
request_params.vdevice_handle = request.vdevice_handle().id();
for (auto input_stream: request.input_infer_streams()) {
rpc_stream_params_t current_stream_params;
current_stream_params.format_order = input_stream.format_order();
current_stream_params.format_type = input_stream.format_type();
current_stream_params.nms_score_threshold = input_stream.nms_score_threshold();
current_stream_params.nms_iou_threshold = input_stream.nms_iou_threshold();
current_stream_params.nms_max_proposals_per_class = input_stream.nms_max_proposals_per_class();
current_stream_params.nms_max_accumulated_mask_size = input_stream.nms_max_accumulated_mask_size();
request_params.input_streams_params.emplace(input_stream.name(), current_stream_params);
}
for (auto output_stream: request.output_infer_streams()) {
rpc_stream_params_t current_stream_params;
current_stream_params.format_order = output_stream.format_order();
current_stream_params.format_type = output_stream.format_type();
current_stream_params.nms_score_threshold = output_stream.nms_score_threshold();
current_stream_params.nms_iou_threshold = output_stream.nms_iou_threshold();
current_stream_params.nms_max_proposals_per_class = output_stream.nms_max_proposals_per_class();
current_stream_params.nms_max_accumulated_mask_size = output_stream.nms_max_accumulated_mask_size();
request_params.output_streams_params.emplace(output_stream.name(), current_stream_params);
}
request_params.batch_size = static_cast<uint16_t>(request.batch_size());
request_params.power_mode = static_cast<hailo_power_mode_t>(request.power_mode());
request_params.latency_flag = static_cast<hailo_latency_measurement_flags_t>(request.latency_flag());
return request_params;
}
// Serializes a 'CreateConfiguredInferModel' reply
// (status + new handle + async queue depth).
Expected<Buffer> CreateConfiguredInferModelSerializer::serialize_reply(hailo_status status, rpc_object_handle_t configured_infer_handle,
uint32_t async_queue_size)
{
InferModel_CreateConfiguredInferModel_Reply reply;
reply.set_status(status);
auto proto_configured_infer_model_handle = reply.mutable_configured_infer_model_handle();
proto_configured_infer_model_handle->set_id(configured_infer_handle);
reply.set_async_queue_size(async_queue_size);
TRY(auto serialized_reply, Buffer::create(reply.ByteSizeLong(), BufferStorageParams::create_dma()));
CHECK_AS_EXPECTED(reply.SerializeToArray(serialized_reply.data(), static_cast<int>(serialized_reply.size())),
HAILO_RPC_FAILED, "Failed to serialize 'CreateConfiguredInferModel'");
return serialized_reply;
}
// Deserializes a 'CreateConfiguredInferModel' reply into
// (status, configured model handle, async queue size).
Expected<std::tuple<hailo_status, rpc_object_handle_t, uint32_t>> CreateConfiguredInferModelSerializer::deserialize_reply(
const MemoryView &serialized_reply)
{
InferModel_CreateConfiguredInferModel_Reply reply;
CHECK_AS_EXPECTED(reply.ParseFromArray(serialized_reply.data(), static_cast<int>(serialized_reply.size())),
HAILO_RPC_FAILED, "Failed to de-serialize 'CreateConfiguredInferModel'");
return std::make_tuple(static_cast<hailo_status>(reply.status()), reply.configured_infer_model_handle().id(), reply.async_queue_size());
}
// Builds the 'DestroyConfiguredInferModel' request: only the handle of the
// configured infer model to destroy.
Expected<Buffer> DestroyConfiguredInferModelSerializer::serialize_request(rpc_object_handle_t configured_infer_model_handle)
{
    ConfiguredInferModel_Destroy_Request request;
    request.mutable_configured_infer_model_handle()->set_id(configured_infer_model_handle);

    // TODO (HRT-13983) - check if we can use GetCachedSize
    TRY(auto serialized_request, Buffer::create(request.ByteSizeLong(), BufferStorageParams::create_dma()));
    CHECK_AS_EXPECTED(request.SerializeToArray(serialized_request.data(), static_cast<int>(serialized_request.size())),
        HAILO_RPC_FAILED, "Failed to serialize 'DestroyConfiguredInferModel'");

    return serialized_request;
}

// Parses a 'DestroyConfiguredInferModel' request and returns the handle to destroy.
Expected<rpc_object_handle_t> DestroyConfiguredInferModelSerializer::deserialize_request(const MemoryView &serialized_request)
{
    ConfiguredInferModel_Destroy_Request request;
    CHECK_AS_EXPECTED(request.ParseFromArray(serialized_request.data(), static_cast<int>(serialized_request.size())),
        HAILO_RPC_FAILED, "Failed to de-serialize 'DestroyConfiguredInferModel'");

    return request.configured_infer_model_handle().id();
}

// Builds the 'DestroyConfiguredInferModel' reply carrying the operation status.
Expected<Buffer> DestroyConfiguredInferModelSerializer::serialize_reply(hailo_status status)
{
    ConfiguredInferModel_Destroy_Reply reply;
    reply.set_status(status);

    TRY(auto serialized_reply, Buffer::create(reply.ByteSizeLong(), BufferStorageParams::create_dma()));
    CHECK_AS_EXPECTED(reply.SerializeToArray(serialized_reply.data(), static_cast<int>(serialized_reply.size())),
        HAILO_RPC_FAILED, "Failed to serialize 'DestroyConfiguredInferModel'");

    return serialized_reply;
}

// Parses a 'DestroyConfiguredInferModel' reply, returning HAILO_SUCCESS only if both
// the parse and the remote operation succeeded.
hailo_status DestroyConfiguredInferModelSerializer::deserialize_reply(const MemoryView &serialized_reply)
{
    ConfiguredInferModel_Destroy_Reply reply;
    // Fixed: the failure message previously named 'CreateConfiguredInferModel',
    // which misattributed parse errors of this RPC to the wrong action.
    CHECK_AS_EXPECTED(reply.ParseFromArray(serialized_reply.data(), static_cast<int>(serialized_reply.size())),
        HAILO_RPC_FAILED, "Failed to de-serialize 'DestroyConfiguredInferModel'");
    CHECK_SUCCESS(static_cast<hailo_status>(reply.status()));

    return HAILO_SUCCESS;
}
// Builds the 'SetSchedulerTimeout' request: the target configured model handle plus
// the timeout, transmitted as a millisecond count.
Expected<Buffer> SetSchedulerTimeoutSerializer::serialize_request(rpc_object_handle_t configured_infer_model_handle, const std::chrono::milliseconds &timeout)
{
    ConfiguredInferModel_SetSchedulerTimeout_Request request;
    request.mutable_configured_infer_model_handle()->set_id(configured_infer_model_handle);
    request.set_timeout(static_cast<uint32_t>(timeout.count()));

    // TODO (HRT-13983) - check if we can use GetCachedSize
    TRY(auto request_buffer, Buffer::create(request.ByteSizeLong(), BufferStorageParams::create_dma()));
    const bool serialized = request.SerializeToArray(request_buffer.data(), static_cast<int>(request_buffer.size()));
    CHECK_AS_EXPECTED(serialized, HAILO_RPC_FAILED, "Failed to serialize 'SetSchedulerTimeout'");

    return request_buffer;
}

// Parses a 'SetSchedulerTimeout' request into (handle, timeout-in-milliseconds).
Expected<std::tuple<rpc_object_handle_t, std::chrono::milliseconds>> SetSchedulerTimeoutSerializer::deserialize_request(
    const MemoryView &serialized_request)
{
    ConfiguredInferModel_SetSchedulerTimeout_Request request;
    const bool parsed = request.ParseFromArray(serialized_request.data(), static_cast<int>(serialized_request.size()));
    CHECK_AS_EXPECTED(parsed, HAILO_RPC_FAILED, "Failed to de-serialize 'SetSchedulerTimeout'");

    return std::make_tuple(request.configured_infer_model_handle().id(), std::chrono::milliseconds(request.timeout()));
}

// Builds the 'SetSchedulerTimeout' reply carrying the operation status.
Expected<Buffer> SetSchedulerTimeoutSerializer::serialize_reply(hailo_status status)
{
    ConfiguredInferModel_SetSchedulerTimeout_Reply reply;
    reply.set_status(status);

    TRY(auto reply_buffer, Buffer::create(reply.ByteSizeLong(), BufferStorageParams::create_dma()));
    const bool serialized = reply.SerializeToArray(reply_buffer.data(), static_cast<int>(reply_buffer.size()));
    CHECK_AS_EXPECTED(serialized, HAILO_RPC_FAILED, "Failed to serialize 'SetSchedulerTimeout'");

    return reply_buffer;
}

// Parses a 'SetSchedulerTimeout' reply and returns the remote status.
hailo_status SetSchedulerTimeoutSerializer::deserialize_reply(const MemoryView &serialized_reply)
{
    ConfiguredInferModel_SetSchedulerTimeout_Reply reply;
    const bool parsed = reply.ParseFromArray(serialized_reply.data(), static_cast<int>(serialized_reply.size()));
    CHECK_AS_EXPECTED(parsed, HAILO_RPC_FAILED, "Failed to de-serialize 'SetSchedulerTimeout'");

    return static_cast<hailo_status>(reply.status());
}
// Builds the 'SetSchedulerThreshold' request: the target configured model handle plus
// the scheduler batch threshold.
Expected<Buffer> SetSchedulerThresholdSerializer::serialize_request(rpc_object_handle_t configured_infer_model_handle, uint32_t threshold)
{
    ConfiguredInferModel_SetSchedulerThreshold_Request request;
    request.mutable_configured_infer_model_handle()->set_id(configured_infer_model_handle);
    request.set_threshold(threshold);

    // TODO (HRT-13983) - check if we can use GetCachedSize
    TRY(auto request_buffer, Buffer::create(request.ByteSizeLong(), BufferStorageParams::create_dma()));
    const bool serialized = request.SerializeToArray(request_buffer.data(), static_cast<int>(request_buffer.size()));
    CHECK_AS_EXPECTED(serialized, HAILO_RPC_FAILED, "Failed to serialize 'SetSchedulerThreshold'");

    return request_buffer;
}

// Parses a 'SetSchedulerThreshold' request into (handle, threshold).
Expected<std::tuple<rpc_object_handle_t, uint32_t>> SetSchedulerThresholdSerializer::deserialize_request(
    const MemoryView &serialized_request)
{
    ConfiguredInferModel_SetSchedulerThreshold_Request request;
    const bool parsed = request.ParseFromArray(serialized_request.data(), static_cast<int>(serialized_request.size()));
    CHECK_AS_EXPECTED(parsed, HAILO_RPC_FAILED, "Failed to de-serialize 'SetSchedulerThreshold'");

    return std::make_tuple(request.configured_infer_model_handle().id(), request.threshold());
}

// Builds the 'SetSchedulerThreshold' reply carrying the operation status.
Expected<Buffer> SetSchedulerThresholdSerializer::serialize_reply(hailo_status status)
{
    ConfiguredInferModel_SetSchedulerThreshold_Reply reply;
    reply.set_status(status);

    TRY(auto reply_buffer, Buffer::create(reply.ByteSizeLong(), BufferStorageParams::create_dma()));
    const bool serialized = reply.SerializeToArray(reply_buffer.data(), static_cast<int>(reply_buffer.size()));
    CHECK_AS_EXPECTED(serialized, HAILO_RPC_FAILED, "Failed to serialize 'SetSchedulerThreshold'");

    return reply_buffer;
}

// Parses a 'SetSchedulerThreshold' reply and returns the remote status.
hailo_status SetSchedulerThresholdSerializer::deserialize_reply(const MemoryView &serialized_reply)
{
    ConfiguredInferModel_SetSchedulerThreshold_Reply reply;
    const bool parsed = reply.ParseFromArray(serialized_reply.data(), static_cast<int>(serialized_reply.size()));
    CHECK_AS_EXPECTED(parsed, HAILO_RPC_FAILED, "Failed to de-serialize 'SetSchedulerThreshold'");

    return static_cast<hailo_status>(reply.status());
}
// Builds the 'SetSchedulerPriority' request: the target configured model handle plus
// the scheduler priority value.
Expected<Buffer> SetSchedulerPrioritySerializer::serialize_request(rpc_object_handle_t configured_infer_model_handle, uint32_t priority)
{
    ConfiguredInferModel_SetSchedulerPriority_Request request;
    request.mutable_configured_infer_model_handle()->set_id(configured_infer_model_handle);
    request.set_priority(priority);

    // TODO (HRT-13983) - check if we can use GetCachedSize
    TRY(auto request_buffer, Buffer::create(request.ByteSizeLong(), BufferStorageParams::create_dma()));
    const bool serialized = request.SerializeToArray(request_buffer.data(), static_cast<int>(request_buffer.size()));
    CHECK_AS_EXPECTED(serialized, HAILO_RPC_FAILED, "Failed to serialize 'SetSchedulerPriority'");

    return request_buffer;
}

// Parses a 'SetSchedulerPriority' request into (handle, priority).
Expected<std::tuple<rpc_object_handle_t, uint32_t>> SetSchedulerPrioritySerializer::deserialize_request(
    const MemoryView &serialized_request)
{
    ConfiguredInferModel_SetSchedulerPriority_Request request;
    const bool parsed = request.ParseFromArray(serialized_request.data(), static_cast<int>(serialized_request.size()));
    CHECK_AS_EXPECTED(parsed, HAILO_RPC_FAILED, "Failed to de-serialize 'SetSchedulerPriority'");

    return std::make_tuple(request.configured_infer_model_handle().id(), request.priority());
}

// Builds the 'SetSchedulerPriority' reply carrying the operation status.
Expected<Buffer> SetSchedulerPrioritySerializer::serialize_reply(hailo_status status)
{
    ConfiguredInferModel_SetSchedulerPriority_Reply reply;
    reply.set_status(status);

    TRY(auto reply_buffer, Buffer::create(reply.ByteSizeLong(), BufferStorageParams::create_dma()));
    const bool serialized = reply.SerializeToArray(reply_buffer.data(), static_cast<int>(reply_buffer.size()));
    CHECK_AS_EXPECTED(serialized, HAILO_RPC_FAILED, "Failed to serialize 'SetSchedulerPriority'");

    return reply_buffer;
}

// Parses a 'SetSchedulerPriority' reply and returns the remote status.
hailo_status SetSchedulerPrioritySerializer::deserialize_reply(const MemoryView &serialized_reply)
{
    ConfiguredInferModel_SetSchedulerPriority_Reply reply;
    const bool parsed = reply.ParseFromArray(serialized_reply.data(), static_cast<int>(serialized_reply.size()));
    CHECK_AS_EXPECTED(parsed, HAILO_RPC_FAILED, "Failed to de-serialize 'SetSchedulerPriority'");

    return static_cast<hailo_status>(reply.status());
}
// Builds the 'GetHwLatencyMeasurement' request: only the configured model handle.
Expected<Buffer> GetHwLatencyMeasurementSerializer::serialize_request(rpc_object_handle_t configured_infer_model_handle)
{
    ConfiguredInferModel_GetHwLatencyMeasurement_Request request;
    request.mutable_configured_infer_model_handle()->set_id(configured_infer_model_handle);

    // TODO (HRT-13983) - check if we can use GetCachedSize
    TRY(auto request_buffer, Buffer::create(request.ByteSizeLong(), BufferStorageParams::create_dma()));
    const bool serialized = request.SerializeToArray(request_buffer.data(), static_cast<int>(request_buffer.size()));
    CHECK_AS_EXPECTED(serialized, HAILO_RPC_FAILED, "Failed to serialize 'GetHwLatencyMeasurement'");

    return request_buffer;
}

// Parses a 'GetHwLatencyMeasurement' request and returns the configured model handle.
Expected<rpc_object_handle_t> GetHwLatencyMeasurementSerializer::deserialize_request(const MemoryView &serialized_request)
{
    ConfiguredInferModel_GetHwLatencyMeasurement_Request request;
    const bool parsed = request.ParseFromArray(serialized_request.data(), static_cast<int>(serialized_request.size()));
    CHECK_AS_EXPECTED(parsed, HAILO_RPC_FAILED, "Failed to de-serialize 'GetHwLatencyMeasurement'");

    return request.configured_infer_model_handle().id();
}

// Builds the 'GetHwLatencyMeasurement' reply: status plus the averaged HW latency value.
Expected<Buffer> GetHwLatencyMeasurementSerializer::serialize_reply(hailo_status status, uint32_t avg_hw_latency)
{
    ConfiguredInferModel_GetHwLatencyMeasurement_Reply reply;
    reply.set_status(status);
    reply.set_avg_hw_latency(avg_hw_latency);

    TRY(auto reply_buffer, Buffer::create(reply.ByteSizeLong(), BufferStorageParams::create_dma()));
    const bool serialized = reply.SerializeToArray(reply_buffer.data(), static_cast<int>(reply_buffer.size()));
    CHECK_AS_EXPECTED(serialized, HAILO_RPC_FAILED, "Failed to serialize 'GetHwLatencyMeasurement'");

    return reply_buffer;
}

// Parses a 'GetHwLatencyMeasurement' reply into (status, average HW latency).
// The raw wire value is exposed to callers as std::chrono::nanoseconds.
Expected<std::tuple<hailo_status, std::chrono::nanoseconds>> GetHwLatencyMeasurementSerializer::deserialize_reply(const MemoryView &serialized_reply)
{
    ConfiguredInferModel_GetHwLatencyMeasurement_Reply reply;
    const bool parsed = reply.ParseFromArray(serialized_reply.data(), static_cast<int>(serialized_reply.size()));
    CHECK_AS_EXPECTED(parsed, HAILO_RPC_FAILED, "Failed to de-serialize 'GetHwLatencyMeasurement'");

    return std::make_tuple(static_cast<hailo_status>(reply.status()), std::chrono::nanoseconds(reply.avg_hw_latency()));
}
// Builds the 'Activate' request: only the configured model handle.
Expected<Buffer> ActivateSerializer::serialize_request(rpc_object_handle_t configured_infer_model_handle)
{
    ConfiguredInferModel_Activate_Request request;
    request.mutable_configured_infer_model_handle()->set_id(configured_infer_model_handle);

    // TODO (HRT-13983) - check if we can use GetCachedSize
    TRY(auto request_buffer, Buffer::create(request.ByteSizeLong(), BufferStorageParams::create_dma()));
    const bool serialized = request.SerializeToArray(request_buffer.data(), static_cast<int>(request_buffer.size()));
    CHECK_AS_EXPECTED(serialized, HAILO_RPC_FAILED, "Failed to serialize 'Activate'");

    return request_buffer;
}

// Parses an 'Activate' request and returns the configured model handle.
Expected<rpc_object_handle_t> ActivateSerializer::deserialize_request(const MemoryView &serialized_request)
{
    ConfiguredInferModel_Activate_Request request;
    const bool parsed = request.ParseFromArray(serialized_request.data(), static_cast<int>(serialized_request.size()));
    CHECK_AS_EXPECTED(parsed, HAILO_RPC_FAILED, "Failed to de-serialize 'Activate'");

    return request.configured_infer_model_handle().id();
}

// Builds the 'Activate' reply carrying the operation status.
Expected<Buffer> ActivateSerializer::serialize_reply(hailo_status status)
{
    ConfiguredInferModel_Activate_Reply reply;
    reply.set_status(status);

    TRY(auto reply_buffer, Buffer::create(reply.ByteSizeLong(), BufferStorageParams::create_dma()));
    const bool serialized = reply.SerializeToArray(reply_buffer.data(), static_cast<int>(reply_buffer.size()));
    CHECK_AS_EXPECTED(serialized, HAILO_RPC_FAILED, "Failed to serialize 'Activate'");

    return reply_buffer;
}

// Parses an 'Activate' reply and returns the remote status.
hailo_status ActivateSerializer::deserialize_reply(const MemoryView &serialized_reply)
{
    ConfiguredInferModel_Activate_Reply reply;
    const bool parsed = reply.ParseFromArray(serialized_reply.data(), static_cast<int>(serialized_reply.size()));
    CHECK_AS_EXPECTED(parsed, HAILO_RPC_FAILED, "Failed to de-serialize 'Activate'");

    return static_cast<hailo_status>(reply.status());
}
// Builds the 'Deactivate' request: only the configured model handle.
Expected<Buffer> DeactivateSerializer::serialize_request(rpc_object_handle_t configured_infer_model_handle)
{
    ConfiguredInferModel_Deactivate_Request request;
    request.mutable_configured_infer_model_handle()->set_id(configured_infer_model_handle);

    // TODO (HRT-13983) - check if we can use GetCachedSize
    TRY(auto request_buffer, Buffer::create(request.ByteSizeLong(), BufferStorageParams::create_dma()));
    const bool serialized = request.SerializeToArray(request_buffer.data(), static_cast<int>(request_buffer.size()));
    CHECK_AS_EXPECTED(serialized, HAILO_RPC_FAILED, "Failed to serialize 'Deactivate'");

    return request_buffer;
}

// Parses a 'Deactivate' request and returns the configured model handle.
Expected<rpc_object_handle_t> DeactivateSerializer::deserialize_request(const MemoryView &serialized_request)
{
    ConfiguredInferModel_Deactivate_Request request;
    const bool parsed = request.ParseFromArray(serialized_request.data(), static_cast<int>(serialized_request.size()));
    CHECK_AS_EXPECTED(parsed, HAILO_RPC_FAILED, "Failed to de-serialize 'Deactivate'");

    return request.configured_infer_model_handle().id();
}

// Builds the 'Deactivate' reply carrying the operation status.
Expected<Buffer> DeactivateSerializer::serialize_reply(hailo_status status)
{
    ConfiguredInferModel_Deactivate_Reply reply;
    reply.set_status(status);

    TRY(auto reply_buffer, Buffer::create(reply.ByteSizeLong(), BufferStorageParams::create_dma()));
    const bool serialized = reply.SerializeToArray(reply_buffer.data(), static_cast<int>(reply_buffer.size()));
    CHECK_AS_EXPECTED(serialized, HAILO_RPC_FAILED, "Failed to serialize 'Deactivate'");

    return reply_buffer;
}

// Parses a 'Deactivate' reply and returns the remote status.
hailo_status DeactivateSerializer::deserialize_reply(const MemoryView &serialized_reply)
{
    ConfiguredInferModel_Deactivate_Reply reply;
    const bool parsed = reply.ParseFromArray(serialized_reply.data(), static_cast<int>(serialized_reply.size()));
    CHECK_AS_EXPECTED(parsed, HAILO_RPC_FAILED, "Failed to de-serialize 'Deactivate'");

    return static_cast<hailo_status>(reply.status());
}
// Builds the 'Shutdown' request: only the configured model handle.
Expected<Buffer> ShutdownSerializer::serialize_request(rpc_object_handle_t configured_infer_model_handle)
{
    ConfiguredInferModel_Shutdown_Request request;
    request.mutable_configured_infer_model_handle()->set_id(configured_infer_model_handle);

    // TODO (HRT-13983) - check if we can use GetCachedSize
    TRY(auto request_buffer, Buffer::create(request.ByteSizeLong(), BufferStorageParams::create_dma()));
    const bool serialized = request.SerializeToArray(request_buffer.data(), static_cast<int>(request_buffer.size()));
    CHECK_AS_EXPECTED(serialized, HAILO_RPC_FAILED, "Failed to serialize 'Shutdown'");

    return request_buffer;
}

// Parses a 'Shutdown' request and returns the configured model handle.
Expected<rpc_object_handle_t> ShutdownSerializer::deserialize_request(const MemoryView &serialized_request)
{
    ConfiguredInferModel_Shutdown_Request request;
    const bool parsed = request.ParseFromArray(serialized_request.data(), static_cast<int>(serialized_request.size()));
    CHECK_AS_EXPECTED(parsed, HAILO_RPC_FAILED, "Failed to de-serialize 'Shutdown'");

    return request.configured_infer_model_handle().id();
}

// Builds the 'Shutdown' reply carrying the operation status.
Expected<Buffer> ShutdownSerializer::serialize_reply(hailo_status status)
{
    ConfiguredInferModel_Shutdown_Reply reply;
    reply.set_status(status);

    TRY(auto reply_buffer, Buffer::create(reply.ByteSizeLong(), BufferStorageParams::create_dma()));
    const bool serialized = reply.SerializeToArray(reply_buffer.data(), static_cast<int>(reply_buffer.size()));
    CHECK_AS_EXPECTED(serialized, HAILO_RPC_FAILED, "Failed to serialize 'Shutdown'");

    return reply_buffer;
}

// Parses a 'Shutdown' reply and returns the remote status.
hailo_status ShutdownSerializer::deserialize_reply(const MemoryView &serialized_reply)
{
    ConfiguredInferModel_Shutdown_Reply reply;
    const bool parsed = reply.ParseFromArray(serialized_reply.data(), static_cast<int>(serialized_reply.size()));
    CHECK_AS_EXPECTED(parsed, HAILO_RPC_FAILED, "Failed to de-serialize 'Shutdown'");

    return static_cast<hailo_status>(reply.status());
}
// Builds the 'RunAsync' request: the configured model to run, its parent infer model,
// and the handle identifying the client-side completion callback.
Expected<Buffer> RunAsyncSerializer::serialize_request(rpc_object_handle_t configured_infer_model_handle, rpc_object_handle_t infer_model_handle,
    rpc_object_handle_t callback_handle)
{
    ConfiguredInferModel_AsyncInfer_Request request;
    request.mutable_configured_infer_model_handle()->set_id(configured_infer_model_handle);
    request.mutable_infer_model_handle()->set_id(infer_model_handle);
    request.mutable_callback_handle()->set_id(callback_handle);

    // TODO (HRT-13983) - check if we can use GetCachedSize
    TRY(auto request_buffer, Buffer::create(request.ByteSizeLong(), BufferStorageParams::create_dma()));
    const bool serialized = request.SerializeToArray(request_buffer.data(), static_cast<int>(request_buffer.size()));
    CHECK_AS_EXPECTED(serialized, HAILO_RPC_FAILED, "Failed to serialize 'RunAsync'");

    return request_buffer;
}

// Parses a 'RunAsync' request into
// (configured model handle, infer model handle, callback handle).
Expected<std::tuple<rpc_object_handle_t, rpc_object_handle_t, rpc_object_handle_t>> RunAsyncSerializer::deserialize_request(
    const MemoryView &serialized_request)
{
    ConfiguredInferModel_AsyncInfer_Request request;
    const bool parsed = request.ParseFromArray(serialized_request.data(), static_cast<int>(serialized_request.size()));
    CHECK_AS_EXPECTED(parsed, HAILO_RPC_FAILED, "Failed to de-serialize 'RunAsync'");

    return std::make_tuple(request.configured_infer_model_handle().id(), request.infer_model_handle().id(),
        request.callback_handle().id());
}

// Builds the 'RunAsync' reply carrying the submission status.
Expected<Buffer> RunAsyncSerializer::serialize_reply(hailo_status status)
{
    ConfiguredInferModel_AsyncInfer_Reply reply;
    reply.set_status(status);

    TRY(auto reply_buffer, Buffer::create(reply.ByteSizeLong(), BufferStorageParams::create_dma()));
    const bool serialized = reply.SerializeToArray(reply_buffer.data(), static_cast<int>(reply_buffer.size()));
    CHECK_AS_EXPECTED(serialized, HAILO_RPC_FAILED, "Failed to serialize 'RunAsync'");

    return reply_buffer;
}

// Parses a 'RunAsync' reply and returns the remote status.
// Uses CHECK (not CHECK_AS_EXPECTED) since this function returns a plain hailo_status.
hailo_status RunAsyncSerializer::deserialize_reply(const MemoryView &serialized_reply)
{
    ConfiguredInferModel_AsyncInfer_Reply reply;
    const bool parsed = reply.ParseFromArray(serialized_reply.data(), static_cast<int>(serialized_reply.size()));
    CHECK(parsed, HAILO_RPC_FAILED, "Failed to de-serialize 'RunAsync'");

    return static_cast<hailo_status>(reply.status());
}
// Builds the 'CallbackCalled' notification: the completion status plus the handle of
// the client-side callback that should now fire.
Expected<Buffer> CallbackCalledSerializer::serialize_reply(hailo_status status, rpc_object_handle_t callback_handle)
{
    CallbackCalled_Reply reply;
    reply.set_status(status);
    reply.mutable_callback_handle()->set_id(callback_handle);

    TRY(auto reply_buffer, Buffer::create(reply.ByteSizeLong(), BufferStorageParams::create_dma()));
    const bool serialized = reply.SerializeToArray(reply_buffer.data(), static_cast<int>(reply_buffer.size()));
    CHECK_AS_EXPECTED(serialized, HAILO_RPC_FAILED, "Failed to serialize 'CallbackCalled'");

    return reply_buffer;
}

// Parses a 'CallbackCalled' notification into (status, callback handle).
Expected<std::tuple<hailo_status, rpc_object_handle_t>> CallbackCalledSerializer::deserialize_reply(const MemoryView &serialized_reply)
{
    CallbackCalled_Reply reply;
    const bool parsed = reply.ParseFromArray(serialized_reply.data(), static_cast<int>(serialized_reply.size()));
    CHECK_AS_EXPECTED(parsed, HAILO_RPC_FAILED, "Failed to de-serialize 'CallbackCalled'");

    return std::make_tuple(static_cast<hailo_status>(reply.status()), reply.callback_handle().id());
}
} /* namespace hailort */

View File

@@ -0,0 +1,253 @@
/**
* Copyright (c) 2024 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file serializer.hpp
* @brief HRPC protocol serialization
**/
#ifndef _HAILO_SERIALIZER_HPP_
#define _HAILO_SERIALIZER_HPP_
#include "hailo/hailort.h"
#include "hailo/buffer.hpp"
#include "hailo/expected.hpp"
#include <chrono>
#include <unordered_map>
namespace hailort
{
#define INVALID_HANDLE_ID (UINT32_MAX)
#define INVALID_LATENCY_MEASUREMENT (UINT32_MAX)
// Identifier of each action in the HRPC protocol.
// NOTE(review): the enumerator values look like on-the-wire action IDs shared between
// client and server, so entries should only be appended before MAX_VALUE and never
// reordered - confirm against the RPC dispatch code.
enum class HailoRpcActionID {
    VDEVICE__CREATE,
    VDEVICE__DESTROY,
    VDEVICE__CREATE_INFER_MODEL,
    INFER_MODEL__DESTROY,
    INFER_MODEL__CREATE_CONFIGURED_INFER_MODEL,
    CONFIGURED_INFER_MODEL__DESTROY,
    CONFIGURED_INFER_MODEL__SET_SCHEDULER_TIMEOUT,
    CONFIGURED_INFER_MODEL__SET_SCHEDULER_THRESHOLD,
    CONFIGURED_INFER_MODEL__SET_SCHEDULER_PRIORITY,
    CONFIGURED_INFER_MODEL__GET_HW_LATENCY_MEASUREMENT,
    CONFIGURED_INFER_MODEL__ACTIVATE,
    CONFIGURED_INFER_MODEL__DEACTIVATE,
    CONFIGURED_INFER_MODEL__SHUTDOWN,
    CONFIGURED_INFER_MODEL__RUN_ASYNC,
    CALLBACK_CALLED,

    MAX_VALUE, // Sentinel - counts the defined actions; not a valid action ID.
};
// Opaque handle referring to a server-side object (VDevice, InferModel, callback, ...)
// across the RPC boundary.
using rpc_object_handle_t = uint32_t;

// Per-stream configuration carried inside a 'CreateConfiguredInferModel' request.
struct rpc_stream_params_t
{
    uint32_t format_order;                  // Transported as a raw uint32 - presumably a hailo format order enum value; confirm against callers.
    uint32_t format_type;                   // Transported as a raw uint32 - presumably a hailo format type enum value; confirm against callers.
    float32_t nms_score_threshold;
    float32_t nms_iou_threshold;
    uint32_t nms_max_proposals_per_class;
    uint32_t nms_max_accumulated_mask_size;
};

// Maps a stream name to its parameters.
using rpc_stream_params_map_t = std::unordered_map<std::string, rpc_stream_params_t>;

// Fully deserialized form of a 'CreateConfiguredInferModel' request.
struct rpc_create_configured_infer_model_request_params_t
{
    rpc_object_handle_t infer_model_handle;     // Which infer model to configure.
    rpc_object_handle_t vdevice_handle;         // VDevice the model belongs to.
    rpc_stream_params_map_t input_streams_params;
    rpc_stream_params_map_t output_streams_params;
    uint16_t batch_size;
    hailo_power_mode_t power_mode;
    hailo_latency_measurement_flags_t latency_flag;
};
class CreateVDeviceSerializer
{
public:
CreateVDeviceSerializer() = delete;
static Expected<Buffer> serialize_request(const hailo_vdevice_params_t &params);
static Expected<hailo_vdevice_params_t> deserialize_request(const MemoryView &serialized_request);
static Expected<Buffer> serialize_reply(hailo_status status, rpc_object_handle_t vdevice_handle = INVALID_HANDLE_ID);
static Expected<std::tuple<hailo_status, rpc_object_handle_t>> deserialize_reply(const MemoryView &serialized_reply);
};
class DestroyVDeviceSerializer
{
public:
DestroyVDeviceSerializer() = delete;
static Expected<Buffer> serialize_request(rpc_object_handle_t vdevice_handle);
static Expected<rpc_object_handle_t> deserialize_request(const MemoryView &serialized_request);
static Expected<Buffer> serialize_reply(hailo_status status);
static hailo_status deserialize_reply(const MemoryView &serialized_reply);
};
class CreateInferModelSerializer
{
public:
CreateInferModelSerializer() = delete;
static Expected<Buffer> serialize_request(rpc_object_handle_t vdevice_handle, uint64_t hef_size);
static Expected<std::tuple<rpc_object_handle_t, uint64_t>> deserialize_request(const MemoryView &serialized_request);
static Expected<Buffer> serialize_reply(hailo_status status, rpc_object_handle_t infer_model_handle = INVALID_HANDLE_ID);
static Expected<std::tuple<hailo_status, rpc_object_handle_t>> deserialize_reply(const MemoryView &serialized_reply);
};
class DestroyInferModelSerializer
{
public:
DestroyInferModelSerializer() = delete;
static Expected<Buffer> serialize_request(rpc_object_handle_t infer_model_handle);
static Expected<rpc_object_handle_t> deserialize_request(const MemoryView &serialized_request);
static Expected<Buffer> serialize_reply(hailo_status status);
static hailo_status deserialize_reply(const MemoryView &serialized_reply);
};
class CreateConfiguredInferModelSerializer
{
public:
CreateConfiguredInferModelSerializer() = delete;
static Expected<Buffer> serialize_request(rpc_create_configured_infer_model_request_params_t params);
static Expected<rpc_create_configured_infer_model_request_params_t> deserialize_request(const MemoryView &serialized_request);
static Expected<Buffer> serialize_reply(hailo_status status, rpc_object_handle_t configured_infer_handle = INVALID_HANDLE_ID,
uint32_t async_queue_size = 0);
static Expected<std::tuple<hailo_status, rpc_object_handle_t, uint32_t>> deserialize_reply(const MemoryView &serialized_reply);
};
class DestroyConfiguredInferModelSerializer
{
public:
DestroyConfiguredInferModelSerializer() = delete;
static Expected<Buffer> serialize_request(rpc_object_handle_t configured_infer_model_handle);
static Expected<rpc_object_handle_t> deserialize_request(const MemoryView &serialized_request);
static Expected<Buffer> serialize_reply(hailo_status status);
static hailo_status deserialize_reply(const MemoryView &serialized_reply);
};
class SetSchedulerTimeoutSerializer
{
public:
SetSchedulerTimeoutSerializer() = delete;
static Expected<Buffer> serialize_request(rpc_object_handle_t configured_infer_model_handle, const std::chrono::milliseconds &timeout);
static Expected<std::tuple<rpc_object_handle_t, std::chrono::milliseconds>> deserialize_request(const MemoryView &serialized_request);
static Expected<Buffer> serialize_reply(hailo_status status);
static hailo_status deserialize_reply(const MemoryView &serialized_reply);
};
class SetSchedulerThresholdSerializer
{
public:
SetSchedulerThresholdSerializer() = delete;
static Expected<Buffer> serialize_request(rpc_object_handle_t configured_infer_model_handle, uint32_t threshold);
static Expected<std::tuple<rpc_object_handle_t, uint32_t>> deserialize_request(const MemoryView &serialized_request);
static Expected<Buffer> serialize_reply(hailo_status status);
static hailo_status deserialize_reply(const MemoryView &serialized_reply);
};
class SetSchedulerPrioritySerializer
{
public:
SetSchedulerPrioritySerializer() = delete;
static Expected<Buffer> serialize_request(rpc_object_handle_t configured_infer_model_handle, uint32_t priority);
static Expected<std::tuple<rpc_object_handle_t, uint32_t>> deserialize_request(const MemoryView &serialized_request);
static Expected<Buffer> serialize_reply(hailo_status status);
static hailo_status deserialize_reply(const MemoryView &serialized_reply);
};
class GetHwLatencyMeasurementSerializer
{
public:
GetHwLatencyMeasurementSerializer() = delete;
static Expected<Buffer> serialize_request(rpc_object_handle_t configured_infer_model_handle);
static Expected<rpc_object_handle_t> deserialize_request(const MemoryView &serialized_request);
static Expected<Buffer> serialize_reply(hailo_status status, uint32_t avg_hw_latency = INVALID_LATENCY_MEASUREMENT);
static Expected<std::tuple<hailo_status, std::chrono::nanoseconds>> deserialize_reply(const MemoryView &serialized_reply);
};
class ActivateSerializer
{
public:
ActivateSerializer() = delete;
static Expected<Buffer> serialize_request(rpc_object_handle_t configured_infer_model_handle);
static Expected<rpc_object_handle_t> deserialize_request(const MemoryView &serialized_request);
static Expected<Buffer> serialize_reply(hailo_status status);
static hailo_status deserialize_reply(const MemoryView &serialized_reply);
};
class DeactivateSerializer
{
public:
DeactivateSerializer() = delete;
static Expected<Buffer> serialize_request(rpc_object_handle_t configured_infer_model_handle);
static Expected<rpc_object_handle_t> deserialize_request(const MemoryView &serialized_request);
static Expected<Buffer> serialize_reply(hailo_status status);
static hailo_status deserialize_reply(const MemoryView &serialized_reply);
};
class ShutdownSerializer
{
public:
ShutdownSerializer() = delete;
static Expected<Buffer> serialize_request(rpc_object_handle_t configured_infer_model_handle);
static Expected<rpc_object_handle_t> deserialize_request(const MemoryView &serialized_request);
static Expected<Buffer> serialize_reply(hailo_status status);
static hailo_status deserialize_reply(const MemoryView &serialized_reply);
};
class RunAsyncSerializer
{
public:
RunAsyncSerializer() = delete;
static Expected<Buffer> serialize_request(rpc_object_handle_t configured_infer_model_handle, rpc_object_handle_t infer_model_handle,
rpc_object_handle_t callback_handle);
static Expected<std::tuple<rpc_object_handle_t, rpc_object_handle_t, rpc_object_handle_t>> deserialize_request(const MemoryView &serialized_request);
static Expected<Buffer> serialize_reply(hailo_status status);
static hailo_status deserialize_reply(const MemoryView &serialized_reply);
};
class CallbackCalledSerializer
{
public:
CallbackCalledSerializer() = delete;
static Expected<Buffer> serialize_reply(hailo_status status, rpc_object_handle_t callback_handle = INVALID_HANDLE_ID);
static Expected<std::tuple<hailo_status, rpc_object_handle_t>> deserialize_reply(const MemoryView &serialized_reply);
};
} /* namespace hailort */
#endif /* _HAILO_SERIALIZER_HPP_ */

View File

@@ -2,8 +2,8 @@ cmake_minimum_required(VERSION 3.0.0)
# set(CMAKE_C_CLANG_TIDY "clang-tidy;-checks=*")
set(HAILORT_MAJOR_VERSION 4)
set(HAILORT_MINOR_VERSION 17)
set(HAILORT_REVISION_VERSION 1)
set(HAILORT_MINOR_VERSION 18)
set(HAILORT_REVISION_VERSION 0)
# Add the cmake folder so the modules there are found
set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})
@@ -11,6 +11,7 @@ set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})
# Generate hef-proto files using host protobuf
protobuf_generate_cpp(PROTO_HEF_SRC PROTO_HEF_HEADER hef.proto)
protobuf_generate_python(PROTO_HEF_PY hef.proto) # TODO (HRT-12504): Copy hef_pb2.py to tools directory
protobuf_generate_python(PROTO_HEF_PY tracer_profiler.proto)
add_library(hef_proto ${PROTO_HEF_SRC} ${PROTO_HEF_HEADER} ${PROTO_HEF_PY})
target_link_libraries(hef_proto libprotobuf-lite)
@@ -68,4 +69,6 @@ if(HAILO_BUILD_UT)
add_subdirectory(tests)
endif()
add_subdirectory(bindings)
add_subdirectory(doc)
if(HAILO_BUILD_DOC)
add_subdirectory(doc)
endif()

View File

@@ -8,7 +8,7 @@ if(NOT CMAKE_HOST_UNIX)
message(FATAL_ERROR "Only unix hosts are supported, stopping build")
endif()
find_package(HailoRT 4.17.1 EXACT REQUIRED)
find_package(HailoRT 4.18.0 EXACT REQUIRED)
# GST_PLUGIN_DEFINE needs PACKAGE to be defined
set(GST_HAILO_PACKAGE_NAME "hailo")
@@ -27,6 +27,8 @@ add_library(gsthailo SHARED
gst-hailo/sync_gst_hailosend.cpp
gst-hailo/sync_gst_hailorecv.cpp
gst-hailo/gsthailonet.cpp
gst-hailo/gsthailo_allocator.cpp
gst-hailo/gsthailo_dmabuf_allocator.cpp
gst-hailo/gsthailodevicestats.cpp
gst-hailo/common.cpp
gst-hailo/network_group_handle.cpp

View File

@@ -33,8 +33,11 @@
using namespace hailort;
#define ERROR(msg, ...) g_print("HailoNet Error: " msg, ##__VA_ARGS__)
#define PLUGIN_AUTHOR "Hailo Technologies Ltd. (\"Hailo\")"
#define MAX_STRING_SIZE (PATH_MAX)
#define MAX_QUEUED_BUFFERS_IN_INPUT (16)
#define MAX_QUEUED_BUFFERS_IN_OUTPUT (16)
#define MAX_QUEUED_BUFFERS_IN_CORE (16)
@@ -156,6 +159,31 @@ using namespace hailort;
} while(0)
#define CHECK_EXPECTED(obj, ...) _CHECK_EXPECTED(obj, "" __VA_ARGS__)
#define __HAILO_CONCAT(x, y) x ## y
#define _HAILO_CONCAT(x, y) __HAILO_CONCAT(x, y)
#define _TRY(expected_var_name, var_decl, expr, ...) \
auto expected_var_name = (expr); \
CHECK_EXPECTED(expected_var_name, __VA_ARGS__); \
var_decl = expected_var_name.release()
/**
* The TRY macro is used to allow easier validation and access for variables returned as Expected<T>.
* If the expression returns an Expected<T> with status HAILO_SUCCESS, the macro will release the expected and assign
* the var_decl.
* Otherwise, the macro will cause current function to return the failed status.
*
* Usage example:
*
* Expected<int> func() {
* TRY(auto var, return_5());
* // Now var is int with value 5
*
* // func will return Unexpected with status HAILO_INTERNAL_FAILURE
* TRY(auto var2, return_error(HAILO_INTERNAL_FAILURE), "Failed doing stuff {}", 5);
*/
#define TRY(var_decl, expr, ...) _TRY(_HAILO_CONCAT(__expected, __COUNTER__), var_decl, expr, __VA_ARGS__)
#define RGB_FEATURES_SIZE (3)
#define RGBA_FEATURES_SIZE (4)
#define GRAY8_FEATURES_SIZE (1)
@@ -194,7 +222,7 @@ public:
return *this;
}
const T &get()
const T &get() const
{
return m_value;
}
@@ -209,6 +237,38 @@ private:
bool m_was_changed;
};
class HailoElemStringProperty final
{
public:
HailoElemStringProperty(const std::string &default_val) : m_was_changed(false) {
memset(m_string, 0, sizeof(m_string));
strncpy(m_string, default_val.c_str(), sizeof(m_string) - 1);
}
~HailoElemStringProperty() {}
HailoElemStringProperty &operator=(const std::string &value)
{
m_was_changed = true;
strncpy(m_string, value.c_str(), sizeof(m_string) - 1);
return *this;
}
const std::string get() const
{
return m_string;
}
bool was_changed()
{
return m_was_changed;
}
private:
char m_string[MAX_STRING_SIZE];
bool m_was_changed;
};
template<>
HailoElemProperty<gchar*>::~HailoElemProperty();

View File

@@ -0,0 +1,59 @@
/*
* Copyright (c) 2021-2024 Hailo Technologies Ltd. All rights reserved.
* Distributed under the LGPL 2.1 license (https://www.gnu.org/licenses/old-licenses/lgpl-2.1.txt)
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#include "gsthailo_allocator.hpp"
G_DEFINE_TYPE (GstHailoAllocator, gst_hailo_allocator, GST_TYPE_ALLOCATOR);
static GstMemory *gst_hailo_allocator_alloc(GstAllocator* allocator, gsize size, GstAllocationParams* /*params*/) {
GstHailoAllocator *hailo_allocator = GST_HAILO_ALLOCATOR(allocator);
auto buffer = Buffer::create(size, BufferStorageParams::create_dma());
if (!buffer) {
ERROR("Creating buffer for allocator has failed, status = %d\n", buffer.status());
return nullptr;
}
GstMemory *memory = gst_memory_new_wrapped(static_cast<GstMemoryFlags>(0), buffer->data(),
buffer->size(), 0, buffer->size(), nullptr, nullptr);
if (nullptr == memory) {
ERROR("Creating new GstMemory for allocator has failed!\n");
return nullptr;
}
hailo_allocator->buffers[memory] = std::move(buffer.release());
return memory;
}
static void gst_hailo_allocator_free(GstAllocator* allocator, GstMemory *mem) {
GstHailoAllocator *hailo_allocator = GST_HAILO_ALLOCATOR(allocator);
hailo_allocator->buffers.erase(mem);
}
static void gst_hailo_allocator_class_init(GstHailoAllocatorClass* klass) {
GstAllocatorClass* allocator_class = GST_ALLOCATOR_CLASS(klass);
allocator_class->alloc = gst_hailo_allocator_alloc;
allocator_class->free = gst_hailo_allocator_free;
}
static void gst_hailo_allocator_init(GstHailoAllocator* allocator) {
allocator->buffers = std::unordered_map<GstMemory*, Buffer>();
}

View File

@@ -0,0 +1,50 @@
/*
* Copyright (c) 2021-2024 Hailo Technologies Ltd. All rights reserved.
* Distributed under the LGPL 2.1 license (https://www.gnu.org/licenses/old-licenses/lgpl-2.1.txt)
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#ifndef _GST_HAILO_ALLOCATOR_HPP_
#define _GST_HAILO_ALLOCATOR_HPP_
#include "common.hpp"
using namespace hailort;
G_BEGIN_DECLS
#define GST_TYPE_HAILO_ALLOCATOR (gst_hailo_allocator_get_type())
#define GST_HAILO_ALLOCATOR(obj) (G_TYPE_CHECK_INSTANCE_CAST ((obj), GST_TYPE_HAILO_ALLOCATOR, GstHailoAllocator))
#define GST_HAILO_ALLOCATOR_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST ((klass), GST_TYPE_HAILO_ALLOCATOR, GstHailoAllocatorClass))
#define GST_IS_HAILO_ALLOCATOR(obj) (G_TYPE_CHECK_INSTANCE_TYPE ((obj), GST_TYPE_HAILO_ALLOCATOR))
#define GST_IS_HAILO_ALLOCATOR_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE ((klass), GST_TYPE_HAILO_ALLOCATOR))
struct GstHailoAllocator
{
GstAllocator parent;
std::unordered_map<GstMemory*, Buffer> buffers;
};
struct GstHailoAllocatorClass
{
GstAllocatorClass parent;
};
GType gst_hailo_allocator_get_type(void);
G_END_DECLS
#endif /* _GST_HAILO_ALLOCATOR_HPP_ */

View File

@@ -0,0 +1,91 @@
/*
* Copyright (c) 2021-2023 Hailo Technologies Ltd. All rights reserved.
* Distributed under the LGPL 2.1 license (https://www.gnu.org/licenses/old-licenses/lgpl-2.1.txt)
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#include "gsthailo_dmabuf_allocator.hpp"
#include <sys/ioctl.h>
#include <fcntl.h>
// TODO: HRT-13107
#define DEVPATH "/dev/dma_heap/linux,cma"
G_DEFINE_TYPE (GstHailoDmabufAllocator, gst_hailo_dmabuf_allocator, GST_TYPE_DMABUF_ALLOCATOR);
bool GstHailoDmaHeapControl::dma_heap_fd_open = false;
int GstHailoDmaHeapControl::dma_heap_fd = -1;
static GstMemory *gst_hailo_dmabuf_allocator_alloc(GstAllocator* allocator, gsize size, GstAllocationParams* /*params*/) {
GstHailoDmabufAllocator *hailo_allocator = GST_HAILO_DMABUF_ALLOCATOR(allocator);
if (!GstHailoDmaHeapControl::dma_heap_fd_open) {
GstHailoDmaHeapControl::dma_heap_fd = open(DEVPATH, O_RDWR | O_CLOEXEC);
if (GstHailoDmaHeapControl::dma_heap_fd < 0) {
ERROR("open fd failed!\n");
return nullptr;
}
GstHailoDmaHeapControl::dma_heap_fd_open = true;
}
dma_heap_allocation_data heap_data;
heap_data = {
.len = size,
.fd = 0,
.fd_flags = O_RDWR | O_CLOEXEC,
.heap_flags = 0,
};
int ret = ioctl(GstHailoDmaHeapControl::dma_heap_fd, DMA_HEAP_IOCTL_ALLOC, &heap_data);
if (ret < 0) {
ERROR("ioctl DMA_HEAP_IOCTL_ALLOC failed! ret = %d\n", ret);
return nullptr;
}
if (GST_IS_DMABUF_ALLOCATOR(hailo_allocator) == false) {
ERROR("hailo_allocator is not dmabuf!\n");
return nullptr;
}
GstMemory *memory = gst_dmabuf_allocator_alloc(allocator, heap_data.fd, size);
if (nullptr == memory) {
ERROR("Creating new GstMemory for allocator has failed!\n");
return nullptr;
}
hailo_allocator->dma_buffers[memory] = heap_data;
return memory;
}
static void gst_hailo_dmabuf_allocator_free(GstAllocator* allocator, GstMemory *mem) {
GstHailoDmabufAllocator *hailo_allocator = GST_HAILO_DMABUF_ALLOCATOR(allocator);
close(hailo_allocator->dma_buffers[mem].fd);
hailo_allocator->dma_buffers.erase(mem);
}
static void gst_hailo_dmabuf_allocator_class_init(GstHailoDmabufAllocatorClass* klass) {
GstAllocatorClass* allocator_class = GST_ALLOCATOR_CLASS(klass);
allocator_class->alloc = gst_hailo_dmabuf_allocator_alloc;
allocator_class->free = gst_hailo_dmabuf_allocator_free;
}
static void gst_hailo_dmabuf_allocator_init(GstHailoDmabufAllocator* allocator) {
allocator->dma_buffers = std::unordered_map<GstMemory*, dma_heap_allocation_data>();
}

View File

@@ -0,0 +1,61 @@
/*
* Copyright (c) 2021-2023 Hailo Technologies Ltd. All rights reserved.
* Distributed under the LGPL 2.1 license (https://www.gnu.org/licenses/old-licenses/lgpl-2.1.txt)
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#ifndef _GST_HAILO_DMABUF_ALLOCATOR_HPP_
#define _GST_HAILO_DMABUF_ALLOCATOR_HPP_
#include "common.hpp"
#include "hailo/hailort_dma-heap.h"
#include <gst/allocators/gstdmabuf.h>
using namespace hailort;
G_BEGIN_DECLS
#define GST_TYPE_HAILO_DMABUF_ALLOCATOR (gst_hailo_dmabuf_allocator_get_type())
#define GST_HAILO_DMABUF_ALLOCATOR(obj) (G_TYPE_CHECK_INSTANCE_CAST ((obj), GST_TYPE_HAILO_DMABUF_ALLOCATOR, GstHailoDmabufAllocator))
#define GST_HAILO_DMABUF_ALLOCATOR_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST ((klass), GST_TYPE_HAILO_DMABUF_ALLOCATOR, GstHailoDmabufAllocator))
#define GST_IS_HAILO_DMABUF_ALLOCATOR(obj) (G_TYPE_CHECK_INSTANCE_TYPE ((obj), GST_TYPE_HAILO_DMABUF_ALLOCATOR))
#define GST_IS_HAILO_DMABUF_ALLOCATOR_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE ((klass), GST_TYPE_HAILO_DMABUF_ALLOCATOR))
#define GST_HAILO_USE_DMA_BUFFER_ENV_VAR "GST_HAILO_USE_DMA_BUFFER"
class GstHailoDmaHeapControl {
public:
static bool dma_heap_fd_open;
static int dma_heap_fd;
};
struct GstHailoDmabufAllocator
{
GstDmaBufAllocator parent;
std::unordered_map<GstMemory*, dma_heap_allocation_data> dma_buffers;
};
struct GstHailoDmabufAllocatorClass
{
GstDmaBufAllocatorClass parent;
};
GType gst_hailo_dmabuf_allocator_get_type(void);
G_END_DECLS
#endif /* _GST_HAILO_DMABUF_ALLOCATOR_HPP_ */

View File

@@ -23,12 +23,10 @@
#include "hailo/hailort_common.hpp"
#include "hailo/hailort_defaults.hpp"
#include <gst/allocators/gstdmabuf.h>
#include <algorithm>
#include <unordered_map>
#define WAIT_FOR_ASYNC_READY_TIMEOUT (std::chrono::milliseconds(10000))
#define ERROR(msg, ...) g_print(msg, ##__VA_ARGS__)
enum
{
@@ -63,7 +61,6 @@ enum
static GstStaticPadTemplate sink_template = GST_STATIC_PAD_TEMPLATE("sink", GST_PAD_SINK, GST_PAD_ALWAYS, GST_STATIC_CAPS_ANY);
static GstStaticPadTemplate src_template = GST_STATIC_PAD_TEMPLATE("src", GST_PAD_SRC, GST_PAD_ALWAYS, GST_STATIC_CAPS_ANY);
G_DEFINE_TYPE (GstHailoAllocator, gst_hailo_allocator, GST_TYPE_ALLOCATOR);
G_DEFINE_TYPE (GstHailoNet, gst_hailonet, GST_TYPE_ELEMENT);
static std::atomic_uint32_t hailonet_count(0);
@@ -74,41 +71,6 @@ static bool gst_hailo_should_use_dma_buffers()
return (nullptr != env) && (0 == g_strcmp0(env, "1"));
}
static GstMemory *gst_hailo_allocator_alloc(GstAllocator* allocator, gsize size, GstAllocationParams* /*params*/) {
GstHailoAllocator *hailo_allocator = GST_HAILO_ALLOCATOR(allocator);
auto buffer = Buffer::create(size, BufferStorageParams::create_dma());
if (!buffer) {
ERROR("Creating buffer for allocator has failed, status = %d\n", buffer.status());
return nullptr;
}
GstMemory *memory = gst_memory_new_wrapped(static_cast<GstMemoryFlags>(0), buffer->data(),
buffer->size(), 0, buffer->size(), nullptr, nullptr);
if (nullptr == memory) {
ERROR("Creating new GstMemory for allocator has failed!\n");
return nullptr;
}
hailo_allocator->buffers[memory] = std::move(buffer.release());
return memory;
}
static void gst_hailo_allocator_free(GstAllocator* allocator, GstMemory *mem) {
GstHailoAllocator *hailo_allocator = GST_HAILO_ALLOCATOR(allocator);
hailo_allocator->buffers.erase(mem);
}
static void gst_hailo_allocator_class_init(GstHailoAllocatorClass* klass) {
GstAllocatorClass* allocator_class = GST_ALLOCATOR_CLASS(klass);
allocator_class->alloc = gst_hailo_allocator_alloc;
allocator_class->free = gst_hailo_allocator_free;
}
static void gst_hailo_allocator_init(GstHailoAllocator* allocator) {
allocator->buffers = std::unordered_map<GstMemory*, Buffer>();
}
static hailo_status gst_hailonet_deconfigure(GstHailoNet *self)
{
// This will wakeup any blocking calls to deuque
@@ -122,6 +84,14 @@ static hailo_status gst_hailonet_deconfigure(GstHailoNet *self)
return HAILO_SUCCESS;
}
static void gst_hailonet_unref_input_caps(GstHailoNet *self)
{
if (nullptr != self->input_caps) {
gst_caps_unref(self->input_caps);
self->input_caps = nullptr;
}
}
static hailo_status gst_hailonet_free(GstHailoNet *self)
{
std::unique_lock<std::mutex> lock(self->infer_mutex);
@@ -147,8 +117,24 @@ static hailo_status gst_hailonet_free(GstHailoNet *self)
gst_queue_array_free(self->thread_queue);
}
if (nullptr != self->input_caps) {
gst_caps_unref(self->input_caps);
while(!self->curr_event_queue.empty()) {
auto event = self->curr_event_queue.front();
gst_event_unref(event);
self->curr_event_queue.pop();
}
for (auto &buffer_events_queue_pair : self->events_queue_per_buffer) {
while(!buffer_events_queue_pair.second.empty()) {
auto event = buffer_events_queue_pair.second.front();
gst_event_unref(event);
buffer_events_queue_pair.second.pop();
}
}
self->events_queue_per_buffer.clear();
{
std::unique_lock<std::mutex> lock(self->input_caps_mutex);
gst_hailonet_unref_input_caps(self);
}
for (auto &name_pool_pair : self->output_buffer_pools) {
@@ -156,14 +142,21 @@ static hailo_status gst_hailonet_free(GstHailoNet *self)
CHECK(result, HAILO_INTERNAL_FAILURE, "Could not release buffer pool");
gst_object_unref(name_pool_pair.second);
}
self->output_buffer_pools.clear();
if (gst_hailo_should_use_dma_buffers()) {
gst_object_unref(self->dma_allocator);
} else {
if (GstHailoDmaHeapControl::dma_heap_fd_open) {
close(GstHailoDmaHeapControl::dma_heap_fd);
GstHailoDmaHeapControl::dma_heap_fd_open = false;
}
if (nullptr != self->dmabuf_allocator) {
gst_object_unref(self->dmabuf_allocator);
}
} else if (nullptr != self->allocator) {
gst_object_unref(self->allocator);
}
self->props.free_strings();
return HAILO_SUCCESS;
}
@@ -171,18 +164,15 @@ static hailo_status gst_hailonet_set_format_types(GstHailoNet *self, std::shared
{
if (self->props.m_input_format_type.was_changed()) {
for (const auto &input_name : infer_model->get_input_names()) {
auto input = infer_model->input(input_name);
CHECK_EXPECTED_AS_STATUS(input);
input->set_format_type(self->props.m_input_format_type.get());
TRY(auto input, infer_model->input(input_name));
input.set_format_type(self->props.m_input_format_type.get());
}
}
if (self->props.m_output_format_type.was_changed()) {
for (const auto &output_name : infer_model->get_output_names()) {
auto output = infer_model->output(output_name);
CHECK_EXPECTED_AS_STATUS(output);
TRY(auto output, infer_model->output(output_name));
output->set_format_type(self->props.m_output_format_type.get());
output.set_format_type(self->props.m_output_format_type.get());
}
}
@@ -198,25 +188,24 @@ static hailo_status gst_hailonet_set_nms_params(GstHailoNet *self, std::shared_p
});
for (const auto &output_name : infer_model->get_output_names()) {
auto output = infer_model->output(output_name);
CHECK_EXPECTED_AS_STATUS(output);
TRY(auto output, infer_model->output(output_name));
if (self->props.m_nms_score_threshold.was_changed()) {
CHECK(has_nms_output, HAILO_INVALID_OPERATION, "NMS score threshold is set, but there is no NMS output in this model.");
if (output->is_nms()) {
output->set_nms_score_threshold(self->props.m_nms_score_threshold.get());
if (output.is_nms()) {
output.set_nms_score_threshold(self->props.m_nms_score_threshold.get());
}
}
if (self->props.m_nms_iou_threshold.was_changed()) {
CHECK(has_nms_output, HAILO_INVALID_OPERATION, "NMS IoU threshold is set, but there is no NMS output in this model.");
if (output->is_nms()) {
output->set_nms_iou_threshold(self->props.m_nms_iou_threshold.get());
if (output.is_nms()) {
output.set_nms_iou_threshold(self->props.m_nms_iou_threshold.get());
}
}
if (self->props.m_nms_max_proposals_per_class.was_changed()) {
CHECK(has_nms_output, HAILO_INVALID_OPERATION, "NMS max proposals per class is set, but there is no NMS output in this model.");
if (output->is_nms()) {
output->set_nms_max_proposals_per_class(self->props.m_nms_max_proposals_per_class.get());
if (output.is_nms()) {
output.set_nms_max_proposals_per_class(self->props.m_nms_max_proposals_per_class.get());
}
}
}
@@ -252,7 +241,7 @@ static Expected<GstBufferPool*> gst_hailonet_create_buffer_pool(GstHailoNet *sel
self->props.m_outputs_max_pool_size.get());
if (gst_hailo_should_use_dma_buffers()) {
gst_buffer_pool_config_set_allocator(config, self->dma_allocator, nullptr);
gst_buffer_pool_config_set_allocator(config, GST_ALLOCATOR(self->dmabuf_allocator), nullptr);
} else {
gst_buffer_pool_config_set_allocator(config, GST_ALLOCATOR(self->allocator), nullptr);
}
@@ -266,6 +255,43 @@ static Expected<GstBufferPool*> gst_hailonet_create_buffer_pool(GstHailoNet *sel
return pool;
}
static void gst_hailonet_push_event_to_queue(GstHailoNet *self, GstEvent *event)
{
std::unique_lock<std::mutex> lock(self->input_queue_mutex);
self->curr_event_queue.push(event);
}
static gboolean gst_hailonet_handle_queued_event(GstHailoNet *self, GstEvent *event)
{
switch (GST_EVENT_TYPE(event)) {
case GST_EVENT_CAPS:
{
GstCaps *caps;
gst_event_parse_caps(event, &caps);
auto result = gst_pad_set_caps(self->srcpad, caps);
gst_event_unref(event);
return result;
}
default:
return gst_pad_push_event(self->srcpad, event);
}
}
static void gst_hailonet_handle_buffer_events(GstHailoNet *self, GstBuffer *buffer)
{
if (self->events_queue_per_buffer.find(buffer) == self->events_queue_per_buffer.end()) {
// The buffer does not have any events to send
return;
}
while (!self->events_queue_per_buffer.at(buffer).empty()) {
GstEvent* event = self->events_queue_per_buffer.at(buffer).front();
(void)gst_hailonet_handle_queued_event(self, event);
self->events_queue_per_buffer.at(buffer).pop();
}
self->events_queue_per_buffer.erase(buffer);
}
static hailo_status gst_hailonet_configure(GstHailoNet *self)
{
if (self->is_configured) {
@@ -288,9 +314,8 @@ static hailo_status gst_hailonet_configure(GstHailoNet *self)
for (const auto &input_name : self->infer_model->get_input_names()) {
if(self->props.m_no_transform.get()) {
// In case transformation is disabled - format order will be the same as we get from the HW (stream info).
auto input_stream_infos = self->infer_model->hef().get_stream_info_by_name(input_name, HAILO_H2D_STREAM);
CHECK_EXPECTED_AS_STATUS(input_stream_infos);
self->infer_model->input(input_name)->set_format_order(input_stream_infos.value().format.order);
TRY(const auto input_stream_infos, self->infer_model->hef().get_stream_info_by_name(input_name, HAILO_H2D_STREAM));
self->infer_model->input(input_name)->set_format_order(input_stream_infos.format.order);
} else if (self->infer_model->input(input_name)->format().order == HAILO_FORMAT_ORDER_NHWC) {
self->infer_model->input(input_name)->set_format_order(HAILO_FORMAT_ORDER_RGB4);
}
@@ -299,16 +324,14 @@ static hailo_status gst_hailonet_configure(GstHailoNet *self)
if (self->props.m_no_transform.get()) {
for (const auto &output_name : self->infer_model->get_output_names()) {
// In case transformation is disabled - format order will be the same as we get from the HW (stream info).
auto output_stream_infos = self->infer_model->hef().get_stream_info_by_name(output_name, HAILO_D2H_STREAM);
CHECK_EXPECTED_AS_STATUS(output_stream_infos);
self->infer_model->output(output_name)->set_format_order(output_stream_infos.value().format.order);
TRY(const auto output_stream_infos, self->infer_model->hef().get_stream_info_by_name(output_name, HAILO_D2H_STREAM));
self->infer_model->output(output_name)->set_format_order(output_stream_infos.format.order);
}
}
auto configured_infer_model = self->infer_model->configure();
CHECK_EXPECTED_AS_STATUS(configured_infer_model);
TRY(auto configured_infer_model, self->infer_model->configure());
auto ptr = make_shared_nothrow<ConfiguredInferModel>(configured_infer_model.release());
auto ptr = make_shared_nothrow<ConfiguredInferModel>(std::move(configured_infer_model));
CHECK_NOT_NULL(ptr, HAILO_OUT_OF_HOST_MEMORY);
self->configured_infer_model = ptr;
@@ -319,19 +342,33 @@ static hailo_status gst_hailonet_configure(GstHailoNet *self)
return HAILO_SUCCESS;
}
static void gst_hailonet_init_allocator(GstHailoNet *self)
{
gchar *parent_name = gst_object_get_name(GST_OBJECT(self));
gchar *name = g_strconcat(parent_name, ":hailo_allocator", NULL);
g_free(parent_name);
if (gst_hailo_should_use_dma_buffers()) {
self->dmabuf_allocator = GST_HAILO_DMABUF_ALLOCATOR(g_object_new(GST_TYPE_HAILO_DMABUF_ALLOCATOR, "name", name, NULL));
gst_object_ref_sink(self->dmabuf_allocator);
} else {
self->allocator = GST_HAILO_ALLOCATOR(g_object_new(GST_TYPE_HAILO_ALLOCATOR, "name", name, NULL));
gst_object_ref_sink(self->allocator);
}
g_free(name);
}
static hailo_status gst_hailonet_allocate_infer_resources(GstHailoNet *self)
{
auto bindings = self->configured_infer_model->create_bindings();
CHECK_EXPECTED_AS_STATUS(bindings);
self->infer_bindings = std::move(bindings.release());
TRY(self->infer_bindings, self->configured_infer_model->create_bindings());
self->output_buffer_pools = std::unordered_map<std::string, GstBufferPool*>();
self->output_vstream_infos = std::unordered_map<std::string, hailo_vstream_info_t>();
auto async_queue_size = self->configured_infer_model->get_async_queue_size();
CHECK_EXPECTED_AS_STATUS(async_queue_size);
self->input_queue = gst_queue_array_new(static_cast<guint>(async_queue_size.value()));
self->thread_queue = gst_queue_array_new(static_cast<guint>(async_queue_size.value()));
TRY(const auto async_queue_size, self->configured_infer_model->get_async_queue_size());
self->input_queue = gst_queue_array_new(static_cast<guint>(async_queue_size));
self->thread_queue = gst_queue_array_new(static_cast<guint>(async_queue_size));
self->is_thread_running = true;
self->thread = std::thread([self] () {
while (self->is_thread_running) {
@@ -351,7 +388,7 @@ static hailo_status gst_hailonet_allocate_infer_resources(GstHailoNet *self)
self->thread_cv.notify_all();
if (GST_IS_PAD(self->srcpad)) { // Checking because we fail here when exiting the application
GstFlowReturn ret = gst_pad_push(self->srcpad, buffer);
if ((GST_FLOW_OK != ret) && (GST_FLOW_FLUSHING != ret) && (!self->has_got_eos)) {
if ((GST_FLOW_OK != ret) && (GST_FLOW_FLUSHING != ret) && ((GST_FLOW_EOS != ret)) && (!self->has_got_eos)) {
ERROR("gst_pad_push failed with status = %d\n", ret);
break;
}
@@ -359,23 +396,58 @@ static hailo_status gst_hailonet_allocate_infer_resources(GstHailoNet *self)
}
});
gst_hailonet_init_allocator(self);
for (auto &output : self->infer_model->outputs()) {
auto buffer_pool = gst_hailonet_create_buffer_pool(self, output.get_frame_size());
CHECK_EXPECTED_AS_STATUS(buffer_pool);
self->output_buffer_pools[output.name()] = buffer_pool.release();
TRY(self->output_buffer_pools[output.name()], gst_hailonet_create_buffer_pool(self, output.get_frame_size()));
}
auto vstream_infos = self->infer_model->hef().get_output_vstream_infos();
CHECK_EXPECTED_AS_STATUS(vstream_infos);
for (const auto &vstream_info : vstream_infos.value()) {
TRY(const auto vstream_infos, self->infer_model->hef().get_output_vstream_infos());
for (const auto &vstream_info : vstream_infos) {
self->output_vstream_infos[vstream_info.name] = vstream_info;
}
return HAILO_SUCCESS;
}
static GstPadProbeReturn gst_hailonet_sink_probe(GstPad */*pad*/, GstPadProbeInfo */*info*/, gpointer user_data)
{
GstHailoNet *self = static_cast<GstHailoNet*>(user_data);
std::unique_lock<std::mutex> lock(self->sink_probe_change_state_mutex);
if (self->did_critical_failure_happen) {
return GST_PAD_PROBE_REMOVE;
}
auto status = gst_hailonet_configure(self);
if (HAILO_SUCCESS != status) {
return GST_PAD_PROBE_REMOVE;
}
status = gst_hailonet_allocate_infer_resources(self);
if (HAILO_SUCCESS != status) {
return GST_PAD_PROBE_REMOVE;
}
if (HAILO_SCHEDULING_ALGORITHM_NONE != self->props.m_scheduling_algorithm.get()) {
self->props.m_is_active = true;
return GST_PAD_PROBE_REMOVE;
}
if ((1 == hailonet_count) && (!self->props.m_is_active.was_changed())) {
self->props.m_is_active = true;
}
if (self->props.m_is_active.get()) {
status = self->configured_infer_model->activate();
if (HAILO_SUCCESS != status) {
return GST_PAD_PROBE_REMOVE;
}
}
self->has_called_activate = true;
return GST_PAD_PROBE_REMOVE;
}
static GstStateChangeReturn gst_hailonet_change_state(GstElement *element, GstStateChange transition)
{
GstStateChangeReturn ret = GST_ELEMENT_CLASS(gst_hailonet_parent_class)->change_state(element, transition);
@@ -409,6 +481,8 @@ static GstStateChangeReturn gst_hailonet_change_state(GstElement *element, GstSt
if (HAILO_SUCCESS != status) {
return GST_STATE_CHANGE_FAILURE;
}
gst_pad_add_probe(self->sinkpad, GST_PAD_PROBE_TYPE_BUFFER, static_cast<GstPadProbeCallback>(gst_hailonet_sink_probe), self, nullptr);
break;
}
default:
@@ -456,10 +530,7 @@ static void gst_hailonet_set_property(GObject *object, guint property_id, const
g_warning("The network was already configured so changing the HEF path will not take place!");
break;
}
if (nullptr != self->props.m_hef_path.get()) {
g_free(self->props.m_hef_path.get());
}
self->props.m_hef_path = g_strdup(g_value_get_string(value));
self->props.m_hef_path = g_value_get_string(value);
break;
case PROP_BATCH_SIZE:
if (self->is_configured) {
@@ -478,15 +549,12 @@ static void gst_hailonet_set_property(GObject *object, guint property_id, const
g_warning("The network was already configured so changing the device ID will not take place!");
break;
}
if (nullptr != self->props.m_device_id.get()) {
g_free(self->props.m_device_id.get());
}
self->props.m_device_id = g_strdup(g_value_get_string(value));
self->props.m_device_id = g_value_get_string(value);
break;
case PROP_DEVICE_COUNT:
if (nullptr != self->props.m_device_id.get()) {
if (!self->props.m_device_id.get().empty()) {
g_error("device-id and device-count excludes eachother. received device-id=%s, device-count=%d",
self->props.m_device_id.get(), g_value_get_uint(value));
self->props.m_device_id.get().c_str(), g_value_get_uint(value));
break;
}
if (self->is_configured) {
@@ -500,10 +568,7 @@ static void gst_hailonet_set_property(GObject *object, guint property_id, const
g_warning("The network was already configured so changing the vdevice group ID will not take place!");
break;
}
if (nullptr != self->props.m_vdevice_group_id.get()) {
g_free(self->props.m_vdevice_group_id.get());
}
self->props.m_vdevice_group_id = g_strdup(g_value_get_string(value));
self->props.m_vdevice_group_id = g_value_get_string(value);
break;
case PROP_IS_ACTIVE:
(void)gst_hailonet_toggle_activation(self, self->props.m_is_active.get(), g_value_get_boolean(value));
@@ -635,19 +700,19 @@ static void gst_hailonet_get_property(GObject *object, guint property_id, GValue
GstHailoNet *self = GST_HAILONET(object);
switch (property_id) {
case PROP_HEF_PATH:
g_value_set_string(value, self->props.m_hef_path.get());
g_value_set_string(value, self->props.m_hef_path.get().c_str());
break;
case PROP_BATCH_SIZE:
g_value_set_uint(value, self->props.m_batch_size.get());
break;
case PROP_DEVICE_ID:
g_value_set_string(value, self->props.m_device_id.get());
g_value_set_string(value, self->props.m_device_id.get().c_str());
break;
case PROP_DEVICE_COUNT:
g_value_set_uint(value, self->props.m_device_count.get());
break;
case PROP_VDEVICE_GROUP_ID:
g_value_set_string(value, self->props.m_vdevice_group_id.get());
g_value_set_string(value, self->props.m_vdevice_group_id.get().c_str());
break;
case PROP_IS_ACTIVE:
g_value_set_boolean(value, self->props.m_is_active.get());
@@ -911,11 +976,9 @@ static Expected<std::unordered_map<std::string, hailo_dma_buffer_t>> gst_hailone
static hailo_status gst_hailonet_fill_multiple_input_bindings_dma_buffers(GstHailoNet *self, GstBuffer *buffer)
{
auto input_buffers = gst_hailonet_read_input_dma_buffers_from_meta(self, buffer);
CHECK_EXPECTED_AS_STATUS(input_buffers);
for (const auto &name : self->infer_model->get_input_names())
{
auto status = self->infer_bindings.input(name)->set_dma_buffer(input_buffers.value().at(name));
TRY(auto input_buffers, gst_hailonet_read_input_dma_buffers_from_meta(self, buffer));
for (const auto &name : self->infer_model->get_input_names()) {
auto status = self->infer_bindings.input(name)->set_dma_buffer(input_buffers.at(name));
CHECK_SUCCESS(status);
}
@@ -950,10 +1013,9 @@ static Expected<std::unordered_map<std::string, uint8_t*>> gst_hailonet_read_inp
static hailo_status gst_hailonet_fill_multiple_input_bindings(GstHailoNet *self, GstBuffer *buffer)
{
auto input_buffers = gst_hailonet_read_input_buffers_from_meta(self, buffer);
CHECK_EXPECTED_AS_STATUS(input_buffers);
TRY(auto input_buffers, gst_hailonet_read_input_buffers_from_meta(self, buffer));
for (const auto &name : self->infer_model->get_input_names()) {
auto status = self->infer_bindings.input(name)->set_buffer(MemoryView(input_buffers.value().at(name),
auto status = self->infer_bindings.input(name)->set_buffer(MemoryView(input_buffers.at(name),
self->infer_model->input(name)->get_frame_size()));
CHECK_SUCCESS(status);
}
@@ -961,9 +1023,20 @@ static hailo_status gst_hailonet_fill_multiple_input_bindings(GstHailoNet *self,
return HAILO_SUCCESS;
}
static void store_buffer_events(GstHailoNet *self, GstBuffer *buffer)
{
self->events_queue_per_buffer[buffer] = std::queue<GstEvent*>();
while (!self->curr_event_queue.empty()) {
GstEvent *event = self->curr_event_queue.front();
self->events_queue_per_buffer[buffer].push(event);
self->curr_event_queue.pop();
}
}
static hailo_status gst_hailonet_push_buffer_to_input_queue(GstHailoNet *self, GstBuffer *buffer)
{
std::unique_lock<std::mutex> lock(self->input_queue_mutex);
store_buffer_events(self, buffer);
gst_queue_array_push_tail(self->input_queue, buffer);
return HAILO_SUCCESS;
@@ -1022,11 +1095,12 @@ static hailo_status gst_hailonet_call_run_async(GstHailoNet *self, const std::un
self->ongoing_frames++;
}
auto job = self->configured_infer_model->run_async(self->infer_bindings, [self, tensors] (const AsyncInferCompletionInfo &/*completion_info*/) {
TRY(auto job, self->configured_infer_model->run_async(self->infer_bindings, [self, tensors] (const AsyncInferCompletionInfo &/*completion_info*/) {
GstBuffer *buffer = nullptr;
{
std::unique_lock<std::mutex> lock(self->input_queue_mutex);
buffer = static_cast<GstBuffer*>(gst_queue_array_pop_head(self->input_queue));
gst_hailonet_handle_buffer_events(self, buffer);
}
for (auto &output : self->infer_model->outputs()) {
@@ -1047,9 +1121,8 @@ static hailo_status gst_hailonet_call_run_async(GstHailoNet *self, const std::un
self->flush_cv.notify_all();
gst_hailonet_push_buffer_to_thread(self, buffer);
});
CHECK_EXPECTED_AS_STATUS(job);
job->detach();
}));
job.detach();
return HAILO_SUCCESS;
}
@@ -1071,7 +1144,7 @@ static hailo_status gst_hailonet_async_infer_multi_input(GstHailoNet *self, GstB
if (HAILO_STREAM_ABORT == tensors.status()) {
return HAILO_SUCCESS;
}
CHECK_EXPECTED_AS_STATUS(tensors);
CHECK_EXPECTED_AS_STATUS(tensors); // TODO (HRT-13278): Figure out how to remove CHECK_EXPECTED here
status = gst_hailonet_call_run_async(self, tensors.value());
CHECK_SUCCESS(status);
@@ -1090,7 +1163,7 @@ static hailo_status gst_hailonet_async_infer_single_input(GstHailoNet *self, Gst
if (HAILO_STREAM_ABORT == tensors.status()) {
return HAILO_SUCCESS;
}
CHECK_EXPECTED_AS_STATUS(tensors);
CHECK_EXPECTED_AS_STATUS(tensors); // TODO (HRT-13278): Figure out how to remove CHECK_EXPECTED here
status = gst_hailonet_call_run_async(self, tensors.value());
CHECK_SUCCESS(status);
@@ -1125,6 +1198,10 @@ static GstFlowReturn gst_hailonet_chain(GstPad * /*pad*/, GstObject * parent, Gs
GstHailoNet *self = GST_HAILONET(parent);
std::unique_lock<std::mutex> lock(self->infer_mutex);
if (self->did_critical_failure_happen) {
return GST_FLOW_ERROR;
}
if (self->props.m_pass_through.get() || !self->props.m_is_active.get() || !self->is_configured) {
gst_hailonet_push_buffer_to_thread(self, buffer);
return GST_FLOW_OK;
@@ -1134,11 +1211,11 @@ static GstFlowReturn gst_hailonet_chain(GstPad * /*pad*/, GstObject * parent, Gs
if (self->props.m_should_force_writable.get()) {
buffer = gst_buffer_make_writable(buffer);
if (nullptr == buffer) {
ERROR("Failed to make buffer writable!");
ERROR("Failed to make buffer writable!\n");
return GST_FLOW_ERROR;
}
} else {
ERROR("Input buffer is not writable! Use force-writable property to force the buffer to be writable");
ERROR("Input buffer is not writable! Use force-writable property to force the buffer to be writable\n");
return GST_FLOW_ERROR;
}
}
@@ -1168,17 +1245,14 @@ static hailo_status gst_hailonet_init_infer_model(GstHailoNet * self)
hailo_device_id_t device_id = {0};
if (self->props.m_device_id.was_changed()) {
auto expected_device_id = HailoRTCommon::to_device_id(self->props.m_device_id.get());
CHECK_EXPECTED_AS_STATUS(expected_device_id);
device_id = std::move(expected_device_id.release());
TRY(device_id, HailoRTCommon::to_device_id(self->props.m_device_id.get()));
vdevice_params.device_ids = &device_id;
}
if (self->props.m_device_count.was_changed()) {
vdevice_params.device_count = self->props.m_device_count.get();
}
if (self->props.m_vdevice_group_id.was_changed()) {
vdevice_params.group_id = self->props.m_vdevice_group_id.get();
vdevice_params.group_id = self->props.m_vdevice_group_id.get().c_str();
} else if (self->props.m_vdevice_key.was_changed()) {
auto key_str = std::to_string(self->props.m_vdevice_key.get());
vdevice_params.group_id = key_str.c_str();
@@ -1192,13 +1266,8 @@ static hailo_status gst_hailonet_init_infer_model(GstHailoNet * self)
"To use multi-process-service please set scheduling-algorithm to a value other than 'none'");
}
auto vdevice = VDevice::create(vdevice_params);
CHECK_EXPECTED_AS_STATUS(vdevice);
self->vdevice = std::move(vdevice.release());
auto infer_model = self->vdevice->create_infer_model(self->props.m_hef_path.get());
CHECK_EXPECTED_AS_STATUS(infer_model);
self->infer_model = infer_model.release();
TRY(self->vdevice, VDevice::create(vdevice_params));
TRY(self->infer_model, self->vdevice->create_infer_model(self->props.m_hef_path.get()));
if(!(self->props.m_input_from_meta.get())){
CHECK(self->infer_model->inputs().size() == 1, HAILO_INVALID_OPERATION,
@@ -1275,6 +1344,10 @@ static GstCaps *gst_hailonet_get_caps(GstHailoNet *self)
return nullptr;
}
if (nullptr != self->input_caps) {
return gst_caps_copy(self->input_caps);
}
if (nullptr == self->vdevice) {
auto status = gst_hailonet_init_infer_model(self);
if (HAILO_SUCCESS != status) {
@@ -1283,9 +1356,10 @@ static GstCaps *gst_hailonet_get_caps(GstHailoNet *self)
}
}
// TODO (HRT-12491): check caps based on incoming metadata
if (self->props.m_input_from_meta.get()) {
GstCaps *new_caps = gst_caps_new_any();
std::unique_lock<std::mutex> lock(self->input_caps_mutex);
gst_hailonet_unref_input_caps(self);
self->input_caps = new_caps;
return gst_caps_copy(new_caps);
}
@@ -1312,6 +1386,8 @@ static GstCaps *gst_hailonet_get_caps(GstHailoNet *self)
return nullptr;
}
std::unique_lock<std::mutex> lock(self->input_caps_mutex);
gst_hailonet_unref_input_caps(self);
self->input_caps = new_caps;
return gst_caps_copy(new_caps);
}
@@ -1341,83 +1417,21 @@ static gboolean gst_hailonet_handle_sink_query(GstPad * pad, GstObject * parent,
}
}
/* Handles a CAPS event on the sink pad: negotiates the source pad caps
 * against the peer, based on the caps derived from the model input
 * (self->input_caps).
 * Returns TRUE when negotiation succeeds (or the peer accepts anything),
 * FALSE when input caps are not yet known or the intersection is empty.
 *
 * Fix: gst_pad_peer_query_caps() returns a full reference; the original
 * code leaked `caps_result` on the empty-intersection early return. */
static gboolean gst_hailonet_handle_caps_event(GstHailoNet *self, GstCaps */*caps*/)
{
    if (nullptr == self->input_caps) {
        return FALSE;
    }

    GstCaps *caps_result = gst_pad_peer_query_caps(self->srcpad, self->input_caps);
    if (gst_caps_is_empty(caps_result)) {
        // No common format with the downstream peer; release our reference.
        gst_caps_unref(caps_result);
        return FALSE;
    }

    if (gst_caps_is_any(caps_result)) {
        // Peer accepts anything; nothing to fixate or set.
        gst_caps_unref(caps_result);
        return TRUE;
    }

    // gst_caps_fixate() takes ownership of caps_result and returns fixed caps.
    GstCaps *outcaps = gst_caps_fixate(caps_result);
    gboolean res = gst_pad_set_caps(self->srcpad, outcaps);
    gst_caps_unref(outcaps);
    return res;
}
static gboolean gst_hailonet_sink_event(GstPad *pad, GstObject *parent, GstEvent *event)
{
GstHailoNet *self = GST_HAILONET(parent);
switch (GST_EVENT_TYPE(event)) {
case GST_EVENT_CAPS:
{
GstCaps *caps;
gst_event_parse_caps(event, &caps);
auto result = gst_hailonet_handle_caps_event(self, caps);
gst_event_unref(event);
return result;
}
case GST_EVENT_EOS:
if (GST_EVENT_TYPE(event) == GST_EVENT_EOS) {
self->has_got_eos = true;
return gst_pad_push_event(self->srcpad, event);
default:
}
if (GST_EVENT_IS_STICKY(event)) {
gst_hailonet_push_event_to_queue(self, event);
return TRUE;
} else {
return gst_pad_event_default(pad, parent, event);
}
}
/* One-shot sink pad probe: lazily configures the infer model and allocates
 * inference resources before the first buffer flows through the element.
 * Returns GST_PAD_PROBE_REMOVE on success (the probe runs only once) and
 * GST_PAD_PROBE_DROP when any setup step fails, so the buffer is discarded.
 * Serialized against state changes via sink_probe_change_state_mutex. */
static GstPadProbeReturn gst_hailonet_sink_probe(GstPad */*pad*/, GstPadProbeInfo */*info*/, gpointer user_data)
{
GstHailoNet *self = static_cast<GstHailoNet*>(user_data);
std::unique_lock<std::mutex> lock(self->sink_probe_change_state_mutex);
auto status = gst_hailonet_configure(self);
if (HAILO_SUCCESS != status) {
return GST_PAD_PROBE_DROP;
}
status = gst_hailonet_allocate_infer_resources(self);
if (HAILO_SUCCESS != status) {
return GST_PAD_PROBE_DROP;
}
/* With a scheduler in play, HailoRT activates models automatically; just
 * mark the element active and remove the probe without calling activate().
 * NOTE(review): has_called_activate is intentionally left false on this
 * path — confirm that deactivation logic elsewhere relies on this. */
if (HAILO_SCHEDULING_ALGORITHM_NONE != self->props.m_scheduling_algorithm.get()) {
self->props.m_is_active = true;
return GST_PAD_PROBE_REMOVE;
}
/* No scheduler: if this is the only hailonet element and the user never
 * set the is-active property explicitly, default it to active. */
if ((1 == hailonet_count) && (!self->props.m_is_active.was_changed())) {
self->props.m_is_active = true;
}
/* Manually activate the configured model when this element is active. */
if (self->props.m_is_active.get()) {
status = self->configured_infer_model->activate();
if (HAILO_SUCCESS != status) {
return GST_PAD_PROBE_DROP;
}
}
self->has_called_activate = true;
return GST_PAD_PROBE_REMOVE;
}
static void gst_hailonet_flush_callback(GstHailoNet *self, gpointer /*data*/)
{
std::unique_lock<std::mutex> lock(self->flush_mutex);
@@ -1454,18 +1468,8 @@ static void gst_hailonet_init(GstHailoNet *self)
self->has_called_activate = false;
self->ongoing_frames = 0;
self->did_critical_failure_happen = false;
gchar *parent_name = gst_object_get_name(GST_OBJECT(self));
gchar *name = g_strconcat(parent_name, ":hailo_allocator", NULL);
g_free(parent_name);
if (gst_hailo_should_use_dma_buffers()) {
self->dma_allocator = gst_dmabuf_allocator_new();
} else {
self->allocator = GST_HAILO_ALLOCATOR(g_object_new(GST_TYPE_HAILO_ALLOCATOR, "name", name, NULL));
gst_object_ref_sink(self->allocator);
g_free(name);
}
self->events_queue_per_buffer = std::unordered_map<GstBuffer*, std::queue<GstEvent*>>();
self->curr_event_queue = std::queue<GstEvent*>();
g_signal_connect(self, "flush", G_CALLBACK(gst_hailonet_flush_callback), nullptr);

View File

@@ -30,6 +30,8 @@
#include "hailo/infer_model.hpp"
#include "common.hpp"
#include "gsthailo_allocator.hpp"
#include "gsthailo_dmabuf_allocator.hpp"
#include <queue>
#include <condition_variable>
@@ -40,35 +42,14 @@ using namespace hailort;
G_BEGIN_DECLS
#define GST_TYPE_HAILO_ALLOCATOR (gst_hailo_allocator_get_type())
#define GST_HAILO_ALLOCATOR(obj) (G_TYPE_CHECK_INSTANCE_CAST ((obj), GST_TYPE_HAILO_ALLOCATOR, GstHailoAllocator))
#define GST_HAILO_ALLOCATOR_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST ((klass), GST_TYPE_HAILO_ALLOCATOR, GstHailoAllocatorClass))
#define GST_IS_HAILO_ALLOCATOR(obj) (G_TYPE_CHECK_INSTANCE_TYPE ((obj), GST_TYPE_HAILO_ALLOCATOR))
#define GST_IS_HAILO_ALLOCATOR_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE ((klass), GST_TYPE_HAILO_ALLOCATOR))
#define MIN_OUTPUTS_POOL_SIZE (MAX_GSTREAMER_BATCH_SIZE)
#define MAX_OUTPUTS_POOL_SIZE (MAX_GSTREAMER_BATCH_SIZE * 4)
#define GST_HAILO_USE_DMA_BUFFER_ENV_VAR "GST_HAILO_USE_DMA_BUFFER"
struct GstHailoAllocator
{
GstAllocator parent;
std::unordered_map<GstMemory*, Buffer> buffers;
};
struct GstHailoAllocatorClass
{
GstAllocatorClass parent;
};
GType gst_hailo_allocator_get_type(void);
struct HailoNetProperties final
{
public:
HailoNetProperties() : m_hef_path(nullptr), m_batch_size(HAILO_DEFAULT_BATCH_SIZE),
m_device_id(nullptr), m_device_count(0), m_vdevice_group_id(nullptr), m_is_active(false), m_pass_through(false),
HailoNetProperties() : m_hef_path(""), m_batch_size(HAILO_DEFAULT_BATCH_SIZE),
m_device_id(""), m_device_count(0), m_vdevice_group_id(""), m_is_active(false), m_pass_through(false),
m_outputs_min_pool_size(MIN_OUTPUTS_POOL_SIZE), m_outputs_max_pool_size(MAX_OUTPUTS_POOL_SIZE),
m_scheduling_algorithm(HAILO_SCHEDULING_ALGORITHM_ROUND_ROBIN), m_scheduler_timeout_ms(HAILO_DEFAULT_SCHEDULER_TIMEOUT_MS),
m_scheduler_threshold(HAILO_DEFAULT_SCHEDULER_THRESHOLD), m_scheduler_priority(HAILO_SCHEDULER_PRIORITY_NORMAL),
@@ -78,24 +59,11 @@ public:
m_vdevice_key(DEFAULT_VDEVICE_KEY)
{}
void free_strings()
{
if (m_hef_path.was_changed()) {
g_free(m_hef_path.get());
}
if (m_device_id.was_changed()) {
g_free(m_device_id.get());
}
if (m_vdevice_group_id.was_changed()) {
g_free(m_vdevice_group_id.get());
}
}
HailoElemProperty<gchar*> m_hef_path;
HailoElemStringProperty m_hef_path;
HailoElemProperty<guint16> m_batch_size;
HailoElemProperty<gchar*> m_device_id;
HailoElemStringProperty m_device_id;
HailoElemProperty<guint16> m_device_count;
HailoElemProperty<gchar*> m_vdevice_group_id;
HailoElemStringProperty m_vdevice_group_id;
HailoElemProperty<gboolean> m_is_active;
HailoElemProperty<gboolean> m_pass_through;
HailoElemProperty<guint> m_outputs_min_pool_size;
@@ -119,42 +87,47 @@ public:
};
typedef struct _GstHailoNet {
GstElement element;
GstPad *sinkpad;
GstPad *srcpad;
GstQueueArray *input_queue;
GstQueueArray *thread_queue;
std::atomic_uint32_t buffers_in_thread_queue;
std::thread thread;
HailoNetProperties props;
GstCaps *input_caps;
std::atomic_bool is_thread_running;
std::atomic_bool has_got_eos;
std::mutex sink_probe_change_state_mutex;
bool did_critical_failure_happen;
GstElement element;
GstPad *sinkpad;
GstPad *srcpad;
std::unique_ptr<VDevice> vdevice;
std::shared_ptr<InferModel> infer_model;
std::shared_ptr<ConfiguredInferModel> configured_infer_model;
ConfiguredInferModel::Bindings infer_bindings;
bool is_configured;
std::mutex infer_mutex;
std::unordered_map<GstBuffer*, std::queue<GstEvent*>> events_queue_per_buffer;
std::queue<GstEvent*> curr_event_queue;
GstQueueArray *input_queue;
bool has_called_activate;
std::atomic_uint32_t ongoing_frames;
std::condition_variable flush_cv;
std::mutex flush_mutex;
GstQueueArray *thread_queue;
std::atomic_uint32_t buffers_in_thread_queue;
std::thread thread;
HailoNetProperties props;
GstCaps *input_caps;
std::atomic_bool is_thread_running;
std::atomic_bool has_got_eos;
std::mutex sink_probe_change_state_mutex;
bool did_critical_failure_happen;
GstVideoInfo input_frame_info;
std::unique_ptr<VDevice> vdevice;
std::shared_ptr<InferModel> infer_model;
std::shared_ptr<ConfiguredInferModel> configured_infer_model;
ConfiguredInferModel::Bindings infer_bindings;
bool is_configured;
std::mutex infer_mutex;
GstHailoAllocator *allocator;
GstAllocator *dma_allocator;
std::unordered_map<std::string, GstBufferPool*> output_buffer_pools;
std::unordered_map<std::string, hailo_vstream_info_t> output_vstream_infos;
bool has_called_activate;
std::atomic_uint32_t ongoing_frames;
std::condition_variable flush_cv;
std::mutex flush_mutex;
std::mutex input_caps_mutex;
std::mutex input_queue_mutex;
std::mutex thread_queue_mutex;
std::condition_variable thread_cv;
GstVideoInfo input_frame_info;
GstHailoAllocator *allocator;
GstHailoDmabufAllocator *dmabuf_allocator;
std::unordered_map<std::string, GstBufferPool*> output_buffer_pools;
std::unordered_map<std::string, hailo_vstream_info_t> output_vstream_infos;
std::mutex input_queue_mutex;
std::mutex thread_queue_mutex;
std::condition_variable thread_cv;
} GstHailoNet;
typedef struct _GstHailoNetClass {

View File

@@ -1,4 +1,3 @@
cmake_minimum_required(VERSION 3.11.0)
include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/pybind11.cmake)
add_subdirectory(src)

View File

@@ -14,6 +14,7 @@ def join_drivers_path(path):
return os.path.join(_ROOT, 'hailo_platform', 'drivers', path)
import hailo_platform.pyhailort._pyhailort as _pyhailort
from hailo_platform.tools.udp_rate_limiter import UDPRateLimiter
from hailo_platform.pyhailort.hw_object import PcieDevice, EthernetDevice
from hailo_platform.pyhailort.pyhailort import (HEF, ConfigureParams,
@@ -26,7 +27,7 @@ from hailo_platform.pyhailort.pyhailort import (HEF, ConfigureParams,
InputVStreams, OutputVStreams,
InferVStreams, HailoStreamDirection, HailoFormatFlags, HailoCpuId, Device, VDevice,
DvmTypes, PowerMeasurementTypes, SamplingPeriod, AveragingFactor, MeasurementBufferIndex,
HailoRTException, HailoSchedulingAlgorithm, HailoRTStreamAbortedByUser)
HailoRTException, HailoSchedulingAlgorithm, HailoRTStreamAbortedByUser, AsyncInferJob)
def _verify_pyhailort_lib_exists():
python_version = "".join(str(i) for i in sys.version_info[:2])
@@ -41,25 +42,16 @@ def _verify_pyhailort_lib_exists():
_verify_pyhailort_lib_exists()
def get_version(package_name):
# See: https://packaging.python.org/guides/single-sourcing-package-version/ (Option 5)
# We assume that the installed package is actually the same one we import. This assumption may
# break in some edge cases e.g. if the user modifies sys.path manually.
__version__ = "4.18.0"
if _pyhailort.__version__ != __version__:
raise ImportError(
f"_pyhailort version ({_pyhailort.__version__}) does not match pyhailort version ({__version__})"
)
# hailo_platform package has been renamed to hailort, but the import is still hailo_platform
if package_name == "hailo_platform":
package_name = "hailort"
try:
import pkg_resources
return pkg_resources.get_distribution(package_name).version
except:
return 'unknown'
__version__ = get_version('hailo_platform')
__all__ = ['EthernetDevice', 'DvmTypes', 'PowerMeasurementTypes',
'SamplingPeriod', 'AveragingFactor', 'MeasurementBufferIndex', 'UDPRateLimiter', 'PcieDevice', 'HEF',
'ConfigureParams', 'FormatType', 'FormatOrder', 'MipiDataTypeRx', 'MipiPixelsPerClock', 'MipiClockSelection',
'MipiIspImageInOrder', 'MipiIspImageOutDataType', 'join_drivers_path', 'IspLightFrequency', 'HailoPowerMode',
'Endianness', 'HailoStreamInterface', 'InputVStreamParams', 'OutputVStreamParams',
'InputVStreams', 'OutputVStreams', 'InferVStreams', 'HailoStreamDirection', 'HailoFormatFlags', 'HailoCpuId',
'Device', 'VDevice', 'HailoRTException', 'HailoSchedulingAlgorithm', 'HailoRTStreamAbortedByUser']
'Device', 'VDevice', 'HailoRTException', 'HailoSchedulingAlgorithm', 'HailoRTStreamAbortedByUser', 'AsyncInferJob']

View File

@@ -3,7 +3,7 @@ import pathlib
import subprocess
import sys
import pkg_resources
import importlib.util
import hailo_platform
from hailo_platform.tools.hailocli.base_utils import HailortCliUtil
@@ -103,11 +103,8 @@ class TutorialRunnerCLI():
def _check_requirements(self):
missing_pkgs = []
working_set = pkg_resources.WorkingSet()
for req in self.TUTORIALS_REQUIREMENTS:
try:
working_set.require(req)
except pkg_resources.DistributionNotFound:
if importlib.util.find_spec(req) is None:
missing_pkgs.append(req)
if missing_pkgs:

View File

@@ -7,7 +7,8 @@
"\n",
"# Python inference tutorial\n",
"\n",
"This tutorial will walk you through the inference process.\n",
"This tutorial will describe how to use the Inference Process.\n",
"\n",
"\n",
"**Requirements:**\n",
"\n",
@@ -23,7 +24,7 @@
"## Standalone hardware deployment\n",
"\n",
"The standalone flow allows direct access to the HW, developing applications directly on top of Hailo\n",
"core HW, using HailoRT. This way we can use the Hailo hardware without Tensorflow, and\n",
"core HW, using HailoRT. This way the Hailo hardware can be used without Tensorflow, and\n",
"even without the Hailo SDK (after the HEF is built).\n",
"\n",
"An HEF is Hailo's binary format for neural networks. The HEF files contain:\n",
@@ -32,7 +33,8 @@
"* Weights\n",
"* Metadata for HailoRT (e.g. input/output scaling)\n",
"\n",
"First create the desired target object. In our example we use the Hailo-8 PCIe interface:\n"
"First create the desired target object.\n",
"In this example the Hailo-8 PCIe interface is used."
]
},
{

View File

@@ -7,14 +7,15 @@
"\n",
"# Python Inference Tutorial - Multi Process Service and Model Scheduler\n",
"\n",
"This tutorial will walk you through the inference process using The Model Scheduler.\n",
"This tutorial describes how to run an inference process using the multi-process service.\n",
"\n",
"\n",
"**Requirements:**\n",
"\n",
"* Enable HailoRT Multi-Process Service before running inference\n",
"* Enable HailoRT Multi-Process Service before running inference. For instructions, see [Multi Process Service](https://hailo.ai/developer-zone/documentation/hailort/latest/?sp_referrer=inference/inference.html#multi-process-service).\n",
"* Run the notebook inside the Python virtual environment: ```source hailo_virtualenv/bin/activate```\n",
"\n",
"It is recommended to use the command ``hailo tutorial`` (when inside the virtualenv) to open a Jupyter server that contains the tutorials."
"It is recommended to use the command ``hailo tutorial`` (when inside the ```virtualenv```) to open a Jupyter server that contains the tutorials."
]
},
{
@@ -67,6 +68,7 @@
"# Creating the VDevice target with scheduler enabled\n",
"params = VDevice.create_params()\n",
"params.scheduling_algorithm = HailoSchedulingAlgorithm.ROUND_ROBIN\n",
"params.multi_process_service = True\n",
"with VDevice(params) as target:\n",
" infer_processes = []\n",
"\n",

View File

@@ -0,0 +1,102 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n",
"# Python inference tutorial\n",
"\n",
"This tutorial will describe how to use the Inference Process.\n",
"\n",
"\n",
"**Requirements:**\n",
"\n",
"* Run the notebook inside the Python virtual environment: ```source hailo_virtualenv/bin/activate```\n",
"\n",
"It is recommended to use the command ``hailo tutorial`` (when inside the virtualenv) to open a Jupyter server that contains the tutorials."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Standalone hardware deployment\n",
"\n",
"The standalone flow allows direct access to the HW, developing applications directly on top of Hailo\n",
"core HW, using HailoRT. This way the Hailo hardware can be used without Tensorflow, and\n",
"even without the Hailo SDK (after the HEF is built).\n",
"\n",
"An HEF is Hailo's binary format for neural networks. The HEF files contain:\n",
"\n",
"* Target HW configuration\n",
"* Weights\n",
"* Metadata for HailoRT (e.g. input/output scaling)\n",
"\n",
"First create the desired target object.\n",
"In this example the Hailo-8 PCIe interface is used."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"from hailo_platform import VDevice, HailoSchedulingAlgorithm\n",
"\n",
"timeout_ms = 1000\n",
"\n",
"params = VDevice.create_params()\n",
"params.scheduling_algorithm = HailoSchedulingAlgorithm.ROUND_ROBIN\n",
"\n",
"# The vdevice is used as a context manager (\"with\" statement) to ensure it's released on time.\n",
"with VDevice(params) as vdevice:\n",
"\n",
" # Create an infer model from an HEF:\n",
" infer_model = vdevice.create_infer_model('../hefs/resnet_v1_18.hef')\n",
"\n",
" # Configure the infer model and create bindings for it\n",
" with infer_model.configure() as configured_infer_model:\n",
" bindings = configured_infer_model.create_bindings()\n",
"\n",
" # Set input and output buffers\n",
" buffer = np.empty(infer_model.input().shape).astype(np.uint8)\n",
" bindings.input().set_buffer(buffer)\n",
"\n",
" buffer = np.empty(infer_model.output().shape).astype(np.uint8)\n",
" bindings.output().set_buffer(buffer)\n",
"\n",
" # Run synchronous inference and access the output buffers\n",
" configured_infer_model.run([bindings], timeout_ms)\n",
" buffer = bindings.output().get_buffer()\n",
"\n",
" # Run asynchronous inference\n",
" job = configured_infer_model.run_async([bindings])\n",
" job.wait(timeout_ms)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -0,0 +1,195 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n",
"# Python Async Inference Tutorial - Multiple Models with Model Scheduler\n",
"\n",
"This tutorial will describe how to run an inference process.\n",
"\n",
"\n",
"**Requirements:**\n",
"\n",
"* Run the notebook inside the Python virtual environment: ```source hailo_virtualenv/bin/activate```\n",
"\n",
"It is recommended to use the command ``hailo tutorial`` (when inside the ```virtualenv```) to open a Jupyter server that contains the tutorials."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Running Inference using HailoRT\n",
"\n",
"In this example we will use the Model Scheduler to run inference on multiple models.\n",
"Each model is represented by an HEF which is built using the Hailo Dataflow Compiler.\n",
"An HEF is Hailo's binary format for neural networks. The HEF files contain:\n",
"\n",
"* Target HW configuration\n",
"* Weights\n",
"* Metadata for HailoRT (e.g. input/output scaling)\n",
"\n",
"The Model Scheduler is a HailoRT component that enhances and simplifies the usage\n",
"of the same Hailo device by multiple networks. The responsibility for activating/deactivating the network\n",
"groups is now under HailoRT, and is done **automatically** without user application intervention.\n",
"In order to use the Model Scheduler, create the VDevice with scheduler enabled, configure all models to the device, and start inference on all models:\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional: define a callback function that will run after the inference job is done\n",
"# The callback must have a keyword argument called \"completion_info\".\n",
"# That argument will be passed by the framework.\n",
"def example_callback(completion_info, bindings):\n",
" if completion_info.exception:\n",
" # handle exception\n",
" pass\n",
" \n",
" _ = bindings.output().get_buffer()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"from functools import partial\n",
"from hailo_platform import VDevice, HailoSchedulingAlgorithm, FormatType\n",
"\n",
"number_of_frames = 4\n",
"timeout_ms = 10000\n",
"\n",
"def infer(multi_process_service):\n",
" # Create a VDevice\n",
" params = VDevice.create_params()\n",
" params.scheduling_algorithm = HailoSchedulingAlgorithm.ROUND_ROBIN\n",
" params.group_id = \"SHARED\" \n",
" if multi_process_service:\n",
" params.multi_process_service = multi_process_service\n",
" \n",
" with VDevice(params) as vdevice:\n",
"\n",
" # Create an infer model from an HEF:\n",
" infer_model = vdevice.create_infer_model('../hefs/resnet_v1_18.hef')\n",
"\n",
" # Set optional infer model parameters\n",
" infer_model.set_batch_size(2)\n",
"\n",
" # For a single input / output model, the input / output object \n",
" # can be accessed with a name parameter ...\n",
" infer_model.input(\"input_layer1\").set_format_type(FormatType.FLOAT32)\n",
" # ... or without\n",
" infer_model.output().set_format_type(FormatType.FLOAT32)\n",
"\n",
" # Once the infer model is set, configure the infer model\n",
" with infer_model.configure() as configured_infer_model:\n",
" for _ in range(number_of_frames):\n",
" # Create bindings for it and set buffers\n",
" bindings = configured_infer_model.create_bindings()\n",
" bindings.input().set_buffer(np.empty(infer_model.input().shape).astype(np.float32))\n",
" bindings.output().set_buffer(np.empty(infer_model.output().shape).astype(np.float32))\n",
"\n",
" # Wait for the async pipeline to be ready, and start an async inference job\n",
" configured_infer_model.wait_for_async_ready(timeout_ms=10000)\n",
"\n",
" # Any callable can be passed as callback (lambda, function, functools.partial), as long\n",
" # as it has a keyword argument \"completion_info\"\n",
" job = configured_infer_model.run_async([bindings], partial(example_callback, bindings=bindings))\n",
"\n",
" # Wait for the last job\n",
" job.wait(timeout_ms)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Running multiple models concurrently\n",
"\n",
"The models can be run concurrently using either multiple `Thread` objects or multiple `Process` objects"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from threading import Thread\n",
"\n",
"pool = [\n",
" Thread(target=infer, args=(False,)),\n",
" Thread(target=infer, args=(False,))\n",
"]\n",
"\n",
"print('Starting async inference on multiple models using threads')\n",
"\n",
"for job in pool:\n",
" job.start()\n",
"for job in pool:\n",
" job.join()\n",
"\n",
"print('Done inference')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If the models are run in different processes, the multi-process service must be enabled first."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from multiprocessing import Process\n",
"\n",
"pool = [\n",
" Process(target=infer, args=(True,)),\n",
" Process(target=infer, args=(True,))\n",
"]\n",
"\n",
"print('Starting async inference on multiple models using processes')\n",
"\n",
"for job in pool:\n",
" job.start()\n",
"for job in pool:\n",
" job.join()\n",
"\n",
"print('Done inference')"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -1,39 +1,101 @@
"""
builds hailo_platform python package and its C++ dependencies using cmake
"""
import platform
import os
import json
from setuptools import setup, find_packages
import subprocess
import sys
from pathlib import Path
from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext as orig_build_ext
from wheel.bdist_wheel import bdist_wheel as orig_bdist_wheel
class NonPurePythonBDistWheel(orig_bdist_wheel):
"""makes the wheel platform-dependant so it can be based on the _pyhailort architecture"""
_plat_name = None
def _fix_plat_name(s):
# plat_name does not require the "linux_" prefix
return s.replace(platform.processor(), _plat_name.replace("linux_", ""))
class bdist_wheel(orig_bdist_wheel):
"""makes the wheel platform-dependant so it can be based on the _pyhailort architecture"""
def finalize_options(self):
# Save the plat_name option and pass it along to build_ext which will use it to change the processor in the
# extension name.
# All other paths will still use the naive processor, but that's ok, since the only thing that is packed into
# the wheel is the actual shared library, so only its name is relevant. Fixing all paths will require tweaking
# build_py, install, install_lib commands or fixing this somehow all accross setuptools
global _plat_name
_plat_name = self.plat_name
orig_bdist_wheel.finalize_options(self)
self.root_is_pure = False
def _get_pyhailort_lib_path():
conf_file_path = os.path.join(os.path.abspath(os.path.dirname( __file__ )), "wheel_conf.json")
extension = {
"posix": "so",
"nt": "pyd", # Windows
}[os.name]
if not os.path.isfile(conf_file_path):
return None
class build_ext(orig_build_ext):
OPTIONAL_CMAKE_ENV_VARIABLES = [
"CMAKE_TOOLCHAIN_FILE",
"HAILORT_INCLUDE_DIR",
"LIBHAILORT_PATH",
"PYTHON_INCLUDE_DIRS",
"CMAKE_GENERATOR",
"PYTHON_LIBRARY",
]
with open(conf_file_path, "r") as conf_file:
content = json.load(conf_file)
# TODO (HRT-8637): change this hard-coded path
return f"../hailo_platform/pyhailort/_pyhailort*{content['py_version']}*{content['arch']}*.{extension}"
"""defines a cmake command that will be called from the python build process"""
def run(self):
cfg = 'Debug' if self.debug else 'Release'
def _get_package_paths():
packages = []
pyhailort_lib = _get_pyhailort_lib_path()
if pyhailort_lib:
packages.append(pyhailort_lib)
packages.append("../hailo_tutorials/notebooks/*")
packages.append("../hailo_tutorials/hefs/*")
return packages
build_args = f"--config {cfg}"
build_directory = os.path.abspath(self.build_temp)
cmake_list_dir = Path(__file__).absolute().parents[1] / "src"
python_version = f"{sys.version_info.major}.{sys.version_info.minor}"
cmake_args = [
f'-DCMAKE_BUILD_TYPE={cfg}',
f'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={build_directory}',
f'-DPYBIND11_PYTHON_VERSION={python_version}',
f'-DPYTHON_EXECUTABLE={sys.executable}',
]
for env_var in self.OPTIONAL_CMAKE_ENV_VARIABLES:
if env_var in os.environ:
if env_var == "CMAKE_GENERATOR":
cmake_args.append(f'-G "{os.environ[env_var]}"')
else:
cmake_args.append(f"-D{env_var}={os.environ[env_var]}")
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.run(
f"cmake {cmake_list_dir} {' '.join(cmake_args)}",
cwd=self.build_temp,
shell=True,
check=True
)
subprocess.run(
f"cmake --build . {build_args}",
cwd=self.build_temp,
shell=True,
check=True,
)
for ext in self.extensions:
ext_filename = self.get_ext_filename(ext.name)
if platform.system() == "Linux" and _plat_name:
ext_filename = _fix_plat_name(ext_filename)
dst = Path(self.get_ext_fullpath(ext.name)).resolve().parent / "hailo_platform/pyhailort/"
build_temp = Path(self.build_temp).resolve()
if os.name == "nt":
src = build_temp / cfg / ext_filename
else:
src = build_temp / ext_filename
self.copy_file(src, dst)
if __name__ == "__main__":
@@ -41,7 +103,8 @@ if __name__ == "__main__":
author="Hailo team",
author_email="contact@hailo.ai",
cmdclass={
"bdist_wheel": NonPurePythonBDistWheel,
"bdist_wheel": bdist_wheel,
"build_ext": build_ext, # Build the C++ extension (_pyhailort) using cmake
},
description="HailoRT",
entry_points={
@@ -49,6 +112,9 @@ if __name__ == "__main__":
"hailo=hailo_platform.tools.hailocli.main:main",
]
},
ext_modules= [
Extension('_pyhailort', sources=[]),
],
install_requires=[
"argcomplete",
"contextlib2",
@@ -61,7 +127,10 @@ if __name__ == "__main__":
],
name="hailort",
package_data={
"hailo_platform": _get_package_paths(),
"hailo_platform": [
"../hailo_tutorials/notebooks/*",
"../hailo_tutorials/hefs/*"
]
},
packages=find_packages(),
platforms=[
@@ -69,6 +138,6 @@ if __name__ == "__main__":
"linux_aarch64",
],
url="https://hailo.ai/",
version="4.17.1",
version="4.18.0",
zip_safe=False,
)

View File

@@ -1,6 +1,20 @@
cmake_minimum_required(VERSION 3.0.0)
project(pyhailort)
get_filename_component(HAILORT_PROJECT_SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/../../../../../" ABSOLUTE)
get_filename_component(HAILORT_COMMON_DIR "${HAILORT_PROJECT_SOURCE_DIR}/hailort/" ABSOLUTE)
get_filename_component(PYHAILORT_DIR "${CMAKE_CURRENT_LIST_DIR}" ABSOLUTE)
set(HAILO_EXTERNAL_DIR ${HAILORT_COMMON_DIR}/external)
set(HAILO_EXTERNALS_CMAKE_SCRIPTS ${HAILORT_COMMON_DIR}/cmake/external/)
# Optional user-supplied libhailort for cross compilation (see usage below).
# These hold a file path and a directory path, so they must be typed cache
# entries: option() is for booleans only — in the original form the help text
# was consumed as the option's help string and the value defaulted to OFF/"".
set(LIBHAILORT_PATH "" CACHE FILEPATH "Path to libhailort to link against")
set(HAILORT_INCLUDE_DIR "" CACHE PATH "Path to include dir of libhailort")
include(ExternalProject)
include(GNUInstallDirs)
include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/pybind11.cmake)
include_directories(${HAILORT_COMMON_DIR})
FUNCTION(exclude_archive_libs_symbols target) # should be same as in common_compiler_options.cmake
if(WIN32)
@@ -9,8 +23,6 @@ FUNCTION(exclude_archive_libs_symbols target) # should be same as in common_comp
get_property(TEMP_LINK_FLAGS TARGET ${target} PROPERTY LINK_FLAGS)
set(TEMP_LINK_FLAGS "${TEMP_LINK_FLAGS} -Wl,--exclude-libs=ALL")
set_property(TARGET ${target} PROPERTY LINK_FLAGS ${TEMP_LINK_FLAGS})
else()
message(FATAL_ERROR "Unexpeced host, stopping build")
endif()
ENDFUNCTION()
@@ -27,13 +39,11 @@ if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
set(PYTHON_MODULE_EXTENSION ".cpython-${dpython}${m_flag}-${CMAKE_SYSTEM_PROCESSOR}-linux-gnu.so")
endif()
option(HAILO_BUILD_PYHAILORT_INTERNAL OFF)
set(PYHAILORT_DIR ${CMAKE_CURRENT_LIST_DIR})
pybind11_add_module(_pyhailort
pyhailort.cpp
device_api.cpp
vdevice_api.cpp
infer_model_api.cpp
network_group_api.cpp
hef_api.cpp
vstream_api.cpp
@@ -49,9 +59,33 @@ set_target_properties(_pyhailort PROPERTIES
# VISIBILITY_INLINES_HIDDEN YES
)
find_package(HailoRT 4.17.1 EXACT REQUIRED)
# allow user to inject a specific libhailort (and headers) to link against.
# use case: cross compilation
# Allow the user to inject a specific libhailort (and its headers) to link
# against instead of resolving it via find_package().
# Use case: cross compilation, where find_package() would find the host's copy.
if(LIBHAILORT_PATH AND HAILORT_INCLUDE_DIR)
message(STATUS "LIBHAILORT_PATH is set. Will link against given libhailort: ${LIBHAILORT_PATH}")
message(STATUS "HAILORT_INCLUDE_DIR is set. Will include given include dir: ${HAILORT_INCLUDE_DIR}")
# The injected library to link against (absolute path supplied by the user).
target_link_libraries(_pyhailort PRIVATE ${LIBHAILORT_PATH})
# Scope the injected headers to the _pyhailort target only; the previous
# include_directories() call leaked them into every target defined below.
target_include_directories(_pyhailort PRIVATE ${HAILORT_INCLUDE_DIR})
# Since find_package() is bypassed, define the version macros that the
# HailoRT package config would normally provide. Keep these in sync with
# the find_package(HailoRT ... EXACT) call in the else() branch.
target_compile_definitions(
    _pyhailort
    PUBLIC
    HAILORT_MAJOR_VERSION=4
    HAILORT_MINOR_VERSION=18
    HAILORT_REVISION_VERSION=0
)
elseif(LIBHAILORT_PATH OR HAILORT_INCLUDE_DIR)
message(FATAL_ERROR "Both LIBHAILORT_PATH and HAILORT_INCLUDE_DIR must be defined or none of them")
else()
# Default path: resolve the exact matching HailoRT release and link the
# imported target (carries include dirs and usage requirements).
find_package(HailoRT 4.18.0 EXACT REQUIRED)
target_link_libraries(_pyhailort PRIVATE HailoRT::libhailort)
endif()
target_link_libraries(_pyhailort PRIVATE HailoRT::libhailort)
if(WIN32)
target_link_libraries(_pyhailort PRIVATE Ws2_32)
target_compile_options(_pyhailort PRIVATE
@@ -64,20 +98,9 @@ endif()
target_compile_options(_pyhailort PRIVATE ${HAILORT_COMPILE_OPTIONS})
exclude_archive_libs_symbols(_pyhailort)
# Internal-only bindings: built and staged only when the
# HAILO_BUILD_PYHAILORT_INTERNAL cache option is enabled.
if (HAILO_BUILD_PYHAILORT_INTERNAL)
add_subdirectory(internal)
# copy files to a path the venv will look for
# (always-stale target: the copy re-runs on every build so the staged module
# never goes out of date with the freshly built _pyhailort_internal)
add_custom_target(pyhailort_internal_venv ALL
COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:_pyhailort_internal> ${PROJECT_SOURCE_DIR}/platform_internals/hailo_platform_internals/pyhailort/
)
# ensure the module is built before the copy step is considered up to date
add_dependencies(pyhailort_internal_venv _pyhailort_internal)
endif()
# TODO (HRT-8637): change this hard-coded path
set(HAILO_PYHAILORT_TARGET_DIR ${CMAKE_CURRENT_LIST_DIR}/../platform/hailo_platform/pyhailort/)
# Stage the built module next to the python package so that both a dev venv
# and the wheel build (setup.py copy step) can find it.
message(STATUS "Copying _pyhailort artifacts into " ${HAILO_PYHAILORT_TARGET_DIR})
add_custom_target(pyhailort_venv ALL
    COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:_pyhailort> ${HAILO_PYHAILORT_TARGET_DIR}
    VERBATIM # platform-independent argument escaping for the COMMAND line
)
@@ -87,3 +110,4 @@ install(TARGETS _pyhailort
LIBRARY DESTINATION ${HAILO_PYHAILORT_TARGET_DIR}
CONFIGURATIONS Release
)

Some files were not shown because too many files have changed in this diff Show More