This commit is contained in:
HailoRT-Automation
2023-06-29 15:02:42 +03:00
committed by GitHub
parent 86bb9c4968
commit 9bce73eb42
377 changed files with 17950 additions and 8664 deletions

BIN
.hailort.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 264 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.4 MiB

View File

@@ -14,20 +14,12 @@ if("${CMAKE_SOURCE_DIR}" STREQUAL "${CMAKE_BINARY_DIR}")
In order to build, please create a new `build` directory and run `cmake ..` from there.")
endif()
# Check build type
if (NOT CMAKE_BUILD_TYPE)
message(STATUS "No build type selected, default to Debug")
set(CMAKE_BUILD_TYPE "Debug")
endif()
message(STATUS "Building ${PROJECT_NAME} in ${CMAKE_BUILD_TYPE}")
# Set compiler flags in HAILORT_COMPILE_OPTIONS
# TODO: Change HAILORT_COMPILE_OPTIONS to add_compile_options
if(WIN32)
# TODO: set this eventually? set(HAILORT_COMPILE_OPTIONS /Wall)
set(HAILORT_COMPILE_OPTIONS ${HAILORT_COMPILE_OPTIONS}
/W4
/WX
/DWIN32_LEAN_AND_MEAN
/DNOMINMAX # NOMINMAX is required in order to play nice with std::min/std::max (otherwise Windows.h defines it's own)
/D_HAILO_EXPORTING
@@ -37,9 +29,9 @@ if(WIN32)
add_definitions(-D_CRT_SECURE_NO_WARNINGS) # Disable "unsafe function" warnings
elseif(UNIX)
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR CMAKE_CXX_COMPILER_ID STREQUAL "QCC")
set(HAILORT_COMPILE_OPTIONS ${HAILORT_COMPILE_OPTIONS} -Werror -Wall -Wextra -Wconversion)
set(HAILORT_COMPILE_OPTIONS ${HAILORT_COMPILE_OPTIONS} -Wall -Wextra -Wconversion)
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
set(HAILORT_COMPILE_OPTIONS ${HAILORT_COMPILE_OPTIONS} -Werror -Wall -Wextra
set(HAILORT_COMPILE_OPTIONS ${HAILORT_COMPILE_OPTIONS} -Wall -Wextra
# TODO: remove me warnings
-Wno-conversion
-Wno-deprecated-declarations # On c structures with deprecated attribute, clang generates implicit move ctor

View File

@@ -1,12 +1,12 @@
<p align="left">
<img src=".hailort.png" />
<img src=".hailort.jpg" />
</p>
# HailoRT #
HailoRT is a lightweight, production-grade runtime library that runs on the host processor and provides a robust
user-space runtime library (the HailoRT Library) with intuitive APIs in C/C++ for optimized performance
user-space library (the HailoRT Library) with intuitive APIs in C/C++ for optimized performance
HailoRT consists of the following main components:
- HailoRT Library.
@@ -42,8 +42,8 @@ Contact information and support is available at [**hailo.ai**](https://hailo.ai/
## About Hailo-8™
Hailo-8 is a deep learning processor for edge devices. The Hailo-8 provides groundbraking efficiency for neural network deployment.
The Hailo-8 edge AI processor, featuring up to 26 tera-operations per second (TOPS), significantly outperforms all other edge processors.
Hailo-8 is a deep learning processor for edge devices. The Hailo-8 provides groundbreaking efficiency for neural network deployment.
The Hailo-8 edge AI processor, featuring up to 26 Tera-Operations-Per-Second (TOPS), significantly outperforms all other edge processors.
Hailo-8 is available in various form-factors, including the Hailo-8 M.2 Module.
The Hailo-8 AI processor is designed to fit into a multitude of smart machines and devices, for a wide variety of sectors including Automotive, Smart Cities, Industry 4.0,

View File

@@ -54,6 +54,9 @@ extern "C" {
(vdma_channel_index) = ((src) & CONTEXT_SWITCH_DEFS__PACKED_VDMA_CHANNEL_ID__VDMA_CHANNEL_INDEX_MASK); \
} while (0)
#define CONTEXT_SWITCH_DEFS__WRITE_ACTION_BY_TYPE_MAX_SIZE (4)
#pragma pack(push, 1)
typedef struct {
uint16_t core_bytes_per_buffer;
@@ -104,6 +107,8 @@ typedef enum __attribute__((packed)) {
CONTEXT_SWITCH_DEFS__ACTION_TYPE_OPEN_BOUNDARY_INPUT_CHANNEL,
CONTEXT_SWITCH_DEFS__ACTION_TYPE_OPEN_BOUNDARY_OUTPUT_CHANNEL,
CONTEXT_SWITCH_DEFS__ACTION_TYPE_ENABLE_NMS,
CONTEXT_SWITCH_DEFS__ACTION_TYPE_WRITE_DATA_BY_TYPE,
CONTEXT_SWITCH_DEFS__ACTION_TYPE_SWITCH_LCU_BATCH,
/* Must be last */
CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT
@@ -358,8 +363,33 @@ typedef struct {
typedef struct {
uint8_t nms_unit_index;
uint8_t network_index;
uint16_t number_of_classes;
uint16_t burst_size;
} CONTEXT_SWITCH_DEFS__enable_nms_action_t;
typedef enum {
WRITE_ACTION_TYPE_GENERAL = 0,
WRITE_ACTION_TYPE_WRITE_BATCH = 1,
/* Must be last */
WRITE_ACTION_BY_TYPE_COUNT
} CONTEXT_SWITCH_DEFS__WRITE_ACTION_TYPE_t;
typedef struct {
uint32_t address;
uint8_t data_type; //CONTEXT_SWITCH_DEFS__WRITE_ACTION_TYPE_t
uint32_t data;
uint8_t shift;
uint32_t mask;
uint8_t network_index;
} CONTEXT_SWITCH_DEFS__write_data_by_type_action_t;
typedef struct {
uint8_t packed_lcu_id;
uint8_t network_index;
uint32_t kernel_done_count;
} CONTEXT_SWITCH_DEFS__switch_lcu_batch_action_data_t;
#pragma pack(pop)
#ifdef __cplusplus

View File

@@ -1017,6 +1017,7 @@ typedef enum {
CONTROL_PROTOCOL__CONTEXT_SWITCH_STATUS_COUNT,
} CONTROL_PROTOCOL__CONTEXT_SWITCH_STATUS_t;
#define CONTROL_PROTOCOL__INIFINITE_BATCH_COUNT (0)
typedef struct {
uint32_t state_machine_status_length;
uint8_t state_machine_status;
@@ -1024,6 +1025,8 @@ typedef struct {
uint8_t application_index;
uint32_t dynamic_batch_size_length;
uint16_t dynamic_batch_size;
uint32_t batch_count_length;
uint16_t batch_count;
uint32_t keep_nn_config_during_reset_length;
uint8_t keep_nn_config_during_reset;
} CONTROL_PROTOCOL__change_context_switch_status_request_t;
@@ -1315,6 +1318,8 @@ typedef struct {
uint8_t application_index;
uint32_t dynamic_batch_size_length;
uint16_t dynamic_batch_size;
uint32_t batch_count_length;
uint16_t batch_count;
uint32_t channels_info_length;
CONTROL_PROTOCOL__hw_infer_channels_info_t channels_info;
} CONTROL_PROTOCOL__change_hw_infer_status_request_t;

View File

@@ -57,6 +57,8 @@ typedef enum {
HEALTH_MONITOR_CPU_ECC_FATAL_EVENT_ID,
CONTEXT_SWITCH_BREAKPOINT_REACHED,
HEALTH_MONITOR_CLOCK_CHANGED_EVENT_ID,
HW_INFER_MANAGER_INFER_DONE,
D2H_EVENT_ID_COUNT /* Must be last*/
} D2H_EVENT_ID_t;
@@ -138,6 +140,12 @@ typedef struct {
#define D2H_EVENT_HEALTH_MONITOR_CLOCK_CHANGED_EVENT_PARAMETER_COUNT (2)
typedef struct {
uint32_t infer_cycles;
} D2H_EVENT_hw_infer_mamager_infer_done_message_t;
#define D2H_EVENT_HW_INFER_MANAGER_INFER_DONE_PARAMETER_COUNT (1)
/* D2H_EVENT__message_parameters_t should be in the same order as hailo_notification_message_parameters_t */
typedef union {
D2H_EVENT_rx_error_event_message_t rx_error_event;
@@ -149,6 +157,7 @@ typedef union {
D2H_EVENT_health_monitor_cpu_ecc_event_message_t health_monitor_cpu_ecc_event;
D2H_EVENT_context_switch_breakpoint_reached_event_massage_t context_switch_breakpoint_reached_event;
D2H_EVENT_health_monitor_clock_changed_event_message_t health_monitor_clock_changed_event;
D2H_EVENT_hw_infer_mamager_infer_done_message_t hw_infer_manager_infer_done_event;
} D2H_EVENT__message_parameters_t;
typedef struct {

View File

@@ -411,6 +411,7 @@ Updating rules:
FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_SLEEP_STATE)\
FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_HW_INFER_STATE_LENGTH)\
FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_CHANNELS_INFO_LENGTH)\
FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_BATCH_COUNT_LENGTH)\
\
FIRMWARE_MODULE__X(FIRMWARE_MODULE__POWER_MEASUREMENT)\
FIRMWARE_STATUS__X(HAILO_POWER_MEASUREMENT_STATUS_POWER_INIT_ERROR)\
@@ -554,6 +555,7 @@ Updating rules:
FIRMWARE_STATUS__X(PCIE_SERVICE_STATUS_INVALID_H2D_CHANNEL_INDEX)\
FIRMWARE_STATUS__X(PCIE_SERVICE_STATUS_INVALID_D2H_CHANNEL_INDEX)\
FIRMWARE_STATUS__X(PCIE_SERVICE_INVALID_INITIAL_CREDIT_SIZE)\
FIRMWARE_STATUS__X(PCIE_SERVICE_ERROR_ADDING_CREDITS_TO_PCIE_CHANNEL)\
\
FIRMWARE_MODULE__X(FIRMWARE_MODULE__FIRMWARE_UPDATE)\
FIRMWARE_STATUS__X(FIRMWARE_UPDATE_STATUS_INVALID_PARAMETERS)\
@@ -753,6 +755,9 @@ Updating rules:
FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_INVALID_DYNAMIC_CONTEXT_COUNT)\
FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_CONTEXT_INDEX_OUT_OF_RANGE)\
FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_TOTAL_PROVIDED_EDGE_LAYERS_LARGER_THEN_EXPECTED)\
FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_REACHED_TIMEOUT_WHILE_WAITING_FOR_NETWORK_IDLE)\
FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_WRITE_DATA_BY_TYPE_ACTION_INVALID_TYPE)\
FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_WRITE_DATA_BY_TYPE_ACTION_INVALID_MEMORY_SPACE)\
\
FIRMWARE_MODULE__X(FIRMWARE_MODULE__D2H_EVENT_MANAGER)\
FIRMWARE_STATUS__X(HAILO_D2H_EVENT_MANAGER_STATUS_MESSAGE_HIGH_PRIORITY_QUEUE_CREATE_FAILED)\
@@ -1010,6 +1015,7 @@ Updating rules:
FIRMWARE_STATUS__X(VDMA_SERVICE_STATUS_INVALID_CONSTANTS)\
FIRMWARE_STATUS__X(VDMA_SERVICE_STATUS_INVALID_CHANNEL_INDEX)\
FIRMWARE_STATUS__X(VDMA_SERVICE_STATUS_INVALID_EDGE_LAYER_DIRECTION)\
FIRMWARE_STATUS__X(VDMA_SERVICE_INSUFFICIENT_DESCRIPTORS_COUNT)\
\
FIRMWARE_MODULE__X(FIRMWARE_MODULE__MEMORY_LOGGER)\
FIRMWARE_STATUS__X(MEMORY_LOGGER_STATUS_DEBUG_INSUFFICIENT_MEMORY)\
@@ -1079,6 +1085,9 @@ Updating rules:
FIRMWARE_STATUS__X(NMS_MANAGER_STATUS_INVALID_NETWORK_INDEX)\
FIRMWARE_STATUS__X(NMS_MANAGER_STATUS_INVALID_NMS_UNIT_INDEX)\
FIRMWARE_STATUS__X(NMS_MANAGER_STATUS_INVALID_BATCH_SIZE)\
FIRMWARE_STATUS__X(NMS_MANAGER_STATUS_INVALID_NUM_CLASSES_SIZE)\
FIRMWARE_STATUS__X(NMS_MANAGER_STATUS_INVALID_BURST_SIZE)\
FIRMWARE_STATUS__X(NMS_MANAGER_STATUS_INVALID_LAST_FRAME_IN_BATCH_SIZE)\
\
FIRMWARE_MODULE__X(FIRMWARE_MODULE__CLUSTER_MANAGER)\
FIRMWARE_STATUS__X(CLUSTER_MANAGER_STATUS_INVALID_CLUSTER_INDEX)\
@@ -1087,6 +1096,7 @@ Updating rules:
FIRMWARE_STATUS__X(CLUSTER_MANAGER_STATUS_INVALID_LCU_INDEX)\
FIRMWARE_STATUS__X(CLUSTER_MANAGER_STATUS_INVALID_KERNEL_DONE_ADDRESS)\
FIRMWARE_STATUS__X(CLUSTER_MANAGER_STATUS_RECEIVED_UNEXPECTED_INTERRUPT)\
FIRMWARE_STATUS__X(CLUSTER_MANAGER_STATUS_INVALID_NETWORK_INDEX)\
\
FIRMWARE_MODULE__X(FIRMWARE_MODULE__HW_INFER_MANAGER)\
FIRMWARE_STATUS__X(HW_INFER_MANAGER_STATUS_NETWORK_GROUP_NOT_CONFIGURED_BEFORE_INFER_START)\

View File

@@ -10,6 +10,8 @@
#ifndef __UTILS_H__
#define __UTILS_H__
#include <stdint.h>
/** A compile time assertion check.
*
* Validate at compile time that the predicate is true without
@@ -125,4 +127,20 @@ _PP_ISEMPTY( \
#define MICROSECONDS_IN_MILLISECOND (1000)
static inline uint8_t ceil_log2(uint32_t n)
{
uint8_t result = 0;
if (n <= 1) {
return 0;
}
while (n > 1) {
result++;
n = (n + 1) >> 1;
}
return result;
}
#endif /* __UTILS_H__ */

View File

@@ -9,6 +9,18 @@ option(HAILO_BUILD_EXAMPLES "Build examples" OFF)
option(HAILO_OFFLINE_COMPILATION "Don't download external dependencies" OFF)
option(HAILO_BUILD_SERVICE "Build hailort service" OFF)
option(HAILO_BUILD_PROFILER "Build hailort profiler" ON)
option(HAILO_COMPILE_WARNING_AS_ERROR "Add compilation flag for treating compilation warnings as errors" OFF)
option(HAILO_SUPPORT_PACKAGING "Create HailoRT package (internal)" OFF)
if (HAILO_COMPILE_WARNING_AS_ERROR)
if(WIN32)
set(HAILORT_COMPILE_OPTIONS ${HAILORT_COMPILE_OPTIONS} /WX)
elseif(UNIX)
set(HAILORT_COMPILE_OPTIONS ${HAILORT_COMPILE_OPTIONS} -Werror)
else()
message(FATAL_ERROR "Unexpeced host, stopping build")
endif()
endif()
# Flag for emulator (FPGA/Veloce)
if(HAILO_BUILD_EMULATOR)
@@ -18,7 +30,7 @@ endif()
# Set firmware version
add_definitions( -DFIRMWARE_VERSION_MAJOR=4 )
add_definitions( -DFIRMWARE_VERSION_MINOR=13 )
add_definitions( -DFIRMWARE_VERSION_MINOR=14 )
add_definitions( -DFIRMWARE_VERSION_REVISION=0 )
if(HAILO_BUILD_SERVICE)
add_definitions( -DHAILO_SUPPORT_MULTI_PROCESS )
@@ -78,20 +90,6 @@ set(COMMON_INC_DIR ${PROJECT_SOURCE_DIR}/common/include)
set(DRIVER_INC_DIR ${PROJECT_SOURCE_DIR}/hailort/drivers/common)
set(RPC_DIR ${PROJECT_SOURCE_DIR}/hailort/rpc)
if(HAILO_BUILD_PYBIND)
if(NOT PYTHON_EXECUTABLE AND PYBIND11_PYTHON_VERSION)
# PYBIND11_PYTHON_VERSION is prioritized (not virtual environment) if PYTHON_EXECUTABLE is not set.
# See https://pybind11.readthedocs.io/en/stable/changelog.html#v2-6-0-oct-21-2020
if((${CMAKE_VERSION} VERSION_LESS "3.22.0") AND (NOT WIN32))
find_package(PythonInterp ${PYBIND11_PYTHON_VERSION} REQUIRED)
set(PYTHON_EXECUTABLE ${Python_EXECUTABLE})
else()
find_package(Python3 ${PYBIND11_PYTHON_VERSION} REQUIRED EXACT COMPONENTS Interpreter Development)
set(PYTHON_EXECUTABLE ${Python3_EXECUTABLE})
endif()
endif()
add_subdirectory(external/pybind11 EXCLUDE_FROM_ALL)
endif()
add_subdirectory(external/Catch2 EXCLUDE_FROM_ALL)
add_subdirectory(external/CLI11 EXCLUDE_FROM_ALL)
add_subdirectory(external/json EXCLUDE_FROM_ALL)
@@ -128,6 +126,9 @@ endif()
if(HAILO_WIN_DRIVER)
add_subdirectory(drivers/win)
endif()
if(HAILO_SUPPORT_PACKAGING)
add_subdirectory(packaging)
endif()

View File

@@ -19,6 +19,7 @@ set(SRC_FILES
${CMAKE_CURRENT_SOURCE_DIR}/barrier.cpp
${CMAKE_CURRENT_SOURCE_DIR}/file_utils.cpp
${CMAKE_CURRENT_SOURCE_DIR}/string_utils.cpp
${CMAKE_CURRENT_SOURCE_DIR}/event_internal.cpp
${CMAKE_CURRENT_SOURCE_DIR}/device_measurements.cpp
)

View File

@@ -16,6 +16,9 @@
namespace hailort
{
class Barrier;
using BarrierPtr = std::shared_ptr<Barrier>;
/**
* A barrier is a synchronization object that allows an expected number of threads to block until all of them
* arrive at the barrier.

View File

@@ -56,16 +56,22 @@ Expected<std::shared_ptr<TemperatureMeasurement>> TemperatureMeasurement::create
return ptr;
}
TemperatureMeasurement::TemperatureMeasurement(Device &device, hailo_status &status) : BaseMeasurement(device, status)
{}
{
/* Executing the check only if BaseMeasurement constructor has succeeded */
if (HAILO_SUCCESS == status) {
status = sanity_check();
}
}
hailo_status TemperatureMeasurement::sanity_check()
{
auto temp_measurement = m_device.get_chip_temperature();
return temp_measurement.status();
}
hailo_status TemperatureMeasurement::start_measurement()
{
// Checking sensor before starting thread
auto temp_info = m_device.get_chip_temperature();
CHECK_EXPECTED_AS_STATUS(temp_info);
m_is_thread_running = true;
m_thread = std::thread([this] () {
while (m_is_thread_running.load()) {
@@ -102,14 +108,21 @@ Expected<std::shared_ptr<PowerMeasurement>> PowerMeasurement::create_shared(Devi
PowerMeasurement::PowerMeasurement(Device &device, hailo_power_measurement_types_t measurement_type, hailo_status &status)
: BaseMeasurement(device, status), m_measurement_type(measurement_type)
{}
{
/* Executing the check only if BaseMeasurement constructor has succeeded */
if (HAILO_SUCCESS == status) {
status = sanity_check();
}
}
hailo_status PowerMeasurement::sanity_check()
{
auto power_measurement = m_device.power_measurement(HAILO_DVM_OPTIONS_AUTO, m_measurement_type);
return power_measurement.status();
}
hailo_status PowerMeasurement::start_measurement()
{
// Checking sensor before starting thread
auto power_info = m_device.power_measurement(HAILO_DVM_OPTIONS_AUTO, m_measurement_type);
CHECK_EXPECTED_AS_STATUS(power_info);
m_is_thread_running = true;
m_thread = std::thread([this] () {
while (m_is_thread_running.load()) {

View File

@@ -38,6 +38,9 @@ protected:
std::atomic_bool m_is_thread_running;
std::mutex m_mutex;
hailort::AccumulatorPtr m_acc;
private:
virtual hailo_status sanity_check() = 0;
};
@@ -56,6 +59,9 @@ public:
}
TemperatureMeasurement(hailort::Device &device, hailo_status &status);
private:
virtual hailo_status sanity_check() override;
};
@@ -89,6 +95,7 @@ public:
private:
hailo_power_measurement_types_t m_measurement_type;
virtual hailo_status sanity_check() override;
};
#endif /* _HAILO_DEVICE_MEASUREMENTS_HPP_ */

View File

@@ -86,13 +86,12 @@ public:
static const uint32_t MAX_INTERFACE_SIZE = IFNAMSIZ;
#endif
static hailo_status get_interface_from_board_ip(const char *board_ip, char *interface_name, size_t interface_name_length);
static hailo_status get_ip_from_interface(const char *interface_name, char *ip, size_t ip_length);
static Expected<std::string> get_interface_from_board_ip(const std::string &board_ip);
static Expected<std::string> get_ip_from_interface(const std::string &interface_name);
private:
#if defined(__GNUG__)
static hailo_status get_interface_from_arp_entry(char *arp_entry, char *interface_name,
size_t max_interface_name_length);
static Expected<std::string> get_interface_from_arp_entry(char *arp_entry);
#endif
};

View File

@@ -0,0 +1,73 @@
/**
* Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file event_internal.cpp
* @brief Internal implementation for events, shared between all os.
**/
#include "common/event_internal.hpp"
#include "common/logger_macros.hpp"
#include "common/utils.hpp"
namespace hailort
{
Waitable::Waitable(underlying_waitable_handle_t handle) :
m_handle(handle)
{}
hailo_status Waitable::wait(std::chrono::milliseconds timeout)
{
auto status = wait_for_single_object(m_handle, timeout);
if (HAILO_TIMEOUT == status) {
LOGGER__TRACE("wait_for_single_object failed with timeout (timeout={}ms)", timeout.count());
return status;
}
CHECK_SUCCESS(status);
status = post_wait();
CHECK_SUCCESS(status);
return HAILO_SUCCESS;
}
underlying_waitable_handle_t Waitable::get_underlying_handle()
{
return m_handle;
}
WaitOrShutdown::WaitOrShutdown(WaitablePtr waitable, EventPtr shutdown_event) :
m_waitable(waitable),
m_shutdown_event(shutdown_event),
m_waitable_group(create_waitable_group(m_waitable, m_shutdown_event))
{}
hailo_status WaitOrShutdown::wait(std::chrono::milliseconds timeout)
{
auto index = m_waitable_group.wait_any(timeout);
if (index.status() == HAILO_TIMEOUT) {
return index.status();
}
CHECK_EXPECTED_AS_STATUS(index);
assert(index.value() <= WAITABLE_INDEX);
return (index.value() == SHUTDOWN_INDEX) ? HAILO_SHUTDOWN_EVENT_SIGNALED : HAILO_SUCCESS;
}
hailo_status WaitOrShutdown::signal()
{
return m_waitable->signal();
}
WaitableGroup WaitOrShutdown::create_waitable_group(WaitablePtr waitable, EventPtr shutdown_event)
{
// Note the order - consistent with SHUTDOWN_INDEX, WAITABLE_INDEX.
std::vector<std::reference_wrapper<Waitable>> waitables;
waitables.emplace_back(std::ref(*shutdown_event));
waitables.emplace_back(std::ref(*waitable));
return waitables;
}
} /* namespace hailort */

View File

@@ -10,8 +10,7 @@
#ifndef _EVENT_INTERNAL_HPP_
#define _EVENT_INTERNAL_HPP_
#include "hailo/hailort.h"
#include "hailo/expected.hpp"
#include "hailo/event.hpp"
#include <memory>
#include <vector>
@@ -24,9 +23,50 @@
namespace hailort
{
// TODO: Replace with a static wait_multiple func belonging to Waitable (SDK-16567).
// Will get a vector of pointers as an argument. Can also use variadic
// template args for cases with fixed number Waitables
// Group of Waitable objects that can be waited for together
class WaitableGroup final
{
public:
WaitableGroup(std::vector<std::reference_wrapper<Waitable>> &&waitables) :
m_waitables(std::move(waitables)),
m_waitable_handles(create_waitable_handle_vector(m_waitables))
{}
/**
* Waits until any of the given waitables are signaled. Returns the index in the waitables vector
* of the signaled waitable with the smallest index value.
*/
Expected<size_t> wait_any(std::chrono::milliseconds timeout);
private:
#if defined(__linux__)
using WaitableHandle = pollfd;
#else
using WaitableHandle = underlying_waitable_handle_t;
#endif
static std::vector<WaitableHandle> create_waitable_handle_vector(
const std::vector<std::reference_wrapper<Waitable>> &waitables)
{
std::vector<WaitableHandle> waitable_handles;
waitable_handles.reserve(waitables.size());
for (auto &waitable : waitables) {
#if defined(__linux__)
waitable_handles.emplace_back(pollfd{waitable.get().get_underlying_handle(), POLLIN, 0});
#else
waitable_handles.emplace_back(waitable.get().get_underlying_handle());
#endif
}
return waitable_handles;
}
// Initialization dependency
std::vector<std::reference_wrapper<Waitable>> m_waitables;
// Store this vector here to avoid runtime allocations.
std::vector<WaitableHandle> m_waitable_handles;
};
class WaitOrShutdown final
{
public:
@@ -55,29 +95,19 @@ public:
hailo_status signal();
private:
static WaitableGroup create_waitable_group(WaitablePtr waitable, EventPtr shutdown_event);
// Note: We want to guarantee that if the shutdown event is signaled, HAILO_SHUTDOWN_EVENT_SIGNALED will be
// returned.
// * In Unix, using poll this isn't a problem since we'll get all the readable fds in a single call.
// * In Windows, using WaitForMultipleObjects, this works differently (from msdn):
// If bWaitAll is FALSE, the return value minus WAIT_OBJECT_0 indicates the lpHandles array index
// of the object that satisfied the wait. If more than one object became signaled during the call,
// this is the array index of the signaled object with the smallest index value of all the signaled
// objects.
// (https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-waitformultipleobjects)
// * Hence, SHUTDOWN_INDEX must come before WAITABLE_INDEX!
// Waitable::wait_any returns the smallest index value of all the signaled objects.
// Hence, SHUTDOWN_INDEX must come before WAITABLE_INDEX!
static const size_t SHUTDOWN_INDEX = 0;
static const size_t WAITABLE_INDEX = 1;
#if defined(_MSC_VER) || defined(__QNX__)
using WaitHandleArray = std::array<underlying_waitable_handle_t, 2>;
#else
using WaitHandleArray = std::array<struct pollfd, 2>;
#endif
const WaitablePtr m_waitable;
const EventPtr m_shutdown_event;
WaitHandleArray m_wait_handle_array;
static WaitHandleArray create_wait_handle_array(WaitablePtr waitable, EventPtr shutdown_event);
WaitableGroup m_waitable_group;
};
} /* namespace hailort */

View File

@@ -35,6 +35,7 @@ public:
static Expected<time_t> get_file_modified_time(const std::string &file_path);
static Expected<bool> is_directory(const std::string &path);
static hailo_status create_directory(const std::string &dir_path);
static hailo_status remove_directory(const std::string &dir_path);
static Expected<std::string> get_current_dir();
static std::string get_home_directory();
static bool is_path_accesible(const std::string &path);

View File

@@ -29,7 +29,7 @@ public:
using duration = std::chrono::nanoseconds;
using TimestampsArray = CircularArray<duration>;
explicit LatencyMeter(const std::set<std::string> &output_names, size_t timestamps_list_length) :
LatencyMeter(const std::set<std::string> &output_names, size_t timestamps_list_length) :
m_start_timestamps(timestamps_list_length),
m_latency_count(0),
m_latency_sum(0)

View File

@@ -10,6 +10,9 @@
#include "common/utils.hpp"
#include "common/logger_macros.hpp"
#include "common/ethernet_utils.hpp"
#include "common/socket.hpp"
#include <fstream>
namespace hailort
{
@@ -20,8 +23,7 @@ namespace hailort
#define ETHERNET_UTILS__ARP_DEVICE_NAME_INDEX (4)
hailo_status EthernetUtils::get_interface_from_arp_entry(char *arp_entry, char *interface_name,
size_t max_interface_name_length)
Expected<std::string> EthernetUtils::get_interface_from_arp_entry(char *arp_entry)
{
/* This function parses the interface name out from the arp entry
* Each entry is built as follows:
@@ -30,132 +32,62 @@ hailo_status EthernetUtils::get_interface_from_arp_entry(char *arp_entry, char *
* For example:
* 10.0.0.163 0x1 0x2 80:00:de:ad:be:3f * enp1s0
* */
hailo_status status = HAILO_UNINITIALIZED;
size_t token_counter = 0;
char* token = NULL;
/* Start splitting the arp entry into tokens according to the delimiter */
token = strtok(arp_entry, ETHERNET_UTILS__ARP_ENTRY_DELIMIETERS);
if (NULL == token) {
LOGGER__ERROR("Invalid arp entry, could not split it to tokens");
status = HAILO_ETH_FAILURE;
goto l_exit;
}
CHECK_AS_EXPECTED(nullptr != token, HAILO_ETH_FAILURE, "Invalid arp entry, could not split it to tokens");
/* Iterate over the tokens until the device name is found */
while (NULL != token) {
token = strtok(NULL, ETHERNET_UTILS__ARP_ENTRY_DELIMIETERS);
if (ETHERNET_UTILS__ARP_DEVICE_NAME_INDEX == token_counter) {
LOGGER__DEBUG("Interface name: {}", token);
strncpy(interface_name, token, max_interface_name_length);
break;
return std::string(token);
}
token_counter++;
}
status = HAILO_SUCCESS;
l_exit:
return status;
return make_unexpected(HAILO_ETH_FAILURE);
}
hailo_status EthernetUtils::get_interface_from_board_ip(const char *board_ip, char *interface_name, size_t interface_name_length)
Expected<std::string> EthernetUtils::get_interface_from_board_ip(const std::string &board_ip)
{
hailo_status status = HAILO_UNINITIALIZED;
FILE* arp_file = NULL;
int fclose_rc = -1;
std::ifstream arp_file(ETHERNET_UTILS__ARP_FILE, std::ios::in);
CHECK_AS_EXPECTED(arp_file, HAILO_OPEN_FILE_FAILURE, "Cannot open file {}. errno: {:#x}", ETHERNET_UTILS__ARP_FILE, errno);
char buffer[ETHERNET_UTILS__ARP_MAX_ENTRY_LENGTH] = {};
CHECK_ARG_NOT_NULL(interface_name);
CHECK_ARG_NOT_NULL(board_ip);
/* Open arp file */
arp_file = fopen(ETHERNET_UTILS__ARP_FILE, "r");
if (NULL == arp_file) {
LOGGER__ERROR("Cannot open file {}. Errno: {:#x}", ETHERNET_UTILS__ARP_FILE, errno);
status = HAILO_OPEN_FILE_FAILURE;
goto l_exit;
}
/* Go over all of the lines at the file */
while(fgets(buffer, ARRAY_LENGTH(buffer), arp_file)) {
/* Check if the arp line contains the board_ip */
if (strstr(buffer, board_ip)) {
status = get_interface_from_arp_entry(buffer, interface_name, interface_name_length);
if (HAILO_SUCCESS != status) {
goto l_exit;
}
break;
while (arp_file.getline(buffer, sizeof(buffer))) {
if (strstr(buffer, board_ip.c_str())) {
return get_interface_from_arp_entry(buffer);
}
}
status = HAILO_SUCCESS;
l_exit:
if (NULL != arp_file) {
fclose_rc = fclose(arp_file);
if (0 != fclose_rc) {
LOGGER__ERROR("Cannot close arp file {} ", ETHERNET_UTILS__ARP_FILE);
if (HAILO_SUCCESS == status) {
status = HAILO_CLOSE_FAILURE;
} else {
LOGGER__ERROR("Did not override status. Left status value at: {} (not assigned {}",
status,
HAILO_CLOSE_FAILURE);
}
}
LOGGER__ERROR("Failed to find interface name for ip {}", board_ip);
return make_unexpected(HAILO_ETH_FAILURE);
}
return status;
}
hailo_status EthernetUtils::get_ip_from_interface(const char *interface_name, char *ip, size_t ip_length)
Expected<std::string> EthernetUtils::get_ip_from_interface(const std::string &interface_name)
{
hailo_status status = HAILO_UNINITIALIZED;
struct ifreq ifr = {};
int fd = 0;
int posix_rc = 0;
CHECK_ARG_NOT_NULL(interface_name);
CHECK_ARG_NOT_NULL(ip);
/* Create socket */
fd = socket(AF_INET, SOCK_DGRAM, 0);
if (fd < 0) {
LOGGER__ERROR("Failed to create socket. Errno: {:#x}", errno);
status = HAILO_ETH_FAILURE;
goto l_exit;
}
auto socket = Socket::create(AF_INET, SOCK_DGRAM, 0);
CHECK_EXPECTED(socket);
/* Convert interface name to ip address */
ifr.ifr_addr.sa_family = AF_INET;
(void)strncpy(ifr.ifr_name, interface_name, IFNAMSIZ-1);
posix_rc = ioctl(fd, SIOCGIFADDR, &ifr);
if (0 > posix_rc) {
LOGGER__ERROR("Interface was not found. ioctl with SIOCGIFADDR has failed. Errno: {:#x}", errno);
status = HAILO_ETH_INTERFACE_NOT_FOUND;
goto l_exit;
}
(void)strncpy(ip, inet_ntoa(((struct sockaddr_in *)&ifr.ifr_addr)->sin_addr), ip_length);
LOGGER__DEBUG("Interface {} | IP: {}", interface_name, ip);
(void)strncpy(ifr.ifr_name, interface_name.c_str(), IFNAMSIZ-1);
auto posix_rc = ioctl(socket->get_fd(), SIOCGIFADDR, &ifr);
CHECK_AS_EXPECTED(posix_rc >= 0, HAILO_ETH_INTERFACE_NOT_FOUND,
"Interface was not found. ioctl with SIOCGIFADDR has failed. errno: {:#x}", errno);
status = HAILO_SUCCESS;
l_exit:
/* Close the socket if it was created */
if (0 < fd) {
posix_rc = close(fd);
if (0 != posix_rc) {
LOGGER__ERROR("Failed closing socket. Errno: {:#x}", errno);
/* Update status if only in case there was not previous error */
if (HAILO_SUCCESS == status) {
status = HAILO_CLOSE_FAILURE;
} else {
LOGGER__ERROR("Did not override status. Left status value at: {} (not assigned {}",
status,
HAILO_CLOSE_FAILURE);
}
}
}
return status;
std::string res = inet_ntoa(((struct sockaddr_in *)&ifr.ifr_addr)->sin_addr);
LOGGER__DEBUG("Interface {} | IP: {}", interface_name, res);
return res;
}
} /* namespace hailort */

View File

@@ -165,6 +165,13 @@ hailo_status Filesystem::create_directory(const std::string &dir_path)
return HAILO_SUCCESS;
}
hailo_status Filesystem::remove_directory(const std::string &dir_path)
{
auto ret_val = rmdir(dir_path.c_str());
CHECK(0 == ret_val, HAILO_FILE_OPERATION_FAILURE, "Failed to remove directory {}", dir_path);
return HAILO_SUCCESS;
}
Expected<std::string> Filesystem::get_current_dir()
{
char cwd[PATH_MAX];

View File

@@ -8,15 +8,20 @@
**/
#include "hailo/hailort.h"
#include "common/os_utils.hpp"
#include "common/utils.hpp"
#include "spdlog/sinks/syslog_sink.h"
#include <unistd.h>
#include <signal.h>
#include <sched.h>
namespace hailort
{
#define EXISTENCE_CHECK_SIGNAL (0)
HailoRTOSLogger::HailoRTOSLogger()
{
m_hailort_os_logger = spdlog::syslog_logger_mt("syslog", "hailort_service", LOG_PID);
@@ -29,6 +34,46 @@ uint32_t OsUtils::get_curr_pid()
return getpid();
}
bool OsUtils::is_pid_alive(uint32_t pid)
{
return (0 == kill(pid, EXISTENCE_CHECK_SIGNAL));
}
void OsUtils::set_current_thread_name(const std::string &name)
{
(void)name;
#ifndef NDEBUG
// pthread_setname_np name size is limited to 16 chars (including null terminator)
assert(name.size() < 16);
pthread_setname_np(pthread_self(), name.c_str());
#endif /* NDEBUG */
}
hailo_status OsUtils::set_current_thread_affinity(uint8_t cpu_index)
{
#if defined(__linux__)
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
CPU_SET(cpu_index, &cpuset);
static const pid_t CURRENT_THREAD = 0;
int rc = sched_setaffinity(CURRENT_THREAD, sizeof(cpu_set_t), &cpuset);
CHECK(rc == 0, HAILO_INTERNAL_FAILURE, "sched_setaffinity failed with status {}", rc);
return HAILO_SUCCESS;
#elif defined(__QNX__)
(void)cpu_index;
// TODO: impl on qnx (HRT-10889)
return HAILO_NOT_IMPLEMENTED;
#endif
}
size_t OsUtils::get_page_size()
{
static const auto page_size = sysconf(_SC_PAGESIZE);
return page_size;
}
CursorAdjustment::CursorAdjustment(){}
CursorAdjustment::~CursorAdjustment(){}

View File

@@ -22,7 +22,7 @@ namespace hailort
Expected<TrafficControlUtil> TrafficControlUtil::create(const std::string &ip, uint16_t port, uint32_t rate_bytes_per_sec)
{
auto interface_name = get_interface_name(ip);
auto interface_name = EthernetUtils::get_interface_from_board_ip(ip);
CHECK_EXPECTED(interface_name, "get_interface_name failed with status {}", interface_name.status());
auto board_id = ip_to_board_id(ip);
@@ -158,17 +158,6 @@ hailo_status TrafficControlUtil::tc_class_del_dev_for_board(const std::string &i
return run_command(cmd.str(), m_is_sudo_needed, {}, true);
}
Expected<std::string> TrafficControlUtil::get_interface_name(const std::string &ip)
{
auto interface_name = Buffer::create(EthernetUtils::MAX_INTERFACE_SIZE, 0);
CHECK_EXPECTED(interface_name);
CHECK_SUCCESS_AS_EXPECTED(EthernetUtils::get_interface_from_board_ip(ip.c_str(),
interface_name->as_pointer<char>(), interface_name->size()));
return interface_name->to_string();
}
Expected<uint32_t> TrafficControlUtil::ip_to_board_id(const std::string &ip)
{
// Takes last digit from 3 octet + the whole 4th octet

View File

@@ -23,7 +23,6 @@ class TrafficControlUtil final
{
public:
static Expected<TrafficControlUtil> create(const std::string &ip, uint16_t port, uint32_t rate_bytes_per_sec);
static Expected<std::string> get_interface_name(const std::string &ip);
~TrafficControlUtil() = default;
TrafficControlUtil(TrafficControlUtil&) = delete;
TrafficControlUtil &operator=(const TrafficControlUtil &) = delete;

View File

@@ -160,48 +160,40 @@ Expected<ArpTable> ArpTable::create(uint32_t interface_index)
return result;
}
hailo_status EthernetUtils::get_interface_from_board_ip(const char *board_ip, char *interface_name, size_t interface_name_length)
Expected<std::string> EthernetUtils::get_interface_from_board_ip(const std::string &board_ip)
{
CHECK_ARG_NOT_NULL(interface_name);
CHECK_ARG_NOT_NULL(board_ip);
auto network_interfaces = NetworkInterface::get_all_interfaces();
CHECK_EXPECTED_AS_STATUS(network_interfaces);
CHECK_EXPECTED(network_interfaces);
struct in_addr board_ip_struct{};
auto status = Socket::pton(AF_INET, board_ip, &board_ip_struct);
CHECK_SUCCESS(status, "Invalid board ip address {}", board_ip);
auto status = Socket::pton(AF_INET, board_ip.c_str(), &board_ip_struct);
CHECK_SUCCESS_AS_EXPECTED(status, "Invalid board ip address {}", board_ip);
for (const auto& network_interface : network_interfaces.value()) {
auto arp_table = ArpTable::create(network_interface.index());
CHECK_EXPECTED_AS_STATUS(arp_table);
CHECK_EXPECTED(arp_table);
const auto mac_address = arp_table->get_mac_address(static_cast<uint32_t>(board_ip_struct.S_un.S_addr));
if (mac_address) {
(void)strncpy(interface_name, network_interface.friendly_name().c_str(), interface_name_length);
return HAILO_SUCCESS;
return network_interface.friendly_name();
}
}
return HAILO_ETH_INTERFACE_NOT_FOUND;
return make_unexpected(HAILO_ETH_INTERFACE_NOT_FOUND);
}
hailo_status EthernetUtils::get_ip_from_interface(const char *interface_name, char *ip, size_t ip_length)
Expected<std::string> EthernetUtils::get_ip_from_interface(const std::string &interface_name)
{
CHECK_ARG_NOT_NULL(interface_name);
CHECK_ARG_NOT_NULL(ip);
auto network_interfaces = NetworkInterface::get_all_interfaces();
CHECK_EXPECTED_AS_STATUS(network_interfaces);
CHECK_EXPECTED(network_interfaces);
for (const auto& network_interface : network_interfaces.value()) {
if (network_interface.friendly_name() == interface_name) {
(void)strncpy(ip, network_interface.ip().c_str(), ip_length);
return HAILO_SUCCESS;
return network_interface.ip();
}
}
return HAILO_ETH_INTERFACE_NOT_FOUND;
return make_unexpected(HAILO_ETH_INTERFACE_NOT_FOUND);
}
} /* namespace hailort */

View File

@@ -164,6 +164,13 @@ hailo_status Filesystem::create_directory(const std::string &dir_path)
return HAILO_SUCCESS;
}
hailo_status Filesystem::remove_directory(const std::string &dir_path)
{
bool was_removed = RemoveDirectoryA(dir_path.c_str());
CHECK(was_removed, HAILO_FILE_OPERATION_FAILURE, "Failed to remove directory {}", dir_path);
return HAILO_SUCCESS;
}
bool Filesystem::is_path_accesible(const std::string &path)
{
// The code is based on examples from: https://cpp.hotexamples.com/examples/-/-/AccessCheck/cpp-accesscheck-function-examples.html

View File

@@ -8,6 +8,7 @@
**/
#include "common/os_utils.hpp"
#include "common/utils.hpp"
#include "hailo/hailort.h"
#include <windows.h>
@@ -29,6 +30,54 @@ uint32_t OsUtils::get_curr_pid()
return static_cast<uint32_t>(GetCurrentProcessId());
}
bool OsUtils::is_pid_alive(uint32_t pid)
{
HANDLE hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, pid);
if (hProcess == NULL) {
// Process is not running
return false;
}
DWORD exitCode;
BOOL result = GetExitCodeProcess(hProcess, &exitCode);
CloseHandle(hProcess);
if (result && exitCode == STILL_ACTIVE) {
return true;
}
else {
return false;
}
}
void OsUtils::set_current_thread_name(const std::string &name)
{
(void)name;
}
hailo_status OsUtils::set_current_thread_affinity(uint8_t cpu_index)
{
const DWORD_PTR affinity_mask = static_cast<DWORD_PTR>(1ULL << cpu_index);
CHECK(0 != SetThreadAffinityMask(GetCurrentThread(), affinity_mask), HAILO_INTERNAL_FAILURE,
"SetThreadAffinityMask failed. LE={}", GetLastError());
return HAILO_SUCCESS;
}
static size_t get_page_size_impl()
{
SYSTEM_INFO system_info{};
GetSystemInfo(&system_info);
return system_info.dwPageSize;
}
size_t OsUtils::get_page_size()
{
static const auto page_size = get_page_size_impl();
return page_size;
}
CursorAdjustment::CursorAdjustment()
{
// Enables Vitual Terminal Processing - enables ANSI Escape Sequences on Windows

View File

@@ -57,22 +57,12 @@ class OsUtils final
{
public:
OsUtils() = delete;
static uint32_t get_curr_pid();
static void set_current_thread_name(const std::string &name)
{
(void)name;
#ifndef NDEBUG
#ifndef _WIN32
// pthread_setname_np name size is limited to 16 chars (including null terminator)
assert(name.size() < 16);
pthread_setname_np(pthread_self(), name.c_str());
#else
// TODO: implement for windows
#endif /* _WIN32 */
#endif /* NDEBUG */
}
static bool is_pid_alive(uint32_t pid);
static void set_current_thread_name(const std::string &name);
static hailo_status set_current_thread_affinity(uint8_t cpu_index);
static size_t get_page_size();
};
} /* namespace hailort */

View File

@@ -42,6 +42,8 @@ public:
m_module_wrapper(std::move(other.m_module_wrapper)), m_socket_fd(std::exchange(other.m_socket_fd, INVALID_SOCKET))
{};
socket_t get_fd() { return m_socket_fd; }
static hailo_status ntop(int af, const void *src, char *dst, socklen_t size);
static hailo_status pton(int af, const char *src, void *dst);

View File

@@ -18,6 +18,7 @@
#include <spdlog/fmt/bundled/core.h>
#include <map>
#include <set>
#include <unordered_set>
namespace hailort
@@ -25,7 +26,7 @@ namespace hailort
#define IS_FIT_IN_UINT8(number) ((std::numeric_limits<uint8_t>::max() >= ((int32_t)(number))) && (std::numeric_limits<uint8_t>::min() <= ((int32_t)(number))))
#define IS_FIT_IN_UINT16(number) ((std::numeric_limits<uint16_t>::max() >= ((int32_t)(number))) && (std::numeric_limits<uint16_t>::min() <= ((int32_t)(number))))
#define IS_FIT_IN_UINT32(number) ((std::numeric_limits<uint32_t>::max() >= ((int64_t)(number))) && (std::numeric_limits<uint32_t>::min() <= ((int64_t)(number))))
template <typename T>
static inline bool contains(const std::vector<T> &container, const T &value)
@@ -51,6 +52,12 @@ static inline bool contains(const std::set<T> &container, T value)
return (container.find(value) != container.end());
}
template <typename T>
static inline bool contains(const std::unordered_set<T> &container, T value)
{
return (container.find(value) != container.end());
}
// From https://stackoverflow.com/questions/57092289/do-stdmake-shared-and-stdmake-unique-have-a-nothrow-version
template <class T, class... Args>
static inline std::unique_ptr<T> make_unique_nothrow(Args&&... args)
@@ -202,6 +209,14 @@ _ISEMPTY( \
} while(0)
#define CHECK_SUCCESS_AS_EXPECTED(status, ...) _CHECK_SUCCESS_AS_EXPECTED(status, ISEMPTY(__VA_ARGS__), "" __VA_ARGS__)
// Define macro CHECK_IN_DEBUG - that checks cond in debug with CHECK macro but in release does nothing and will get optimized out
#ifdef NDEBUG
// In release have this macro do nothing - empty macro
#define CHECK_IN_DEBUG(cond, ret_val, ...)
#else // NDEBUG
#define CHECK_IN_DEBUG(cond, ret_val, ...) CHECK(cond, ret_val, __VA_ARGS__)
#endif // NDEBUG
#ifdef HAILO_SUPPORT_MULTI_PROCESS
#define _CHECK_SUCCESS_AS_RPC_STATUS(status, reply, is_default, fmt, ...) \
do { \
@@ -314,6 +329,12 @@ static uint32_t get_min_value_of_unordered_map(const std::unordered_map<K, V> &m
return min_count;
}
static inline bool is_env_variable_on(const char* env_var_name)
{
auto env_var = std::getenv(env_var_name);
return ((nullptr != env_var) && (strnlen(env_var, 2) == 1) && (strncmp(env_var, "1", 1) == 0));
}
} /* namespace hailort */
#endif /* HAILO_UTILS_H_ */

View File

@@ -27,6 +27,19 @@
#define INVALID_VDMA_CHANNEL (0xff)
#if !defined(__cplusplus) && defined(NTDDI_VERSION)
#include <wdm.h>
typedef ULONG uint32_t;
typedef UCHAR uint8_t;
typedef USHORT uint16_t;
typedef ULONGLONG uint64_t;
typedef uint64_t u64;
typedef uint32_t u32;
typedef uint16_t u16;
typedef uint8_t u8;
#endif /* !defined(__cplusplus) && defined(NTDDI_VERSION) */
#ifdef _MSC_VER
#if !defined(bool) && !defined(__cplusplus)
typedef uint8_t bool;
@@ -64,6 +77,8 @@ typedef uint8_t bool;
#include <stdint.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <stdbool.h>
// defines for devctl
#define _IOW_ __DIOF
#define _IOR_ __DIOT
@@ -132,8 +147,8 @@ struct hailo_vdma_buffer_unmap_params {
/* structure used in ioctl HAILO_DESC_LIST_CREATE */
struct hailo_desc_list_create_params {
size_t desc_count; // in
bool is_circular; // in
uintptr_t desc_handle; // out
// Note: The dma address is required for CONTEXT_SWITCH firmware controls
uint64_t dma_address; // out
};
@@ -277,7 +292,7 @@ struct hailo_vdma_channel_write_register_params {
/* structure used in ioctl HAILO_VDMA_BUFFER_SYNC */
enum hailo_vdma_buffer_sync_type {
HAILO_SYNC_FOR_HOST,
HAILO_SYNC_FOR_CPU,
HAILO_SYNC_FOR_DEVICE,
/** Max enum value to maintain ABI Integrity */

View File

@@ -13,6 +13,7 @@ add_executable(hailort_service
service_resource_manager.hpp
${HAILORT_SERVICE_OS_DIR}/hailort_service.cpp
${HAILORT_COMMON_CPP_SOURCES}
${HAILO_FULL_OS_DIR}/event.cpp # TODO HRT-10681: move event.cpp to common
)
target_compile_options(hailort_service PRIVATE ${HAILORT_COMPILE_OPTIONS})
set_property(TARGET hailort_service PROPERTY CXX_STANDARD 14)

View File

@@ -32,32 +32,146 @@ HailoRtRpcService::HailoRtRpcService()
});
}
void HailoRtRpcService::keep_alive()
hailo_status HailoRtRpcService::abort_input_vstream(uint32_t handle)
{
if (is_input_vstream_aborted(handle)) {
return HAILO_SUCCESS;
}
auto lambda = [](std::shared_ptr<InputVStream> input_vstream) {
return input_vstream->abort();
};
auto &manager = ServiceResourceManager<InputVStream>::get_instance();
auto status = manager.execute<hailo_status>(handle, lambda);
if (HAILO_SUCCESS != status) {
LOGGER__ERROR("Failed to abort input vstream with status {}", status);
}
return status;
}
hailo_status HailoRtRpcService::abort_output_vstream(uint32_t handle)
{
if (is_output_vstream_aborted(handle)) {
return HAILO_SUCCESS;
}
auto lambda = [](std::shared_ptr<OutputVStream> output_vstream) {
return output_vstream->abort();
};
auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto status = manager.execute<hailo_status>(handle, lambda);
if (HAILO_SUCCESS != status) {
LOGGER__ERROR("Failed to abort output vstream with status {}", status);
}
return status;
}
bool HailoRtRpcService::is_input_vstream_aborted(uint32_t handle)
{
auto lambda = [](std::shared_ptr<InputVStream> input_vstream) {
return input_vstream->is_aborted();
};
auto &manager = ServiceResourceManager<InputVStream>::get_instance();
return manager.execute<bool>(handle, lambda);
}
bool HailoRtRpcService::is_output_vstream_aborted(uint32_t handle)
{
auto lambda = [](std::shared_ptr<OutputVStream> output_vstream) {
return output_vstream->is_aborted();
};
auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
return manager.execute<bool>(handle, lambda);
}
hailo_status HailoRtRpcService::resume_input_vstream(uint32_t handle)
{
if (!is_input_vstream_aborted(handle)) {
return HAILO_SUCCESS;
}
auto lambda = [](std::shared_ptr<InputVStream> input_vstream) {
return input_vstream->resume();
};
auto &manager = ServiceResourceManager<InputVStream>::get_instance();
auto status = manager.execute<hailo_status>(handle, lambda);
if (HAILO_SUCCESS != status) {
LOGGER__ERROR("Failed to resume input vstream with status {}", status);
}
return status;
}
hailo_status HailoRtRpcService::resume_output_vstream(uint32_t handle)
{
if (!is_output_vstream_aborted(handle)) {
return HAILO_SUCCESS;
}
auto lambda = [](std::shared_ptr<OutputVStream> output_vstream) {
return output_vstream->resume();
};
auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto status = manager.execute<hailo_status>(handle, lambda);
if (HAILO_SUCCESS != status) {
LOGGER__ERROR("Failed to resume output vstream with status {}", status);
}
return status;
}
// TODO: Add a named templated release functions for InputVStream and OutputVStream to call abort before release.
void HailoRtRpcService::abort_vstreams_by_pids(std::set<uint32_t> &pids)
{
auto inputs_handles = ServiceResourceManager<InputVStream>::get_instance().resources_handles_by_pids(pids);
auto outputs_handles = ServiceResourceManager<OutputVStream>::get_instance().resources_handles_by_pids(pids);
for (auto &input_handle : inputs_handles) {
abort_input_vstream(input_handle);
}
for (auto &output_handle : outputs_handles) {
abort_output_vstream(output_handle);
}
}
void HailoRtRpcService::remove_disconnected_clients()
{
while (true) {
std::this_thread::sleep_for(hailort::HAILO_KEEPALIVE_INTERVAL / 2);
auto now = std::chrono::high_resolution_clock::now();
std::unique_lock<std::mutex> lock(m_mutex);
std::set<uint32_t> pids_to_remove;
{
std::unique_lock<std::mutex> lock(m_mutex);
for (auto pid_to_last_alive : m_clients_pids) {
auto duration = std::chrono::duration_cast<std::chrono::seconds>(now - pid_to_last_alive.second);
if (duration > hailort::HAILO_KEEPALIVE_INTERVAL) {
auto client_id = pid_to_last_alive.first;
pids_to_remove.insert(client_id);
LOGGER__INFO("Client disconnected, pid: {}", client_id);
HAILORT_OS_LOG_INFO("Client disconnected, pid: {}", client_id);
ServiceResourceManager<OutputVStream>::get_instance().release_by_pid(client_id);
ServiceResourceManager<InputVStream>::get_instance().release_by_pid(client_id);
ServiceResourceManager<ConfiguredNetworkGroup>::get_instance().release_by_pid(client_id);
ServiceResourceManager<VDevice>::get_instance().release_by_pid(client_id);
auto client_pid = pid_to_last_alive.first;
pids_to_remove.insert(client_pid);
}
}
for (auto &pid : pids_to_remove) {
m_clients_pids.erase(pid);
// We abort vstreams before releasing them to avoid cases where the vstream is stuck in execute of a
// blocking operation (which will be finished with timeout).
// To release the vstream the ServiceResourceManager is waiting for the resource_mutex which is also locked in execute.
abort_vstreams_by_pids(pids_to_remove);
for (auto &client_pid : pids_to_remove) {
ServiceResourceManager<OutputVStream>::get_instance().release_by_pid(client_pid);
ServiceResourceManager<InputVStream>::get_instance().release_by_pid(client_pid);
ServiceResourceManager<ConfiguredNetworkGroup>::get_instance().release_by_pid(client_pid);
ServiceResourceManager<VDevice>::get_instance().release_by_pid(client_pid);
LOGGER__INFO("Client disconnected, pid: {}", client_pid);
HAILORT_OS_LOG_INFO("Client disconnected, pid: {}", client_pid);
m_clients_pids.erase(client_pid);
}
}
}
void HailoRtRpcService::keep_alive()
{
while (true) {
remove_disconnected_clients();
}
}
grpc::Status HailoRtRpcService::client_keep_alive(grpc::ServerContext*, const keepalive_Request *request,
empty*)
{
@@ -93,6 +207,8 @@ grpc::Status HailoRtRpcService::VDevice_dup_handle(grpc::ServerContext*, const d
grpc::Status HailoRtRpcService::VDevice_create(grpc::ServerContext *, const VDevice_create_Request *request,
VDevice_create_Reply *reply)
{
remove_disconnected_clients();
// Deserialization
const auto params_proto = request->hailo_vdevice_params();
std::vector<hailo_device_id_t> device_ids;
@@ -125,8 +241,8 @@ grpc::Status HailoRtRpcService::VDevice_release(grpc::ServerContext*, const Rele
Release_Reply *reply)
{
auto &manager = ServiceResourceManager<VDevice>::get_instance();
auto status = manager.release_resource(request->handle());
reply->set_status(static_cast<uint32_t>(status));
manager.release_resource(request->handle(), request->pid());
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK;
}
@@ -236,8 +352,8 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_release(grpc::ServerConte
Release_Reply *reply)
{
auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto status = manager.release_resource(request->handle());
reply->set_status(static_cast<uint32_t>(status));
manager.release_resource(request->handle(), request->pid());
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK;
}
@@ -468,11 +584,12 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_set_scheduler_timeout(grp
const ConfiguredNetworkGroup_set_scheduler_timeout_Request *request,
ConfiguredNetworkGroup_set_scheduler_timeout_Reply *reply)
{
auto lambda = [](std::shared_ptr<ConfiguredNetworkGroup> cng, std::chrono::milliseconds timeout_ms) {
return cng->set_scheduler_timeout(timeout_ms);
auto lambda = [](std::shared_ptr<ConfiguredNetworkGroup> cng, std::chrono::milliseconds timeout_ms, std::string network_name) {
return cng->set_scheduler_timeout(timeout_ms, network_name);
};
auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto status = net_group_manager.execute<hailo_status>(request->handle(), lambda, static_cast<std::chrono::milliseconds>(request->timeout_ms()));
auto status = net_group_manager.execute<hailo_status>(request->handle(), lambda, static_cast<std::chrono::milliseconds>(request->timeout_ms()),
request->network_name());
reply->set_status(status);
return grpc::Status::OK;
}
@@ -561,21 +678,24 @@ grpc::Status HailoRtRpcService::InputVStreams_create(grpc::ServerContext *, cons
};
inputs_params.emplace(param_proto.name(), std::move(params));
}
auto network_group_handle = request->net_group();
auto client_pid = request->pid();
auto lambda = [](std::shared_ptr<ConfiguredNetworkGroup> cng, const std::map<std::string, hailo_vstream_params_t> &inputs_params) {
return cng->create_input_vstreams(inputs_params);
};
auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto vstreams_expected = net_group_manager.execute<Expected<std::vector<InputVStream>>>(request->net_group(), lambda, inputs_params);
auto vstreams_expected = net_group_manager.execute<Expected<std::vector<InputVStream>>>(network_group_handle, lambda, inputs_params);
CHECK_EXPECTED_AS_RPC_STATUS(vstreams_expected, reply);
auto vstreams = vstreams_expected.release();
auto &manager = ServiceResourceManager<InputVStream>::get_instance();
auto client_pid = request->pid();
auto &manager = ServiceResourceManager<InputVStream>::get_instance();
for (size_t i = 0; i < vstreams.size(); i++) {
auto handle = manager.register_resource(client_pid, make_shared_nothrow<InputVStream>(std::move(vstreams[i])));
reply->add_handles(handle);
}
net_group_manager.dup_handle(client_pid, network_group_handle);
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK;
}
@@ -584,8 +704,8 @@ grpc::Status HailoRtRpcService::InputVStream_release(grpc::ServerContext *, cons
Release_Reply *reply)
{
auto &manager = ServiceResourceManager<InputVStream>::get_instance();
auto status = manager.release_resource(request->handle());
reply->set_status(static_cast<uint32_t>(status));
manager.release_resource(request->handle(), request->pid());
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK;
}
@@ -610,20 +730,24 @@ grpc::Status HailoRtRpcService::OutputVStreams_create(grpc::ServerContext *, con
output_params.emplace(param_proto.name(), std::move(params));
}
auto network_group_handle = request->net_group();
auto client_pid = request->pid();
auto lambda = [](std::shared_ptr<ConfiguredNetworkGroup> cng, const std::map<std::string, hailo_vstream_params_t> &output_params) {
return cng->create_output_vstreams(output_params);
};
auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto vstreams_expected = net_group_manager.execute<Expected<std::vector<OutputVStream>>>(request->net_group(), lambda, output_params);
auto vstreams_expected = net_group_manager.execute<Expected<std::vector<OutputVStream>>>(network_group_handle, lambda, output_params);
CHECK_EXPECTED_AS_RPC_STATUS(vstreams_expected, reply);
auto vstreams = vstreams_expected.release();
auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto client_pid = request->pid();
auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
for (size_t i = 0; i < vstreams.size(); i++) {
auto handle = manager.register_resource(client_pid, make_shared_nothrow<OutputVStream>(std::move(vstreams[i])));
reply->add_handles(handle);
}
net_group_manager.dup_handle(client_pid, network_group_handle);
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK;
}
@@ -631,8 +755,17 @@ grpc::Status HailoRtRpcService::OutputVStreams_create(grpc::ServerContext *, con
grpc::Status HailoRtRpcService::OutputVStream_release(grpc::ServerContext *, const Release_Request *request,
Release_Reply *reply)
{
auto was_aborted = is_output_vstream_aborted(request->handle());
abort_output_vstream(request->handle());
auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto status = manager.release_resource(request->handle());
auto resource = manager.release_resource(request->handle(), request->pid());
auto status = HAILO_SUCCESS;
if (resource && (!was_aborted)) {
status = resource->resume();
if (HAILO_SUCCESS != status) {
LOGGER__INFO("Failed to resume output vstream {} after destruction", resource->name());
}
}
reply->set_status(static_cast<uint32_t>(status));
return grpc::Status::OK;
}
@@ -752,6 +885,8 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_all_stream_infos(grpc
auto proto_nms_info_defuse_info = proto_nms_info->mutable_defuse_info();
proto_nms_info_defuse_info->set_class_group_index(stream_info.nms_info.defuse_info.class_group_index);
proto_nms_info_defuse_info->set_original_name(std::string(stream_info.nms_info.defuse_info.original_name));
proto_nms_info->set_burst_size(stream_info.nms_info.burst_size);
proto_nms_info->set_burst_type(static_cast<ProtoNmsBurstType>(proto_stream_info.nms_info().burst_type()));
} else {
auto proto_stream_shape = proto_stream_info.mutable_stream_shape();
auto proto_stream_shape_shape = proto_stream_shape->mutable_shape();
@@ -793,9 +928,13 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_latency_measurement(g
};
auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto expected_latency_result = manager.execute<Expected<LatencyMeasurementResult>>(request->handle(), lambda, request->network_name());
if (HAILO_NOT_AVAILABLE == expected_latency_result.status()) {
reply->set_status(static_cast<uint32_t>(HAILO_NOT_AVAILABLE));
} else {
CHECK_EXPECTED_AS_RPC_STATUS(expected_latency_result, reply);
reply->set_avg_hw_latency(static_cast<uint32_t>(expected_latency_result.value().avg_hw_latency.count()));
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
}
return grpc::Status::OK;
}
@@ -813,6 +952,60 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_is_multi_context(grpc::Se
return grpc::Status::OK;
}
grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_sorted_output_names(grpc::ServerContext*,
const ConfiguredNetworkGroup_get_sorted_output_names_Request *request,
ConfiguredNetworkGroup_get_sorted_output_names_Reply *reply)
{
auto lambda = [](std::shared_ptr<ConfiguredNetworkGroup> cng) {
return cng->get_sorted_output_names();
};
auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto sorted_output_names_expected = manager.execute<Expected<std::vector<std::string>>>(request->handle(), lambda);
CHECK_EXPECTED_AS_RPC_STATUS(sorted_output_names_expected, reply);
auto sorted_output_names_proto = reply->mutable_sorted_output_names();
for (auto &name : sorted_output_names_expected.value()) {
sorted_output_names_proto->Add(std::move(name));
}
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK;
}
grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_stream_names_from_vstream_name(grpc::ServerContext*,
const ConfiguredNetworkGroup_get_stream_names_from_vstream_name_Request *request,
ConfiguredNetworkGroup_get_stream_names_from_vstream_name_Reply *reply)
{
auto lambda = [](std::shared_ptr<ConfiguredNetworkGroup> cng, const std::string &vstream_name) {
return cng->get_stream_names_from_vstream_name(vstream_name);
};
auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto streams_names_expected = manager.execute<Expected<std::vector<std::string>>>(request->handle(), lambda, request->vstream_name());
CHECK_EXPECTED_AS_RPC_STATUS(streams_names_expected, reply);
auto streams_names_proto = reply->mutable_streams_names();
for (auto &name : streams_names_expected.value()) {
streams_names_proto->Add(std::move(name));
}
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK;
}
grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_vstream_names_from_stream_name(grpc::ServerContext*,
const ConfiguredNetworkGroup_get_vstream_names_from_stream_name_Request *request,
ConfiguredNetworkGroup_get_vstream_names_from_stream_name_Reply *reply)
{
auto lambda = [](std::shared_ptr<ConfiguredNetworkGroup> cng, const std::string &stream_name) {
return cng->get_vstream_names_from_stream_name(stream_name);
};
auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto vstreams_names_expected = manager.execute<Expected<std::vector<std::string>>>(request->handle(), lambda, request->stream_name());
CHECK_EXPECTED_AS_RPC_STATUS(vstreams_names_expected, reply);
auto vstreams_names_proto = reply->mutable_vstreams_names();
for (auto &name : vstreams_names_expected.value()) {
vstreams_names_proto->Add(std::move(name));
}
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK;
}
grpc::Status HailoRtRpcService::InputVStream_get_frame_size(grpc::ServerContext*, const VStream_get_frame_size_Request *request,
VStream_get_frame_size_Reply *reply)
{
@@ -906,11 +1099,7 @@ grpc::Status HailoRtRpcService::OutputVStream_network_name(grpc::ServerContext*,
grpc::Status HailoRtRpcService::InputVStream_abort(grpc::ServerContext*, const VStream_abort_Request *request,
VStream_abort_Reply *reply)
{
auto lambda = [](std::shared_ptr<InputVStream> input_vstream) {
return input_vstream->abort();
};
auto &manager = ServiceResourceManager<InputVStream>::get_instance();
auto status = manager.execute<hailo_status>(request->handle(), lambda);
auto status = abort_input_vstream(request->handle());
reply->set_status(status);
return grpc::Status::OK;
}
@@ -918,11 +1107,7 @@ grpc::Status HailoRtRpcService::InputVStream_abort(grpc::ServerContext*, const V
grpc::Status HailoRtRpcService::OutputVStream_abort(grpc::ServerContext*, const VStream_abort_Request *request,
VStream_abort_Reply *reply)
{
auto lambda = [](std::shared_ptr<OutputVStream> output_vstream) {
return output_vstream->abort();
};
auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto status = manager.execute<hailo_status>(request->handle(), lambda);
auto status = abort_output_vstream(request->handle());
reply->set_status(status);
return grpc::Status::OK;
}
@@ -951,6 +1136,54 @@ grpc::Status HailoRtRpcService::OutputVStream_resume(grpc::ServerContext*, const
return grpc::Status::OK;
}
grpc::Status HailoRtRpcService::InputVStream_stop_and_clear(grpc::ServerContext*, const VStream_stop_and_clear_Request *request,
VStream_stop_and_clear_Reply *reply)
{
auto lambda = [](std::shared_ptr<InputVStream> input_vstream) {
return input_vstream->stop_and_clear();
};
auto &manager = ServiceResourceManager<InputVStream>::get_instance();
auto status = manager.execute<hailo_status>(request->handle(), lambda);
reply->set_status(status);
return grpc::Status::OK;
}
grpc::Status HailoRtRpcService::OutputVStream_stop_and_clear(grpc::ServerContext*, const VStream_stop_and_clear_Request *request,
VStream_stop_and_clear_Reply *reply)
{
auto lambda = [](std::shared_ptr<OutputVStream> output_vstream) {
return output_vstream->stop_and_clear();
};
auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto status = manager.execute<hailo_status>(request->handle(), lambda);
reply->set_status(status);
return grpc::Status::OK;
}
grpc::Status HailoRtRpcService::InputVStream_start_vstream(grpc::ServerContext*, const VStream_start_vstream_Request *request,
VStream_start_vstream_Reply *reply)
{
auto lambda = [](std::shared_ptr<InputVStream> input_vstream) {
return input_vstream->start_vstream();
};
auto &manager = ServiceResourceManager<InputVStream>::get_instance();
auto status = manager.execute<hailo_status>(request->handle(), lambda);
reply->set_status(status);
return grpc::Status::OK;
}
grpc::Status HailoRtRpcService::OutputVStream_start_vstream(grpc::ServerContext*, const VStream_start_vstream_Request *request,
VStream_start_vstream_Reply *reply)
{
auto lambda = [](std::shared_ptr<OutputVStream> output_vstream) {
return output_vstream->start_vstream();
};
auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto status = manager.execute<hailo_status>(request->handle(), lambda);
reply->set_status(status);
return grpc::Status::OK;
}
grpc::Status HailoRtRpcService::InputVStream_get_user_buffer_format(grpc::ServerContext*, const VStream_get_user_buffer_format_Request *request,
VStream_get_user_buffer_format_Reply *reply)
{
@@ -1015,5 +1248,31 @@ grpc::Status HailoRtRpcService::OutputVStream_get_info(grpc::ServerContext*, con
return grpc::Status::OK;
}
grpc::Status HailoRtRpcService::InputVStream_is_aborted(grpc::ServerContext*, const VStream_is_aborted_Request *request,
VStream_is_aborted_Reply *reply)
{
auto lambda = [](std::shared_ptr<OutputVStream> input_vstream) {
return input_vstream->is_aborted();
};
auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto is_aborted = manager.execute<bool>(request->handle(), lambda);
reply->set_is_aborted(is_aborted);
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK;
}
grpc::Status HailoRtRpcService::OutputVStream_is_aborted(grpc::ServerContext*, const VStream_is_aborted_Request *request,
VStream_is_aborted_Reply *reply)
{
auto lambda = [](std::shared_ptr<InputVStream> input_vstream) {
return input_vstream->is_aborted();
};
auto &manager = ServiceResourceManager<InputVStream>::get_instance();
auto is_aborted = manager.execute<bool>(request->handle(), lambda);
reply->set_is_aborted(is_aborted);
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK;
}
}

View File

@@ -26,6 +26,7 @@
#endif
#include <thread>
#include "hailo/hailort.h"
namespace hailort
{
@@ -98,6 +99,18 @@ public:
dup_handle_Reply*) override;
virtual grpc::Status OutputVStream_dup_handle(grpc::ServerContext *ctx, const dup_handle_Request *request,
dup_handle_Reply*) override;
virtual grpc::Status InputVStream_stop_and_clear(grpc::ServerContext *ctx, const VStream_stop_and_clear_Request *request,
VStream_stop_and_clear_Reply*) override;
virtual grpc::Status OutputVStream_stop_and_clear(grpc::ServerContext *ctx, const VStream_stop_and_clear_Request *request,
VStream_stop_and_clear_Reply*) override;
virtual grpc::Status InputVStream_start_vstream(grpc::ServerContext *ctx, const VStream_start_vstream_Request *request,
VStream_start_vstream_Reply*) override;
virtual grpc::Status OutputVStream_start_vstream(grpc::ServerContext *ctx, const VStream_start_vstream_Request *request,
VStream_start_vstream_Reply*) override;
virtual grpc::Status InputVStream_is_aborted(grpc::ServerContext *ctx, const VStream_is_aborted_Request *request,
VStream_is_aborted_Reply*) override;
virtual grpc::Status OutputVStream_is_aborted(grpc::ServerContext *ctx, const VStream_is_aborted_Request *request,
VStream_is_aborted_Reply*) override;
virtual grpc::Status ConfiguredNetworkGroup_dup_handle(grpc::ServerContext *ctx, const dup_handle_Request *request,
dup_handle_Reply*) override;
@@ -157,9 +170,26 @@ public:
virtual grpc::Status ConfiguredNetworkGroup_get_config_params(grpc::ServerContext*,
const ConfiguredNetworkGroup_get_config_params_Request *request,
ConfiguredNetworkGroup_get_config_params_Reply *reply) override;
virtual grpc::Status ConfiguredNetworkGroup_get_sorted_output_names(grpc::ServerContext*,
const ConfiguredNetworkGroup_get_sorted_output_names_Request *request,
ConfiguredNetworkGroup_get_sorted_output_names_Reply *reply) override;
virtual grpc::Status ConfiguredNetworkGroup_get_stream_names_from_vstream_name(grpc::ServerContext*,
const ConfiguredNetworkGroup_get_stream_names_from_vstream_name_Request *request,
ConfiguredNetworkGroup_get_stream_names_from_vstream_name_Reply *reply) override;
virtual grpc::Status ConfiguredNetworkGroup_get_vstream_names_from_stream_name(grpc::ServerContext*,
const ConfiguredNetworkGroup_get_vstream_names_from_stream_name_Request *request,
ConfiguredNetworkGroup_get_vstream_names_from_stream_name_Reply *reply) override;
private:
void keep_alive();
hailo_status abort_input_vstream(uint32_t handle);
hailo_status abort_output_vstream(uint32_t handle);
hailo_status resume_input_vstream(uint32_t handle);
hailo_status resume_output_vstream(uint32_t handle);
bool is_input_vstream_aborted(uint32_t handle);
bool is_output_vstream_aborted(uint32_t handle);
void abort_vstreams_by_pids(std::set<uint32_t> &pids);
void remove_disconnected_clients();
std::mutex m_mutex;
std::map<uint32_t, std::chrono::time_point<std::chrono::high_resolution_clock>> m_clients_pids;

View File

@@ -7,5 +7,4 @@
[Service]
HAILORT_LOGGER_PATH="/var/log/hailo"
HAILO_DISABLE_MULTIPLEXER=0
HAILO_MONITOR=0

View File

@@ -13,9 +13,11 @@
#include "hailo/expected.hpp"
#include "common/utils.hpp"
#include "common/os_utils.hpp"
#include <mutex>
#include <shared_mutex>
#include <unordered_set>
namespace hailort
{
@@ -23,11 +25,13 @@ namespace hailort
template<class T>
struct Resource {
Resource(uint32_t pid, std::shared_ptr<T> resource)
: pid(pid), resource(std::move(resource))
{}
: resource(std::move(resource))
{
pids.insert(pid);
}
uint32_t pid;
std::shared_ptr<T> resource;
std::unordered_set<uint32_t> pids;
};
template<class T>
@@ -69,42 +73,88 @@ public:
uint32_t dup_handle(uint32_t pid, uint32_t handle)
{
// Keeping this function for future possible usage
(void)pid;
std::unique_lock<std::mutex> lock(m_mutex);
auto resource_expected = resource_lookup(handle);
assert(resource_expected);
auto resource = resource_expected.release();
assert(contains(m_resources_mutexes, handle));
std::unique_lock<std::shared_timed_mutex> resource_lock(m_resources_mutexes[handle]);
resource->pids.insert(pid);
return handle;
}
hailo_status release_resource(uint32_t handle)
std::shared_ptr<T> release_resource(uint32_t handle, uint32_t pid)
{
std::shared_ptr<T> res = nullptr;
std::unique_lock<std::mutex> lock(m_mutex);
auto found = m_resources.find(handle);
CHECK(found != m_resources.end(), HAILO_NOT_FOUND, "Failed to release resource with handle {}, resource does not exist", handle);
assert(contains(m_resources_mutexes, handle));
auto resource = m_resources[handle];
{
std::unique_lock<std::shared_timed_mutex> resource_lock(m_resources_mutexes[handle]);
m_resources.erase(handle);
}
m_resources_mutexes.erase(handle);
return HAILO_SUCCESS;
if (found == m_resources.end()) {
LOGGER__INFO("Failed to release resource with handle {} and PID {}. The resource no longer exists or may have already been released",
handle, pid);
return res;
}
void release_by_pid(uint32_t pid)
assert(contains(m_resources_mutexes, handle));
auto resource = m_resources[handle];
bool release_resource = false;
{
std::unique_lock<std::shared_timed_mutex> resource_lock(m_resources_mutexes[handle]);
resource->pids.erase(pid);
if (all_pids_dead(resource)) {
release_resource = true;
res = resource->resource;
m_resources.erase(handle);
}
}
if (release_resource) {
m_resources_mutexes.erase(handle);
}
return res;
}
std::vector<std::shared_ptr<T>> release_by_pid(uint32_t pid)
{
std::vector<std::shared_ptr<T>> res;
std::unique_lock<std::mutex> lock(m_mutex);
for (auto iter = m_resources.begin(); iter != m_resources.end(); ) {
auto handle = iter->first;
if (iter->second->pid == pid) {
bool release_resource = false;
if (contains(iter->second->pids, pid)) {
assert(contains(m_resources_mutexes, handle));
{
std::unique_lock<std::shared_timed_mutex> resource_lock(m_resources_mutexes[handle]);
iter->second->pids.erase(pid);
if (iter->second->pids.empty()) {
release_resource = true;
res.push_back(iter->second->resource);
iter = m_resources.erase(iter);
}
}
}
if (release_resource) {
m_resources_mutexes.erase(handle);
} else {
++iter;
}
}
return res;
}
std::vector<uint32_t> resources_handles_by_pids(std::set<uint32_t> &pids)
{
std::unique_lock<std::mutex> lock(m_mutex);
std::vector<uint32_t> resources_handles;
for (auto &handle_resource_pair : m_resources) {
for (auto &pid : pids) {
if (contains(handle_resource_pair.second->pids, pid)) {
resources_handles.emplace_back(handle_resource_pair.first);
}
}
}
return resources_handles;
}
private:
@@ -120,6 +170,16 @@ private:
return resource;
}
bool all_pids_dead(std::shared_ptr<Resource<T>> resource)
{
for (auto &pid : resource->pids) {
if (OsUtils::is_pid_alive(pid)) {
return false;
}
}
return true;
}
std::mutex m_mutex;
std::atomic<uint32_t> m_current_handle_index;
std::unordered_map<uint32_t, std::shared_ptr<Resource<T>>> m_resources;

View File

@@ -8,4 +8,3 @@
reg ADD HKLM\SYSTEM\CurrentControlSet\Services\hailort_service /f /v Environment /t REG_MULTI_SZ /d ^
HAILORT_LOGGER_PATH="%PROGRAMDATA%\HailoRT_Service\logs"\0^
HAILO_DISABLE_MULTIPLEXER=0\0

View File

@@ -26,10 +26,11 @@ set(HAILORTCLI_CPP_FILES
run2/run2_command.cpp
run2/network_runner.cpp
run2/live_printer.cpp
run2/live_stats.cpp
run2/timer_live_track.cpp
run2/network_live_track.cpp
run2/measurement_live_track.cpp
run2/io_wrappers.cpp
)
if(UNIX)
@@ -38,6 +39,7 @@ if(UNIX)
udp_rate_limiter_command.cpp
# TODO: We dont compile download_action_list_command on windows, as it uses packed enums (HRT-5919)
download_action_list_command.cpp
measure_nnc_performance_command.cpp
)
endif()
@@ -70,7 +72,7 @@ target_link_libraries(hailortcli
scheduler_mon_proto)
if(WIN32)
target_link_libraries(hailortcli Ws2_32 Iphlpapi Shlwapi)
target_link_libraries(hailortcli Ws2_32 Iphlpapi Shlwapi winmm.lib)
elseif(CMAKE_SYSTEM_NAME STREQUAL QNX)
target_link_libraries(hailortcli pevents)
endif()

View File

@@ -308,6 +308,14 @@ Expected<ordered_json> DownloadActionListCommand::parse_action_data(uint32_t bas
data_json = *reinterpret_cast<CONTEXT_SWITCH_DEFS__enable_nms_action_t *>(action);
action_length_local = sizeof(CONTEXT_SWITCH_DEFS__enable_nms_action_t);
break;
case CONTEXT_SWITCH_DEFS__ACTION_TYPE_WRITE_DATA_BY_TYPE:
data_json = *reinterpret_cast<CONTEXT_SWITCH_DEFS__write_data_by_type_action_t *>(action);
action_length_local = sizeof(CONTEXT_SWITCH_DEFS__write_data_by_type_action_t);
break;
case CONTEXT_SWITCH_DEFS__ACTION_TYPE_SWITCH_LCU_BATCH:
data_json = *reinterpret_cast<CONTEXT_SWITCH_DEFS__switch_lcu_batch_action_data_t *>(action);
action_length_local = sizeof(CONTEXT_SWITCH_DEFS__switch_lcu_batch_action_data_t);
break;
case CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT:
// Fallthrough
// Handling CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT is needed because we compile this file with -Wswitch-enum
@@ -622,3 +630,12 @@ void to_json(json &j, const CONTEXT_SWITCH_DEFS__open_boundary_output_channel_da
{
j = unpack_vdma_channel_id(data);
}
void to_json(json& j, const CONTEXT_SWITCH_DEFS__switch_lcu_batch_action_data_t& data) {
const auto cluster_index = CONTEXT_SWITCH_DEFS__PACKED_LCU_ID_CLUSTER_INDEX_READ(data.packed_lcu_id);
const auto lcu_index = CONTEXT_SWITCH_DEFS__PACKED_LCU_ID_LCU_INDEX_READ(data.packed_lcu_id);
const auto network_index = data.network_index;
const auto kernel_done_count = data.kernel_done_count;
j = json{{"cluster_index", cluster_index}, {"lcu_index", lcu_index}, {"network_index", network_index},
{"kernel_done_count", kernel_done_count}};
}

View File

@@ -100,6 +100,8 @@ static std::pair<CONTEXT_SWITCH_DEFS__ACTION_TYPE_t, std::string> mapping[] = {
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_OPEN_BOUNDARY_INPUT_CHANNEL, "open_boundary_input_channel"},
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_OPEN_BOUNDARY_OUTPUT_CHANNEL, "open_boundary_output_channel"},
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_ENABLE_NMS, "enable_nms"},
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_WRITE_DATA_BY_TYPE, "write_data_by_type"},
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_SWITCH_LCU_BATCH, "switch_lcu_batch"},
};
static_assert(ARRAY_ENTRIES(mapping) == CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT,
"Missing a mapping from a CONTEXT_SWITCH_DEFS__ACTION_TYPE_t to it's string value");
@@ -112,8 +114,9 @@ NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(CONTEXT_SWITCH_DEFS__trigger_sequencer_action
NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(CONTEXT_SWITCH_DEFS__sequencer_interrupt_data_t, sequencer_index);
NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(CONTEXT_SWITCH_DEFS__wait_nms_data_t, aggregator_index);
NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(CONTEXT_SWITCH_DEFS__module_config_done_interrupt_data_t, module_index);
NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(CONTEXT_SWITCH_DEFS__fetch_ccw_bursts_action_data_t, config_stream_index);
NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(CONTEXT_SWITCH_DEFS__enable_nms_action_t, nms_unit_index, network_index);
NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(CONTEXT_SWITCH_DEFS__fetch_ccw_bursts_action_data_t, config_stream_index, ccw_bursts);
NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(CONTEXT_SWITCH_DEFS__enable_nms_action_t, nms_unit_index, network_index, number_of_classes, burst_size);
NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(CONTEXT_SWITCH_DEFS__write_data_by_type_action_t, address, data_type, data, shift, mask, network_index);
// Non-default implementations
void to_json(json &j, const CONTEXT_SWITCH_DEFS__deactivate_vdma_channel_action_data_t &data);
@@ -138,5 +141,6 @@ void to_json(json &j, const CONTEXT_SWITCH_DEFS__deactivate_cfg_channel_t &data)
void to_json(json &j, const CONTEXT_SWITCH_DEFS__add_ddr_pair_info_action_data_t &data);
void to_json(json &j, const CONTEXT_SWITCH_DEFS__open_boundary_input_channel_data_t &data);
void to_json(json &j, const CONTEXT_SWITCH_DEFS__open_boundary_output_channel_data_t &data);
void to_json(json &j, const CONTEXT_SWITCH_DEFS__switch_lcu_batch_action_data_t &data);
#endif /* _HAILO_DOWNLOAD_ACTION_LIST_COMMAND_HPP_ */

View File

@@ -26,6 +26,7 @@
#endif
#include "parse_hef_command.hpp"
#include "fw_control_command.hpp"
#include "measure_nnc_performance_command.hpp"
#include "firmware_header_utils.h"
#include "hailo/hailort.h"
@@ -200,6 +201,7 @@ public:
add_subcommand<MonCommand>();
#if defined(__GNUC__)
add_subcommand<UdpRateLimiterCommand>();
add_subcommand<HwInferEstimatorCommand>();
#endif
add_subcommand<ParseHefCommand>();
add_subcommand<FwControlCommand>();

View File

@@ -46,6 +46,12 @@ void add_device_options(CLI::App *app, hailo_device_params &device_params, bool
Expected<std::vector<std::unique_ptr<Device>>> create_devices(const hailo_device_params &device_params);
Expected<std::vector<std::string>> get_device_ids(const hailo_device_params &device_params);
enum class OptionVisibility {
VISIBLE,
HIDDEN
};
/**
* CLI11 transformer object, converting enum argument from string.
* Use this object instead of CLI::CheckedTransformer in order
@@ -55,12 +61,47 @@ template<typename EnumType>
class HailoCheckedTransformer : public CLI::CheckedTransformer
{
public:
HailoCheckedTransformer(std::vector<std::pair<std::string, EnumType>> values) :
CLI::CheckedTransformer(values)
struct Enum
{
desc_function_ = [values]() {
return CLI::detail::generate_map(CLI::detail::smart_deref(values), true);
std::string name;
EnumType value;
OptionVisibility visibility = OptionVisibility::VISIBLE;
std::pair<std::string, EnumType> to_pair() const { return std::make_pair(name, value); }
};
HailoCheckedTransformer(std::vector<Enum> values) :
CLI::CheckedTransformer(to_values_vector(values, true)) // Getting hidden value for the enum transformer.
{
// Hide hidden values for help and autocomplete.
const auto non_hidden_values = to_values_vector(values, false);
desc_function_ = [non_hidden_values]() {
return CLI::detail::generate_map(CLI::detail::smart_deref(non_hidden_values), true);
};
autocomplete_func_ = [non_hidden_values](const std::string &) {
std::vector<std::string> completions;
for (const auto &completion : non_hidden_values) {
completions.emplace_back(completion.first);
}
return completions;
};
}
private:
static std::vector<std::pair<std::string, EnumType>> to_values_vector(const std::vector<Enum> &values,
bool get_hidden)
{
std::vector<std::pair<std::string, EnumType>> values_vector;
for (const auto &value : values) {
if (get_hidden || (value.visibility == OptionVisibility::VISIBLE)) {
values_vector.emplace_back(value.to_pair());
}
}
return values_vector;
}
};

View File

@@ -0,0 +1,118 @@
/**
* Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file measure_nnc_performance_command.cpp
* @brief measure nerual network performance for given network using only the HW components without host SW
**/
#include "measure_nnc_performance_command.hpp"
#include "hailortcli.hpp"
#include "hailo/hailort.h"
#include "hailo/network_group.hpp"
#include "hailo/hef.hpp"
#include "hailo/vstream.hpp"
#include "hailo/vdevice.hpp"
#include <iostream>
#define BYTES_TO_KILOBYTES (1024)
HwInferEstimatorCommand::HwInferEstimatorCommand(CLI::App &parent_app) :
Command(parent_app.add_subcommand("measure-nnc-performance",
"measure nerual network performance for given network using only the HW components without host SW")),
m_params({})
{
// This will make the command to be hidden in the --help print in the command line.
m_app->group("");
add_vdevice_options(m_app, m_params.vdevice_params);
m_app->add_option("hef", m_params.hef_path, "Path of the HEF to load")
->check(CLI::ExistingFile)
->required();
m_app->add_option("--batch-size", m_params.batch_size,
"Inference batch.\n"
"This batch applies to the whole network_group.")
->check(CLI::NonNegativeNumber)
->default_val(HAILO_DEFAULT_BATCH_SIZE);
}
Expected<std::map<std::string, ConfigureNetworkParams>> get_configure_params(const hw_infer_runner_params &params,
hailort::Hef &hef, hailo_stream_interface_t interface)
{
std::map<std::string, ConfigureNetworkParams> configure_params{};
hailo_configure_params_t config_params{};
hailo_status status = hailo_init_configure_params(reinterpret_cast<hailo_hef>(&hef), interface, &config_params);
CHECK_SUCCESS_AS_EXPECTED(status);
/* For default case overwrite batch to 1 */
uint16_t batch_size = (HAILO_DEFAULT_BATCH_SIZE == params.batch_size ? 1 : params.batch_size);
/* Fill all network and network group structs with batch size value */
for (size_t network_group_idx = 0; network_group_idx < config_params.network_group_params_count; network_group_idx++) {
config_params.network_group_params[network_group_idx].batch_size = batch_size;
}
for (size_t network_group_idx = 0; network_group_idx < config_params.network_group_params_count; network_group_idx++) {
config_params.network_group_params[network_group_idx].power_mode = params.power_mode;
configure_params.emplace(std::string(config_params.network_group_params[network_group_idx].name),
ConfigureNetworkParams(config_params.network_group_params[network_group_idx]));
}
return configure_params;
}
hailo_status HwInferEstimatorCommand::execute()
{
auto devices = create_devices(m_params.vdevice_params.device_params);
CHECK_EXPECTED_AS_STATUS(devices, "Failed creating device");
/* This function supports controls for multiple devices.
We validate there is only 1 device generated as we are on a single device flow */
CHECK(1 == devices->size(), HAILO_INTERNAL_FAILURE, "Hw infer command support only one physical device");
auto &device = devices.value()[0];
auto hef = Hef::create(m_params.hef_path.c_str());
CHECK_EXPECTED_AS_STATUS(hef, "Failed reading hef file {}", m_params.hef_path);
auto interface = device->get_default_streams_interface();
CHECK_EXPECTED_AS_STATUS(interface, "Failed to get default streams interface");
auto configure_params = get_configure_params(m_params, hef.value(), interface.value());
CHECK_EXPECTED_AS_STATUS(configure_params);
/* Use Env var to configure all desc list with max depth */
setenv("HAILO_CONFIGURE_FOR_HW_INFER","Y",1);
auto network_group_list = device->configure(hef.value(), configure_params.value());
CHECK_EXPECTED_AS_STATUS(network_group_list, "Failed configure device from hef");
unsetenv("HAILO_CONFIGURE_FOR_HW_INFER");
CHECK(1 == network_group_list->size(), HAILO_INVALID_OPERATION,
"HW Inference is not supported on HEFs with multiple network groups");
auto network_group_ptr = network_group_list.value()[0];
std::cout << "Starting HW infer Estimator..." << std::endl;
auto results = network_group_ptr->run_hw_infer_estimator();
CHECK_EXPECTED_AS_STATUS(results);
std::cout << std::endl;
std::cout << "======================" << std::endl;
std::cout << " Summary" << std::endl;
std::cout << "======================" << std::endl;
std::cout << "Batch count: " << results->batch_count << std::endl;
std::cout << "Total transfer size [KB]: " << (results->total_transfer_size / BYTES_TO_KILOBYTES) << std::endl;
std::cout << "Total frames passed: " << results->total_frames_passed << std::endl;
std::cout << "Total time [s]: " << results->time_sec << std::endl;
std::cout << "Total FPS [1/s]: " << results->fps << std::endl;
std::cout << "BW [Gbps]: " << results->BW_Gbps << std::endl;
std::cout << "======================" << std::endl;
std::cout << " End of report" << std::endl;
std::cout << "======================" << std::endl;
return HAILO_SUCCESS;
}

View File

@@ -0,0 +1,33 @@
/**
* Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file measure_nnc_performance_command.hpp
* @brief measure nerual network performance for given network using only the HW components without host SW
**/
#ifndef _HAILO_HW_INFER_ESTIMATOR_COMMAND_HPP_
#define _HAILO_HW_INFER_ESTIMATOR_COMMAND_HPP_
#include "hailortcli.hpp"
#include "command.hpp"
#include "CLI/CLI.hpp"
struct hw_infer_runner_params {
hailo_vdevice_params vdevice_params;
std::string hef_path;
uint16_t batch_size;
hailo_power_mode_t power_mode;
};
class HwInferEstimatorCommand : public Command {
public:
explicit HwInferEstimatorCommand(CLI::App &parent_app);
hailo_status execute() override;
private:
hw_infer_runner_params m_params;
};
#endif /*_HAILO_HW_INFER_ESTIMATOR_COMMAND_HPP_*/

View File

@@ -14,7 +14,7 @@
#include "hailortcli.hpp"
#include "command.hpp"
#include "vdevice/scheduler/scheduler_mon.hpp"
#include "utils/profiler/monitor_handler.hpp"
#include "CLI/CLI.hpp"

View File

@@ -40,7 +40,7 @@ hailo_status ParseHefCommand::parse_hefs_info(const std::string &hef_path, bool
CHECK_EXPECTED_AS_STATUS(hef_exp, "Failed to parse HEF");
auto hef = hef_exp.release();
auto hef_info = hef.get_hef_description(stream_infos, vstream_infos);
auto hef_info = hef.get_description(stream_infos, vstream_infos);
CHECK_EXPECTED_AS_STATUS(hef_info, "Failed to parse HEF");
std::cout << hef_info.release();
return HAILO_SUCCESS;

View File

@@ -0,0 +1,26 @@
/**
* Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file io_wrappers.cpp
**/
#include "io_wrappers.hpp"
FramerateThrottle::FramerateThrottle(uint32_t framerate) :
m_framerate(framerate),
m_framerate_interval(std::chrono::duration<double>(1) / framerate),
m_last_write_time(std::chrono::steady_clock::now())
{}
void FramerateThrottle::throttle()
{
if (m_framerate == UNLIMITED_FRAMERATE) {
return;
}
const auto elapsed_time = std::chrono::steady_clock::now() - m_last_write_time;
std::this_thread::sleep_for(m_framerate_interval - elapsed_time);
m_last_write_time = std::chrono::steady_clock::now();
}

View File

@@ -0,0 +1,261 @@
/**
* Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file io_wrappers.hpp
* @brief Wrappers for Input/Output Stream/VStream. Manages buffer allocation, framerate throttle, latency meter and
* more.
**/
#ifndef _HAILO_IO_WRAPPERS_HPP_
#define _HAILO_IO_WRAPPERS_HPP_
#include "network_live_track.hpp"
#include "common/file_utils.hpp"
#include "common/latency_meter.hpp"
#include <chrono>
#include <string>
using namespace hailort;
constexpr uint32_t UNLIMITED_FRAMERATE = 0;
#ifndef HAILO_EMULATOR
constexpr std::chrono::milliseconds HAILORTCLI_DEFAULT_TIMEOUT(HAILO_DEFAULT_VSTREAM_TIMEOUT_MS);
#else /* ifndef HAILO_EMULATOR */
constexpr std::chrono::milliseconds HAILORTCLI_DEFAULT_TIMEOUT(HAILO_DEFAULT_VSTREAM_TIMEOUT_MS * 100);
#endif /* ifndef HAILO_EMULATOR */
class FramerateThrottle final
{
public:
FramerateThrottle(uint32_t framerate);
~FramerateThrottle() = default;
void throttle();
private:
const uint32_t m_framerate;
const std::chrono::duration<double> m_framerate_interval;
decltype(std::chrono::steady_clock::now()) m_last_write_time;
};
// Wrapper for InputStream or InputVStream objects.
template<typename Writer>
class WriterWrapper final
{
public:
template<typename WriterParams>
static Expected<std::shared_ptr<WriterWrapper>> create(Writer &writer, const WriterParams &params,
const LatencyMeterPtr &overall_latency_meter, uint32_t framerate)
{
auto dataset = create_dataset(writer, params);
CHECK_EXPECTED(dataset);
std::shared_ptr<WriterWrapper> wrapper(
new (std::nothrow) WriterWrapper(writer, dataset.release(), overall_latency_meter, framerate));
CHECK_NOT_NULL_AS_EXPECTED(wrapper, HAILO_OUT_OF_HOST_MEMORY);
return wrapper;
}
Writer &get() { return m_writer.get(); }
Writer &get() const { return m_writer.get(); }
hailo_status write()
{
before_write_start();
auto status = get().write(MemoryView(*next_buffer()));
if (HAILO_SUCCESS != status) {
return status;
}
m_framerate_throttle.throttle();
return HAILO_SUCCESS;
}
hailo_status wait_for_async_ready()
{
return get().wait_for_async_ready(m_dataset[0]->size(), HAILORTCLI_DEFAULT_TIMEOUT);
}
hailo_status write_async(typename Writer::TransferDoneCallback callback)
{
before_write_start();
// We can use the same buffer for multiple writes simultaneously. That is OK since we don't modify the buffers.
auto status = get().write_async(MemoryView(*next_buffer()), callback);
if (HAILO_SUCCESS != status) {
return status;
}
m_framerate_throttle.throttle();
return HAILO_SUCCESS;
}
private:
WriterWrapper(Writer &writer, std::vector<BufferPtr> &&dataset, const LatencyMeterPtr &overall_latency_meter,
uint32_t framerate) :
m_writer(std::ref(writer)),
m_dataset(std::move(dataset)),
m_overall_latency_meter(overall_latency_meter),
m_framerate_throttle(framerate)
{}
void before_write_start()
{
if (m_overall_latency_meter) {
m_overall_latency_meter->add_start_sample(std::chrono::steady_clock::now().time_since_epoch());
}
}
size_t next_buffer_index()
{
const auto index = m_current_buffer_index;
m_current_buffer_index = (m_current_buffer_index + 1) % m_dataset.size();
return index;
}
BufferPtr next_buffer()
{
return m_dataset[next_buffer_index()];
}
template<typename WriterParams>
static Expected<std::vector<BufferPtr>> create_dataset(Writer &writer, const WriterParams &params)
{
if (params.input_file_path.empty()) {
return create_constant_dataset(writer.get_frame_size());
} else {
return create_dataset_from_input_file(params.input_file_path, writer.get_frame_size());
}
}
static Expected<std::vector<BufferPtr>> create_constant_dataset(size_t frame_size)
{
const uint8_t const_byte = 0xAB;
auto constant_buffer = Buffer::create_shared(frame_size, const_byte, BufferStorageParams::create_dma());
CHECK_EXPECTED(constant_buffer);
return std::vector<BufferPtr>{constant_buffer.release()};
}
static Expected<std::vector<BufferPtr>> create_dataset_from_input_file(const std::string &file_path, size_t frame_size)
{
auto buffer = read_binary_file(file_path);
CHECK_EXPECTED(buffer);
CHECK_AS_EXPECTED(0 == (buffer->size() % frame_size), HAILO_INVALID_ARGUMENT,
"Input file ({}) size {} must be a multiple of the frame size {}",
file_path, buffer->size(), frame_size);
auto buffer_ptr = make_shared_nothrow<Buffer>(buffer.release());
CHECK_NOT_NULL_AS_EXPECTED(buffer_ptr, HAILO_OUT_OF_HOST_MEMORY);
std::vector<BufferPtr> dataset;
const size_t frames_count = buffer->size() / frame_size;
dataset.reserve(frames_count);
for (size_t i = 0; i < frames_count; i++) {
const auto offset = frame_size * i;
auto frame_buffer = Buffer::create_shared(buffer->data() + offset, frame_size, BufferStorageParams::create_dma());
CHECK_EXPECTED(frame_buffer);
dataset.emplace_back(frame_buffer.release());
}
return dataset;
}
std::reference_wrapper<Writer> m_writer;
std::vector<BufferPtr> m_dataset;
size_t m_current_buffer_index = 0;
LatencyMeterPtr m_overall_latency_meter;
FramerateThrottle m_framerate_throttle;
};
template<typename Writer>
using WriterWrapperPtr = std::shared_ptr<WriterWrapper<Writer>>;
// Wrapper for OutputStream or OutputVStream objects.
// We use std::enable_from_this because on async api the callback is using `this`. We want to increase the reference
// count until the callback is over.
template<typename Reader>
class ReaderWrapper final : public std::enable_shared_from_this<ReaderWrapper<Reader>>
{
public:
static Expected<std::shared_ptr<ReaderWrapper>> create(Reader &reader, const LatencyMeterPtr &overall_latency_meter,
std::shared_ptr<NetworkLiveTrack> net_live_track)
{
auto buffer = Buffer::create_shared(reader.get_frame_size(), BufferStorageParams::create_dma());
CHECK_EXPECTED(buffer);
std::shared_ptr<ReaderWrapper> wrapper(
new (std::nothrow) ReaderWrapper(reader, buffer.release(), overall_latency_meter, net_live_track));
CHECK_NOT_NULL_AS_EXPECTED(wrapper, HAILO_OUT_OF_HOST_MEMORY);
return wrapper;
}
Reader &get() { return m_reader.get(); }
Reader &get() const { return m_reader.get(); }
hailo_status read()
{
auto status = get().read(MemoryView(*m_buffer));
if (HAILO_SUCCESS != status) {
return status;
}
on_read_done();
return HAILO_SUCCESS;
}
hailo_status wait_for_async_ready()
{
return get().wait_for_async_ready(m_buffer->size(), HAILORTCLI_DEFAULT_TIMEOUT);
}
hailo_status read_async(typename Reader::TransferDoneCallback callback)
{
auto self = std::enable_shared_from_this<ReaderWrapper<Reader>>::shared_from_this();
return get().read_async(MemoryView(*m_buffer),
[self, original=callback](const typename Reader::CompletionInfo &completion_info) {
original(completion_info);
if (completion_info.status == HAILO_SUCCESS) {
self->on_read_done();
}
});
}
private:
ReaderWrapper(Reader &reader, BufferPtr &&buffer, const LatencyMeterPtr &overall_latency_meter,
std::shared_ptr<NetworkLiveTrack> net_live_track) :
m_reader(std::ref(reader)),
m_buffer(std::move(buffer)),
m_overall_latency_meter(overall_latency_meter),
m_net_live_track(net_live_track)
{}
void on_read_done()
{
if (m_overall_latency_meter) {
m_overall_latency_meter->add_end_sample(get().name(), std::chrono::steady_clock::now().time_since_epoch());
}
if (m_net_live_track) {
m_net_live_track->progress();
}
}
std::reference_wrapper<Reader> m_reader;
BufferPtr m_buffer;
LatencyMeterPtr m_overall_latency_meter;
std::shared_ptr<NetworkLiveTrack> m_net_live_track;
};
template<typename Reader>
using ReaderWrapperPtr = std::shared_ptr<ReaderWrapper<Reader>>;
#endif /* _HAILO_IO_WRAPPERS_HPP_ */

View File

@@ -1,86 +0,0 @@
/**
* Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file live_printer.cpp
* @brief Live printer
**/
#include "live_printer.hpp"
#include "../common.hpp"
#include "common/os_utils.hpp"
#include "common/utils.hpp"
#include <sstream>
#include <iostream>
using namespace hailort;
LivePrinter::LivePrinter(std::chrono::milliseconds interval) :
m_interval(interval),
m_stop_event(Event::create_shared(Event::State::not_signalled)),
m_tracks(),
m_mutex(),
m_prev_count(0),
m_enable_ansi_escape_sequences(CursorAdjustment())
{
}
LivePrinter::~LivePrinter()
{
(void)m_stop_event->signal();
if (m_thread.joinable()) {
m_thread.join();
}
print();
}
void LivePrinter::add(std::shared_ptr<Track> track, uint8_t level)
{
std::unique_lock<std::mutex> lock(m_mutex);
if (!contains(m_tracks, level)) {
m_tracks[level] = {};
}
m_tracks[level].emplace_back(track);
}
void LivePrinter::print()
{
std::stringstream ss;
uint32_t count = 0;
{
std::unique_lock<std::mutex> lock(m_mutex);
for (auto &level_pair : m_tracks) {
for (auto &track : level_pair.second) {
count += track->get_text(ss);
}
}
}
CliCommon::reset_cursor(m_prev_count);
// On the first print m_prev_count = 0, so no lines will be deleted
std::cout << ss.str() << std::flush;
m_prev_count = count;
}
hailo_status LivePrinter::start()
{
for (auto &level_pair : m_tracks) {
for (auto &track : level_pair.second) {
CHECK_SUCCESS(track->start());
}
}
m_thread = std::thread([this] () {
OsUtils::set_current_thread_name("LIVE_PRINTER");
while (true) {
print();
auto status = m_stop_event->wait(m_interval);
if (HAILO_TIMEOUT != status) {
break;
}
}
});
return HAILO_SUCCESS;
}

View File

@@ -0,0 +1,149 @@
/**
* Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file live_stats.cpp
* @brief Live stats
**/
#include "live_stats.hpp"
#include "../common.hpp"
#include "common/os_utils.hpp"
#include "common/utils.hpp"
#include <nlohmann/json.hpp>
#include <sstream>
#include <iostream>
using namespace hailort;
hailo_status LiveStats::Track::start()
{
CHECK_SUCCESS(start_impl());
m_started = true;
return HAILO_SUCCESS;
}
uint32_t LiveStats::Track::push_text(std::stringstream &ss)
{
if (!m_started) {
return 0;
}
return push_text_impl(ss);
}
void LiveStats::Track::push_json(nlohmann::ordered_json &json)
{
if (!m_started) {
return;
}
push_json_impl(json);
}
LiveStats::LiveStats(std::chrono::milliseconds interval) :
m_running(false),
m_interval(interval),
m_stop_event(Event::create_shared(Event::State::not_signalled)),
m_tracks(),
m_mutex(),
m_prev_count(0),
m_enable_ansi_escape_sequences(CursorAdjustment())
{
}
LiveStats::~LiveStats()
{
stop();
print();
}
void LiveStats::add(std::shared_ptr<Track> track, uint8_t level)
{
std::unique_lock<std::mutex> lock(m_mutex);
m_tracks[level].emplace_back(track);
}
void LiveStats::print()
{
std::stringstream ss;
uint32_t count = 0;
{
std::unique_lock<std::mutex> lock(m_mutex);
for (auto &level_pair : m_tracks) {
for (auto &track : level_pair.second) {
count += track->push_text(ss);
}
}
}
CliCommon::reset_cursor(m_prev_count);
// On the first print m_prev_count = 0, so no lines will be deleted
std::cout << ss.str() << std::flush;
m_prev_count = count;
}
hailo_status LiveStats::dump_stats(const std::string &json_path, const std::string &inference_mode)
{
stop(); // stop measuring before creating json because we want the json to hold the last measurements
nlohmann::ordered_json json;
auto time = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());
auto str_time = std::string(std::ctime(&time));
if (str_time.length()){
str_time.pop_back();
}
json["time"] = str_time;
json["inference_mode"] = inference_mode;
json["network_groups"] = nlohmann::ordered_json::array();
std::unique_lock<std::mutex> lock(m_mutex);
for (auto &level_pair : m_tracks) {
for (auto &track : level_pair.second) {
track->push_json(json);
}
}
std::ofstream output_json(json_path);
CHECK(output_json, HAILO_FILE_OPERATION_FAILURE, "Failed opening file '{}'", json_path);
output_json << std::setw(4) << json << std::endl; // 4: amount of spaces to indent (for pretty printing)
CHECK(!output_json.bad() && !output_json.fail(), HAILO_FILE_OPERATION_FAILURE,
"Failed writing to file '{}'", json_path);
return HAILO_SUCCESS;
}
hailo_status LiveStats::start()
{
// In order to re-start LiveStats, we should add m_stop_event->reset() here
m_running = true;
for (auto &level_pair : m_tracks) {
for (auto &track : level_pair.second) {
CHECK_SUCCESS(track->start());
}
}
m_thread = std::thread([this] () {
OsUtils::set_current_thread_name("LIVE_PRINTER");
while (true) {
print();
auto status = m_stop_event->wait(m_interval);
if (HAILO_TIMEOUT != status) {
break;
}
}
});
return HAILO_SUCCESS;
}
void LiveStats::stop()
{
if (m_running){
(void)m_stop_event->signal();
if (m_thread.joinable()) {
m_thread.join();
}
m_running = false;
}
}

View File

@@ -3,15 +3,16 @@
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file live_printer.hpp
* @brief Live printer
* @file live_stats.hpp
* @brief Live stats
**/
#ifndef _HAILO_HAILORTCLI_RUN2_LIVE_PRINTER_HPP_
#define _HAILO_HAILORTCLI_RUN2_LIVE_PRINTER_HPP_
#ifndef _HAILO_HAILORTCLI_RUN2_LIVE_STATS_HPP_
#define _HAILO_HAILORTCLI_RUN2_LIVE_STATS_HPP_
#include "common/os_utils.hpp"
#include "hailo/event.hpp"
#include <nlohmann/json.hpp>
#include <stdint.h>
#include <chrono>
#include <mutex>
@@ -19,7 +20,7 @@
#include <atomic>
#include <map>
class LivePrinter final
class LiveStats final
{
public:
class Track
@@ -28,20 +29,28 @@ public:
Track() : m_started(false)
{}
virtual hailo_status start() = 0;
virtual uint32_t get_text(std::stringstream &ss) = 0;
hailo_status start();
uint32_t push_text(std::stringstream &ss);
void push_json(nlohmann::ordered_json &json);
protected:
virtual hailo_status start_impl() = 0;
virtual uint32_t push_text_impl(std::stringstream &ss) = 0;
virtual void push_json_impl(nlohmann::ordered_json &json) = 0;
bool m_started;
};
LivePrinter(std::chrono::milliseconds interval);
~LivePrinter();
LiveStats(std::chrono::milliseconds interval);
~LiveStats();
void add(std::shared_ptr<Track> track, uint8_t level); // prints tracks in consecutive order from low-to-high levels
void print();
hailo_status dump_stats(const std::string &json_path, const std::string &inference_mode);
hailo_status start();
void stop();
private:
bool m_running;
std::chrono::milliseconds m_interval;
hailort::EventPtr m_stop_event;
std::map<uint8_t, std::vector<std::shared_ptr<Track>>> m_tracks;
@@ -51,4 +60,4 @@ private:
hailort::CursorAdjustment m_enable_ansi_escape_sequences;
};
#endif /* _HAILO_HAILORTCLI_RUN2_LIVE_PRINTER_HPP_ */
#endif /* _HAILO_HAILORTCLI_RUN2_LIVE_STATS_HPP_ */

View File

@@ -17,7 +17,6 @@
#include <spdlog/fmt/fmt.h>
#include <sstream>
using namespace hailort;
Expected<std::shared_ptr<MeasurementLiveTrack>> MeasurementLiveTrack::create_shared(Device &device, bool measure_power, bool measure_current,
@@ -53,35 +52,27 @@ Expected<std::shared_ptr<MeasurementLiveTrack>> MeasurementLiveTrack::create_sha
MeasurementLiveTrack::MeasurementLiveTrack(std::shared_ptr<PowerMeasurement> power_measurement,
std::shared_ptr<PowerMeasurement> current_measurement, std::shared_ptr<TemperatureMeasurement> temp_measurement,
const std::string &device_id) :
LivePrinter::Track(), m_power_measurement(std::move(power_measurement)), m_current_measurement(std::move(current_measurement)),
LiveStats::Track(), m_power_measurement(std::move(power_measurement)), m_current_measurement(std::move(current_measurement)),
m_temp_measurement(std::move(temp_measurement)), m_device_id(device_id)
{}
hailo_status MeasurementLiveTrack::start()
// Kicks off every configured measurement (power / current / temperature).
// Each member may be null when the corresponding measurement was not requested,
// so each is started only if present. CHECK_SUCCESS returns early on failure,
// leaving m_started false.
hailo_status MeasurementLiveTrack::start_impl()
{
    if (m_power_measurement) {
        CHECK_SUCCESS(m_power_measurement->start_measurement());
    }
    if (m_current_measurement) {
        CHECK_SUCCESS(m_current_measurement->start_measurement());
    }
    if (m_temp_measurement) {
        CHECK_SUCCESS(m_temp_measurement->start_measurement());
    }
    m_started = true;
    return HAILO_SUCCESS;
}
uint32_t MeasurementLiveTrack::get_text(std::stringstream &ss)
uint32_t MeasurementLiveTrack::push_text_impl(std::stringstream &ss)
{
if (!m_started) {
return 0;
}
auto rows_count = 0;
if (m_power_measurement || m_current_measurement || m_temp_measurement) {
@@ -139,3 +130,36 @@ uint32_t MeasurementLiveTrack::get_text(std::stringstream &ss)
return rows_count;
}
// Appends a {min, max, average} entry (each value suffixed with its unit) for
// the given measurement under 'measurment_name' in the device JSON object.
// If any of the three statistics is unavailable, nothing is written.
void MeasurementLiveTrack::push_json_measurment_val(nlohmann::ordered_json &device_json, std::shared_ptr<BaseMeasurement> measurment, const std::string &measurment_name)
{
    const auto data = measurment->get_data();
    const auto unit = measurment->measurement_unit();

    const auto min_val = data.min();
    const auto max_val = data.max();
    const auto mean_val = data.mean();
    if (!(min_val && max_val && mean_val)) {
        return; // incomplete statistics - skip this measurement entirely
    }

    // Generic lambda keeps the exact std::to_string overload of the value type.
    auto with_unit = [&unit](auto value) {
        return std::to_string(value) + " " + unit;
    };
    device_json[measurment_name] = {
        {"min", with_unit(min_val.value())},
        {"max", with_unit(max_val.value())},
        {"average", with_unit(mean_val.value())}
    };
}
// Serializes this device's measurements into the "devices" array of the output
// JSON. Only measurements that were actually configured (non-null) are emitted.
void MeasurementLiveTrack::push_json_impl(nlohmann::ordered_json &json)
{
    nlohmann::ordered_json device_json;
    device_json["device_id"] = m_device_id;

    // Table of (measurement, json-key) pairs; nulls are skipped.
    const std::pair<std::shared_ptr<BaseMeasurement>, const char *> measurements[] = {
        {m_power_measurement, "power"},
        {m_current_measurement, "current"},
        {m_temp_measurement, "temperature"},
    };
    for (const auto &entry : measurements) {
        if (entry.first) {
            push_json_measurment_val(device_json, entry.first, entry.second);
        }
    }

    json["devices"].emplace_back(device_json);
}

View File

@@ -13,24 +13,26 @@
#include "hailo/hailort.h"
#include "common/device_measurements.hpp"
#include "live_stats.hpp"
#include "live_printer.hpp"
#include <nlohmann/json.hpp>
class MeasurementLiveTrack : public LivePrinter::Track
class MeasurementLiveTrack : public LiveStats::Track
{
public:
static hailort::Expected<std::shared_ptr<MeasurementLiveTrack>> create_shared(hailort::Device &vdevice, bool measure_power,
bool measure_current, bool measure_temp);
virtual ~MeasurementLiveTrack() = default;
virtual hailo_status start() override;
virtual uint32_t get_text(std::stringstream &ss) override;
virtual hailo_status start_impl() override;
virtual uint32_t push_text_impl(std::stringstream &ss) override;
virtual void push_json_impl(nlohmann::ordered_json &json) override;
MeasurementLiveTrack(std::shared_ptr<PowerMeasurement> power_measurement, std::shared_ptr<PowerMeasurement> current_measurement,
std::shared_ptr<TemperatureMeasurement> temp_measurement, const std::string &device_id);
private:
void push_json_measurment_val(nlohmann::ordered_json &device_json, std::shared_ptr<BaseMeasurement> measurment, const std::string &measurment_name);
std::shared_ptr<PowerMeasurement> m_power_measurement;
std::shared_ptr<PowerMeasurement> m_current_measurement;
std::shared_ptr<TemperatureMeasurement> m_temp_measurement;

View File

@@ -13,52 +13,110 @@
#include <spdlog/fmt/fmt.h>
#include <sstream>
NetworkLiveTrack::NetworkLiveTrack(const std::string &name, std::shared_ptr<ConfiguredNetworkGroup> cng, LatencyMeterPtr overall_latency_meter) :
m_name(name), m_count(0), m_last_get_time(), m_cng(cng), m_overall_latency_meter(overall_latency_meter)
// Longest network-group name seen so far across all tracks - used to pad the
// printed name column so per-track stats line up. Guarded by 'mutex'.
size_t NetworkLiveTrack::max_ng_name = 0;
std::mutex NetworkLiveTrack::mutex;
// Creates a live track for one network group. Updates the class-wide maximum
// name length (under the static mutex) so all tracks can align their columns.
// NOTE(review): per the header, m_hef_path is a const-reference member bound to
// the caller's string - confirm the argument outlives this object.
NetworkLiveTrack::NetworkLiveTrack(const std::string &name, std::shared_ptr<ConfiguredNetworkGroup> cng,
    LatencyMeterPtr overall_latency_meter, bool measure_fps, const std::string &hef_path) :
    m_name(name),
    m_count(0),
    m_last_get_time(),
    m_cng(cng),
    m_overall_latency_meter(overall_latency_meter),
    m_measure_fps(measure_fps),
    m_hef_path(hef_path)
{
    std::lock_guard<std::mutex> lock(mutex);
    max_ng_name = std::max(m_name.size(), max_ng_name);
}
hailo_status NetworkLiveTrack::start()
// Resets the FPS baseline: timestamps "now" and zeroes the frame counter so
// get_fps() measures from the moment inference actually starts.
hailo_status NetworkLiveTrack::start_impl()
{
    m_last_get_time = std::chrono::steady_clock::now();
    m_count = 0;
    m_started = true;
    return HAILO_SUCCESS;
}
uint32_t NetworkLiveTrack::get_text(std::stringstream &ss)
double NetworkLiveTrack::get_fps()
{
if (!m_started) {
return 0;
}
auto elapsed_time = std::chrono::steady_clock::now() - m_last_get_time;
auto count = m_count.load();
auto fps = count / std::chrono::duration<double>(elapsed_time).count();
ss << fmt::format("{}:\n\t| fps: {:.2f}", m_name, fps);
return fps;
}
uint32_t NetworkLiveTrack::push_text_impl(std::stringstream &ss)
{
ss << fmt::format("{}:", m_name);
ss << std::string(max_ng_name - m_name.size(), ' ');
bool first = true;
auto get_separator = [&first] () {
auto res = first ? " " : " | ";
first = false;
return res;
};
if (m_measure_fps) {
auto fps = get_fps();
ss << fmt::format("{}fps: {:.2f}", get_separator(), fps);
}
auto hw_latency_measurement = m_cng->get_latency_measurement();
if (hw_latency_measurement) {
ss << fmt::format(" | hw latency: {:.2f} ms", InferResultsFormatUtils::latency_result_to_ms(hw_latency_measurement->avg_hw_latency));
ss << fmt::format("{}hw latency: {:.2f} ms", get_separator(), InferResultsFormatUtils::latency_result_to_ms(hw_latency_measurement->avg_hw_latency));
}
else if (HAILO_NOT_AVAILABLE != hw_latency_measurement.status()) { // HAILO_NOT_AVAILABLE is a valid error, we ignore it
ss << fmt::format(" | hw latency: failed with status={}", hw_latency_measurement.status());
ss << fmt::format("{}hw latency: NaN (err)", get_separator());
}
if (m_overall_latency_meter) {
auto overall_latency_measurement = m_overall_latency_meter->get_latency(true);
auto overall_latency_measurement = m_overall_latency_meter->get_latency(false);
if (overall_latency_measurement) {
ss << fmt::format(" | overall latency: {:.2f} ms", InferResultsFormatUtils::latency_result_to_ms(*overall_latency_measurement));
ss << fmt::format("{}overall latency: {:.2f} ms", get_separator(), InferResultsFormatUtils::latency_result_to_ms(*overall_latency_measurement));
}
else if (HAILO_NOT_AVAILABLE != overall_latency_measurement.status()) { // HAILO_NOT_AVAILABLE is a valid error, we ignore it
ss << fmt::format(" | overall latency: failed with status={}", overall_latency_measurement.status());
ss << fmt::format("{}overall latency: NaN (err)", get_separator());
}
}
ss << "\n";
return 2;
return 1;
}
// Serializes this network group's stats (FPS and latencies, when available)
// into the "network_groups" array of the output JSON.
void NetworkLiveTrack::push_json_impl(nlohmann::ordered_json &json)
{
    nlohmann::ordered_json ng_json;
    ng_json["name"] = m_name;
    ng_json["full_hef_path"] = m_hef_path;

    // TODO: HRT-8695 Support stats display per network

    if (m_measure_fps) {
        ng_json["FPS"] = std::to_string(get_fps());
    }

    const auto hw_latency = m_cng->get_latency_measurement();
    if (hw_latency) {
        ng_json["hw_latency"] = InferResultsFormatUtils::latency_result_to_ms(hw_latency->avg_hw_latency);
    }

    if (m_overall_latency_meter) {
        // get_latency(false) reads the current value without clearing it.
        if (const auto overall_latency = m_overall_latency_meter->get_latency(false)) {
            ng_json["overall_latency"] = InferResultsFormatUtils::latency_result_to_ms(*overall_latency);
        }
    }

    json["network_groups"].emplace_back(ng_json);
}
void NetworkLiveTrack::progress()

View File

@@ -15,24 +15,36 @@
#include "common/latency_meter.hpp"
#include "live_printer.hpp"
#include "live_stats.hpp"
#include <nlohmann/json.hpp>
class NetworkLiveTrack : public LivePrinter::Track
class NetworkLiveTrack : public LiveStats::Track
{
public:
NetworkLiveTrack(const std::string &name, std::shared_ptr<hailort::ConfiguredNetworkGroup> cng, hailort::LatencyMeterPtr overall_latency_meter);
NetworkLiveTrack(const std::string &name, std::shared_ptr<hailort::ConfiguredNetworkGroup> cng,
hailort::LatencyMeterPtr overall_latency_meter, bool measure_fps, const std::string &hef_path);
virtual ~NetworkLiveTrack() = default;
virtual hailo_status start() override;
virtual uint32_t get_text(std::stringstream &ss) override;
virtual hailo_status start_impl() override;
virtual uint32_t push_text_impl(std::stringstream &ss) override;
virtual void push_json_impl(nlohmann::ordered_json &json) override;
void progress();
private:
double get_fps();
static size_t max_ng_name;
static std::mutex mutex;
std::string m_name;
std::atomic<uint32_t> m_count;
std::chrono::time_point<std::chrono::steady_clock> m_last_get_time;
std::shared_ptr<hailort::ConfiguredNetworkGroup> m_cng;
hailort::LatencyMeterPtr m_overall_latency_meter;
const bool m_measure_fps;
const std::string &m_hef_path;
};
#endif /* _HAILO_HAILORTCLI_RUN2_NETWORK_LIVE_TRACK_HPP_ */

View File

@@ -11,38 +11,62 @@
#include "hailo/hailort_common.hpp"
#include "hailo/hailort_defaults.hpp"
#include "common/async_thread.hpp"
#include "common/file_utils.hpp"
#include "common/latency_meter.hpp"
#include "network_runner.hpp"
#if defined(_MSC_VER)
#include <mmsystem.h>
#endif
using namespace hailort;
class SignalEventScopeGuard final
{
public:
SignalEventScopeGuard(Event &event) : m_event(event)
// Binds the guard to the event that will be signalled on scope exit - used so
// that a thread exiting (normally or on error) wakes up others waiting on it.
SignalEventScopeGuard::SignalEventScopeGuard(Event &event) :
    m_event(event)
{}
~SignalEventScopeGuard()
// Fires the event on destruction; the signal result is not checked (dtors
// must not fail).
SignalEventScopeGuard::~SignalEventScopeGuard()
{
    m_event.signal();
}
Event &m_event;
// Binds the guard to a barrier that will be terminated on scope exit.
// A null barrier is valid and makes the guard a no-op.
BarrierTerminateScopeGuard::BarrierTerminateScopeGuard(BarrierPtr barrier) :
    m_barrier(barrier)
{}
// Terminates the barrier on destruction (presumably releasing threads blocked
// on it - see Barrier's documentation); no-op when no barrier was provided.
BarrierTerminateScopeGuard::~BarrierTerminateScopeGuard()
{
    if (m_barrier) {
        m_barrier->terminate();
    }
}
#if defined(_MSC_VER)
// RAII wrapper around the WinMM timer-resolution API: raises the system timer
// resolution to 1 ms for the guard's lifetime and restores it on scope exit,
// so the framerate-throttling sleeps are accurate on Windows.
class TimeBeginScopeGuard final
{
public:
    TimeBeginScopeGuard() {
        // default interval between timer interrupts on Windows is 15.625 ms.
        // This will change it to be 1 ms, enabling us to sleep in granularity of 1 milliseconds.
        // As from Windows 10 2004, in general processes are no longer affected by other processes calling timeBeginPeriod.
        // https://randomascii.wordpress.com/2020/10/04/windows-timer-resolution-the-great-rule-change/
        timeBeginPeriod(1);
    }
    ~TimeBeginScopeGuard() {
        // Every timeBeginPeriod(n) must be matched by a timeEndPeriod(n).
        timeEndPeriod(1);
    }
};
#endif
//TODO: duplicated
static hailo_status wait_for_threads(std::vector<AsyncThreadPtr<hailo_status>> &threads)
hailo_status NetworkRunner::wait_for_threads(std::vector<AsyncThreadPtr<hailo_status>> &threads)
{
auto last_error_status = HAILO_SUCCESS;
for (auto &thread : threads) {
auto thread_status = thread->get();
if ((HAILO_SUCCESS != thread_status) && (HAILO_STREAM_ABORTED_BY_USER != thread_status)) {
if (!inference_succeeded(thread_status)) {
last_error_status = thread_status;
LOGGER__ERROR("Thread failed with with status {}", thread_status);
}
@@ -50,218 +74,192 @@ static hailo_status wait_for_threads(std::vector<AsyncThreadPtr<hailo_status>> &
return last_error_status;
}
VStreamParams::VStreamParams() : name(), params(HailoRTDefaults::get_vstreams_params())
// Base I/O parameters: default-constructs with an empty name and no input file.
IoParams::IoParams() : name(), input_file_path()
{
}
NetworkParams::NetworkParams() : hef_path(), net_group_name(), vstream_params(), scheduling_algorithm(HAILO_SCHEDULING_ALGORITHM_ROUND_ROBIN),
batch_size(HAILO_DEFAULT_BATCH_SIZE), scheduler_threshold(0), scheduler_timeout_ms(0), framerate(UNLIMITED_FRAMERATE), measure_hw_latency(false),
// Vstream parameters default to the library's standard vstream settings.
VStreamParams::VStreamParams() : IoParams(), params(HailoRTDefaults::get_vstreams_params())
{
}
// Raw stream parameters default to no stream flags (synchronous streams).
StreamParams::StreamParams() : IoParams(), flags(HAILO_STREAM_FLAGS_NONE)
{
}
// Network defaults: round-robin scheduling, default batch size, scheduler
// threshold/timeout of 0, unlimited framerate, and no latency measurement.
NetworkParams::NetworkParams() : hef_path(), net_group_name(), vstream_params(), stream_params(),
    scheduling_algorithm(HAILO_SCHEDULING_ALGORITHM_ROUND_ROBIN), batch_size(HAILO_DEFAULT_BATCH_SIZE),
    scheduler_threshold(0), scheduler_timeout_ms(0), framerate(UNLIMITED_FRAMERATE), measure_hw_latency(false),
    measure_overall_latency(false)
{
}
NetworkRunner::NetworkRunner(const NetworkParams &params, const std::string &name,
std::vector<InputVStream> &&input_vstreams, std::vector<OutputVStream> &&output_vstreams,
std::shared_ptr<ConfiguredNetworkGroup> cng, LatencyMeterPtr overall_latency_meter)
: m_params(params), m_name(name), m_input_vstreams(std::move(input_vstreams)),
m_output_vstreams(std::move(output_vstreams)), m_cng(cng), m_overall_latency_meter(overall_latency_meter)
VDevice &vdevice, std::shared_ptr<ConfiguredNetworkGroup> cng) :
m_vdevice(vdevice),
m_params(params),
m_name(name),
m_cng(cng),
m_overall_latency_meter(nullptr),
m_latency_barrier(nullptr)
{
}
Expected<std::shared_ptr<NetworkRunner>> NetworkRunner::create_shared(VDevice &vdevice, const NetworkParams &params)
{
auto hef = Hef::create(params.hef_path);
// The network params passed to the NetworkRunner may be changed by this function, hence we copy them.
auto final_net_params = params;
auto hef = Hef::create(final_net_params.hef_path);
CHECK_EXPECTED(hef);
// Get NG's name if single
auto net_group_name = params.net_group_name;
auto net_group_name = final_net_params.net_group_name;
if (net_group_name.empty()) {
auto net_groups_names = hef->get_network_groups_names();
CHECK_AS_EXPECTED(net_groups_names.size() == 1, HAILO_INVALID_ARGUMENT, "HEF {} doesn't contain a single NetworkGroup. Pass --name", params.hef_path);
CHECK_AS_EXPECTED(net_groups_names.size() == 1, HAILO_INVALID_ARGUMENT, "HEF {} doesn't contain a single NetworkGroup. Pass --name", final_net_params.hef_path);
net_group_name = net_groups_names[0];
}
auto cfg_params = vdevice.create_configure_params(hef.value(), net_group_name);
CHECK_EXPECTED(cfg_params);
cfg_params->batch_size = params.batch_size;
if (params.measure_hw_latency) {
cfg_params->batch_size = final_net_params.batch_size;
if (final_net_params.batch_size == HAILO_DEFAULT_BATCH_SIZE) {
// Changing batch_size to 1. If HAILO_DEFAULT_BATCH_SIZE is configured, the sched will send one frame per batch
final_net_params.batch_size = 1;
}
if (final_net_params.measure_hw_latency) {
cfg_params->latency |= HAILO_LATENCY_MEASURE;
}
if (final_net_params.is_async()) {
for (auto &stream_name_params_pair : cfg_params->stream_params_by_name) {
stream_name_params_pair.second.flags = HAILO_STREAM_FLAGS_ASYNC;
}
}
auto cfgr_net_groups = vdevice.configure(hef.value(), {{net_group_name, cfg_params.value()}});
CHECK_EXPECTED(cfgr_net_groups);
assert(1 == cfgr_net_groups->size());
auto cfgr_net_group = cfgr_net_groups.value()[0];
if (HAILO_SCHEDULING_ALGORITHM_NONE!= params.scheduling_algorithm) {
CHECK_SUCCESS_AS_EXPECTED(cfgr_net_group->set_scheduler_threshold(params.scheduler_threshold));
CHECK_SUCCESS_AS_EXPECTED(cfgr_net_group->set_scheduler_timeout(std::chrono::milliseconds(params.scheduler_timeout_ms)));
CHECK_SUCCESS_AS_EXPECTED(cfgr_net_group->set_scheduler_priority(params.scheduler_priority));
if (HAILO_SCHEDULING_ALGORITHM_NONE!= final_net_params.scheduling_algorithm) {
CHECK_SUCCESS_AS_EXPECTED(cfgr_net_group->set_scheduler_threshold(final_net_params.scheduler_threshold));
CHECK_SUCCESS_AS_EXPECTED(cfgr_net_group->set_scheduler_timeout(std::chrono::milliseconds(final_net_params.scheduler_timeout_ms)));
CHECK_SUCCESS_AS_EXPECTED(cfgr_net_group->set_scheduler_priority(final_net_params.scheduler_priority));
}
std::shared_ptr<NetworkRunner> net_runner_ptr = nullptr;
switch (final_net_params.mode)
{
case InferenceMode::FULL:
{
std::map<std::string, hailo_vstream_params_t> vstreams_params;
for (auto &vstream_params : params.vstream_params) {
for (auto &vstream_params : final_net_params.vstream_params) {
vstreams_params.emplace(vstream_params.name, vstream_params.params);
}
auto vstreams = create_vstreams(*cfgr_net_group, vstreams_params);
CHECK_EXPECTED(vstreams);
LatencyMeterPtr overall_latency_meter = nullptr;
if (params.measure_overall_latency) {
CHECK_AS_EXPECTED((1 == vstreams->first.size()), HAILO_INVALID_OPERATION,
"Overall latency measurement over multiple inputs network is not supported");
std::set<std::string> output_names;
for (auto &output_vstream : vstreams->second) {
output_names.insert(output_vstream.name());
}
overall_latency_meter = make_shared_nothrow<LatencyMeter>(output_names, OVERALL_LATENCY_TIMESTAMPS_LIST_LENGTH);
CHECK_NOT_NULL_AS_EXPECTED(overall_latency_meter, HAILO_OUT_OF_HOST_MEMORY);
}
auto net_runner = make_shared_nothrow<NetworkRunner>(params, net_group_name, std::move(vstreams->first),
std::move(vstreams->second), cfgr_net_group, overall_latency_meter);
auto net_runner = make_shared_nothrow<FullNetworkRunner>(final_net_params, net_group_name, vdevice,
std::move(vstreams->first), std::move(vstreams->second), cfgr_net_group);
CHECK_NOT_NULL_AS_EXPECTED(net_runner, HAILO_OUT_OF_HOST_MEMORY);
return net_runner;
net_runner_ptr = std::static_pointer_cast<NetworkRunner>(net_runner);
break;
}
Expected<BufferPtr> NetworkRunner::create_dataset_from_input_file(const std::string &file_path,
const InputVStream &input_vstream)
case InferenceMode::RAW: // Fallthrough
case InferenceMode::RAW_ASYNC: // Fallthrough
case InferenceMode::RAW_ASYNC_SINGLE_THREAD:
{
auto buffer = read_binary_file(file_path);
CHECK_EXPECTED(buffer);
CHECK_AS_EXPECTED(0 == (buffer->size() % input_vstream.get_frame_size()), HAILO_INVALID_ARGUMENT,
"Input file ({}) size {} must be a multiple of the frame size {} ({})",
file_path, buffer->size(), input_vstream.get_frame_size(), input_vstream.name());
auto input_streams = cfgr_net_group->get_input_streams();
CHECK_AS_EXPECTED(input_streams.size() > 0, HAILO_INTERNAL_FAILURE);
auto buffer_ptr = make_shared_nothrow<Buffer>(buffer.release());
CHECK_NOT_NULL_AS_EXPECTED(buffer_ptr, HAILO_OUT_OF_HOST_MEMORY);
auto output_streams = cfgr_net_group->get_output_streams();
CHECK_AS_EXPECTED(output_streams.size() > 0, HAILO_INTERNAL_FAILURE);
return buffer_ptr;
auto net_runner = make_shared_nothrow<RawNetworkRunner>(final_net_params, net_group_name, vdevice,
std::move(input_streams), std::move(output_streams), cfgr_net_group);
CHECK_NOT_NULL_AS_EXPECTED(net_runner, HAILO_OUT_OF_HOST_MEMORY);
net_runner_ptr = std::static_pointer_cast<NetworkRunner>(net_runner);
break;
}
default:
// Shouldn't get here
return make_unexpected(HAILO_INTERNAL_FAILURE);
}
Expected<BufferPtr> NetworkRunner::create_constant_dataset(const InputVStream &input_vstream)
if (final_net_params.measure_overall_latency || final_net_params.measure_hw_latency) {
auto input_names = net_runner_ptr->get_input_names();
auto output_names = net_runner_ptr->get_output_names();
CHECK_AS_EXPECTED((1 == input_names.size()), HAILO_INVALID_OPERATION,
"Latency measurement over multiple inputs network is not supported");
if (final_net_params.measure_overall_latency) {
auto overall_latency_meter = make_shared_nothrow<LatencyMeter>(output_names, OVERALL_LATENCY_TIMESTAMPS_LIST_LENGTH);
CHECK_NOT_NULL_AS_EXPECTED(overall_latency_meter, HAILO_OUT_OF_HOST_MEMORY);
net_runner_ptr->set_overall_latency_meter(overall_latency_meter);
}
// We use a barrier for both hw and overall latency
auto latency_barrier = make_shared_nothrow<Barrier>(input_names.size() + output_names.size());
CHECK_NOT_NULL_AS_EXPECTED(latency_barrier, HAILO_OUT_OF_HOST_MEMORY);
net_runner_ptr->set_latency_barrier(latency_barrier);
}
return net_runner_ptr;
}
bool NetworkRunner::inference_succeeded(hailo_status status)
{
const uint8_t const_byte = 0xAB;
auto constant_buffer = Buffer::create_shared(input_vstream.get_frame_size(), const_byte);
CHECK_EXPECTED(constant_buffer);
return constant_buffer.release();
const auto status_find_result = std::find(NetworkRunner::ALLOWED_INFERENCE_RETURN_VALUES.cbegin(),
NetworkRunner::ALLOWED_INFERENCE_RETURN_VALUES.cend(), status);
// If the status is in the allowed list, the inference has succeeded
return status_find_result != NetworkRunner::ALLOWED_INFERENCE_RETURN_VALUES.cend();
}
/**
 * Writer-thread body: repeatedly feeds frames from @a dataset into @a vstream
 * until the stream is aborted or a write fails. Optionally records overall-
 * latency start samples and throttles to the configured framerate.
 * Returns HAILO_STREAM_ABORTED_BY_USER on deliberate abort (not an error).
 */
hailo_status NetworkRunner::run_input_vstream(InputVStream &vstream, Event &shutdown_event, BufferPtr dataset,
    LatencyMeterPtr overall_latency_meter)
{
    // Whatever ends this thread, wake everyone waiting on the shutdown event.
    auto signal_event_scope_guard = SignalEventScopeGuard(shutdown_event);
    auto last_write_time = std::chrono::steady_clock::now();
    auto framerate_interval = std::chrono::duration<double>(1) / m_params.framerate;
    size_t buffer_offset = 0;
    while(true) {
        if (overall_latency_meter) {
            // Start sample taken just before the write; matched against the
            // read-side end sample in run_output_vstream.
            overall_latency_meter->add_start_sample(std::chrono::steady_clock::now().time_since_epoch());
        }
        auto status = vstream.write(MemoryView((dataset->data() + buffer_offset), vstream.get_frame_size()));
        if (status == HAILO_STREAM_ABORTED_BY_USER) {
            // Deliberate abort (stop()) - propagate without logging an error.
            return status;
        }
        CHECK_SUCCESS(status);
        // Cycle over the dataset buffer; assumes its size is a whole multiple
        // of the frame size (TODO confirm - enforced by the dataset creator).
        buffer_offset += vstream.get_frame_size();
        buffer_offset %= dataset->size();
        if (m_params.framerate != UNLIMITED_FRAMERATE) {
            // Throttle to the requested framerate, compensating for the time
            // the write itself took.
            auto elapsed_time = std::chrono::steady_clock::now() - last_write_time;
            std::this_thread::sleep_for(framerate_interval - elapsed_time);
            last_write_time = std::chrono::steady_clock::now();
        }
    }
    return HAILO_SUCCESS; // unreachable - the loop exits only via the returns above
}
/**
 * Reader-thread body: drains frames from @a vstream until abort or error.
 * Records overall-latency end samples and, for the first output only,
 * advances the live progress/FPS counter so each frame is counted once.
 * Returns HAILO_STREAM_ABORTED_BY_USER on deliberate abort (not an error).
 */
hailo_status NetworkRunner::run_output_vstream(OutputVStream &vstream, bool first, std::shared_ptr<NetworkLiveTrack> net_live_track,
    Event &shutdown_event, LatencyMeterPtr overall_latency_meter)
{
    // Whatever ends this thread, wake everyone waiting on the shutdown event.
    auto signal_event_scope_guard = SignalEventScopeGuard(shutdown_event);
    auto result = Buffer::create(vstream.get_frame_size());
    CHECK_EXPECTED_AS_STATUS(result);
    while(true) {
        auto status = vstream.read(MemoryView(result.value()));
        if (status == HAILO_STREAM_ABORTED_BY_USER) {
            // Deliberate abort (stop()) - propagate without logging an error.
            return status;
        }
        CHECK_SUCCESS(status);
        if (overall_latency_meter) {
            overall_latency_meter->add_end_sample(vstream.name(), std::chrono::steady_clock::now().time_since_epoch());
        }
        if (first) {
            // Only the first output drives the progress counter.
            net_live_track->progress();
        }
    }
    return HAILO_SUCCESS; // unreachable - the loop exits only via the returns above
}
hailo_status NetworkRunner::run(Event &shutdown_event, LivePrinter &live_printer, Barrier &barrier)
hailo_status NetworkRunner::run(EventPtr shutdown_event, LiveStats &live_stats, Barrier &activation_barrier)
{
auto ang = std::unique_ptr<ActivatedNetworkGroup>(nullptr);
if (HAILO_SCHEDULING_ALGORITHM_NONE == m_params.scheduling_algorithm) {
auto ang_exp = m_cng->activate();
if (!ang_exp) {
barrier.terminate();
activation_barrier.terminate();
}
CHECK_EXPECTED_AS_STATUS(ang_exp);
ang = ang_exp.release();
}
auto net_live_track = std::make_shared<NetworkLiveTrack>(m_name, m_cng, m_overall_latency_meter);
live_printer.add(net_live_track, 1); //support progress over multiple outputs
barrier.arrive_and_wait();
// If we measure latency (hw or overall) we send frames one at a time. Hence we don't measure fps.
const auto measure_fps = !m_params.measure_hw_latency && !m_params.measure_overall_latency;
auto net_live_track = std::make_shared<NetworkLiveTrack>(m_name, m_cng, m_overall_latency_meter, measure_fps, m_params.hef_path);
live_stats.add(net_live_track, 1); //support progress over multiple outputs
std::vector<AsyncThreadPtr<hailo_status>> threads;
for (auto &input_vstream : m_input_vstreams) {
BufferPtr dataset = nullptr;
for (auto &params : m_params.vstream_params) {
if ((input_vstream.name() == params.name) && (!params.input_file_path.empty())) {
auto dataset_exp = create_dataset_from_input_file(params.input_file_path, input_vstream);
CHECK_EXPECTED_AS_STATUS(dataset_exp);
dataset = dataset_exp.release();
}
}
if (nullptr == dataset) {
auto dataset_exp = create_constant_dataset(input_vstream);
CHECK_EXPECTED_AS_STATUS(dataset_exp);
dataset = dataset_exp.release();
}
#if defined(_MSC_VER)
TimeBeginScopeGuard time_begin_scope_guard;
#endif
threads.emplace_back(std::make_unique<AsyncThread<hailo_status>>("SEND", [this, &input_vstream, &shutdown_event,
dataset](){
return run_input_vstream(input_vstream, shutdown_event, dataset, m_overall_latency_meter);
}));
}
activation_barrier.arrive_and_wait();
bool first = true; //TODO: check with multiple outputs
for (auto &output_vstream : m_output_vstreams) {
threads.emplace_back(std::make_unique<AsyncThread<hailo_status>>("RECV", [this, &output_vstream, first, net_live_track,
&shutdown_event](){
return run_output_vstream(output_vstream, first, net_live_track, shutdown_event, m_overall_latency_meter);
}));
first = false;
}
if (m_params.mode == InferenceMode::RAW_ASYNC_SINGLE_THREAD) {
return run_single_thread_async_infer(shutdown_event, net_live_track);
} else {
auto threads = start_inference_threads(shutdown_event, net_live_track);
CHECK_EXPECTED_AS_STATUS(threads);
//TODO: return threads and move stop outside?
CHECK_SUCCESS(shutdown_event.wait(HAILO_INFINITE_TIMEOUT));
CHECK_SUCCESS(shutdown_event->wait(HAILO_INFINITE_TIMEOUT));
stop();
return wait_for_threads(threads);
return wait_for_threads(threads.value());
}
}
void NetworkRunner::stop()
void NetworkRunner::set_overall_latency_meter(LatencyMeterPtr latency_meter)
{
for (auto &input_vstream : m_input_vstreams) {
(void) input_vstream.abort();
}
for (auto &output_vstream : m_output_vstreams) {
(void) output_vstream.abort();
m_overall_latency_meter = latency_meter;
}
// Stores the barrier used to synchronize input/output threads between frames
// when latency (hw or overall) is being measured. Must be set before run().
void NetworkRunner::set_latency_barrier(BarrierPtr latency_barrier)
{
    m_latency_barrier = latency_barrier;
}
Expected<std::pair<std::vector<InputVStream>, std::vector<OutputVStream>>> NetworkRunner::create_vstreams(
@@ -277,8 +275,7 @@ Expected<std::pair<std::vector<InputVStream>, std::vector<OutputVStream>>> Netwo
if (elem_it != params.end()) {
input_vstreams_params.emplace(input_vstream_info.name, elem_it->second);
match_count++;
}
else {
} else {
input_vstreams_params.emplace(input_vstream_info.name, HailoRTDefaults::get_vstreams_params());
}
}
@@ -307,3 +304,275 @@ Expected<std::pair<std::vector<InputVStream>, std::vector<OutputVStream>>> Netwo
return {{input_vstreams.release(), output_vstreams.release()}};//TODO: move? copy elision?
}
// Statuses that count as a clean end of inference: user abort and shutdown
// are deliberate teardown paths, not failures (see inference_succeeded()).
const std::vector<hailo_status> NetworkRunner::ALLOWED_INFERENCE_RETURN_VALUES{
    {HAILO_SUCCESS, HAILO_STREAM_ABORTED_BY_USER, HAILO_SHUTDOWN_EVENT_SIGNALED}
};
// Runner for FULL (vstream-based) inference mode. Takes ownership of the
// already-created input/output vstreams; configuration of the network group
// itself is done beforehand by NetworkRunner::create_shared.
FullNetworkRunner::FullNetworkRunner(const NetworkParams &params, const std::string &name, VDevice &vdevice,
    std::vector<InputVStream> &&input_vstreams, std::vector<OutputVStream> &&output_vstreams,
    std::shared_ptr<ConfiguredNetworkGroup> cng) :
    NetworkRunner(params, name, vdevice, cng),
    m_input_vstreams(std::move(input_vstreams)),
    m_output_vstreams(std::move(output_vstreams))
{
}
/**
 * Spawns one writer thread per input vstream and one reader thread per output
 * vstream and returns their handles. Threads run until abort/error; the
 * latency barrier (if set) keeps writers and readers in lock-step per frame.
 */
Expected<std::vector<AsyncThreadPtr<hailo_status>>> FullNetworkRunner::start_inference_threads(EventPtr shutdown_event,
    std::shared_ptr<NetworkLiveTrack> net_live_track)
{
    std::vector<AsyncThreadPtr<hailo_status>> threads;
    for (auto &input_vstream : m_input_vstreams) {
        const auto vstream_params = get_params(input_vstream.name());
        auto writer = WriterWrapper<InputVStream>::create(input_vstream, vstream_params, m_overall_latency_meter,
            m_params.framerate);
        CHECK_EXPECTED(writer);
        // The wrapper is released into the lambda so the thread owns it.
        threads.emplace_back(std::make_unique<AsyncThread<hailo_status>>("WRITE",
            [this, writer = writer.release(), shutdown_event]() mutable {
                return run_write(writer, shutdown_event, m_latency_barrier);
            }));
    }
    bool first = true; //TODO: check with multiple outputs
    for (auto &output_vstream : m_output_vstreams) {
        // Only the first output gets the live track, so frames are counted once.
        auto reader = ReaderWrapper<OutputVStream>::create(output_vstream, m_overall_latency_meter,
            first ? net_live_track : nullptr);
        CHECK_EXPECTED(reader);
        threads.emplace_back(std::make_unique<AsyncThread<hailo_status>>("READ",
            [this, reader=reader.release(), shutdown_event]() mutable {
                return run_read(reader, shutdown_event, m_latency_barrier);
            }));
        first = false;
    }
    return threads;
}
void FullNetworkRunner::stop()
{
for (auto &input_vstream : m_input_vstreams) {
(void) input_vstream.abort();
}
for (auto &output_vstream : m_output_vstreams) {
(void) output_vstream.abort();
}
}
// Collects the names of all input vstreams into a sorted, unique set.
std::set<std::string> FullNetworkRunner::get_input_names()
{
    std::set<std::string> names;
    std::transform(m_input_vstreams.begin(), m_input_vstreams.end(),
        std::inserter(names, names.end()),
        [](const auto &vstream) { return vstream.name(); });
    return names;
}
// Collects the names of all output vstreams into a sorted, unique set.
std::set<std::string> FullNetworkRunner::get_output_names()
{
    std::set<std::string> names;
    std::transform(m_output_vstreams.begin(), m_output_vstreams.end(),
        std::inserter(names, names.end()),
        [](const auto &vstream) { return vstream.name(); });
    return names;
}
// Looks up the user-supplied parameters for the vstream called 'name'.
// Falls back to default-constructed VStreamParams when no entry matches.
VStreamParams FullNetworkRunner::get_params(const std::string &name)
{
    const auto &all_params = m_params.vstream_params;
    auto match = std::find_if(all_params.begin(), all_params.end(),
        [&name](const VStreamParams &params) { return name == params.name; });
    return (match != all_params.end()) ? *match : VStreamParams();
}
// Runner for RAW stream modes (sync, async and single-thread-async). Takes
// the configured network group's low-level stream references directly,
// bypassing the vstream pipeline used by FullNetworkRunner.
RawNetworkRunner::RawNetworkRunner(const NetworkParams &params, const std::string &name, VDevice &vdevice,
    InputStreamRefVector &&input_streams, OutputStreamRefVector &&output_streams,
    std::shared_ptr<ConfiguredNetworkGroup> cng) :
    NetworkRunner(params, name, vdevice, cng),
    m_input_streams(std::move(input_streams)),
    m_output_streams(std::move(output_streams))
{
}
/**
 * Spawns one writer thread per input stream and one reader thread per output
 * stream, choosing the async or sync run_* entry point based on the params.
 * Returns the thread handles; threads run until abort/error.
 */
Expected<std::vector<AsyncThreadPtr<hailo_status>>> RawNetworkRunner::start_inference_threads(EventPtr shutdown_event,
    std::shared_ptr<NetworkLiveTrack> net_live_track)
{
    const bool async_streams = (m_params.is_async());
    std::vector<AsyncThreadPtr<hailo_status>> threads;
    for (auto &input_stream : m_input_streams) {
        const auto stream_params = get_params(input_stream.get().name());
        auto writer = WriterWrapper<InputStream>::create(input_stream.get(), stream_params, m_overall_latency_meter,
            m_params.framerate);
        CHECK_EXPECTED(writer);
        // The wrapper is released into the lambda so the thread owns it.
        if (async_streams) {
            threads.emplace_back(std::make_unique<AsyncThread<hailo_status>>("WRITE_ASYNC",
                [this, writer = writer.release(), shutdown_event]() mutable {
                    return run_write_async(writer, shutdown_event, m_latency_barrier);
                }));
        } else {
            threads.emplace_back(std::make_unique<AsyncThread<hailo_status>>("WRITE",
                [this, writer = writer.release(), shutdown_event]() mutable {
                    return run_write(writer, shutdown_event, m_latency_barrier);
                }));
        }
    }
    bool first = true; //TODO: check with multiple outputs
    for (auto &output_stream : m_output_streams) {
        // Only the first output gets the live track, so frames are counted once.
        auto reader = ReaderWrapper<OutputStream>::create(output_stream.get(), m_overall_latency_meter,
            first ? net_live_track : nullptr);
        CHECK_EXPECTED(reader);
        if (async_streams) {
            threads.emplace_back(std::make_unique<AsyncThread<hailo_status>>("READ_ASYNC",
                [this, reader=reader.release(), shutdown_event]() mutable {
                    return run_read_async(reader, shutdown_event, m_latency_barrier);
                }));
        } else {
            threads.emplace_back(std::make_unique<AsyncThread<hailo_status>>("READ",
                [this, reader=reader.release(), shutdown_event]() mutable {
                    return run_read(reader, shutdown_event, m_latency_barrier);
                }));
        }
        first = false;
    }
    return threads;
}
// Runs the whole async inference on the calling thread: a single event-multiplexing
// loop issues read_async/write_async on whichever stream has a free slot.
// Per-stream semaphores (initialized to the stream's async queue capacity) track
// free transfer slots; each completion callback signals its semaphore to return
// the slot. The loop exits (and aborts the streams via stop()) once shutdown_event
// is signalled.
hailo_status RawNetworkRunner::run_single_thread_async_infer(EventPtr shutdown_event,
std::shared_ptr<NetworkLiveTrack> net_live_track)
{
// Build output wrappers
std::vector<ReaderWrapperPtr<OutputStream>> reader_wrappers;
std::vector<SemaphorePtr> output_semaphores;
bool is_first_output = true;
for (auto &output_stream : m_output_streams) {
// Only the first output feeds net_live_track (frame accounting done once).
auto reader_wrapper = ReaderWrapper<OutputStream>::create(output_stream.get(), m_overall_latency_meter,
is_first_output ? net_live_track : nullptr);
CHECK_EXPECTED_AS_STATUS(reader_wrapper);
is_first_output = false;
// Semaphore starts at the stream's max async queue size => counts free slots.
auto max_queue_size = reader_wrapper.value()->get().get_async_max_queue_size();
CHECK_EXPECTED_AS_STATUS(max_queue_size);
auto semaphore = Semaphore::create_shared(static_cast<uint32_t>(*max_queue_size));
CHECK_NOT_NULL(semaphore, HAILO_OUT_OF_HOST_MEMORY);
output_semaphores.emplace_back(semaphore);
reader_wrappers.emplace_back(reader_wrapper.release());
}
// Build input wrappers
std::vector<WriterWrapperPtr<InputStream>> writer_wrappers;
std::vector<SemaphorePtr> input_semaphores;
for (auto &input_stream : m_input_streams) {
auto writer_wrapper = WriterWrapper<InputStream>::create(input_stream.get(),
get_params(input_stream.get().name()), m_overall_latency_meter, m_params.framerate);
CHECK_EXPECTED_AS_STATUS(writer_wrapper);
auto max_queue_size = writer_wrapper.value()->get().get_async_max_queue_size();
CHECK_EXPECTED_AS_STATUS(max_queue_size);
auto semaphore = Semaphore::create_shared(static_cast<uint32_t>(*max_queue_size));
CHECK_NOT_NULL(semaphore, HAILO_OUT_OF_HOST_MEMORY);
input_semaphores.emplace_back(semaphore);
writer_wrappers.emplace_back(writer_wrapper.release());
}
// Build waitables list with reference to previous input/output semaphores.
// We put output semaphores before inputs because we want to always have place to write
// the data into. It also makes sure that the framerate throttle will work properly.
// Waitable index layout: [0]=shutdown_event, then outputs, then inputs.
const size_t shutdown_index = 0;
const size_t output_index_start = shutdown_index + 1;
const size_t input_index_start = output_index_start + output_semaphores.size();
std::vector<std::reference_wrapper<Waitable>> waitables;
waitables.emplace_back(std::ref(*shutdown_event));
auto add_to_waitables = [&waitables](const SemaphorePtr &sem) { waitables.emplace_back(std::ref(*sem)); };
std::for_each(output_semaphores.begin(), output_semaphores.end(), add_to_waitables);
std::for_each(input_semaphores.begin(), input_semaphores.end(), add_to_waitables);
WaitableGroup wait_group(std::move(waitables));
// Inference
while (true) {
// wait_any decrements the ready semaphore (consumes one free slot) and
// returns its index in the waitables vector.
auto wait_index = wait_group.wait_any(HAILORTCLI_DEFAULT_TIMEOUT);
CHECK_EXPECTED_AS_STATUS(wait_index);
if (*wait_index == shutdown_index) {
// Stopping the network so we won't get timeout on the flush. The async operations may still be active
// (until network deactivation).
stop();
break;
} else if ((*wait_index >= output_index_start) && (*wait_index < input_index_start)) {
// output is ready
const size_t output_index = *wait_index - output_index_start;
// Completion callback signals the semaphore, returning the slot.
auto status = reader_wrappers[output_index]->read_async(
[semaphore=output_semaphores[output_index]](const OutputStream::CompletionInfo &) {
(void)semaphore->signal();
}
);
CHECK_SUCCESS(status);
} else {
// input is ready
const size_t input_index = *wait_index - input_index_start;
auto status = writer_wrappers[input_index]->write_async(
[semaphore=input_semaphores[input_index]](const InputStream::CompletionInfo &) {
(void)semaphore->signal();
}
);
CHECK_SUCCESS(status);
}
}
return HAILO_SUCCESS;
}
void RawNetworkRunner::stop()
{
for (auto &input_stream : m_input_streams) {
(void) input_stream.get().abort();
}
for (auto &output_stream : m_output_streams) {
(void) output_stream.get().abort();
}
}
// Returns the set of names of all raw input streams of this runner.
std::set<std::string> RawNetworkRunner::get_input_names()
{
    std::set<std::string> names;
    for (const auto &stream_ref : m_input_streams) {
        names.emplace(stream_ref.get().name());
    }
    return names;
}
// Returns the set of names of all raw output streams of this runner.
std::set<std::string> RawNetworkRunner::get_output_names()
{
    std::set<std::string> names;
    for (const auto &stream_ref : m_output_streams) {
        names.emplace(stream_ref.get().name());
    }
    return names;
}
// Looks up the user-provided StreamParams entry whose name matches the given
// stream name. Falls back to default-constructed StreamParams when no entry
// matches (i.e. the user did not configure this stream explicitly).
StreamParams RawNetworkRunner::get_params(const std::string &name)
{
    const auto &candidates = m_params.stream_params;
    for (size_t i = 0; i < candidates.size(); i++) {
        if (candidates[i].name == name) {
            return candidates[i];
        }
    }
    return StreamParams();
}

View File

@@ -10,7 +10,15 @@
#ifndef _HAILO_HAILORTCLI_RUN2_NETWORK_RUNNER_HPP_
#define _HAILO_HAILORTCLI_RUN2_NETWORK_RUNNER_HPP_
#include "io_wrappers.hpp"
#include "live_stats.hpp"
#include "network_live_track.hpp"
#include "../hailortcli.hpp"
#include "common/barrier.hpp"
#include "common/async_thread.hpp"
#include "common/event_internal.hpp"
#include "hailo/vdevice.hpp"
#include "hailo/vstream.hpp"
@@ -19,23 +27,42 @@
#include "hailo/expected.hpp"
#include "hailo/buffer.hpp"
#include "../hailortcli.hpp"
#include "live_printer.hpp"
#include "network_live_track.hpp"
#include <string>
#include <vector>
constexpr uint32_t UNLIMITED_FRAMERATE = 0;
using namespace hailort;
struct VStreamParams
constexpr std::chrono::milliseconds SYNC_EVENT_TIMEOUT(1000);
enum class InferenceMode {
FULL,
RAW,
RAW_ASYNC,
RAW_ASYNC_SINGLE_THREAD,
};
struct IoParams
{
IoParams();
std::string name;
std::string input_file_path;
};
struct VStreamParams : public IoParams
{
VStreamParams();
std::string name;
hailo_vstream_params_t params;
std::string input_file_path;
};
struct StreamParams : public IoParams
{
StreamParams();
hailo_stream_flags_t flags;
};
struct NetworkParams
@@ -45,6 +72,7 @@ struct NetworkParams
std::string hef_path;
std::string net_group_name;
std::vector<VStreamParams> vstream_params;
std::vector<StreamParams> stream_params;
hailo_scheduling_algorithm_t scheduling_algorithm;
// Network parameters
@@ -58,35 +86,274 @@ struct NetworkParams
bool measure_hw_latency;
bool measure_overall_latency;
InferenceMode mode;
bool is_async() const
{
return (mode == InferenceMode::RAW_ASYNC) || (mode == InferenceMode::RAW_ASYNC_SINGLE_THREAD);
}
};
class SignalEventScopeGuard final
{
public:
SignalEventScopeGuard(Event &event);
~SignalEventScopeGuard();
private:
Event &m_event;
};
class BarrierTerminateScopeGuard final
{
public:
BarrierTerminateScopeGuard(BarrierPtr barrier);
~BarrierTerminateScopeGuard();
private:
BarrierPtr m_barrier;
};
class NetworkRunner
{
public:
static Expected<std::shared_ptr<NetworkRunner>> create_shared(VDevice &vdevice, const NetworkParams &params);
NetworkRunner(const NetworkParams &params, const std::string &name,
std::vector<hailort::InputVStream> &&input_vstreams, std::vector<hailort::OutputVStream> &&output_vstreams,
std::shared_ptr<hailort::ConfiguredNetworkGroup> cng, hailort::LatencyMeterPtr overall_latency_meter);
static hailort::Expected<std::shared_ptr<NetworkRunner>> create_shared(hailort::VDevice &vdevice, const NetworkParams &params);
hailo_status run(hailort::Event &shutdown_event, LivePrinter &live_printer, hailort::Barrier &barrier);
void stop();
VDevice &vdevice, std::shared_ptr<ConfiguredNetworkGroup> cng);
virtual ~NetworkRunner() = default;
hailo_status run(EventPtr shutdown_event, LiveStats &live_stats, Barrier &activation_barrier);
virtual void stop() = 0;
// Must be called prior to run
void set_overall_latency_meter(LatencyMeterPtr latency_meter);
void set_latency_barrier(BarrierPtr latency_barrier);
protected:
static bool inference_succeeded(hailo_status status);
// Use 'inference_succeeded(async_thread->get())' to check for a thread's success
virtual Expected<std::vector<AsyncThreadPtr<hailo_status>>> start_inference_threads(EventPtr shutdown_event,
std::shared_ptr<NetworkLiveTrack> net_live_track) = 0;
virtual hailo_status run_single_thread_async_infer(EventPtr shutdown_event,
std::shared_ptr<NetworkLiveTrack> net_live_track) = 0;
virtual std::set<std::string> get_input_names() = 0;
virtual std::set<std::string> get_output_names() = 0;
static Expected<std::pair<std::vector<InputVStream>, std::vector<OutputVStream>>> create_vstreams(
ConfiguredNetworkGroup &net_group, const std::map<std::string, hailo_vstream_params_t> &params);
template <typename Writer>
hailo_status run_write(WriterWrapperPtr<Writer> writer, EventPtr shutdown_event,
std::shared_ptr<Barrier> latency_barrier)
{
auto latency_barrier_scope_guard = BarrierTerminateScopeGuard(latency_barrier);
auto signal_event_scope_guard = SignalEventScopeGuard(*shutdown_event);
while (true) {
if (latency_barrier) {
latency_barrier->arrive_and_wait();
}
for (auto i = 0; i < m_params.batch_size; i++) {
auto status = writer->write();
if (status == HAILO_STREAM_ABORTED_BY_USER) {
return status;
}
CHECK_SUCCESS(status);
}
}
return HAILO_SUCCESS;
}
template <typename Writer>
hailo_status run_write_async(WriterWrapperPtr<Writer> writer, EventPtr shutdown_event,
std::shared_ptr<Barrier> latency_barrier)
{
auto latency_barrier_scope_guard = BarrierTerminateScopeGuard(latency_barrier);
auto signal_event_scope_guard = SignalEventScopeGuard(*shutdown_event);
// When measuring latency we want to send one frame at a time (to avoid back-pressure)
// sync_event will be used to send one frame at a time
EventPtr sync_event = nullptr;
if (m_params.measure_hw_latency || m_params.measure_overall_latency) {
sync_event = Event::create_shared(Event::State::not_signalled);
CHECK_NOT_NULL(sync_event, HAILO_OUT_OF_HOST_MEMORY);
}
while (true) {
if (latency_barrier) {
latency_barrier->arrive_and_wait();
}
for (auto i = 0; i < m_params.batch_size; i++) {
auto status = writer->wait_for_async_ready();
if (status == HAILO_STREAM_ABORTED_BY_USER) {
return status;
}
CHECK_SUCCESS(status);
status = writer->write_async(
[sync_event](const typename Writer::CompletionInfo &) {
if (sync_event) {
(void)sync_event->signal();
}
});
if (status == HAILO_STREAM_ABORTED_BY_USER) {
return status;
}
CHECK_SUCCESS(status);
if (m_params.measure_hw_latency || m_params.measure_overall_latency) {
status = WaitOrShutdown(sync_event, shutdown_event).wait(SYNC_EVENT_TIMEOUT);
if (HAILO_SHUTDOWN_EVENT_SIGNALED == status) {
// Don't print an error for this
return status;
}
CHECK_SUCCESS(status);
status = sync_event->reset();
CHECK_SUCCESS(status);
}
}
}
return HAILO_SUCCESS;
}
template <typename Reader>
hailo_status run_read(ReaderWrapperPtr<Reader> reader, EventPtr shutdown_event,
std::shared_ptr<Barrier> latency_barrier)
{
auto latency_barrier_scope_guard = BarrierTerminateScopeGuard(latency_barrier);
auto signal_event_scope_guard = SignalEventScopeGuard(*shutdown_event);
while (true) {
if (latency_barrier) {
latency_barrier->arrive_and_wait();
}
for (auto i = 0; i < m_params.batch_size; i++) {
auto status = reader->read();
if (status == HAILO_STREAM_ABORTED_BY_USER) {
return status;
}
CHECK_SUCCESS(status);
}
}
return HAILO_SUCCESS;
}
template <typename Reader>
hailo_status run_read_async(ReaderWrapperPtr<Reader> reader, EventPtr shutdown_event,
std::shared_ptr<Barrier> latency_barrier)
{
auto latency_barrier_scope_guard = BarrierTerminateScopeGuard(latency_barrier);
auto signal_event_scope_guard = SignalEventScopeGuard(*shutdown_event);
// When measuring latency we want to send one frame at a time (to avoid back-pressure)
// sync_event will be used to send one frame at a time
EventPtr sync_event = nullptr;
if (m_params.measure_hw_latency || m_params.measure_overall_latency) {
sync_event = Event::create_shared(Event::State::not_signalled);
CHECK_NOT_NULL(sync_event, HAILO_OUT_OF_HOST_MEMORY);
}
while (true) {
if (latency_barrier) {
latency_barrier->arrive_and_wait();
}
for (auto i = 0; i < m_params.batch_size; i++) {
auto status = reader->wait_for_async_ready();
if (status == HAILO_STREAM_ABORTED_BY_USER) {
return status;
}
CHECK_SUCCESS(status);
status = reader->read_async(
[sync_event](const typename Reader::CompletionInfo &) {
if (sync_event) {
(void)sync_event->signal();
}
});
if (status == HAILO_STREAM_ABORTED_BY_USER) {
return status;
}
CHECK_SUCCESS(status);
if (m_params.measure_hw_latency || m_params.measure_overall_latency) {
status = WaitOrShutdown(sync_event, shutdown_event).wait(SYNC_EVENT_TIMEOUT);
if (HAILO_SHUTDOWN_EVENT_SIGNALED == status) {
// Don't print an error for this
return status;
}
CHECK_SUCCESS(status);
status = sync_event->reset();
CHECK_SUCCESS(status);
}
}
}
return HAILO_SUCCESS;
}
VDevice &m_vdevice;
const NetworkParams m_params;
std::string m_name;
std::shared_ptr<ConfiguredNetworkGroup> m_cng;
LatencyMeterPtr m_overall_latency_meter;
BarrierPtr m_latency_barrier;
private:
static hailort::Expected<std::pair<std::vector<hailort::InputVStream>, std::vector<hailort::OutputVStream>>> create_vstreams(
hailort::ConfiguredNetworkGroup &net_group, const std::map<std::string, hailo_vstream_params_t> &params);
hailo_status run_input_vstream(hailort::InputVStream &vstream, hailort::Event &shutdown_event, hailort::BufferPtr dataset,
hailort::LatencyMeterPtr overall_latency_meter);
static hailo_status run_output_vstream(hailort::OutputVStream &vstream, bool first, std::shared_ptr<NetworkLiveTrack> net_live_track,
hailort::Event &shutdown_event, hailort::LatencyMeterPtr overall_latency_meter);
static const std::vector<hailo_status> ALLOWED_INFERENCE_RETURN_VALUES;
static hailo_status wait_for_threads(std::vector<AsyncThreadPtr<hailo_status>> &threads);
static Expected<BufferPtr> create_constant_dataset(size_t size);
static Expected<BufferPtr> create_dataset_from_input_file(const std::string &file_path, size_t size);
};
static hailort::Expected<hailort::BufferPtr> create_constant_dataset(const hailort::InputVStream &input_vstream);
static hailort::Expected<hailort::BufferPtr> create_dataset_from_input_file(const std::string &file_path, const hailort::InputVStream &input_vstream);
class FullNetworkRunner : public NetworkRunner
{
public:
FullNetworkRunner(const NetworkParams &params, const std::string &name, VDevice &vdevice,
std::vector<InputVStream> &&input_vstreams, std::vector<OutputVStream> &&output_vstreams,
std::shared_ptr<ConfiguredNetworkGroup> cng);
const NetworkParams &m_params;//TODO: copy instead of ref?
std::string m_name;
std::vector<hailort::InputVStream> m_input_vstreams;
std::vector<hailort::OutputVStream> m_output_vstreams;
std::shared_ptr<hailort::ConfiguredNetworkGroup> m_cng;
hailort::LatencyMeterPtr m_overall_latency_meter;
virtual Expected<std::vector<AsyncThreadPtr<hailo_status>>> start_inference_threads(EventPtr shutdown_event,
std::shared_ptr<NetworkLiveTrack> net_live_track) override;
virtual hailo_status run_single_thread_async_infer(EventPtr, std::shared_ptr<NetworkLiveTrack>) override
{
return HAILO_NOT_IMPLEMENTED;
};
virtual void stop() override;
virtual std::set<std::string> get_input_names() override;
virtual std::set<std::string> get_output_names() override;
VStreamParams get_params(const std::string &name);
private:
std::vector<InputVStream> m_input_vstreams;
std::vector<OutputVStream> m_output_vstreams;
};
class RawNetworkRunner : public NetworkRunner
{
public:
RawNetworkRunner(const NetworkParams &params, const std::string &name, VDevice &vdevice,
InputStreamRefVector &&input_streams, OutputStreamRefVector &&output_streams,
std::shared_ptr<ConfiguredNetworkGroup> cng);
virtual Expected<std::vector<AsyncThreadPtr<hailo_status>>> start_inference_threads(EventPtr shutdown_event,
std::shared_ptr<NetworkLiveTrack> net_live_track) override;
virtual hailo_status run_single_thread_async_infer(EventPtr shutdown_event,
std::shared_ptr<NetworkLiveTrack> net_live_track) override;
virtual void stop() override;
virtual std::set<std::string> get_input_names() override;
virtual std::set<std::string> get_output_names() override;
StreamParams get_params(const std::string &name);
private:
InputStreamRefVector m_input_streams;
OutputStreamRefVector m_output_streams;
};
#endif /* _HAILO_HAILORTCLI_RUN2_NETWORK_RUNNER_HPP_ */

View File

@@ -8,13 +8,14 @@
**/
#include "run2_command.hpp"
#include "live_printer.hpp"
#include "live_stats.hpp"
#include "timer_live_track.hpp"
#include "measurement_live_track.hpp"
#include "network_runner.hpp"
#include "common/barrier.hpp"
#include "common/async_thread.hpp"
#include "../common.hpp"
#include "hailo/vdevice.hpp"
#include "hailo/hef.hpp"
@@ -73,32 +74,102 @@ std::vector<std::string> VStreamNameValidator::get_values(const std::string &hef
return names;
}
class StreamNameValidator : public CLI::Validator {
public:
StreamNameValidator(const CLI::Option *hef_path_option, const CLI::Option *net_group_name_option);
private:
static std::vector<std::string> get_values(const std::string &hef_path, const std::string &net_group_name);
};
StreamNameValidator::StreamNameValidator(const CLI::Option *hef_path_option, const CLI::Option *net_group_name_option) : Validator("STREAM") {
func_ = [](std::string&) {
//TODO: support?
return std::string();
};
autocomplete_func_ = [hef_path_option, net_group_name_option](const std::string&) {
// TODO: remove existing names from prev user input
return get_values(hef_path_option->as<std::string>(), net_group_name_option->as<std::string>());
};
}
std::vector<std::string> StreamNameValidator::get_values(const std::string &hef_path, const std::string &net_group_name)
{
auto hef = Hef::create(hef_path);
if (!hef.has_value()) {
return {};
}
// TODO: duplicate
auto actual_net_group_name = net_group_name;
if (actual_net_group_name.empty()) {
auto net_groups_names = hef->get_network_groups_names();
if (net_groups_names.size() != 1) {
return {};
}
actual_net_group_name = net_groups_names[0];
}
auto streams_info = hef->get_all_stream_infos(actual_net_group_name);
if (!streams_info.has_value()) {
return {};
}
std::vector<std::string> names;
for (auto &stream_info : streams_info.value()) {
names.emplace_back(stream_info.name);
}
return names;
}
IoApp::IoApp(const std::string &description, const std::string &name, Type type) :
CLI::App(description, name),
m_type(type),
m_vstream_params(),
m_stream_params()
{
}
IoApp::Type IoApp::get_type() const
{
return m_type;
}
const VStreamParams &IoApp::get_vstream_params() const
{
// TODO: instead of copy do a move + call reset()? change func name to move_params? same for NetworkParams/NetworkApp class
return m_vstream_params;
}
const StreamParams &IoApp::get_stream_params() const
{
// TODO: instead of copy do a move + call reset()? change func name to move_params? same for NetworkParams/NetworkApp class
return m_stream_params;
}
/** VStreamApp */
class VStreamApp : public CLI::App
class VStreamApp : public IoApp
{
public:
VStreamApp(const std::string &description, const std::string &name, CLI::Option *hef_path_option, CLI::Option *net_group_name_option);
const VStreamParams& get_params();
private:
CLI::Option* add_flag_callback(CLI::App *app, const std::string &name, const std::string &description,
std::function<void(bool)> function);
VStreamParams m_params;
};
VStreamApp::VStreamApp(const std::string &description, const std::string &name, CLI::Option *hef_path_option,
CLI::Option *net_group_name_option) : CLI::App(description, name), m_params()
CLI::Option *net_group_name_option) :
IoApp(description, name, IoApp::Type::VSTREAM)
{
add_option("name", m_params.name, "vStream name")
add_option("name", m_vstream_params.name, "vStream name")
->check(VStreamNameValidator(hef_path_option, net_group_name_option));
add_option("--input-file", m_params.input_file_path,
add_option("--input-file", m_vstream_params.input_file_path,
"Input file path. If not given, random data will be used. File format should be raw binary data with size that is a factor of the input shape size")
->default_val("");
auto format_opt_group = add_option_group("Format");
format_opt_group->add_option("--type", m_params.params.user_buffer_format.type, "Format type")
format_opt_group->add_option("--type", m_vstream_params.params.user_buffer_format.type, "Format type")
->transform(HailoCheckedTransformer<hailo_format_type_t>({
{ "auto", HAILO_FORMAT_TYPE_AUTO },
{ "uint8", HAILO_FORMAT_TYPE_UINT8 },
@@ -107,7 +178,7 @@ VStreamApp::VStreamApp(const std::string &description, const std::string &name,
}))
->default_val("auto");
format_opt_group->add_option("--order", m_params.params.user_buffer_format.order, "Format order")
format_opt_group->add_option("--order", m_vstream_params.params.user_buffer_format.order, "Format order")
->transform(HailoCheckedTransformer<hailo_format_order_t>({
{ "auto", HAILO_FORMAT_ORDER_AUTO },
{ "nhwc", HAILO_FORMAT_ORDER_NHWC },
@@ -130,19 +201,13 @@ VStreamApp::VStreamApp(const std::string &description, const std::string &name,
add_flag_callback(format_opt_group, "-q,--quantized,!--no-quantized", "Whether or not data is quantized",
[this](bool result){
m_params.params.user_buffer_format.flags = result ?
static_cast<hailo_format_flags_t>(m_params.params.user_buffer_format.flags | HAILO_FORMAT_FLAGS_QUANTIZED) :
static_cast<hailo_format_flags_t>(m_params.params.user_buffer_format.flags & (~HAILO_FORMAT_FLAGS_QUANTIZED));})
m_vstream_params.params.user_buffer_format.flags = result ?
static_cast<hailo_format_flags_t>(m_vstream_params.params.user_buffer_format.flags | HAILO_FORMAT_FLAGS_QUANTIZED) :
static_cast<hailo_format_flags_t>(m_vstream_params.params.user_buffer_format.flags & (~HAILO_FORMAT_FLAGS_QUANTIZED));})
->run_callback_for_default()
->default_val(true); // default_val() must be after run_callback_for_default()
}
const VStreamParams& VStreamApp::get_params()
{
//TODO: instead of copy do a move + call reset()? change func name to move_params? same for NetworkParams/NetworkApp class
return m_params;
}
CLI::Option* VStreamApp::add_flag_callback(CLI::App *app, const std::string &name, const std::string &description,
std::function<void(bool)> function)
{
@@ -152,6 +217,35 @@ CLI::Option* VStreamApp::add_flag_callback(CLI::App *app, const std::string &nam
return app->add_flag_function(name, wrap_function, description);
}
/** StreamApp */
class StreamApp : public IoApp
{
public:
StreamApp(const std::string &description, const std::string &name, CLI::Option *hef_path_option, CLI::Option *net_group_name_option);
};
StreamApp::StreamApp(const std::string &description, const std::string &name, CLI::Option *hef_path_option,
CLI::Option *net_group_name_option) :
IoApp(description, name, IoApp::Type::STREAM)
{
add_option("name", m_stream_params.name, "Stream name")
->check(StreamNameValidator(hef_path_option, net_group_name_option));
add_option("--input-file", m_stream_params.input_file_path,
"Input file path. If not given, random data will be used. File format should be raw binary data with size that is a factor of the input shape size")
->default_val("");
// TODO: async option (HRT-9580)
// TODO: flag callback?
// add_flag_callback(format_opt_group, "-q,--quantized,!--no-quantized", "Whether or not data is quantized",
// [this](bool result){
// m_params.params.user_buffer_format.flags = result ?
// static_cast<hailo_format_flags_t>(m_params.params.user_buffer_format.flags | HAILO_FORMAT_FLAGS_QUANTIZED) :
// static_cast<hailo_format_flags_t>(m_params.params.user_buffer_format.flags & (~HAILO_FORMAT_FLAGS_QUANTIZED));})
// ->run_callback_for_default()
// ->default_val(true); // default_val() must be after run_callback_for_default()
}
/** NetworkGroupNameValidator */
class NetworkGroupNameValidator : public CLI::Validator {
public:
@@ -173,18 +267,9 @@ NetworkGroupNameValidator::NetworkGroupNameValidator(const CLI::Option *hef_path
}
/** NetworkApp */
class NetworkApp : public CLI::App
{
public:
NetworkApp(const std::string &description, const std::string &name);
const NetworkParams& get_params();
private:
void add_vstream_app_subcom(CLI::Option *hef_path_option, CLI::Option *net_group_name_option);
NetworkParams m_params;
};
NetworkApp::NetworkApp(const std::string &description, const std::string &name) : CLI::App(description, name), m_params()
NetworkApp::NetworkApp(const std::string &description, const std::string &name) :
CLI::App(description, name),
m_params()
{
auto hef_path_option = add_option("hef", m_params.hef_path, "HEF file path")->check(CLI::ExistingFile);
auto net_group_name_option = add_option("--name", m_params.net_group_name, "Network group name")
@@ -204,34 +289,11 @@ NetworkApp::NetworkApp(const std::string &description, const std::string &name)
// TODO: support multiple scheduling algorithms
m_params.scheduling_algorithm = HAILO_SCHEDULING_ALGORITHM_ROUND_ROBIN;
add_vstream_app_subcom(hef_path_option, net_group_name_option);
}
void NetworkApp::add_vstream_app_subcom(CLI::Option *hef_path_option, CLI::Option *net_group_name_option)
{
auto vstream_app = std::make_shared<VStreamApp>("Set vStream", "set-vstream", hef_path_option, net_group_name_option);
vstream_app->immediate_callback();
vstream_app->callback([this, vstream_app, hef_path_option, net_group_name_option]() {
m_params.vstream_params.push_back(vstream_app->get_params());
// Throw an error if anything is left over and should not be.
_process_extras();
// NOTE: calling "net_app->clear(); m_params = NetworkParams();" is not sufficient because default values
// need to be re-set. we can override clear and reset them but there might be other issues as well
// and this one feels less hacky ATM
remove_subcommand(vstream_app.get());
// Remove from parsed_subcommands_ as well (probably a bug in CLI11)
parsed_subcommands_.erase(std::remove_if(
parsed_subcommands_.begin(), parsed_subcommands_.end(),
[vstream_app](auto x){return x == vstream_app.get();}),
parsed_subcommands_.end());
add_vstream_app_subcom(hef_path_option, net_group_name_option);
});
// Must set fallthrough to support nested repeated subcommands.
vstream_app->fallthrough();
add_subcommand(vstream_app);
auto vstream_subcommand = add_io_app_subcom<VStreamApp>("Set vStream", "set-vstream", hef_path_option, net_group_name_option);
auto stream_subcommand = add_io_app_subcom<StreamApp>("Set Stream", "set-stream", hef_path_option, net_group_name_option);
// TODO: doesn't seem to be working (HRT-9886)
vstream_subcommand->excludes(stream_subcommand);
stream_subcommand->excludes(vstream_subcommand);
}
const NetworkParams& NetworkApp::get_params()
@@ -252,16 +314,23 @@ public:
bool get_measure_power();
bool get_measure_current();
bool get_measure_temp();
bool get_measure_hw_latency();
bool get_measure_overall_latency();
bool get_multi_process_service();
const std::string &get_group_id();
InferenceMode get_mode() const;
const std::string &get_output_json_path();
void set_scheduling_algorithm(hailo_scheduling_algorithm_t scheduling_algorithm);
void set_inference_mode();
void set_measure_latency();
private:
void add_net_app_subcom();
std::vector<NetworkParams> m_network_params;
uint32_t m_time_to_run;
InferenceMode m_mode;
std::string m_stats_json_path;
std::vector<std::string> m_device_id;
uint32_t m_device_count;
bool m_multi_process_service;
@@ -282,6 +351,17 @@ Run2::Run2() : CLI::App("Run networks (preview)", "run2")
add_option("-t,--time-to-run", m_time_to_run, "Time to run (seconds)")
->default_val(DEFAULT_TIME_TO_RUN_SECONDS)
->check(CLI::PositiveNumber);
add_option("-m,--mode", m_mode, "Inference mode")
->transform(HailoCheckedTransformer<InferenceMode>({
{ "full", InferenceMode::FULL },
{ "raw", InferenceMode::RAW },
{ "raw_async", InferenceMode::RAW_ASYNC },
{ "raw_async_single_thread", InferenceMode::RAW_ASYNC_SINGLE_THREAD, OptionVisibility::HIDDEN }
}))->default_val("full");
static const char *JSON_SUFFIX = ".json";
add_option("-j,--json", m_stats_json_path, "If set save statistics as json to the specified path")
->default_val("")
->check(FileSuffixValidator(JSON_SUFFIX));
auto vdevice_options_group = add_option_group("VDevice Options");
@@ -307,7 +387,7 @@ Run2::Run2() : CLI::App("Run networks (preview)", "run2")
measurement_options_group->add_flag("--measure-current", m_measure_current, "Measure current")->excludes(measure_power_opt)
->default_val(false);
measurement_options_group->add_flag("--measure-latency", m_measure_hw_latency, "Measure network latency")
measurement_options_group->add_flag("--measure-latency", m_measure_hw_latency, "Measure network latency on the NN core")
->default_val(false);
measurement_options_group->add_flag("--measure-overall-latency", m_measure_overall_latency, "Measure overall latency")
@@ -341,6 +421,7 @@ void Run2::add_net_app_subcom()
// NOTE: fallthrough() is not a must here but it is also not working (causing only a single vstream param
// instead of >1). Debug - App.hpp::void _parse(std::vector<std::string> &args)
add_subcommand(net_app);
// TODO: set _autocomplete based on m_mode (HRT-9886)
}
const std::vector<NetworkParams>& Run2::get_network_params()
@@ -368,6 +449,16 @@ bool Run2::get_measure_temp()
return m_measure_temp;
}
bool Run2::get_measure_hw_latency()
{
return m_measure_hw_latency;
}
bool Run2::get_measure_overall_latency()
{
return m_measure_overall_latency;
}
std::vector<hailo_device_id_t> Run2::get_dev_ids()
{
std::vector<hailo_device_id_t> res;
@@ -386,6 +477,13 @@ uint32_t Run2::get_device_count()
return m_device_count;
}
void Run2::set_inference_mode()
{
for (auto &params : m_network_params) {
params.mode = m_mode;
}
}
void Run2::set_scheduling_algorithm(hailo_scheduling_algorithm_t scheduling_algorithm)
{
for (auto &params: m_network_params) {
@@ -411,6 +509,15 @@ const std::string &Run2::get_group_id()
return m_group_id;
}
InferenceMode Run2::get_mode() const
{
return m_mode;
}
const std::string &Run2::get_output_json_path()
{
return m_stats_json_path;
}
/** Run2Command */
Run2Command::Run2Command(CLI::App &parent_app) : Command(parent_app.add_subcommand(std::make_shared<Run2>()))
@@ -437,10 +544,27 @@ bool is_valid_ip(const std::string &ip)
IS_FIT_IN_UINT8(a) && IS_FIT_IN_UINT8(b) && IS_FIT_IN_UINT8(c) && IS_FIT_IN_UINT8(d);
}
std::string get_str_infer_mode(const InferenceMode& infer_mode)
{
switch(infer_mode){
case InferenceMode::FULL:
return "full";
case InferenceMode::RAW:
return "raw";
case InferenceMode::RAW_ASYNC:
return "raw_async";
case InferenceMode::RAW_ASYNC_SINGLE_THREAD:
return "raw_async_single_thread";
}
return "<Unknown>";
}
hailo_status Run2Command::execute()
{
Run2 *app = reinterpret_cast<Run2*>(m_app);
app->set_inference_mode();
app->set_measure_latency();
if (0 == app->get_network_params().size()) {
@@ -450,8 +574,12 @@ hailo_status Run2Command::execute()
if (1 == app->get_network_params().size()) {
LOGGER__WARN("\"hailortcli run2\" is in preview. It is recommended to use \"hailortcli run\" command for a single network group");
}
if (app->get_measure_hw_latency() || app->get_measure_overall_latency()) {
CHECK(1 == app->get_network_params().size(), HAILO_INVALID_OPERATION, "When latency measurement is enabled, only one model is allowed");
LOGGER__WARN("Measuring latency; frames are sent one at a time and FPS will not be measured");
}
hailo_vdevice_params_t vdevice_params = {};
hailo_vdevice_params_t vdevice_params{};
CHECK_SUCCESS(hailo_init_vdevice_params(&vdevice_params));
auto dev_ids = app->get_dev_ids();
if (!dev_ids.empty()) {
@@ -467,6 +595,12 @@ hailo_status Run2Command::execute()
} else {
vdevice_params.device_count = app->get_device_count();
}
// TODO: Async stream support for scheduler (HRT-9878)
if ((app->get_mode() == InferenceMode::RAW_ASYNC) || (app->get_mode() == InferenceMode::RAW_ASYNC_SINGLE_THREAD)) {
vdevice_params.scheduling_algorithm = HAILO_SCHEDULING_ALGORITHM_NONE;
CHECK(1 == app->get_network_params().size(), HAILO_INVALID_OPERATION, "Only one model is allowed with raw async inference mode");
app->set_scheduling_algorithm(HAILO_SCHEDULING_ALGORITHM_NONE);
}
vdevice_params.group_id = app->get_group_id().c_str();
vdevice_params.multi_process_service = app->get_multi_process_service();
@@ -482,40 +616,51 @@ hailo_status Run2Command::execute()
net_runners.emplace_back(net_runner.release());
}
auto live_printer = std::make_unique<LivePrinter>(std::chrono::seconds(1));
live_printer->add(std::make_shared<TimerLiveTrack>(app->get_time_to_run()), 0);
auto live_stats = std::make_unique<LiveStats>(std::chrono::seconds(1));
live_stats->add(std::make_shared<TimerLiveTrack>(app->get_time_to_run()), 0);
auto shutdown_event = Event::create_shared(Event::State::not_signalled);
CHECK_NOT_NULL(shutdown_event, HAILO_OUT_OF_HOST_MEMORY);
auto shutdown_event = Event::create(Event::State::not_signalled);
CHECK_EXPECTED_AS_STATUS(shutdown_event);
std::vector<AsyncThreadPtr<hailo_status>> threads;
Barrier barrier(net_runners.size() + 1); // We wait for all nets to finish activation + this thread to start sampling
Barrier activation_barrier(net_runners.size() + 1); // We wait for all nets to finish activation + this thread to start sampling
for (auto &net_runner : net_runners) {
threads.emplace_back(std::make_unique<AsyncThread<hailo_status>>("NG_INFER", [&net_runner, &shutdown_event,
&live_printer, &barrier](){
return net_runner->run(shutdown_event.value(), *live_printer, barrier);
&live_stats, &activation_barrier](){
return net_runner->run(shutdown_event, *live_stats, activation_barrier);
}));
}
auto signal_event_scope_guard = SignalEventScopeGuard(*shutdown_event);
auto physical_devices = vdevice.value()->get_physical_devices();
CHECK_EXPECTED_AS_STATUS(physical_devices);
for (auto &device : physical_devices.value()) {
auto measurement_live_track = MeasurementLiveTrack::create_shared(device.get(), app->get_measure_power(),
app->get_measure_current(), app->get_measure_temp());
if (HAILO_SUCCESS != measurement_live_track.status()) {
activation_barrier.terminate();
}
CHECK_EXPECTED_AS_STATUS(measurement_live_track);
live_printer->add(measurement_live_track.release(), 2);
live_stats->add(measurement_live_track.release(), 2);
}
// TODO: wait for all nets before starting timer. start() should update TimerLiveTrack to start. or maybe append here but first in vector...
barrier.arrive_and_wait();
CHECK_SUCCESS(live_printer->start());
activation_barrier.arrive_and_wait();
CHECK_SUCCESS(live_stats->start());
auto status = shutdown_event->wait(app->get_time_to_run());
if (HAILO_TIMEOUT != status) {
// if shutdown_event is signaled its because one of the send/recv threads failed
LOGGER__ERROR("Encountered error during inference. See log for more information.");
}
live_printer.reset(); // Ensures that the final print will include real values and not with values of when streams are already aborted.
if (!app->get_output_json_path().empty()){
live_stats->dump_stats(app->get_output_json_path(), get_str_infer_mode(app->get_mode()));
}
live_stats.reset(); // Ensures that the final print will include real values and not with values of when streams are already aborted.
shutdown_event->signal();
return wait_for_threads(threads);
}

View File

@@ -11,6 +11,10 @@
#define _HAILO_HAILORTCLI_RUN2_RUN2_COMMAND_HPP_
#include "../command.hpp"
#include "network_runner.hpp"
#include <type_traits>
class Run2Command : public Command {
public:
@@ -20,4 +24,71 @@ public:
private:
};
class IoApp : public CLI::App
{
public:
enum class Type {
STREAM,
VSTREAM
};
IoApp(const std::string &description, const std::string &name, Type type);
Type get_type() const;
const VStreamParams& get_vstream_params() const;
const StreamParams& get_stream_params() const;
protected:
Type m_type;
VStreamParams m_vstream_params;
StreamParams m_stream_params;
};
class NetworkApp : public CLI::App
{
public:
NetworkApp(const std::string &description, const std::string &name);
const NetworkParams& get_params();
private:
template <typename T>
CLI::App *add_io_app_subcom(const std::string &description, const std::string &name,
CLI::Option *hef_path_option, CLI::Option *net_group_name_option)
{
static_assert(std::is_base_of<IoApp, T>::value, "T is not a subclass of IoApp");
auto io_app = std::make_shared<T>(description, name, hef_path_option, net_group_name_option);
io_app->immediate_callback();
io_app->callback([this, description, name, io_app, hef_path_option, net_group_name_option]() {
if (io_app->get_type() == IoApp::Type::VSTREAM) {
auto vstream_params = io_app->get_vstream_params();
m_params.vstream_params.push_back(vstream_params);
} else {
auto stream_params = io_app->get_stream_params();
m_params.stream_params.push_back(stream_params);
}
// Throw an error if anything is left over and should not be.
_process_extras();
// NOTE: calling "net_app->clear(); m_params = NetworkParams();" is not sufficient because default values
// need to be re-set. we can override clear and reset them but there might be other issues as well
// and this one feels less hacky ATM
remove_subcommand(io_app.get());
// Remove from parsed_subcommands_ as well (probably a bug in CLI11)
parsed_subcommands_.erase(std::remove_if(
parsed_subcommands_.begin(), parsed_subcommands_.end(),
[io_app](auto x){return x == io_app.get();}),
parsed_subcommands_.end());
add_io_app_subcom<T>(description, name, hef_path_option, net_group_name_option);
});
// Must set fallthrough to support nested repeated subcommands.
io_app->fallthrough();
return add_subcommand(io_app);
}
NetworkParams m_params;
};
#endif /* _HAILO_HAILORTCLI_RUN2_RUN2_COMMAND_HPP_ */

View File

@@ -13,23 +13,18 @@
#include <sstream>
TimerLiveTrack::TimerLiveTrack(std::chrono::milliseconds duration) :
LivePrinter::Track(), m_duration(duration), m_start_time()
LiveStats::Track(), m_duration(duration), m_start_time()
{
}
hailo_status TimerLiveTrack::start()
hailo_status TimerLiveTrack::start_impl()
{
m_start_time = std::chrono::steady_clock::now();
m_started = true;
return HAILO_SUCCESS;
}
uint32_t TimerLiveTrack::get_text(std::stringstream &ss)
uint32_t TimerLiveTrack::push_text_impl(std::stringstream &ss)
{
if (!m_started) {
return 0;
}
static const uint32_t MAX_PROGRESS_BAR_WIDTH = 20;
auto elapsed_time = std::chrono::steady_clock::now() - m_start_time;
auto eta = std::chrono::seconds(std::max<int32_t>(0, static_cast<int32_t>(std::round(std::chrono::duration<double>(m_duration - elapsed_time).count())))); // std::chrono::round is from C++17
@@ -40,3 +35,10 @@ uint32_t TimerLiveTrack::get_text(std::stringstream &ss)
ss << fmt::format("[{:=>{}}{:{}}] {:>3}% {}\n", '>', progress_bar_width, "", MAX_PROGRESS_BAR_WIDTH - progress_bar_width, elapsed_percentage, CliCommon::duration_to_string(eta));
return 1;
}
void TimerLiveTrack::push_json_impl(nlohmann::ordered_json &json)
{
std::stringstream time_to_run;
time_to_run << std::fixed << std::setprecision(2) << std::round(std::chrono::duration<double>(m_duration).count()) << " seconds";
json["time_to_run"] = time_to_run.str();
}

View File

@@ -7,18 +7,19 @@
* @brief Timer live track
**/
#include "live_printer.hpp"
#include "live_stats.hpp"
#ifndef _HAILO_HAILORTCLI_RUN2_TIMER_LIVE_TRACK_HPP_
#define _HAILO_HAILORTCLI_RUN2_TIMER_LIVE_TRACK_HPP_
class TimerLiveTrack : public LivePrinter::Track
class TimerLiveTrack : public LiveStats::Track
{
public:
TimerLiveTrack(std::chrono::milliseconds duration);
virtual ~TimerLiveTrack() = default;
virtual hailo_status start() override;
virtual uint32_t get_text(std::stringstream &ss) override;
virtual hailo_status start_impl() override;
virtual uint32_t push_text_impl(std::stringstream &ss) override;
virtual void push_json_impl(nlohmann::ordered_json &json) override;
private:
std::chrono::milliseconds m_duration;

View File

@@ -133,7 +133,7 @@ public:
};
desc_function_ = []() {
return "\t\tInput file path/paths. On single input network, give the full path of the data file.\n\
return "\t\tInput file (.bin) path/paths. On single input network, give the full path of the data file.\n\
\t\tOn multiple inputs network, the format is input_name1=path1 input_name2=path2, where\n\
\t\tinput_name1 is the name of the input stream. If not given, random data will be used";
};

View File

@@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.0.0)
# set(CMAKE_C_CLANG_TIDY "clang-tidy;-checks=*")
set(HAILORT_MAJOR_VERSION 4)
set(HAILORT_MINOR_VERSION 13)
set(HAILORT_MINOR_VERSION 14)
set(HAILORT_REVISION_VERSION 0)
# Add the cmake folder so the modules there are found

View File

@@ -8,7 +8,7 @@ if(NOT CMAKE_HOST_UNIX)
message(FATAL_ERROR "Only unix hosts are supported, stopping build")
endif()
find_package(HailoRT 4.13.0 EXACT REQUIRED)
find_package(HailoRT 4.14.0 EXACT REQUIRED)
# GST_PLUGIN_DEFINE needs PACKAGE to be defined
set(GST_HAILO_PACKAGE_NAME "hailo")
@@ -36,6 +36,12 @@ set_property(TARGET gsthailo PROPERTY CXX_STANDARD 14)
set_target_properties(gsthailo PROPERTIES
PUBLIC_HEADER "gst-hailo/metadata/tensor_meta.hpp"
CXX_STANDARD 14
CXX_STANDARD_REQUIRED YES
CXX_EXTENSIONS NO
C_VISIBILITY_PRESET hidden
CXX_VISIBILITY_PRESET hidden
# VISIBILITY_INLINES_HIDDEN YES
)
target_compile_options(gsthailo PRIVATE

View File

@@ -48,7 +48,7 @@ using namespace hailort;
#define DEFAULT_VDEVICE_KEY (0)
#define MIN_VALID_VDEVICE_KEY (1)
#define HAILO_SUPPORTED_FORMATS "{ RGB, RGBA, YUY2, NV12, NV21, I420 }"
#define HAILO_SUPPORTED_FORMATS "{ RGB, RGBA, YUY2, NV12, NV21, I420, GRAY8 }"
#define HAILO_VIDEO_CAPS GST_VIDEO_CAPS_MAKE(HAILO_SUPPORTED_FORMATS)
#define HAILO_DEFAULT_SCHEDULER_TIMEOUT_MS (0)

View File

@@ -102,6 +102,7 @@ enum
PROP_SCHEDULING_ALGORITHM,
PROP_SCHEDULER_TIMEOUT_MS,
PROP_SCHEDULER_THRESHOLD,
PROP_SCHEDULER_PRIORITY,
PROP_MULTI_PROCESS_SERVICE,
PROP_INPUT_QUANTIZED,
PROP_OUTPUT_QUANTIZED,
@@ -187,6 +188,10 @@ static void gst_hailonet_class_init(GstHailoNetClass *klass)
g_object_class_install_property(gobject_class, PROP_SCHEDULER_THRESHOLD,
g_param_spec_uint("scheduler-threshold", "Frames threshold for scheduler", "The minimum number of send requests required before the hailonet is considered ready to get run time from the scheduler.",
HAILO_DEFAULT_SCHEDULER_THRESHOLD, std::numeric_limits<uint32_t>::max(), HAILO_DEFAULT_SCHEDULER_THRESHOLD, (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
g_object_class_install_property(gobject_class, PROP_SCHEDULER_PRIORITY,
g_param_spec_uint("scheduler-priority", "Priority index for scheduler", "When the scheduler will choose the next hailonet to run, higher priority will be prioritized in the selection. "
"Bigger number represent higher priority",
HAILO_SCHEDULER_PRIORITY_MIN, HAILO_SCHEDULER_PRIORITY_MAX, HAILO_SCHEDULER_PRIORITY_NORMAL, (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
g_object_class_install_property(gobject_class, PROP_MULTI_PROCESS_SERVICE,
g_param_spec_boolean("multi-process-service", "Should run over HailoRT service", "Controls wether to run HailoRT over its service. "
"To use this property, the service should be active and scheduling-algorithm should be set. Defaults to false.",
@@ -474,7 +479,7 @@ void HailoNetImpl::set_property(GObject *object, guint property_id, const GValue
break;
case PROP_SCHEDULER_TIMEOUT_MS:
if (m_was_configured) {
g_warning("The network was already configured so changing the scheduling algorithm will not take place!");
g_warning("The network was already configured so changing the scheduling timeout will not take place!");
break;
}
if (m_props.m_is_active.was_changed()) {
@@ -485,7 +490,7 @@ void HailoNetImpl::set_property(GObject *object, guint property_id, const GValue
break;
case PROP_SCHEDULER_THRESHOLD:
if (m_was_configured) {
g_warning("The network was already configured so changing the scheduling algorithm will not take place!");
g_warning("The network was already configured so changing the scheduling threshold will not take place!");
break;
}
if (m_props.m_is_active.was_changed()) {
@@ -494,6 +499,17 @@ void HailoNetImpl::set_property(GObject *object, guint property_id, const GValue
}
m_props.m_scheduler_threshold = g_value_get_uint(value);
break;
case PROP_SCHEDULER_PRIORITY:
if (m_was_configured) {
g_warning("The network was already configured so changing the scheduling priority will not take place!");
break;
}
if (m_props.m_is_active.was_changed()) {
g_error("scheduler usage (scheduler-priority) in combination with 'is-active' is not supported.");
break;
}
m_props.m_scheduler_priority = static_cast<guint8>(g_value_get_uint(value));
break;
case PROP_MULTI_PROCESS_SERVICE:
if (m_was_configured) {
g_warning("The network was already configured so changing the multi-process-service property will not take place!");
@@ -596,6 +612,9 @@ void HailoNetImpl::get_property(GObject *object, guint property_id, GValue *valu
case PROP_SCHEDULER_THRESHOLD:
g_value_set_uint(value, m_props.m_scheduler_threshold.get());
break;
case PROP_SCHEDULER_PRIORITY:
g_value_set_uint(value, m_props.m_scheduler_priority.get());
break;
case PROP_MULTI_PROCESS_SERVICE:
g_value_set_boolean(value, m_props.m_multi_process_service.get());
break;
@@ -696,6 +715,10 @@ hailo_status HailoNetImpl::configure_network_group()
status = m_net_group_handle->set_scheduler_threshold(m_props.m_network_name.get(), m_props.m_scheduler_threshold.get());
GST_CHECK_SUCCESS(status, m_element, RESOURCE, "Setting scheduler threshold failed, status = %d", status);
}
if (m_props.m_scheduler_priority.was_changed()) {
status = m_net_group_handle->set_scheduler_priority(m_props.m_network_name.get(), m_props.m_scheduler_priority.get());
GST_CHECK_SUCCESS(status, m_element, RESOURCE, "Setting scheduler priority failed, status = %d", status);
}
auto vstreams = m_net_group_handle->create_vstreams(m_props.m_network_name.get(), m_props.m_scheduling_algorithm.get(), m_output_formats, static_cast<bool>(m_props.m_input_quantized.get()),
static_cast<bool>(m_props.m_output_quantized.get()), m_props.m_input_format_type.get(), m_props.m_output_format_type.get());

View File

@@ -53,7 +53,7 @@ struct HailoNetProperties final
public:
HailoNetProperties() : m_device_id(nullptr), m_hef_path(nullptr), m_network_name(nullptr), m_batch_size(HAILO_DEFAULT_BATCH_SIZE),
m_is_active(false), m_device_count(0), m_vdevice_key(DEFAULT_VDEVICE_KEY), m_scheduling_algorithm(HAILO_SCHEDULING_ALGORITHM_ROUND_ROBIN),
m_scheduler_timeout_ms(HAILO_DEFAULT_SCHEDULER_TIMEOUT_MS), m_scheduler_threshold(HAILO_DEFAULT_SCHEDULER_THRESHOLD),
m_scheduler_timeout_ms(HAILO_DEFAULT_SCHEDULER_TIMEOUT_MS), m_scheduler_threshold(HAILO_DEFAULT_SCHEDULER_THRESHOLD), m_scheduler_priority(HAILO_SCHEDULER_PRIORITY_NORMAL),
m_multi_process_service(HAILO_DEFAULT_MULTI_PROCESS_SERVICE), m_input_quantized(true), m_output_quantized(true), m_input_format_type(HAILO_FORMAT_TYPE_AUTO),
m_output_format_type(HAILO_FORMAT_TYPE_AUTO)
@@ -69,6 +69,7 @@ public:
HailoElemProperty<hailo_scheduling_algorithm_t> m_scheduling_algorithm;
HailoElemProperty<guint32> m_scheduler_timeout_ms;
HailoElemProperty<guint32> m_scheduler_threshold;
HailoElemProperty<guint8> m_scheduler_priority;
HailoElemProperty<gboolean> m_multi_process_service;
HailoElemProperty<gboolean> m_input_quantized;
HailoElemProperty<gboolean> m_output_quantized;

View File

@@ -234,9 +234,11 @@ hailo_status HailoRecvImpl::read_from_vstreams(bool should_print_latency)
std::chrono::duration<double, std::milli> latency = std::chrono::system_clock::now() - start_time;
GST_DEBUG("%s latency: %f milliseconds", output_info.vstream().name().c_str(), latency.count());
}
GST_CHECK_SUCCESS(status, m_element, STREAM, "Reading from vstream failed, status = %d", status);
gst_buffer_unmap(*buffer, &buffer_info);
if (HAILO_STREAM_ABORTED_BY_USER == status) {
return status;
}
GST_CHECK_SUCCESS(status, m_element, STREAM, "Reading from vstream failed, status = %d", status);
}
if (should_print_latency) {

View File

@@ -30,6 +30,7 @@ GST_DEBUG_CATEGORY_STATIC(gst_hailosend_debug_category);
#define GST_CAT_DEFAULT gst_hailosend_debug_category
#define RGB_FEATURES_SIZE (3)
#define RGBA_FEATURES_SIZE (4)
#define GRAY8_FEATURES_SIZE (1)
#define YUY2_FEATURES_SIZE (2)
#define NV12_FEATURES_SIZE (3)
#define NV21_FEATURES_SIZE (3)
@@ -65,7 +66,7 @@ static void gst_hailosend_class_init(GstHailoSendClass *klass)
gst_pad_template_new("sink", GST_PAD_SINK, GST_PAD_ALWAYS, gst_caps_from_string(HAILO_VIDEO_CAPS)));
gst_element_class_set_static_metadata(GST_ELEMENT_CLASS(klass),
"hailosend element", "Hailo/Filter/Video", "Send RGB/RGBA/YUY2/NV12/NV21/I420 video to HailoRT", PLUGIN_AUTHOR);
"hailosend element", "Hailo/Filter/Video", "Send RGB/RGBA/GRAY8/YUY2/NV12/NV21/I420 video to HailoRT", PLUGIN_AUTHOR);
element_class->change_state = GST_DEBUG_FUNCPTR(gst_hailosend_change_state);
@@ -212,15 +213,28 @@ GstCaps *HailoSendImpl::get_caps(GstBaseTransform */*trans*/, GstPadDirection /*
format = "RGBA";
break;
}
else if (m_input_vstream_infos[0].shape.features == GRAY8_FEATURES_SIZE)
{
format = "GRAY8";
break;
}
/* Fallthrough */
case HAILO_FORMAT_ORDER_NHCW:
case HAILO_FORMAT_ORDER_FCR:
case HAILO_FORMAT_ORDER_F8CR:
if (m_input_vstream_infos[0].shape.features == GRAY8_FEATURES_SIZE)
{
format = "GRAY8";
break;
}
else
{
format = "RGB";
GST_CHECK(RGB_FEATURES_SIZE == m_input_vstream_infos[0].shape.features, NULL, m_element, STREAM,
"Features of input vstream %s is not %d for RGB format! (features=%d)", m_input_vstream_infos[0].name, RGB_FEATURES_SIZE,
m_input_vstream_infos[0].shape.features);
break;
}
case HAILO_FORMAT_ORDER_YUY2:
format = "YUY2";
GST_CHECK(YUY2_FEATURES_SIZE == m_input_vstream_infos[0].shape.features, NULL, m_element, STREAM,

View File

@@ -180,6 +180,11 @@ hailo_status NetworkGroupHandle::set_scheduler_threshold(const char *network_nam
return m_cng->set_scheduler_threshold(threshold, network_name);
}
hailo_status NetworkGroupHandle::set_scheduler_priority(const char *network_name, uint8_t priority)
{
return m_cng->set_scheduler_priority(priority, network_name);
}
Expected<std::pair<std::vector<InputVStream>, std::vector<OutputVStream>>> NetworkGroupHandle::create_vstreams(const char *network_name,
hailo_scheduling_algorithm_t scheduling_algorithm, const std::vector<hailo_format_with_name_t> &output_formats, bool input_quantized,
bool output_quantized, hailo_format_type_t input_format_type, hailo_format_type_t output_format_type)
@@ -294,10 +299,10 @@ Expected<std::shared_ptr<ConfiguredNetworkGroup>> NetworkGroupConfigManager::con
std::shared_ptr<ConfiguredNetworkGroup> found_cng = get_configured_network_group(device_id, hef->hash(), network_group_name, batch_size);
if (nullptr != found_cng) {
// If cng was already configured
auto infos = found_cng->get_network_infos();
GST_CHECK_EXPECTED(infos, element, RESOURCE, "Failed getting network infos");
if ((infos.release().size() > 1) || (scheduling_algorithm == HAILO_SCHEDULING_ALGORITHM_NONE)) {
// If cng was already configured
// But hailonet is not running all networks in the cng (or if not using scheduler) -
// Do not use multiplexer!
return found_cng;

View File

@@ -90,7 +90,7 @@ public:
hailo_status set_scheduler_timeout(const char *network_name, uint32_t timeout_ms);
hailo_status set_scheduler_threshold(const char *network_name, uint32_t threshold);
hailo_status set_scheduler_priority(const char *network_name, uint8_t priority);
std::shared_ptr<Hef> hef()
{

View File

@@ -1 +1,4 @@
cmake_minimum_required(VERSION 3.11.0)
include(externals/pybind11.cmake)
add_subdirectory(src)

View File

@@ -0,0 +1,35 @@
cmake_minimum_required(VERSION 3.11.0)
include(FetchContent)
if(NOT PYTHON_EXECUTABLE AND PYBIND11_PYTHON_VERSION)
# venv version is prioritized (instead of PYBIND11_PYTHON_VERSION) if PYTHON_EXECUTABLE is not set.
# See https://pybind11.readthedocs.io/en/stable/changelog.html#v2-6-0-oct-21-2020
if((${CMAKE_VERSION} VERSION_LESS "3.22.0") AND (NOT WIN32))
find_package(PythonInterp ${PYBIND11_PYTHON_VERSION} REQUIRED)
set(PYTHON_EXECUTABLE ${Python_EXECUTABLE})
else()
find_package(Python3 ${PYBIND11_PYTHON_VERSION} REQUIRED EXACT COMPONENTS Interpreter Development)
set(PYTHON_EXECUTABLE ${Python3_EXECUTABLE})
endif()
endif()
FetchContent_Declare(
pybind11
GIT_REPOSITORY https://github.com/pybind/pybind11.git
GIT_TAG 80dc998efced8ceb2be59756668a7e90e8bef917 # Version 2.10.1
#GIT_SHALLOW TRUE
SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/pybind11"
BINARY_DIR "${CMAKE_CURRENT_LIST_DIR}/pybind11"
)
if(NOT HAILO_OFFLINE_COMPILATION)
# https://stackoverflow.com/questions/65527126/disable-install-for-fetchcontent
FetchContent_GetProperties(pybind11)
if(NOT pybind11_POPULATED)
FetchContent_Populate(pybind11)
add_subdirectory(${pybind11_SOURCE_DIR} ${pybind11_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()
else()
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/pybind11 EXCLUDE_FROM_ALL)
endif()

View File

@@ -26,7 +26,7 @@ from hailo_platform.pyhailort.pyhailort import (HEF, ConfigureParams,
InputVStreams, OutputVStreams,
InferVStreams, HailoStreamDirection, HailoFormatFlags, HailoCpuId, Device, VDevice,
DvmTypes, PowerMeasurementTypes, SamplingPeriod, AveragingFactor, MeasurementBufferIndex,
HailoRTException, YOLOv5PostProcessOp, HailoSchedulingAlgorithm)
HailoRTException, HailoSchedulingAlgorithm, HailoRTStreamAbortedByUser)
def _verify_pyhailort_lib_exists():
python_version = "".join(str(i) for i in sys.version_info[:2])
@@ -62,4 +62,4 @@ __all__ = ['EthernetDevice', 'DvmTypes', 'PowerMeasurementTypes',
'MipiIspImageInOrder', 'MipiIspImageOutDataType', 'join_drivers_path', 'IspLightFrequency', 'HailoPowerMode',
'Endianness', 'HailoStreamInterface', 'InputVStreamParams', 'OutputVStreamParams',
'InputVStreams', 'OutputVStreams', 'InferVStreams', 'HailoStreamDirection', 'HailoFormatFlags', 'HailoCpuId',
'Device', 'VDevice', 'HailoRTException', 'YOLOv5PostProcessOp', 'HailoSchedulingAlgorithm']
'Device', 'VDevice', 'HailoRTException', 'HailoSchedulingAlgorithm', 'HailoRTStreamAbortedByUser']

View File

@@ -2,6 +2,7 @@
"""Control operations for the Hailo hardware device."""
from hailo_platform.common.logger.logger import default_logger
from hailo_platform.pyhailort.pyhailort import (Control, InternalPcieDevice, ExceptionWrapper, BoardInformation, # noqa F401
CoreInformation, DeviceArchitectureTypes, ExtendedDeviceInformation, # noqa F401
HealthInformation, SamplingPeriod, AveragingFactor, DvmTypes, # noqa F401
@@ -38,8 +39,7 @@ class UdpHcpControl(HcpControl):
"""
# In the C API we define the total amount of attempts, instead of the amount of retries.
# TODO: HRT-9987 - Add this deprecation warning
# default_logger().warning("UdpHcpControl is deprecated! Please Use Control object")
default_logger().warning("UdpHcpControl is deprecated! Please Use Control object")
max_number_of_attempts = retries + 1
response_timeout_milliseconds = int(response_timeout_seconds * 1000)
if device is None:
@@ -57,8 +57,8 @@ class PcieHcpControl(HcpControl):
def __init__(self, device=None, device_info=None):
"""Initializes a new HailoPcieController object."""
# TODO: HRT-9987 - Add this deprecation warning
# default_logger().warning("PcieHcpControl is deprecated! Please Use Control object")
default_logger().warning("PcieHcpControl is deprecated! Please Use Control object")
if device_info is None:
device_info = InternalPcieDevice.scan_devices()[0]

View File

@@ -27,8 +27,7 @@ class HailoHWObjectException(Exception):
class HailoHWObject(object):
# TODO: HRT-9987 - Add (deprecated) to this doc
"""Abstract Hailo hardware device representation"""
"""Abstract Hailo hardware device representation (deprecated)"""
NAME = InferenceTargets.UNINITIALIZED
IS_HARDWARE = True
@@ -44,8 +43,7 @@ class HailoHWObject(object):
self._is_device_used = False
self._hef_loaded = False
# TODO: HRT-9987 - Add this deprecation warning
# self._logger.warning("HailoHWObject is deprecated! Please use VDevice/Device object.")
self._logger.warning("HailoHWObject is deprecated! Please use VDevice/Device object.")
# TODO: HRT-6310 Remove this.
def __eq__(self, other):
@@ -53,17 +51,15 @@ class HailoHWObject(object):
@property
def name(self):
"""str: The name of this target. Valid values are defined by :class:`~hailo_platform.pyhailort.hw_object.InferenceTargets`"""
# TODO: HRT-9987 - Add this deprecation warning
# self._logger.warning("HailoHWObject name property is deprecated! Please use VDevice/Device object with device_id.")
"""str: The name of this target. Valid values are defined by :class:`~hailo_platform.pyhailort.hw_object.InferenceTargets` (deprecated)"""
self._logger.warning("HailoHWObject name property is deprecated! Please use VDevice/Device object with device_id.")
return type(self).NAME
@property
def is_hardware(self):
"""bool: Indicates this target runs on a physical hardware device."""
"""bool: Indicates this target runs on a physical hardware device. (deprecated)"""
# TODO: SDK should implement in Target
# TODO: HRT-9987 - Add this deprecation warning
# self._logger.warning("HailoHWObject is_hardware property is deprecated! Please use VDevice/Device object, or derive from it.")
self._logger.warning("HailoHWObject is_hardware property is deprecated! Please use VDevice/Device object, or derive from it.")
return type(self).IS_HARDWARE
@property
@@ -76,46 +72,42 @@ class HailoHWObject(object):
@property
def sorted_output_layer_names(self):
"""Getter for the property sorted_output_names.
"""Getter for the property sorted_output_names (deprecated).
Returns:
list of str: Sorted list of the output layer names.
"""
# TODO: HRT-9987 - Add this deprecation warning
# self._logger.warning("HailoHWObject sorted_output_layer_names property is deprecated! Please use ConfiguredNetwork get_sorted_output_names.")
self._logger.warning("HailoHWObject sorted_output_layer_names property is deprecated! Please use ConfiguredNetwork get_sorted_output_names.")
if len(self._loaded_network_groups) != 1:
raise HailoHWObjectException("Access to sorted_output_layer_names is only allowed when there is a single loaded network group")
return self._loaded_network_groups[0].get_sorted_output_names()
@contextmanager
def use_device(self, *args, **kwargs):
# TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoHWObject use_device context manager is deprecated! Please use VDevice/Device object.")
"""A context manager that wraps the usage of the device."""
"""A context manager that wraps the usage of the device. (deprecated)"""
self._logger.warning("HailoHWObject use_device context manager is deprecated! Please use VDevice/Device object.")
self._is_device_used = True
yield
self._is_device_used = False
def get_output_device_layer_to_original_layer_map(self):
"""Get a mapping between the device outputs to the layers' names they represent.
"""Get a mapping between the device outputs to the layers' names they represent (deprecated).
Returns:
dict: Keys are device output names and values are lists of layers' names.
"""
# TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoHWObject get_output_device_layer_to_original_layer_map function is deprecated!")
self._logger.warning("HailoHWObject get_output_device_layer_to_original_layer_map function is deprecated!")
if len(self._loaded_network_groups) != 1:
raise HailoHWObjectException("Access to layer names is only allowed when there is a single loaded network group")
return {stream_info.name : self._loaded_network_groups[0].get_vstream_names_from_stream_name(stream_info.name)
for stream_info in self.get_output_stream_infos()}
def get_original_layer_to_device_layer_map(self):
"""Get a mapping between the layer names and the device outputs that contain them.
"""Get a mapping between the layer names and the device outputs that contain them (deprecated).
Returns:
dict: Keys are the names of the layers and values are device outputs names.
"""
# TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoHWObject get_original_layer_to_device_layer_map function is deprecated!")
self._logger.warning("HailoHWObject get_original_layer_to_device_layer_map function is deprecated!")
if len(self._loaded_network_groups) != 1:
raise HailoHWObjectException("Access to layer names is only allowed when there is a single loaded network group")
return {vstream_info.name : self._loaded_network_groups[0].get_stream_names_from_vstream_name(vstream_info.name)
@@ -123,69 +115,61 @@ class HailoHWObject(object):
@property
def device_input_layers(self):
"""Get a list of the names of the device's inputs."""
# TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoHWObject device_input_layers function is deprecated! Please use ConfiguredNetwork object.")
"""Get a list of the names of the device's inputs. (deprecated)"""
self._logger.warning("HailoHWObject device_input_layers function is deprecated! Please use ConfiguredNetwork object.")
return [layer.name for layer in self.get_input_stream_infos()]
@property
def device_output_layers(self):
"""Get a list of the names of the device's outputs."""
# TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoHWObject device_output_layers function is deprecated! Please use ConfiguredNetwork object.")
"""Get a list of the names of the device's outputs. (deprecated)"""
self._logger.warning("HailoHWObject device_output_layers function is deprecated! Please use ConfiguredNetwork object.")
return [layer.name for layer in self.get_output_stream_infos()]
def hef_loaded(self):
"""Return True if this object has loaded the model HEF to the hardware device."""
"""Return True if this object has loaded the model HEF to the hardware device. (deprecated)"""
# TODO: SDK should implement in Target
# TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoHWObject hef_loaded function is deprecated! Please use VDevice/Device object, or derive from it.")
self._logger.warning("HailoHWObject hef_loaded function is deprecated! Please use VDevice/Device object, or derive from it.")
return self._hef_loaded
def outputs_count(self):
"""Return the amount of output tensors that are returned from the hardware device for every
input image.
input image (deprecated).
"""
# TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoHWObject outputs_count function is deprecated! Please use ConfiguredNetwork object.")
self._logger.warning("HailoHWObject outputs_count function is deprecated! Please use ConfiguredNetwork object.")
return len(self.get_output_vstream_infos())
def _clear_shapes(self):
# TODO: SDK should implement in Target
# TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoHWObject _clear_shapes function is deprecated! Please use ConfiguredNetwork object.")
self._logger.warning("HailoHWObject _clear_shapes function is deprecated! Please use ConfiguredNetwork object.")
self._hw_consts = None
@property
def model_name(self):
"""Get the name of the current model.
"""Get the name of the current model (deprecated).
Returns:
str: Model name.
"""
# TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoHWObject model_name property is deprecated! Please use ConfiguredNetwork object.")
self._logger.warning("HailoHWObject model_name property is deprecated! Please use ConfiguredNetwork object.")
if len(self._loaded_network_groups) == 1:
return self._loaded_network_groups[0].name
raise HailoHWObjectException(
"This function is only supported when there is exactly 1 loaded network group. one should use HEF.get_network_group_names() / ConfiguredNetwork.name / ActivatedNetwork.name")
def get_output_shapes(self):
"""Get the model output shapes, as returned to the user (without any hardware padding).
"""Get the model output shapes, as returned to the user (without any hardware padding) (deprecated).
Returns:
Tuple of output shapes, sorted by the output names.
"""
# TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoHWObject get_output_shapes function is deprecated! Please use ConfiguredNetwork object.")
self._logger.warning("HailoHWObject get_output_shapes function is deprecated! Please use ConfiguredNetwork object.")
if len(self._loaded_network_groups) != 1:
raise HailoHWObjectException("Calling get_output_shapes is only allowed when there is a single loaded network group")
return self._loaded_network_groups[0].get_output_shapes()
class HailoChipObject(HailoHWObject):
# TODO: HRT-9987 - Add (deprecated) to this docs
"""Hailo hardware device representation"""
"""Hailo hardware device representation (deprecated)"""
def __init__(self):
"""Create the Hailo Chip hardware object."""
@@ -208,17 +192,16 @@ class HailoChipObject(HailoHWObject):
return self._control_object
def get_all_input_layers_dtype(self):
"""Get the model inputs dtype.
"""Get the model inputs dtype (deprecated).
Returns:
dict of :obj:'numpy.dtype': where the key is model input_layer name, and the value is dtype as the device expect to get for this input.
"""
# TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoChipObject get_all_input_layers_dtype function is deprecated! Please use ConfiguredNetwork object.")
self._logger.warning("HailoChipObject get_all_input_layers_dtype function is deprecated! Please use ConfiguredNetwork object.")
return {stream.name: HailoRTTransformUtils.get_dtype(stream.data_bytes) for stream in self.get_input_stream_infos()}
def get_input_vstream_infos(self, network_name=None):
"""Get input vstreams information of a specific network group.
"""Get input vstreams information of a specific network group (deprecated).
Args:
network_name (str, optional): The name of the network to access. In case not given, all the networks in the network group will be addressed.
@@ -227,14 +210,13 @@ class HailoChipObject(HailoHWObject):
If there is exactly one configured network group, returns a list of
:obj:`hailo_platform.pyhailort._pyhailort.VStreamInfo`: with all the information objects of all input vstreams
"""
# TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoChipObject get_input_vstream_infos function is deprecated! Please use ConfiguredNetwork object.")
self._logger.warning("HailoChipObject get_input_vstream_infos function is deprecated! Please use ConfiguredNetwork object.")
if len(self._loaded_network_groups) != 1:
raise HailoHWObjectException("Access to network vstream info is only allowed when there is a single loaded network group")
return self._loaded_network_groups[0].get_input_vstream_infos(network_name=network_name)
def get_output_vstream_infos(self, network_name=None):
"""Get output vstreams information of a specific network group.
"""Get output vstreams information of a specific network group (deprecated).
Args:
network_name (str, optional): The name of the network to access. In case not given, all the networks in the network group will be addressed.
@@ -243,14 +225,13 @@ class HailoChipObject(HailoHWObject):
If there is exactly one configured network group, returns a list of
:obj:`hailo_platform.pyhailort._pyhailort.VStreamInfo`: with all the information objects of all output vstreams
"""
# TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoChipObject get_output_vstream_infos function is deprecated! Please use ConfiguredNetwork object.")
self._logger.warning("HailoChipObject get_output_vstream_infos function is deprecated! Please use ConfiguredNetwork object.")
if len(self._loaded_network_groups) != 1:
raise HailoHWObjectException("Access to network vstream info is only allowed when there is a single loaded network group")
return self._loaded_network_groups[0].get_output_vstream_infos(network_name=network_name)
def get_all_vstream_infos(self, network_name=None):
"""Get input and output vstreams information.
"""Get input and output vstreams information (deprecated).
Args:
network_name (str, optional): The name of the network to access. In case not given, all the networks in the network group will be addressed.
@@ -259,14 +240,13 @@ class HailoChipObject(HailoHWObject):
If there is exactly one configured network group, returns a list of
:obj:`hailo_platform.pyhailort._pyhailort.VStreamInfo`: with all the information objects of all input and output vstreams
"""
# TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoChipObject get_all_vstream_infos function is deprecated! Please use ConfiguredNetwork object.")
self._logger.warning("HailoChipObject get_all_vstream_infos function is deprecated! Please use ConfiguredNetwork object.")
if len(self._loaded_network_groups) != 1:
raise HailoHWObjectException("Access to network vstream info is only allowed when there is a single loaded network group")
return self._loaded_network_groups[0].get_all_vstream_infos(network_name=network_name)
def get_input_stream_infos(self, network_name=None):
"""Get the input low-level streams information of a specific network group.
"""Get the input low-level streams information of a specific network group (deprecated).
Args:
network_name (str, optional): The name of the network to access. In case not given, all the networks in the network group will be addressed.
@@ -276,14 +256,13 @@ class HailoChipObject(HailoHWObject):
:obj:`hailo_platform.pyhailort._pyhailort.VStreamInfo`: with information objects
of all input low-level streams.
"""
# TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoChipObject get_input_stream_infos function is deprecated! Please use ConfiguredNetwork object.")
self._logger.warning("HailoChipObject get_input_stream_infos function is deprecated! Please use ConfiguredNetwork object.")
if len(self._loaded_network_groups) != 1:
raise HailoHWObjectException("Access to network stream info is only allowed when there is a single loaded network group")
return self._loaded_network_groups[0].get_input_stream_infos(network_name=network_name)
def get_output_stream_infos(self, network_name=None):
"""Get the output low-level streams information of a specific network group.
"""Get the output low-level streams information of a specific network group (deprecated).
Args:
network_name (str, optional): The name of the network to access. In case not given, all the networks in the network group will be addressed.
@@ -293,14 +272,13 @@ class HailoChipObject(HailoHWObject):
:obj:`hailo_platform.pyhailort._pyhailort.VStreamInfo`: with information objects
of all output low-level streams.
"""
# TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoChipObject get_output_stream_infos function is deprecated! Please use ConfiguredNetwork object.")
self._logger.warning("HailoChipObject get_output_stream_infos function is deprecated! Please use ConfiguredNetwork object.")
if len(self._loaded_network_groups) != 1:
raise HailoHWObjectException("Access to network stream info is only allowed when there is a single loaded network group")
return self._loaded_network_groups[0].get_output_stream_infos(network_name=network_name)
def get_all_stream_infos(self, network_name=None):
"""Get input and output streams information of a specific network group.
"""Get input and output streams information of a specific network group (deprecated).
Args:
network_name (str, optional): The name of the network to access. In case not given, all the networks in the network group will be addressed.
@@ -309,8 +287,7 @@ class HailoChipObject(HailoHWObject):
If there is exactly one configured network group, returns a list of
:obj:`hailo_platform.pyhailort._pyhailort.StreamInfo`: with all the information objects of all input and output streams
"""
# TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoChipObject get_all_stream_infos function is deprecated! Please use ConfiguredNetwork object.")
self._logger.warning("HailoChipObject get_all_stream_infos function is deprecated! Please use ConfiguredNetwork object.")
if len(self._loaded_network_groups) != 1:
raise HailoHWObjectException("Access to network stream info is only allowed when there is a single loaded network group")
return self._loaded_network_groups[0].get_all_stream_infos(network_name=network_name)
@@ -339,12 +316,12 @@ class HailoChipObject(HailoHWObject):
raise HailoRTException("Device can only be configured from the process it was created in.")
configured_apps = self.control.configure(hef, configure_params_by_name)
self._hef_loaded = True
configured_networks = [ConfiguredNetwork(configured_app, self, hef) for configured_app in configured_apps]
configured_networks = [ConfiguredNetwork(configured_app) for configured_app in configured_apps]
self._loaded_network_groups.extend(configured_networks)
return configured_networks
def get_input_shape(self, name=None):
"""Get the input shape (not padded) of a network.
"""Get the input shape (not padded) of a network (deprecated).
Args:
name (str, optional): The name of the desired input. If a name is not provided, return
@@ -353,8 +330,7 @@ class HailoChipObject(HailoHWObject):
Returns:
Tuple of integers representing the input_shape.
"""
# TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoChipObject get_input_shape function is deprecated! Please use ConfiguredNetwork object.")
self._logger.warning("HailoChipObject get_input_shape function is deprecated! Please use ConfiguredNetwork object.")
if name is None:
name = self.get_input_vstream_infos()[0].name
@@ -366,7 +342,7 @@ class HailoChipObject(HailoHWObject):
[input_vstream.name for input_vstream in self.get_input_vstream_infos()]))
def get_index_from_name(self, name):
"""Get the index in the output list from the name.
"""Get the index in the output list from the name (deprecated).
Args:
name (str): The name of the output.
@@ -374,8 +350,7 @@ class HailoChipObject(HailoHWObject):
Returns:
int: The index of the layer name in the output list.
"""
# TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoChipObject get_index_from_name function is deprecated! Please use ConfiguredNetwork object.")
self._logger.warning("HailoChipObject get_index_from_name function is deprecated! Please use ConfiguredNetwork object.")
try:
return self.sorted_output_layer_names.index(name)
except ValueError:
@@ -398,8 +373,7 @@ class HailoChipObject(HailoHWObject):
class EthernetDevice(HailoChipObject):
# TODO: HRT-9987 - Add (deprecated) to this docs
"""Represents any Hailo hardware device that supports UDP control and dataflow"""
"""Represents any Hailo hardware device that supports UDP control and dataflow (deprecated)"""
NAME = InferenceTargets.UDP_CONTROLLER
@@ -417,6 +391,8 @@ class EthernetDevice(HailoChipObject):
super(EthernetDevice, self).__init__()
self._logger.warning("EthernetDevice is deprecated! Please use VDevice/Device object.")
gc.collect()
self._remote_ip = remote_ip
@@ -442,8 +418,7 @@ class EthernetDevice(HailoChipObject):
Returns:
list of str: IPs of scanned devices.
"""
# TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# default_logger().warning("EthernetDevice scan_devices method is deprecated! Please use scan() of Device object.")
default_logger().warning("EthernetDevice scan_devices method is deprecated! Please use scan() of Device object.")
udp_scanner = HailoUdpScan()
return udp_scanner.scan_devices(interface_name, timeout_seconds=timeout_seconds)
@@ -463,15 +438,13 @@ class EthernetDevice(HailoChipObject):
@property
def remote_ip(self):
"""Return the IP of the remote device."""
# TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("EthernetDevice remote_ip method is deprecated! Please use VDevice/Device object.")
"""Return the IP of the remote device (deprecated)."""
self._logger.warning("EthernetDevice remote_ip method is deprecated! Please use VDevice/Device object.")
return self._remote_ip
class PcieDevice(HailoChipObject):
# TODO: HRT-9987 - Add (deprecated) to this docs
"""Hailo PCIe production device representation"""
"""Hailo PCIe production device representation (deprecated)"""
NAME = InferenceTargets.PCIE_CONTROLLER
@@ -486,8 +459,7 @@ class PcieDevice(HailoChipObject):
:func:`PcieDevice.scan_devices` to get list of all available devices.
"""
super(PcieDevice, self).__init__()
# TODO: HRT-9987 - Add this deprecation warning
# self._logger.warning("PcieDevice is deprecated! Please use VDevice/Device object.")
self._logger.warning("PcieDevice is deprecated! Please use VDevice/Device object.")
gc.collect()
# PcieDevice __del__ function tries to release self._device.
@@ -506,13 +478,12 @@ class PcieDevice(HailoChipObject):
@staticmethod
def scan_devices():
"""Scans for all pcie devices on the system.
"""Scans for all pcie devices on the system (deprecated).
Returns:
list of :obj:`hailo_platform.pyhailort.pyhailort.PcieDeviceInfo`
"""
# TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# default_logger().warning("PcieDevice scan_devices method is deprecated! Please use Device object.")
default_logger().warning("PcieDevice scan_devices method is deprecated! Please use Device object.")
return InternalPcieDevice.scan_devices()
def _open_device(self, device_info):

View File

@@ -29,8 +29,7 @@ from hailo_platform.pyhailort._pyhailort import (TemperatureInfo, # noqa F401
MipiClockSelection, MipiIspImageInOrder,
MipiIspImageOutDataType, IspLightFrequency,
BootSource, HailoSocketDefs, Endianness,
MipiInputStreamParams, SensorConfigTypes,
SensorConfigOpCode)
MipiInputStreamParams, SensorConfigTypes)
BBOX_PARAMS = _pyhailort.HailoRTDefaults.BBOX_PARAMS()
HAILO_DEFAULT_ETH_CONTROL_PORT = _pyhailort.HailoRTDefaults.HAILO_DEFAULT_ETH_CONTROL_PORT()
@@ -75,6 +74,9 @@ class HailoRTTimeout(HailoRTException):
class HailoRTStreamAborted(HailoRTException):
pass
class HailoRTStreamAbortedByUser(HailoRTException):
pass
class HailoRTInvalidOperationException(HailoRTException):
pass
@@ -127,6 +129,8 @@ class ExceptionWrapper(object):
raise HailoRTTimeout("Received a timeout - hailort has failed because a timeout had occurred") from libhailort_exception
if string_error_code == "HAILO_STREAM_ABORTED_BY_HW":
raise HailoRTStreamAborted("Stream aborted due to an external event") from libhailort_exception
if string_error_code == "HAILO_STREAM_ABORTED_BY_USER":
raise HailoRTStreamAbortedByUser("Stream was aborted by user") from libhailort_exception
if string_error_code == "HAILO_INVALID_OPERATION":
raise HailoRTInvalidOperationException("Invalid operation. See hailort.log for more information") from libhailort_exception
@@ -170,23 +174,26 @@ class HailoUdpScan(object):
return device_ip_addresses
class TrafficControl(object):
class NetworkRateLimiter(object):
def __init__(self, ip, port, rate_bytes_per_sec):
if sys.platform != 'linux':
raise HailoRTInvalidOperationException('TrafficControl is supported only on UNIX os')
with ExceptionWrapper():
self._tc_util = _pyhailort.TrafficControlUtil(ip, port, int(rate_bytes_per_sec))
raise HailoRTInvalidOperationException('NetworkRateLimiter is supported only on UNIX os')
self._ip = ip
self._port = port
self._rate_bytes_per_sec = rate_bytes_per_sec
def set_rate_limit(self):
self._tc_util.set_rate_limit()
with ExceptionWrapper():
return _pyhailort.NetworkRateLimiter.set_rate_limit(self._ip, self._port, self._rate_bytes_per_sec)
def reset_rate_limit(self):
self._tc_util.reset_rate_limit()
with ExceptionWrapper():
return _pyhailort.NetworkRateLimiter.reset_rate_limit(self._ip, self._port)
def get_interface_name(ip):
"get the interface corresponding to the given ip"
with ExceptionWrapper():
return _pyhailort.TrafficControlUtil.get_interface_name(ip)
return _pyhailort.NetworkRateLimiter.get_interface_name(ip)
class ConfigureParams(object):
@@ -524,15 +531,13 @@ class HEF(object):
class ConfiguredNetwork(object):
"""Represents a network group loaded to the device."""
def __init__(self, configured_network, target, hef):
def __init__(self, configured_network):
self._configured_network = configured_network
self._input_vstreams_holders = []
self._output_vstreams_holders = []
self._target = target
self._hef = hef
def get_networks_names(self):
return self._hef.get_networks_names(self.name)
return self._configured_network.get_networks_names()
def activate(self, network_group_params=None):
"""Activate this network group in order to infer data through it.
@@ -544,14 +549,18 @@ class ConfiguredNetwork(object):
Returns:
:class:`ActivatedNetworkContextManager`: Context manager that returns the activated
network group.
"""
# TODO: HRT-9988 - Add deprecation warning when changing to service by default
network_group_params = network_group_params or self.create_params()
Note:
Usage of `activate` when scheduler enabled is deprecated. On this case, this function will return None and print deprecation warning.
"""
if self._configured_network.is_scheduled():
default_logger().warning("Calls to `activate()` when working with scheduler are deprecated! On future versions this call will raise an error.")
return EmptyContextManager()
network_group_params = network_group_params or self.create_params()
with ExceptionWrapper():
return ActivatedNetworkContextManager(self,
self._configured_network.activate(network_group_params),
self._target, self._hef)
self._configured_network.activate(network_group_params))
def wait_for_activation(self, timeout_ms=None):
"""Block until activated, or until ``timeout_ms`` is passed.
@@ -590,7 +599,7 @@ class ConfiguredNetwork(object):
return tuple(results)
def get_sorted_output_names(self):
return self._hef.get_sorted_output_names(self.name)
return self._configured_network.get_sorted_output_names()
def get_input_vstream_infos(self, network_name=None):
"""Get input vstreams information.
@@ -602,8 +611,8 @@ class ConfiguredNetwork(object):
list of :obj:`hailo_platform.pyhailort._pyhailort.VStreamInfo`: with all the information objects of all input vstreams
"""
name = network_name if network_name is not None else self.name
return self._hef.get_input_vstream_infos(name)
name = network_name if network_name is not None else ""
return self._configured_network.get_input_vstream_infos(name)
def get_output_vstream_infos(self, network_name=None):
"""Get output vstreams information.
@@ -615,8 +624,8 @@ class ConfiguredNetwork(object):
list of :obj:`hailo_platform.pyhailort._pyhailort.VStreamInfo`: with all the information objects of all output vstreams
"""
name = network_name if network_name is not None else self.name
return self._hef.get_output_vstream_infos(name)
name = network_name if network_name is not None else ""
return self._configured_network.get_output_vstream_infos(name)
def get_all_vstream_infos(self, network_name=None):
"""Get input and output vstreams information.
@@ -628,8 +637,8 @@ class ConfiguredNetwork(object):
list of :obj:`hailo_platform.pyhailort._pyhailort.VStreamInfo`: with all the information objects of all input and output vstreams
"""
name = network_name if network_name is not None else self.name
return self._hef.get_all_vstream_infos(name)
name = network_name if network_name is not None else ""
return self._configured_network.get_all_vstream_infos(name)
def get_input_stream_infos(self, network_name=None):
"""Get the input low-level streams information of a specific network group.
@@ -642,8 +651,8 @@ class ConfiguredNetwork(object):
of all input low-level streams.
"""
name = network_name if network_name is not None else self.name
return self._hef.get_input_stream_infos(name)
name = network_name if network_name is not None else ""
return self._configured_network.get_input_stream_infos(name)
def get_output_stream_infos(self, network_name=None):
"""Get the output low-level streams information of a specific network group.
@@ -656,8 +665,8 @@ class ConfiguredNetwork(object):
of all output low-level streams.
"""
name = network_name if network_name is not None else self.name
return self._hef.get_output_stream_infos(name)
name = network_name if network_name is not None else ""
return self._configured_network.get_output_stream_infos(name)
def get_all_stream_infos(self, network_name=None):
"""Get input and output streams information of a specific network group.
@@ -669,8 +678,8 @@ class ConfiguredNetwork(object):
list of :obj:`hailo_platform.pyhailort._pyhailort.StreamInfo`: with all the information objects of all input and output streams
"""
name = network_name if network_name is not None else self.name
return self._hef.get_all_stream_infos(name)
name = network_name if network_name is not None else ""
return self._configured_network.get_all_stream_infos(name)
def get_udp_rates_dict(self, fps, max_supported_rate_bytes):
with ExceptionWrapper():
@@ -720,7 +729,7 @@ class ConfiguredNetwork(object):
list of str: All the underlying streams names for the provided vstream name.
"""
with ExceptionWrapper():
return self._hef.get_stream_names_from_vstream_name(vstream_name, self.name)
return self._configured_network.get_stream_names_from_vstream_name(vstream_name)
def get_vstream_names_from_stream_name(self, stream_name):
"""Get vstream names list from their underlying stream name for a specific network group.
@@ -732,7 +741,7 @@ class ConfiguredNetwork(object):
list of str: All the matching vstream names for the provided stream name.
"""
with ExceptionWrapper():
return self._hef.get_vstream_names_from_stream_name(stream_name, self.name)
return self._configured_network.get_vstream_names_from_stream_name(stream_name)
def set_scheduler_timeout(self, timeout_ms, network_name=None):
"""Sets the maximum time period that may pass before getting run time from the scheduler,
@@ -767,19 +776,29 @@ class ConfiguredNetwork(object):
return self._configured_network.set_scheduler_priority(priority)
class EmptyContextManager(object):
"""An empty context manager that returns instead of activated network group when scheduler is enabled`."""
def __init__(self):
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
class ActivatedNetworkContextManager(object):
"""A context manager that returns the activated network group upon enter."""
def __init__(self, configured_network, activated_network, target, hef):
def __init__(self, configured_network, activated_network):
self._configured_network = configured_network
self._activated_network = activated_network
self._target = target
self._hef = hef
def __enter__(self):
with ExceptionWrapper():
activated_network_group = ActivatedNetwork(self._configured_network, self._activated_network.__enter__(), self._target,
self._hef)
activated_network_group = ActivatedNetwork(self._configured_network, self._activated_network.__enter__())
return activated_network_group
def __exit__(self, *args):
@@ -789,17 +808,11 @@ class ActivatedNetworkContextManager(object):
class ActivatedNetwork(object):
"""The network group that is currently activated for inference."""
def __init__(self, configured_network, activated_network, target, hef):
def __init__(self, configured_network, activated_network):
self._configured_network = configured_network
self._activated_network = activated_network
self._target = target
self._hef = hef
self._last_number_of_invalid_frames_read = 0
@property
def target(self):
return self._target
@property
def name(self):
return self._configured_network.name
@@ -826,7 +839,7 @@ class ActivatedNetwork(object):
raise HailoRTException("There are {} invalid frames.".format(number_of_invalid_frames))
def get_sorted_output_names(self):
return self._hef.get_sorted_output_names(self.name)
return self._configured_network.get_sorted_output_names()
def _get_intermediate_buffer(self, src_context_index, src_stream_index):
with ExceptionWrapper():
@@ -859,7 +872,6 @@ class InferVStreams(object):
``[class_count, BBOX_PARAMS, detections_count]`` padded with empty bboxes.
"""
self._logger = default_logger()
self._configured_net_group = configured_net_group
self._net_group_name = configured_net_group.name
self._input_vstreams_params = input_vstreams_params
@@ -895,8 +907,9 @@ class InferVStreams(object):
network_name = self._input_name_to_network_name[input_name]
if (network_name not in already_seen_networks) :
already_seen_networks.add(network_name)
output_vstream_infos = self._configured_net_group.get_output_vstream_infos()
for output_name in self._network_name_to_outputs[network_name]:
output_buffers_info[output_name] = OutputLayerUtils(self._configured_net_group._hef, output_name, self._infer_pipeline,
output_buffers_info[output_name] = OutputLayerUtils(output_vstream_infos, output_name, self._infer_pipeline,
self._net_group_name)
output_tensor_info = output_buffers_info[output_name].output_tensor_info
shape, dtype = output_tensor_info
@@ -920,7 +933,7 @@ class InferVStreams(object):
are output data tensors as :obj:`numpy.ndarray` (or list of :obj:`numpy.ndarray` in case of nms output and tf_nms_format=False).
"""
time_before_infer_calcs = time.time()
time_before_infer_calcs = time.perf_counter()
if not isinstance(input_data, dict):
input_stream_infos = self._configured_net_group.get_input_stream_infos()
if len(input_stream_infos) != 1:
@@ -938,9 +951,9 @@ class InferVStreams(object):
self._make_c_contiguous_if_needed(input_layer_name, input_data)
with ExceptionWrapper():
time_before_infer = time.time()
time_before_infer = time.perf_counter()
self._infer_pipeline.infer(input_data, output_buffers, batch_size)
self._hw_time = time.time() - time_before_infer
self._hw_time = time.perf_counter() - time_before_infer
for name, result_array in output_buffers.items():
is_nms = output_buffers_info[name].is_nms
@@ -957,7 +970,7 @@ class InferVStreams(object):
else:
output_buffers[name] = HailoRTTransformUtils.output_raw_buffer_to_nms_format(result_array, nms_shape.number_of_classes)
self._total_time = time.time() - time_before_infer_calcs
self._total_time = time.perf_counter() - time_before_infer_calcs
return output_buffers
def get_hw_time(self):
@@ -982,7 +995,7 @@ class InferVStreams(object):
input_expected_dtype = self._infer_pipeline.get_host_dtype(input_layer_name)
if input_dtype != input_expected_dtype:
self._logger.warning("Given input data dtype ({}) is different than inferred dtype ({}). "
default_logger().warning("Given input data dtype ({}) is different than inferred dtype ({}). "
"conversion for every frame will reduce performance".format(input_dtype,
input_expected_dtype))
input_data[input_layer_name] = input_data[input_layer_name].astype(input_expected_dtype)
@@ -1015,7 +1028,7 @@ class InferVStreams(object):
def _make_c_contiguous_if_needed(self, input_layer_name, input_data):
if not input_data[input_layer_name].flags.c_contiguous:
self._logger.warning("Converting {} numpy array to be C_CONTIGUOUS".format(
default_logger().warning("Converting {} numpy array to be C_CONTIGUOUS".format(
input_layer_name))
input_data[input_layer_name] = numpy.asarray(input_data[input_layer_name], order='C')
@@ -1139,10 +1152,9 @@ class HailoRTTransformUtils(object):
return FormatType.FLOAT32
raise HailoRTException("unsupported data type {}".format(dtype))
# TODO: HRT-10427 - Remove
class InternalEthernetDevice(object):
def __init__(self, address, port, response_timeout_seconds=10, max_number_of_attempts=3):
# TODO: HRT-9987 - Add this deprecation warning
# default_logger().warning("InternalEthernetDevice is deprecated! Please use VDevice object.")
self.device = None
self._address = address
self._port = port
@@ -1204,7 +1216,7 @@ class PcieDeviceInfo(_pyhailort.PcieDeviceInfo):
except HailoRTException:
raise ArgumentTypeError('Invalid device info string, format is [<domain>]:<bus>:<device>.<func>')
# TODO: HRT-10427 - Remove
class InternalPcieDevice(object):
def __init__(self, device_info=None):
self.device = None
@@ -1224,6 +1236,7 @@ class InternalPcieDevice(object):
self.device.release()
self.device = None
# TODO: HRT-10427 - Move to a static method in pyhailort_internal when InternalPcieDevice removed
@staticmethod
def scan_devices():
with ExceptionWrapper():
@@ -1242,7 +1255,7 @@ class InternalPcieDevice(object):
with ExceptionWrapper():
return self.device.direct_read_memory(address, size)
# TODO: HRT-10427 - Remove when removing InternalPcieDevice
class PcieDebugLog(object):
def __init__(self, pci_device):
self._pcie_device = pci_device
@@ -1300,7 +1313,7 @@ class HailoFormatFlags(_pyhailort.FormatFlags):
SUPPORTED_PROTOCOL_VERSION = 2
SUPPORTED_FW_MAJOR = 4
SUPPORTED_FW_MINOR = 13
SUPPORTED_FW_MINOR = 14
SUPPORTED_FW_REVISION = 0
MEGA_MULTIPLIER = 1000.0 * 1000.0
@@ -1622,7 +1635,6 @@ class Control:
def __init__(self, device: '_pyhailort.Device'):
self.__device = device
self._logger = default_logger()
# TODO: should remove?
if sys.platform != "win32":
@@ -2269,7 +2281,6 @@ class Device:
"""
gc.collect()
self._logger = default_logger()
# Device __del__ function tries to release self._device.
# to avoid AttributeError if the __init__ func fails, we set it to None first.
# https://stackoverflow.com/questions/6409644/is-del-called-on-an-object-that-doesnt-complete-init
@@ -2323,12 +2334,16 @@ class Device:
Args:
hef (:class:`~hailo_platform.pyhailort.pyhailort.HEF`): HEF to configure the vdevice from
configure_params_by_name (dict, optional): Maps between each net_group_name to configure_params. If not provided, default params will be applied
Note:
This function is deprecated. Support will be removed in future versions.
"""
default_logger().warning("Usage of Device.configure is deprecated! One should use VDevice for inference")
if self._creation_pid != os.getpid():
raise HailoRTException("Device can only be configured from the process it was created in.")
with ExceptionWrapper():
configured_apps = self._device.configure(hef._hef, configure_params_by_name)
configured_networks = [ConfiguredNetwork(configured_app, self, hef) for configured_app in configured_apps]
configured_ngs_handles = self._device.configure(hef._hef, configure_params_by_name)
configured_networks = [ConfiguredNetwork(configured_ng_handle) for configured_ng_handle in configured_ngs_handles]
self._loaded_network_groups.extend(configured_networks)
return configured_networks
@@ -2385,7 +2400,6 @@ class VDevice(object):
list of all available devices. Excludes 'params'. Cannot be used together with device_id.
"""
gc.collect()
self._logger = default_logger()
# VDevice __del__ function tries to release self._vdevice.
# to avoid AttributeError if the __init__ func fails, we set it to None first.
@@ -2461,8 +2475,8 @@ class VDevice(object):
if self._creation_pid != os.getpid():
raise HailoRTException("VDevice can only be configured from the process it was created in.")
with ExceptionWrapper():
configured_apps = self._vdevice.configure(hef._hef, configure_params_by_name)
configured_networks = [ConfiguredNetwork(configured_app, self, hef) for configured_app in configured_apps]
configured_ngs_handles = self._vdevice.configure(hef._hef, configure_params_by_name)
configured_networks = [ConfiguredNetwork(configured_ng_handle) for configured_ng_handle in configured_ngs_handles]
self._loaded_network_groups.extend(configured_networks)
return configured_networks
@@ -2539,9 +2553,9 @@ class InputVStreamParams(object):
timeout_ms = DEFAULT_VSTREAM_TIMEOUT_MS
if queue_size is None:
queue_size = DEFAULT_VSTREAM_QUEUE_SIZE
name = network_name if network_name is not None else configured_network.name
name = network_name if network_name is not None else ""
with ExceptionWrapper():
return configured_network._hef._hef.get_input_vstreams_params(name, quantized,
return configured_network._configured_network.make_input_vstream_params(name, quantized,
format_type, timeout_ms, queue_size)
@staticmethod
@@ -2613,9 +2627,9 @@ class OutputVStreamParams(object):
timeout_ms = DEFAULT_VSTREAM_TIMEOUT_MS
if queue_size is None:
queue_size = DEFAULT_VSTREAM_QUEUE_SIZE
name = network_name if network_name is not None else configured_network.name
name = network_name if network_name is not None else ""
with ExceptionWrapper():
return configured_network._hef._hef.get_output_vstreams_params(name, quantized,
return configured_network._configured_network.make_output_vstream_params(name, quantized,
format_type, timeout_ms, queue_size)
@staticmethod
@@ -2820,8 +2834,8 @@ class InputVStreams(object):
class OutputLayerUtils(object):
def __init__(self, hef, vstream_name, pipeline, net_group_name=""):
self._hef = hef
def __init__(self, output_vstream_infos, vstream_name, pipeline, net_group_name=""):
self._output_vstream_infos = output_vstream_infos
self._vstream_info = self._get_vstream_info(net_group_name, vstream_name)
if isinstance(pipeline, (_pyhailort.InferVStreams)):
@@ -2866,8 +2880,7 @@ class OutputLayerUtils(object):
return self._quantized_empty_bbox
def _get_vstream_info(self, net_group_name, vstream_name):
output_vstream_infos = self._hef.get_output_vstream_infos(net_group_name)
for info in output_vstream_infos:
for info in self._output_vstream_infos:
if info.name == vstream_name:
return info
raise HailoRTException("No vstream matches the given name {}".format(vstream_name))
@@ -2885,7 +2898,8 @@ class OutputVStream(object):
def __init__(self, configured_network, recv_object, name, tf_nms_format=False, net_group_name=""):
self._recv_object = recv_object
self._output_layer_utils = OutputLayerUtils(configured_network._hef, name, self._recv_object, net_group_name)
output_vstream_infos = configured_network.get_output_vstream_infos()
self._output_layer_utils = OutputLayerUtils(output_vstream_infos, name, self._recv_object, net_group_name)
self._output_dtype = self._output_layer_utils.output_dtype
self._vstream_info = self._output_layer_utils._vstream_info
self._output_tensor_info = self._output_layer_utils.output_tensor_info
@@ -3030,15 +3044,3 @@ class OutputVStreams(object):
def _after_fork_in_child(self):
for vstream in self._vstreams.values():
vstream._after_fork_in_child()
class YOLOv5PostProcessOp(object):
def __init__(self, anchors, shapes, formats, quant_infos, image_height, image_width, confidence_threshold, iou_threshold, num_of_classes,
max_boxes, cross_classes=True):
self._op = _pyhailort.YOLOv5PostProcessOp.create(anchors, shapes, formats, quant_infos, image_height, image_width, confidence_threshold,
iou_threshold, num_of_classes, max_boxes, cross_classes)
def execute(self, net_flow_tensors):
return self._op.execute(net_flow_tensors)

View File

@@ -6,10 +6,9 @@ from __future__ import division
from builtins import object
from hailo_platform.pyhailort.pyhailort import ConfiguredNetwork, HEF, TrafficControl, INPUT_DATAFLOW_BASE_PORT
from hailo_platform.pyhailort.pyhailort import HEF, NetworkRateLimiter, INPUT_DATAFLOW_BASE_PORT
DEFAULT_MAX_KBPS = 850e3
DEFAULT_MAX_KBPS_PAPRIKA_B0 = 160e3
BYTES_IN_Kbits = 125.0
@@ -28,15 +27,9 @@ class BadTCCallError(Exception):
pass
def get_max_supported_kbps(hw_arch="hailo8"):
# TODO: What should be here?
if hw_arch == "paprika_b0":
return DEFAULT_MAX_KBPS_PAPRIKA_B0
return DEFAULT_MAX_KBPS
class RateLimiterWrapper(object):
"""UDPRateLimiter wrapper enabling ``with`` statements."""
def __init__(self, network_group, fps=1, fps_factor=1.0, remote_ip=None, hw_arch=None):
def __init__(self, configured_network_group, fps=1, fps_factor=1.0, remote_ip=None):
"""RateLimiterWrapper constructor.
Args:
@@ -44,32 +37,24 @@ class RateLimiterWrapper(object):
target network_group.
fps (int): Frame rate.
fps_factor (float): Safety factor by which to multiply the calculated UDP rate.
remote_ip (str): Device IP address.
"""
if not isinstance(network_group, ConfiguredNetwork):
return RateLimiterException("The API was changed. RateLimiterWrapper accept ConfiguredNetwork instead of ActivatedNetwork")
self._network_group = network_group
if remote_ip is not None:
self._network_group = configured_network_group
if remote_ip is None:
raise RateLimiterException("In order to use RateLimiterWrapper, one should pass 'remote_ip'")
self._remote_ip = remote_ip
else:
# this line should be removed. this parameter will be removed from the object
self._remote_ip = network_group._target.device_id
self._fps = fps
self._fps_factor = fps_factor
if hw_arch is not None:
self._hw_arch = hw_arch
else:
# this line should be removed. this parameter will be removed from the object
self._hw_arch = network_group._target._hw_arch if hasattr(network_group._target, '_hw_arch') else None
self._rates_dict = {}
self._tc_dict = {}
def __enter__(self):
max_supported_kbps_rate = get_max_supported_kbps(self._hw_arch)
max_supported_kbps_rate = DEFAULT_MAX_KBPS
self._rates_dict = self._network_group.get_udp_rates_dict((self._fps * self._fps_factor),
(max_supported_kbps_rate * BYTES_IN_Kbits))
for port, rate in self._rates_dict.items():
self._tc_dict[port] = TrafficControl(self._remote_ip, port, rate)
self._tc_dict[port] = NetworkRateLimiter(self._remote_ip, port, rate)
self._tc_dict[port].reset_rate_limit()
self._tc_dict[port].set_rate_limit()
@@ -82,7 +67,7 @@ class RateLimiterWrapper(object):
class UDPRateLimiter(object):
"""Enables limiting or removing limits on UDP communication rate to a board."""
def __init__(self, remote_ip, port, rate_kbits_per_sec = 0):
self._tc = TrafficControl(remote_ip, port, rate_kbits_per_sec * BYTES_IN_Kbits)
self._tc = NetworkRateLimiter(remote_ip, port, rate_kbits_per_sec * BYTES_IN_Kbits)
def set_rate_limit(self):
return self._tc.set_rate_limit()

View File

@@ -126,8 +126,8 @@
"outputs": [],
"source": [
"def send(configured_network, num_frames):\n",
" vstreams_params = InputVStreamParams.make(configured_network)\n",
" configured_network.wait_for_activation(1000)\n",
" vstreams_params = InputVStreamParams.make(configured_network)\n",
" with InputVStreams(configured_network, vstreams_params) as vstreams:\n",
" vstream_to_buffer = {vstream: np.ndarray([1] + list(vstream.shape), dtype=vstream.dtype) for vstream in vstreams}\n",
" for _ in range(num_frames):\n",

View File

@@ -37,7 +37,7 @@
"%matplotlib inline\n",
"import time\n",
"\n",
"from hailo_platform import PcieDevice, DvmTypes, PowerMeasurementTypes, SamplingPeriod, AveragingFactor, MeasurementBufferIndex # noqa F401\n"
"from hailo_platform import Device, DvmTypes, PowerMeasurementTypes, SamplingPeriod, AveragingFactor, MeasurementBufferIndex # noqa F401\n"
]
},
{
@@ -53,7 +53,7 @@
"metadata": {},
"outputs": [],
"source": [
"target = PcieDevice()"
"target = Device()"
]
},
{

View File

@@ -0,0 +1,130 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n",
"# Python inference tutorial - Multi Process Service and Model Scheduler\n",
"\n",
"This tutorial will walk you through the inference process using The Model Scheduler.\n",
"\n",
"**Requirements:**\n",
"\n",
"* Run HailoRT Multi-Process Service before running inference. See installation steps in [Multi-Process Service](../../inference/inference.rst)\n",
"* Run the notebook inside the Python virtual environment: ```source hailo_virtualenv/bin/activate```\n",
"\n",
"It is recommended to use the command ``hailo tutorial`` (when inside the virtualenv) to open a Jupyter server that contains the tutorials."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Running Inference using HailoRT\n",
"\n",
"In this example we will use the Model Scheduler to run inference on multiple models.\n",
"Each model is represented by an HEF which is built using the Hailo Dataflow Compiler.\n",
"An HEF is Hailo's binary format for neural networks. The HEF files contain:\n",
"\n",
"* Target HW configuration\n",
"* Weights\n",
"* Metadata for HailoRT (e.g. input/output scaling)\n",
"\n",
"The Model Scheduler is an HailoRT component that comes to enhance and simplify the usage\n",
"of the same Hailo device by multiple networks. The responsibility for activating/deactivating the network\n",
"groups is now under HailoRT, and done **automatically** without user application intervention.\n",
"In order to use the Model Scheduler, create the VDevice with scheduler enabled, configure all models to the device, and start inference on all models:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"from multiprocessing import Process\n",
"from hailo_platform import (HEF, VDevice, HailoStreamInterface, InferVStreams, ConfigureParams,\n",
" InputVStreamParams, OutputVStreamParams, InputVStreams, OutputVStreams, FormatType, HailoSchedulingAlgorithm)\n",
"\n",
"\n",
"# Define the function to run inference on the model\n",
"def infer(network_group, input_vstreams_params, output_vstreams_params, input_data):\n",
" rep_count = 100\n",
" with InferVStreams(network_group, input_vstreams_params, output_vstreams_params) as infer_pipeline:\n",
" for i in range(rep_count):\n",
" infer_results = infer_pipeline.infer(input_data)\n",
"\n",
"\n",
"# Loading compiled HEFs:\n",
"first_hef_path = '../hefs/resnet_v1_18.hef'\n",
"second_hef_path = '../hefs/shortcut_net.hef'\n",
"first_hef = HEF(first_hef_path)\n",
"second_hef = HEF(second_hef_path)\n",
"hefs = [first_hef, second_hef]\n",
"\n",
"# Creating the VDevice target with scheduler enabled\n",
"params = VDevice.create_params()\n",
"params.scheduling_algorithm = HailoSchedulingAlgorithm.ROUND_ROBIN\n",
"with VDevice(params) as target:\n",
" infer_processes = []\n",
"\n",
" # Configure network groups\n",
" for hef in hefs:\n",
" configure_params = ConfigureParams.create_from_hef(hef=hef, interface=HailoStreamInterface.PCIe)\n",
" network_groups = target.configure(hef, configure_params)\n",
" network_group = network_groups[0]\n",
"\n",
" # Create input and output virtual streams params\n",
" # Quantized argument signifies whether or not the incoming data is already quantized.\n",
" # Data is quantized by HailoRT if and only if quantized == False.\n",
" input_vstreams_params = InputVStreamParams.make(network_group, quantized=False, format_type=FormatType.FLOAT32)\n",
" output_vstreams_params = OutputVStreamParams.make(network_group, quantized=True, format_type=FormatType.UINT8)\n",
"\n",
" # Define dataset params\n",
" input_vstream_info = hef.get_input_vstream_infos()[0]\n",
" image_height, image_width, channels = input_vstream_info.shape\n",
" num_of_frames = 10\n",
" low, high = 2, 20\n",
"\n",
" # Generate random dataset\n",
" dataset = np.random.randint(low, high, (num_of_frames, image_height, image_width, channels)).astype(np.float32)\n",
" input_data = {input_vstream_info.name: dataset}\n",
"\n",
" # Create infer process\n",
" infer_process = Process(target=infer, args=(network_group, input_vstreams_params, output_vstreams_params, input_data))\n",
" infer_processes.append(infer_process)\n",
"\n",
" print(f'Starting streaming on multiple models using scheduler')\n",
" for infer_process in infer_processes:\n",
" infer_process.start()\n",
" for infer_process in infer_processes:\n",
" infer_process.join()\n",
"\n",
" print('Done inference')"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -1,8 +1,8 @@
appdirs==1.4.4
argcomplete==2.0.0
contextlib2==0.6.0.post1
distlib==0.3.4
filelock==3.4.1
distlib==0.3.6
filelock==3.8.0
future==0.18.2
importlib-metadata==5.1.0
importlib-resources==5.1.2
@@ -11,4 +11,4 @@ netifaces==0.10.9
numpy==1.23.3
typing_extensions==4.1.1
verboselogs==1.7
virtualenv==20.4.3
virtualenv==20.17.0

View File

@@ -69,6 +69,6 @@ if __name__ == "__main__":
"linux_aarch64",
],
url="https://hailo.ai/",
version="4.13.0",
version="4.14.0",
zip_safe=False,
)

View File

@@ -1,12 +1,23 @@
cmake_minimum_required(VERSION 3.0.0)
option(HAILO_BUILD_PYHAILORT_INTERNAL OFF)
include(ExternalProject)
FUNCTION(exclude_archive_libs_symbols target) # should be same as in common_compiler_options.cmake
if(WIN32)
# TODO: check if there are required actions for Windows
elseif(UNIX)
get_property(TEMP_LINK_FLAGS TARGET ${target} PROPERTY LINK_FLAGS)
set(TEMP_LINK_FLAGS "${TEMP_LINK_FLAGS} -Wl,--exclude-libs=ALL")
set_property(TARGET ${target} PROPERTY LINK_FLAGS ${TEMP_LINK_FLAGS})
else()
message(FATAL_ERROR "Unexpeced host, stopping build")
endif()
ENDFUNCTION()
if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
if(NOT DEFINED PYBIND11_PYTHON_VERSION)
message(FATAL_ERROR "PYBIND11_PYTHON_VERSION is not defined. To build _pyhailort, pass python version")
endif()
string(REPLACE "." "" dpython ${PYBIND11_PYTHON_VERSION}) # E.g "3.5" -> "35"
if(${dpython} LESS "38")
set(m_flag "m")
@@ -16,6 +27,8 @@ if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
set(PYTHON_MODULE_EXTENSION ".cpython-${dpython}${m_flag}-${CMAKE_SYSTEM_PROCESSOR}-linux-gnu.so")
endif()
option(HAILO_BUILD_PYHAILORT_INTERNAL OFF)
set(PYHAILORT_DIR ${CMAKE_CURRENT_LIST_DIR})
pybind11_add_module(_pyhailort
@@ -24,29 +37,27 @@ pybind11_add_module(_pyhailort
hef_api.cpp
vstream_api.cpp
quantization_api.cpp
${HAILORT_OPS_CPP_SOURCES}
${HAILORT_COMMON_CPP_SOURCES}
)
set_target_properties(_pyhailort PROPERTIES
CXX_STANDARD 14
CXX_STANDARD_REQUIRED YES
CXX_EXTENSIONS NO
C_VISIBILITY_PRESET hidden
CXX_VISIBILITY_PRESET hidden
# VISIBILITY_INLINES_HIDDEN YES
)
target_include_directories(_pyhailort
PRIVATE
$<BUILD_INTERFACE:${HAILORT_INC_DIR}>
$<BUILD_INTERFACE:${HAILORT_COMMON_DIR}>
$<BUILD_INTERFACE:${HAILORT_SRC_DIR}>
$<BUILD_INTERFACE:${COMMON_INC_DIR}>
)
find_package(HailoRT 4.14.0 EXACT REQUIRED)
target_link_libraries(_pyhailort PRIVATE libhailort spdlog::spdlog)
target_link_libraries(_pyhailort PRIVATE HailoRT::libhailort)
if(WIN32)
target_link_libraries(_pyhailort PRIVATE Ws2_32 Iphlpapi Shlwapi)
endif()
if(HAILO_BUILD_SERVICE)
target_link_libraries(_pyhailort PRIVATE grpc++_unsecure hailort_rpc_grpc_proto hef_proto)
target_link_libraries(_pyhailort PRIVATE Ws2_32)
target_compile_options(_pyhailort PRIVATE
/DWIN32_LEAN_AND_MEAN
/DNOMINMAX # NOMINMAX is required in order to play nice with std::min/std::max (otherwise Windows.h defines it's own)
/wd4201 /wd4251
)
endif()
target_compile_options(_pyhailort PRIVATE ${HAILORT_COMPILE_OPTIONS})

View File

@@ -14,8 +14,6 @@
#include "hailo/hailort_common.hpp"
#include "hailo/network_group.hpp"
#include "common/logger_macros.hpp"
#include "utils.hpp"
#include <pybind11/numpy.h>

View File

@@ -9,6 +9,7 @@
**/
#include "device_api.hpp"
#include <memory>
namespace hailort
@@ -39,28 +40,7 @@ DeviceWrapper DeviceWrapper::create_pcie(hailo_pcie_device_info_t &device_info)
DeviceWrapper DeviceWrapper::create_eth(const std::string &device_address, uint16_t port,
uint32_t timeout_milliseconds, uint8_t max_number_of_attempts)
{
hailo_eth_device_info_t device_info = {};
/* Validate address length */
if (INET_ADDRSTRLEN < device_address.size()) {
EXIT_WITH_ERROR("device_address is too long")
}
device_info.host_address.sin_family = AF_INET;
device_info.host_address.sin_port = HAILO_ETH_PORT_ANY;
auto status = Socket::pton(AF_INET, HAILO_ETH_ADDRESS_ANY, &(device_info.host_address.sin_addr));
VALIDATE_STATUS(status);
device_info.device_address.sin_family = AF_INET;
device_info.device_address.sin_port = port;
status = Socket::pton(AF_INET, device_address.c_str(), &(device_info.device_address.sin_addr));
VALIDATE_STATUS(status);
device_info.timeout_millis = timeout_milliseconds;
device_info.max_number_of_attempts = max_number_of_attempts;
device_info.max_payload_size = HAILO_DEFAULT_ETH_MAX_PAYLOAD_SIZE;
auto device = Device::create_eth(device_info);
auto device = Device::create_eth(device_address, port, timeout_milliseconds, max_number_of_attempts);
VALIDATE_EXPECTED(device);
return DeviceWrapper(device.release());
@@ -125,7 +105,7 @@ bool DeviceWrapper::get_overcurrent_state()
py::bytes DeviceWrapper::read_memory(uint32_t address, uint32_t length)
{
std::unique_ptr<std::string> response = make_unique_nothrow<std::string>(length, '\x00');
std::unique_ptr<std::string> response = std::make_unique<std::string>(length, '\x00');
VALIDATE_NOT_NULL(response, HAILO_OUT_OF_HOST_MEMORY);
MemoryView data_view(const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(response->data())), length);
@@ -162,7 +142,7 @@ py::bytes DeviceWrapper::i2c_read(hailo_i2c_slave_config_t *slave_config, uint32
{
VALIDATE_NOT_NULL(slave_config, HAILO_INVALID_ARGUMENT);
std::unique_ptr<std::string> response = make_unique_nothrow<std::string>(length, '\x00');
std::unique_ptr<std::string> response = std::make_unique<std::string>(length, '\x00');
VALIDATE_NOT_NULL(response, HAILO_OUT_OF_HOST_MEMORY);
MemoryView data_view(const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(response->data())), length);
@@ -229,7 +209,7 @@ py::bytes DeviceWrapper::read_user_config()
auto config_buffer = device().read_user_config();
VALIDATE_EXPECTED(config_buffer);
std::unique_ptr<std::string> response = make_unique_nothrow<std::string>(
std::unique_ptr<std::string> response = std::make_unique<std::string>(
const_cast<char*>(reinterpret_cast<const char*>(config_buffer->data())), config_buffer->size());
VALIDATE_NOT_NULL(response, HAILO_OUT_OF_HOST_MEMORY);
@@ -255,7 +235,7 @@ py::bytes DeviceWrapper::read_board_config()
auto config_buffer = device().read_board_config();
VALIDATE_EXPECTED(config_buffer);
std::unique_ptr<std::string> response = make_unique_nothrow<std::string>(
std::unique_ptr<std::string> response = std::make_unique<std::string>(
const_cast<char*>(reinterpret_cast<const char*>(config_buffer->data())), config_buffer->size());
VALIDATE_NOT_NULL(response, HAILO_OUT_OF_HOST_MEMORY);
@@ -307,7 +287,7 @@ py::bytes DeviceWrapper::sensor_get_sections_info()
auto buffer = device().sensor_get_sections_info();
VALIDATE_EXPECTED(buffer);
std::unique_ptr<std::string> response = make_unique_nothrow<std::string>(
std::unique_ptr<std::string> response = std::make_unique<std::string>(
const_cast<char*>(reinterpret_cast<const char*>(buffer->data())), buffer->size());
VALIDATE_NOT_NULL(response, HAILO_OUT_OF_HOST_MEMORY);

View File

@@ -12,10 +12,9 @@
#define _DEVICE_API_HPP_
#include "hailo/hailort.h"
#include <hailo/platform.h>
#include "hailo/device.hpp"
#include "common/socket.hpp"
#include "utils.hpp"
#include "hef_api.hpp"

View File

@@ -10,6 +10,7 @@
**/
#include "hef_api.hpp"
#include <memory>
namespace hailort
@@ -20,7 +21,7 @@ HefWrapper::HefWrapper(const std::string &hef_path)
auto hef_expected = Hef::create(hef_path);
VALIDATE_EXPECTED(hef_expected);
hef = make_unique_nothrow<Hef>(hef_expected.release());
hef = std::make_unique<Hef>(hef_expected.release());
if (nullptr == hef) {
THROW_STATUS_ERROR(HAILO_OUT_OF_HOST_MEMORY);
}
@@ -31,7 +32,7 @@ HefWrapper::HefWrapper(const MemoryView &hef_buffer)
auto hef_expected = Hef::create(hef_buffer);
VALIDATE_EXPECTED(hef_expected);
hef = make_unique_nothrow<Hef>(hef_expected.release());
hef = std::make_unique<Hef>(hef_expected.release());
if (nullptr == hef) {
THROW_STATUS_ERROR(HAILO_OUT_OF_HOST_MEMORY);
}
@@ -255,7 +256,11 @@ void HefWrapper::initialize_python_module(py::module &m)
.def("get_networks_names", &HefWrapper::get_networks_names)
;
py::class_<ConfiguredNetworkGroup>(m, "ConfiguredNetworkGroup")
py::class_<ConfiguredNetworkGroup, std::shared_ptr<ConfiguredNetworkGroup>>(m, "ConfiguredNetworkGroup")
.def("is_scheduled", [](ConfiguredNetworkGroup& self)
{
return self.is_scheduled();
})
.def("get_name", [](ConfiguredNetworkGroup& self)
{
return self.name();
@@ -300,30 +305,18 @@ void HefWrapper::initialize_python_module(py::module &m)
})
.def("before_fork", [](ConfiguredNetworkGroup& self)
{
#ifdef HAILO_SUPPORT_MULTI_PROCESS
auto status = self.before_fork();
VALIDATE_STATUS(status);
#else
(void)self;
#endif // HAILO_SUPPORT_MULTI_PROCESS
})
.def("after_fork_in_parent", [](ConfiguredNetworkGroup& self)
{
#ifdef HAILO_SUPPORT_MULTI_PROCESS
auto status = self.after_fork_in_parent();
VALIDATE_STATUS(status);
#else
(void)self;
#endif // HAILO_SUPPORT_MULTI_PROCESS
})
.def("after_fork_in_child", [](ConfiguredNetworkGroup& self)
{
#ifdef HAILO_SUPPORT_MULTI_PROCESS
auto status = self.after_fork_in_child();
VALIDATE_STATUS(status);
#else
(void)self;
#endif // HAILO_SUPPORT_MULTI_PROCESS
})
.def("set_scheduler_timeout", [](ConfiguredNetworkGroup& self, int timeout, const std::string &network_name="")
{
@@ -341,6 +334,112 @@ void HefWrapper::initialize_python_module(py::module &m)
auto status = self.set_scheduler_priority(priority);
VALIDATE_STATUS(status);
})
.def("get_networks_names", [](ConfiguredNetworkGroup& self)
{
auto network_infos = self.get_network_infos();
VALIDATE_EXPECTED(network_infos);
std::vector<std::string> result;
result.reserve(network_infos->size());
for (const auto &info : network_infos.value()) {
result.push_back(info.name);
}
return py::cast(result);
})
.def("get_sorted_output_names", [](ConfiguredNetworkGroup& self)
{
auto names_list = self.get_sorted_output_names();
VALIDATE_EXPECTED(names_list);
return py::cast(names_list.release());
})
.def("get_input_vstream_infos", [](ConfiguredNetworkGroup& self, const std::string &name)
{
auto result = self.get_input_vstream_infos(name);
VALIDATE_EXPECTED(result);
return py::cast(result.value());
})
.def("get_output_vstream_infos", [](ConfiguredNetworkGroup& self, const std::string &name)
{
auto result = self.get_output_vstream_infos(name);
VALIDATE_EXPECTED(result);
return py::cast(result.value());
})
.def("get_all_vstream_infos", [](ConfiguredNetworkGroup& self, const std::string &name)
{
auto result = self.get_all_vstream_infos(name);
VALIDATE_EXPECTED(result);
return py::cast(result.value());
})
.def("get_all_stream_infos", [](ConfiguredNetworkGroup& self, const std::string &name)
{
auto result = self.get_all_stream_infos(name);
VALIDATE_EXPECTED(result);
return py::cast(result.value());
})
.def("get_input_stream_infos", [](ConfiguredNetworkGroup& self, const std::string &name)
{
std::vector<hailo_stream_info_t> input_streams_infos;
auto all_streams = self.get_all_stream_infos(name);
VALIDATE_EXPECTED(all_streams);
for (auto &info : all_streams.value()) {
if (HAILO_H2D_STREAM == info.direction) {
input_streams_infos.push_back(std::move(info));
}
}
return py::cast(input_streams_infos);
})
.def("get_output_stream_infos", [](ConfiguredNetworkGroup& self, const std::string &name)
{
std::vector<hailo_stream_info_t> output_streams_infos;
auto all_streams = self.get_all_stream_infos(name);
VALIDATE_EXPECTED(all_streams);
for (auto &info : all_streams.value()) {
if (HAILO_D2H_STREAM == info.direction) {
output_streams_infos.push_back(std::move(info));
}
}
return py::cast(output_streams_infos);
})
.def("get_vstream_names_from_stream_name", [](ConfiguredNetworkGroup& self, const std::string &stream_name)
{
auto result = self.get_vstream_names_from_stream_name(stream_name);
VALIDATE_EXPECTED(result);
return py::cast(result.release());
})
.def("get_stream_names_from_vstream_name", [](ConfiguredNetworkGroup& self, const std::string &vstream_name)
{
auto result = self.get_stream_names_from_vstream_name(vstream_name);
VALIDATE_EXPECTED(result);
return py::cast(result.release());
})
.def("make_input_vstream_params", [](ConfiguredNetworkGroup& self, const std::string &name, bool quantized, hailo_format_type_t format_type,
uint32_t timeout_ms, uint32_t queue_size)
{
auto result = self.make_input_vstream_params(quantized, format_type, timeout_ms, queue_size, name);
VALIDATE_EXPECTED(result);
return py::cast(result.release());
})
.def("make_output_vstream_params", [](ConfiguredNetworkGroup& self, const std::string &name, bool quantized, hailo_format_type_t format_type,
uint32_t timeout_ms, uint32_t queue_size)
{
auto result = self.make_output_vstream_params(quantized, format_type, timeout_ms, queue_size, name);
VALIDATE_EXPECTED(result);
return py::cast(result.release());
})
.def(py::pickle(
[](const ConfiguredNetworkGroup &cng) { // __getstate__
auto handle = cng.get_client_handle();
VALIDATE_EXPECTED(handle);
return py::make_tuple(handle.value(), cng.name());
},
[](py::tuple t) { // __setstate__
auto handle = t[0].cast<uint32_t>();
auto net_group_name = t[1].cast<std::string>();
auto net_group = ConfiguredNetworkGroup::duplicate_network_group_client(handle, net_group_name);
VALIDATE_EXPECTED(net_group);
return net_group.value();
}
))
;
ActivatedAppContextManagerWrapper::add_to_python_module(m);

View File

@@ -18,7 +18,6 @@
#include "vstream_api.hpp"
#include "utils.hpp"
#include "common/logger_macros.hpp"
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>

View File

@@ -173,12 +173,15 @@ py::array PyhailortInternal::get_yolov5_post_process_expected_buffer()
auto buffer = get_expected_buffer_float32();
VALIDATE_EXPECTED(buffer);
auto type = py::dtype(HailoRTBindingsCommon::convert_format_type_to_string(HAILO_FORMAT_TYPE_FLOAT32));
auto shape = *py::array::ShapeContainer({buffer->size()});
// Note: The ownership of the buffer is transferred to Python wrapped as a py::array.
// When the py::array isn't referenced anymore in Python and is destructed, the py::capsule's dtor
// is called too (and it deletes the raw buffer)
auto type = py::dtype(HailoRTBindingsCommon::convert_format_type_to_string(HAILO_FORMAT_TYPE_FLOAT32));
auto shape = *py::array::ShapeContainer({buffer->size()});
const auto unmanaged_addr = buffer.release().release();
auto unmanaged_addr_exp = buffer->storage().release();
VALIDATE_EXPECTED(unmanaged_addr_exp);
const auto unmanaged_addr = unmanaged_addr_exp.release();
return py::array(type, shape, unmanaged_addr,
py::capsule(unmanaged_addr, [](void *p) { delete reinterpret_cast<uint8_t*>(p); }));
}
@@ -277,7 +280,7 @@ py::list PyhailortInternal::get_all_layers_info(const HefWrapper &hef, const std
auto core_op_metadata = hef.hef_ptr()->pimpl->get_core_op_metadata(net_group_name);
VALIDATE_EXPECTED(core_op_metadata);
return py::cast(core_op_metadata->get_all_layer_infos());
return py::cast(core_op_metadata.value()->get_all_layer_infos());
}
PYBIND11_MODULE(_pyhailort_internal, m) {
@@ -296,6 +299,13 @@ PYBIND11_MODULE(_pyhailort_internal, m) {
.def_readonly("cluster_index", &BufferIndices::cluster_index)
;
py::enum_<SENSOR_CONFIG_OPCODES_t>(m, "SensorConfigOpCode")
.value("SENSOR_CONFIG_OPCODES_WR", SENSOR_CONFIG_OPCODES_WR)
.value("SENSOR_CONFIG_OPCODES_RD", SENSOR_CONFIG_OPCODES_RD)
.value("SENSOR_CONFIG_OPCODES_RMW", SENSOR_CONFIG_OPCODES_RMW)
.value("SENSOR_CONFIG_OPCODES_DELAY", SENSOR_CONFIG_OPCODES_DELAY)
;
py::class_<LayerInfo>(m, "HailoLayerInfo", py::module_local())
.def_readonly("is_mux", &LayerInfo::is_mux)
.def_readonly("mux_predecessors", &LayerInfo::predecessor)

View File

@@ -1,132 +0,0 @@
/**
* Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file net_flow_api.hpp
* @brief Defines binding to a HailoRT++ ops usage over Python.
**/
#ifndef _HAILO_NET_FLOW_API_HPP_
#define _HAILO_NET_FLOW_API_HPP_
#include "hailo/hailort.h"
#include "net_flow/ops/yolo_post_process.hpp"
#include "utils.hpp"
#include "bindings_common.hpp"
namespace hailort
{
namespace net_flow
{
class YOLOv5PostProcessOpWrapper
{
public:
static YOLOv5PostProcessOpWrapper create(const std::vector<std::vector<int>> &anchors,
const std::vector<hailo_3d_image_shape_t> &shapes, const std::vector<hailo_format_t> &formats,
const std::vector<hailo_quant_info_t> &quant_infos, float32_t image_height, float32_t image_width, float32_t confidence_threshold,
float32_t iou_threshold, uint32_t num_of_classes, uint32_t max_boxes,
bool cross_classes=true)
{
std::map<std::string, net_flow::BufferMetaData> inputs_metadata;
std::map<std::string, net_flow::BufferMetaData> outputs_metadata;
net_flow::NmsPostProcessConfig nms_post_process_config{};
nms_post_process_config.nms_score_th = confidence_threshold;
nms_post_process_config.nms_iou_th = iou_threshold;
nms_post_process_config.max_proposals_per_class = max_boxes;
nms_post_process_config.classes = num_of_classes;
nms_post_process_config.background_removal = false;
nms_post_process_config.background_removal_index = 0;
nms_post_process_config.cross_classes = cross_classes;
net_flow::YoloPostProcessConfig yolo_post_process_config{};
yolo_post_process_config.image_height = image_height;
yolo_post_process_config.image_width = image_width;
// Each layer anchors vector is structured as {w,h} pairs.
for (size_t i = 0; i < anchors.size(); ++i) {
auto name = std::to_string(i);
yolo_post_process_config.anchors.insert({name, anchors[i]});
BufferMetaData input_metadata = {
shapes[i],
shapes[i],
formats[i],
quant_infos[i]
};
inputs_metadata.insert({name, input_metadata});
}
auto op = YOLOv5PostProcessOp::create(inputs_metadata, outputs_metadata, nms_post_process_config, yolo_post_process_config);
VALIDATE_EXPECTED(op);
return YOLOv5PostProcessOpWrapper(op.release(), num_of_classes, max_boxes);
}
static void add_to_python_module(py::module &m)
{
py::class_<YOLOv5PostProcessOpWrapper>(m, "YOLOv5PostProcessOp")
.def("create", &YOLOv5PostProcessOpWrapper::create)
.def("execute",[](YOLOv5PostProcessOpWrapper &self, const std::vector<py::array> &tensors)
{
std::map<std::string, MemoryView> data_views;
for (size_t i = 0; i < tensors.size(); ++i) {
data_views.insert({std::to_string(i),
MemoryView(const_cast<void*>(reinterpret_cast<const void*>(tensors[i].data())), tensors[i].nbytes())});
}
hailo_nms_info_t nms_info = {
self.m_num_of_classes,
self.m_max_boxes,
sizeof(hailo_bbox_float32_t),
1,
false,
hailo_nms_defuse_info_t()
};
hailo_format_t output_format = {
HAILO_FORMAT_TYPE_FLOAT32,
HAILO_FORMAT_ORDER_HAILO_NMS,
HAILO_FORMAT_FLAGS_QUANTIZED,
};
auto buffer = Buffer::create(HailoRTCommon::get_nms_host_frame_size(nms_info, output_format), 0);
VALIDATE_STATUS(buffer.status());
std::map<std::string, MemoryView> outputs;
outputs.insert({"", MemoryView(buffer.value().data(), buffer.value().size())});
auto status = self.m_post_processing_op->execute(data_views, outputs);
VALIDATE_STATUS(status);
// Note: The ownership of the buffer is transferred to Python wrapped as a py::array.
// When the py::array isn't referenced anymore in Python and is destructed, the py::capsule's dtor
// is called too (and it deletes the raw buffer)
auto type = py::dtype(HailoRTBindingsCommon::convert_format_type_to_string(HAILO_FORMAT_TYPE_FLOAT32));
auto shape = *py::array::ShapeContainer({buffer.value().size()});
const auto unmanaged_addr = buffer.release().release();
return py::array(type, shape, unmanaged_addr,
py::capsule(unmanaged_addr, [](void *p) { delete reinterpret_cast<uint8_t*>(p); }));
})
;
}
private:
YOLOv5PostProcessOpWrapper(std::shared_ptr<Op> post_processing_op, uint32_t num_of_classes, uint32_t max_bboxes)
: m_post_processing_op(post_processing_op),
m_num_of_classes(num_of_classes),
m_max_boxes(max_bboxes) {}
std::shared_ptr<Op> m_post_processing_op;
uint32_t m_num_of_classes = 0;
uint32_t m_max_boxes = 0;
};
void NetFlow_api_initialize_python_module(py::module &m)
{
YOLOv5PostProcessOpWrapper::add_to_python_module(m);
}
} /* namespace net_flow */
} /* namespace hailort */
#endif /* _HAILO_NET_FLOW_API_HPP_ */

View File

@@ -10,23 +10,24 @@ using namespace std;
#include "hailo/hailort.h"
#include "hailo/hailort_defaults.hpp"
#include "hailo/network_rate_calculator.hpp"
#include "hef_api.hpp"
#include "vstream_api.hpp"
#include "vdevice_api.hpp"
#include "device_api.hpp"
#include "quantization_api.hpp"
#include "net_flow_api.hpp"
#include "utils.hpp"
#include "utils.h"
#include "bindings_common.hpp"
#include "sensor_config_exports.h"
#if defined(__GNUC__)
#include "common/os/posix/traffic_control.hpp"
#endif
// should be same as socket.hpp
#define PADDING_BYTES_SIZE (6)
#define PADDING_ALIGN_BYTES (8 - PADDING_BYTES_SIZE)
#define MIN_UDP_PAYLOAD_SIZE (24)
#define MAX_UDP_PAYLOAD_SIZE (1456)
#define MAX_UDP_PADDED_PAYLOAD_SIZE (MAX_UDP_PAYLOAD_SIZE - PADDING_BYTES_SIZE - PADDING_ALIGN_BYTES)
namespace hailort
{
@@ -102,36 +103,22 @@ std::string get_status_message(uint32_t status_in)
}
}
#if defined(__GNUC__)
class TrafficControlUtilWrapper final
class NetworkRateLimiter final
{
public:
static TrafficControlUtilWrapper create(const std::string &ip, uint16_t port, uint32_t rate_bytes_per_sec)
static void set_rate_limit(const std::string &ip, uint16_t port, uint32_t rate_bytes_per_sec)
{
auto tc_expected = TrafficControlUtil::create(ip, port, rate_bytes_per_sec);
VALIDATE_STATUS(tc_expected.status());
auto tc_ptr = make_unique_nothrow<TrafficControlUtil>(tc_expected.release());
if (nullptr == tc_ptr) {
VALIDATE_STATUS(HAILO_OUT_OF_HOST_MEMORY);
}
return TrafficControlUtilWrapper(std::move(tc_ptr));
VALIDATE_STATUS(NetworkUdpRateCalculator::set_rate_limit(ip, port, rate_bytes_per_sec));
}
void set_rate_limit()
static void reset_rate_limit(const std::string &ip, uint16_t port)
{
VALIDATE_STATUS(m_tc->set_rate_limit());
}
void reset_rate_limit()
{
VALIDATE_STATUS(m_tc->reset_rate_limit());
VALIDATE_STATUS(NetworkUdpRateCalculator::reset_rate_limit(ip, port));
}
static std::string get_interface_name(const std::string &ip)
{
auto name = TrafficControlUtil::get_interface_name(ip);
auto name = NetworkUdpRateCalculator::get_interface_name(ip);
VALIDATE_STATUS(name.status());
return name.value();
@@ -139,26 +126,16 @@ public:
static void add_to_python_module(py::module &m)
{
py::class_<TrafficControlUtilWrapper>(m, "TrafficControlUtil")
.def(py::init(&TrafficControlUtilWrapper::create))
.def("set_rate_limit", &TrafficControlUtilWrapper::set_rate_limit)
.def("reset_rate_limit", &TrafficControlUtilWrapper::reset_rate_limit)
py::class_<NetworkRateLimiter>(m, "NetworkRateLimiter")
.def("set_rate_limit", &NetworkRateLimiter::set_rate_limit)
.def("reset_rate_limit", &NetworkRateLimiter::reset_rate_limit)
.def_static("get_interface_name", [](const std::string &ip) {
return TrafficControlUtilWrapper::get_interface_name(ip);
return NetworkRateLimiter::get_interface_name(ip);
})
;
}
private:
TrafficControlUtilWrapper(std::unique_ptr<TrafficControlUtil> tc) :
m_tc(std::move(tc))
{}
std::unique_ptr<TrafficControlUtil> m_tc;
};
#endif
static void validate_versions_match()
{
hailo_version_t libhailort_version = {};
@@ -437,13 +414,6 @@ PYBIND11_MODULE(_pyhailort, m) {
.value("HAILO8_ISP", HAILO_SENSOR_TYPES_HAILO8_ISP)
;
py::enum_<SENSOR_CONFIG_OPCODES_t>(m, "SensorConfigOpCode")
.value("SENSOR_CONFIG_OPCODES_WR", SENSOR_CONFIG_OPCODES_WR)
.value("SENSOR_CONFIG_OPCODES_RD", SENSOR_CONFIG_OPCODES_RD)
.value("SENSOR_CONFIG_OPCODES_RMW", SENSOR_CONFIG_OPCODES_RMW)
.value("SENSOR_CONFIG_OPCODES_DELAY", SENSOR_CONFIG_OPCODES_DELAY)
;
py::class_<hailo_i2c_slave_config_t>(m, "I2CSlaveConfig")
.def(py::init<>())
.def_readwrite("endianness", &hailo_i2c_slave_config_t::endianness)
@@ -755,11 +725,45 @@ PYBIND11_MODULE(_pyhailort, m) {
.value("MIPI", HAILO_STREAM_INTERFACE_MIPI)
;
py::enum_<hailo_vstream_stats_flags_t>(m, "VStreamStatsFlags")
.value("NONE", hailo_vstream_stats_flags_t::HAILO_VSTREAM_STATS_NONE)
.value("MEASURE_FPS", hailo_vstream_stats_flags_t::HAILO_VSTREAM_STATS_MEASURE_FPS)
.value("MEASURE_LATENCY", hailo_vstream_stats_flags_t::HAILO_VSTREAM_STATS_MEASURE_LATENCY)
;
py::enum_<hailo_pipeline_elem_stats_flags_t>(m, "PipelineElemStatsFlags")
.value("NONE", hailo_pipeline_elem_stats_flags_t::HAILO_PIPELINE_ELEM_STATS_NONE)
.value("MEASURE_FPS", hailo_pipeline_elem_stats_flags_t::HAILO_PIPELINE_ELEM_STATS_MEASURE_FPS)
.value("MEASURE_LATENCY", hailo_pipeline_elem_stats_flags_t::HAILO_PIPELINE_ELEM_STATS_MEASURE_LATENCY)
.value("MEASURE_QUEUE_SIZE", hailo_pipeline_elem_stats_flags_t::HAILO_PIPELINE_ELEM_STATS_MEASURE_QUEUE_SIZE)
;
py::class_<hailo_vstream_params_t>(m, "VStreamParams")
.def(py::init<>())
.def_readwrite("user_buffer_format", &hailo_vstream_params_t::user_buffer_format)
.def_readwrite("timeout_ms", &hailo_vstream_params_t::timeout_ms)
.def_readwrite("queue_size", &hailo_vstream_params_t::queue_size)
.def_readonly("vstream_stats_flags", &hailo_vstream_params_t::vstream_stats_flags)
.def_readonly("pipeline_elements_stats_flags", &hailo_vstream_params_t::pipeline_elements_stats_flags)
.def(py::pickle(
[](const hailo_vstream_params_t &vstream_params) { // __getstate__
return py::make_tuple(
vstream_params.user_buffer_format,
vstream_params.timeout_ms,
vstream_params.queue_size,
vstream_params.vstream_stats_flags,
vstream_params.pipeline_elements_stats_flags);
},
[](py::tuple t) { // __setstate__
hailo_vstream_params_t vstream_params;
vstream_params.user_buffer_format = t[0].cast<hailo_format_t>();
vstream_params.timeout_ms = t[1].cast<uint32_t>();
vstream_params.queue_size = t[2].cast<uint32_t>();
vstream_params.vstream_stats_flags = t[3].cast<hailo_vstream_stats_flags_t>();
vstream_params.pipeline_elements_stats_flags = t[4].cast<hailo_pipeline_elem_stats_flags_t>();
return vstream_params;
}
))
;
py::enum_<hailo_latency_measurement_flags_t>(m, "LatencyMeasurementFlags")
@@ -802,6 +806,7 @@ PYBIND11_MODULE(_pyhailort, m) {
},
[](VDeviceParamsWrapper& params, hailo_scheduling_algorithm_t scheduling_algorithm) {
params.orig_params.scheduling_algorithm = scheduling_algorithm;
params.orig_params.multi_process_service = (HAILO_SCHEDULING_ALGORITHM_NONE != scheduling_algorithm);
}
)
.def_property("group_id",
@@ -813,12 +818,9 @@ PYBIND11_MODULE(_pyhailort, m) {
params.orig_params.group_id = params.group_id_str.c_str();
}
)
.def_property("multi_process_service",
[](const VDeviceParamsWrapper& params) -> uint32_t {
.def_property_readonly("multi_process_service",
[](const VDeviceParamsWrapper& params) -> bool {
return params.orig_params.multi_process_service;
},
[](VDeviceParamsWrapper& params, bool multi_process_service) {
params.orig_params.multi_process_service = multi_process_service;
}
)
.def_static("default", []() {
@@ -1103,11 +1105,8 @@ PYBIND11_MODULE(_pyhailort, m) {
VStream_api_initialize_python_module(m);
VDevice_api_initialize_python_module(m);
DeviceWrapper::add_to_python_module(m);
hailort::net_flow::NetFlow_api_initialize_python_module(m);
#if defined(__GNUC__)
TrafficControlUtilWrapper::add_to_python_module(m);
#endif
NetworkRateLimiter::add_to_python_module(m);
std::stringstream version;
version << HAILORT_MAJOR_VERSION << "." << HAILORT_MINOR_VERSION << "." << HAILORT_REVISION_VERSION;

View File

@@ -12,6 +12,8 @@
#include "quantization_api.hpp"
#include "bindings_common.hpp"
#include <iostream>
namespace hailort
{
@@ -32,8 +34,7 @@ void QuantizationBindings::dequantize_output_buffer_from_uint8(py::array src_buf
static_cast<float32_t*>(dst_buffer.mutable_data()), shape_size, quant_info);
break;
default:
LOGGER__ERROR("Output quantization isn't supported from src format type uint8 to dst format type = {}",
HailoRTBindingsCommon::convert_format_type_to_string(dst_dtype));
std::cerr << "Output quantization isn't supported from src format type uint8 to dst format type = " << HailoRTBindingsCommon::convert_format_type_to_string(dst_dtype);
THROW_STATUS_ERROR(HAILO_INVALID_ARGUMENT);
break;
}
@@ -52,8 +53,7 @@ void QuantizationBindings::dequantize_output_buffer_from_uint16(py::array src_bu
static_cast<float32_t*>(dst_buffer.mutable_data()), shape_size, quant_info);
break;
default:
LOGGER__ERROR("Output quantization isn't supported from src dormat type uint16 to dst format type = {}",
HailoRTBindingsCommon::convert_format_type_to_string(dst_dtype));
std::cerr << "Output quantization isn't supported from src dormat type uint16 to dst format type = " << HailoRTBindingsCommon::convert_format_type_to_string(dst_dtype);
THROW_STATUS_ERROR(HAILO_INVALID_ARGUMENT);
break;
}
@@ -68,8 +68,7 @@ void QuantizationBindings::dequantize_output_buffer_from_float32(py::array src_b
static_cast<float32_t*>(dst_buffer.mutable_data()), shape_size, quant_info);
break;
default:
LOGGER__ERROR("Output quantization isn't supported from src format type float32 to dst format type = {}",
HailoRTBindingsCommon::convert_format_type_to_string(dst_dtype));
std::cerr << "Output quantization isn't supported from src format type float32 to dst format type = " << HailoRTBindingsCommon::convert_format_type_to_string(dst_dtype);
THROW_STATUS_ERROR(HAILO_INVALID_ARGUMENT);
break;
}
@@ -92,8 +91,7 @@ void QuantizationBindings::dequantize_output_buffer_from_uint8_in_place(py::arra
static_cast<float32_t*>(dst_buffer.mutable_data()), shape_size, quant_info);
break;
default:
LOGGER__ERROR("Output quantization isn't supported from src format type uint8 to dst format type = {}",
HailoRTBindingsCommon::convert_format_type_to_string(dst_dtype));
std::cerr << "Output quantization isn't supported from src format type uint8 to dst format type = " << HailoRTBindingsCommon::convert_format_type_to_string(dst_dtype);
THROW_STATUS_ERROR(HAILO_INVALID_ARGUMENT);
break;
}
@@ -112,8 +110,7 @@ void QuantizationBindings::dequantize_output_buffer_from_uint16_in_place(py::arr
static_cast<float32_t*>(dst_buffer.mutable_data()), shape_size, quant_info);
break;
default:
LOGGER__ERROR("Output quantization isn't supported from src dormat type uint16 to dst format type = {}",
HailoRTBindingsCommon::convert_format_type_to_string(dst_dtype));
std::cerr << "Output quantization isn't supported from src dormat type uint16 to dst format type = " << HailoRTBindingsCommon::convert_format_type_to_string(dst_dtype);
THROW_STATUS_ERROR(HAILO_INVALID_ARGUMENT);
break;
}
@@ -128,8 +125,7 @@ void QuantizationBindings::dequantize_output_buffer_from_float32_in_place(py::ar
static_cast<float32_t*>(dst_buffer.mutable_data()), shape_size, quant_info);
break;
default:
LOGGER__ERROR("Output quantization isn't supported from src format type float32 to dst format type = {}",
HailoRTBindingsCommon::convert_format_type_to_string(dst_dtype));
std::cerr << "Output quantization isn't supported from src format type float32 to dst format type = " << HailoRTBindingsCommon::convert_format_type_to_string(dst_dtype);
THROW_STATUS_ERROR(HAILO_INVALID_ARGUMENT);
break;
}
@@ -149,7 +145,7 @@ void QuantizationBindings::dequantize_output_buffer_in_place(py::array dst_buffe
QuantizationBindings::dequantize_output_buffer_from_float32_in_place(dst_buffer, dst_dtype, shape_size, quant_info);
break;
default:
LOGGER__ERROR("Unsupported src format type = {}", HailoRTBindingsCommon::convert_format_type_to_string(dst_dtype));
std::cerr << "Unsupported src format type = " << HailoRTBindingsCommon::convert_format_type_to_string(dst_dtype);
THROW_STATUS_ERROR(HAILO_INVALID_ARGUMENT);
break;
}
@@ -169,7 +165,7 @@ void QuantizationBindings::dequantize_output_buffer(py::array src_buffer, py::ar
QuantizationBindings::dequantize_output_buffer_from_float32(src_buffer, dst_buffer, dst_dtype, shape_size, quant_info);
break;
default:
LOGGER__ERROR("Unsupported src format type = {}", HailoRTBindingsCommon::convert_format_type_to_string(dst_dtype));
std::cerr << "Unsupported src format type = " << HailoRTBindingsCommon::convert_format_type_to_string(dst_dtype);
THROW_STATUS_ERROR(HAILO_INVALID_ARGUMENT);
break;
}
@@ -184,7 +180,7 @@ void QuantizationBindings::quantize_input_buffer_from_uint8(py::array src_buffer
static_cast<uint8_t*>(dst_buffer.mutable_data()), shape_size, quant_info);
break;
default:
LOGGER__ERROR("Input quantization isn't supported from src format type uint8 to dst format type = {}", HailoRTBindingsCommon::convert_format_type_to_string(dst_dtype));
std::cerr << "Input quantization isn't supported from src format type uint8 to dst format type = " << HailoRTBindingsCommon::convert_format_type_to_string(dst_dtype);
THROW_STATUS_ERROR(HAILO_INVALID_ARGUMENT);
break;
}
@@ -203,8 +199,7 @@ void QuantizationBindings::quantize_input_buffer_from_uint16(py::array src_buffe
static_cast<uint16_t*>(dst_buffer.mutable_data()), shape_size, quant_info);
break;
default:
LOGGER__ERROR("Input quantization isn't supported from src format type uint16 to dst format type = {}",
HailoRTBindingsCommon::convert_format_type_to_string(dst_dtype));
std::cerr << "Input quantization isn't supported from src format type uint16 to dst format type = " << HailoRTBindingsCommon::convert_format_type_to_string(dst_dtype);
THROW_STATUS_ERROR(HAILO_INVALID_ARGUMENT);
break;
}
@@ -223,8 +218,8 @@ void QuantizationBindings::quantize_input_buffer_from_float32(py::array src_buff
static_cast<uint16_t*>(dst_buffer.mutable_data()), shape_size, quant_info);
break;
default:
LOGGER__ERROR("Input quantization isn't supported from src format type float32 to dst format type = {}",
HailoRTBindingsCommon::convert_format_type_to_string(dst_dtype));
std::cerr << "Input quantization isn't supported from src format type float32 to dst format type = " <<
HailoRTBindingsCommon::convert_format_type_to_string(dst_dtype);
THROW_STATUS_ERROR(HAILO_INVALID_ARGUMENT);
break;
}
@@ -244,7 +239,7 @@ void QuantizationBindings::quantize_input_buffer(py::array src_buffer, py::array
QuantizationBindings::quantize_input_buffer_from_float32(src_buffer, dst_buffer, dst_dtype, shape_size, quant_info);
break;
default:
LOGGER__ERROR("Input quantization isn't supported for src format type = {}", HailoRTBindingsCommon::convert_format_type_to_string(dst_dtype));
std::cerr << "Input quantization isn't supported for src format type = " << HailoRTBindingsCommon::convert_format_type_to_string(dst_dtype);
THROW_STATUS_ERROR(HAILO_INVALID_ARGUMENT);
break;
}

View File

@@ -68,11 +68,11 @@ class HailoRTStatusException : public HailoRTException {
[](hailo_stream_parameters_t& self) -> const __property_type& \
{ \
if (__interface_value != self.stream_interface) { \
LOGGER__ERROR("Stream params interface is not {}.", #__interface_value); \
std::cerr << "Stream params interface is not " << __interface_value << "."; \
THROW_STATUS_ERROR(HAILO_INVALID_OPERATION); \
} \
if (__direction_value != self.direction) { \
LOGGER__ERROR("Stream params direction is not {}.", #__direction_value); \
std::cerr << "Stream params direction is not " << __direction_value << "."; \
THROW_STATUS_ERROR(HAILO_INVALID_OPERATION); \
} \
return self.__property_name; \
@@ -80,11 +80,11 @@ class HailoRTStatusException : public HailoRTException {
[](hailo_stream_parameters_t& self, const __property_type& value) \
{ \
if (__interface_value != self.stream_interface) { \
LOGGER__ERROR("Stream params interface is not {}.", #__interface_value); \
std::cerr << "Stream params interface is not " << __interface_value << "."; \
THROW_STATUS_ERROR(HAILO_INVALID_OPERATION); \
} \
if (__direction_value != self.direction) { \
LOGGER__ERROR("Stream params direction is not {}.", #__direction_value); \
std::cerr << "Stream params direction is not " << __direction_value << "."; \
THROW_STATUS_ERROR(HAILO_INVALID_OPERATION); \
} \
self.__property_name = value; \

View File

@@ -16,14 +16,8 @@
#include "hailo/vdevice.hpp"
#include "hailo/hailort_common.hpp"
#include "common/logger_macros.hpp"
#ifdef HAILO_SUPPORT_MULTI_PROCESS
#include "service/rpc_client_utils.hpp"
#endif // HAILO_SUPPORT_MULTI_PROCESS
#include "utils.hpp"
#include <iostream>
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
#include <pybind11/detail/common.h>
@@ -57,7 +51,7 @@ public:
static VDeviceWrapper create(const VDeviceParamsWrapper &params, const std::vector<std::string> &device_ids)
{
if (params.orig_params.device_ids != nullptr && (!device_ids.empty())) {
LOGGER__ERROR("VDevice device_ids can be set in params or device_ids argument. Both parameters were passed to the c'tor");
std::cerr << "VDevice device_ids can be set in params or device_ids argument. Both parameters were passed to the c'tor";
throw HailoRTStatusException(std::to_string(HAILO_INVALID_OPERATION));
}
auto modified_params = params;
@@ -124,32 +118,26 @@ public:
void before_fork()
{
#ifdef HAILO_SUPPORT_MULTI_PROCESS
if (m_vdevice != nullptr) {
auto status = m_vdevice->before_fork();
VALIDATE_STATUS(status);
}
#endif // HAILO_SUPPORT_MULTI_PROCESS
}
void after_fork_in_parent()
{
#ifdef HAILO_SUPPORT_MULTI_PROCESS
if (m_vdevice != nullptr) {
auto status = m_vdevice->after_fork_in_parent();
VALIDATE_STATUS(status);
}
#endif // HAILO_SUPPORT_MULTI_PROCESS
}
void after_fork_in_child()
{
#ifdef HAILO_SUPPORT_MULTI_PROCESS
if (m_vdevice != nullptr) {
auto status = m_vdevice->after_fork_in_child();
VALIDATE_STATUS(status);
}
#endif // HAILO_SUPPORT_MULTI_PROCESS
}
private:

View File

@@ -7,12 +7,10 @@
* @brief Implementation of binding to virtual stream usage over Python.
**/
#include "common/logger_macros.hpp"
#include "common/utils.hpp"
#include "vstream_api.hpp"
#include "bindings_common.hpp"
#include "utils.hpp"
#include <iostream>
namespace hailort
@@ -87,7 +85,7 @@ InputVStreamsWrapper InputVStreamsWrapper::create(ConfiguredNetworkGroup &net_gr
std::unordered_map<std::string, std::shared_ptr<InputVStream>> input_vstreams;
for (auto &input : input_vstreams_expected.value()) {
auto input_name = input.name();
input_vstreams.emplace(input_name, make_shared_nothrow<InputVStream>(std::move(input)));
input_vstreams.emplace(input_name, std::make_unique<InputVStream>(std::move(input)));
}
return InputVStreamsWrapper(input_vstreams);
}
@@ -106,7 +104,7 @@ std::shared_ptr<InputVStream> InputVStreamsWrapper::get_input_by_name(const std:
{
auto input = m_input_vstreams.find(name);
if (m_input_vstreams.end() == input) {
LOGGER__ERROR("Input virtual stream for name={} not found", name);
std::cerr << "Input virtual stream for name=" << name << " not found";
THROW_STATUS_ERROR(HAILO_NOT_FOUND);
}
@@ -210,7 +208,9 @@ void OutputVStreamWrapper::add_to_python_module(py::module &m)
// Note: The ownership of the buffer is transferred to Python wrapped as a py::array.
// When the py::array isn't referenced anymore in Python and is destructed, the py::capsule's dtor
// is called too (and it deletes the raw buffer)
const auto unmanaged_addr = buffer.release().release();
auto unmanaged_addr_exp = buffer->storage().release();
VALIDATE_EXPECTED(unmanaged_addr_exp);
const auto unmanaged_addr = unmanaged_addr_exp.release();
return py::array(get_dtype(self), get_shape(self), unmanaged_addr,
py::capsule(unmanaged_addr, [](void *p) { delete reinterpret_cast<uint8_t*>(p); }));
})
@@ -263,7 +263,7 @@ OutputVStreamsWrapper OutputVStreamsWrapper::create(ConfiguredNetworkGroup &net_
std::unordered_map<std::string, std::shared_ptr<OutputVStream>> output_vstreams;
for (auto &output : output_vstreams_expected.value()) {
auto output_name = output.name();
output_vstreams.emplace(output_name, make_shared_nothrow<OutputVStream>(std::move(output)));
output_vstreams.emplace(output_name, std::make_unique<OutputVStream>(std::move(output)));
}
return OutputVStreamsWrapper(output_vstreams);
}
@@ -272,7 +272,7 @@ std::shared_ptr<OutputVStream> OutputVStreamsWrapper::get_output_by_name(const s
{
auto output = m_output_vstreams.find(name);
if (m_output_vstreams.end() == output) {
LOGGER__ERROR("Output virtual stream for name={} not found", name);
std::cerr << "Output virtual stream for name=" << name << " not found";
THROW_STATUS_ERROR(HAILO_NOT_FOUND);
}
@@ -361,7 +361,7 @@ InferVStreamsWrapper InferVStreamsWrapper::create(ConfiguredNetworkGroup &networ
{
auto infer_pipeline = InferVStreams::create(network_group, input_vstreams_params, output_vstreams_params);
VALIDATE_EXPECTED(infer_pipeline);
auto infer_vstream_ptr = make_shared_nothrow<InferVStreams>(std::move(infer_pipeline.value()));
auto infer_vstream_ptr = std::make_shared<InferVStreams>(std::move(infer_pipeline.value()));
return InferVStreamsWrapper(infer_vstream_ptr);
}
@@ -426,7 +426,7 @@ std::vector<size_t> InferVStreamsWrapper::get_shape(const std::string &stream_na
return HailoRTBindingsCommon::get_pybind_shape(output->get().get_info(), output->get().get_user_buffer_format());
}
LOGGER__ERROR("Stream {} not found", stream_name);
std::cerr << "Stream " << stream_name << " not found";
THROW_STATUS_ERROR(HAILO_NOT_FOUND);
}

View File

@@ -10,8 +10,6 @@
#ifndef _VSTREAM_API_HPP_
#define _VSTREAM_API_HPP_
#include "common/logger_macros.hpp"
#include "common/utils.hpp"
#include "hailo/vstream.hpp"
#include "hailo/inference_pipeline.hpp"
#include "utils.hpp"

View File

@@ -23,7 +23,7 @@
package_dest: /usr/include/aarch64-linux-gnu
- version: '3.9'
installation: manual
package_name: https://launchpad.net/~deadsnakes/+archive/ubuntu/ppa/+build/24906233/+files/libpython3.9-dev_3.9.16-1+bionic1_arm64.deb
package_name: https://launchpad.net/~deadsnakes/+archive/ubuntu/ppa/+build/26280901/+files/libpython3.9-dev_3.9.17-1+focal1_arm64.deb
package_dest: /usr/include/aarch64-linux-gnu
- version: '3.10'
installation: manual

View File

@@ -2,9 +2,33 @@ cmake_minimum_required(VERSION 3.0.0)
project(hailort-examples)
if(WIN32)
add_compile_options(/W4)
elseif(UNIX)
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR CMAKE_CXX_COMPILER_ID STREQUAL "QCC")
add_compile_options(-Wall -Wextra -Wconversion)
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
add_compile_options(-Wall -Wextra -Wconversion -Wno-missing-braces)
endif()
else()
message(FATAL_ERROR "Unexpeced host, stopping build")
endif()
if (HAILO_COMPILE_WARNING_AS_ERROR)
# Treat warnings as errors for all examples
if(WIN32)
add_compile_options(/WX)
elseif(UNIX)
add_compile_options(-Werror)
else()
message(FATAL_ERROR "Unexpeced host, stopping build")
endif()
endif()
add_subdirectory(cpp)
add_subdirectory(c)
# We add a custom target in order to compile all of the hailort examples
add_custom_target(hailort_examples)
add_dependencies(hailort_examples c_hailort_examples cpp_hailort_examples)

View File

@@ -26,6 +26,11 @@ The following examples are provided, demonstrating the HailoRT API:
- this example uses udp device.
- `raw_streams_example` - Basic inference of a shortcut network using raw stream api.
- The data is transformed before sent and after received in the same thread sending/receiving using the transformation api.
- `raw_async_streams_single_thread_example` - Basic inference of a shortcut network using raw stream async api with
a single thread.
- Each async read operation will re-launch a new async read operation.
- Each async write operation will re-launch a new async write operation.
- The main thread will stop the async operations by deactivating the network group.
- `notification_callback_example` - Demonstrates how to work with notification callbacks.
- C++ examples:
@@ -38,9 +43,19 @@ The following examples are provided, demonstrating the HailoRT API:
- `infer_pipeline_example` - Basic inference of a shortcut network using inference pipeline (blocking) api.
- same as `infer_pipeline_example` C example, uses HailoRT C++ api.
- `raw_streams_example` - Basic inference of a shortcut network, same as `raw_streams_example` C example, uses HailoRT C++ api.
- `multi_process_example` - Demonstrates how to work with HailoRT as a service and using the HailoRT Model Scheduler for network groups switching.
- `raw_async_streams_single_thread_example` - Basic inference of a shortcut network using raw stream async api with
a single thread.
- Each async read operation will re-launch a new async read operation.
- Each async write operation will re-launch a new async write operation.
- The main thread will stop the async operations by deactivating the network group.
- `raw_async_streams_multi_thread_example` - Basic inference of a shortcut network using raw stream async api with
a thread for each stream.
- The threads will continuously initiate an async read or write operations.
- The main thread will stop the async operations and the threads by deactivating the network group.
- `multi_process_example` - Demonstrates how to work with HailoRT multi-process service and using the HailoRT Model Scheduler for network groups switching.
Using the script `multi_process_example.sh` one can specify the number of processes to run each hef, see `multi_process_example.sh -h` for more information.
- `notification_callback_example` - Demonstrates how to work with notification callbacks, same as `notification_callback_example` C example.
You can find more details about each example in the HailoRT user guide.
## Compiling with CMake
Examples are configured and compiled using the following commands:
```sh
@@ -58,9 +73,10 @@ cmake --build build --config release --target cpp_vstreams_example
## Running the examples
Before running an example, download the HEFs using the [download script](../../scripts/download_hefs.sh):
Before running an example, download the HEFs using the [download script](../../scripts/download_hefs.sh) from the scripts directory:
```sh
../../scripts/download_hefs.sh
cd ../../scripts
./download_hefs.sh
```
To run an example, use (from this examples directory):

Some files were not shown because too many files have changed in this diff Show More