Co-authored-by: HailoRT-Automation <contact@hailo.ai>
This commit is contained in:
HailoRT-Automation
2023-10-05 16:21:52 +03:00
committed by GitHub
parent 9bce73eb42
commit 459eaf0234
327 changed files with 22332 additions and 11135 deletions

View File

@@ -40,13 +40,12 @@ HailoRT uses 2 licenses:
Contact information and support is available at [**hailo.ai**](https://hailo.ai/contact-us/). Contact information and support is available at [**hailo.ai**](https://hailo.ai/contact-us/).
## About Hailo-8™ ## About Hailo
Hailo-8 is a deep learning processor for edge devices. The Hailo-8 provides groundbreaking efficiency for neural network deployment. Hailo offers breakthrough AI Inference Accelerators and AI Vision Processors uniquely designed to accelerate embedded deep learning applications on edge devices.
The Hailo-8 edge AI processor, featuring up to 26 Tera-Operations-Per-Second (TOPS), significantly outperforms all other edge processors.
Hailo-8 is available in various form-factors, including the Hailo-8 M.2 Module.
The Hailo-8 AI processor is designed to fit into a multitude of smart machines and devices, for a wide variety of sectors including Automotive, Smart Cities, Industry 4.0, The Hailo AI Inference Accelerators allow edge devices to run deep learning applications at full scale more efficiently, effectively, and sustainably, with an architecture that takes advantage of the core properties of neural networks.
Retail and Smart Homes.
The Hailo AI Vision Processors (SoC) combine Hailo's patented and field proven AI inferencing capabilities with advanced computer vision engines, generating premium image quality and advanced video analytics.
For more information, please visit [**hailo.ai**](https://hailo.ai/). For more information, please visit [**hailo.ai**](https://hailo.ai/).

View File

@@ -66,6 +66,7 @@ typedef struct {
uint16_t feature_padding_payload; uint16_t feature_padding_payload;
uint16_t buffer_padding_payload; uint16_t buffer_padding_payload;
uint16_t buffer_padding; uint16_t buffer_padding;
bool is_periph_calculated_in_hailort;
} CONTEXT_SWITCH_DEFS__stream_reg_info_t; } CONTEXT_SWITCH_DEFS__stream_reg_info_t;
#if defined(_MSC_VER) #if defined(_MSC_VER)
@@ -103,12 +104,16 @@ typedef enum __attribute__((packed)) {
CONTEXT_SWITCH_DEFS__ACTION_TYPE_FETCH_CCW_BURSTS, CONTEXT_SWITCH_DEFS__ACTION_TYPE_FETCH_CCW_BURSTS,
CONTEXT_SWITCH_DEFS__ACTION_TYPE_VALIDATE_VDMA_CHANNEL, CONTEXT_SWITCH_DEFS__ACTION_TYPE_VALIDATE_VDMA_CHANNEL,
CONTEXT_SWITCH_DEFS__ACTION_TYPE_BURST_CREDITS_TASK_START, CONTEXT_SWITCH_DEFS__ACTION_TYPE_BURST_CREDITS_TASK_START,
CONTEXT_SWITCH_DEFS__ACTION_TYPE_BURST_CREDITS_TASK_RESET,
CONTEXT_SWITCH_DEFS__ACTION_TYPE_DDR_BUFFERING_RESET, CONTEXT_SWITCH_DEFS__ACTION_TYPE_DDR_BUFFERING_RESET,
CONTEXT_SWITCH_DEFS__ACTION_TYPE_OPEN_BOUNDARY_INPUT_CHANNEL, CONTEXT_SWITCH_DEFS__ACTION_TYPE_OPEN_BOUNDARY_INPUT_CHANNEL,
CONTEXT_SWITCH_DEFS__ACTION_TYPE_OPEN_BOUNDARY_OUTPUT_CHANNEL, CONTEXT_SWITCH_DEFS__ACTION_TYPE_OPEN_BOUNDARY_OUTPUT_CHANNEL,
CONTEXT_SWITCH_DEFS__ACTION_TYPE_ENABLE_NMS, CONTEXT_SWITCH_DEFS__ACTION_TYPE_ENABLE_NMS,
CONTEXT_SWITCH_DEFS__ACTION_TYPE_WRITE_DATA_BY_TYPE, CONTEXT_SWITCH_DEFS__ACTION_TYPE_WRITE_DATA_BY_TYPE,
CONTEXT_SWITCH_DEFS__ACTION_TYPE_SWITCH_LCU_BATCH, CONTEXT_SWITCH_DEFS__ACTION_TYPE_SWITCH_LCU_BATCH,
CONTEXT_SWITCH_DEFS__ACTION_TYPE_CHANGE_BOUNDARY_INPUT_BATCH,
CONTEXT_SWITCH_DEFS__ACTION_TYPE_PAUSE_VDMA_CHANNEL,
CONTEXT_SWITCH_DEFS__ACTION_TYPE_RESUME_VDMA_CHANNEL,
/* Must be last */ /* Must be last */
CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT
@@ -214,7 +219,7 @@ typedef struct {
typedef struct { typedef struct {
uint8_t packed_vdma_channel_id; uint8_t packed_vdma_channel_id;
uint8_t edge_layer_direction; uint8_t edge_layer_direction;
bool is_inter_context; bool check_host_empty_num_available;
uint8_t host_buffer_type; // CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t uint8_t host_buffer_type; // CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t
uint32_t initial_credit_size; uint32_t initial_credit_size;
} CONTEXT_SWITCH_DEFS__deactivate_vdma_channel_action_data_t; } CONTEXT_SWITCH_DEFS__deactivate_vdma_channel_action_data_t;
@@ -222,11 +227,21 @@ typedef struct {
typedef struct { typedef struct {
uint8_t packed_vdma_channel_id; uint8_t packed_vdma_channel_id;
uint8_t edge_layer_direction; uint8_t edge_layer_direction;
bool is_inter_context; bool check_host_empty_num_available;
uint8_t host_buffer_type; // CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t uint8_t host_buffer_type; // CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t
uint32_t initial_credit_size; uint32_t initial_credit_size;
} CONTEXT_SWITCH_DEFS__validate_vdma_channel_action_data_t; } CONTEXT_SWITCH_DEFS__validate_vdma_channel_action_data_t;
typedef struct {
uint8_t packed_vdma_channel_id;
uint8_t edge_layer_direction;
} CONTEXT_SWITCH_DEFS__pause_vdma_channel_action_data_t;
typedef struct {
uint8_t packed_vdma_channel_id;
uint8_t edge_layer_direction;
} CONTEXT_SWITCH_DEFS__resume_vdma_channel_action_data_t;
typedef enum { typedef enum {
CONTEXT_SWITCH_DEFS__CREDIT_TYPE_UNINITIALIZED = 0, CONTEXT_SWITCH_DEFS__CREDIT_TYPE_UNINITIALIZED = 0,
CONTEXT_SWITCH_DEFS__CREDIT_IN_BYTES, CONTEXT_SWITCH_DEFS__CREDIT_IN_BYTES,
@@ -239,7 +254,6 @@ typedef struct {
uint8_t network_index; uint8_t network_index;
uint32_t frame_periph_size; uint32_t frame_periph_size;
uint8_t credit_type; // CONTEXT_SWITCH_DEFS__CREDIT_TYPE_t uint8_t credit_type; // CONTEXT_SWITCH_DEFS__CREDIT_TYPE_t
uint16_t periph_bytes_per_buffer;
uint8_t host_buffer_type; // CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t, relevant only for descriptors credit. uint8_t host_buffer_type; // CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t, relevant only for descriptors credit.
} CONTEXT_SWITCH_DEFS__fetch_data_action_data_t; } CONTEXT_SWITCH_DEFS__fetch_data_action_data_t;
@@ -264,6 +278,10 @@ typedef struct {
typedef struct { typedef struct {
uint8_t packed_vdma_channel_id; uint8_t packed_vdma_channel_id;
uint8_t stream_index;
uint8_t network_index;
bool is_inter_context;
uint8_t host_buffer_type; // CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t
} CONTEXT_SWITCH_DEFS__vdma_dataflow_interrupt_data_t; } CONTEXT_SWITCH_DEFS__vdma_dataflow_interrupt_data_t;
typedef struct { typedef struct {
@@ -283,6 +301,7 @@ typedef struct {
uint8_t packed_vdma_channel_id; uint8_t packed_vdma_channel_id;
uint8_t stream_index; uint8_t stream_index;
bool is_inter_context; bool is_inter_context;
uint8_t host_buffer_type; // CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t
} CONTEXT_SWITCH_DEFS__wait_dma_idle_data_t; } CONTEXT_SWITCH_DEFS__wait_dma_idle_data_t;
typedef struct { typedef struct {
@@ -319,6 +338,7 @@ typedef struct {
typedef struct { typedef struct {
uint8_t packed_vdma_channel_id; uint8_t packed_vdma_channel_id;
uint8_t stream_index; uint8_t stream_index;
uint8_t network_index;
CONTEXT_SWITCH_DEFS__stream_reg_info_t stream_reg_info; CONTEXT_SWITCH_DEFS__stream_reg_info_t stream_reg_info;
CONTROL_PROTOCOL__host_buffer_info_t host_buffer_info; CONTROL_PROTOCOL__host_buffer_info_t host_buffer_info;
} CONTEXT_SWITCH_DEFS__activate_boundary_output_data_t; } CONTEXT_SWITCH_DEFS__activate_boundary_output_data_t;
@@ -342,6 +362,10 @@ typedef struct {
typedef struct { typedef struct {
uint8_t packed_vdma_channel_id; uint8_t packed_vdma_channel_id;
CONTROL_PROTOCOL__host_buffer_info_t host_buffer_info; CONTROL_PROTOCOL__host_buffer_info_t host_buffer_info;
uint8_t stream_index;
uint8_t network_index;
uint16_t periph_bytes_per_buffer;
uint32_t frame_periph_size;
} CONTEXT_SWITCH_DEFS__open_boundary_input_channel_data_t; } CONTEXT_SWITCH_DEFS__open_boundary_input_channel_data_t;
typedef struct { typedef struct {
@@ -365,6 +389,7 @@ typedef struct {
uint8_t network_index; uint8_t network_index;
uint16_t number_of_classes; uint16_t number_of_classes;
uint16_t burst_size; uint16_t burst_size;
uint8_t division_factor;
} CONTEXT_SWITCH_DEFS__enable_nms_action_t; } CONTEXT_SWITCH_DEFS__enable_nms_action_t;
typedef enum { typedef enum {
@@ -390,6 +415,10 @@ typedef struct {
uint32_t kernel_done_count; uint32_t kernel_done_count;
} CONTEXT_SWITCH_DEFS__switch_lcu_batch_action_data_t; } CONTEXT_SWITCH_DEFS__switch_lcu_batch_action_data_t;
typedef struct {
uint8_t packed_vdma_channel_id;
} CONTEXT_SWITCH_DEFS__change_boundary_input_batch_t;
#pragma pack(pop) #pragma pack(pop)
#ifdef __cplusplus #ifdef __cplusplus

View File

@@ -158,6 +158,7 @@ extern "C" {
CONTROL_PROTOCOL__OPCODE_X(HAILO_CONTROL_OPCODE_GET_HW_CONSTS, false, CPU_ID_CORE_CPU)\ CONTROL_PROTOCOL__OPCODE_X(HAILO_CONTROL_OPCODE_GET_HW_CONSTS, false, CPU_ID_CORE_CPU)\
CONTROL_PROTOCOL__OPCODE_X(HAILO_CONTROL_OPCODE_SET_SLEEP_STATE, false, CPU_ID_APP_CPU)\ CONTROL_PROTOCOL__OPCODE_X(HAILO_CONTROL_OPCODE_SET_SLEEP_STATE, false, CPU_ID_APP_CPU)\
CONTROL_PROTOCOL__OPCODE_X(HAILO_CONTROL_OPCODE_CHANGE_HW_INFER_STATUS, false, CPU_ID_CORE_CPU)\ CONTROL_PROTOCOL__OPCODE_X(HAILO_CONTROL_OPCODE_CHANGE_HW_INFER_STATUS, false, CPU_ID_CORE_CPU)\
CONTROL_PROTOCOL__OPCODE_X(HAILO_CONTROL_OPCODE_SIGNAL_DRIVER_DOWN, false, CPU_ID_CORE_CPU)\
typedef enum { typedef enum {
#define CONTROL_PROTOCOL__OPCODE_X(name, is_critical, cpu_id) name, #define CONTROL_PROTOCOL__OPCODE_X(name, is_critical, cpu_id) name,
@@ -344,7 +345,8 @@ typedef enum {
CONTROL_PROTOCOL__HAILO8_A0 = 0, CONTROL_PROTOCOL__HAILO8_A0 = 0,
CONTROL_PROTOCOL__HAILO8, CONTROL_PROTOCOL__HAILO8,
CONTROL_PROTOCOL__HAILO8L, CONTROL_PROTOCOL__HAILO8L,
CONTROL_PROTOCOL__HAILO15, CONTROL_PROTOCOL__HAILO15H,
CONTROL_PROTOCOL__PLUTO,
/* Must be last!! */ /* Must be last!! */
CONTROL_PROTOCOL__DEVICE_ARCHITECTURE_COUNT CONTROL_PROTOCOL__DEVICE_ARCHITECTURE_COUNT
} CONTROL_PROTOCOL__device_architecture_t; } CONTROL_PROTOCOL__device_architecture_t;
@@ -439,6 +441,7 @@ typedef struct {
uint16_t feature_padding_payload; uint16_t feature_padding_payload;
uint16_t buffer_padding_payload; uint16_t buffer_padding_payload;
uint16_t buffer_padding; uint16_t buffer_padding;
bool is_periph_calculated_in_hailort;
} CONTROL_PROTOCOL__nn_stream_config_t; } CONTROL_PROTOCOL__nn_stream_config_t;
typedef struct { typedef struct {
@@ -1027,8 +1030,6 @@ typedef struct {
uint16_t dynamic_batch_size; uint16_t dynamic_batch_size;
uint32_t batch_count_length; uint32_t batch_count_length;
uint16_t batch_count; uint16_t batch_count;
uint32_t keep_nn_config_during_reset_length;
uint8_t keep_nn_config_during_reset;
} CONTROL_PROTOCOL__change_context_switch_status_request_t; } CONTROL_PROTOCOL__change_context_switch_status_request_t;
typedef struct { typedef struct {

View File

@@ -58,6 +58,7 @@ typedef enum {
CONTEXT_SWITCH_BREAKPOINT_REACHED, CONTEXT_SWITCH_BREAKPOINT_REACHED,
HEALTH_MONITOR_CLOCK_CHANGED_EVENT_ID, HEALTH_MONITOR_CLOCK_CHANGED_EVENT_ID,
HW_INFER_MANAGER_INFER_DONE, HW_INFER_MANAGER_INFER_DONE,
CONTEXT_SWITCH_RUN_TIME_ERROR,
D2H_EVENT_ID_COUNT /* Must be last*/ D2H_EVENT_ID_COUNT /* Must be last*/
} D2H_EVENT_ID_t; } D2H_EVENT_ID_t;
@@ -146,6 +147,16 @@ typedef struct {
#define D2H_EVENT_HW_INFER_MANAGER_INFER_DONE_PARAMETER_COUNT (1) #define D2H_EVENT_HW_INFER_MANAGER_INFER_DONE_PARAMETER_COUNT (1)
typedef struct {
uint32_t exit_status;
uint8_t application_index;
uint16_t batch_index;
uint8_t context_index;
uint16_t action_index;
} D2H_EVENT_context_switch_run_time_error_event_message_t;
#define D2H_EVENT_CONTEXT_SWITCH_RUN_TIME_ERROR_EVENT_PARAMETER_COUNT (5)
/* D2H_EVENT__message_parameters_t should be in the same order as hailo_notification_message_parameters_t */ /* D2H_EVENT__message_parameters_t should be in the same order as hailo_notification_message_parameters_t */
typedef union { typedef union {
D2H_EVENT_rx_error_event_message_t rx_error_event; D2H_EVENT_rx_error_event_message_t rx_error_event;
@@ -158,6 +169,7 @@ typedef union {
D2H_EVENT_context_switch_breakpoint_reached_event_massage_t context_switch_breakpoint_reached_event; D2H_EVENT_context_switch_breakpoint_reached_event_massage_t context_switch_breakpoint_reached_event;
D2H_EVENT_health_monitor_clock_changed_event_message_t health_monitor_clock_changed_event; D2H_EVENT_health_monitor_clock_changed_event_message_t health_monitor_clock_changed_event;
D2H_EVENT_hw_infer_mamager_infer_done_message_t hw_infer_manager_infer_done_event; D2H_EVENT_hw_infer_mamager_infer_done_message_t hw_infer_manager_infer_done_event;
D2H_EVENT_context_switch_run_time_error_event_message_t context_switch_run_time_error_event;
} D2H_EVENT__message_parameters_t; } D2H_EVENT__message_parameters_t;
typedef struct { typedef struct {

View File

@@ -19,6 +19,8 @@ extern "C" {
#define FIRMWARE_HEADER_MAGIC_HAILO8 (0x1DD89DE0) #define FIRMWARE_HEADER_MAGIC_HAILO8 (0x1DD89DE0)
#define FIRMWARE_HEADER_MAGIC_HAILO15 (0xE905DAAB) #define FIRMWARE_HEADER_MAGIC_HAILO15 (0xE905DAAB)
// TODO - HRT-11344 : change fw magic to pluto specific
#define FIRMWARE_HEADER_MAGIC_PLUTO (0xE905DAAB)
typedef enum { typedef enum {
FIRMWARE_HEADER_VERSION_INITIAL = 0, FIRMWARE_HEADER_VERSION_INITIAL = 0,
@@ -29,7 +31,8 @@ typedef enum {
typedef enum { typedef enum {
FIRMWARE_TYPE_HAILO8 = 0, FIRMWARE_TYPE_HAILO8 = 0,
FIRMWARE_TYPE_HAILO15 FIRMWARE_TYPE_HAILO15,
FIRMWARE_TYPE_PLUTO
} firmware_type_t; } firmware_type_t;
@@ -37,6 +40,8 @@ typedef enum {
#define COMPILED_FIRMWARE_TYPE (FIRMWARE_TYPE_HAILO15) #define COMPILED_FIRMWARE_TYPE (FIRMWARE_TYPE_HAILO15)
#elif defined(HAILO8_B0) #elif defined(HAILO8_B0)
#define COMPILED_FIRMWARE_TYPE (FIRMWARE_TYPE_HAILO8) #define COMPILED_FIRMWARE_TYPE (FIRMWARE_TYPE_HAILO8)
#elif defined(PLUTO)
#define COMPILED_FIRMWARE_TYPE (FIRMWARE_TYPE_PLUTO)
#endif /* MERCURY */ #endif /* MERCURY */
typedef struct { typedef struct {

View File

@@ -111,6 +111,7 @@ Updating rules:
FIRMWARE_STATUS__X(HAILO_STATUS_DRAM_DMA_SERVICE_INIT_FAILED)\ FIRMWARE_STATUS__X(HAILO_STATUS_DRAM_DMA_SERVICE_INIT_FAILED)\
FIRMWARE_STATUS__X(HAILO_STATUS_VDMA_SERVICE_INIT_FAILED)\ FIRMWARE_STATUS__X(HAILO_STATUS_VDMA_SERVICE_INIT_FAILED)\
FIRMWARE_STATUS__X(HAILO_STATUS_ERROR_HANDLING_STACK_OVERFLOW)\ FIRMWARE_STATUS__X(HAILO_STATUS_ERROR_HANDLING_STACK_OVERFLOW)\
FIRMWARE_STATUS__X(HAILO_STATUS_UNEXPECTED_NULL_ARGUMENT)\
\ \
FIRMWARE_MODULE__X(FIRMWARE_MODULE__DATAFLOW)\ FIRMWARE_MODULE__X(FIRMWARE_MODULE__DATAFLOW)\
FIRMWARE_STATUS__X(HAILO_DATAFLOW_STATUS_INVALID_PARAMETER)\ FIRMWARE_STATUS__X(HAILO_DATAFLOW_STATUS_INVALID_PARAMETER)\
@@ -403,7 +404,7 @@ Updating rules:
FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_DYNAMIC_BATCH_SIZE_LENGTH)\ FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_DYNAMIC_BATCH_SIZE_LENGTH)\
FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_INFER_FEATURES_LENGTH) /* DEPRECATED */\ FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_INFER_FEATURES_LENGTH) /* DEPRECATED */\
FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_CONFIG_CHANNEL_INFOS)\ FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_CONFIG_CHANNEL_INFOS)\
FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_IS_BATCH_SIZE_FLOW_LENGTH)\ FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_IS_BATCH_SIZE_FLOW_LENGTH) /* DEPRECATED */\
FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_CONTEXT_SWITCH_CONTEXT_TYPE_LENGTH)\ FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_CONTEXT_SWITCH_CONTEXT_TYPE_LENGTH)\
FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_CONTEXT_SWITCH_CONTEXT_NETWORK_GROUP_ID_LENGTH)\ FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_CONTEXT_SWITCH_CONTEXT_NETWORK_GROUP_ID_LENGTH)\
FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_SET_SLEEP_STATE_FAILED)\ FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_SET_SLEEP_STATE_FAILED)\
@@ -552,10 +553,13 @@ Updating rules:
FIRMWARE_STATUS__X(PCIE_SERVICE__WAIT_UNTIL_CHANNEL_IS_IDLE_REACHED_TIMEOUT)\ FIRMWARE_STATUS__X(PCIE_SERVICE__WAIT_UNTIL_CHANNEL_IS_IDLE_REACHED_TIMEOUT)\
FIRMWARE_STATUS__X(PCIE_SERVICE_STATUS_UNSUPPORTED_PERIPH_BYTES_PER_BUFFER)\ FIRMWARE_STATUS__X(PCIE_SERVICE_STATUS_UNSUPPORTED_PERIPH_BYTES_PER_BUFFER)\
FIRMWARE_STATUS__X(PCIE_SERVICE_STATUS_GLUE_LOGIC_CHANNEL_OUT_OF_RANGE)\ FIRMWARE_STATUS__X(PCIE_SERVICE_STATUS_GLUE_LOGIC_CHANNEL_OUT_OF_RANGE)\
FIRMWARE_STATUS__X(PCIE_SERVICE_STATUS_INVALID_H2D_CHANNEL_INDEX)\ FIRMWARE_STATUS__X(PCIE_SERVICE_STATUS_INVALID_H2D_GLUE_LOGIC_CHANNEL_INDEX)\
FIRMWARE_STATUS__X(PCIE_SERVICE_STATUS_INVALID_D2H_CHANNEL_INDEX)\ FIRMWARE_STATUS__X(PCIE_SERVICE_STATUS_INVALID_D2H_GLUE_LOGIC_CHANNEL_INDEX)\
FIRMWARE_STATUS__X(PCIE_SERVICE_INVALID_INITIAL_CREDIT_SIZE)\ FIRMWARE_STATUS__X(PCIE_SERVICE_INVALID_INITIAL_CREDIT_SIZE)\
FIRMWARE_STATUS__X(PCIE_SERVICE_ERROR_ADDING_CREDITS_TO_PCIE_CHANNEL)\ FIRMWARE_STATUS__X(PCIE_SERVICE_ERROR_ADDING_CREDITS_TO_PCIE_CHANNEL)\
FIRMWARE_STATUS__X(PCIE_SERVICE_STATUS_INVALID_STREAM_INDEX)\
FIRMWARE_STATUS__X(PCIE_SERVICE_STATUS_INVALID_CHANNEL_TYPE)\
FIRMWARE_STATUS__X(PCIE_SERVICE_STATUS_INVALID_DESC_PAGE_SIZE)\
\ \
FIRMWARE_MODULE__X(FIRMWARE_MODULE__FIRMWARE_UPDATE)\ FIRMWARE_MODULE__X(FIRMWARE_MODULE__FIRMWARE_UPDATE)\
FIRMWARE_STATUS__X(FIRMWARE_UPDATE_STATUS_INVALID_PARAMETERS)\ FIRMWARE_STATUS__X(FIRMWARE_UPDATE_STATUS_INVALID_PARAMETERS)\
@@ -703,7 +707,7 @@ Updating rules:
FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_ADDING_CREDITS_IS_ALLOWED_ONLY_FOR_EDGE_LAYER_DIRECTION_HOST_TO_DEVICE)\ FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_ADDING_CREDITS_IS_ALLOWED_ONLY_FOR_EDGE_LAYER_DIRECTION_HOST_TO_DEVICE)\
FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_PCIE_CHANNEL_INDEX_AND_DIRECTION_MISMATCH)\ FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_PCIE_CHANNEL_INDEX_AND_DIRECTION_MISMATCH)\
FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_INVALID_ACTION_LIST_OFFSET)\ FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_INVALID_ACTION_LIST_OFFSET)\
FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_CHANGING_APP_IS_ALLOWED_IN_RESET_STATE_ONLY)\ FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_CHANGING_APP_IS_NOT_ALLOWED)\
FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_INVALID_BATCH_SIZE)\ FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_INVALID_BATCH_SIZE)\
FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_RECEIVED_CONFIG_BREAKPOINT_BEFORE_INIT_STATE_DONE)\ FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_RECEIVED_CONFIG_BREAKPOINT_BEFORE_INIT_STATE_DONE)\
FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_RECEIVED_INVALID_APPLICATION_INDEX)\ FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_RECEIVED_INVALID_APPLICATION_INDEX)\
@@ -758,6 +762,7 @@ Updating rules:
FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_REACHED_TIMEOUT_WHILE_WAITING_FOR_NETWORK_IDLE)\ FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_REACHED_TIMEOUT_WHILE_WAITING_FOR_NETWORK_IDLE)\
FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_WRITE_DATA_BY_TYPE_ACTION_INVALID_TYPE)\ FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_WRITE_DATA_BY_TYPE_ACTION_INVALID_TYPE)\
FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_WRITE_DATA_BY_TYPE_ACTION_INVALID_MEMORY_SPACE)\ FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_WRITE_DATA_BY_TYPE_ACTION_INVALID_MEMORY_SPACE)\
FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_REACHED_TIMEOUT_WHILE_WAITING_FOR_BATCH_SWITCH_CONTEXT_TO_END)\
\ \
FIRMWARE_MODULE__X(FIRMWARE_MODULE__D2H_EVENT_MANAGER)\ FIRMWARE_MODULE__X(FIRMWARE_MODULE__D2H_EVENT_MANAGER)\
FIRMWARE_STATUS__X(HAILO_D2H_EVENT_MANAGER_STATUS_MESSAGE_HIGH_PRIORITY_QUEUE_CREATE_FAILED)\ FIRMWARE_STATUS__X(HAILO_D2H_EVENT_MANAGER_STATUS_MESSAGE_HIGH_PRIORITY_QUEUE_CREATE_FAILED)\
@@ -865,6 +870,7 @@ Updating rules:
FIRMWARE_MODULE__X(FIRMWARE_MODULE__GPIO)\ FIRMWARE_MODULE__X(FIRMWARE_MODULE__GPIO)\
FIRMWARE_STATUS__X(GPIO_BAD_GPIO_INDEX)\ FIRMWARE_STATUS__X(GPIO_BAD_GPIO_INDEX)\
FIRMWARE_STATUS__X(GPIO_BAD_PINMUX_GROUP)\ FIRMWARE_STATUS__X(GPIO_BAD_PINMUX_GROUP)\
FIRMWARE_STATUS__X(GPIO_SETUP_PINMUX_NOT_SUPPORTED)\
\ \
FIRMWARE_MODULE__X(FIRMWARE_MODULE__OVERCURRENT_PROTECTION)\ FIRMWARE_MODULE__X(FIRMWARE_MODULE__OVERCURRENT_PROTECTION)\
FIRMWARE_STATUS__X(OVERCURRENT_PROTECTION_INVALID_ALERT_THRESHOLD_VALUE) /* DEPRECATED */\ FIRMWARE_STATUS__X(OVERCURRENT_PROTECTION_INVALID_ALERT_THRESHOLD_VALUE) /* DEPRECATED */\
@@ -1046,6 +1052,7 @@ Updating rules:
FIRMWARE_STATUS__X(DRAM_DMA_SERVICE_STATUS_INVALID_BYTES_IN_PATTERN)\ FIRMWARE_STATUS__X(DRAM_DMA_SERVICE_STATUS_INVALID_BYTES_IN_PATTERN)\
FIRMWARE_STATUS__X(DRAM_DMA_SERVICE_STATUS_INVALID_STREAM_INDEX)\ FIRMWARE_STATUS__X(DRAM_DMA_SERVICE_STATUS_INVALID_STREAM_INDEX)\
FIRMWARE_STATUS__X(DRAM_DMA_SERVICE_STATUS_INVALID_CHANNEL_INDEX)\ FIRMWARE_STATUS__X(DRAM_DMA_SERVICE_STATUS_INVALID_CHANNEL_INDEX)\
FIRMWARE_STATUS__X(DRAM_DMA_SERVICE_STATUS_FAILED_TO_RESET_QM_CREDITS)\
\ \
FIRMWARE_MODULE__X(FIRMWARE_MODULE__NN_CORE_SERVICE)\ FIRMWARE_MODULE__X(FIRMWARE_MODULE__NN_CORE_SERVICE)\
FIRMWARE_STATUS__X(NN_CORE_SERVICE_STATUS_INVALID_ARG_PASSED)\ FIRMWARE_STATUS__X(NN_CORE_SERVICE_STATUS_INVALID_ARG_PASSED)\
@@ -1060,6 +1067,8 @@ Updating rules:
FIRMWARE_STATUS__X(DATA_STREAM_MANAGER_STATUS_INVALID_CREDIT_TYPE)\ FIRMWARE_STATUS__X(DATA_STREAM_MANAGER_STATUS_INVALID_CREDIT_TYPE)\
FIRMWARE_STATUS__X(DATA_STREAM_MANAGER_WRAPPER_STATUS_INVALID_HOST_BUFFER_TYPE)\ FIRMWARE_STATUS__X(DATA_STREAM_MANAGER_WRAPPER_STATUS_INVALID_HOST_BUFFER_TYPE)\
FIRMWARE_STATUS__X(DATA_STREAM_MANAGER_STATUS_BATCH_CREDITS_OVERFLOW)\ FIRMWARE_STATUS__X(DATA_STREAM_MANAGER_STATUS_BATCH_CREDITS_OVERFLOW)\
FIRMWARE_STATUS__X(DATA_STREAM_MANAGER_WRAPPER_STATUS_BURST_CREDIT_TASK_MUST_BE_DISABLED_WHILE_CHANGING_BATCH)\
FIRMWARE_STATUS__X(DATA_STREAM_MANAGER_WRAPPER_STATUS_UNABLE_TO_RESET_FRAME_COUNTER)\
\ \
FIRMWARE_MODULE__X(FIRMWARE_MODULE__BURST_CREDITS_TASK)\ FIRMWARE_MODULE__X(FIRMWARE_MODULE__BURST_CREDITS_TASK)\
FIRMWARE_STATUS__X(BURST_CREDITS_TASK_STATUS_TRYING_TO_ADD_ACTION_WHILE_NOT_IN_IDLE_STATE)\ FIRMWARE_STATUS__X(BURST_CREDITS_TASK_STATUS_TRYING_TO_ADD_ACTION_WHILE_NOT_IN_IDLE_STATE)\
@@ -1067,6 +1076,9 @@ Updating rules:
FIRMWARE_STATUS__X(BURST_CREDITS_TASK_STATUS_TRYING_TO_CHANGE_STATE_TO_INFER_WHILE_ALREADY_IN_INFER)\ FIRMWARE_STATUS__X(BURST_CREDITS_TASK_STATUS_TRYING_TO_CHANGE_STATE_TO_INFER_WHILE_ALREADY_IN_INFER)\
FIRMWARE_STATUS__X(BURST_CREDITS_TASK_STATUS_INFER_REACHED_TIMEOUT)\ FIRMWARE_STATUS__X(BURST_CREDITS_TASK_STATUS_INFER_REACHED_TIMEOUT)\
FIRMWARE_STATUS__X(BURST_CREDITS_TASK_STATUS_TASK_DEACTIVATED)\ FIRMWARE_STATUS__X(BURST_CREDITS_TASK_STATUS_TASK_DEACTIVATED)\
FIRMWARE_STATUS__X(BURST_CREDITS_TASK_STATUS_FAILED_TO_FIND_STREAM_INDEX)\
FIRMWARE_STATUS__X(BURST_CREDITS_TASK_STATUS_TASK_NO_CONFIGURED_ACTIONS)\
FIRMWARE_STATUS__X(BURST_CREDITS_TASK_STATUS_TASK_EXPECTED_HIGHER_BATCH)\
\ \
FIRMWARE_MODULE__X(FIRMWARE_MODULE__TASK_SYNC_EVENTS)\ FIRMWARE_MODULE__X(FIRMWARE_MODULE__TASK_SYNC_EVENTS)\
FIRMWARE_STATUS__X(TASK_SYNC_EVENTS_STATUS_START_TASK_WHILE_IT_IS_RUNNING)\ FIRMWARE_STATUS__X(TASK_SYNC_EVENTS_STATUS_START_TASK_WHILE_IT_IS_RUNNING)\
@@ -1097,6 +1109,7 @@ Updating rules:
FIRMWARE_STATUS__X(CLUSTER_MANAGER_STATUS_INVALID_KERNEL_DONE_ADDRESS)\ FIRMWARE_STATUS__X(CLUSTER_MANAGER_STATUS_INVALID_KERNEL_DONE_ADDRESS)\
FIRMWARE_STATUS__X(CLUSTER_MANAGER_STATUS_RECEIVED_UNEXPECTED_INTERRUPT)\ FIRMWARE_STATUS__X(CLUSTER_MANAGER_STATUS_RECEIVED_UNEXPECTED_INTERRUPT)\
FIRMWARE_STATUS__X(CLUSTER_MANAGER_STATUS_INVALID_NETWORK_INDEX)\ FIRMWARE_STATUS__X(CLUSTER_MANAGER_STATUS_INVALID_NETWORK_INDEX)\
FIRMWARE_STATUS__X(CLUSTER_MANAGER_STATUS_INVALID_KERNEL_DONE_COUNT)\
\ \
FIRMWARE_MODULE__X(FIRMWARE_MODULE__HW_INFER_MANAGER)\ FIRMWARE_MODULE__X(FIRMWARE_MODULE__HW_INFER_MANAGER)\
FIRMWARE_STATUS__X(HW_INFER_MANAGER_STATUS_NETWORK_GROUP_NOT_CONFIGURED_BEFORE_INFER_START)\ FIRMWARE_STATUS__X(HW_INFER_MANAGER_STATUS_NETWORK_GROUP_NOT_CONFIGURED_BEFORE_INFER_START)\

View File

@@ -50,6 +50,9 @@ static HAILO_COMMON_STATUS_t firmware_header_utils__validate_fw_header(uintptr_t
case FIRMWARE_TYPE_HAILO15: case FIRMWARE_TYPE_HAILO15:
firmware_magic = FIRMWARE_HEADER_MAGIC_HAILO15; firmware_magic = FIRMWARE_HEADER_MAGIC_HAILO15;
break; break;
case FIRMWARE_TYPE_PLUTO:
firmware_magic = FIRMWARE_HEADER_MAGIC_PLUTO;
break;
default: default:
status = HAILO_STATUS__FIRMWARE_HEADER_UTILS__INVALID_FIRMWARE_TYPE; status = HAILO_STATUS__FIRMWARE_HEADER_UTILS__INVALID_FIRMWARE_TYPE;
goto exit; goto exit;

12
hailort/.gitignore vendored Normal file
View File

@@ -0,0 +1,12 @@
/external/
cmake/external/benchmark/
cmake/external/catch2/
cmake/external/dotwriter/
cmake/external/json/
cmake/external/pybind11/
cmake/external/readerwriterqueue/
cmake/external/spdlog/
pre_build/external/build/
pre_build/tools/build_protoc/
pre_build/install/

View File

@@ -30,7 +30,7 @@ endif()
# Set firmware version # Set firmware version
add_definitions( -DFIRMWARE_VERSION_MAJOR=4 ) add_definitions( -DFIRMWARE_VERSION_MAJOR=4 )
add_definitions( -DFIRMWARE_VERSION_MINOR=14 ) add_definitions( -DFIRMWARE_VERSION_MINOR=15 )
add_definitions( -DFIRMWARE_VERSION_REVISION=0 ) add_definitions( -DFIRMWARE_VERSION_REVISION=0 )
if(HAILO_BUILD_SERVICE) if(HAILO_BUILD_SERVICE)
add_definitions( -DHAILO_SUPPORT_MULTI_PROCESS ) add_definitions( -DHAILO_SUPPORT_MULTI_PROCESS )
@@ -39,11 +39,6 @@ endif()
# The logic of prepare_externals is executed in a sperate module so that it can be run externally (via cmake -P prepare_externals.cmake) # The logic of prepare_externals is executed in a sperate module so that it can be run externally (via cmake -P prepare_externals.cmake)
include(prepare_externals.cmake) include(prepare_externals.cmake)
# BENCHMARK_ENABLE_TESTING can be used by other 3rd party projects, therefore we define it
# before adding projects
set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "Enable testing of the benchmark library.")
add_subdirectory(external/benchmark EXCLUDE_FROM_ALL)
# Include host protobuf for protoc (https://stackoverflow.com/questions/53651181/cmake-find-protobuf-package-in-custom-directory) # Include host protobuf for protoc (https://stackoverflow.com/questions/53651181/cmake-find-protobuf-package-in-custom-directory)
if(CMAKE_HOST_UNIX) if(CMAKE_HOST_UNIX)
include(${CMAKE_CURRENT_LIST_DIR}/pre_build/install/lib/cmake/protobuf/protobuf-config.cmake) include(${CMAKE_CURRENT_LIST_DIR}/pre_build/install/lib/cmake/protobuf/protobuf-config.cmake)
@@ -53,6 +48,8 @@ else()
include(${CMAKE_CURRENT_LIST_DIR}/pre_build/install/cmake/protobuf-module.cmake) include(${CMAKE_CURRENT_LIST_DIR}/pre_build/install/cmake/protobuf-module.cmake)
endif() endif()
set(HAILO_EXTERNALS_CMAKE_SCRIPTS ${CMAKE_CURRENT_LIST_DIR}/cmake/external/)
# Add target protobuf directory and exclude its targets from all # Add target protobuf directory and exclude its targets from all
# Disable protobuf tests, protoc and MSVC static runtime unless they are already defined # Disable protobuf tests, protoc and MSVC static runtime unless they are already defined
# NOTE: we can also force - set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build protobuf tests" FORCE) # NOTE: we can also force - set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build protobuf tests" FORCE)
@@ -90,16 +87,9 @@ set(COMMON_INC_DIR ${PROJECT_SOURCE_DIR}/common/include)
set(DRIVER_INC_DIR ${PROJECT_SOURCE_DIR}/hailort/drivers/common) set(DRIVER_INC_DIR ${PROJECT_SOURCE_DIR}/hailort/drivers/common)
set(RPC_DIR ${PROJECT_SOURCE_DIR}/hailort/rpc) set(RPC_DIR ${PROJECT_SOURCE_DIR}/hailort/rpc)
add_subdirectory(external/Catch2 EXCLUDE_FROM_ALL)
add_subdirectory(external/CLI11 EXCLUDE_FROM_ALL) add_subdirectory(external/CLI11 EXCLUDE_FROM_ALL)
add_subdirectory(external/json EXCLUDE_FROM_ALL)
add_subdirectory(external/DotWriter EXCLUDE_FROM_ALL)
add_subdirectory(external/spdlog EXCLUDE_FROM_ALL)
set_target_properties(spdlog PROPERTIES POSITION_INDEPENDENT_CODE ON)
if(CMAKE_SYSTEM_NAME STREQUAL QNX) if(CMAKE_SYSTEM_NAME STREQUAL QNX)
add_library(pevents STATIC EXCLUDE_FROM_ALL external/pevents/src/pevents.cpp) include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/pevents.cmake)
target_include_directories(pevents PUBLIC external/pevents/src)
target_compile_definitions(pevents PRIVATE -DWFMO)
endif() endif()
if(HAILO_BUILD_SERVICE) if(HAILO_BUILD_SERVICE)

View File

@@ -1,15 +1,16 @@
| Package | Copyright (c) | License | Version | Notes | References | | Package | Copyright (c) | License | Version | Notes | References |
|:---------------------------------|:----------------------------------|:-------------------|:---------------|:-------------------------------------------|:------------------------------------------------------------------------------| |:---------------------------------|:----------------------------------|:-------------------|:---------------|:----------------------------------------------|:------------------------------------------------------------------------------|
| CLI11 | University of Cincinnati | 3-Clause BSD | 2.2.0 | Fork | https://github.com/hailo-ai/CLI11 | | CLI11 | University of Cincinnati | 3-Clause BSD | 2.2.0 | Fork | https://github.com/hailo-ai/CLI11 |
| Catch2 | Catch2 Authors | BSL-1.0 | 2.13.7 | Cloned entire package | https://github.com/catchorg/Catch2 | | Catch2 | Catch2 Authors | BSL-1.0 | 2.13.7 | Cloned entire package | https://github.com/catchorg/Catch2 |
| protobuf | Google Inc. | BSD | 3.19.4 | Cloned entire package | https://github.com/protocolbuffers/protobuf | | protobuf | Google Inc. | BSD | 3.19.4 | Cloned entire package | https://github.com/protocolbuffers/protobuf |
| pybind11 | Wenzel Jakob | BSD | 2.10.1 | Cloned entire package | https://github.com/pybind/pybind11 | | pybind11 | Wenzel Jakob | BSD | 2.10.1 | Cloned entire package | https://github.com/pybind/pybind11 |
| spdlog | Gabi Melman | MIT | 1.6.1 | Cloned entire package | https://github.com/gabime/spdlog | | spdlog | Gabi Melman | MIT | 1.6.1 | Cloned entire package | https://github.com/gabime/spdlog |
| folly | Facebook, Inc. and its affiliates | Apache License 2.0 | v2020.08.17.00 | Copied only the file `folly/TokenBucket.h` | https://github.com/facebook/folly | | folly | Facebook, Inc. and its affiliates | Apache License 2.0 | v2020.08.17.00 | Copied only the file `folly/TokenBucket.h` | https://github.com/facebook/folly |
| nlohmann_json_cmake_fetchcontent | ArthurSonzogni | MIT License | v3.9.1 | Cloned entire package | https://github.com/ArthurSonzogni/nlohmann_json_cmake_fetchcontent | | nlohmann_json_cmake_fetchcontent | ArthurSonzogni | MIT License | v3.9.1 | Cloned entire package | https://github.com/ArthurSonzogni/nlohmann_json_cmake_fetchcontent |
| readerwriterqueue | Cameron Desrochers | Simplified BSD | 1.0.3 | Cloned entire package | https://github.com/cameron314/readerwriterqueue | | readerwriterqueue | Cameron Desrochers | Simplified BSD | 1.0.3 | Cloned entire package | https://github.com/cameron314/readerwriterqueue |
| DotWriter | John Vilk | MIT License | master | Fork | https://github.com/hailo-ai/DotWriter | | DotWriter | John Vilk | MIT License | master | Fork | https://github.com/hailo-ai/DotWriter |
| benchmark | Google Inc. | Apache License 2.0 | 1.6.0 | Cloned entire package | https://github.com/google/benchmark.git | | benchmark | Google Inc. | Apache License 2.0 | 1.6.0 | Cloned entire package | https://github.com/google/benchmark.git |
| md5 | Alexander Peslyak | cut-down BSD | - | Copied code from website | http://openwall.info/wiki/people/solar/software/public-domain-source-code/md5 | | md5 | Alexander Peslyak | cut-down BSD | - | Copied code from website | http://openwall.info/wiki/people/solar/software/public-domain-source-code/md5 |
| pevents | Mahmoud Al-Qudsi | MIT License | master | Cloned entire package | https://github.com/neosmart/pevents.git | | pevents | Mahmoud Al-Qudsi | MIT License | master | Cloned entire package | https://github.com/neosmart/pevents.git |
| grpc | Google Inc. | Apache License 2.0 | 1.46.0 | Cloned entire package | https://github.com/grpc/grpc | | grpc | Google Inc. | Apache License 2.0 | 1.46.0 | Cloned entire package | https://github.com/grpc/grpc |
| stb | Sean Barrett | MIT License | 0.97 | Copied only the file `stb/stb_image_resize.h` | https://github.com/nothings/stb |

View File

@@ -8,7 +8,7 @@ function(execute_process_in_clean_env)
else() else()
# TODO: make it clean env for cross compile # TODO: make it clean env for cross compile
set(cmdline ${execute_process_in_clean_env_UNPARSED_ARGUMENTS}) set(cmdline ${execute_process_in_clean_env_UNPARSED_ARGUMENTS})
execute_process(COMMAND cmd /C ${cmdline} OUTPUT_QUIET RESULT_VARIABLE result) execute_process(COMMAND ${cmdline} OUTPUT_QUIET RESULT_VARIABLE result)
endif() endif()
if(DEFINED ${execute_process_in_clean_env_RESULT_VARIABLE}) if(DEFINED ${execute_process_in_clean_env_RESULT_VARIABLE})
set(${execute_process_in_clean_env_RESULT_VARIABLE} ${result} PARENT_SCOPE) set(${execute_process_in_clean_env_RESULT_VARIABLE} ${result} PARENT_SCOPE)

27
hailort/cmake/external/benchmark.cmake vendored Normal file
View File

@@ -0,0 +1,27 @@
cmake_minimum_required(VERSION 3.11.0)
include(FetchContent)
# BENCHMARK_ENABLE_TESTING can be used by other 3rd party projects, therefore we define it
# before adding projects
set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "Enable testing of the benchmark library.")
FetchContent_Declare(
benchmark
GIT_REPOSITORY https://github.com/google/benchmark.git
GIT_TAG f91b6b42b1b9854772a90ae9501464a161707d1e # Version 1.6.0
GIT_SHALLOW TRUE
SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/benchmark"
BINARY_DIR "${CMAKE_CURRENT_LIST_DIR}/benchmark"
)
if(NOT HAILO_OFFLINE_COMPILATION)
# https://stackoverflow.com/questions/65527126/disable-install-for-fetchcontent
FetchContent_GetProperties(benchmark)
if(NOT benchmark_POPULATED)
FetchContent_Populate(benchmark)
add_subdirectory(${benchmark_SOURCE_DIR} ${benchmark_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()
else()
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/benchmark EXCLUDE_FROM_ALL)
endif()

23
hailort/cmake/external/catch2.cmake vendored Normal file
View File

@@ -0,0 +1,23 @@
cmake_minimum_required(VERSION 3.11.0)
include(FetchContent)
FetchContent_Declare(
catch2
GIT_REPOSITORY https://github.com/catchorg/Catch2.git
GIT_TAG c4e3767e265808590986d5db6ca1b5532a7f3d13 # Version 2.13.7
GIT_SHALLOW TRUE
SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/catch2"
BINARY_DIR "${CMAKE_CURRENT_LIST_DIR}/catch2"
)
if(NOT HAILO_OFFLINE_COMPILATION)
# https://stackoverflow.com/questions/65527126/disable-install-for-fetchcontent
FetchContent_GetProperties(catch2)
if(NOT catch2_POPULATED)
FetchContent_Populate(catch2)
add_subdirectory(${catch2_SOURCE_DIR} ${catch2_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()
else()
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/catch2 EXCLUDE_FROM_ALL)
endif()

23
hailort/cmake/external/dotwriter.cmake vendored Normal file
View File

@@ -0,0 +1,23 @@
cmake_minimum_required(VERSION 3.11.0)
include(FetchContent)
FetchContent_Declare(
dotwriter
GIT_REPOSITORY https://github.com/hailo-ai/DotWriter
GIT_TAG e5fa8f281adca10dd342b1d32e981499b8681daf # Version master
GIT_SHALLOW TRUE
SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/dotwriter"
BINARY_DIR "${CMAKE_CURRENT_LIST_DIR}/dotwriter"
)
if(NOT HAILO_OFFLINE_COMPILATION)
# https://stackoverflow.com/questions/65527126/disable-install-for-fetchcontent
FetchContent_GetProperties(dotwriter)
if(NOT dotwriter_POPULATED)
FetchContent_Populate(dotwriter)
add_subdirectory(${dotwriter_SOURCE_DIR} ${dotwriter_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()
else()
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/dotwriter EXCLUDE_FROM_ALL)
endif()

23
hailort/cmake/external/json.cmake vendored Normal file
View File

@@ -0,0 +1,23 @@
cmake_minimum_required(VERSION 3.11.0)
include(FetchContent)
FetchContent_Declare(
json
GIT_REPOSITORY https://github.com/ArthurSonzogni/nlohmann_json_cmake_fetchcontent.git
GIT_TAG 391786c6c3abdd3eeb993a3154f1f2a4cfe137a0 # Version 3.9.1
GIT_SHALLOW TRUE
SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/json"
BINARY_DIR "${CMAKE_CURRENT_LIST_DIR}/json"
)
if(NOT HAILO_OFFLINE_COMPILATION)
# https://stackoverflow.com/questions/65527126/disable-install-for-fetchcontent
FetchContent_GetProperties(json)
if(NOT json_POPULATED)
FetchContent_Populate(json)
add_subdirectory(${json_SOURCE_DIR} ${json_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()
else()
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/json EXCLUDE_FROM_ALL)
endif()

26
hailort/cmake/external/pevents.cmake vendored Normal file
View File

@@ -0,0 +1,26 @@
cmake_minimum_required(VERSION 3.11.0)
include(FetchContent)
FetchContent_Declare(
pevents
GIT_REPOSITORY https://github.com/neosmart/pevents.git
GIT_TAG 1209b1fd1bd2e75daab4380cf43d280b90b45366 # Master
#GIT_SHALLOW TRUE
SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/pevents"
BINARY_DIR "${CMAKE_CURRENT_LIST_DIR}/pevents"
)
if(NOT HAILO_OFFLINE_COMPILATION)
# https://stackoverflow.com/questions/65527126/disable-install-for-fetchcontent
FetchContent_GetProperties(pevents)
if(NOT pevents_POPULATED)
FetchContent_Populate(pevents)
endif()
endif()
if(NOT TARGET pevents)
add_library(pevents STATIC EXCLUDE_FROM_ALL ${pevents_SOURCE_DIR}/src/pevents.cpp)
target_include_directories(pevents PUBLIC ${pevents_SOURCE_DIR}/src)
target_compile_definitions(pevents PRIVATE -DWFMO)
endif()

View File

@@ -18,7 +18,7 @@ FetchContent_Declare(
pybind11 pybind11
GIT_REPOSITORY https://github.com/pybind/pybind11.git GIT_REPOSITORY https://github.com/pybind/pybind11.git
GIT_TAG 80dc998efced8ceb2be59756668a7e90e8bef917 # Version 2.10.1 GIT_TAG 80dc998efced8ceb2be59756668a7e90e8bef917 # Version 2.10.1
#GIT_SHALLOW TRUE GIT_SHALLOW TRUE
SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/pybind11" SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/pybind11"
BINARY_DIR "${CMAKE_CURRENT_LIST_DIR}/pybind11" BINARY_DIR "${CMAKE_CURRENT_LIST_DIR}/pybind11"
) )

View File

@@ -0,0 +1,26 @@
cmake_minimum_required(VERSION 3.11.0)
include(FetchContent)
FetchContent_Declare(
readerwriterqueue
GIT_REPOSITORY https://github.com/cameron314/readerwriterqueue
GIT_TAG 435e36540e306cac40fcfeab8cc0a22d48464509 # Version 1.0.3
GIT_SHALLOW TRUE
SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/readerwriterqueue"
BINARY_DIR "${CMAKE_CURRENT_LIST_DIR}/readerwriterqueue"
)
if(NOT HAILO_OFFLINE_COMPILATION)
# https://stackoverflow.com/questions/65527126/disable-install-for-fetchcontent
FetchContent_GetProperties(readerwriterqueue)
if(NOT readerwriterqueue_POPULATED)
FetchContent_Populate(readerwriterqueue)
endif()
endif()
if(NOT TARGET readerwriterqueue)
# Add readerwriterqueue as a header-only library
add_library(readerwriterqueue INTERFACE)
target_include_directories(readerwriterqueue INTERFACE ${readerwriterqueue_SOURCE_DIR})
endif()

24
hailort/cmake/external/spdlog.cmake vendored Normal file
View File

@@ -0,0 +1,24 @@
cmake_minimum_required(VERSION 3.11.0)
include(FetchContent)
FetchContent_Declare(
spdlog
GIT_REPOSITORY https://github.com/gabime/spdlog
GIT_TAG 22a169bc319ac06948e7ee0be6b9b0ac81386604
GIT_SHALLOW TRUE
SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/spdlog"
BINARY_DIR "${CMAKE_CURRENT_LIST_DIR}/spdlog"
)
if(NOT HAILO_OFFLINE_COMPILATION)
# https://stackoverflow.com/questions/65527126/disable-install-for-fetchcontent
FetchContent_GetProperties(spdlog)
if(NOT spdlog_POPULATED)
FetchContent_Populate(spdlog)
add_subdirectory(${spdlog_SOURCE_DIR} ${spdlog_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()
else()
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/spdlog EXCLUDE_FROM_ALL)
endif()
set_target_properties(spdlog PROPERTIES POSITION_INDEPENDENT_CODE ON)

View File

@@ -20,6 +20,7 @@ set(SRC_FILES
${CMAKE_CURRENT_SOURCE_DIR}/file_utils.cpp ${CMAKE_CURRENT_SOURCE_DIR}/file_utils.cpp
${CMAKE_CURRENT_SOURCE_DIR}/string_utils.cpp ${CMAKE_CURRENT_SOURCE_DIR}/string_utils.cpp
${CMAKE_CURRENT_SOURCE_DIR}/event_internal.cpp ${CMAKE_CURRENT_SOURCE_DIR}/event_internal.cpp
${CMAKE_CURRENT_SOURCE_DIR}/fork_support.cpp
${CMAKE_CURRENT_SOURCE_DIR}/device_measurements.cpp ${CMAKE_CURRENT_SOURCE_DIR}/device_measurements.cpp
) )

View File

@@ -36,7 +36,7 @@ typedef struct {
#define _CB_FETCH(x) (InterlockedOr((LONG volatile*)(&x), (LONG)0)) #define _CB_FETCH(x) (InterlockedOr((LONG volatile*)(&x), (LONG)0))
#define _CB_SET(x, value) (InterlockedExchange((LONG volatile*)(&x), (LONG)(value))) #define _CB_SET(x, value) (InterlockedExchange((LONG volatile*)(&x), (LONG)(value)))
#else #else
#define _CB_FETCH(x) (__sync_fetch_and_or(&(x), 0)) #define _CB_FETCH(x) (__sync_fetch_and_or(const_cast<volatile int*>(&(x)), 0))
#define _CB_SET(x, value) ((void)__sync_lock_test_and_set(&(x), value)) #define _CB_SET(x, value) ((void)__sync_lock_test_and_set(&(x), value))
#endif #endif
@@ -155,22 +155,22 @@ public:
} }
} }
bool empty() bool empty() const
{ {
return CB_HEAD(m_circ) == CB_TAIL(m_circ); return CB_HEAD(m_circ) == CB_TAIL(m_circ);
} }
bool full() bool full() const
{ {
return 0 == CB_AVAIL(m_circ, CB_HEAD(m_circ), CB_TAIL(m_circ)); return 0 == CB_AVAIL(m_circ, CB_HEAD(m_circ), CB_TAIL(m_circ));
} }
size_t size() size_t size() const
{ {
return CB_PROG(m_circ, CB_HEAD(m_circ), CB_TAIL(m_circ)); return CB_PROG(m_circ, CB_HEAD(m_circ), CB_TAIL(m_circ));
} }
size_t capacity() size_t capacity() const
{ {
return CB_SIZE(m_circ) - 1; return CB_SIZE(m_circ) - 1;
} }

View File

@@ -61,6 +61,11 @@ hailo_status WaitOrShutdown::signal()
return m_waitable->signal(); return m_waitable->signal();
} }
hailo_status WaitOrShutdown::shutdown()
{
return m_shutdown_event->signal();
}
WaitableGroup WaitOrShutdown::create_waitable_group(WaitablePtr waitable, EventPtr shutdown_event) WaitableGroup WaitOrShutdown::create_waitable_group(WaitablePtr waitable, EventPtr shutdown_event)
{ {
// Note the order - consistent with SHUTDOWN_INDEX, WAITABLE_INDEX. // Note the order - consistent with SHUTDOWN_INDEX, WAITABLE_INDEX.

View File

@@ -93,6 +93,7 @@ public:
// * On any failure an appropriate status shall be returned // * On any failure an appropriate status shall be returned
hailo_status wait(std::chrono::milliseconds timeout); hailo_status wait(std::chrono::milliseconds timeout);
hailo_status signal(); hailo_status signal();
hailo_status shutdown();
private: private:
static WaitableGroup create_waitable_group(WaitablePtr waitable, EventPtr shutdown_event); static WaitableGroup create_waitable_group(WaitablePtr waitable, EventPtr shutdown_event);

View File

@@ -1,10 +1,19 @@
#include "vdma/channel/channel_state.hpp" /**
* Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file fork_support.cpp
**/
#include "fork_support.hpp"
#include "common/logger_macros.hpp"
namespace hailort { namespace hailort
namespace vdma { {
#ifndef _MSC_VER #ifdef HAILO_IS_FORK_SUPPORTED
RecursiveSharedMutex::RecursiveSharedMutex() RecursiveSharedMutex::RecursiveSharedMutex()
{ {
// Make sharable mutex // Make sharable mutex
@@ -178,63 +187,7 @@ void SharedConditionVariable::notify_all()
pthread_cond_broadcast(&m_cond); pthread_cond_broadcast(&m_cond);
} }
#endif /* _MSC_VER */ #endif /* HAILO_IS_FORK_SUPPORTED */
Expected<std::unique_ptr<VdmaChannelState>> VdmaChannelState::create(uint32_t descs_count, bool measure_latency)
{
// Note: we implement operator new so the state object will be shared with forked processes.
auto state = make_unique_nothrow<VdmaChannelState>(descs_count, measure_latency);
CHECK_NOT_NULL_AS_EXPECTED(state, HAILO_OUT_OF_HOST_MEMORY);
return state;
}
VdmaChannelState::VdmaChannelState(uint32_t descs_count, bool measure_latency) :
m_is_channel_activated(false),
// If we measuring latency, we may get 2 interrupts for each input channel (first descriptor and last descriptor).
// Hence we must limit the transfers count to half of the actual transfers count.
m_pending_buffers(measure_latency ? PENDING_BUFFERS_SIZE/2 : PENDING_BUFFERS_SIZE),
m_d2h_read_desc_index(0),
m_d2h_read_desc_index_abs(0),
m_is_aborted(false),
m_previous_tail(0),
m_desc_list_delta(0),
m_last_timestamp_num_processed(0),
m_accumulated_transfers(0)
{
CB_INIT(m_descs, descs_count);
}
void VdmaChannelState::reset_counters()
{
CB_RESET(m_descs);
m_pending_buffers.reset();
m_last_timestamp_num_processed = 0;
m_accumulated_transfers = 0;
}
void VdmaChannelState::reset_previous_state_counters()
{
m_previous_tail = 0;
m_desc_list_delta = 0;
m_d2h_read_desc_index = 0;
m_d2h_read_desc_index_abs = 0;
}
void VdmaChannelState::add_pending_buffer(uint16_t first_desc, uint16_t last_desc, HailoRTDriver::DmaDirection direction,
const InternalTransferDoneCallback &on_transfer_done, MappedBufferPtr mapped_buffer)
{
if (m_pending_buffers.full()) {
// TODO- HRT-8900 : Fix log and check if should return error
LOGGER__ERROR("no avail space");
}
PendingBuffer pending_buffer{};
pending_buffer.last_desc = last_desc;
pending_buffer.latency_measure_desc = (direction == HailoRTDriver::DmaDirection::H2D) ? first_desc : last_desc;
pending_buffer.on_transfer_done = on_transfer_done;
pending_buffer.mapped_buffer = mapped_buffer;
m_pending_buffers.push_back(std::move(pending_buffer));
}
} /* namespace vdma */
} /* namespace hailort */ } /* namespace hailort */

View File

@@ -0,0 +1,219 @@
/**
* Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file fork_support.hpp
* @brief Utilities/classes uses to support fork in the process.
* In general, fork SHOULD NOT be supported, but we still have some places that uses fork.
* Hopefully this file will be delete as soon as possible.
**/
#ifndef _HAILO_FORK_SUPPORT_HPP_
#define _HAILO_FORK_SUPPORT_HPP_
#include <mutex>
#include <functional>
#include <map>
#include <assert.h>
#ifndef _MSC_VER
#include <sys/mman.h>
#endif
#ifndef _MSC_VER
// Windows did the right choice - not supporting fork() at all, so we don't support it either.
#define HAILO_IS_FORK_SUPPORTED
#endif
namespace hailort
{
#ifdef HAILO_IS_FORK_SUPPORTED
// Replacement for std::recursive_mutex
class RecursiveSharedMutex final {
public:
RecursiveSharedMutex();
~RecursiveSharedMutex();
RecursiveSharedMutex(const RecursiveSharedMutex &) = delete;
RecursiveSharedMutex &operator=(const RecursiveSharedMutex &) = delete;
RecursiveSharedMutex(RecursiveSharedMutex &&) = delete;
RecursiveSharedMutex &operator=(RecursiveSharedMutex &&) = delete;
void lock();
void unlock();
pthread_mutex_t *native_handle()
{
return &m_mutex;
}
private:
pthread_mutex_t m_mutex;
};
// Replacement for std::condition_variable, can work only with RecursiveSharedMutex
class SharedConditionVariable final {
public:
SharedConditionVariable();
~SharedConditionVariable();
SharedConditionVariable(const SharedConditionVariable &) = delete;
SharedConditionVariable &operator=(const SharedConditionVariable &) = delete;
SharedConditionVariable(SharedConditionVariable &&) = delete;
SharedConditionVariable &operator=(SharedConditionVariable &&) = delete;
bool wait_for(std::unique_lock<RecursiveSharedMutex> &lock, std::chrono::milliseconds timeout,
std::function<bool()> condition);
void notify_one();
void notify_all();
private:
pthread_cond_t m_cond;
};
// Objects that inherit from this class, will automatically reside in memory region shared
// between forked processed.
// virtual dtor is not implemented for this class since it shouldn't be used for polymorphism (=
// delete shouldn't be called on the SharedAllocatedObject).
class SharedAllocatedObject {
public:
void* operator new(std::size_t size) = delete;
void* operator new(std::size_t size, const std::nothrow_t&) throw()
{
// Map a shared memory region into the virtual memory of the process
void* ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
if (ptr == MAP_FAILED) {
return nullptr;
}
return ptr;
}
// Custom operator delete function that unmaps the shared memory region
void operator delete(void* ptr, std::size_t size)
{
munmap(ptr, size);
}
};
// pthread_atfork api has 2 problems:
// 1. The callbacks doesn't accept context.
// 2. Callbacks cannot be unregistered.
// In order to solve this issue, the AtForkRegistry singleton exists and manages some registry
// of atfork callbacks.
// pthread_atfork is called only once, and on the provided callbacks, the registered user callbacks
// are called.
class AtForkRegistry final {
public:
static AtForkRegistry &get_instance()
{
static AtForkRegistry at_fork;
return at_fork;
}
AtForkRegistry(const AtForkRegistry &) = delete;
AtForkRegistry &operator=(const AtForkRegistry &) = delete;
// Special key used to identify the registered callbacks. One can use `this` as
// a unique identifier.
using Key = void*;
struct AtForkCallbacks {
std::function<void()> before_fork;
std::function<void()> after_fork_in_parent;
std::function<void()> after_fork_in_child;
};
// Init this guard with AtForkCallbacks, and the callbacks will be registered until destructed.
struct AtForkGuard {
AtForkGuard(Key key, const AtForkCallbacks &callbacks) :
m_key(key)
{
AtForkRegistry::get_instance().register_atfork(key, callbacks);
}
~AtForkGuard()
{
AtForkRegistry::get_instance().unregister_atfork(m_key);
}
AtForkGuard(const AtForkGuard&) = delete;
AtForkGuard &operator=(const AtForkGuard &) = delete;
const Key m_key;
};
private:
AtForkRegistry()
{
pthread_atfork(
[]() { get_instance().before_fork(); },
[]() { get_instance().after_fork_in_parent(); },
[]() { get_instance().after_fork_in_child(); }
);
}
void register_atfork(Key key, const AtForkCallbacks &callbacks)
{
std::lock_guard<std::mutex> lock(m_mutex);
assert(m_callbacks.end() == m_callbacks.find(key));
m_callbacks[key] = callbacks;
}
void unregister_atfork(Key key)
{
std::lock_guard<std::mutex> lock(m_mutex);
assert(m_callbacks.end() != m_callbacks.find(key));
m_callbacks.erase(key);
}
void before_fork()
{
std::lock_guard<std::mutex> lock(m_mutex);
for (const auto &callback : m_callbacks) {
callback.second.before_fork();
}
}
void after_fork_in_parent()
{
std::lock_guard<std::mutex> lock(m_mutex);
for (const auto &callback : m_callbacks) {
callback.second.after_fork_in_parent();
}
}
void after_fork_in_child()
{
std::lock_guard<std::mutex> lock(m_mutex);
for (const auto &callback : m_callbacks) {
callback.second.after_fork_in_child();
}
}
std::mutex m_mutex;
std::map<Key, AtForkCallbacks> m_callbacks;
};
#else /* HAILO_IS_FORK_SUPPORTED */
using RecursiveSharedMutex = std::recursive_mutex;
using SharedConditionVariable = std::condition_variable_any;
class SharedAllocatedObject {};
#endif
} /* namespace hailort */
#endif /* _HAILO_FORK_SUPPORT_HPP_ */

View File

@@ -329,10 +329,10 @@ static uint32_t get_min_value_of_unordered_map(const std::unordered_map<K, V> &m
return min_count; return min_count;
} }
static inline bool is_env_variable_on(const char* env_var_name) static inline bool is_env_variable_on(const char* env_var_name, const char* required_value, size_t size)
{ {
auto env_var = std::getenv(env_var_name); auto env_var = std::getenv(env_var_name);
return ((nullptr != env_var) && (strnlen(env_var, 2) == 1) && (strncmp(env_var, "1", 1) == 0)); return ((nullptr != env_var) && (strncmp(env_var, required_value, size) == 0));
} }
} /* namespace hailort */ } /* namespace hailort */

View File

@@ -24,6 +24,8 @@
#define FW_ACCESS_CORE_CPU_CONTROL_MASK (1 << FW_ACCESS_CORE_CPU_CONTROL_SHIFT) #define FW_ACCESS_CORE_CPU_CONTROL_MASK (1 << FW_ACCESS_CORE_CPU_CONTROL_SHIFT)
#define FW_ACCESS_CONTROL_INTERRUPT_SHIFT (0) #define FW_ACCESS_CONTROL_INTERRUPT_SHIFT (0)
#define FW_ACCESS_APP_CPU_CONTROL_MASK (1 << FW_ACCESS_CONTROL_INTERRUPT_SHIFT) #define FW_ACCESS_APP_CPU_CONTROL_MASK (1 << FW_ACCESS_CONTROL_INTERRUPT_SHIFT)
#define FW_ACCESS_DRIVER_SHUTDOWN_SHIFT (2)
#define FW_ACCESS_DRIVER_SHUTDOWN_MASK (1 << FW_ACCESS_DRIVER_SHUTDOWN_SHIFT)
#define INVALID_VDMA_CHANNEL (0xff) #define INVALID_VDMA_CHANNEL (0xff)
@@ -317,6 +319,7 @@ struct hailo_d2h_notification {
enum hailo_board_type { enum hailo_board_type {
HAILO_BOARD_TYPE_HAILO8 = 0, HAILO_BOARD_TYPE_HAILO8 = 0,
HAILO_BOARD_TYPE_HAILO15, HAILO_BOARD_TYPE_HAILO15,
HAILO_BOARD_TYPE_PLUTO,
HAILO_BOARD_TYPE_COUNT, HAILO_BOARD_TYPE_COUNT,
/** Max enum value to maintain ABI Integrity */ /** Max enum value to maintain ABI Integrity */

View File

@@ -1,5 +1,7 @@
cmake_minimum_required(VERSION 3.0.0) cmake_minimum_required(VERSION 3.0.0)
include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/spdlog.cmake)
if(WIN32) if(WIN32)
set(HAILORT_SERVICE_OS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/windows") set(HAILORT_SERVICE_OS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/windows")
elseif(UNIX) elseif(UNIX)

View File

@@ -32,6 +32,24 @@ HailoRtRpcService::HailoRtRpcService()
}); });
} }
hailo_status HailoRtRpcService::flush_input_vstream(uint32_t handle)
{
if (is_input_vstream_aborted(handle)) {
return HAILO_SUCCESS;
}
auto lambda = [](std::shared_ptr<InputVStream> input_vstream) {
return input_vstream->flush();
};
auto &manager = ServiceResourceManager<InputVStream>::get_instance();
auto status = manager.execute<hailo_status>(handle, lambda);
if (HAILO_SUCCESS != status) {
LOGGER__ERROR("Failed to flush input vstream with status {}", status);
}
return status;
}
hailo_status HailoRtRpcService::abort_input_vstream(uint32_t handle) hailo_status HailoRtRpcService::abort_input_vstream(uint32_t handle)
{ {
if (is_input_vstream_aborted(handle)) { if (is_input_vstream_aborted(handle)) {
@@ -131,7 +149,6 @@ void HailoRtRpcService::abort_vstreams_by_pids(std::set<uint32_t> &pids)
} }
} }
void HailoRtRpcService::remove_disconnected_clients() void HailoRtRpcService::remove_disconnected_clients()
{ {
std::this_thread::sleep_for(hailort::HAILO_KEEPALIVE_INTERVAL / 2); std::this_thread::sleep_for(hailort::HAILO_KEEPALIVE_INTERVAL / 2);
@@ -172,12 +189,17 @@ void HailoRtRpcService::keep_alive()
} }
} }
void HailoRtRpcService::update_client_id_timestamp(uint32_t pid)
{
std::unique_lock<std::mutex> lock(m_mutex);
m_clients_pids[pid] = std::chrono::high_resolution_clock::now();
}
grpc::Status HailoRtRpcService::client_keep_alive(grpc::ServerContext*, const keepalive_Request *request, grpc::Status HailoRtRpcService::client_keep_alive(grpc::ServerContext*, const keepalive_Request *request,
empty*) empty*)
{ {
auto client_id = request->pid(); auto client_id = request->pid();
std::unique_lock<std::mutex> lock(m_mutex); update_client_id_timestamp(client_id);
m_clients_pids[client_id] = std::chrono::high_resolution_clock::now();
return grpc::Status::OK; return grpc::Status::OK;
} }
@@ -195,15 +217,6 @@ grpc::Status HailoRtRpcService::get_service_version(grpc::ServerContext*, const
return grpc::Status::OK; return grpc::Status::OK;
} }
grpc::Status HailoRtRpcService::VDevice_dup_handle(grpc::ServerContext*, const dup_handle_Request *request,
dup_handle_Reply* reply)
{
auto &manager = ServiceResourceManager<VDevice>::get_instance();
auto handle = manager.dup_handle(request->pid(), request->handle());
reply->set_handle(handle);
return grpc::Status::OK;
}
grpc::Status HailoRtRpcService::VDevice_create(grpc::ServerContext *, const VDevice_create_Request *request, grpc::Status HailoRtRpcService::VDevice_create(grpc::ServerContext *, const VDevice_create_Request *request,
VDevice_create_Reply *reply) VDevice_create_Reply *reply)
{ {
@@ -230,6 +243,7 @@ grpc::Status HailoRtRpcService::VDevice_create(grpc::ServerContext *, const VDev
auto vdevice = VDevice::create(params); auto vdevice = VDevice::create(params);
CHECK_EXPECTED_AS_RPC_STATUS(vdevice, reply); CHECK_EXPECTED_AS_RPC_STATUS(vdevice, reply);
update_client_id_timestamp(request->pid());
auto &manager = ServiceResourceManager<VDevice>::get_instance(); auto &manager = ServiceResourceManager<VDevice>::get_instance();
auto handle = manager.register_resource(request->pid(), std::move(vdevice.release())); auto handle = manager.register_resource(request->pid(), std::move(vdevice.release()));
reply->set_handle(handle); reply->set_handle(handle);
@@ -241,7 +255,7 @@ grpc::Status HailoRtRpcService::VDevice_release(grpc::ServerContext*, const Rele
Release_Reply *reply) Release_Reply *reply)
{ {
auto &manager = ServiceResourceManager<VDevice>::get_instance(); auto &manager = ServiceResourceManager<VDevice>::get_instance();
manager.release_resource(request->handle(), request->pid()); manager.release_resource(request->vdevice_identifier().vdevice_handle(), request->pid());
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS)); reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK; return grpc::Status::OK;
} }
@@ -291,11 +305,13 @@ grpc::Status HailoRtRpcService::VDevice_configure(grpc::ServerContext*, const VD
configure_params_map.insert({name_configure_params_pair.name(), network_configure_params}); configure_params_map.insert({name_configure_params_pair.name(), network_configure_params});
} }
update_client_id_timestamp(request->pid());
auto lambda = [](std::shared_ptr<VDevice> vdevice, Hef &hef, NetworkGroupsParamsMap &configure_params_map) { auto lambda = [](std::shared_ptr<VDevice> vdevice, Hef &hef, NetworkGroupsParamsMap &configure_params_map) {
return vdevice->configure(hef, configure_params_map); return vdevice->configure(hef, configure_params_map);
}; };
auto &vdevice_manager = ServiceResourceManager<VDevice>::get_instance(); auto &vdevice_manager = ServiceResourceManager<VDevice>::get_instance();
auto networks = vdevice_manager.execute<Expected<ConfiguredNetworkGroupVector>>(request->handle(), lambda, hef.release(), configure_params_map); auto networks = vdevice_manager.execute<Expected<ConfiguredNetworkGroupVector>>(request->identifier().vdevice_handle(), lambda,
hef.release(), configure_params_map);
CHECK_SUCCESS_AS_RPC_STATUS(networks.status(), reply); CHECK_SUCCESS_AS_RPC_STATUS(networks.status(), reply);
auto &networks_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); auto &networks_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
@@ -315,7 +331,7 @@ grpc::Status HailoRtRpcService::VDevice_get_physical_devices_ids(grpc::ServerCon
return vdevice->get_physical_devices_ids(); return vdevice->get_physical_devices_ids();
}; };
auto &vdevice_manager = ServiceResourceManager<VDevice>::get_instance(); auto &vdevice_manager = ServiceResourceManager<VDevice>::get_instance();
auto expected_devices_ids = vdevice_manager.execute<Expected<std::vector<std::string>>>(request->handle(), lambda); auto expected_devices_ids = vdevice_manager.execute<Expected<std::vector<std::string>>>(request->identifier().vdevice_handle(), lambda);
CHECK_EXPECTED_AS_RPC_STATUS(expected_devices_ids, reply); CHECK_EXPECTED_AS_RPC_STATUS(expected_devices_ids, reply);
auto devices_ids = expected_devices_ids.value(); auto devices_ids = expected_devices_ids.value();
auto devices_ids_proto = reply->mutable_devices_ids(); auto devices_ids_proto = reply->mutable_devices_ids();
@@ -333,17 +349,20 @@ grpc::Status HailoRtRpcService::VDevice_get_default_streams_interface(grpc::Serv
return vdevice->get_default_streams_interface(); return vdevice->get_default_streams_interface();
}; };
auto &vdevice_manager = ServiceResourceManager<VDevice>::get_instance(); auto &vdevice_manager = ServiceResourceManager<VDevice>::get_instance();
auto stream_interface = vdevice_manager.execute<Expected<hailo_stream_interface_t>>(request->handle(), lambda); auto stream_interface = vdevice_manager.execute<Expected<hailo_stream_interface_t>>(request->identifier().vdevice_handle(), lambda);
CHECK_EXPECTED_AS_RPC_STATUS(stream_interface, reply); CHECK_EXPECTED_AS_RPC_STATUS(stream_interface, reply);
reply->set_stream_interface(*stream_interface); reply->set_stream_interface(*stream_interface);
return grpc::Status::OK; return grpc::Status::OK;
} }
grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_dup_handle(grpc::ServerContext*, const dup_handle_Request *request, grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_dup_handle(grpc::ServerContext*, const ConfiguredNetworkGroup_dup_handle_Request *request,
dup_handle_Reply* reply) ConfiguredNetworkGroup_dup_handle_Reply* reply)
{ {
auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); auto &vdevice_manager = ServiceResourceManager<VDevice>::get_instance();
auto handle = manager.dup_handle(request->pid(), request->handle()); vdevice_manager.dup_handle(request->identifier().vdevice_handle(), request->pid());
auto &ng_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto handle = ng_manager.dup_handle(request->identifier().network_group_handle(), request->pid());
reply->set_handle(handle); reply->set_handle(handle);
return grpc::Status::OK; return grpc::Status::OK;
} }
@@ -352,7 +371,7 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_release(grpc::ServerConte
Release_Reply *reply) Release_Reply *reply)
{ {
auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
manager.release_resource(request->handle(), request->pid()); manager.release_resource(request->network_group_identifier().network_group_handle(), request->pid());
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS)); reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK; return grpc::Status::OK;
} }
@@ -382,7 +401,8 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_make_input_vstream_params
return cng->make_input_vstream_params(quantized, format_type, timeout_ms, queue_size, network_name); return cng->make_input_vstream_params(quantized, format_type, timeout_ms, queue_size, network_name);
}; };
auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto expected_params = manager.execute<Expected<std::map<std::string, hailo_vstream_params_t>>>(request->handle(), lambda, request->quantized(), static_cast<hailo_format_type_t>(request->format_type()), auto expected_params = manager.execute<Expected<std::map<std::string, hailo_vstream_params_t>>>(
request->identifier().network_group_handle(), lambda, request->quantized(), static_cast<hailo_format_type_t>(request->format_type()),
request->timeout_ms(), request->queue_size(), request->network_name()); request->timeout_ms(), request->queue_size(), request->network_name());
CHECK_EXPECTED_AS_RPC_STATUS(expected_params, reply); CHECK_EXPECTED_AS_RPC_STATUS(expected_params, reply);
auto params_map = reply->mutable_vstream_params_map(); auto params_map = reply->mutable_vstream_params_map();
@@ -404,7 +424,7 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_make_output_vstream_param
return cng->make_output_vstream_params(quantized, format_type, timeout_ms, queue_size, network_name); return cng->make_output_vstream_params(quantized, format_type, timeout_ms, queue_size, network_name);
}; };
auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto expected_params = manager.execute<Expected<std::map<std::string, hailo_vstream_params_t>>>(request->handle(), auto expected_params = manager.execute<Expected<std::map<std::string, hailo_vstream_params_t>>>(request->identifier().network_group_handle(),
lambda, request->quantized(), static_cast<hailo_format_type_t>(request->format_type()), lambda, request->quantized(), static_cast<hailo_format_type_t>(request->format_type()),
request->timeout_ms(), request->queue_size(), request->network_name()); request->timeout_ms(), request->queue_size(), request->network_name());
CHECK_EXPECTED_AS_RPC_STATUS(expected_params, reply); CHECK_EXPECTED_AS_RPC_STATUS(expected_params, reply);
@@ -427,8 +447,8 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_make_output_vstream_param
return cng->make_output_vstream_params_groups(quantized, format_type, timeout_ms, queue_size); return cng->make_output_vstream_params_groups(quantized, format_type, timeout_ms, queue_size);
}; };
auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto expected_params = manager.execute<Expected<std::vector<std::map<std::string, hailo_vstream_params_t>>>>(request->handle(), auto expected_params = manager.execute<Expected<std::vector<std::map<std::string, hailo_vstream_params_t>>>>(
lambda, request->quantized(), static_cast<hailo_format_type_t>(request->format_type()), request->identifier().network_group_handle(), lambda, request->quantized(), static_cast<hailo_format_type_t>(request->format_type()),
request->timeout_ms(), request->queue_size()); request->timeout_ms(), request->queue_size());
CHECK_EXPECTED_AS_RPC_STATUS(expected_params, reply); CHECK_EXPECTED_AS_RPC_STATUS(expected_params, reply);
auto params_map_vector = reply->mutable_vstream_params_groups(); auto params_map_vector = reply->mutable_vstream_params_groups();
@@ -453,7 +473,7 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_default_stream_interf
return cng->get_default_streams_interface(); return cng->get_default_streams_interface();
}; };
auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto expected_stream_interface = net_group_manager.execute<Expected<hailo_stream_interface_t>>(request->handle(), lambda); auto expected_stream_interface = net_group_manager.execute<Expected<hailo_stream_interface_t>>(request->identifier().network_group_handle(), lambda);
CHECK_EXPECTED_AS_RPC_STATUS(expected_stream_interface, reply); CHECK_EXPECTED_AS_RPC_STATUS(expected_stream_interface, reply);
reply->set_stream_interface(static_cast<uint32_t>(expected_stream_interface.value())); reply->set_stream_interface(static_cast<uint32_t>(expected_stream_interface.value()));
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS)); reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
@@ -468,7 +488,8 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_output_vstream_groups
return cng->get_output_vstream_groups(); return cng->get_output_vstream_groups();
}; };
auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto expected_output_vstream_groups = net_group_manager.execute<Expected<std::vector<std::vector<std::string>>>>(request->handle(), lambda); auto expected_output_vstream_groups = net_group_manager.execute<Expected<std::vector<std::vector<std::string>>>>(
request->identifier().network_group_handle(), lambda);
CHECK_EXPECTED_AS_RPC_STATUS(expected_output_vstream_groups, reply); CHECK_EXPECTED_AS_RPC_STATUS(expected_output_vstream_groups, reply);
auto output_vstream_groups = expected_output_vstream_groups.value(); auto output_vstream_groups = expected_output_vstream_groups.value();
auto groups_proto = reply->mutable_output_vstream_groups(); auto groups_proto = reply->mutable_output_vstream_groups();
@@ -497,6 +518,7 @@ void serialize_vstream_info(const hailo_vstream_info_t &info, ProtoVStreamInfo *
auto nms_shape_proto = info_proto->mutable_nms_shape(); auto nms_shape_proto = info_proto->mutable_nms_shape();
nms_shape_proto->set_number_of_classes(info.nms_shape.number_of_classes); nms_shape_proto->set_number_of_classes(info.nms_shape.number_of_classes);
nms_shape_proto->set_max_bbox_per_class(info.nms_shape.max_bboxes_per_class); nms_shape_proto->set_max_bbox_per_class(info.nms_shape.max_bboxes_per_class);
nms_shape_proto->set_max_mask_size(info.nms_shape.max_mask_size);
} else { } else {
auto shape_proto = info_proto->mutable_shape(); auto shape_proto = info_proto->mutable_shape();
shape_proto->set_height(info.shape.height); shape_proto->set_height(info.shape.height);
@@ -529,7 +551,8 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_input_vstream_infos(g
return cng->get_input_vstream_infos(network_name); return cng->get_input_vstream_infos(network_name);
}; };
auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto expected_vstream_infos = net_group_manager.execute<Expected<std::vector<hailo_vstream_info_t>>>(request->handle(), lambda, request->network_name()); auto expected_vstream_infos = net_group_manager.execute<Expected<std::vector<hailo_vstream_info_t>>>(
request->identifier().network_group_handle(), lambda, request->network_name());
CHECK_EXPECTED_AS_RPC_STATUS(expected_vstream_infos, reply); CHECK_EXPECTED_AS_RPC_STATUS(expected_vstream_infos, reply);
serialize_vstream_infos(reply, expected_vstream_infos.value()); serialize_vstream_infos(reply, expected_vstream_infos.value());
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS)); reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
@@ -544,7 +567,8 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_output_vstream_infos(
return cng->get_output_vstream_infos(network_name); return cng->get_output_vstream_infos(network_name);
}; };
auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto expected_vstream_infos = net_group_manager.execute<Expected<std::vector<hailo_vstream_info_t>>>(request->handle(), lambda, request->network_name()); auto expected_vstream_infos = net_group_manager.execute<Expected<std::vector<hailo_vstream_info_t>>>(
request->identifier().network_group_handle(), lambda, request->network_name());
CHECK_EXPECTED_AS_RPC_STATUS(expected_vstream_infos, reply); CHECK_EXPECTED_AS_RPC_STATUS(expected_vstream_infos, reply);
serialize_vstream_infos(reply, expected_vstream_infos.value()); serialize_vstream_infos(reply, expected_vstream_infos.value());
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS)); reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
@@ -559,7 +583,8 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_all_vstream_infos(grp
return cng->get_all_vstream_infos(network_name); return cng->get_all_vstream_infos(network_name);
}; };
auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto expected_vstream_infos = net_group_manager.execute<Expected<std::vector<hailo_vstream_info_t>>>(request->handle(), lambda, request->network_name()); auto expected_vstream_infos = net_group_manager.execute<Expected<std::vector<hailo_vstream_info_t>>>(
request->identifier().network_group_handle(), lambda, request->network_name());
CHECK_EXPECTED_AS_RPC_STATUS(expected_vstream_infos, reply); CHECK_EXPECTED_AS_RPC_STATUS(expected_vstream_infos, reply);
serialize_vstream_infos(reply, expected_vstream_infos.value()); serialize_vstream_infos(reply, expected_vstream_infos.value());
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS)); reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
@@ -574,7 +599,7 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_is_scheduled(grpc::Server
return cng->is_scheduled(); return cng->is_scheduled();
}; };
auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto is_scheduled = manager.execute<bool>(request->handle(), lambda); auto is_scheduled = manager.execute<bool>(request->identifier().network_group_handle(), lambda);
reply->set_is_scheduled(static_cast<bool>(is_scheduled)); reply->set_is_scheduled(static_cast<bool>(is_scheduled));
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS)); reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK; return grpc::Status::OK;
@@ -588,7 +613,8 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_set_scheduler_timeout(grp
return cng->set_scheduler_timeout(timeout_ms, network_name); return cng->set_scheduler_timeout(timeout_ms, network_name);
}; };
auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto status = net_group_manager.execute<hailo_status>(request->handle(), lambda, static_cast<std::chrono::milliseconds>(request->timeout_ms()), auto status = net_group_manager.execute<hailo_status>(request->identifier().network_group_handle(), lambda,
static_cast<std::chrono::milliseconds>(request->timeout_ms()),
request->network_name()); request->network_name());
reply->set_status(status); reply->set_status(status);
return grpc::Status::OK; return grpc::Status::OK;
@@ -602,7 +628,7 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_set_scheduler_threshold(g
return cng->set_scheduler_threshold(threshold, network_name); return cng->set_scheduler_threshold(threshold, network_name);
}; };
auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto status = net_group_manager.execute<hailo_status>(request->handle(), lambda, request->threshold(), auto status = net_group_manager.execute<hailo_status>(request->identifier().network_group_handle(), lambda, request->threshold(),
request->network_name()); request->network_name());
reply->set_status(status); reply->set_status(status);
return grpc::Status::OK; return grpc::Status::OK;
@@ -616,7 +642,7 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_set_scheduler_priority(gr
return cng->set_scheduler_priority(priority, network_name); return cng->set_scheduler_priority(priority, network_name);
}; };
auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto status = net_group_manager.execute<hailo_status>(request->handle(), lambda, static_cast<uint8_t>(request->priority()), auto status = net_group_manager.execute<hailo_status>(request->identifier().network_group_handle(), lambda, static_cast<uint8_t>(request->priority()),
request->network_name()); request->network_name());
reply->set_status(status); reply->set_status(status);
return grpc::Status::OK; return grpc::Status::OK;
@@ -630,7 +656,7 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_config_params(grpc::S
return cng->get_config_params(); return cng->get_config_params();
}; };
auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto expected_params = net_group_manager.execute<Expected<ConfigureNetworkParams>>(request->handle(), lambda); auto expected_params = net_group_manager.execute<Expected<ConfigureNetworkParams>>(request->identifier().network_group_handle(), lambda);
CHECK_EXPECTED_AS_RPC_STATUS(expected_params, reply); CHECK_EXPECTED_AS_RPC_STATUS(expected_params, reply);
auto net_configure_params = expected_params.value(); auto net_configure_params = expected_params.value();
auto proto_network_configure_params = reply->mutable_params(); auto proto_network_configure_params = reply->mutable_params();
@@ -678,13 +704,20 @@ grpc::Status HailoRtRpcService::InputVStreams_create(grpc::ServerContext *, cons
}; };
inputs_params.emplace(param_proto.name(), std::move(params)); inputs_params.emplace(param_proto.name(), std::move(params));
} }
auto network_group_handle = request->net_group(); auto network_group_handle = request->identifier().network_group_handle();
auto client_pid = request->pid(); auto client_pid = request->pid();
update_client_id_timestamp(client_pid);
auto &vdevice_manager = ServiceResourceManager<VDevice>::get_instance();
vdevice_manager.dup_handle(request->identifier().vdevice_handle(), client_pid);
auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
net_group_manager.dup_handle(network_group_handle, client_pid);
auto lambda = [](std::shared_ptr<ConfiguredNetworkGroup> cng, const std::map<std::string, hailo_vstream_params_t> &inputs_params) { auto lambda = [](std::shared_ptr<ConfiguredNetworkGroup> cng, const std::map<std::string, hailo_vstream_params_t> &inputs_params) {
return cng->create_input_vstreams(inputs_params); return cng->create_input_vstreams(inputs_params);
}; };
auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto vstreams_expected = net_group_manager.execute<Expected<std::vector<InputVStream>>>(network_group_handle, lambda, inputs_params); auto vstreams_expected = net_group_manager.execute<Expected<std::vector<InputVStream>>>(network_group_handle, lambda, inputs_params);
CHECK_EXPECTED_AS_RPC_STATUS(vstreams_expected, reply); CHECK_EXPECTED_AS_RPC_STATUS(vstreams_expected, reply);
auto vstreams = vstreams_expected.release(); auto vstreams = vstreams_expected.release();
@@ -694,7 +727,7 @@ grpc::Status HailoRtRpcService::InputVStreams_create(grpc::ServerContext *, cons
auto handle = manager.register_resource(client_pid, make_shared_nothrow<InputVStream>(std::move(vstreams[i]))); auto handle = manager.register_resource(client_pid, make_shared_nothrow<InputVStream>(std::move(vstreams[i])));
reply->add_handles(handle); reply->add_handles(handle);
} }
net_group_manager.dup_handle(client_pid, network_group_handle);
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS)); reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK; return grpc::Status::OK;
@@ -703,9 +736,20 @@ grpc::Status HailoRtRpcService::InputVStreams_create(grpc::ServerContext *, cons
grpc::Status HailoRtRpcService::InputVStream_release(grpc::ServerContext *, const Release_Request *request, grpc::Status HailoRtRpcService::InputVStream_release(grpc::ServerContext *, const Release_Request *request,
Release_Reply *reply) Release_Reply *reply)
{ {
auto vstream_handle = request->vstream_identifier().vstream_handle();
auto was_aborted = is_input_vstream_aborted(vstream_handle);
flush_input_vstream(vstream_handle);
abort_input_vstream(vstream_handle);
auto &manager = ServiceResourceManager<InputVStream>::get_instance(); auto &manager = ServiceResourceManager<InputVStream>::get_instance();
manager.release_resource(request->handle(), request->pid()); auto resource = manager.release_resource(vstream_handle, request->pid());
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS)); auto status = HAILO_SUCCESS;
if (resource && (!was_aborted)) {
status = resource->resume();
if (HAILO_SUCCESS != status) {
LOGGER__INFO("Failed to resume input vstream {} after destruction", resource->name());
}
}
reply->set_status(static_cast<uint32_t>(status));
return grpc::Status::OK; return grpc::Status::OK;
} }
@@ -730,13 +774,19 @@ grpc::Status HailoRtRpcService::OutputVStreams_create(grpc::ServerContext *, con
output_params.emplace(param_proto.name(), std::move(params)); output_params.emplace(param_proto.name(), std::move(params));
} }
auto network_group_handle = request->net_group(); auto network_group_handle = request->identifier().network_group_handle();
auto client_pid = request->pid(); auto client_pid = request->pid();
update_client_id_timestamp(client_pid);
auto &vdevice_manager = ServiceResourceManager<VDevice>::get_instance();
vdevice_manager.dup_handle(request->identifier().vdevice_handle(), client_pid);
auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
net_group_manager.dup_handle(network_group_handle, client_pid);
auto lambda = [](std::shared_ptr<ConfiguredNetworkGroup> cng, const std::map<std::string, hailo_vstream_params_t> &output_params) { auto lambda = [](std::shared_ptr<ConfiguredNetworkGroup> cng, const std::map<std::string, hailo_vstream_params_t> &output_params) {
return cng->create_output_vstreams(output_params); return cng->create_output_vstreams(output_params);
}; };
auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto vstreams_expected = net_group_manager.execute<Expected<std::vector<OutputVStream>>>(network_group_handle, lambda, output_params); auto vstreams_expected = net_group_manager.execute<Expected<std::vector<OutputVStream>>>(network_group_handle, lambda, output_params);
CHECK_EXPECTED_AS_RPC_STATUS(vstreams_expected, reply); CHECK_EXPECTED_AS_RPC_STATUS(vstreams_expected, reply);
auto vstreams = vstreams_expected.release(); auto vstreams = vstreams_expected.release();
@@ -746,7 +796,7 @@ grpc::Status HailoRtRpcService::OutputVStreams_create(grpc::ServerContext *, con
auto handle = manager.register_resource(client_pid, make_shared_nothrow<OutputVStream>(std::move(vstreams[i]))); auto handle = manager.register_resource(client_pid, make_shared_nothrow<OutputVStream>(std::move(vstreams[i])));
reply->add_handles(handle); reply->add_handles(handle);
} }
net_group_manager.dup_handle(client_pid, network_group_handle);
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS)); reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK; return grpc::Status::OK;
@@ -755,10 +805,11 @@ grpc::Status HailoRtRpcService::OutputVStreams_create(grpc::ServerContext *, con
grpc::Status HailoRtRpcService::OutputVStream_release(grpc::ServerContext *, const Release_Request *request, grpc::Status HailoRtRpcService::OutputVStream_release(grpc::ServerContext *, const Release_Request *request,
Release_Reply *reply) Release_Reply *reply)
{ {
auto was_aborted = is_output_vstream_aborted(request->handle()); auto vstream_handle = request->vstream_identifier().vstream_handle();
abort_output_vstream(request->handle()); auto was_aborted = is_output_vstream_aborted(vstream_handle);
abort_output_vstream(vstream_handle);
auto &manager = ServiceResourceManager<OutputVStream>::get_instance(); auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto resource = manager.release_resource(request->handle(), request->pid()); auto resource = manager.release_resource(vstream_handle, request->pid());
auto status = HAILO_SUCCESS; auto status = HAILO_SUCCESS;
if (resource && (!was_aborted)) { if (resource && (!was_aborted)) {
status = resource->resume(); status = resource->resume();
@@ -778,23 +829,35 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_name(grpc::ServerContext*
return cng->name(); return cng->name();
}; };
auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto network_group_name = manager.execute<std::string>(request->handle(), lambda); auto network_group_name = manager.execute<std::string>(request->identifier().network_group_handle(), lambda);
reply->set_network_group_name(network_group_name); reply->set_network_group_name(network_group_name);
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS)); reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK; return grpc::Status::OK;
} }
grpc::Status HailoRtRpcService::InputVStream_is_multi_planar(grpc::ServerContext*, const InputVStream_is_multi_planar_Request *request,
InputVStream_is_multi_planar_Reply *reply)
{
auto lambda = [](std::shared_ptr<InputVStream> input_vstream) {
return input_vstream->is_multi_planar();
};
auto &manager = ServiceResourceManager<InputVStream>::get_instance();
auto multi_planar = manager.execute<bool>(request->identifier().vstream_handle(), lambda);
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
reply->set_is_multi_planar(multi_planar);
return grpc::Status::OK;
}
grpc::Status HailoRtRpcService::InputVStream_write(grpc::ServerContext*, const InputVStream_write_Request *request, grpc::Status HailoRtRpcService::InputVStream_write(grpc::ServerContext*, const InputVStream_write_Request *request,
InputVStream_write_Reply *reply) InputVStream_write_Reply *reply)
{ {
auto buffer_expected = Buffer::create_shared(request->data().length());
CHECK_EXPECTED_AS_RPC_STATUS(buffer_expected, reply);
std::vector<uint8_t> data(request->data().begin(), request->data().end()); std::vector<uint8_t> data(request->data().begin(), request->data().end());
auto lambda = [](std::shared_ptr<InputVStream> input_vstream, const MemoryView &buffer) { auto lambda = [](std::shared_ptr<InputVStream> input_vstream, const MemoryView &buffer) {
return input_vstream->write(std::move(buffer)); return input_vstream->write(std::move(buffer));
}; };
auto &manager = ServiceResourceManager<InputVStream>::get_instance(); auto &manager = ServiceResourceManager<InputVStream>::get_instance();
auto status = manager.execute<hailo_status>(request->handle(), lambda, MemoryView::create_const(data.data(), data.size())); auto status = manager.execute<hailo_status>(request->identifier().vstream_handle(), lambda, MemoryView::create_const(data.data(), data.size()));
if (HAILO_STREAM_ABORTED_BY_USER == status) { if (HAILO_STREAM_ABORTED_BY_USER == status) {
LOGGER__INFO("User aborted VStream write."); LOGGER__INFO("User aborted VStream write.");
@@ -806,21 +869,33 @@ grpc::Status HailoRtRpcService::InputVStream_write(grpc::ServerContext*, const I
return grpc::Status::OK; return grpc::Status::OK;
} }
grpc::Status HailoRtRpcService::InputVStream_dup_handle(grpc::ServerContext*, const dup_handle_Request *request, grpc::Status HailoRtRpcService::InputVStream_write_pix(grpc::ServerContext*, const InputVStream_write_pix_Request *request,
dup_handle_Reply *reply) InputVStream_write_pix_Reply *reply)
{ {
auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); hailo_pix_buffer_t pix_buffer = {};
auto handle = manager.dup_handle(request->pid(), request->handle()); pix_buffer.index = request->index();
reply->set_handle(handle); pix_buffer.number_of_planes = request->number_of_planes();
return grpc::Status::OK; std::vector<std::vector<uint8_t>> data_arrays;
} data_arrays.reserve(pix_buffer.number_of_planes);
for (uint32_t i =0; i < pix_buffer.number_of_planes; i++) {
data_arrays.push_back(std::vector<uint8_t>(request->planes_data(i).begin(), request->planes_data(i).end()));
pix_buffer.planes[i].user_ptr = data_arrays[i].data();
pix_buffer.planes[i].bytes_used = static_cast<uint32_t>(data_arrays[i].size());
}
grpc::Status HailoRtRpcService::OutputVStream_dup_handle(grpc::ServerContext*, const dup_handle_Request *request, auto lambda = [](std::shared_ptr<InputVStream> input_vstream, const hailo_pix_buffer_t &buffer) {
dup_handle_Reply *reply) return input_vstream->write(std::move(buffer));
{ };
auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); auto &manager = ServiceResourceManager<InputVStream>::get_instance();
auto handle = manager.dup_handle(request->pid(), request->handle()); auto status = manager.execute<hailo_status>(request->identifier().vstream_handle(), lambda, pix_buffer);
reply->set_handle(handle);
if (HAILO_STREAM_ABORTED_BY_USER == status) {
LOGGER__INFO("User aborted VStream write.");
reply->set_status(static_cast<uint32_t>(HAILO_STREAM_ABORTED_BY_USER));
return grpc::Status::OK;
}
CHECK_SUCCESS_AS_RPC_STATUS(status, reply, "VStream write failed");
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK; return grpc::Status::OK;
} }
@@ -832,7 +907,7 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_network_infos(grpc::S
return cng->get_network_infos(); return cng->get_network_infos();
}; };
auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto expected_network_infos = manager.execute<Expected<std::vector<hailo_network_info_t>>>(request->handle(), lambda); auto expected_network_infos = manager.execute<Expected<std::vector<hailo_network_info_t>>>(request->identifier().network_group_handle(), lambda);
CHECK_EXPECTED_AS_RPC_STATUS(expected_network_infos, reply); CHECK_EXPECTED_AS_RPC_STATUS(expected_network_infos, reply);
auto infos_proto = reply->mutable_network_infos(); auto infos_proto = reply->mutable_network_infos();
for (auto& info : expected_network_infos.value()) { for (auto& info : expected_network_infos.value()) {
@@ -850,7 +925,7 @@ grpc::Status HailoRtRpcService::OutputVStream_read(grpc::ServerContext*, const O
return output_vstream->read(std::move(buffer)); return output_vstream->read(std::move(buffer));
}; };
auto &manager = ServiceResourceManager<OutputVStream>::get_instance(); auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto status = manager.execute<hailo_status>(request->handle(), lambda, MemoryView(data.data(), data.size())); auto status = manager.execute<hailo_status>(request->identifier().vstream_handle(), lambda, MemoryView(data.data(), data.size()));
if (HAILO_STREAM_ABORTED_BY_USER == status) { if (HAILO_STREAM_ABORTED_BY_USER == status) {
LOGGER__INFO("User aborted VStream read."); LOGGER__INFO("User aborted VStream read.");
reply->set_status(static_cast<uint32_t>(HAILO_STREAM_ABORTED_BY_USER)); reply->set_status(static_cast<uint32_t>(HAILO_STREAM_ABORTED_BY_USER));
@@ -870,7 +945,7 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_all_stream_infos(grpc
return cng->get_all_stream_infos(); return cng->get_all_stream_infos();
}; };
auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto expected_stream_infos = manager.execute<Expected<std::vector<hailo_stream_info_t>>>(request->handle(), lambda); auto expected_stream_infos = manager.execute<Expected<std::vector<hailo_stream_info_t>>>(request->identifier().network_group_handle(), lambda);
CHECK_EXPECTED_AS_RPC_STATUS(expected_stream_infos, reply); CHECK_EXPECTED_AS_RPC_STATUS(expected_stream_infos, reply);
auto proto_stream_infos = reply->mutable_stream_infos(); auto proto_stream_infos = reply->mutable_stream_infos();
for (auto& stream_info : expected_stream_infos.value()) { for (auto& stream_info : expected_stream_infos.value()) {
@@ -927,7 +1002,8 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_latency_measurement(g
return cng->get_latency_measurement(network_name); return cng->get_latency_measurement(network_name);
}; };
auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto expected_latency_result = manager.execute<Expected<LatencyMeasurementResult>>(request->handle(), lambda, request->network_name()); auto expected_latency_result = manager.execute<Expected<LatencyMeasurementResult>>(
request->identifier().network_group_handle(), lambda, request->network_name());
if (HAILO_NOT_AVAILABLE == expected_latency_result.status()) { if (HAILO_NOT_AVAILABLE == expected_latency_result.status()) {
reply->set_status(static_cast<uint32_t>(HAILO_NOT_AVAILABLE)); reply->set_status(static_cast<uint32_t>(HAILO_NOT_AVAILABLE));
} else { } else {
@@ -946,7 +1022,7 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_is_multi_context(grpc::Se
return cng->is_multi_context(); return cng->is_multi_context();
}; };
auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto is_multi_context = manager.execute<bool>(request->handle(), lambda); auto is_multi_context = manager.execute<bool>(request->identifier().network_group_handle(), lambda);
reply->set_is_multi_context(static_cast<bool>(is_multi_context)); reply->set_is_multi_context(static_cast<bool>(is_multi_context));
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS)); reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK; return grpc::Status::OK;
@@ -960,7 +1036,7 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_sorted_output_names(g
return cng->get_sorted_output_names(); return cng->get_sorted_output_names();
}; };
auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto sorted_output_names_expected = manager.execute<Expected<std::vector<std::string>>>(request->handle(), lambda); auto sorted_output_names_expected = manager.execute<Expected<std::vector<std::string>>>(request->identifier().network_group_handle(), lambda);
CHECK_EXPECTED_AS_RPC_STATUS(sorted_output_names_expected, reply); CHECK_EXPECTED_AS_RPC_STATUS(sorted_output_names_expected, reply);
auto sorted_output_names_proto = reply->mutable_sorted_output_names(); auto sorted_output_names_proto = reply->mutable_sorted_output_names();
for (auto &name : sorted_output_names_expected.value()) { for (auto &name : sorted_output_names_expected.value()) {
@@ -978,7 +1054,8 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_stream_names_from_vst
return cng->get_stream_names_from_vstream_name(vstream_name); return cng->get_stream_names_from_vstream_name(vstream_name);
}; };
auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto streams_names_expected = manager.execute<Expected<std::vector<std::string>>>(request->handle(), lambda, request->vstream_name()); auto streams_names_expected = manager.execute<Expected<std::vector<std::string>>>(
request->identifier().network_group_handle(), lambda, request->vstream_name());
CHECK_EXPECTED_AS_RPC_STATUS(streams_names_expected, reply); CHECK_EXPECTED_AS_RPC_STATUS(streams_names_expected, reply);
auto streams_names_proto = reply->mutable_streams_names(); auto streams_names_proto = reply->mutable_streams_names();
for (auto &name : streams_names_expected.value()) { for (auto &name : streams_names_expected.value()) {
@@ -996,7 +1073,8 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_vstream_names_from_st
return cng->get_vstream_names_from_stream_name(stream_name); return cng->get_vstream_names_from_stream_name(stream_name);
}; };
auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance(); auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
auto vstreams_names_expected = manager.execute<Expected<std::vector<std::string>>>(request->handle(), lambda, request->stream_name()); auto vstreams_names_expected = manager.execute<Expected<std::vector<std::string>>>(
request->identifier().network_group_handle(), lambda, request->stream_name());
CHECK_EXPECTED_AS_RPC_STATUS(vstreams_names_expected, reply); CHECK_EXPECTED_AS_RPC_STATUS(vstreams_names_expected, reply);
auto vstreams_names_proto = reply->mutable_vstreams_names(); auto vstreams_names_proto = reply->mutable_vstreams_names();
for (auto &name : vstreams_names_expected.value()) { for (auto &name : vstreams_names_expected.value()) {
@@ -1013,7 +1091,7 @@ grpc::Status HailoRtRpcService::InputVStream_get_frame_size(grpc::ServerContext*
return input_vstream->get_frame_size(); return input_vstream->get_frame_size();
}; };
auto &manager = ServiceResourceManager<InputVStream>::get_instance(); auto &manager = ServiceResourceManager<InputVStream>::get_instance();
auto frame_size = manager.execute<size_t>(request->handle(), lambda); auto frame_size = manager.execute<size_t>(request->identifier().vstream_handle(), lambda);
reply->set_frame_size(static_cast<uint32_t>(frame_size)); reply->set_frame_size(static_cast<uint32_t>(frame_size));
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS)); reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK; return grpc::Status::OK;
@@ -1026,7 +1104,7 @@ grpc::Status HailoRtRpcService::OutputVStream_get_frame_size(grpc::ServerContext
return output_vstream->get_frame_size(); return output_vstream->get_frame_size();
}; };
auto &manager = ServiceResourceManager<OutputVStream>::get_instance(); auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto frame_size = manager.execute<size_t>(request->handle(), lambda); auto frame_size = manager.execute<size_t>(request->identifier().vstream_handle(), lambda);
reply->set_frame_size(static_cast<uint32_t>(frame_size)); reply->set_frame_size(static_cast<uint32_t>(frame_size));
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS)); reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK; return grpc::Status::OK;
@@ -1035,12 +1113,8 @@ grpc::Status HailoRtRpcService::OutputVStream_get_frame_size(grpc::ServerContext
grpc::Status HailoRtRpcService::InputVStream_flush(grpc::ServerContext*, const InputVStream_flush_Request *request, grpc::Status HailoRtRpcService::InputVStream_flush(grpc::ServerContext*, const InputVStream_flush_Request *request,
InputVStream_flush_Reply *reply) InputVStream_flush_Reply *reply)
{ {
auto lambda = [](std::shared_ptr<InputVStream> input_vstream) { auto status = flush_input_vstream(request->identifier().vstream_handle());
return input_vstream->flush(); reply->set_status(status);
};
auto &manager = ServiceResourceManager<InputVStream>::get_instance();
auto flush_status = manager.execute<hailo_status>(request->handle(), lambda);
reply->set_status(static_cast<uint32_t>(flush_status));
return grpc::Status::OK; return grpc::Status::OK;
} }
@@ -1051,7 +1125,7 @@ grpc::Status HailoRtRpcService::InputVStream_name(grpc::ServerContext*, const VS
return input_vstream->name(); return input_vstream->name();
}; };
auto &manager = ServiceResourceManager<InputVStream>::get_instance(); auto &manager = ServiceResourceManager<InputVStream>::get_instance();
auto name = manager.execute<std::string>(request->handle(), lambda); auto name = manager.execute<std::string>(request->identifier().vstream_handle(), lambda);
reply->set_name(name); reply->set_name(name);
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS)); reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK; return grpc::Status::OK;
@@ -1064,7 +1138,7 @@ grpc::Status HailoRtRpcService::OutputVStream_name(grpc::ServerContext*, const V
return output_vstream->name(); return output_vstream->name();
}; };
auto &manager = ServiceResourceManager<OutputVStream>::get_instance(); auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto name = manager.execute<std::string>(request->handle(), lambda); auto name = manager.execute<std::string>(request->identifier().vstream_handle(), lambda);
reply->set_name(name); reply->set_name(name);
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS)); reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK; return grpc::Status::OK;
@@ -1077,7 +1151,7 @@ grpc::Status HailoRtRpcService::InputVStream_network_name(grpc::ServerContext*,
return input_vstream->network_name(); return input_vstream->network_name();
}; };
auto &manager = ServiceResourceManager<InputVStream>::get_instance(); auto &manager = ServiceResourceManager<InputVStream>::get_instance();
auto name = manager.execute<std::string>(request->handle(), lambda); auto name = manager.execute<std::string>(request->identifier().vstream_handle(), lambda);
reply->set_network_name(name); reply->set_network_name(name);
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS)); reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK; return grpc::Status::OK;
@@ -1090,7 +1164,7 @@ grpc::Status HailoRtRpcService::OutputVStream_network_name(grpc::ServerContext*,
return output_vstream->network_name(); return output_vstream->network_name();
}; };
auto &manager = ServiceResourceManager<OutputVStream>::get_instance(); auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto name = manager.execute<std::string>(request->handle(), lambda); auto name = manager.execute<std::string>(request->identifier().vstream_handle(), lambda);
reply->set_network_name(name); reply->set_network_name(name);
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS)); reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK; return grpc::Status::OK;
@@ -1099,7 +1173,7 @@ grpc::Status HailoRtRpcService::OutputVStream_network_name(grpc::ServerContext*,
grpc::Status HailoRtRpcService::InputVStream_abort(grpc::ServerContext*, const VStream_abort_Request *request, grpc::Status HailoRtRpcService::InputVStream_abort(grpc::ServerContext*, const VStream_abort_Request *request,
VStream_abort_Reply *reply) VStream_abort_Reply *reply)
{ {
auto status = abort_input_vstream(request->handle()); auto status = abort_input_vstream(request->identifier().vstream_handle());
reply->set_status(status); reply->set_status(status);
return grpc::Status::OK; return grpc::Status::OK;
} }
@@ -1107,7 +1181,7 @@ grpc::Status HailoRtRpcService::InputVStream_abort(grpc::ServerContext*, const V
grpc::Status HailoRtRpcService::OutputVStream_abort(grpc::ServerContext*, const VStream_abort_Request *request, grpc::Status HailoRtRpcService::OutputVStream_abort(grpc::ServerContext*, const VStream_abort_Request *request,
VStream_abort_Reply *reply) VStream_abort_Reply *reply)
{ {
auto status = abort_output_vstream(request->handle()); auto status = abort_output_vstream(request->identifier().vstream_handle());
reply->set_status(status); reply->set_status(status);
return grpc::Status::OK; return grpc::Status::OK;
} }
@@ -1119,7 +1193,7 @@ grpc::Status HailoRtRpcService::InputVStream_resume(grpc::ServerContext*, const
return input_vstream->resume(); return input_vstream->resume();
}; };
auto &manager = ServiceResourceManager<InputVStream>::get_instance(); auto &manager = ServiceResourceManager<InputVStream>::get_instance();
auto status = manager.execute<hailo_status>(request->handle(), lambda); auto status = manager.execute<hailo_status>(request->identifier().vstream_handle(), lambda);
reply->set_status(status); reply->set_status(status);
return grpc::Status::OK; return grpc::Status::OK;
} }
@@ -1131,7 +1205,7 @@ grpc::Status HailoRtRpcService::OutputVStream_resume(grpc::ServerContext*, const
return output_vstream->resume(); return output_vstream->resume();
}; };
auto &manager = ServiceResourceManager<OutputVStream>::get_instance(); auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto status = manager.execute<hailo_status>(request->handle(), lambda); auto status = manager.execute<hailo_status>(request->identifier().vstream_handle(), lambda);
reply->set_status(status); reply->set_status(status);
return grpc::Status::OK; return grpc::Status::OK;
} }
@@ -1143,7 +1217,7 @@ grpc::Status HailoRtRpcService::InputVStream_stop_and_clear(grpc::ServerContext*
return input_vstream->stop_and_clear(); return input_vstream->stop_and_clear();
}; };
auto &manager = ServiceResourceManager<InputVStream>::get_instance(); auto &manager = ServiceResourceManager<InputVStream>::get_instance();
auto status = manager.execute<hailo_status>(request->handle(), lambda); auto status = manager.execute<hailo_status>(request->identifier().vstream_handle(), lambda);
reply->set_status(status); reply->set_status(status);
return grpc::Status::OK; return grpc::Status::OK;
} }
@@ -1155,7 +1229,7 @@ grpc::Status HailoRtRpcService::OutputVStream_stop_and_clear(grpc::ServerContext
return output_vstream->stop_and_clear(); return output_vstream->stop_and_clear();
}; };
auto &manager = ServiceResourceManager<OutputVStream>::get_instance(); auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto status = manager.execute<hailo_status>(request->handle(), lambda); auto status = manager.execute<hailo_status>(request->identifier().vstream_handle(), lambda);
reply->set_status(status); reply->set_status(status);
return grpc::Status::OK; return grpc::Status::OK;
} }
@@ -1167,7 +1241,7 @@ grpc::Status HailoRtRpcService::InputVStream_start_vstream(grpc::ServerContext*,
return input_vstream->start_vstream(); return input_vstream->start_vstream();
}; };
auto &manager = ServiceResourceManager<InputVStream>::get_instance(); auto &manager = ServiceResourceManager<InputVStream>::get_instance();
auto status = manager.execute<hailo_status>(request->handle(), lambda); auto status = manager.execute<hailo_status>(request->identifier().vstream_handle(), lambda);
reply->set_status(status); reply->set_status(status);
return grpc::Status::OK; return grpc::Status::OK;
} }
@@ -1179,7 +1253,7 @@ grpc::Status HailoRtRpcService::OutputVStream_start_vstream(grpc::ServerContext*
return output_vstream->start_vstream(); return output_vstream->start_vstream();
}; };
auto &manager = ServiceResourceManager<OutputVStream>::get_instance(); auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto status = manager.execute<hailo_status>(request->handle(), lambda); auto status = manager.execute<hailo_status>(request->identifier().vstream_handle(), lambda);
reply->set_status(status); reply->set_status(status);
return grpc::Status::OK; return grpc::Status::OK;
} }
@@ -1191,7 +1265,7 @@ grpc::Status HailoRtRpcService::InputVStream_get_user_buffer_format(grpc::Server
return input_vstream->get_user_buffer_format(); return input_vstream->get_user_buffer_format();
}; };
auto &manager = ServiceResourceManager<InputVStream>::get_instance(); auto &manager = ServiceResourceManager<InputVStream>::get_instance();
auto format = manager.execute<hailo_format_t>(request->handle(), lambda); auto format = manager.execute<hailo_format_t>(request->identifier().vstream_handle(), lambda);
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS)); reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
auto proto_user_buffer_format = reply->mutable_user_buffer_format(); auto proto_user_buffer_format = reply->mutable_user_buffer_format();
@@ -1209,7 +1283,7 @@ grpc::Status HailoRtRpcService::OutputVStream_get_user_buffer_format(grpc::Serve
return output_vstream->get_user_buffer_format(); return output_vstream->get_user_buffer_format();
}; };
auto &manager = ServiceResourceManager<OutputVStream>::get_instance(); auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto format = manager.execute<hailo_format_t>(request->handle(), lambda); auto format = manager.execute<hailo_format_t>(request->identifier().vstream_handle(), lambda);
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS)); reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
auto proto_user_buffer_format = reply->mutable_user_buffer_format(); auto proto_user_buffer_format = reply->mutable_user_buffer_format();
@@ -1227,7 +1301,7 @@ grpc::Status HailoRtRpcService::InputVStream_get_info(grpc::ServerContext*, cons
return input_vstream->get_info(); return input_vstream->get_info();
}; };
auto &manager = ServiceResourceManager<InputVStream>::get_instance(); auto &manager = ServiceResourceManager<InputVStream>::get_instance();
auto info = manager.execute<hailo_vstream_info_t>(request->handle(), lambda); auto info = manager.execute<hailo_vstream_info_t>(request->identifier().vstream_handle(), lambda);
auto info_proto = reply->mutable_vstream_info(); auto info_proto = reply->mutable_vstream_info();
serialize_vstream_info(info, info_proto); serialize_vstream_info(info, info_proto);
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS)); reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
@@ -1241,38 +1315,77 @@ grpc::Status HailoRtRpcService::OutputVStream_get_info(grpc::ServerContext*, con
return output_vstream->get_info(); return output_vstream->get_info();
}; };
auto &manager = ServiceResourceManager<OutputVStream>::get_instance(); auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto info = manager.execute<hailo_vstream_info_t>(request->handle(), lambda); auto info = manager.execute<hailo_vstream_info_t>(request->identifier().vstream_handle(), lambda);
auto info_proto = reply->mutable_vstream_info(); auto info_proto = reply->mutable_vstream_info();
serialize_vstream_info(info, info_proto); serialize_vstream_info(info, info_proto);
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS)); reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK; return grpc::Status::OK;
} }
grpc::Status HailoRtRpcService::InputVStream_is_aborted(grpc::ServerContext*, const VStream_is_aborted_Request *request,
VStream_is_aborted_Reply *reply)
{
auto lambda = [](std::shared_ptr<OutputVStream> input_vstream) {
return input_vstream->is_aborted();
};
auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto is_aborted = manager.execute<bool>(request->handle(), lambda);
reply->set_is_aborted(is_aborted);
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK;
}
grpc::Status HailoRtRpcService::OutputVStream_is_aborted(grpc::ServerContext*, const VStream_is_aborted_Request *request, grpc::Status HailoRtRpcService::OutputVStream_is_aborted(grpc::ServerContext*, const VStream_is_aborted_Request *request,
VStream_is_aborted_Reply *reply) VStream_is_aborted_Reply *reply)
{
auto lambda = [](std::shared_ptr<OutputVStream> output_vstream) {
return output_vstream->is_aborted();
};
auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto is_aborted = manager.execute<bool>(request->identifier().vstream_handle(), lambda);
reply->set_is_aborted(is_aborted);
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK;
}
grpc::Status HailoRtRpcService::InputVStream_is_aborted(grpc::ServerContext*, const VStream_is_aborted_Request *request,
VStream_is_aborted_Reply *reply)
{ {
auto lambda = [](std::shared_ptr<InputVStream> input_vstream) { auto lambda = [](std::shared_ptr<InputVStream> input_vstream) {
return input_vstream->is_aborted(); return input_vstream->is_aborted();
}; };
auto &manager = ServiceResourceManager<InputVStream>::get_instance(); auto &manager = ServiceResourceManager<InputVStream>::get_instance();
auto is_aborted = manager.execute<bool>(request->handle(), lambda); auto is_aborted = manager.execute<bool>(request->identifier().vstream_handle(), lambda);
reply->set_is_aborted(is_aborted); reply->set_is_aborted(is_aborted);
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS)); reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK; return grpc::Status::OK;
} }
grpc::Status HailoRtRpcService::OutputVStream_set_nms_score_threshold(grpc::ServerContext*, const VStream_set_nms_score_threshold_Request *request,
VStream_set_nms_score_threshold_Reply *reply)
{
auto lambda = [](std::shared_ptr<OutputVStream> output_vstream, float32_t threshold) {
return output_vstream->set_nms_score_threshold(threshold);
};
auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto status = manager.execute<hailo_status>(request->identifier().vstream_handle(), lambda, static_cast<float32_t>(request->threshold()));
CHECK_SUCCESS_AS_RPC_STATUS(status, reply, "set_nms_score_threshold failed");
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK;
}
grpc::Status HailoRtRpcService::OutputVStream_set_nms_iou_threshold(grpc::ServerContext*, const VStream_set_nms_iou_threshold_Request *request,
VStream_set_nms_iou_threshold_Reply *reply)
{
auto lambda = [](std::shared_ptr<OutputVStream> output_vstream, float32_t threshold) {
return output_vstream->set_nms_iou_threshold(threshold);
};
auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto status = manager.execute<hailo_status>(request->identifier().vstream_handle(), lambda, static_cast<float32_t>(request->threshold()));
CHECK_SUCCESS_AS_RPC_STATUS(status, reply, "set_nms_iou_threshold failed");
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK;
}
grpc::Status HailoRtRpcService::OutputVStream_set_nms_max_proposals_per_class(grpc::ServerContext*, const VStream_set_nms_max_proposals_per_class_Request *request,
VStream_set_nms_max_proposals_per_class_Reply *reply)
{
auto lambda = [](std::shared_ptr<OutputVStream> output_vstream, uint32_t max_proposals_per_class) {
return output_vstream->set_nms_max_proposals_per_class(max_proposals_per_class);
};
auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto status = manager.execute<hailo_status>(request->identifier().vstream_handle(), lambda, static_cast<uint32_t>(request->max_proposals_per_class()));
CHECK_SUCCESS_AS_RPC_STATUS(status, reply, "set_nms_max_proposals_per_class failed");
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK;
}
} }

View File

@@ -39,8 +39,6 @@ public:
empty*) override; empty*) override;
virtual grpc::Status get_service_version(grpc::ServerContext *, const get_service_version_Request *request, virtual grpc::Status get_service_version(grpc::ServerContext *, const get_service_version_Request *request,
get_service_version_Reply *reply) override; get_service_version_Reply *reply) override;
virtual grpc::Status VDevice_dup_handle(grpc::ServerContext *ctx, const dup_handle_Request *request,
dup_handle_Reply*) override;
virtual grpc::Status VDevice_create(grpc::ServerContext *, const VDevice_create_Request *request, virtual grpc::Status VDevice_create(grpc::ServerContext *, const VDevice_create_Request *request,
VDevice_create_Reply *reply) override; VDevice_create_Reply *reply) override;
@@ -61,8 +59,12 @@ public:
VStreams_create_Reply *reply) override; VStreams_create_Reply *reply) override;
virtual grpc::Status OutputVStream_release(grpc::ServerContext *, const Release_Request *request, virtual grpc::Status OutputVStream_release(grpc::ServerContext *, const Release_Request *request,
Release_Reply *reply) override; Release_Reply *reply) override;
virtual grpc::Status InputVStream_is_multi_planar(grpc::ServerContext*, const InputVStream_is_multi_planar_Request *request,
InputVStream_is_multi_planar_Reply *reply) override;
virtual grpc::Status InputVStream_write(grpc::ServerContext*, const InputVStream_write_Request *request, virtual grpc::Status InputVStream_write(grpc::ServerContext*, const InputVStream_write_Request *request,
InputVStream_write_Reply *reply) override; InputVStream_write_Reply *reply) override;
virtual grpc::Status InputVStream_write_pix(grpc::ServerContext*, const InputVStream_write_pix_Request *request,
InputVStream_write_pix_Reply *reply) override;
virtual grpc::Status OutputVStream_read(grpc::ServerContext*, const OutputVStream_read_Request *request, virtual grpc::Status OutputVStream_read(grpc::ServerContext*, const OutputVStream_read_Request *request,
OutputVStream_read_Reply *reply) override; OutputVStream_read_Reply *reply) override;
virtual grpc::Status InputVStream_get_frame_size(grpc::ServerContext*, const VStream_get_frame_size_Request *request, virtual grpc::Status InputVStream_get_frame_size(grpc::ServerContext*, const VStream_get_frame_size_Request *request,
@@ -95,10 +97,6 @@ public:
VStream_get_info_Reply *reply) override; VStream_get_info_Reply *reply) override;
virtual grpc::Status OutputVStream_get_info(grpc::ServerContext*, const VStream_get_info_Request *request, virtual grpc::Status OutputVStream_get_info(grpc::ServerContext*, const VStream_get_info_Request *request,
VStream_get_info_Reply *reply) override; VStream_get_info_Reply *reply) override;
virtual grpc::Status InputVStream_dup_handle(grpc::ServerContext *ctx, const dup_handle_Request *request,
dup_handle_Reply*) override;
virtual grpc::Status OutputVStream_dup_handle(grpc::ServerContext *ctx, const dup_handle_Request *request,
dup_handle_Reply*) override;
virtual grpc::Status InputVStream_stop_and_clear(grpc::ServerContext *ctx, const VStream_stop_and_clear_Request *request, virtual grpc::Status InputVStream_stop_and_clear(grpc::ServerContext *ctx, const VStream_stop_and_clear_Request *request,
VStream_stop_and_clear_Reply*) override; VStream_stop_and_clear_Reply*) override;
virtual grpc::Status OutputVStream_stop_and_clear(grpc::ServerContext *ctx, const VStream_stop_and_clear_Request *request, virtual grpc::Status OutputVStream_stop_and_clear(grpc::ServerContext *ctx, const VStream_stop_and_clear_Request *request,
@@ -111,9 +109,15 @@ public:
VStream_is_aborted_Reply*) override; VStream_is_aborted_Reply*) override;
virtual grpc::Status OutputVStream_is_aborted(grpc::ServerContext *ctx, const VStream_is_aborted_Request *request, virtual grpc::Status OutputVStream_is_aborted(grpc::ServerContext *ctx, const VStream_is_aborted_Request *request,
VStream_is_aborted_Reply*) override; VStream_is_aborted_Reply*) override;
virtual grpc::Status OutputVStream_set_nms_score_threshold(grpc::ServerContext *ctx,
const VStream_set_nms_score_threshold_Request *request, VStream_set_nms_score_threshold_Reply*) override;
virtual grpc::Status OutputVStream_set_nms_iou_threshold(grpc::ServerContext *ctx,
const VStream_set_nms_iou_threshold_Request *request, VStream_set_nms_iou_threshold_Reply*) override;
virtual grpc::Status OutputVStream_set_nms_max_proposals_per_class(grpc::ServerContext *ctx,
const VStream_set_nms_max_proposals_per_class_Request *request, VStream_set_nms_max_proposals_per_class_Reply*) override;
virtual grpc::Status ConfiguredNetworkGroup_dup_handle(grpc::ServerContext *ctx, const dup_handle_Request *request, virtual grpc::Status ConfiguredNetworkGroup_dup_handle(grpc::ServerContext *ctx, const ConfiguredNetworkGroup_dup_handle_Request *request,
dup_handle_Reply*) override; ConfiguredNetworkGroup_dup_handle_Reply*) override;
virtual grpc::Status ConfiguredNetworkGroup_release(grpc::ServerContext*, const Release_Request* request, virtual grpc::Status ConfiguredNetworkGroup_release(grpc::ServerContext*, const Release_Request* request,
Release_Reply* reply) override; Release_Reply* reply) override;
virtual grpc::Status ConfiguredNetworkGroup_make_input_vstream_params(grpc::ServerContext*, virtual grpc::Status ConfiguredNetworkGroup_make_input_vstream_params(grpc::ServerContext*,
@@ -182,6 +186,7 @@ public:
private: private:
void keep_alive(); void keep_alive();
hailo_status flush_input_vstream(uint32_t handle);
hailo_status abort_input_vstream(uint32_t handle); hailo_status abort_input_vstream(uint32_t handle);
hailo_status abort_output_vstream(uint32_t handle); hailo_status abort_output_vstream(uint32_t handle);
hailo_status resume_input_vstream(uint32_t handle); hailo_status resume_input_vstream(uint32_t handle);
@@ -190,6 +195,7 @@ private:
bool is_output_vstream_aborted(uint32_t handle); bool is_output_vstream_aborted(uint32_t handle);
void abort_vstreams_by_pids(std::set<uint32_t> &pids); void abort_vstreams_by_pids(std::set<uint32_t> &pids);
void remove_disconnected_clients(); void remove_disconnected_clients();
void update_client_id_timestamp(uint32_t pid);
std::mutex m_mutex; std::mutex m_mutex;
std::map<uint32_t, std::chrono::time_point<std::chrono::high_resolution_clock>> m_clients_pids; std::map<uint32_t, std::chrono::time_point<std::chrono::high_resolution_clock>> m_clients_pids;

View File

@@ -3,8 +3,10 @@
# To change an environment variable's value, follow the steps: # To change an environment variable's value, follow the steps:
# 1. Change the value of the selected environemt variable in this file # 1. Change the value of the selected environemt variable in this file
# 2. Reload systemd unit files by running: `sudo systemctl daemon-reload` # 2. Reload systemd unit files by running: `sudo systemctl daemon-reload`
# 3. Enable and start service by running: `sudo systemctl enable --now hailort.service` # 3. Copy this file to /etc/default/hailort_service
# 4. Enable and start service by running: `sudo systemctl enable --now hailort.service`
[Service] [Service]
HAILORT_LOGGER_PATH="/var/log/hailo" HAILORT_LOGGER_PATH="/var/log/hailo"
HAILORT_LOGGER_FLUSH_EVERY_PRINT=0
HAILO_MONITOR=0 HAILO_MONITOR=0

View File

@@ -71,7 +71,7 @@ public:
return index; return index;
} }
uint32_t dup_handle(uint32_t pid, uint32_t handle) uint32_t dup_handle(uint32_t handle, uint32_t pid)
{ {
std::unique_lock<std::mutex> lock(m_mutex); std::unique_lock<std::mutex> lock(m_mutex);
auto resource_expected = resource_lookup(handle); auto resource_expected = resource_lookup(handle);

View File

@@ -31,7 +31,7 @@
#include <sys/stat.h> #include <sys/stat.h>
void RunService() { void RunService() {
const std::string server_address = hailort::HAILORT_SERVICE_DEFAULT_ADDR; const std::string server_address = hailort::HAILORT_SERVICE_ADDRESS;
hailort::HailoRtRpcService service; hailort::HailoRtRpcService service;
grpc::ServerBuilder builder; grpc::ServerBuilder builder;

View File

@@ -46,7 +46,7 @@ std::unique_ptr<grpc::Server> g_hailort_rpc_server = nullptr;
void RunService() void RunService()
{ {
const std::string server_address = hailort::HAILORT_SERVICE_DEFAULT_ADDR; const std::string server_address = hailort::HAILORT_SERVICE_ADDRESS;
hailort::HailoRtRpcService service; hailort::HailoRtRpcService service;
grpc::ServerBuilder builder; grpc::ServerBuilder builder;

View File

@@ -1,6 +1,10 @@
cmake_minimum_required(VERSION 3.0.0) cmake_minimum_required(VERSION 3.0.0)
include(GNUInstallDirs) include(GNUInstallDirs)
include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/spdlog.cmake)
include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/json.cmake)
include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/readerwriterqueue.cmake)
include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/dotwriter.cmake)
set(HAILORTCLI_CPP_FILES set(HAILORTCLI_CPP_FILES
hailortcli.cpp hailortcli.cpp
@@ -31,14 +35,13 @@ set(HAILORTCLI_CPP_FILES
run2/network_live_track.cpp run2/network_live_track.cpp
run2/measurement_live_track.cpp run2/measurement_live_track.cpp
run2/io_wrappers.cpp run2/io_wrappers.cpp
download_action_list_command.cpp
) )
if(UNIX) if(UNIX)
# Unix only modules # Unix only modules
set(HAILORTCLI_CPP_FILES ${HAILORTCLI_CPP_FILES} set(HAILORTCLI_CPP_FILES ${HAILORTCLI_CPP_FILES}
udp_rate_limiter_command.cpp udp_rate_limiter_command.cpp
# TODO: We dont compile download_action_list_command on windows, as it uses packed enums (HRT-5919)
download_action_list_command.cpp
measure_nnc_performance_command.cpp measure_nnc_performance_command.cpp
) )
endif() endif()
@@ -69,11 +72,13 @@ target_link_libraries(hailortcli
spdlog::spdlog spdlog::spdlog
readerwriterqueue readerwriterqueue
DotWriter DotWriter
scheduler_mon_proto) scheduler_mon_proto
profiler_proto)
if(WIN32) if(WIN32)
target_link_libraries(hailortcli Ws2_32 Iphlpapi Shlwapi winmm.lib) target_link_libraries(hailortcli Ws2_32 Iphlpapi Shlwapi winmm.lib)
elseif(CMAKE_SYSTEM_NAME STREQUAL QNX) elseif(CMAKE_SYSTEM_NAME STREQUAL QNX)
include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/pevents.cmake)
target_link_libraries(hailortcli pevents) target_link_libraries(hailortcli pevents)
endif() endif()
target_include_directories(hailortcli target_include_directories(hailortcli

View File

@@ -36,47 +36,74 @@ DownloadActionListCommand::DownloadActionListCommand(CLI::App &parent_app) :
hailo_status DownloadActionListCommand::execute(Device &device, const std::string &output_file_path, hailo_status DownloadActionListCommand::execute(Device &device, const std::string &output_file_path,
const ConfiguredNetworkGroupVector &network_groups, const std::string &hef_file_path) const ConfiguredNetworkGroupVector &network_groups, const std::string &hef_file_path)
{ {
std::cout << "> Writing action list to '" << output_file_path << "'... "; auto expected_action_list_json = init_json_object(device, hef_file_path);
CHECK_EXPECTED_AS_STATUS(expected_action_list_json);
auto curr_time = CliCommon::current_time_to_string(); auto action_list_json = expected_action_list_json.value();
CHECK_EXPECTED_AS_STATUS(curr_time);
auto chip_arch = device.get_architecture();
CHECK_EXPECTED_AS_STATUS(chip_arch);
unsigned int clock_cycle = 0;
// TODO - HRT-8046 Implement extended device info for hailo15
if (HAILO_ARCH_HAILO15 == chip_arch.value()) {
clock_cycle = HAILO15_VPU_CORE_CPU_DEFAULT_FREQ_MHZ;
} else {
auto extended_info = device.get_extended_device_information();
CHECK_EXPECTED_AS_STATUS(extended_info);
clock_cycle = (extended_info->neural_network_core_clock_rate / NN_CORE_TO_TIMER_FREQ_FACTOR) / MHz;
}
ordered_json action_list_json = {
{"version", ACTION_LIST_FORMAT_VERSION()},
{"creation_time", curr_time.release()},
{"clock_cycle_MHz", clock_cycle},
{"hef", json({})}
};
if (!hef_file_path.empty()) {
auto hef_info = parse_hef_metadata(hef_file_path);
CHECK_EXPECTED_AS_STATUS(hef_info);
action_list_json["hef"] = hef_info.release();
}
auto network_groups_list_json = parse_network_groups(device, network_groups); auto network_groups_list_json = parse_network_groups(device, network_groups);
CHECK_EXPECTED_AS_STATUS(network_groups_list_json); CHECK_EXPECTED_AS_STATUS(network_groups_list_json);
action_list_json["network_groups"] = network_groups_list_json.release(); action_list_json["network_groups"] = network_groups_list_json.release();
CHECK_SUCCESS(write_json(action_list_json, output_file_path)); return write_to_json(action_list_json, output_file_path);
}
hailo_status DownloadActionListCommand::execute(Device &device, std::shared_ptr<ConfiguredNetworkGroup> network_group,
uint16_t batch_size, ordered_json &action_list_json_param, double fps, uint32_t network_group_index)
{
auto expected_network_groups_list_json = parse_network_group(device, network_group, network_group_index);
CHECK_EXPECTED_AS_STATUS(expected_network_groups_list_json);
auto network_groups_list_json = expected_network_groups_list_json.release();
network_groups_list_json[0]["batch_size"] = batch_size;
network_groups_list_json[0]["fps"] = fps;
action_list_json_param["runs"] += network_groups_list_json[0];
return HAILO_SUCCESS;
}
hailo_status DownloadActionListCommand::write_to_json(ordered_json &action_list_json_param, const std::string &output_file_path)
{
std::cout << "> Writing action list to '" << output_file_path << "'... ";
CHECK_SUCCESS(write_json(action_list_json_param, output_file_path));
std::cout << "done." << std::endl; std::cout << "done." << std::endl;
return HAILO_SUCCESS; return HAILO_SUCCESS;
} }
Expected<ordered_json> DownloadActionListCommand::init_json_object(Device &device, const std::string &hef_file_path)
{
ordered_json action_list_json = {};
auto curr_time = CliCommon::current_time_to_string();
CHECK_EXPECTED(curr_time);
auto chip_arch = device.get_architecture();
CHECK_EXPECTED(chip_arch);
unsigned int clock_cycle = 0;
// TODO - HRT-8046 Implement extended device info for hailo15
if (HAILO_ARCH_HAILO15H == chip_arch.value()) {
clock_cycle = HAILO15_VPU_CORE_CPU_DEFAULT_FREQ_MHZ;
} else {
auto extended_info = device.get_extended_device_information();
CHECK_EXPECTED(extended_info);
clock_cycle = (extended_info->neural_network_core_clock_rate / NN_CORE_TO_TIMER_FREQ_FACTOR) / MHz;
}
action_list_json["version"] = ACTION_LIST_FORMAT_VERSION();
action_list_json["creation_time"] = curr_time.release();
action_list_json["clock_cycle_MHz"] = clock_cycle;
action_list_json["hef"] = json({});
if (!hef_file_path.empty()) {
auto hef_info = parse_hef_metadata(hef_file_path);
CHECK_EXPECTED(hef_info);
action_list_json["hef"] = hef_info.release();
}
action_list_json["runs"] = ordered_json::array();
return action_list_json;
}
hailo_status DownloadActionListCommand::set_batch_to_measure(Device &device, uint16_t batch_to_measure) hailo_status DownloadActionListCommand::set_batch_to_measure(Device &device, uint16_t batch_to_measure)
{ {
return device.set_context_action_list_timestamp_batch(batch_to_measure); return device.set_context_action_list_timestamp_batch(batch_to_measure);
@@ -148,6 +175,11 @@ hailo_status DownloadActionListCommand::write_json(const ordered_json &json_obj,
#pragma GCC diagnostic push #pragma GCC diagnostic push
#pragma GCC diagnostic error "-Wswitch-enum" #pragma GCC diagnostic error "-Wswitch-enum"
#endif #endif
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(error: 4061)
#endif
Expected<ordered_json> DownloadActionListCommand::parse_action_data(uint32_t base_address, uint8_t *action, Expected<ordered_json> DownloadActionListCommand::parse_action_data(uint32_t base_address, uint8_t *action,
uint32_t current_buffer_offset, uint32_t *action_length, CONTEXT_SWITCH_DEFS__ACTION_TYPE_t action_type, uint32_t current_buffer_offset, uint32_t *action_length, CONTEXT_SWITCH_DEFS__ACTION_TYPE_t action_type,
uint32_t timestamp, uint8_t sub_action_index, bool sub_action_index_set, bool *is_repeated, uint8_t *num_repeated, uint32_t timestamp, uint8_t sub_action_index, bool sub_action_index_set, bool *is_repeated, uint8_t *num_repeated,
@@ -284,6 +316,10 @@ Expected<ordered_json> DownloadActionListCommand::parse_action_data(uint32_t bas
data_json = json({}); data_json = json({});
action_length_local = 0; action_length_local = 0;
break; break;
case CONTEXT_SWITCH_DEFS__ACTION_TYPE_BURST_CREDITS_TASK_RESET:
data_json = json({});
action_length_local = 0;
break;
case CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_CFG_CHANNEL: case CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_CFG_CHANNEL:
data_json = *reinterpret_cast<CONTEXT_SWITCH_DEFS__activate_cfg_channel_t *>(action); data_json = *reinterpret_cast<CONTEXT_SWITCH_DEFS__activate_cfg_channel_t *>(action);
action_length_local = sizeof(CONTEXT_SWITCH_DEFS__activate_cfg_channel_t); action_length_local = sizeof(CONTEXT_SWITCH_DEFS__activate_cfg_channel_t);
@@ -316,6 +352,18 @@ Expected<ordered_json> DownloadActionListCommand::parse_action_data(uint32_t bas
data_json = *reinterpret_cast<CONTEXT_SWITCH_DEFS__switch_lcu_batch_action_data_t *>(action); data_json = *reinterpret_cast<CONTEXT_SWITCH_DEFS__switch_lcu_batch_action_data_t *>(action);
action_length_local = sizeof(CONTEXT_SWITCH_DEFS__switch_lcu_batch_action_data_t); action_length_local = sizeof(CONTEXT_SWITCH_DEFS__switch_lcu_batch_action_data_t);
break; break;
case CONTEXT_SWITCH_DEFS__ACTION_TYPE_CHANGE_BOUNDARY_INPUT_BATCH:
data_json = *reinterpret_cast<CONTEXT_SWITCH_DEFS__change_boundary_input_batch_t *>(action);
action_length_local = sizeof(CONTEXT_SWITCH_DEFS__change_boundary_input_batch_t);
break;
case CONTEXT_SWITCH_DEFS__ACTION_TYPE_PAUSE_VDMA_CHANNEL:
data_json = *reinterpret_cast<CONTEXT_SWITCH_DEFS__pause_vdma_channel_action_data_t *>(action);
action_length_local = sizeof(CONTEXT_SWITCH_DEFS__pause_vdma_channel_action_data_t);
break;
case CONTEXT_SWITCH_DEFS__ACTION_TYPE_RESUME_VDMA_CHANNEL:
data_json = *reinterpret_cast<CONTEXT_SWITCH_DEFS__resume_vdma_channel_action_data_t *>(action);
action_length_local = sizeof(CONTEXT_SWITCH_DEFS__resume_vdma_channel_action_data_t);
break;
case CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT: case CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT:
// Fallthrough // Fallthrough
// Handling CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT is needed because we compile this file with -Wswitch-enum // Handling CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT is needed because we compile this file with -Wswitch-enum
@@ -330,6 +378,9 @@ Expected<ordered_json> DownloadActionListCommand::parse_action_data(uint32_t bas
#if defined(__GNUC__) #if defined(__GNUC__)
#pragma GCC diagnostic pop #pragma GCC diagnostic pop
#endif #endif
#if defined(_MSC_VER)
#pragma warning(pop)
#endif
Expected<ordered_json> DownloadActionListCommand::parse_single_repeated_action(uint32_t base_address, Expected<ordered_json> DownloadActionListCommand::parse_single_repeated_action(uint32_t base_address,
uint8_t *action, uint32_t current_buffer_offset, uint32_t *action_length, uint8_t *action, uint32_t current_buffer_offset, uint32_t *action_length,
@@ -362,10 +413,11 @@ Expected<ordered_json> DownloadActionListCommand::parse_single_action(uint32_t b
Expected<ordered_json> DownloadActionListCommand::parse_context(Device &device, uint32_t network_group_id, Expected<ordered_json> DownloadActionListCommand::parse_context(Device &device, uint32_t network_group_id,
CONTROL_PROTOCOL__context_switch_context_type_t context_type, uint8_t context_index, const std::string &context_name) CONTROL_PROTOCOL__context_switch_context_type_t context_type, uint8_t context_index, const std::string &context_name)
{ {
uint8_t converted_context_type = static_cast<uint8_t>(context_type);
uint32_t action_list_base_address = 0; uint32_t action_list_base_address = 0;
uint32_t batch_counter = 0; uint32_t batch_counter = 0;
auto action_list = device.download_context_action_list(network_group_id, context_type, context_index, auto action_list = device.download_context_action_list(network_group_id, converted_context_type, context_index,
&action_list_base_address, &batch_counter); &action_list_base_address, &batch_counter);
CHECK_EXPECTED(action_list); CHECK_EXPECTED(action_list);
// Needs to fit in 2 bytes due to firmware limitation of action list size // Needs to fit in 2 bytes due to firmware limitation of action list size
@@ -424,55 +476,66 @@ Expected<ordered_json> DownloadActionListCommand::parse_network_groups(Device &d
const auto number_of_dynamic_contexts_per_network_group = device.get_number_of_dynamic_contexts_per_network_group(); const auto number_of_dynamic_contexts_per_network_group = device.get_number_of_dynamic_contexts_per_network_group();
CHECK_EXPECTED(number_of_dynamic_contexts_per_network_group); CHECK_EXPECTED(number_of_dynamic_contexts_per_network_group);
auto number_of_network_groups = (uint32_t)number_of_dynamic_contexts_per_network_group->size();
ordered_json network_group_list_json; ordered_json network_group_list_json;
for (uint32_t network_group_index = 0; network_group_index < number_of_dynamic_contexts_per_network_group->size(); network_group_index++) { for (uint32_t network_group_index = 0; network_group_index < number_of_network_groups; network_group_index++) {
// TODO: HRT-8147 use the real network_group_id instead of network_group_index auto &network_group = (network_group_index < network_groups.size()) ? network_groups[network_group_index] : nullptr;
const uint32_t network_group_id = network_group_index; auto expected_json_file = parse_network_group(device, network_group, network_group_index);
CHECK_EXPECTED(expected_json_file);
// TODO: network_group_name via Hef::get_network_groups_names (HRT-5997) network_group_list_json.emplace_back(expected_json_file.value());
ordered_json network_group_json = {
{"mean_activation_time_ms", INVALID_NUMERIC_VALUE},
{"mean_deactivation_time_ms", INVALID_NUMERIC_VALUE},
{"network_group_id", network_group_id},
{"contexts", json::array()}
};
// We assume the the order of the network_groups in the ConfiguredNetworkGroupVector and in the action_list
// downloaded from the fw is the same. If the received ConfiguredNetworkGroupVector is empty, we leave the
// mean_de/activation_time_ms with their default values (INVALID_NUMERIC_VALUE).
if (network_groups.size() > network_group_index) {
network_group_json["mean_activation_time_ms"] = get_accumulator_mean_value(
network_groups[network_group_index]->get_activation_time_accumulator());
network_group_json["mean_deactivation_time_ms"] = get_accumulator_mean_value(
network_groups[network_group_index]->get_deactivation_time_accumulator());
}
auto activation_context_json = parse_context(device, network_group_id,
CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_ACTIVATION, 0, "activation");
CHECK_EXPECTED(activation_context_json);
network_group_json["contexts"].emplace_back(activation_context_json.release());
auto preliminary_context_json = parse_context(device, network_group_id,
CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_PRELIMINARY, 0, "preliminary");
CHECK_EXPECTED(preliminary_context_json);
network_group_json["contexts"].emplace_back(preliminary_context_json.release());
const auto dynamic_contexts_count = number_of_dynamic_contexts_per_network_group.value()[network_group_index];
for (uint8_t context_index = 0; context_index < dynamic_contexts_count; context_index++) {
auto context_json = parse_context(device, network_group_id,
CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_DYNAMIC, context_index,
fmt::format("dynamic_{}", context_index));
CHECK_EXPECTED(context_json);
network_group_json["contexts"].emplace_back(context_json.release());
}
auto batch_switching_context_json = parse_context(device, network_group_id,
CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_BATCH_SWITCHING, 0, "batch_switching");
CHECK_EXPECTED(batch_switching_context_json);
network_group_json["contexts"].emplace_back(batch_switching_context_json.release());
network_group_list_json.emplace_back(network_group_json);
} }
return network_group_list_json;
}
Expected<ordered_json> DownloadActionListCommand::parse_network_group(Device &device, const std::shared_ptr<ConfiguredNetworkGroup> network_group, uint32_t network_group_id)
{
const auto number_of_dynamic_contexts_per_network_group = device.get_number_of_dynamic_contexts_per_network_group();
CHECK_EXPECTED(number_of_dynamic_contexts_per_network_group);
ordered_json network_group_list_json;
// TODO: network_group_name via Hef::get_network_groups_names (HRT-5997)
ordered_json network_group_json = {
{"batch_size", INVALID_NUMERIC_VALUE},
{"mean_activation_time_ms", INVALID_NUMERIC_VALUE},
{"mean_deactivation_time_ms", INVALID_NUMERIC_VALUE},
{"network_group_id", network_group_id},
{"fps", INVALID_NUMERIC_VALUE},
{"contexts", json::array()}
};
if(network_group != nullptr) {
network_group_json["mean_activation_time_ms"] = get_accumulator_mean_value(
network_group->get_activation_time_accumulator());
network_group_json["mean_deactivation_time_ms"] = get_accumulator_mean_value(
network_group->get_deactivation_time_accumulator());
}
auto activation_context_json = parse_context(device, network_group_id,
CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_ACTIVATION, 0, "activation");
CHECK_EXPECTED(activation_context_json);
network_group_json["contexts"].emplace_back(activation_context_json.release());
auto preliminary_context_json = parse_context(device, network_group_id,
CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_PRELIMINARY, 0, "preliminary");
CHECK_EXPECTED(preliminary_context_json);
network_group_json["contexts"].emplace_back(preliminary_context_json.release());
const auto dynamic_contexts_count = number_of_dynamic_contexts_per_network_group.value()[network_group_id];
for (uint8_t context_index = 0; context_index < dynamic_contexts_count; context_index++) {
auto context_json = parse_context(device, network_group_id,
CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_DYNAMIC, context_index,
fmt::format("dynamic_{}", context_index));
CHECK_EXPECTED(context_json);
network_group_json["contexts"].emplace_back(context_json.release());
}
auto batch_switching_context_json = parse_context(device, network_group_id,
CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_BATCH_SWITCHING, 0, "batch_switching");
CHECK_EXPECTED(batch_switching_context_json);
network_group_json["contexts"].emplace_back(batch_switching_context_json.release());
network_group_list_json.emplace_back(network_group_json);
return network_group_list_json; return network_group_list_json;
} }
@@ -639,3 +702,18 @@ void to_json(json& j, const CONTEXT_SWITCH_DEFS__switch_lcu_batch_action_data_t&
j = json{{"cluster_index", cluster_index}, {"lcu_index", lcu_index}, {"network_index", network_index}, j = json{{"cluster_index", cluster_index}, {"lcu_index", lcu_index}, {"network_index", network_index},
{"kernel_done_count", kernel_done_count}}; {"kernel_done_count", kernel_done_count}};
} }
void to_json(json &j, const CONTEXT_SWITCH_DEFS__pause_vdma_channel_action_data_t &data)
{
j = unpack_vdma_channel_id(data);
}
void to_json(json &j, const CONTEXT_SWITCH_DEFS__resume_vdma_channel_action_data_t &data)
{
j = unpack_vdma_channel_id(data);
}
void to_json(json &j, const CONTEXT_SWITCH_DEFS__change_boundary_input_batch_t &data)
{
j = unpack_vdma_channel_id(data);
}

View File

@@ -29,6 +29,10 @@ public:
// To be used from external commands // To be used from external commands
static hailo_status execute(Device &device, const std::string &output_file_path, static hailo_status execute(Device &device, const std::string &output_file_path,
const ConfiguredNetworkGroupVector &network_groups={}, const std::string &hef_file_path=""); const ConfiguredNetworkGroupVector &network_groups={}, const std::string &hef_file_path="");
static hailo_status execute(Device &device, std::shared_ptr<ConfiguredNetworkGroup> network_group,
uint16_t batch_size, ordered_json &action_list_json, double fps, uint32_t network_group_index);
static hailo_status write_to_json(ordered_json &action_list_json_param, const std::string &output_file_path);
static Expected<ordered_json> init_json_object(Device &device, const std::string &hef_file_path);
static hailo_status set_batch_to_measure(Device &device, uint16_t batch_to_measure); static hailo_status set_batch_to_measure(Device &device, uint16_t batch_to_measure);
protected: protected:
@@ -38,7 +42,7 @@ private:
std::string m_output_file_path; std::string m_output_file_path;
static constexpr int DEFAULT_JSON_TAB_WIDTH = 4; static constexpr int DEFAULT_JSON_TAB_WIDTH = 4;
static constexpr int INVALID_NUMERIC_VALUE = -1; static constexpr int INVALID_NUMERIC_VALUE = -1;
static std::string ACTION_LIST_FORMAT_VERSION() { return "1.0"; } static std::string ACTION_LIST_FORMAT_VERSION() { return "2.0"; }
static Expected<ordered_json> parse_hef_metadata(const std::string &hef_file_path); static Expected<ordered_json> parse_hef_metadata(const std::string &hef_file_path);
static bool is_valid_hef(const std::string &hef_file_path); static bool is_valid_hef(const std::string &hef_file_path);
@@ -61,6 +65,8 @@ private:
const std::string &context_name); const std::string &context_name);
static double get_accumulator_mean_value(const AccumulatorPtr &accumulator, double default_value = INVALID_NUMERIC_VALUE); static double get_accumulator_mean_value(const AccumulatorPtr &accumulator, double default_value = INVALID_NUMERIC_VALUE);
static Expected<ordered_json> parse_network_groups(Device &device, const ConfiguredNetworkGroupVector &network_groups); static Expected<ordered_json> parse_network_groups(Device &device, const ConfiguredNetworkGroupVector &network_groups);
static Expected<ordered_json> parse_network_group(Device &device,
const std::shared_ptr<ConfiguredNetworkGroup> network_group, uint32_t network_group_id);
}; };
// JSON serialization // JSON serialization
@@ -84,6 +90,7 @@ static std::pair<CONTEXT_SWITCH_DEFS__ACTION_TYPE_t, std::string> mapping[] = {
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_ADD_DDR_PAIR_INFO, "add_ddr_pair_info"}, {CONTEXT_SWITCH_DEFS__ACTION_TYPE_ADD_DDR_PAIR_INFO, "add_ddr_pair_info"},
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_DDR_BUFFERING_START, "ddr_buffering_start"}, {CONTEXT_SWITCH_DEFS__ACTION_TYPE_DDR_BUFFERING_START, "ddr_buffering_start"},
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_BURST_CREDITS_TASK_START, "burst_credits_task_start"}, {CONTEXT_SWITCH_DEFS__ACTION_TYPE_BURST_CREDITS_TASK_START, "burst_credits_task_start"},
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_BURST_CREDITS_TASK_RESET, "burst_credits_task_reset"},
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_LCU_INTERRUPT, "lcu_interrupt"}, {CONTEXT_SWITCH_DEFS__ACTION_TYPE_LCU_INTERRUPT, "lcu_interrupt"},
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_SEQUENCER_DONE_INTERRUPT, "sequencer_done_interrupt"}, {CONTEXT_SWITCH_DEFS__ACTION_TYPE_SEQUENCER_DONE_INTERRUPT, "sequencer_done_interrupt"},
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_INPUT_CHANNEL_TRANSFER_DONE_INTERRUPT, "input_channel_transfer_done_interrupt"}, {CONTEXT_SWITCH_DEFS__ACTION_TYPE_INPUT_CHANNEL_TRANSFER_DONE_INTERRUPT, "input_channel_transfer_done_interrupt"},
@@ -102,6 +109,9 @@ static std::pair<CONTEXT_SWITCH_DEFS__ACTION_TYPE_t, std::string> mapping[] = {
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_ENABLE_NMS, "enable_nms"}, {CONTEXT_SWITCH_DEFS__ACTION_TYPE_ENABLE_NMS, "enable_nms"},
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_WRITE_DATA_BY_TYPE, "write_data_by_type"}, {CONTEXT_SWITCH_DEFS__ACTION_TYPE_WRITE_DATA_BY_TYPE, "write_data_by_type"},
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_SWITCH_LCU_BATCH, "switch_lcu_batch"}, {CONTEXT_SWITCH_DEFS__ACTION_TYPE_SWITCH_LCU_BATCH, "switch_lcu_batch"},
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_CHANGE_BOUNDARY_INPUT_BATCH, "change boundary input batch"},
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_PAUSE_VDMA_CHANNEL, "pause vdma channel"},
{CONTEXT_SWITCH_DEFS__ACTION_TYPE_RESUME_VDMA_CHANNEL, "resume vdma channel"},
}; };
static_assert(ARRAY_ENTRIES(mapping) == CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT, static_assert(ARRAY_ENTRIES(mapping) == CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT,
"Missing a mapping from a CONTEXT_SWITCH_DEFS__ACTION_TYPE_t to it's string value"); "Missing a mapping from a CONTEXT_SWITCH_DEFS__ACTION_TYPE_t to it's string value");
@@ -142,5 +152,8 @@ void to_json(json &j, const CONTEXT_SWITCH_DEFS__add_ddr_pair_info_action_data_t
void to_json(json &j, const CONTEXT_SWITCH_DEFS__open_boundary_input_channel_data_t &data); void to_json(json &j, const CONTEXT_SWITCH_DEFS__open_boundary_input_channel_data_t &data);
void to_json(json &j, const CONTEXT_SWITCH_DEFS__open_boundary_output_channel_data_t &data); void to_json(json &j, const CONTEXT_SWITCH_DEFS__open_boundary_output_channel_data_t &data);
void to_json(json &j, const CONTEXT_SWITCH_DEFS__switch_lcu_batch_action_data_t &data); void to_json(json &j, const CONTEXT_SWITCH_DEFS__switch_lcu_batch_action_data_t &data);
void to_json(json &j, const CONTEXT_SWITCH_DEFS__pause_vdma_channel_action_data_t &data);
void to_json(json &j, const CONTEXT_SWITCH_DEFS__resume_vdma_channel_action_data_t &data);
void to_json(json &j, const CONTEXT_SWITCH_DEFS__change_boundary_input_batch_t &data);
#endif /* _HAILO_DOWNLOAD_ACTION_LIST_COMMAND_HPP_ */ #endif /* _HAILO_DOWNLOAD_ACTION_LIST_COMMAND_HPP_ */

View File

@@ -138,8 +138,10 @@ static std::string identity_arch_string(const hailo_device_identity_t &identity)
return "HAILO8"; return "HAILO8";
case HAILO_ARCH_HAILO8L: case HAILO_ARCH_HAILO8L:
return "HAILO8L"; return "HAILO8L";
case HAILO_ARCH_HAILO15: case HAILO_ARCH_HAILO15H:
return "HAILO15"; return "HAILO15H";
case HAILO_ARCH_PLUTO:
return "PLUTO";
default: default:
return "Unknown"; return "Unknown";
} }
@@ -230,8 +232,5 @@ FwControlCommand::FwControlCommand(CLI::App &parent_app) :
add_subcommand<FwControlIdentifyCommand>(); add_subcommand<FwControlIdentifyCommand>();
add_subcommand<FwControlResetCommand>(); add_subcommand<FwControlResetCommand>();
add_subcommand<FwControlTestMemoriesCommand>(); add_subcommand<FwControlTestMemoriesCommand>();
// TODO: Support on windows (HRT-5919)
#if defined(__GNUC__)
add_subcommand<DownloadActionListCommand>(); add_subcommand<DownloadActionListCommand>();
#endif
} }

View File

@@ -12,10 +12,7 @@
#include "hailortcli.hpp" #include "hailortcli.hpp"
#include "command.hpp" #include "command.hpp"
#if defined(__GNUC__)
// TODO: Support on windows (HRT-5919)
#include "download_action_list_command.hpp" #include "download_action_list_command.hpp"
#endif
class FwControlIdentifyCommand : public DeviceCommand { class FwControlIdentifyCommand : public DeviceCommand {
public: public:

View File

@@ -215,7 +215,7 @@ private:
inline void hailo_deprecate_options(CLI::App *app, const std::vector<DeprecationActionPtr> &actions, bool set_footer = true) inline void hailo_deprecate_options(CLI::App *app, const std::vector<DeprecationActionPtr> &actions, bool set_footer = true)
{ {
// std::set and not std::vector in case two actions have the smae deprection string // std::set and not std::vector in case two actions have the same deprecation string
std::set<std::string> deprecation_messages; std::set<std::string> deprecation_messages;
for (const auto& deprecation_action : actions) { for (const auto& deprecation_action : actions) {
deprecation_messages.insert(deprecation_action->deprecate(!set_footer)); deprecation_messages.insert(deprecation_action->deprecate(!set_footer));

View File

@@ -28,13 +28,15 @@ Expected<std::shared_ptr<InferProgress>> InferProgress::create(const inference_r
InferProgress::InferProgress(const inference_runner_params &params, InferProgress::InferProgress(const inference_runner_params &params,
std::chrono::milliseconds print_interval, hailo_status &status) : std::chrono::milliseconds print_interval, hailo_status &status) :
m_params(params), m_print_interval(print_interval), m_networks_progress(), m_params(params), m_print_interval(print_interval), m_networks_progress(),
m_stop_event(Event::create_shared(Event::State::not_signalled)), m_finished(false) m_stop_event(), m_finished(false)
{ {
if (nullptr == m_stop_event) { auto event_exp = Event::create_shared(Event::State::not_signalled);
if (!event_exp) {
LOGGER__ERROR("Failed to create event for progress bar"); LOGGER__ERROR("Failed to create event for progress bar");
status = HAILO_OUT_OF_HOST_MEMORY; status = event_exp.status();
return; return;
} }
m_stop_event = event_exp.release();
status = HAILO_SUCCESS; status = HAILO_SUCCESS;
} }

View File

@@ -30,6 +30,7 @@ constexpr size_t DEVICE_ID_WIDTH = STRING_WIDTH;
constexpr size_t STREAM_NAME_WIDTH = STRING_WIDTH; constexpr size_t STREAM_NAME_WIDTH = STRING_WIDTH;
constexpr size_t UTILIZATION_WIDTH = 25; constexpr size_t UTILIZATION_WIDTH = 25;
constexpr size_t NUMBER_WIDTH = 15; constexpr size_t NUMBER_WIDTH = 15;
constexpr size_t FRAME_VALUE_WIDTH = 8;
constexpr size_t TERMINAL_DEFAULT_WIDTH = 80; constexpr size_t TERMINAL_DEFAULT_WIDTH = 80;
constexpr size_t LINE_LENGTH = NETWORK_GROUP_NAME_WIDTH + STREAM_NAME_WIDTH + UTILIZATION_WIDTH + NUMBER_WIDTH; constexpr size_t LINE_LENGTH = NETWORK_GROUP_NAME_WIDTH + STREAM_NAME_WIDTH + UTILIZATION_WIDTH + NUMBER_WIDTH;
constexpr std::chrono::milliseconds EPSILON_TIME(500); constexpr std::chrono::milliseconds EPSILON_TIME(500);
@@ -119,11 +120,19 @@ void MonCommand::print_frames_header()
std::setw(STRING_WIDTH) << std::left << "Model" << std::setw(STRING_WIDTH) << std::left << "Model" <<
std::setw(STRING_WIDTH) << std::left << "Stream" << std::setw(STRING_WIDTH) << std::left << "Stream" <<
std::setw(NUMBER_WIDTH) << std::left << "Direction" << std::setw(NUMBER_WIDTH) << std::left << "Direction" <<
std::setw(NUMBER_WIDTH) << std::left << "Frames" << std::setw(3 * FRAME_VALUE_WIDTH - 2) << std::internal << "Frames Queue" <<
"\n" << std::left << std::string(LINE_LENGTH, '-') << "\n"; "\n" <<
std::setw(STRING_WIDTH) << std::left << "" <<
std::setw(STRING_WIDTH) << std::left << "" <<
std::setw(NUMBER_WIDTH) << std::left << "" <<
std::setw(FRAME_VALUE_WIDTH) << "Avg" <<
std::setw(FRAME_VALUE_WIDTH) << "Max" <<
std::setw(FRAME_VALUE_WIDTH) << "Min" <<
std::setw(FRAME_VALUE_WIDTH) << "Capacity" <<
"\n" << std::left << std::string(LINE_LENGTH + NUMBER_WIDTH, '-') << "\n";
} }
void MonCommand::print_frames_table(const ProtoMon &mon_message) hailo_status MonCommand::print_frames_table(const ProtoMon &mon_message)
{ {
for (const auto &net_info : mon_message.net_frames_infos()) { for (const auto &net_info : mon_message.net_frames_infos()) {
auto &original_net_name = net_info.network_name(); auto &original_net_name = net_info.network_name();
@@ -133,20 +142,40 @@ void MonCommand::print_frames_table(const ProtoMon &mon_message)
auto stream_name = truncate_str(stream_name_original, STREAM_NAME_WIDTH); auto stream_name = truncate_str(stream_name_original, STREAM_NAME_WIDTH);
auto stream_direction = (streams_frames.stream_direction() == PROTO__STREAM_DIRECTION__HOST_TO_DEVICE) ? "H2D" : "D2H"; auto stream_direction = (streams_frames.stream_direction() == PROTO__STREAM_DIRECTION__HOST_TO_DEVICE) ? "H2D" : "D2H";
std::string frames; std::string max_frames, min_frames, queue_size;
double avg_frames;
if (SCHEDULER_MON_NAN_VAL == streams_frames.buffer_frames_size() || SCHEDULER_MON_NAN_VAL == streams_frames.pending_frames_count()) { if (SCHEDULER_MON_NAN_VAL == streams_frames.buffer_frames_size() || SCHEDULER_MON_NAN_VAL == streams_frames.pending_frames_count()) {
frames = "NaN"; avg_frames = -1;
max_frames = "NaN";
min_frames = "NaN";
queue_size = "NaN";
} else { } else {
frames = std::to_string(streams_frames.pending_frames_count()) + "/" + std::to_string(streams_frames.buffer_frames_size()); avg_frames = streams_frames.avg_pending_frames_count();
max_frames = std::to_string(streams_frames.max_pending_frames_count());
min_frames = std::to_string(streams_frames.min_pending_frames_count());
queue_size = std::to_string(streams_frames.buffer_frames_size());
}
std::string avg_frames_str;
if (avg_frames == -1) {
avg_frames_str = "NaN";
} else {
std::stringstream ss;
ss << std::fixed << std::setprecision(2) << avg_frames;
avg_frames_str = ss.str();
} }
std::cout << std::cout <<
std::setw(STRING_WIDTH) << std::left << net_name << std::setw(STRING_WIDTH) << std::left << net_name <<
std::setw(STRING_WIDTH) << std::left << stream_name << std::setw(STRING_WIDTH) << std::left << stream_name <<
std::setw(NUMBER_WIDTH) << std::left << stream_direction << std::setw(NUMBER_WIDTH) << std::left << stream_direction <<
std::setw(NUMBER_WIDTH) << std::left << frames << "\n"; std::setw(FRAME_VALUE_WIDTH) << std::left << avg_frames_str <<
std::setw(FRAME_VALUE_WIDTH) << std::left << max_frames <<
std::setw(FRAME_VALUE_WIDTH) << std::left << min_frames <<
std::setw(FRAME_VALUE_WIDTH) << std::left << queue_size << "\n";
} }
} }
return HAILO_SUCCESS;
} }
#if defined(__GNUC__) #if defined(__GNUC__)
@@ -163,7 +192,7 @@ Expected<uint16_t> get_terminal_line_width()
return terminal_line_width; return terminal_line_width;
} }
void MonCommand::print_tables(const std::vector<ProtoMon> &mon_messages, uint32_t terminal_line_width) hailo_status MonCommand::print_tables(const std::vector<ProtoMon> &mon_messages, uint32_t terminal_line_width)
{ {
print_devices_info_header(); print_devices_info_header();
for (const auto &mon_message : mon_messages) { for (const auto &mon_message : mon_messages) {
@@ -184,8 +213,9 @@ void MonCommand::print_tables(const std::vector<ProtoMon> &mon_messages, uint32_
print_frames_header(); print_frames_header();
for (const auto &mon_message : mon_messages) { for (const auto &mon_message : mon_messages) {
print_frames_table(mon_message); CHECK_SUCCESS(print_frames_table(mon_message));
} }
return HAILO_SUCCESS;
} }
static volatile bool keep_running = true; static volatile bool keep_running = true;
@@ -235,7 +265,7 @@ hailo_status MonCommand::run_monitor()
} }
} }
print_tables(mon_messages, terminal_line_width); CHECK_SUCCESS(print_tables(mon_messages, terminal_line_width));
if (print_warning_msg) { if (print_warning_msg) {
std::cout << FORMAT_GREEN_PRINT << "Monitor did not retrieve any files. This occurs when there is no application currently running.\n" std::cout << FORMAT_GREEN_PRINT << "Monitor did not retrieve any files. This occurs when there is no application currently running.\n"
<< "If this is not the case, verify that environment variable '" << SCHEDULER_MON_ENV_VAR << "' is set to 1.\n" << FORMAT_NORMAL_PRINT; << "If this is not the case, verify that environment variable '" << SCHEDULER_MON_ENV_VAR << "' is set to 1.\n" << FORMAT_NORMAL_PRINT;

View File

@@ -15,6 +15,7 @@
#include "hailortcli.hpp" #include "hailortcli.hpp"
#include "command.hpp" #include "command.hpp"
#include "utils/profiler/monitor_handler.hpp" #include "utils/profiler/monitor_handler.hpp"
#include "common/runtime_statistics_internal.hpp"
#include "CLI/CLI.hpp" #include "CLI/CLI.hpp"
@@ -30,13 +31,13 @@ public:
private: private:
hailo_status run_monitor(); hailo_status run_monitor();
void print_tables(const std::vector<ProtoMon> &mon_messages, uint32_t terminal_line_width); hailo_status print_tables(const std::vector<ProtoMon> &mon_messages, uint32_t terminal_line_width);
void print_devices_info_header(); void print_devices_info_header();
void print_networks_info_header(); void print_networks_info_header();
void print_frames_header(); void print_frames_header();
void print_devices_info_table(const ProtoMon &mon_message); void print_devices_info_table(const ProtoMon &mon_message);
void print_networks_info_table(const ProtoMon &mon_message); void print_networks_info_table(const ProtoMon &mon_message);
void print_frames_table(const ProtoMon &mon_message); hailo_status print_frames_table(const ProtoMon &mon_message);
hailo_status run_in_alternative_terminal(); hailo_status run_in_alternative_terminal();
}; };

View File

@@ -44,8 +44,10 @@ private:
}; };
// Wrapper for InputStream or InputVStream objects. // Wrapper for InputStream or InputVStream objects.
// We use std::enable_from_this because on async api the callback is using `this`. We want to increase the reference
// count until the callback is over.
template<typename Writer> template<typename Writer>
class WriterWrapper final class WriterWrapper final : public std::enable_shared_from_this<WriterWrapper<Writer>>
{ {
public: public:
template<typename WriterParams> template<typename WriterParams>
@@ -85,8 +87,12 @@ public:
hailo_status write_async(typename Writer::TransferDoneCallback callback) hailo_status write_async(typename Writer::TransferDoneCallback callback)
{ {
before_write_start(); before_write_start();
// We can use the same buffer for multiple writes simultaneously. That is OK since we don't modify the buffers. auto self = std::enable_shared_from_this<WriterWrapper<Writer>>::shared_from_this();
auto status = get().write_async(MemoryView(*next_buffer()), callback); auto status = get().write_async(MemoryView(*next_buffer()),
[self, original=callback](const typename Writer::CompletionInfo &completion_info) {
(void)self; // Keeping self here so the buffer won't be deleted until the callback is called.
original(completion_info);
});
if (HAILO_SUCCESS != status) { if (HAILO_SUCCESS != status) {
return status; return status;
} }
@@ -150,9 +156,6 @@ private:
"Input file ({}) size {} must be a multiple of the frame size {}", "Input file ({}) size {} must be a multiple of the frame size {}",
file_path, buffer->size(), frame_size); file_path, buffer->size(), frame_size);
auto buffer_ptr = make_shared_nothrow<Buffer>(buffer.release());
CHECK_NOT_NULL_AS_EXPECTED(buffer_ptr, HAILO_OUT_OF_HOST_MEMORY);
std::vector<BufferPtr> dataset; std::vector<BufferPtr> dataset;
const size_t frames_count = buffer->size() / frame_size; const size_t frames_count = buffer->size() / frame_size;
dataset.reserve(frames_count); dataset.reserve(frames_count);

View File

@@ -17,6 +17,8 @@
using namespace hailort; using namespace hailort;
const uint8_t NETWORK_STATS_LEVEL = 1;
hailo_status LiveStats::Track::start() hailo_status LiveStats::Track::start()
{ {
CHECK_SUCCESS(start_impl()); CHECK_SUCCESS(start_impl());
@@ -40,15 +42,25 @@ void LiveStats::Track::push_json(nlohmann::ordered_json &json)
push_json_impl(json); push_json_impl(json);
} }
Expected<double> LiveStats::Track::get_last_measured_fps()
{
// This virtual getter is supported only for the derived class NetworkLiveTrack
return make_unexpected(HAILO_NOT_AVAILABLE);
}
LiveStats::LiveStats(std::chrono::milliseconds interval) : LiveStats::LiveStats(std::chrono::milliseconds interval) :
m_running(false), m_running(false),
m_interval(interval), m_interval(interval),
m_stop_event(Event::create_shared(Event::State::not_signalled)), m_stop_event(),
m_tracks(), m_tracks(),
m_mutex(), m_mutex(),
m_prev_count(0), m_prev_count(0),
m_enable_ansi_escape_sequences(CursorAdjustment()) m_enable_ansi_escape_sequences(CursorAdjustment())
{ {
auto event_exp = Event::create_shared(Event::State::not_signalled);
assert(event_exp);
m_stop_event = event_exp.release();
} }
LiveStats::~LiveStats() LiveStats::~LiveStats()
@@ -114,6 +126,20 @@ hailo_status LiveStats::dump_stats(const std::string &json_path, const std::stri
return HAILO_SUCCESS; return HAILO_SUCCESS;
} }
Expected<std::vector<double>> LiveStats::get_last_measured_fps_per_network_group()
{
std::vector<double> last_measured_fpss;
CHECK_AS_EXPECTED(contains(m_tracks, NETWORK_STATS_LEVEL), HAILO_NOT_AVAILABLE);
for (size_t network_stats_track_index = 0; network_stats_track_index < m_tracks[NETWORK_STATS_LEVEL].size(); network_stats_track_index++) {
auto expected_fps = m_tracks[NETWORK_STATS_LEVEL][network_stats_track_index]->get_last_measured_fps();
CHECK_EXPECTED(expected_fps);
last_measured_fpss.emplace_back(expected_fps.release());
}
return last_measured_fpss;
}
hailo_status LiveStats::start() hailo_status LiveStats::start()
{ {
// In order to re-start LiveStats, we should add m_stop_event->reset() here // In order to re-start LiveStats, we should add m_stop_event->reset() here

View File

@@ -12,6 +12,8 @@
#include "common/os_utils.hpp" #include "common/os_utils.hpp"
#include "hailo/event.hpp" #include "hailo/event.hpp"
#include "hailo/expected.hpp"
#include <nlohmann/json.hpp> #include <nlohmann/json.hpp>
#include <stdint.h> #include <stdint.h>
#include <chrono> #include <chrono>
@@ -32,6 +34,7 @@ public:
hailo_status start(); hailo_status start();
uint32_t push_text(std::stringstream &ss); uint32_t push_text(std::stringstream &ss);
void push_json(nlohmann::ordered_json &json); void push_json(nlohmann::ordered_json &json);
virtual hailort::Expected<double> get_last_measured_fps();
protected: protected:
virtual hailo_status start_impl() = 0; virtual hailo_status start_impl() = 0;
@@ -48,6 +51,7 @@ public:
hailo_status dump_stats(const std::string &json_path, const std::string &inference_mode); hailo_status dump_stats(const std::string &json_path, const std::string &inference_mode);
hailo_status start(); hailo_status start();
void stop(); void stop();
hailort::Expected<std::vector<double>> get_last_measured_fps_per_network_group();
private: private:
bool m_running; bool m_running;

View File

@@ -24,7 +24,8 @@ NetworkLiveTrack::NetworkLiveTrack(const std::string &name, std::shared_ptr<Conf
m_cng(cng), m_cng(cng),
m_overall_latency_meter(overall_latency_meter), m_overall_latency_meter(overall_latency_meter),
m_measure_fps(measure_fps), m_measure_fps(measure_fps),
m_hef_path(hef_path) m_hef_path(hef_path),
m_last_measured_fps(0)
{ {
std::lock_guard<std::mutex> lock(mutex); std::lock_guard<std::mutex> lock(mutex);
max_ng_name = std::max(m_name.size(), max_ng_name); max_ng_name = std::max(m_name.size(), max_ng_name);
@@ -43,9 +44,15 @@ double NetworkLiveTrack::get_fps()
auto elapsed_time = std::chrono::steady_clock::now() - m_last_get_time; auto elapsed_time = std::chrono::steady_clock::now() - m_last_get_time;
auto count = m_count.load(); auto count = m_count.load();
auto fps = count / std::chrono::duration<double>(elapsed_time).count(); auto fps = count / std::chrono::duration<double>(elapsed_time).count();
m_last_measured_fps = fps;
return fps; return fps;
} }
Expected<double> NetworkLiveTrack::get_last_measured_fps()
{
return Expected<double>(m_last_measured_fps);
}
uint32_t NetworkLiveTrack::push_text_impl(std::stringstream &ss) uint32_t NetworkLiveTrack::push_text_impl(std::stringstream &ss)
{ {
ss << fmt::format("{}:", m_name); ss << fmt::format("{}:", m_name);

View File

@@ -32,6 +32,8 @@ public:
void progress(); void progress();
hailort::Expected<double> get_last_measured_fps();
private: private:
double get_fps(); double get_fps();
@@ -45,6 +47,8 @@ private:
hailort::LatencyMeterPtr m_overall_latency_meter; hailort::LatencyMeterPtr m_overall_latency_meter;
const bool m_measure_fps; const bool m_measure_fps;
const std::string &m_hef_path; const std::string &m_hef_path;
double m_last_measured_fps;
}; };
#endif /* _HAILO_HAILORTCLI_RUN2_NETWORK_LIVE_TRACK_HPP_ */ #endif /* _HAILO_HAILORTCLI_RUN2_NETWORK_LIVE_TRACK_HPP_ */

View File

@@ -100,7 +100,8 @@ NetworkRunner::NetworkRunner(const NetworkParams &params, const std::string &nam
m_name(name), m_name(name),
m_cng(cng), m_cng(cng),
m_overall_latency_meter(nullptr), m_overall_latency_meter(nullptr),
m_latency_barrier(nullptr) m_latency_barrier(nullptr),
m_last_measured_fps(0)
{ {
} }
@@ -262,6 +263,32 @@ void NetworkRunner::set_latency_barrier(BarrierPtr latency_barrier)
m_latency_barrier = latency_barrier; m_latency_barrier = latency_barrier;
} }
std::shared_ptr<ConfiguredNetworkGroup> NetworkRunner::get_configured_network_group()
{
return m_cng;
}
void NetworkRunner::set_last_measured_fps(double fps)
{
m_last_measured_fps = fps;
}
double NetworkRunner::get_last_measured_fps()
{
return m_last_measured_fps;
}
hailo_vstream_params_t update_quantize_flag_in_vstream_param(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &old_vstream_params)
{
hailo_vstream_params_t res = old_vstream_params;
if ((HAILO_FORMAT_TYPE_FLOAT32 == old_vstream_params.user_buffer_format.type) || (HailoRTCommon::is_nms(vstream_info))) {
res.user_buffer_format.flags &= (~HAILO_FORMAT_FLAGS_QUANTIZED);
} else {
res.user_buffer_format.flags |= (HAILO_FORMAT_FLAGS_QUANTIZED);
}
return res;
}
Expected<std::pair<std::vector<InputVStream>, std::vector<OutputVStream>>> NetworkRunner::create_vstreams( Expected<std::pair<std::vector<InputVStream>, std::vector<OutputVStream>>> NetworkRunner::create_vstreams(
ConfiguredNetworkGroup &net_group, const std::map<std::string, hailo_vstream_params_t> &params) ConfiguredNetworkGroup &net_group, const std::map<std::string, hailo_vstream_params_t> &params)
{//TODO: support network name {//TODO: support network name
@@ -273,10 +300,12 @@ Expected<std::pair<std::vector<InputVStream>, std::vector<OutputVStream>>> Netwo
for (auto &input_vstream_info : input_vstreams_info.value()) { for (auto &input_vstream_info : input_vstreams_info.value()) {
auto elem_it = params.find(input_vstream_info.name); auto elem_it = params.find(input_vstream_info.name);
if (elem_it != params.end()) { if (elem_it != params.end()) {
input_vstreams_params.emplace(input_vstream_info.name, elem_it->second); auto vstream_param = update_quantize_flag_in_vstream_param(input_vstream_info, elem_it->second);
input_vstreams_params.emplace(input_vstream_info.name, vstream_param);
match_count++; match_count++;
} else { } else {
input_vstreams_params.emplace(input_vstream_info.name, HailoRTDefaults::get_vstreams_params()); auto vstream_param = update_quantize_flag_in_vstream_param(input_vstream_info, HailoRTDefaults::get_vstreams_params());
input_vstreams_params.emplace(input_vstream_info.name, vstream_param);
} }
} }
@@ -286,11 +315,13 @@ Expected<std::pair<std::vector<InputVStream>, std::vector<OutputVStream>>> Netwo
for (auto &output_vstream_info : output_vstreams_info.value()) { for (auto &output_vstream_info : output_vstreams_info.value()) {
auto elem_it = params.find(output_vstream_info.name); auto elem_it = params.find(output_vstream_info.name);
if (elem_it != params.end()) { if (elem_it != params.end()) {
output_vstreams_params.emplace(output_vstream_info.name, elem_it->second); auto vstream_param = update_quantize_flag_in_vstream_param(output_vstream_info, elem_it->second);
output_vstreams_params.emplace(output_vstream_info.name, vstream_param);
match_count++; match_count++;
} }
else { else {
output_vstreams_params.emplace(output_vstream_info.name, HailoRTDefaults::get_vstreams_params()); auto vstream_param = update_quantize_flag_in_vstream_param(output_vstream_info, HailoRTDefaults::get_vstreams_params());
output_vstreams_params.emplace(output_vstream_info.name, vstream_param);
} }
} }

View File

@@ -128,6 +128,9 @@ public:
// Must be called prior to run // Must be called prior to run
void set_overall_latency_meter(LatencyMeterPtr latency_meter); void set_overall_latency_meter(LatencyMeterPtr latency_meter);
void set_latency_barrier(BarrierPtr latency_barrier); void set_latency_barrier(BarrierPtr latency_barrier);
std::shared_ptr<ConfiguredNetworkGroup> get_configured_network_group();
void set_last_measured_fps(double fps);
double get_last_measured_fps();
protected: protected:
static bool inference_succeeded(hailo_status status); static bool inference_succeeded(hailo_status status);
@@ -177,8 +180,9 @@ protected:
// sync_event will be used to send one frame at a time // sync_event will be used to send one frame at a time
EventPtr sync_event = nullptr; EventPtr sync_event = nullptr;
if (m_params.measure_hw_latency || m_params.measure_overall_latency) { if (m_params.measure_hw_latency || m_params.measure_overall_latency) {
sync_event = Event::create_shared(Event::State::not_signalled); auto sync_event_exp = Event::create_shared(Event::State::not_signalled);
CHECK_NOT_NULL(sync_event, HAILO_OUT_OF_HOST_MEMORY); CHECK_EXPECTED_AS_STATUS(sync_event_exp);
sync_event = sync_event_exp.release();
} }
while (true) { while (true) {
@@ -253,8 +257,9 @@ protected:
// sync_event will be used to send one frame at a time // sync_event will be used to send one frame at a time
EventPtr sync_event = nullptr; EventPtr sync_event = nullptr;
if (m_params.measure_hw_latency || m_params.measure_overall_latency) { if (m_params.measure_hw_latency || m_params.measure_overall_latency) {
sync_event = Event::create_shared(Event::State::not_signalled); auto sync_event_exp = Event::create_shared(Event::State::not_signalled);
CHECK_NOT_NULL(sync_event, HAILO_OUT_OF_HOST_MEMORY); CHECK_EXPECTED_AS_STATUS(sync_event_exp);
sync_event = sync_event_exp.release();
} }
while (true) { while (true) {
@@ -301,6 +306,7 @@ protected:
std::shared_ptr<ConfiguredNetworkGroup> m_cng; std::shared_ptr<ConfiguredNetworkGroup> m_cng;
LatencyMeterPtr m_overall_latency_meter; LatencyMeterPtr m_overall_latency_meter;
BarrierPtr m_latency_barrier; BarrierPtr m_latency_barrier;
double m_last_measured_fps;
private: private:
static const std::vector<hailo_status> ALLOWED_INFERENCE_RETURN_VALUES; static const std::vector<hailo_status> ALLOWED_INFERENCE_RETURN_VALUES;

View File

@@ -18,14 +18,24 @@
#include "../common.hpp" #include "../common.hpp"
#include "hailo/vdevice.hpp" #include "hailo/vdevice.hpp"
#include "hailo/hef.hpp" #include "hailo/hef.hpp"
#include "../download_action_list_command.hpp"
#include <memory> #include <memory>
#include <vector> #include <vector>
#include <regex>
using namespace hailort; using namespace hailort;
constexpr uint32_t DEFAULT_TIME_TO_RUN_SECONDS = 5; constexpr uint32_t DEFAULT_TIME_TO_RUN_SECONDS = 5;
static const char *JSON_SUFFIX = ".json";
static const char *RUNTIME_DATA_OUTPUT_PATH_HEF_PLACE_HOLDER = "<hef>";
static const std::vector<uint16_t> DEFAULT_BATCH_SIZES = {1, 2, 4, 8, 16};
static const uint16_t RUNTIME_DATA_BATCH_INDEX_TO_MEASURE_DEFAULT = 2;
using json = nlohmann::json;
using ordered_json = nlohmann::ordered_json;
/** VStreamNameValidator */ /** VStreamNameValidator */
class VStreamNameValidator : public CLI::Validator { class VStreamNameValidator : public CLI::Validator {
public: public:
@@ -199,13 +209,11 @@ VStreamApp::VStreamApp(const std::string &description, const std::string &name,
})) }))
->default_val("auto"); ->default_val("auto");
add_flag_callback(format_opt_group, "-q,--quantized,!--no-quantized", "Whether or not data is quantized", auto quantized_option = format_opt_group->add_flag("-q,--quantized,!--no-quantized",
[this](bool result){ "Whether or not data is quantized. This flag is ignored - Determine if the data requires quantization is decided by the src-data and dst-data types.")
m_vstream_params.params.user_buffer_format.flags = result ?
static_cast<hailo_format_flags_t>(m_vstream_params.params.user_buffer_format.flags | HAILO_FORMAT_FLAGS_QUANTIZED) :
static_cast<hailo_format_flags_t>(m_vstream_params.params.user_buffer_format.flags & (~HAILO_FORMAT_FLAGS_QUANTIZED));})
->run_callback_for_default()
->default_val(true); // default_val() must be after run_callback_for_default() ->default_val(true); // default_val() must be after run_callback_for_default()
hailo_deprecate_options(format_opt_group, { std::make_shared<OptionDeprecation>(quantized_option) }, false);
} }
CLI::Option* VStreamApp::add_flag_callback(CLI::App *app, const std::string &name, const std::string &description, CLI::Option* VStreamApp::add_flag_callback(CLI::App *app, const std::string &name, const std::string &description,
@@ -307,6 +315,9 @@ class Run2 : public CLI::App
public: public:
Run2(); Run2();
Expected<std::unique_ptr<VDevice>> create_vdevice();
Expected<std::vector<std::shared_ptr<NetworkRunner>>> init_and_run_net_runners(VDevice *vdevice);
const std::vector<NetworkParams>& get_network_params(); const std::vector<NetworkParams>& get_network_params();
std::chrono::seconds get_time_to_run(); std::chrono::seconds get_time_to_run();
std::vector<hailo_device_id_t> get_dev_ids(); std::vector<hailo_device_id_t> get_dev_ids();
@@ -317,6 +328,8 @@ public:
bool get_measure_hw_latency(); bool get_measure_hw_latency();
bool get_measure_overall_latency(); bool get_measure_overall_latency();
bool get_multi_process_service(); bool get_multi_process_service();
bool get_measure_fw_actions();
std::string get_measure_fw_actions_output_path();
const std::string &get_group_id(); const std::string &get_group_id();
InferenceMode get_mode() const; InferenceMode get_mode() const;
const std::string &get_output_json_path(); const std::string &get_output_json_path();
@@ -324,8 +337,10 @@ public:
void set_scheduling_algorithm(hailo_scheduling_algorithm_t scheduling_algorithm); void set_scheduling_algorithm(hailo_scheduling_algorithm_t scheduling_algorithm);
void set_inference_mode(); void set_inference_mode();
void set_measure_latency(); void set_measure_latency();
void set_batch_size(uint16_t batch_size);
private: private:
void add_measure_fw_actions_subcom();
void add_net_app_subcom(); void add_net_app_subcom();
std::vector<NetworkParams> m_network_params; std::vector<NetworkParams> m_network_params;
uint32_t m_time_to_run; uint32_t m_time_to_run;
@@ -342,11 +357,15 @@ private:
bool m_measure_power; bool m_measure_power;
bool m_measure_current; bool m_measure_current;
bool m_measure_temp; bool m_measure_temp;
bool m_measure_fw_actions;
std::string m_measure_fw_actions_output_path;
}; };
Run2::Run2() : CLI::App("Run networks (preview)", "run2") Run2::Run2() : CLI::App("Run networks", "run2")
{ {
add_measure_fw_actions_subcom();
add_net_app_subcom(); add_net_app_subcom();
add_option("-t,--time-to-run", m_time_to_run, "Time to run (seconds)") add_option("-t,--time-to-run", m_time_to_run, "Time to run (seconds)")
->default_val(DEFAULT_TIME_TO_RUN_SECONDS) ->default_val(DEFAULT_TIME_TO_RUN_SECONDS)
@@ -358,7 +377,6 @@ Run2::Run2() : CLI::App("Run networks (preview)", "run2")
{ "raw_async", InferenceMode::RAW_ASYNC }, { "raw_async", InferenceMode::RAW_ASYNC },
{ "raw_async_single_thread", InferenceMode::RAW_ASYNC_SINGLE_THREAD, OptionVisibility::HIDDEN } { "raw_async_single_thread", InferenceMode::RAW_ASYNC_SINGLE_THREAD, OptionVisibility::HIDDEN }
}))->default_val("full"); }))->default_val("full");
static const char *JSON_SUFFIX = ".json";
add_option("-j,--json", m_stats_json_path, "If set save statistics as json to the specified path") add_option("-j,--json", m_stats_json_path, "If set save statistics as json to the specified path")
->default_val("") ->default_val("")
->check(FileSuffixValidator(JSON_SUFFIX)); ->check(FileSuffixValidator(JSON_SUFFIX));
@@ -373,9 +391,6 @@ Run2::Run2() : CLI::App("Run networks (preview)", "run2")
->check(CLI::PositiveNumber) ->check(CLI::PositiveNumber)
->excludes(dev_id_opt); ->excludes(dev_id_opt);
vdevice_options_group->add_flag("--multi-process-service", m_multi_process_service, "VDevice multi process service")
->default_val(false);
vdevice_options_group->add_option("--group-id", m_group_id, "VDevice group id") vdevice_options_group->add_option("--group-id", m_group_id, "VDevice group id")
->default_val(HAILO_DEFAULT_VDEVICE_GROUP_ID); ->default_val(HAILO_DEFAULT_VDEVICE_GROUP_ID);
@@ -384,7 +399,7 @@ Run2::Run2() : CLI::App("Run networks (preview)", "run2")
auto measure_power_opt = measurement_options_group->add_flag("--measure-power", m_measure_power, "Measure power consumption") auto measure_power_opt = measurement_options_group->add_flag("--measure-power", m_measure_power, "Measure power consumption")
->default_val(false); ->default_val(false);
measurement_options_group->add_flag("--measure-current", m_measure_current, "Measure current")->excludes(measure_power_opt) auto measure_current_opt = measurement_options_group->add_flag("--measure-current", m_measure_current, "Measure current")->excludes(measure_power_opt)
->default_val(false); ->default_val(false);
measurement_options_group->add_flag("--measure-latency", m_measure_hw_latency, "Measure network latency on the NN core") measurement_options_group->add_flag("--measure-latency", m_measure_hw_latency, "Measure network latency on the NN core")
@@ -393,8 +408,41 @@ Run2::Run2() : CLI::App("Run networks (preview)", "run2")
measurement_options_group->add_flag("--measure-overall-latency", m_measure_overall_latency, "Measure overall latency measurement") measurement_options_group->add_flag("--measure-overall-latency", m_measure_overall_latency, "Measure overall latency measurement")
->default_val(false); ->default_val(false);
measurement_options_group->add_flag("--measure-temp", m_measure_temp, "Measure chip temperature") auto measure_temp_opt = measurement_options_group->add_flag("--measure-temp", m_measure_temp, "Measure chip temperature")
->default_val(false); ->default_val(false);
auto multi_process_flag = vdevice_options_group->add_flag("--multi-process-service", m_multi_process_service, "VDevice multi process service")
->default_val(false);
if (VDevice::service_over_ip_mode()) {
multi_process_flag
->excludes(measure_power_opt)
->excludes(measure_current_opt)
->excludes(measure_temp_opt);
// When working with service over ip - client doesn't have access to physical devices
} else {
(void)measure_power_opt;
(void)measure_current_opt;
(void)measure_temp_opt;
(void)multi_process_flag;
}
}
void Run2::add_measure_fw_actions_subcom()
{
m_measure_fw_actions = false;
auto measure_fw_actions_subcommand = std::make_shared<NetworkApp>("Collect runtime data to be used by the Profiler", "measure-fw-actions");
measure_fw_actions_subcommand->parse_complete_callback([this]() {
m_measure_fw_actions = true;
});
measure_fw_actions_subcommand->add_option("--output-path", m_measure_fw_actions_output_path,
fmt::format("Runtime data output file path\n'{}' will be replaced with the current running hef", RUNTIME_DATA_OUTPUT_PATH_HEF_PLACE_HOLDER))
->default_val(fmt::format("runtime_data_{}.json", RUNTIME_DATA_OUTPUT_PATH_HEF_PLACE_HOLDER))
->check(FileSuffixValidator(JSON_SUFFIX));
measure_fw_actions_subcommand->alias("collect-runtime-data");
add_subcommand(measure_fw_actions_subcommand);
} }
void Run2::add_net_app_subcom() void Run2::add_net_app_subcom()
@@ -499,11 +547,28 @@ void Run2::set_measure_latency()
} }
} }
void Run2::set_batch_size(uint16_t batch_size)
{
for (auto &params: m_network_params) {
params.batch_size = batch_size;
}
}
bool Run2::get_multi_process_service() bool Run2::get_multi_process_service()
{ {
return m_multi_process_service; return m_multi_process_service;
} }
bool Run2::get_measure_fw_actions()
{
return m_measure_fw_actions;
}
std::string Run2::get_measure_fw_actions_output_path()
{
return m_measure_fw_actions_output_path;
}
const std::string &Run2::get_group_id() const std::string &Run2::get_group_id()
{ {
return m_group_id; return m_group_id;
@@ -560,69 +625,83 @@ std::string get_str_infer_mode(const InferenceMode& infer_mode)
return "<Unknown>"; return "<Unknown>";
} }
hailo_status Run2Command::execute() // We assume that hef_place_holder_regex is valid
std::string format_measure_fw_actions_output_path(const std::string &base_output_path, const std::string &hef_path,
const std::string &hef_place_holder_regex = RUNTIME_DATA_OUTPUT_PATH_HEF_PLACE_HOLDER,
const std::string &hef_suffix = ".hef")
{ {
Run2 *app = reinterpret_cast<Run2*>(m_app); const auto hef_basename = Filesystem::basename(hef_path);
const auto hef_no_suffix = Filesystem::remove_suffix(hef_basename, hef_suffix);
return std::regex_replace(base_output_path, std::regex(hef_place_holder_regex), hef_no_suffix);
}
app->set_inference_mode(); Expected<std::reference_wrapper<Device>> get_single_physical_device(VDevice &vdevice)
app->set_measure_latency(); {
auto expected_physical_devices = vdevice.get_physical_devices();
CHECK_EXPECTED(expected_physical_devices);
CHECK_AS_EXPECTED(1 == expected_physical_devices->size(), HAILO_INVALID_OPERATION, "Operation not allowed for multi-device");
auto &res = expected_physical_devices->at(0);
return std::move(res);
}
if (0 == app->get_network_params().size()) { Expected<std::unique_ptr<VDevice>> Run2::create_vdevice()
LOGGER__ERROR("Nothing to run"); {
return HAILO_INVALID_OPERATION; // hailo_vdevice_params_t is a c-structure that have pointers of device_ids, we must keep reference to the devices
} // object alive until vdevice_params is destructed.
if (1 == app->get_network_params().size()) { auto dev_ids = get_dev_ids();
LOGGER__WARN("\"hailortcli run2\" is in preview. It is recommended to use \"hailortcli run\" command for a single network group");
}
if (app->get_measure_hw_latency() || app->get_measure_overall_latency()) {
CHECK(1 == app->get_network_params().size(), HAILO_INVALID_OPERATION, "When latency measurement is enabled, only one model is allowed");
LOGGER__WARN("Measuring latency; frames are sent one at a time and FPS will not be measured");
}
hailo_vdevice_params_t vdevice_params{}; hailo_vdevice_params_t vdevice_params{};
CHECK_SUCCESS(hailo_init_vdevice_params(&vdevice_params)); CHECK_SUCCESS_AS_EXPECTED(hailo_init_vdevice_params(&vdevice_params));
auto dev_ids = app->get_dev_ids();
if (!dev_ids.empty()) { if (!dev_ids.empty()) {
vdevice_params.device_count = static_cast<uint32_t>(dev_ids.size()); vdevice_params.device_count = static_cast<uint32_t>(dev_ids.size());
vdevice_params.device_ids = dev_ids.data(); vdevice_params.device_ids = dev_ids.data();
// Disable scheduler for eth VDevice // Disable scheduler for eth VDevice
if ((1 == dev_ids.size()) && (is_valid_ip(dev_ids[0].id))) { if ((1 == dev_ids.size()) && (is_valid_ip(dev_ids[0].id))) {
vdevice_params.scheduling_algorithm = HAILO_SCHEDULING_ALGORITHM_NONE; vdevice_params.scheduling_algorithm = HAILO_SCHEDULING_ALGORITHM_NONE;
CHECK(1 == app->get_network_params().size(), HAILO_INVALID_OPERATION, "On Ethernet inference only one model is allowed"); CHECK_AS_EXPECTED(1 == get_network_params().size(), HAILO_INVALID_OPERATION, "On Ethernet inference only one model is allowed");
app->set_scheduling_algorithm(HAILO_SCHEDULING_ALGORITHM_NONE); set_scheduling_algorithm(HAILO_SCHEDULING_ALGORITHM_NONE);
} }
} else { } else {
vdevice_params.device_count = app->get_device_count(); vdevice_params.device_count = get_device_count();
} }
// TODO: Async stream support for scheduler (HRT-9878)
if ((app->get_mode() == InferenceMode::RAW_ASYNC) || (app->get_mode() == InferenceMode::RAW_ASYNC_SINGLE_THREAD)) { if (get_measure_fw_actions()) {
CHECK_AS_EXPECTED(1 == get_network_params().size(), HAILO_INVALID_OPERATION, "Only one model is allowed when collecting runtime data");
CHECK_AS_EXPECTED(!get_multi_process_service(), HAILO_INVALID_OPERATION, "Collecting runtime data is not supported with multi process service");
CHECK_AS_EXPECTED(get_device_count() == 1, HAILO_INVALID_OPERATION, "Collecting runtime data is not supported with multi device");
CHECK_AS_EXPECTED(!(get_measure_hw_latency() || get_measure_overall_latency()), HAILO_INVALID_OPERATION, "Latency measurement is not allowed when collecting runtime data");
CHECK_AS_EXPECTED((get_mode() == InferenceMode::RAW) || (get_mode() == InferenceMode::RAW_ASYNC), HAILO_INVALID_OPERATION,
"'measure-fw-actions' is only supported with '--mode=raw'. Received mode: '{}'", get_str_infer_mode(get_mode()));
vdevice_params.scheduling_algorithm = HAILO_SCHEDULING_ALGORITHM_NONE; vdevice_params.scheduling_algorithm = HAILO_SCHEDULING_ALGORITHM_NONE;
CHECK(1 == app->get_network_params().size(), HAILO_INVALID_OPERATION, "Only one model is allowed with aw async inference mode"); set_scheduling_algorithm(HAILO_SCHEDULING_ALGORITHM_NONE);
app->set_scheduling_algorithm(HAILO_SCHEDULING_ALGORITHM_NONE);
} }
vdevice_params.group_id = app->get_group_id().c_str(); vdevice_params.group_id = get_group_id().c_str();
vdevice_params.multi_process_service = app->get_multi_process_service(); vdevice_params.multi_process_service = get_multi_process_service();
auto vdevice = VDevice::create(vdevice_params); return VDevice::create(vdevice_params);
CHECK_EXPECTED_AS_STATUS(vdevice); }
Expected<std::vector<std::shared_ptr<NetworkRunner>>> Run2::init_and_run_net_runners(VDevice *vdevice)
{
std::vector<std::shared_ptr<NetworkRunner>> net_runners;
auto shutdown_event_exp = Event::create_shared(Event::State::not_signalled);
CHECK_EXPECTED(shutdown_event_exp);
auto shutdown_event = shutdown_event_exp.release();
// create network runners // create network runners
std::vector<std::shared_ptr<NetworkRunner>> net_runners; for (auto &net_params : get_network_params()) {
for (auto &net_params : app->get_network_params()) { auto expected_net_runner = NetworkRunner::create_shared(*vdevice, net_params);
auto net_runner = NetworkRunner::create_shared(*vdevice->get(), net_params); CHECK_EXPECTED(expected_net_runner);
CHECK_EXPECTED_AS_STATUS(net_runner); auto net_runner = expected_net_runner.release();
net_runners.emplace_back(net_runner);
net_runners.emplace_back(net_runner.release());
} }
auto live_stats = std::make_unique<LiveStats>(std::chrono::seconds(1)); auto live_stats = std::make_unique<LiveStats>(std::chrono::seconds(1));
live_stats->add(std::make_shared<TimerLiveTrack>(app->get_time_to_run()), 0); live_stats->add(std::make_shared<TimerLiveTrack>(get_time_to_run()), 0);
auto shutdown_event = Event::create_shared(Event::State::not_signalled);
CHECK_NOT_NULL(shutdown_event, HAILO_OUT_OF_HOST_MEMORY);
std::vector<AsyncThreadPtr<hailo_status>> threads; std::vector<AsyncThreadPtr<hailo_status>> threads;
Barrier activation_barrier(net_runners.size() + 1); // We wait for all nets to finish activation + this thread to start sampling Barrier activation_barrier(net_runners.size() + 1); // We wait for all nets to finish activation + this thread to start sampling
@@ -635,32 +714,115 @@ hailo_status Run2Command::execute()
auto signal_event_scope_guard = SignalEventScopeGuard(*shutdown_event); auto signal_event_scope_guard = SignalEventScopeGuard(*shutdown_event);
auto physical_devices = vdevice.value()->get_physical_devices(); if (get_measure_power() || get_measure_current() || get_measure_temp()) {
CHECK_EXPECTED_AS_STATUS(physical_devices); auto physical_devices = vdevice->get_physical_devices();
CHECK_EXPECTED(physical_devices);
for (auto &device : physical_devices.value()) { for (auto &device : physical_devices.value()) {
auto measurement_live_track = MeasurementLiveTrack::create_shared(device.get(), app->get_measure_power(), auto measurement_live_track = MeasurementLiveTrack::create_shared(device.get(), get_measure_power(),
app->get_measure_current(), app->get_measure_temp()); get_measure_current(), get_measure_temp());
if (HAILO_SUCCESS != measurement_live_track.status()) { if (HAILO_SUCCESS != measurement_live_track.status()) {
activation_barrier.terminate(); activation_barrier.terminate();
}
CHECK_EXPECTED(measurement_live_track);
live_stats->add(measurement_live_track.release(), 2);
} }
CHECK_EXPECTED_AS_STATUS(measurement_live_track);
live_stats->add(measurement_live_track.release(), 2);
} }
// TODO: wait for all nets before starting timer. start() should update TimerLiveTrack to start. or maybe append here but first in vector... // TODO: wait for all nets before starting timer. start() should update TimerLiveTrack to start. or maybe append here but first in vector...
activation_barrier.arrive_and_wait(); activation_barrier.arrive_and_wait();
CHECK_SUCCESS(live_stats->start()); CHECK_SUCCESS_AS_EXPECTED(live_stats->start());
auto status = shutdown_event->wait(app->get_time_to_run()); auto status = shutdown_event->wait(get_time_to_run());
if (HAILO_TIMEOUT != status) { if (HAILO_TIMEOUT != status) {
// if shutdown_event is signaled its because one of the send/recv threads failed // if shutdown_event is signaled its because one of the send/recv threads failed
LOGGER__ERROR("Encountered error during inference. See log for more information."); LOGGER__ERROR("Encountered error during inference. See log for more information.");
} }
if (!app->get_output_json_path().empty()){ if (!get_output_json_path().empty()){
live_stats->dump_stats(app->get_output_json_path(), get_str_infer_mode(app->get_mode())); live_stats->dump_stats(get_output_json_path(), get_str_infer_mode(get_mode()));
}
auto expected_fps_per_network = live_stats->get_last_measured_fps_per_network_group();
CHECK_EXPECTED(expected_fps_per_network);
auto fps_per_network = expected_fps_per_network.release();
for (size_t network_runner_index = 0; network_runner_index < fps_per_network.size(); network_runner_index++) {
net_runners[network_runner_index]->set_last_measured_fps(fps_per_network[network_runner_index]);
} }
live_stats.reset(); // Ensures that the final print will include real values and not with values of when streams are already aborted. live_stats.reset(); // Ensures that the final print will include real values and not with values of when streams are already aborted.
shutdown_event->signal(); shutdown_event->signal();
return wait_for_threads(threads); wait_for_threads(threads);
return net_runners;
}
hailo_status Run2Command::execute()
{
Run2 *app = reinterpret_cast<Run2*>(m_app);
app->set_inference_mode();
app->set_measure_latency();
CHECK(0 < app->get_network_params().size(), HAILO_INVALID_OPERATION, "Nothing to run");
if (app->get_measure_hw_latency() || app->get_measure_overall_latency()) {
CHECK(1 == app->get_network_params().size(), HAILO_INVALID_OPERATION, "When latency measurement is enabled, only one model is allowed");
LOGGER__WARNING("Measuring latency; frames are sent one at a time and FPS will not be measured");
}
if (1 == app->get_network_params().size()) {
LOGGER__WARNING("\"hailortcli run2\" is not optimized for single model usage. It is recommended to use \"hailortcli run\" command for a single model");
}
auto expected_vdevice = app->create_vdevice();
CHECK_EXPECTED_AS_STATUS(expected_vdevice);
auto vdevice = expected_vdevice.release();
std::vector<uint16_t> batch_sizes_to_run = { app->get_network_params()[0].batch_size };
if(app->get_measure_fw_actions() && app->get_network_params()[0].batch_size == HAILO_DEFAULT_BATCH_SIZE) {
// In case measure-fw-actions is enabled and no batch size was provided - we want to run with batch sizes 1,2,4,8,16
batch_sizes_to_run = DEFAULT_BATCH_SIZES;
}
std::string runtime_data_output_path;
ordered_json action_list_json;
if (app->get_measure_fw_actions()) {
auto device = get_single_physical_device(*vdevice);
CHECK_EXPECTED_AS_STATUS(device);
auto expected_action_list_json = DownloadActionListCommand::init_json_object(device.release(), app->get_network_params()[0].hef_path);
CHECK_EXPECTED_AS_STATUS(expected_action_list_json);
action_list_json = expected_action_list_json.release();
runtime_data_output_path = format_measure_fw_actions_output_path(
app->get_measure_fw_actions_output_path(), app->get_network_params()[0].hef_path);
}
uint32_t network_group_index = 0;
for (auto batch_size : batch_sizes_to_run) {
if(app->get_measure_fw_actions()) {
app->set_batch_size(batch_size);
auto device = get_single_physical_device(*vdevice);
CHECK_EXPECTED_AS_STATUS(device);
auto status = DownloadActionListCommand::set_batch_to_measure(device.release(), RUNTIME_DATA_BATCH_INDEX_TO_MEASURE_DEFAULT);
CHECK_SUCCESS(status);
}
auto expected_net_runners = app->init_and_run_net_runners(vdevice.get());
CHECK_EXPECTED_AS_STATUS(expected_net_runners);
auto net_runners = expected_net_runners.release();
if(app->get_measure_fw_actions()) { // Collecting runtime data
auto device = get_single_physical_device(*vdevice);
CHECK_EXPECTED_AS_STATUS(device);
auto status = DownloadActionListCommand::execute(device.release(), net_runners[0]->get_configured_network_group(), batch_size, action_list_json, net_runners[0]->get_last_measured_fps(), network_group_index);
CHECK_SUCCESS(status);
network_group_index++;
}
}
if(app->get_measure_fw_actions()) { // In case measure-fw-actions is enabled - write data to JSON file
CHECK_SUCCESS(DownloadActionListCommand::write_to_json(action_list_json, runtime_data_output_path));
}
return HAILO_SUCCESS;
} }

View File

@@ -42,3 +42,8 @@ void TimerLiveTrack::push_json_impl(nlohmann::ordered_json &json)
time_to_run << std::fixed << std::setprecision(2) << std::round(std::chrono::duration<double>(m_duration).count()) << " seconds"; time_to_run << std::fixed << std::setprecision(2) << std::round(std::chrono::duration<double>(m_duration).count()) << " seconds";
json["time_to_run"] = time_to_run.str(); json["time_to_run"] = time_to_run.str();
} }
Expected<double> TimerLiveTrack::get_last_measured_fps()
{
return make_unexpected(HAILO_NOT_AVAILABLE);
}

View File

@@ -20,6 +20,7 @@ public:
virtual hailo_status start_impl() override; virtual hailo_status start_impl() override;
virtual uint32_t push_text_impl(std::stringstream &ss) override; virtual uint32_t push_text_impl(std::stringstream &ss) override;
virtual void push_json_impl(nlohmann::ordered_json &json) override; virtual void push_json_impl(nlohmann::ordered_json &json) override;
virtual hailort::Expected<double> get_last_measured_fps();
private: private:
std::chrono::milliseconds m_duration; std::chrono::milliseconds m_duration;

View File

@@ -12,10 +12,7 @@
#include "inference_progress.hpp" #include "inference_progress.hpp"
#include "infer_stats_printer.hpp" #include "infer_stats_printer.hpp"
#include "graph_printer.hpp" #include "graph_printer.hpp"
#if defined(__GNUC__)
// TODO: Support on windows (HRT-5919)
#include "download_action_list_command.hpp" #include "download_action_list_command.hpp"
#endif
#include "common.hpp" #include "common.hpp"
#include "common/string_utils.hpp" #include "common/string_utils.hpp"
@@ -183,9 +180,9 @@ static void add_run_command_params(CLI::App *run_subcommand, inference_runner_pa
->default_val("true"); ->default_val("true");
auto transformation_group = run_subcommand->add_option_group("Transformations"); auto transformation_group = run_subcommand->add_option_group("Transformations");
transformation_group->add_option("--quantized", params.transform.quantized, auto quantized_option = transformation_group->add_option("--quantized", params.transform.quantized,
"true means the tool assumes that the data is already quantized,\n" "true means the tool assumes that the data is already quantized,\n"
"false means it is the tool's responsability to quantize (scale) the data.") "false means it is the tool's responsibility to quantize (scale) the data.")
->default_val("true"); ->default_val("true");
transformation_group->add_option("--user-format-type", params.transform.format_type, transformation_group->add_option("--user-format-type", params.transform.format_type,
"The host data type") "The host data type")
@@ -221,8 +218,6 @@ static void add_run_command_params(CLI::App *run_subcommand, inference_runner_pa
"No measurement flags provided; Run 'hailortcli run measure-stats --help' for options"); "No measurement flags provided; Run 'hailortcli run measure-stats --help' for options");
}); });
// TODO: Support on windows (HRT-5919)
#if defined(__GNUC__)
auto *collect_runtime_data_subcommand = run_subcommand->add_subcommand("collect-runtime-data", auto *collect_runtime_data_subcommand = run_subcommand->add_subcommand("collect-runtime-data",
"Collect runtime data to be used by the Profiler"); "Collect runtime data to be used by the Profiler");
static const char *JSON_SUFFIX = ".json"; static const char *JSON_SUFFIX = ".json";
@@ -240,7 +235,6 @@ static void add_run_command_params(CLI::App *run_subcommand, inference_runner_pa
// If this subcommand was parsed, then we need to download runtime_data // If this subcommand was parsed, then we need to download runtime_data
params.runtime_data.collect_runtime_data = true; params.runtime_data.collect_runtime_data = true;
}); });
#endif
auto measure_power_group = run_subcommand->add_option_group("Measure Power/Current"); auto measure_power_group = run_subcommand->add_option_group("Measure Power/Current");
CLI::Option *power_sampling_period = measure_power_group->add_option("--sampling-period", CLI::Option *power_sampling_period = measure_power_group->add_option("--sampling-period",
@@ -263,13 +257,13 @@ static void add_run_command_params(CLI::App *run_subcommand, inference_runner_pa
->excludes(elem_latency_option) ->excludes(elem_latency_option)
->excludes(elem_queue_size_option); ->excludes(elem_queue_size_option);
hailo_deprecate_options(run_subcommand, { std::make_shared<OptionDeprecation>(quantized_option) }, false);
run_subcommand->parse_complete_callback([&params, hef_new, power_sampling_period, run_subcommand->parse_complete_callback([&params, hef_new, power_sampling_period,
power_averaging_factor, measure_power_opt, measure_current_opt]() { power_averaging_factor, measure_power_opt, measure_current_opt]() {
PARSE_CHECK(!hef_new->empty(), "Single HEF file/directory is required"); PARSE_CHECK(!hef_new->empty(), "Single HEF file/directory is required");
bool is_hw_only = InferMode::HW_ONLY == params.mode; bool is_hw_only = InferMode::HW_ONLY == params.mode;
params.transform.transform = (!is_hw_only || (params.inputs_name_and_file_path.size() > 0)); params.transform.transform = (!is_hw_only || (params.inputs_name_and_file_path.size() > 0));
PARSE_CHECK((!params.transform.quantized || (HAILO_FORMAT_TYPE_AUTO == params.transform.format_type)),
"User data type must be auto when quantized is set");
bool has_oneof_measure_flags = (!measure_power_opt->empty() || !measure_current_opt->empty()); bool has_oneof_measure_flags = (!measure_power_opt->empty() || !measure_current_opt->empty());
PARSE_CHECK(power_sampling_period->empty() || has_oneof_measure_flags, PARSE_CHECK(power_sampling_period->empty() || has_oneof_measure_flags,
"--sampling-period requires --measure-power or --measure-current"); "--sampling-period requires --measure-power or --measure-current");
@@ -302,6 +296,10 @@ static void add_run_command_params(CLI::App *run_subcommand, inference_runner_pa
params.runtime_data.batch_to_measure); params.runtime_data.batch_to_measure);
} }
} }
PARSE_CHECK((params.dot_output.empty() || !is_hw_only),
"Generating .dot file for pipeline graph is impossible when running in 'hw-only' mode");
}); });
} }
@@ -476,7 +474,8 @@ Expected<std::map<std::string, std::vector<InputVStream>>> create_input_vstreams
auto network_infos = configured_net_group.get_network_infos(); auto network_infos = configured_net_group.get_network_infos();
CHECK_EXPECTED(network_infos); CHECK_EXPECTED(network_infos);
for (auto &network_info : network_infos.value()) { for (auto &network_info : network_infos.value()) {
auto input_vstreams_params = configured_net_group.make_input_vstream_params(params.transform.quantized, auto quantized = (params.transform.format_type != HAILO_FORMAT_TYPE_FLOAT32);
auto input_vstreams_params = configured_net_group.make_input_vstream_params(quantized,
params.transform.format_type, HAILORTCLI_DEFAULT_VSTREAM_TIMEOUT_MS, HAILO_DEFAULT_VSTREAM_QUEUE_SIZE, network_info.name); params.transform.format_type, HAILORTCLI_DEFAULT_VSTREAM_TIMEOUT_MS, HAILO_DEFAULT_VSTREAM_QUEUE_SIZE, network_info.name);
CHECK_EXPECTED(input_vstreams_params); CHECK_EXPECTED(input_vstreams_params);
@@ -498,7 +497,15 @@ Expected<std::map<std::string, std::vector<OutputVStream>>> create_output_vstrea
auto network_infos = configured_net_group.get_network_infos(); auto network_infos = configured_net_group.get_network_infos();
CHECK_EXPECTED(network_infos); CHECK_EXPECTED(network_infos);
for (auto &network_info : network_infos.value()) { for (auto &network_info : network_infos.value()) {
auto output_vstreams_params = configured_net_group.make_output_vstream_params(params.transform.quantized, // Data is not quantized if format_type is explicitly float32, or if an output is NMS (which also enforces float32 output)
// We don't cover a case of multiple outputs where only some of them are NMS (no such model currently), and anyway it is handled in run2
auto vstream_infos = configured_net_group.get_output_vstream_infos();
CHECK_EXPECTED(vstream_infos);
auto nms_output = std::any_of(vstream_infos->begin(), vstream_infos->end(), [] (const hailo_vstream_info_t &output_info) {
return HailoRTCommon::is_nms(output_info);
});
auto quantized = ((params.transform.format_type != HAILO_FORMAT_TYPE_FLOAT32) && !nms_output);
auto output_vstreams_params = configured_net_group.make_output_vstream_params(quantized,
params.transform.format_type, HAILORTCLI_DEFAULT_VSTREAM_TIMEOUT_MS, HAILO_DEFAULT_VSTREAM_QUEUE_SIZE, network_info.name); params.transform.format_type, HAILORTCLI_DEFAULT_VSTREAM_TIMEOUT_MS, HAILO_DEFAULT_VSTREAM_QUEUE_SIZE, network_info.name);
CHECK_EXPECTED(output_vstreams_params); CHECK_EXPECTED(output_vstreams_params);
@@ -958,43 +965,59 @@ static Expected<std::unique_ptr<ActivatedNetworkGroup>> activate_network_group(C
} }
static Expected<std::map<std::string, BufferPtr>> create_constant_dataset( static Expected<std::map<std::string, BufferPtr>> create_constant_dataset(
const std::vector<hailo_stream_info_t> &input_streams_infos, const hailo_transform_params_t &trans_params) const std::pair<std::vector<hailo_stream_info_t>, std::vector<hailo_vstream_info_t>> &input_infos, const hailo_transform_params_t &trans_params,
InferMode mode)
{ {
const uint8_t const_byte = 0xAB; const uint8_t const_byte = 0xAB;
std::map<std::string, BufferPtr> dataset; std::map<std::string, BufferPtr> dataset;
for (const auto &input_stream_info : input_streams_infos) {
const auto frame_size = hailo_get_host_frame_size(&input_stream_info, &trans_params);
auto constant_buffer = Buffer::create_shared(frame_size, const_byte);
if (!constant_buffer) {
std::cerr << "Out of memory, tried to allocate " << frame_size << std::endl;
return make_unexpected(constant_buffer.status());
}
dataset.emplace(std::string(input_stream_info.name), constant_buffer.release()); if (InferMode::HW_ONLY == mode) {
for (const auto &input_stream_info : input_infos.first) {
const auto frame_size = input_stream_info.hw_frame_size;
auto constant_buffer = Buffer::create_shared(frame_size, const_byte);
if (!constant_buffer) {
std::cerr << "Out of memory, tried to allocate " << frame_size << std::endl;
return make_unexpected(constant_buffer.status());
}
dataset.emplace(std::string(input_stream_info.name), constant_buffer.release());
}
} else {
for (const auto &input_vstream_info : input_infos.second) {
const auto frame_size = HailoRTCommon::get_frame_size(input_vstream_info, trans_params.user_buffer_format);
auto constant_buffer = Buffer::create_shared(frame_size, const_byte);
if (!constant_buffer) {
std::cerr << "Out of memory, tried to allocate " << frame_size << std::endl;
return make_unexpected(constant_buffer.status());
}
dataset.emplace(std::string(input_vstream_info.name), constant_buffer.release());
}
} }
return dataset; return dataset;
} }
static Expected<std::map<std::string, BufferPtr>> create_dataset_from_files( static Expected<std::map<std::string, BufferPtr>> create_dataset_from_files(
const std::vector<hailo_stream_info_t> &input_streams_infos, const std::vector<std::string> &input_files, const std::pair<std::vector<hailo_stream_info_t>, std::vector<hailo_vstream_info_t>> &input_infos, const std::vector<std::string> &input_files,
const hailo_transform_params_t &trans_params, InferMode mode) const hailo_transform_params_t &trans_params, InferMode mode)
{ {
CHECK_AS_EXPECTED(input_streams_infos.size() == input_files.size(), HAILO_INVALID_ARGUMENT, "Number of input files ({}) must be equal to the number of inputs ({})", input_files.size(), input_streams_infos.size()); // When creating dataset from files we always care about the logic-inputs (e.g. vstreams)
CHECK_AS_EXPECTED(input_infos.second.size() == input_files.size(),
HAILO_INVALID_ARGUMENT, "Number of input files ({}) must be equal to the number of inputs ({})", input_files.size(), input_infos.second.size());
std::map<std::string, std::string> file_paths; std::map<std::string, std::string> file_paths;
if ((input_streams_infos.size() == 1) && (input_files[0].find("=") == std::string::npos)) { // Legacy single input format if ((input_infos.second.size() == 1) && (input_files[0].find("=") == std::string::npos)) { // Legacy single input format
file_paths.emplace(std::string(input_streams_infos[0].name), input_files[0]); file_paths.emplace(std::string(input_infos.second.begin()->name), input_files[0]);
} }
else { else {
file_paths = format_strings_to_key_value_pairs(input_files); file_paths = format_strings_to_key_value_pairs(input_files);
} }
std::map<std::string, BufferPtr> dataset; std::map<std::string, BufferPtr> dataset;
for (const auto &input_stream_info : input_streams_infos) { for (const auto &input_vstream_info : input_infos.second) {
const auto host_frame_size = hailo_get_host_frame_size(&input_stream_info, &trans_params); const auto host_frame_size = HailoRTCommon::get_frame_size(input_vstream_info, trans_params.user_buffer_format);
const auto stream_name = std::string(input_stream_info.name); const auto stream_name = std::string(input_vstream_info.name);
CHECK_AS_EXPECTED(stream_name.find("=") == std::string::npos, HAILO_INVALID_ARGUMENT, "stream inputs must not contain '=' characters: {}", stream_name); CHECK_AS_EXPECTED(stream_name.find("=") == std::string::npos,
HAILO_INVALID_ARGUMENT, "stream inputs must not contain '=' characters: {}", stream_name);
const auto file_path_it = file_paths.find(stream_name); const auto file_path_it = file_paths.find(stream_name);
CHECK_AS_EXPECTED(file_paths.end() != file_path_it, HAILO_INVALID_ARGUMENT, "Missing input file for input: {}", stream_name); CHECK_AS_EXPECTED(file_paths.end() != file_path_it, HAILO_INVALID_ARGUMENT, "Missing input file for input: {}", stream_name);
@@ -1005,13 +1028,17 @@ static Expected<std::map<std::string, BufferPtr>> create_dataset_from_files(
"Input file ({}) size {} must be a multiple of the frame size {} ({})", file_path_it->second, host_buffer->size(), host_frame_size, stream_name); "Input file ({}) size {} must be a multiple of the frame size {} ({})", file_path_it->second, host_buffer->size(), host_frame_size, stream_name);
if (InferMode::HW_ONLY == mode) { if (InferMode::HW_ONLY == mode) {
auto matching_stream_info = std::find_if(input_infos.first.begin(), input_infos.first.end(), [&stream_name] (const auto &stream_info) {
return std::string(stream_info.name) == stream_name;
});
CHECK_AS_EXPECTED(matching_stream_info != input_infos.first.end(), HAILO_INVALID_OPERATION, "Failed to find raw-stream with name {}.", stream_name);
const size_t frames_count = (host_buffer->size() / host_frame_size); const size_t frames_count = (host_buffer->size() / host_frame_size);
const size_t hw_frame_size = input_stream_info.hw_frame_size; const size_t hw_frame_size = matching_stream_info->hw_frame_size;
const size_t hw_buffer_size = frames_count * hw_frame_size; const size_t hw_buffer_size = frames_count * hw_frame_size;
auto hw_buffer = Buffer::create_shared(hw_buffer_size); auto hw_buffer = Buffer::create_shared(hw_buffer_size);
CHECK_EXPECTED(hw_buffer); CHECK_EXPECTED(hw_buffer);
auto transform_context = InputTransformContext::create(input_stream_info, trans_params); auto transform_context = InputTransformContext::create(*matching_stream_info, trans_params);
CHECK_EXPECTED(transform_context); CHECK_EXPECTED(transform_context);
for (size_t i = 0; i < frames_count; i++) { for (size_t i = 0; i < frames_count; i++) {
@@ -1022,8 +1049,7 @@ static Expected<std::map<std::string, BufferPtr>> create_dataset_from_files(
CHECK_SUCCESS_AS_EXPECTED(status); CHECK_SUCCESS_AS_EXPECTED(status);
} }
dataset[stream_name] = hw_buffer.release(); dataset[stream_name] = hw_buffer.release();
} } else {
else {
auto host_buffer_shared = make_shared_nothrow<Buffer>(host_buffer.release()); auto host_buffer_shared = make_shared_nothrow<Buffer>(host_buffer.release());
CHECK_NOT_NULL_AS_EXPECTED(host_buffer_shared, HAILO_OUT_OF_HOST_MEMORY); CHECK_NOT_NULL_AS_EXPECTED(host_buffer_shared, HAILO_OUT_OF_HOST_MEMORY);
dataset[stream_name] = host_buffer_shared; dataset[stream_name] = host_buffer_shared;
@@ -1044,17 +1070,22 @@ static Expected<std::vector<std::map<std::string, BufferPtr>>> create_dataset(
trans_params.user_buffer_format.order = HAILO_FORMAT_ORDER_AUTO; trans_params.user_buffer_format.order = HAILO_FORMAT_ORDER_AUTO;
trans_params.user_buffer_format.flags = (params.transform.quantized ? HAILO_FORMAT_FLAGS_QUANTIZED : HAILO_FORMAT_FLAGS_NONE); trans_params.user_buffer_format.flags = (params.transform.quantized ? HAILO_FORMAT_FLAGS_QUANTIZED : HAILO_FORMAT_FLAGS_NONE);
trans_params.user_buffer_format.type = params.transform.format_type; trans_params.user_buffer_format.type = params.transform.format_type;
std::vector<std::vector<hailo_stream_info_t>> input_infos;
// Vector of len(ng.conut), each element is pair of all input_stream_infos, and all input_vstream_infos
std::vector<std::pair<std::vector<hailo_stream_info_t>, std::vector<hailo_vstream_info_t>>> input_infos;
for (auto &network_group : network_groups) { for (auto &network_group : network_groups) {
auto expected_all_streams_infos = network_group->get_all_stream_infos(); auto expected_all_streams_infos = network_group->get_all_stream_infos();
CHECK_EXPECTED(expected_all_streams_infos); CHECK_EXPECTED(expected_all_streams_infos);
auto &all_infos = expected_all_streams_infos.value(); auto &all_stream_infos = expected_all_streams_infos.value();
std::vector<hailo_stream_info_t> group_input_infos; std::vector<hailo_stream_info_t> group_input_stream_infos;
std::copy_if(all_infos.begin(), all_infos.end(), std::back_inserter(group_input_infos), [](auto &info) { std::copy_if(all_stream_infos.begin(), all_stream_infos.end(), std::back_inserter(group_input_stream_infos), [](const auto &info) {
return info.direction == HAILO_H2D_STREAM; return info.direction == HAILO_H2D_STREAM;
}); });
input_infos.push_back(group_input_infos); auto expected_input_vstreams_infos = network_group->get_input_vstream_infos();
CHECK_EXPECTED(expected_input_vstreams_infos);
input_infos.push_back({group_input_stream_infos, expected_input_vstreams_infos.release()});
} }
if (!params.inputs_name_and_file_path.empty()) { if (!params.inputs_name_and_file_path.empty()) {
for (auto &group_input_infos : input_infos) { for (auto &group_input_infos : input_infos) {
auto network_group_dataset = create_dataset_from_files(group_input_infos, params.inputs_name_and_file_path, auto network_group_dataset = create_dataset_from_files(group_input_infos, params.inputs_name_and_file_path,
@@ -1062,10 +1093,9 @@ static Expected<std::vector<std::map<std::string, BufferPtr>>> create_dataset(
CHECK_EXPECTED(network_group_dataset); CHECK_EXPECTED(network_group_dataset);
results.emplace_back(network_group_dataset.release()); results.emplace_back(network_group_dataset.release());
} }
} } else {
else {
for (auto &group_input_infos : input_infos) { for (auto &group_input_infos : input_infos) {
auto network_group_dataset = create_constant_dataset(group_input_infos, trans_params); auto network_group_dataset = create_constant_dataset(group_input_infos, trans_params, params.mode);
CHECK_EXPECTED(network_group_dataset); CHECK_EXPECTED(network_group_dataset);
results.emplace_back(network_group_dataset.release()); results.emplace_back(network_group_dataset.release());
} }
@@ -1179,18 +1209,13 @@ Expected<InferResult> run_command_hef_single_device(const inference_runner_param
auto network_group_list = device->configure(hef.value(), configure_params.value()); auto network_group_list = device->configure(hef.value(), configure_params.value());
CHECK_EXPECTED(network_group_list, "Failed configure device from hef"); CHECK_EXPECTED(network_group_list, "Failed configure device from hef");
#if defined(__GNUC__)
// TODO: Support on windows (HRT-5919)
if (use_batch_to_measure_opt(params)) { if (use_batch_to_measure_opt(params)) {
auto status = DownloadActionListCommand::set_batch_to_measure(*device, params.runtime_data.batch_to_measure); auto status = DownloadActionListCommand::set_batch_to_measure(*device, params.runtime_data.batch_to_measure);
CHECK_SUCCESS_AS_EXPECTED(status); CHECK_SUCCESS_AS_EXPECTED(status);
} }
#endif
auto inference_result = activate_and_run_single_device(*device, network_group_list.value(), params); auto inference_result = activate_and_run_single_device(*device, network_group_list.value(), params);
#if defined(__GNUC__)
// TODO: Support on windows (HRT-5919)
if (use_batch_to_measure_opt(params) && (0 == params.frames_count) && inference_result) { if (use_batch_to_measure_opt(params) && (0 == params.frames_count) && inference_result) {
auto min_frames_count = get_min_inferred_frames_count(inference_result.value()); auto min_frames_count = get_min_inferred_frames_count(inference_result.value());
CHECK_EXPECTED(min_frames_count); CHECK_EXPECTED(min_frames_count);
@@ -1208,7 +1233,6 @@ Expected<InferResult> run_command_hef_single_device(const inference_runner_param
params.hef_path); params.hef_path);
} }
#endif
CHECK_EXPECTED(inference_result); CHECK_EXPECTED(inference_result);
return inference_result; return inference_result;
} }
@@ -1356,22 +1380,17 @@ Expected<InferResult> run_command_hef_vdevice(const inference_runner_params &par
auto network_group_list = vdevice.value()->configure(hef.value(), configure_params.value()); auto network_group_list = vdevice.value()->configure(hef.value(), configure_params.value());
CHECK_EXPECTED(network_group_list, "Failed configure vdevice from hef"); CHECK_EXPECTED(network_group_list, "Failed configure vdevice from hef");
#if defined(__GNUC__)
for (auto &device : physical_devices) { for (auto &device : physical_devices) {
// TODO: Support on windows (HRT-5919)
if (use_batch_to_measure_opt(params)) { if (use_batch_to_measure_opt(params)) {
status = DownloadActionListCommand::set_batch_to_measure(device.get(), params.runtime_data.batch_to_measure); status = DownloadActionListCommand::set_batch_to_measure(device.get(), params.runtime_data.batch_to_measure);
CHECK_SUCCESS_AS_EXPECTED(status); CHECK_SUCCESS_AS_EXPECTED(status);
} }
} }
#endif
auto infer_result = activate_and_run_vdevice(physical_devices, scheduler_is_used, network_group_list.value(), params); auto infer_result = activate_and_run_vdevice(physical_devices, scheduler_is_used, network_group_list.value(), params);
CHECK_EXPECTED(infer_result, "Error failed running inference"); CHECK_EXPECTED(infer_result, "Error failed running inference");
#if defined(__GNUC__)
for (auto &device : physical_devices) { for (auto &device : physical_devices) {
// TODO: Support on windows (HRT-5919)
if (use_batch_to_measure_opt(params) && (0 == params.frames_count) && infer_result) { if (use_batch_to_measure_opt(params) && (0 == params.frames_count) && infer_result) {
auto min_frames_count = get_min_inferred_frames_count(infer_result.value()); auto min_frames_count = get_min_inferred_frames_count(infer_result.value());
CHECK_EXPECTED(min_frames_count); CHECK_EXPECTED(min_frames_count);
@@ -1390,7 +1409,6 @@ Expected<InferResult> run_command_hef_vdevice(const inference_runner_params &par
params.hef_path); params.hef_path);
} }
} }
#endif
return infer_result; return infer_result;
} }

View File

@@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.0.0)
# set(CMAKE_C_CLANG_TIDY "clang-tidy;-checks=*") # set(CMAKE_C_CLANG_TIDY "clang-tidy;-checks=*")
set(HAILORT_MAJOR_VERSION 4) set(HAILORT_MAJOR_VERSION 4)
set(HAILORT_MINOR_VERSION 14) set(HAILORT_MINOR_VERSION 15)
set(HAILORT_REVISION_VERSION 0) set(HAILORT_REVISION_VERSION 0)
# Add the cmake folder so the modules there are found # Add the cmake folder so the modules there are found
@@ -43,9 +43,19 @@ target_include_directories(scheduler_mon_proto
$<BUILD_INTERFACE: ${Protobuf_INCLUDE_DIRS}> $<BUILD_INTERFACE: ${Protobuf_INCLUDE_DIRS}>
) )
# Add readerwriterqueue as a header-only library protobuf_generate_cpp(PROTO_PROFILER_SRC PROTO_PROFILER_HEADR tracer_profiler.proto)
add_library(readerwriterqueue INTERFACE) add_library(profiler_proto ${PROTO_PROFILER_SRC} ${PROTO_PROFILER_HEADR})
target_include_directories(readerwriterqueue INTERFACE ${HAILO_EXTERNAL_DIR}/readerwriterqueue) target_link_libraries(profiler_proto libprotobuf-lite)
set_target_properties(profiler_proto PROPERTIES CXX_STANDARD 14 GENERATED TRUE POSITION_INDEPENDENT_CODE ON)
if(CMAKE_HOST_WIN32)
target_compile_options(profiler_proto PRIVATE /wd4244)
endif()
get_filename_component(PROTO_PROFILER_HEADER_DIRECTORY ${PROTO_PROFILER_HEADR} DIRECTORY)
target_include_directories(profiler_proto
PUBLIC
$<BUILD_INTERFACE: ${PROTO_PROFILER_HEADER_DIRECTORY}>
$<BUILD_INTERFACE: ${Protobuf_INCLUDE_DIRS}>
)
add_subdirectory(src) add_subdirectory(src)
set(NET_FLOW_INFRA_DIR "${CMAKE_CURRENT_SOURCE_DIR}/tests/infra/net_flow") set(NET_FLOW_INFRA_DIR "${CMAKE_CURRENT_SOURCE_DIR}/tests/infra/net_flow")

View File

@@ -8,7 +8,7 @@ if(NOT CMAKE_HOST_UNIX)
message(FATAL_ERROR "Only unix hosts are supported, stopping build") message(FATAL_ERROR "Only unix hosts are supported, stopping build")
endif() endif()
find_package(HailoRT 4.14.0 EXACT REQUIRED) find_package(HailoRT 4.15.0 EXACT REQUIRED)
# GST_PLUGIN_DEFINE needs PACKAGE to be defined # GST_PLUGIN_DEFINE needs PACKAGE to be defined
set(GST_HAILO_PACKAGE_NAME "hailo") set(GST_HAILO_PACKAGE_NAME "hailo")
@@ -54,6 +54,7 @@ target_link_libraries(gsthailo HailoRT::libhailort ${GSTREAMER_VIDEO_LDFLAGS})
install(TARGETS gsthailo install(TARGETS gsthailo
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
# TODO: get gstreamer-1.0 in an automate way # TODO: get gstreamer-1.0 in an automate way
PUBLIC_HEADER DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/gstreamer-1.0/gst/hailo/" PUBLIC_HEADER DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/gstreamer-1.0/gst/hailo/"
CONFIGURATIONS Release) CONFIGURATIONS Release)

View File

@@ -23,6 +23,7 @@
#include "hailo_events/hailo_events.hpp" #include "hailo_events/hailo_events.hpp"
#include "metadata/hailo_buffer_flag_meta.hpp" #include "metadata/hailo_buffer_flag_meta.hpp"
#include "hailo/hailort_common.hpp" #include "hailo/hailort_common.hpp"
#include "hailo/hailort_defaults.hpp"
#include <sstream> #include <sstream>
#include <algorithm> #include <algorithm>
@@ -108,6 +109,9 @@ enum
PROP_OUTPUT_QUANTIZED, PROP_OUTPUT_QUANTIZED,
PROP_INPUT_FORMAT_TYPE, PROP_INPUT_FORMAT_TYPE,
PROP_OUTPUT_FORMAT_TYPE, PROP_OUTPUT_FORMAT_TYPE,
PROP_NMS_SCORE_THRESHOLD,
PROP_NMS_IOU_THRESHOLD,
PROP_NMS_MAX_PROPOSALS_PER_CLASS,
}; };
G_DEFINE_TYPE(GstHailoNet, gst_hailonet, GST_TYPE_BIN); G_DEFINE_TYPE(GstHailoNet, gst_hailonet, GST_TYPE_BIN);
@@ -197,12 +201,12 @@ static void gst_hailonet_class_init(GstHailoNetClass *klass)
"To use this property, the service should be active and scheduling-algorithm should be set. Defaults to false.", "To use this property, the service should be active and scheduling-algorithm should be set. Defaults to false.",
HAILO_DEFAULT_MULTI_PROCESS_SERVICE, (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS))); HAILO_DEFAULT_MULTI_PROCESS_SERVICE, (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
g_object_class_install_property(gobject_class, PROP_INPUT_QUANTIZED, g_object_class_install_property(gobject_class, PROP_INPUT_QUANTIZED,
g_param_spec_boolean("input-quantized", "Is the input quantized or not", "Passing `true` under the argument means that the input data sent to the stream is quantized to begin with." g_param_spec_boolean("input-quantized", "Is the input quantized or not", "Deprecated parameter that will be ignored. "
"This will result in an input stream that doesn't quantize the input data. Passing `false` under the argument, will lead to input data being quantized.", "Determine whether to quantize (scale) the data will be decided by the src-data and dst-data types.",
true, (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS))); true, (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
g_object_class_install_property(gobject_class, PROP_OUTPUT_QUANTIZED, g_object_class_install_property(gobject_class, PROP_OUTPUT_QUANTIZED,
g_param_spec_boolean("output-quantized", "Should the output be quantized or de-quantized","Passing `true` under the argument means that the output data received from the stream is to remain quantized" g_param_spec_boolean("output-quantized", "Should the output be quantized or de-quantized","Deprecated parameter that will be ignored. "
"(such as it is upon exiting the device). This will result in an output stream that doesn't de-quantize the output data. Passing `false` under the argument will lead to output data being de-quantized.", "Determine whether to de-quantize (rescale) the data will be decided by the src-data and dst-data types.",
true, (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS))); true, (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
g_object_class_install_property(gobject_class, PROP_INPUT_FORMAT_TYPE, g_object_class_install_property(gobject_class, PROP_INPUT_FORMAT_TYPE,
g_param_spec_enum("input-format-type", "Input format type", "Input format type(auto, float32, uint16, uint8). Default value is auto." g_param_spec_enum("input-format-type", "Input format type", "Input format type(auto, float32, uint16, uint8). Default value is auto."
@@ -214,6 +218,16 @@ static void gst_hailonet_class_init(GstHailoNetClass *klass)
"Gets values from the enum GstHailoFormatType. ", "Gets values from the enum GstHailoFormatType. ",
GST_TYPE_HAILO_FORMAT_TYPE, HAILO_FORMAT_TYPE_AUTO, GST_TYPE_HAILO_FORMAT_TYPE, HAILO_FORMAT_TYPE_AUTO,
(GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS))); (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
g_object_class_install_property(gobject_class, PROP_NMS_SCORE_THRESHOLD,
g_param_spec_float("nms-score-threshold", "NMS score threshold", "Threshold used for filtering out candidates. Any box with score<TH is suppressed.",
0, 1, 0, (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
g_object_class_install_property(gobject_class, PROP_NMS_IOU_THRESHOLD,
g_param_spec_float("nms-iou-threshold", "NMS IoU threshold", "Intersection over union overlap Threshold, used in the NMS iterative elimination process where potential duplicates of detected items are suppressed.",
0, 1, 0, (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
g_object_class_install_property(gobject_class, PROP_NMS_MAX_PROPOSALS_PER_CLASS,
g_param_spec_uint("nms-max-proposals-per-class", "NMS max proposals per class", "Set a limit for the maximum number of boxes per class.",
0, std::numeric_limits<uint32_t>::max(), 0, (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
// See information about the "flush" signal in the element description // See information about the "flush" signal in the element description
g_signal_new( g_signal_new(
"flush", "flush",
@@ -271,9 +285,9 @@ Expected<std::unique_ptr<HailoNetImpl>> HailoNetImpl::create(GstHailoNet *elemen
g_signal_connect(element, "flush", G_CALLBACK(gst_hailonet_flush_callback), nullptr); g_signal_connect(element, "flush", G_CALLBACK(gst_hailonet_flush_callback), nullptr);
auto was_flushed_event = Event::create_shared(Event::State::not_signalled); auto was_flushed_event = Event::create_shared(Event::State::not_signalled);
GST_CHECK(nullptr != was_flushed_event, make_unexpected(HAILO_OUT_OF_HOST_MEMORY), element, RESOURCE, "Failed allocating memory for event!"); GST_CHECK_EXPECTED(was_flushed_event, element, RESOURCE, "Failed allocating memory for event!");
auto ptr = make_unique_nothrow<HailoNetImpl>(element, hailosend, queue, hailorecv, was_flushed_event); auto ptr = make_unique_nothrow<HailoNetImpl>(element, hailosend, queue, hailorecv, was_flushed_event.release());
if (nullptr == ptr) { if (nullptr == ptr) {
return make_unexpected(HAILO_OUT_OF_HOST_MEMORY); return make_unexpected(HAILO_OUT_OF_HOST_MEMORY);
} }
@@ -518,6 +532,7 @@ void HailoNetImpl::set_property(GObject *object, guint property_id, const GValue
m_props.m_multi_process_service = g_value_get_boolean(value); m_props.m_multi_process_service = g_value_get_boolean(value);
break; break;
case PROP_INPUT_QUANTIZED: case PROP_INPUT_QUANTIZED:
g_warning("'input-quantized' is a deprecated parameter that will be ignored.");
if (m_was_configured) { if (m_was_configured) {
g_warning("The network was already configured so changing the quantized flag will not take place!"); g_warning("The network was already configured so changing the quantized flag will not take place!");
break; break;
@@ -525,6 +540,7 @@ void HailoNetImpl::set_property(GObject *object, guint property_id, const GValue
m_props.m_input_quantized = g_value_get_boolean(value); m_props.m_input_quantized = g_value_get_boolean(value);
break; break;
case PROP_OUTPUT_QUANTIZED: case PROP_OUTPUT_QUANTIZED:
g_warning("'output-quantized' is a deprecated parameter that will be ignored.");
if (m_was_configured) { if (m_was_configured) {
g_warning("The network was already configured so changing the quantized flag will not take place!"); g_warning("The network was already configured so changing the quantized flag will not take place!");
break; break;
@@ -545,6 +561,27 @@ void HailoNetImpl::set_property(GObject *object, guint property_id, const GValue
} }
m_props.m_output_format_type = static_cast<hailo_format_type_t>(g_value_get_enum(value)); m_props.m_output_format_type = static_cast<hailo_format_type_t>(g_value_get_enum(value));
break; break;
case PROP_NMS_SCORE_THRESHOLD:
if (m_was_configured) {
g_warning("The network was already configured so changing the score threshold will not take place!");
break;
}
m_props.m_nms_score_threshold = static_cast<gfloat>(g_value_get_float(value));
break;
case PROP_NMS_IOU_THRESHOLD:
if (m_was_configured) {
g_warning("The network was already configured so changing the IoU threshold will not take place!");
break;
}
m_props.m_nms_iou_threshold = static_cast<gfloat>(g_value_get_float(value));
break;
case PROP_NMS_MAX_PROPOSALS_PER_CLASS:
if (m_was_configured) {
g_warning("The network was already configured so changing the max proposals per class will not take place!");
break;
}
m_props.m_nms_max_proposals_per_class = static_cast<guint32>(g_value_get_uint(value));
break;
default: default:
G_OBJECT_WARN_INVALID_PROPERTY_ID(object, property_id, pspec); G_OBJECT_WARN_INVALID_PROPERTY_ID(object, property_id, pspec);
break; break;
@@ -630,6 +667,15 @@ void HailoNetImpl::get_property(GObject *object, guint property_id, GValue *valu
case PROP_OUTPUT_FORMAT_TYPE: case PROP_OUTPUT_FORMAT_TYPE:
g_value_set_enum(value, m_props.m_output_format_type.get()); g_value_set_enum(value, m_props.m_output_format_type.get());
break; break;
case PROP_NMS_SCORE_THRESHOLD:
g_value_set_float(value, m_props.m_nms_score_threshold.get());
break;
case PROP_NMS_IOU_THRESHOLD:
g_value_set_float(value, m_props.m_nms_iou_threshold.get());
break;
case PROP_NMS_MAX_PROPOSALS_PER_CLASS:
g_value_set_uint(value, m_props.m_nms_max_proposals_per_class.get());
break;
default: default:
G_OBJECT_WARN_INVALID_PROPERTY_ID(object, property_id, pspec); G_OBJECT_WARN_INVALID_PROPERTY_ID(object, property_id, pspec);
break; break;
@@ -657,11 +703,15 @@ hailo_status HailoNetImpl::set_hef()
// TODO: HRT-4957 // TODO: HRT-4957
GST_CHECK(m_net_group_handle->hef()->get_network_groups_names().size() == 1, HAILO_INVALID_ARGUMENT, m_element, RESOURCE, GST_CHECK(m_net_group_handle->hef()->get_network_groups_names().size() == 1, HAILO_INVALID_ARGUMENT, m_element, RESOURCE,
"Network group has to be specified when there are more than one network groups in the HEF!"); "Network group has to be specified when there are more than one network groups in the HEF!");
auto networks_infos = m_net_group_handle->hef()->get_network_infos(m_net_group_handle->hef()->get_network_groups_names()[0].c_str()); auto network_group_name = m_net_group_handle->hef()->get_network_groups_names()[0];
auto networks_infos = m_net_group_handle->hef()->get_network_infos(network_group_name.c_str());
GST_CHECK_EXPECTED_AS_STATUS(networks_infos, m_element, RESOURCE, "Getting network infos from network group name was failed, status %d", networks_infos.status()); GST_CHECK_EXPECTED_AS_STATUS(networks_infos, m_element, RESOURCE, "Getting network infos from network group name was failed, status %d", networks_infos.status());
GST_CHECK(networks_infos.value().size() == 1, HAILO_INVALID_ARGUMENT, m_element, RESOURCE, GST_CHECK(networks_infos.value().size() == 1, HAILO_INVALID_ARGUMENT, m_element, RESOURCE,
"Network has to be specified when there are more than one network in the network group!"); "Network has to be specified when there are more than one network in the network group!");
m_props.m_network_name = g_strdup(networks_infos.release()[0].name);
std::string default_ng_name = HailoRTDefaults::get_network_name(network_group_name);
m_props.m_network_name = g_strdup(default_ng_name.c_str());
} }
auto input_vstream_infos = m_net_group_handle->hef()->get_input_vstream_infos(m_props.m_network_name.get()); auto input_vstream_infos = m_net_group_handle->hef()->get_input_vstream_infos(m_props.m_network_name.get());
@@ -720,12 +770,48 @@ hailo_status HailoNetImpl::configure_network_group()
GST_CHECK_SUCCESS(status, m_element, RESOURCE, "Setting scheduler priority failed, status = %d", status); GST_CHECK_SUCCESS(status, m_element, RESOURCE, "Setting scheduler priority failed, status = %d", status);
} }
auto vstreams = m_net_group_handle->create_vstreams(m_props.m_network_name.get(), m_props.m_scheduling_algorithm.get(), m_output_formats, static_cast<bool>(m_props.m_input_quantized.get()), auto input_quantized = (m_props.m_input_quantized.was_changed()) ? static_cast<bool>(m_props.m_input_quantized.get()) :
static_cast<bool>(m_props.m_output_quantized.get()), m_props.m_input_format_type.get(), m_props.m_output_format_type.get()); (m_props.m_input_format_type.get() != HAILO_FORMAT_TYPE_FLOAT32);
auto output_quantized = (m_props.m_output_quantized.was_changed()) ? static_cast<bool>(m_props.m_output_quantized.get()) :
(m_props.m_output_format_type.get() != HAILO_FORMAT_TYPE_FLOAT32);
auto vstreams = m_net_group_handle->create_vstreams(m_props.m_network_name.get(), m_props.m_scheduling_algorithm.get(), m_output_formats,
input_quantized, output_quantized, m_props.m_input_format_type.get(), m_props.m_output_format_type.get());
GST_CHECK_EXPECTED_AS_STATUS(vstreams, m_element, RESOURCE, "Creating vstreams failed, status = %d", status); GST_CHECK_EXPECTED_AS_STATUS(vstreams, m_element, RESOURCE, "Creating vstreams failed, status = %d", status);
GST_HAILOSEND(m_hailosend)->impl->set_input_vstreams(std::move(vstreams->first)); GST_HAILOSEND(m_hailosend)->impl->set_input_vstreams(std::move(vstreams->first));
// Check that if one of the NMS params are changed, we have NMS outputs in the model
auto has_nms_output = std::any_of(vstreams->second.begin(), vstreams->second.end(), [](const auto &vs)
{
return HailoRTCommon::is_nms(vs.get_info());
});
for (auto &out_vs : vstreams->second) {
if (m_props.m_nms_score_threshold.was_changed()) {
GST_CHECK(has_nms_output, HAILO_INVALID_OPERATION, m_element, RESOURCE, "NMS score threshold is set, but there is no NMS output in this model.");
if (HailoRTCommon::is_nms(out_vs.get_info())) {
status = out_vs.set_nms_score_threshold(m_props.m_nms_score_threshold.get());
GST_CHECK_SUCCESS(status, m_element, RESOURCE, "Setting NMS score threshold failed, status = %d", status);
}
}
if (m_props.m_nms_iou_threshold.was_changed()) {
GST_CHECK(has_nms_output, HAILO_INVALID_OPERATION, m_element, RESOURCE, "NMS IoU threshold is set, but there is no NMS output in this model.");
if (HailoRTCommon::is_nms(out_vs.get_info())) {
status = out_vs.set_nms_iou_threshold(m_props.m_nms_iou_threshold.get());
GST_CHECK_SUCCESS(status, m_element, RESOURCE, "Setting NMS IoU threshold failed, status = %d", status);
}
}
if (m_props.m_nms_max_proposals_per_class.was_changed()) {
GST_CHECK(has_nms_output, HAILO_INVALID_OPERATION, m_element, RESOURCE, "NMS max proposals per class is set, but there is no NMS output in this model.");
if (HailoRTCommon::is_nms(out_vs.get_info())) {
status = out_vs.set_nms_max_proposals_per_class(m_props.m_nms_max_proposals_per_class.get());
GST_CHECK_SUCCESS(status, m_element, RESOURCE, "Setting NMS max proposals per class failed, status = %d", status);
}
}
}
status = GST_HAILORECV(m_hailorecv)->impl->set_output_vstreams(std::move(vstreams->second), m_props.m_batch_size.get()); status = GST_HAILORECV(m_hailorecv)->impl->set_output_vstreams(std::move(vstreams->second), m_props.m_batch_size.get());
GST_CHECK_SUCCESS(status, m_element, RESOURCE, "Setting output vstreams failed, status = %d", status); GST_CHECK_SUCCESS(status, m_element, RESOURCE, "Setting output vstreams failed, status = %d", status);
@@ -760,7 +846,7 @@ Expected<std::string> HailoNetImpl::get_network_group_name(const std::string &ne
{ {
for (const auto &network_group_name : m_net_group_handle->hef()->get_network_groups_names()) { for (const auto &network_group_name : m_net_group_handle->hef()->get_network_groups_names()) {
// Look for network_group with the given name // Look for network_group with the given name
if (network_name == network_group_name) { if ((network_name == network_group_name) || (network_name == HailoRTDefaults::get_network_name(network_group_name))) {
return std::string(network_group_name); return std::string(network_group_name);
} }

View File

@@ -55,7 +55,7 @@ public:
m_is_active(false), m_device_count(0), m_vdevice_key(DEFAULT_VDEVICE_KEY), m_scheduling_algorithm(HAILO_SCHEDULING_ALGORITHM_ROUND_ROBIN), m_is_active(false), m_device_count(0), m_vdevice_key(DEFAULT_VDEVICE_KEY), m_scheduling_algorithm(HAILO_SCHEDULING_ALGORITHM_ROUND_ROBIN),
m_scheduler_timeout_ms(HAILO_DEFAULT_SCHEDULER_TIMEOUT_MS), m_scheduler_threshold(HAILO_DEFAULT_SCHEDULER_THRESHOLD), m_scheduler_priority(HAILO_SCHEDULER_PRIORITY_NORMAL), m_scheduler_timeout_ms(HAILO_DEFAULT_SCHEDULER_TIMEOUT_MS), m_scheduler_threshold(HAILO_DEFAULT_SCHEDULER_THRESHOLD), m_scheduler_priority(HAILO_SCHEDULER_PRIORITY_NORMAL),
m_multi_process_service(HAILO_DEFAULT_MULTI_PROCESS_SERVICE), m_input_quantized(true), m_output_quantized(true), m_input_format_type(HAILO_FORMAT_TYPE_AUTO), m_multi_process_service(HAILO_DEFAULT_MULTI_PROCESS_SERVICE), m_input_quantized(true), m_output_quantized(true), m_input_format_type(HAILO_FORMAT_TYPE_AUTO),
m_output_format_type(HAILO_FORMAT_TYPE_AUTO) m_output_format_type(HAILO_FORMAT_TYPE_AUTO), m_nms_score_threshold(0), m_nms_iou_threshold(0), m_nms_max_proposals_per_class(0)
{} {}
@@ -75,6 +75,9 @@ public:
HailoElemProperty<gboolean> m_output_quantized; HailoElemProperty<gboolean> m_output_quantized;
HailoElemProperty<hailo_format_type_t> m_input_format_type; HailoElemProperty<hailo_format_type_t> m_input_format_type;
HailoElemProperty<hailo_format_type_t> m_output_format_type; HailoElemProperty<hailo_format_type_t> m_output_format_type;
HailoElemProperty<gfloat> m_nms_score_threshold;
HailoElemProperty<gfloat> m_nms_iou_threshold;
HailoElemProperty<guint32> m_nms_max_proposals_per_class;
}; };
class HailoNetImpl final class HailoNetImpl final

View File

@@ -149,18 +149,26 @@ GstFlowReturn HailoSendImpl::handle_frame(GstVideoFilter */*filter*/, GstVideoFr
return GST_FLOW_OK; return GST_FLOW_OK;
} }
guint8 *frame_buffer = reinterpret_cast<guint8*>(GST_VIDEO_FRAME_PLANE_DATA(frame, 0)); hailo_pix_buffer_t pix_buffer = {};
pix_buffer.index = 0;
pix_buffer.number_of_planes = GST_VIDEO_INFO_N_PLANES(&frame->info);
for (uint32_t plane_index = 0; plane_index < pix_buffer.number_of_planes; plane_index++) {
pix_buffer.planes[plane_index].bytes_used = GST_VIDEO_INFO_PLANE_STRIDE(&frame->info, plane_index) * GST_VIDEO_INFO_COMP_HEIGHT(&frame->info, plane_index);
pix_buffer.planes[plane_index].plane_size = GST_VIDEO_INFO_PLANE_STRIDE(&frame->info, plane_index) * GST_VIDEO_INFO_COMP_HEIGHT(&frame->info, plane_index);
pix_buffer.planes[plane_index].user_ptr = GST_VIDEO_FRAME_PLANE_DATA(frame, plane_index);
}
hailo_status status = HAILO_UNINITIALIZED; hailo_status status = HAILO_UNINITIALIZED;
if (m_props.m_debug.get()) { if (m_props.m_debug.get()) {
std::chrono::duration<double, std::milli> latency; std::chrono::duration<double, std::milli> latency;
std::chrono::time_point<std::chrono::system_clock> start_time; std::chrono::time_point<std::chrono::system_clock> start_time;
start_time = std::chrono::system_clock::now(); start_time = std::chrono::system_clock::now();
status = write_to_vstreams(frame_buffer, GST_VIDEO_FRAME_SIZE(frame)); status = write_to_vstreams(pix_buffer);
latency = std::chrono::system_clock::now() - start_time; latency = std::chrono::system_clock::now() - start_time;
GST_DEBUG("hailosend latency: %f milliseconds", latency.count()); GST_DEBUG("hailosend latency: %f milliseconds", latency.count());
} else { } else {
status = write_to_vstreams(frame_buffer, GST_VIDEO_FRAME_SIZE(frame)); status = write_to_vstreams(pix_buffer);
} }
if (HAILO_SUCCESS != status) { if (HAILO_SUCCESS != status) {
@@ -169,10 +177,13 @@ GstFlowReturn HailoSendImpl::handle_frame(GstVideoFilter */*filter*/, GstVideoFr
return GST_FLOW_OK; return GST_FLOW_OK;
} }
hailo_status HailoSendImpl::write_to_vstreams(void *buf, size_t size) hailo_status HailoSendImpl::write_to_vstreams(const hailo_pix_buffer_t &pix_buffer)
{ {
for (auto &in_vstream : m_input_vstreams) { for (auto &in_vstream : m_input_vstreams) {
auto status = in_vstream.write(MemoryView(buf, size)); auto status = in_vstream.write(pix_buffer);
if (HAILO_STREAM_ABORTED_BY_USER == status) {
return status;
}
GST_CHECK_SUCCESS(status, m_element, STREAM, "Failed writing to input vstream %s, status = %d", in_vstream.name().c_str(), status); GST_CHECK_SUCCESS(status, m_element, STREAM, "Failed writing to input vstream %s, status = %d", in_vstream.name().c_str(), status);
} }
return HAILO_SUCCESS; return HAILO_SUCCESS;

View File

@@ -89,7 +89,7 @@ public:
} }
private: private:
hailo_status write_to_vstreams(void *buf, size_t size); hailo_status write_to_vstreams(const hailo_pix_buffer_t &pix_buffer);
GstHailoSend *m_element; GstHailoSend *m_element;
GstHailoNet *m_hailonet; GstHailoNet *m_hailonet;

View File

@@ -111,7 +111,6 @@ Expected<std::shared_ptr<VDevice>> NetworkGroupHandle::create_vdevice(const void
} }
auto result = create_unique_vdevice(element, device_count, scheduling_algorithm, multi_process_service); auto result = create_unique_vdevice(element, device_count, scheduling_algorithm, multi_process_service);
GST_CHECK_EXPECTED(result, element, RESOURCE, "Failed creating vdevice, status = %d", result.status()); GST_CHECK_EXPECTED(result, element, RESOURCE, "Failed creating vdevice, status = %d", result.status());
m_vdevices.insert(result.value());
return result; return result;
} }
@@ -361,7 +360,7 @@ std::shared_ptr<ConfiguredNetworkGroup> NetworkGroupConfigManager::get_configure
return nullptr; return nullptr;
} }
return found->second; return found->second.lock();
} }
std::string NetworkGroupConfigManager::get_configure_string(const std::string &device_id, const std::string &hef_hash, std::string NetworkGroupConfigManager::get_configure_string(const std::string &device_id, const std::string &hef_hash,

View File

@@ -51,7 +51,7 @@ private:
const char *net_group_name, uint16_t batch_size); const char *net_group_name, uint16_t batch_size);
// TODO: change this map to store only the shared network_groups (used by multiple hailonets with the same vdevices) // TODO: change this map to store only the shared network_groups (used by multiple hailonets with the same vdevices)
std::unordered_map<std::string, std::shared_ptr<ConfiguredNetworkGroup>> m_configured_net_groups; std::unordered_map<std::string, std::weak_ptr<ConfiguredNetworkGroup>> m_configured_net_groups;
std::unordered_map<device_id_t, std::unordered_map<network_name_t, hailonet_name_t>> m_configured_networks; std::unordered_map<device_id_t, std::unordered_map<network_name_t, hailonet_name_t>> m_configured_networks;
std::mutex m_mutex; std::mutex m_mutex;
}; };

View File

@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.11.0) cmake_minimum_required(VERSION 3.11.0)
include(externals/pybind11.cmake) include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/pybind11.cmake)
add_subdirectory(src) add_subdirectory(src)

View File

@@ -154,7 +154,7 @@ class HailoHWObject(object):
if len(self._loaded_network_groups) == 1: if len(self._loaded_network_groups) == 1:
return self._loaded_network_groups[0].name return self._loaded_network_groups[0].name
raise HailoHWObjectException( raise HailoHWObjectException(
"This function is only supported when there is exactly 1 loaded network group. one should use HEF.get_network_group_names() / ConfiguredNetwork.name / ActivatedNetwork.name") "This function is only supported when there is exactly 1 loaded network group. Use HEF.get_network_group_names() / ConfiguredNetwork.name / ActivatedNetwork.name")
def get_output_shapes(self): def get_output_shapes(self):
"""Get the model output shapes, as returned to the user (without any hardware padding) (deprecated). """Get the model output shapes, as returned to the user (without any hardware padding) (deprecated).

View File

@@ -685,30 +685,6 @@ class ConfiguredNetwork(object):
with ExceptionWrapper(): with ExceptionWrapper():
return self._configured_network.get_udp_rates_dict(int(fps), int(max_supported_rate_bytes)) return self._configured_network.get_udp_rates_dict(int(fps), int(max_supported_rate_bytes))
def _before_fork(self):
if self._configured_network is not None:
self._configured_network.before_fork()
for input_vstreams in self._input_vstreams_holders:
input_vstreams.before_fork()
for output_vstreams in self._output_vstreams_holders:
output_vstreams.before_fork()
def _after_fork_in_parent(self):
if self._configured_network is not None:
self._configured_network.after_fork_in_parent()
for input_vstreams in self._input_vstreams_holders:
input_vstreams.after_fork_in_parent()
for output_vstreams in self._output_vstreams_holders:
output_vstreams.after_fork_in_parent()
def _after_fork_in_child(self):
if self._configured_network is not None:
self._configured_network.after_fork_in_child()
for input_vstreams in self._input_vstreams_holders:
input_vstreams.after_fork_in_child()
for output_vstreams in self._output_vstreams_holders:
output_vstreams.after_fork_in_child()
def _create_input_vstreams(self, input_vstreams_params): def _create_input_vstreams(self, input_vstreams_params):
input_vstreams_holder = self._configured_network.InputVStreams(input_vstreams_params) input_vstreams_holder = self._configured_network.InputVStreams(input_vstreams_params)
self._input_vstreams_holders.append(input_vstreams_holder) self._input_vstreams_holders.append(input_vstreams_holder)
@@ -752,7 +728,7 @@ class ConfiguredNetwork(object):
Args: Args:
timeout_ms (int): Timeout in milliseconds. timeout_ms (int): Timeout in milliseconds.
""" """
name = network_name if network_name is not None else self.name name = network_name if network_name is not None else ""
return self._configured_network.set_scheduler_timeout(timeout_ms, name) return self._configured_network.set_scheduler_timeout(timeout_ms, name)
def set_scheduler_threshold(self, threshold): def set_scheduler_threshold(self, threshold):
@@ -956,6 +932,20 @@ class InferVStreams(object):
self._hw_time = time.perf_counter() - time_before_infer self._hw_time = time.perf_counter() - time_before_infer
for name, result_array in output_buffers.items(): for name, result_array in output_buffers.items():
# TODO: HRT-11726 - Combine Pyhailort NMS and NMS_WITH_BYTE_MASK decoding function
if output_buffers_info[name].output_order == FormatOrder.HAILO_NMS_WITH_BYTE_MASK:
nms_shape = output_buffers_info[name].vstream_info.nms_shape
output_dtype = output_buffers_info[name].output_dtype
input_stream_infos = self._configured_net_group.get_input_stream_infos()
if len(input_stream_infos) != 1:
raise Exception("Output format HAILO_NMS_WITH_BYTE_MASK should have 1 input. Number of inputs: {}".format(len(input_stream_infos)))
input_height = input_stream_infos[0].shape[0]
input_width = input_stream_infos[0].shape[1]
output_buffers[name] = HailoRTTransformUtils._output_raw_buffer_to_nms_with_byte_mask_format(result_array,
nms_shape.number_of_classes, batch_size, input_height, input_width,
nms_shape.max_bboxes_per_class, output_dtype, self._tf_nms_format)
continue
is_nms = output_buffers_info[name].is_nms is_nms = output_buffers_info[name].is_nms
if not is_nms: if not is_nms:
continue continue
@@ -1032,11 +1022,90 @@ class InferVStreams(object):
input_layer_name)) input_layer_name))
input_data[input_layer_name] = numpy.asarray(input_data[input_layer_name], order='C') input_data[input_layer_name] = numpy.asarray(input_data[input_layer_name], order='C')
def set_nms_score_threshold(self, threshold):
"""Set NMS score threshold, used for filtering out candidates. Any box with score<TH is suppressed.
Args:
threshold (float): NMS score threshold to set.
Note:
This function will fail in cases where there is no output with NMS operations on the CPU.
"""
return self._infer_pipeline.set_nms_score_threshold(threshold)
def set_nms_iou_threshold(self, threshold):
"""Set NMS intersection over union overlap Threshold,
used in the NMS iterative elimination process where potential duplicates of detected items are suppressed.
Args:
threshold (float): NMS IoU threshold to set.
Note:
This function will fail in cases where there is no output with NMS operations on the CPU.
"""
return self._infer_pipeline.set_nms_iou_threshold(threshold)
def set_nms_max_proposals_per_class(self, max_proposals_per_class):
"""Set a limit for the maximum number of boxes per class.
Args:
max_proposals_per_class (int): NMS max proposals per class to set.
Note:
This function will fail in cases where there is no output with NMS operations on the CPU.
"""
return self._infer_pipeline.set_nms_max_proposals_per_class(max_proposals_per_class)
def __exit__(self, *args): def __exit__(self, *args):
self._infer_pipeline.release() self._infer_pipeline.release()
return False return False
class HailoDetectionBox(object):
# TODO: HRT-11492 - Add documentation to class and functions
def __init__(self, bbox, class_id, mask_size, mask):
self._bbox = bbox
self._mask_size = mask_size
self._mask = mask
self._class_id = class_id
@property
def bbox(self):
return self._bbox
@property
def y_min(self):
return self._bbox[0]
@property
def x_min(self):
return self._bbox[1]
@property
def y_max(self):
return self._bbox[2]
@property
def x_max(self):
return self._bbox[3]
@property
def score(self):
return self._bbox[4]
@property
def class_id(self):
return self._class_id
@property
def mask_size(self):
return self._mask_size
@property
def mask(self):
return self._mask
class HailoRTTransformUtils(object): class HailoRTTransformUtils(object):
@staticmethod @staticmethod
def get_dtype(data_bytes): def get_dtype(data_bytes):
@@ -1064,6 +1133,9 @@ class HailoRTTransformUtils(object):
with ExceptionWrapper(): with ExceptionWrapper():
src_format_type = HailoRTTransformUtils._get_format_type(src_buffer.dtype) src_format_type = HailoRTTransformUtils._get_format_type(src_buffer.dtype)
dst_format_type = HailoRTTransformUtils._get_format_type(dst_buffer.dtype) dst_format_type = HailoRTTransformUtils._get_format_type(dst_buffer.dtype)
if not _pyhailort.is_qp_valid(quant_info):
raise HailoRTInvalidOperationException("quant_info is invalid as the model was compiled with multiple quant_infos. "
"Please compile again or provide a list of quant_infos.")
_pyhailort.dequantize_output_buffer(src_buffer, dst_buffer, src_format_type, dst_format_type, elements_count, quant_info) _pyhailort.dequantize_output_buffer(src_buffer, dst_buffer, src_format_type, dst_format_type, elements_count, quant_info)
@staticmethod @staticmethod
@@ -1079,8 +1151,20 @@ class HailoRTTransformUtils(object):
with ExceptionWrapper(): with ExceptionWrapper():
src_format_type = HailoRTTransformUtils._get_format_type(raw_buffer.dtype) src_format_type = HailoRTTransformUtils._get_format_type(raw_buffer.dtype)
dst_format_type = HailoRTTransformUtils._get_format_type(dst_dtype) dst_format_type = HailoRTTransformUtils._get_format_type(dst_dtype)
if not _pyhailort.is_qp_valid(quant_info):
raise HailoRTInvalidOperationException("quant_info is invalid as the model was compiled with multiple quant_infos. "
"Please compile again or provide a list of quant_infos.")
_pyhailort.dequantize_output_buffer_in_place(raw_buffer, src_format_type, dst_format_type, elements_count, quant_info) _pyhailort.dequantize_output_buffer_in_place(raw_buffer, src_format_type, dst_format_type, elements_count, quant_info)
@staticmethod
def is_qp_valid(quant_info):
"""Returns if quant_info is valid.
Args:
quant_info (:class:`~hailo_platform.pyhailort.pyhailort.QuantInfo`): The quantization info.
"""
return _pyhailort.is_qp_valid(quant_info)
@staticmethod @staticmethod
def quantize_input_buffer(src_buffer, dst_buffer, elements_count, quant_info): def quantize_input_buffer(src_buffer, dst_buffer, elements_count, quant_info):
"""Quantize the data in input buffer `src_buffer` and output it to the buffer `dst_buffer` """Quantize the data in input buffer `src_buffer` and output it to the buffer `dst_buffer`
@@ -1096,6 +1180,9 @@ class HailoRTTransformUtils(object):
with ExceptionWrapper(): with ExceptionWrapper():
src_format_type = HailoRTTransformUtils._get_format_type(src_buffer.dtype) src_format_type = HailoRTTransformUtils._get_format_type(src_buffer.dtype)
dst_format_type = HailoRTTransformUtils._get_format_type(dst_buffer.dtype) dst_format_type = HailoRTTransformUtils._get_format_type(dst_buffer.dtype)
if not _pyhailort.is_qp_valid(quant_info):
raise HailoRTInvalidOperationException("quant_info is invalid as the model was compiled with multiple quant_infos. "
"Please compile again or provide a list of quant_infos.")
_pyhailort.quantize_input_buffer(src_buffer, dst_buffer, src_format_type, dst_format_type, elements_count, quant_info) _pyhailort.quantize_input_buffer(src_buffer, dst_buffer, src_format_type, dst_format_type, elements_count, quant_info)
@staticmethod @staticmethod
@@ -1142,6 +1229,121 @@ class HailoRTTransformUtils(object):
offset += BBOX_PARAMS * class_bboxes_amount offset += BBOX_PARAMS * class_bboxes_amount
return converted_output_frame return converted_output_frame
@staticmethod
def _output_raw_buffer_to_nms_with_byte_mask_format(raw_output_buffer, number_of_classes, batch_size, image_height, image_width,
max_bboxes_per_class, output_dtype, is_tf_format=False):
if is_tf_format:
if os.environ.get('HAILO_TF_FORMAT_INTERNAL'):
return HailoRTTransformUtils._output_raw_buffer_to_nms_with_byte_mask_tf_format(raw_output_buffer, number_of_classes,
batch_size, image_height, image_width, max_bboxes_per_class, output_dtype)
else:
raise HailoRTException("TF format is not supported with HAILO_NMS_WITH_BYTE_MASK format order")
else:
return HailoRTTransformUtils._output_raw_buffer_to_nms_with_byte_mask_hailo_format(raw_output_buffer, number_of_classes)
@staticmethod
def _output_raw_buffer_to_nms_with_byte_mask_hailo_format(raw_output_buffer, number_of_classes):
converted_output_buffer = []
for frame in raw_output_buffer:
converted_output_buffer.append(
HailoRTTransformUtils._output_raw_buffer_to_nms_with_byte_mask_hailo_format_single_frame(frame, number_of_classes))
return converted_output_buffer
@staticmethod
def _output_raw_buffer_to_nms_with_byte_mask_hailo_format_single_frame(raw_output_buffer, number_of_classes):
offset = 0
converted_output_frame = []
for class_i in range(number_of_classes):
class_bboxes_amount = int(raw_output_buffer[offset])
offset += 1
classes_boxes = []
if class_bboxes_amount != 0:
for bbox_i in range(class_bboxes_amount):
bbox = raw_output_buffer[offset : offset + BBOX_PARAMS]
offset += BBOX_PARAMS
bbox_mask_size_in_bytes = raw_output_buffer[offset]
offset += 1
bbox_mask_size = int(bbox_mask_size_in_bytes / 4)
bbox_mask = raw_output_buffer[offset : (offset + bbox_mask_size)]
offset += bbox_mask_size
hailo_bbox = HailoDetectionBox(bbox, class_i, bbox_mask_size_in_bytes, bbox_mask)
classes_boxes.append(hailo_bbox)
converted_output_frame.append(classes_boxes)
return converted_output_frame
@staticmethod
def _output_raw_buffer_to_nms_with_byte_mask_tf_format(raw_output_buffer, number_of_classes, batch_size, image_height, image_width,
max_bboxes_per_class, output_dtype):
offset = 0
# The + 1 is for the extra row containing the bbox coordinates, score and class_id
output_height = image_height + 1
# We create the tf_format buffer with reversed max_bboxes_per_class/features for performance optimization
converted_output_buffer = numpy.empty([batch_size, max_bboxes_per_class, output_height, image_width], dtype=output_dtype)
for frame_idx in range(len(raw_output_buffer)):
offset = HailoRTTransformUtils._output_raw_buffer_to_nms_with_byte_mask_tf_format_single_frame(
raw_output_buffer[frame_idx], converted_output_buffer[frame_idx], number_of_classes, max_bboxes_per_class,
image_height, image_width, offset)
converted_output_buffer = numpy.moveaxis(converted_output_buffer, 1, 3)
return converted_output_buffer
@staticmethod
def _output_raw_buffer_to_nms_with_byte_mask_tf_format_single_frame(raw_output_buffer, converted_output_frame, number_of_classes,
max_boxes, image_height, image_width, offset):
detections = []
for class_i in range(number_of_classes):
class_bboxes_amount = int(raw_output_buffer[offset])
offset += 1
if class_bboxes_amount != 0:
for bbox_i in range(class_bboxes_amount):
bbox = raw_output_buffer[offset : offset + BBOX_PARAMS]
offset += BBOX_PARAMS
bbox_mask_size_in_bytes = raw_output_buffer[offset]
offset += 1
bbox_mask_size = int(bbox_mask_size_in_bytes // 4)
bbox_mask = raw_output_buffer[offset : (offset + bbox_mask_size)]
offset += bbox_mask_size
y_min = bbox[0] * image_height
x_min = bbox[1] * image_width
bbox_width = round((bbox[3] - bbox[1]) * image_width)
resized_mask = numpy.empty([image_height, image_width])
for i in range(bbox_mask_size):
if (bbox_mask[i] == 1):
x = int(x_min + (i % bbox_width))
y = int(y_min + (i // bbox_width))
if (x >= image_width):
x = image_width - 1
if ( y >= image_height):
y = image_height - 1
resized_mask[y][x] = 1
padding = image_width - len(bbox)
bbox_padded = numpy.pad(bbox, pad_width=(0, padding), mode='constant')
bbox_padded[len(bbox)] = class_i
converted_detection = numpy.append(resized_mask ,[bbox_padded], axis=0)
detections.append((bbox[4], converted_detection))
detections.sort(key=lambda tup: tup[0], reverse=True)
for detection_idx in range(len(detections)):
if (detection_idx >= max_boxes):
return offset
converted_output_frame[detection_idx] = detections[detection_idx][1]
return offset
@staticmethod @staticmethod
def _get_format_type(dtype): def _get_format_type(dtype):
if dtype == numpy.uint8: if dtype == numpy.uint8:
@@ -1313,7 +1515,7 @@ class HailoFormatFlags(_pyhailort.FormatFlags):
SUPPORTED_PROTOCOL_VERSION = 2 SUPPORTED_PROTOCOL_VERSION = 2
SUPPORTED_FW_MAJOR = 4 SUPPORTED_FW_MAJOR = 4
SUPPORTED_FW_MINOR = 14 SUPPORTED_FW_MINOR = 15
SUPPORTED_FW_REVISION = 0 SUPPORTED_FW_REVISION = 0
MEGA_MULTIPLIER = 1000.0 * 1000.0 MEGA_MULTIPLIER = 1000.0 * 1000.0
@@ -1323,7 +1525,8 @@ class DeviceArchitectureTypes(IntEnum):
HAILO8_A0 = 0 HAILO8_A0 = 0
HAILO8 = 1 HAILO8 = 1
HAILO8L = 2 HAILO8L = 2
HAILO15 = 3 HAILO15H = 3
PLUTO = 4
def __str__(self): def __str__(self):
return self.name return self.name
@@ -1379,7 +1582,7 @@ class BoardInformation(object):
if ((device_arch == DeviceArchitectureTypes.HAILO8) or if ((device_arch == DeviceArchitectureTypes.HAILO8) or
(device_arch == DeviceArchitectureTypes.HAILO8L)): (device_arch == DeviceArchitectureTypes.HAILO8L)):
return 'hailo8' return 'hailo8'
elif device_arch == DeviceArchitectureTypes.HAILO15: elif device_arch == DeviceArchitectureTypes.HAILO15H:
return 'hailo15' return 'hailo15'
else: else:
raise HailoRTException("Unsupported device architecture.") raise HailoRTException("Unsupported device architecture.")
@@ -2415,31 +2618,9 @@ class VDevice(object):
self._open_vdevice() self._open_vdevice()
def _before_fork(self):
if self._vdevice is not None:
self._vdevice.before_fork()
for configured_network in self._loaded_network_groups:
configured_network._before_fork()
def _after_fork_in_parent(self):
if self._vdevice is not None:
self._vdevice.after_fork_in_parent()
for configured_network in self._loaded_network_groups:
configured_network._after_fork_in_parent()
def _after_fork_in_child(self):
if self._vdevice is not None:
self._vdevice.after_fork_in_child()
for configured_network in self._loaded_network_groups:
configured_network._after_fork_in_child()
def _open_vdevice(self): def _open_vdevice(self):
if self._params is None: if self._params is None:
self._params = VDevice.create_params() self._params = VDevice.create_params()
if sys.platform != "win32" and self._params.multi_process_service:
os.register_at_fork(before=lambda: self._before_fork())
os.register_at_fork(after_in_parent=lambda: self._after_fork_in_parent())
os.register_at_fork(after_in_child=lambda: self._after_fork_in_child())
with ExceptionWrapper(): with ExceptionWrapper():
device_ids = [] if self._device_ids is None else self._device_ids device_ids = [] if self._device_ids is None else self._device_ids
self._vdevice = _pyhailort.VDevice.create(self._params, device_ids) self._vdevice = _pyhailort.VDevice.create(self._params, device_ids)
@@ -2518,20 +2699,18 @@ class InputVStreamParams(object):
"""Parameters of an input virtual stream (host to device).""" """Parameters of an input virtual stream (host to device)."""
@staticmethod @staticmethod
def make(configured_network, quantized=True, format_type=None, timeout_ms=None, queue_size=None, network_name=None): def make(configured_network, quantized=None, format_type=None, timeout_ms=None, queue_size=None, network_name=None):
"""Create input virtual stream params from a configured network group. These params determine the format of the """Create input virtual stream params from a configured network group. These params determine the format of the
data that will be fed into the network group. data that will be fed into the network group.
Args: Args:
configured_network (:class:`ConfiguredNetwork`): The configured network group for which configured_network (:class:`ConfiguredNetwork`): The configured network group for which
the params are created. the params are created.
quantized (bool): Whether the data fed into the chip is already quantized. True means quantized (bool): Deprecated parameter that will be ignored. Determine whether to quantize (scale)
the data is already quantized. False means it's HailoRT's responsibility to quantize the data will be decided by the src-data and dst-data types.
(scale) the data. Defaults to True.
format_type (:class:`~hailo_platform.pyhailort.pyhailort.FormatType`): The format_type (:class:`~hailo_platform.pyhailort.pyhailort.FormatType`): The
default format type of the data for all input virtual streams. If quantized is False, default format type of the data for all input virtual streams.
the default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.FLOAT32`. Otherwise, The default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.AUTO`,
the default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.AUTO`,
which means the data is fed in the same format expected by the device (usually which means the data is fed in the same format expected by the device (usually
uint8). uint8).
timeout_ms (int): The default timeout in milliseconds for all input virtual streams. timeout_ms (int): The default timeout in milliseconds for all input virtual streams.
@@ -2545,10 +2724,9 @@ class InputVStreamParams(object):
params. params.
""" """
if format_type is None: if format_type is None:
if not quantized: format_type = FormatType.AUTO
format_type = FormatType.FLOAT32 if quantized is None:
else: quantized = format_type != FormatType.FLOAT32
format_type = FormatType.AUTO
if timeout_ms is None: if timeout_ms is None:
timeout_ms = DEFAULT_VSTREAM_TIMEOUT_MS timeout_ms = DEFAULT_VSTREAM_TIMEOUT_MS
if queue_size is None: if queue_size is None:
@@ -2559,20 +2737,18 @@ class InputVStreamParams(object):
format_type, timeout_ms, queue_size) format_type, timeout_ms, queue_size)
@staticmethod @staticmethod
def make_from_network_group(configured_network, quantized=True, format_type=None, timeout_ms=None, queue_size=None, network_name=None): def make_from_network_group(configured_network, quantized=None, format_type=None, timeout_ms=None, queue_size=None, network_name=None):
"""Create input virtual stream params from a configured network group. These params determine the format of the """Create input virtual stream params from a configured network group. These params determine the format of the
data that will be fed into the network group. data that will be fed into the network group.
Args: Args:
configured_network (:class:`ConfiguredNetwork`): The configured network group for which configured_network (:class:`ConfiguredNetwork`): The configured network group for which
the params are created. the params are created.
quantized (bool): Whether the data fed into the chip is already quantized. True means quantized (bool): Deprecated parameter that will be ignored. Determine whether to quantize (scale)
the data is already quantized. False means it's HailoRT's responsibility to quantize the data will be decided by the src-data and dst-data types.
(scale) the data. Defaults to True.
format_type (:class:`~hailo_platform.pyhailort.pyhailort.FormatType`): The format_type (:class:`~hailo_platform.pyhailort.pyhailort.FormatType`): The
default format type of the data for all input virtual streams. If quantized is False, default format type of the data for all input virtual streams.
the default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.FLOAT32`. Otherwise, The default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.AUTO`,
the default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.AUTO`,
which means the data is fed in the same format expected by the device (usually which means the data is fed in the same format expected by the device (usually
uint8). uint8).
timeout_ms (int): The default timeout in milliseconds for all input virtual streams. timeout_ms (int): The default timeout in milliseconds for all input virtual streams.
@@ -2592,20 +2768,18 @@ class OutputVStreamParams(object):
"""Parameters of an output virtual stream (device to host).""" """Parameters of an output virtual stream (device to host)."""
@staticmethod @staticmethod
def make(configured_network, quantized=True, format_type=None, timeout_ms=None, queue_size=None, network_name=None): def make(configured_network, quantized=None, format_type=None, timeout_ms=None, queue_size=None, network_name=None):
"""Create output virtual stream params from a configured network group. These params determine the format of the """Create output virtual stream params from a configured network group. These params determine the format of the
data that will be returned from the network group. data that will be returned from the network group.
Args: Args:
configured_network (:class:`ConfiguredNetwork`): The configured network group for which configured_network (:class:`ConfiguredNetwork`): The configured network group for which
the params are created. the params are created.
quantized (bool): Whether the data returned from the chip should be quantized. True means quantized (bool): Deprecated parameter that will be ignored. Determine whether to de-quantize (rescale)
the data is still quantized. False means it's HailoRT's responsibility to de-quantize the data will be decided by the src-data and dst-data types.
(rescale) the data. Defaults to True.
format_type (:class:`~hailo_platform.pyhailort.pyhailort.FormatType`): The format_type (:class:`~hailo_platform.pyhailort.pyhailort.FormatType`): The
default format type of the data for all output virtual streams. If quantized is False, default format type of the data for all output virtual streams.
the default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.FLOAT32`. Otherwise, The default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.AUTO`,
the default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.AUTO`,
which means the returned data is in the same format returned from the device (usually which means the returned data is in the same format returned from the device (usually
uint8). uint8).
timeout_ms (int): The default timeout in milliseconds for all output virtual streams. timeout_ms (int): The default timeout in milliseconds for all output virtual streams.
@@ -2619,10 +2793,9 @@ class OutputVStreamParams(object):
params. params.
""" """
if format_type is None: if format_type is None:
if not quantized: format_type = FormatType.AUTO
format_type = FormatType.FLOAT32 if quantized is None:
else: quantized = format_type != FormatType.FLOAT32
format_type = FormatType.AUTO
if timeout_ms is None: if timeout_ms is None:
timeout_ms = DEFAULT_VSTREAM_TIMEOUT_MS timeout_ms = DEFAULT_VSTREAM_TIMEOUT_MS
if queue_size is None: if queue_size is None:
@@ -2633,21 +2806,19 @@ class OutputVStreamParams(object):
format_type, timeout_ms, queue_size) format_type, timeout_ms, queue_size)
@staticmethod @staticmethod
def make_from_network_group(configured_network, quantized=True, format_type=None, timeout_ms=None, queue_size=None, network_name=None): def make_from_network_group(configured_network, quantized=None, format_type=None, timeout_ms=None, queue_size=None, network_name=None):
"""Create output virtual stream params from a configured network group. These params determine the format of the """Create output virtual stream params from a configured network group. These params determine the format of the
data that will be returned from the network group. data that will be returned from the network group.
Args: Args:
configured_network (:class:`ConfiguredNetwork`): The configured network group for which configured_network (:class:`ConfiguredNetwork`): The configured network group for which
the params are created. the params are created.
quantized (bool): Whether the data returned from the chip is already quantized. True means quantized (bool): Deprecated parameter that will be ignored. Determine whether to de-quantize (rescale)
the data is already quantized. False means it's HailoRT's responsibility to quantize the data will be decided by the src-data and dst-data types.
(scale) the data. Defaults to True.
format_type (:class:`~hailo_platform.pyhailort.pyhailort.FormatType`): The format_type (:class:`~hailo_platform.pyhailort.pyhailort.FormatType`): The
default format type of the data for all output virtual streams. If quantized is False, default format type of the data for all output virtual streams.
the default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.FLOAT32`. Otherwise, The default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.AUTO`,
the default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.AUTO`, which means the returned data is in the same format returned from the device (usually
which means the data is fed in the same format expected by the device (usually
uint8). uint8).
timeout_ms (int): The default timeout in milliseconds for all output virtual streams. timeout_ms (int): The default timeout in milliseconds for all output virtual streams.
Defaults to DEFAULT_VSTREAM_TIMEOUT_MS. In case of timeout, :class:`HailoRTTimeout` will be raised. Defaults to DEFAULT_VSTREAM_TIMEOUT_MS. In case of timeout, :class:`HailoRTTimeout` will be raised.
@@ -2662,21 +2833,19 @@ class OutputVStreamParams(object):
return OutputVStreamParams.make(configured_network, quantized, format_type, timeout_ms, queue_size, network_name) return OutputVStreamParams.make(configured_network, quantized, format_type, timeout_ms, queue_size, network_name)
@staticmethod @staticmethod
def make_groups(configured_network, quantized=True, format_type=None, timeout_ms=None, queue_size=None): def make_groups(configured_network, quantized=None, format_type=None, timeout_ms=None, queue_size=None):
"""Create output virtual stream params from a configured network group. These params determine the format of the """Create output virtual stream params from a configured network group. These params determine the format of the
data that will be returned from the network group. The params groups are splitted with respect to their underlying streams for multi process usges. data that will be returned from the network group. The params groups are splitted with respect to their underlying streams for multi process usges.
Args: Args:
configured_network (:class:`ConfiguredNetwork`): The configured network group for which configured_network (:class:`ConfiguredNetwork`): The configured network group for which
the params are created. the params are created.
quantized (bool): Whether the data returned from the chip is already quantized. True means quantized (bool): Deprecated parameter that will be ignored. Determine whether to de-quantize (rescale)
the data is already quantized. False means it's HailoRT's responsibility to quantize the data will be decided by the src-data and dst-data types.
(scale) the data. Defaults to True.
format_type (:class:`~hailo_platform.pyhailort.pyhailort.FormatType`): The format_type (:class:`~hailo_platform.pyhailort.pyhailort.FormatType`): The
default format type of the data for all output virtual streams. If quantized is False, default format type of the data for all output virtual streams.
the default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.FLOAT32`. Otherwise, The default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.AUTO`,
the default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.AUTO`, which means the returned data is in the same format returned from the device (usually
which means the data is fed in the same format expected by the device (usually
uint8). uint8).
timeout_ms (int): The default timeout in milliseconds for all output virtual streams. timeout_ms (int): The default timeout in milliseconds for all output virtual streams.
Defaults to DEFAULT_VSTREAM_TIMEOUT_MS. In case of timeout, :class:`HailoRTTimeout` will be raised. Defaults to DEFAULT_VSTREAM_TIMEOUT_MS. In case of timeout, :class:`HailoRTTimeout` will be raised.
@@ -2758,19 +2927,6 @@ class InputVStream(object):
with ExceptionWrapper(): with ExceptionWrapper():
return self._send_object.info return self._send_object.info
def _before_fork(self):
if self._send_object is not None:
self._send_object.before_fork()
def _after_fork_in_parent(self):
if self._send_object is not None:
self._send_object.after_fork_in_parent()
def _after_fork_in_child(self):
if self._send_object is not None:
self._send_object.after_fork_in_child()
class InputVStreams(object): class InputVStreams(object):
"""Input vstreams pipelines that allows to send data, to be used as a context manager.""" """Input vstreams pipelines that allows to send data, to be used as a context manager."""
@@ -2820,17 +2976,6 @@ class InputVStreams(object):
def __iter__(self): def __iter__(self):
return iter(self._vstreams.values()) return iter(self._vstreams.values())
def _before_fork(self):
for vstream in self._vstreams.values():
vstream._before_fork()
def _after_fork_in_parent(self):
for vstream in self._vstreams.values():
vstream._after_fork_in_parent()
def _after_fork_in_child(self):
for vstream in self._vstreams.values():
vstream._after_fork_in_child()
class OutputLayerUtils(object): class OutputLayerUtils(object):
@@ -2857,6 +3002,10 @@ class OutputLayerUtils(object):
def output_dtype(self): def output_dtype(self):
return _pyhailort.get_dtype(self._user_buffer_format.type) return _pyhailort.get_dtype(self._user_buffer_format.type)
@property
def output_order(self):
return self._user_buffer_format.order
@property @property
def output_shape(self): def output_shape(self):
return self._output_shape return self._output_shape
@@ -2887,7 +3036,8 @@ class OutputLayerUtils(object):
@property @property
def tf_nms_fomrat_shape(self): def tf_nms_fomrat_shape(self):
if not self.is_nms: # TODO: HRT-11726 - Combine is_nms for HAILO_NMS and NMS_WITH_BYTE_MASK
if not self.is_nms and not self.output_order == FormatOrder.HAILO_NMS_WITH_BYTE_MASK:
raise HailoRTException("Requested NMS info for non-NMS layer") raise HailoRTException("Requested NMS info for non-NMS layer")
nms_shape = self._vstream_info.nms_shape nms_shape = self._vstream_info.nms_shape
return [nms_shape.number_of_classes, BBOX_PARAMS, return [nms_shape.number_of_classes, BBOX_PARAMS,
@@ -2907,6 +3057,11 @@ class OutputVStream(object):
if self._is_nms: if self._is_nms:
self._quantized_empty_bbox = self._output_layer_utils.quantized_empty_bbox self._quantized_empty_bbox = self._output_layer_utils.quantized_empty_bbox
self._tf_nms_format = tf_nms_format self._tf_nms_format = tf_nms_format
self._input_stream_infos = configured_network.get_input_stream_infos()
@property
def output_order(self):
return self._output_layer_utils.output_order
@property @property
def shape(self): def shape(self):
@@ -2936,6 +3091,17 @@ class OutputVStream(object):
with ExceptionWrapper(): with ExceptionWrapper():
result_array = self._recv_object.recv() result_array = self._recv_object.recv()
if self.output_order == FormatOrder.HAILO_NMS_WITH_BYTE_MASK:
nms_shape = self._vstream_info.nms_shape
if len(self._input_stream_infos) != 1:
raise Exception("Output format HAILO_NMS_WITH_BYTE_MASK should have 1 input. Number of inputs: {}".format(len(self._input_stream_infos)))
input_height = self._input_stream_infos[0].shape[0]
input_width = self._input_stream_infos[0].shape[1]
res = HailoRTTransformUtils._output_raw_buffer_to_nms_with_byte_mask_format(result_array,
nms_shape.number_of_classes, 1, input_height, input_width, nms_shape.max_bboxes_per_class,
self._output_dtype, self._tf_nms_format)
return res
if self._is_nms: if self._is_nms:
nms_shape = self._vstream_info.nms_shape nms_shape = self._vstream_info.nms_shape
if self._tf_nms_format: if self._tf_nms_format:
@@ -2957,17 +3123,39 @@ class OutputVStream(object):
with ExceptionWrapper(): with ExceptionWrapper():
return self._recv_object.info return self._recv_object.info
def _before_fork(self): def set_nms_score_threshold(self, threshold):
if self._recv_object is not None: """Set NMS score threshold, used for filtering out candidates. Any box with score<TH is suppressed.
self._recv_object.before_fork()
def _after_fork_in_parent(self): Args:
if self._recv_object is not None: threshold (float): NMS score threshold to set.
self._recv_object.after_fork_in_parent()
def _after_fork_in_child(self): Note:
if self._recv_object is not None: This function will fail in cases where the output vstream has no NMS operations on the CPU.
self._recv_object.after_fork_in_child() """
return self._recv_object.set_nms_score_threshold(threshold)
def set_nms_iou_threshold(self, threshold):
"""Set NMS intersection over union overlap Threshold,
used in the NMS iterative elimination process where potential duplicates of detected items are suppressed.
Args:
threshold (float): NMS IoU threshold to set.
Note:
This function will fail in cases where the output vstream has no NMS operations on the CPU.
"""
return self._recv_object.set_nms_iou_threshold(threshold)
def set_nms_max_proposals_per_class(self, max_proposals_per_class):
"""Set a limit for the maximum number of boxes per class.
Args:
max_proposals_per_class (int): NMS max proposals per class to set.
Note:
This function will fail in cases where the output vstream has no NMS operations on the CPU.
"""
return self._recv_object.set_nms_max_proposals_per_class(max_proposals_per_class)
class OutputVStreams(object): class OutputVStreams(object):
@@ -3032,15 +3220,3 @@ class OutputVStreams(object):
def __iter__(self): def __iter__(self):
return iter(self._vstreams.values()) return iter(self._vstreams.values())
def _before_fork(self):
for vstream in self._vstreams.values():
vstream._before_fork()
def _after_fork_in_parent(self):
for vstream in self._vstreams.values():
vstream._after_fork_in_parent()
def _after_fork_in_child(self):
for vstream in self._vstreams.values():
vstream._after_fork_in_child()

View File

@@ -176,7 +176,9 @@
"with network_group.activate(network_group_params):\n", "with network_group.activate(network_group_params):\n",
" send_process.join()\n", " send_process.join()\n",
" recv_process.join()\n", " recv_process.join()\n",
"print('Done')" "print('Done')\n",
"\n",
"target.release()"
] ]
} }
], ],

View File

@@ -22,11 +22,11 @@
] ]
}, },
{ {
"cell_type": "markdown",
"metadata": {},
"source": [ "source": [
"## Single power measurement" "## Single power measurement"
], ]
"cell_type": "markdown",
"metadata": {}
}, },
{ {
"cell_type": "code", "cell_type": "code",
@@ -156,9 +156,11 @@
" # Get saved power measurement values from the firmware.\n", " # Get saved power measurement values from the firmware.\n",
" measurements = target.control.get_power_measurement(buffer_index=buffer_index, should_clear=should_clear)\n", " measurements = target.control.get_power_measurement(buffer_index=buffer_index, should_clear=should_clear)\n",
" print('Average power is {} W. Min power is {} W. Max power is {} W.\\nAverage time between power samples is {} mS\\n'.format(measurements.average_value, measurements.min_value, measurements.max_value, measurements.average_time_value_milliseconds))\n", " print('Average power is {} W. Min power is {} W. Max power is {} W.\\nAverage time between power samples is {} mS\\n'.format(measurements.average_value, measurements.min_value, measurements.max_value, measurements.average_time_value_milliseconds))\n",
" \n", "\n",
"# Stop performing periodic power measurement\n", "# Stop performing periodic power measurement\n",
"target.control.stop_power_measurement()" "target.control.stop_power_measurement()\n",
"\n",
"target.release()"
] ]
} }
], ],

View File

@@ -11,7 +11,7 @@
"\n", "\n",
"**Requirements:**\n", "**Requirements:**\n",
"\n", "\n",
"* Run HailoRT Multi-Process Service before running inference. See installation steps in [Multi-Process Service](../../inference/inference.rst)\n", "* Enable HailoRT Multi-Process Service before running inference\n",
"* Run the notebook inside the Python virtual environment: ```source hailo_virtualenv/bin/activate```\n", "* Run the notebook inside the Python virtual environment: ```source hailo_virtualenv/bin/activate```\n",
"\n", "\n",
"It is recommended to use the command ``hailo tutorial`` (when inside the virtualenv) to open a Jupyter server that contains the tutorials." "It is recommended to use the command ``hailo tutorial`` (when inside the virtualenv) to open a Jupyter server that contains the tutorials."

View File

@@ -69,6 +69,6 @@ if __name__ == "__main__":
"linux_aarch64", "linux_aarch64",
], ],
url="https://hailo.ai/", url="https://hailo.ai/",
version="4.14.0", version="4.15.0",
zip_safe=False, zip_safe=False,
) )

View File

@@ -34,6 +34,7 @@ set(PYHAILORT_DIR ${CMAKE_CURRENT_LIST_DIR})
pybind11_add_module(_pyhailort pybind11_add_module(_pyhailort
pyhailort.cpp pyhailort.cpp
device_api.cpp device_api.cpp
network_group_api.cpp
hef_api.cpp hef_api.cpp
vstream_api.cpp vstream_api.cpp
quantization_api.cpp quantization_api.cpp
@@ -48,7 +49,7 @@ set_target_properties(_pyhailort PROPERTIES
# VISIBILITY_INLINES_HIDDEN YES # VISIBILITY_INLINES_HIDDEN YES
) )
find_package(HailoRT 4.14.0 EXACT REQUIRED) find_package(HailoRT 4.15.0 EXACT REQUIRED)
target_link_libraries(_pyhailort PRIVATE HailoRT::libhailort) target_link_libraries(_pyhailort PRIVATE HailoRT::libhailort)
if(WIN32) if(WIN32)

View File

@@ -47,6 +47,9 @@ public:
{ {
case HAILO_FORMAT_ORDER_HAILO_NMS: case HAILO_FORMAT_ORDER_HAILO_NMS:
return { HailoRTCommon::get_nms_host_shape_size(vstream_info.nms_shape) }; return { HailoRTCommon::get_nms_host_shape_size(vstream_info.nms_shape) };
case HAILO_FORMAT_ORDER_HAILO_NMS_WITH_BYTE_MASK: {
return { HailoRTCommon::get_nms_with_byte_mask_host_shape_size(vstream_info.nms_shape, user_format) };
}
case HAILO_FORMAT_ORDER_NC: case HAILO_FORMAT_ORDER_NC:
return {shape.features}; return {shape.features};
case HAILO_FORMAT_ORDER_NHW: case HAILO_FORMAT_ORDER_NHW:

View File

@@ -9,6 +9,7 @@
**/ **/
#include "device_api.hpp" #include "device_api.hpp"
#include <memory> #include <memory>
@@ -341,8 +342,11 @@ py::list DeviceWrapper::configure(const HefWrapper &hef,
VALIDATE_EXPECTED(network_groups); VALIDATE_EXPECTED(network_groups);
py::list results; py::list results;
m_net_groups.reserve(m_net_groups.size() + network_groups->size());
for (const auto &network_group : network_groups.value()) { for (const auto &network_group : network_groups.value()) {
results.append(network_group.get()); auto wrapper = ConfiguredNetworkGroupWrapper::create(network_group);
results.append(wrapper);
m_net_groups.emplace_back(wrapper);
} }
return results; return results;

View File

@@ -17,6 +17,7 @@
#include "utils.hpp" #include "utils.hpp"
#include "hef_api.hpp" #include "hef_api.hpp"
#include "network_group_api.hpp"
#include <pybind11/pybind11.h> #include <pybind11/pybind11.h>
@@ -140,6 +141,7 @@ private:
: m_device(std::move(device)) {} : m_device(std::move(device)) {}
std::unique_ptr<Device> m_device; std::unique_ptr<Device> m_device;
std::vector<ConfiguredNetworkGroupWrapperPtr> m_net_groups;
}; };
} /* namespace hailort */ } /* namespace hailort */

View File

@@ -12,7 +12,6 @@
#include "hef_api.hpp" #include "hef_api.hpp"
#include <memory> #include <memory>
namespace hailort namespace hailort
{ {
@@ -200,35 +199,6 @@ py::list HefWrapper::get_networks_names(const std::string &net_group_name)
return py::cast(res); return py::cast(res);
} }
ActivatedAppContextManagerWrapper::ActivatedAppContextManagerWrapper(ConfiguredNetworkGroup &net_group,
const hailo_activate_network_group_params_t &network_group_params) :
m_net_group(net_group), m_network_group_params(network_group_params)
{}
const ActivatedNetworkGroup& ActivatedAppContextManagerWrapper::enter()
{
auto activated = m_net_group.activate(m_network_group_params);
if (activated.status() != HAILO_NOT_IMPLEMENTED) {
VALIDATE_EXPECTED(activated);
m_activated_net_group = activated.release();
}
return std::ref(*m_activated_net_group);
}
void ActivatedAppContextManagerWrapper::exit()
{
m_activated_net_group.reset();
}
void ActivatedAppContextManagerWrapper::add_to_python_module(py::module &m)
{
py::class_<ActivatedAppContextManagerWrapper>(m, "ActivatedApp")
.def("__enter__", &ActivatedAppContextManagerWrapper::enter, py::return_value_policy::reference)
.def("__exit__", [&](ActivatedAppContextManagerWrapper &self, py::args) { self.exit(); })
;
}
void HefWrapper::initialize_python_module(py::module &m) void HefWrapper::initialize_python_module(py::module &m)
{ {
py::class_<HefWrapper>(m, "Hef") py::class_<HefWrapper>(m, "Hef")
@@ -255,209 +225,6 @@ void HefWrapper::initialize_python_module(py::module &m)
.def("get_all_stream_infos", &HefWrapper::get_all_stream_infos) .def("get_all_stream_infos", &HefWrapper::get_all_stream_infos)
.def("get_networks_names", &HefWrapper::get_networks_names) .def("get_networks_names", &HefWrapper::get_networks_names)
; ;
py::class_<ConfiguredNetworkGroup, std::shared_ptr<ConfiguredNetworkGroup>>(m, "ConfiguredNetworkGroup")
.def("is_scheduled", [](ConfiguredNetworkGroup& self)
{
return self.is_scheduled();
})
.def("get_name", [](ConfiguredNetworkGroup& self)
{
return self.name();
})
.def("get_default_streams_interface", [](ConfiguredNetworkGroup& self)
{
auto result = self.get_default_streams_interface();
VALIDATE_EXPECTED(result);
return result.value();
})
.def("activate", [](ConfiguredNetworkGroup& self,
const hailo_activate_network_group_params_t &network_group_params)
{
return ActivatedAppContextManagerWrapper(self, network_group_params);
})
.def("wait_for_activation", [](ConfiguredNetworkGroup& self, uint32_t timeout_ms)
{
auto status = self.wait_for_activation(std::chrono::milliseconds(timeout_ms));
if (status != HAILO_NOT_IMPLEMENTED) {
VALIDATE_STATUS(status);
}
})
.def("InputVStreams", [](ConfiguredNetworkGroup &self, std::map<std::string, hailo_vstream_params_t> &input_vstreams_params)
{
return InputVStreamsWrapper::create(self, input_vstreams_params);
})
.def("OutputVStreams", [](ConfiguredNetworkGroup &self, std::map<std::string, hailo_vstream_params_t> &output_vstreams_params)
{
return OutputVStreamsWrapper::create(self, output_vstreams_params);
})
.def("get_udp_rates_dict", [](ConfiguredNetworkGroup& self, uint32_t fps, uint32_t max_supported_rate_bytes)
{
auto rate_calculator = NetworkUdpRateCalculator::create(self);
VALIDATE_EXPECTED(rate_calculator);
auto udp_input_streams = self.get_input_streams_by_interface(HAILO_STREAM_INTERFACE_ETH);
auto results = rate_calculator->get_udp_ports_rates_dict(udp_input_streams,
fps, max_supported_rate_bytes);
VALIDATE_EXPECTED(results);
return py::cast(results.value());
})
.def("before_fork", [](ConfiguredNetworkGroup& self)
{
auto status = self.before_fork();
VALIDATE_STATUS(status);
})
.def("after_fork_in_parent", [](ConfiguredNetworkGroup& self)
{
auto status = self.after_fork_in_parent();
VALIDATE_STATUS(status);
})
.def("after_fork_in_child", [](ConfiguredNetworkGroup& self)
{
auto status = self.after_fork_in_child();
VALIDATE_STATUS(status);
})
.def("set_scheduler_timeout", [](ConfiguredNetworkGroup& self, int timeout, const std::string &network_name="")
{
auto timeout_mili = std::chrono::milliseconds(timeout);
auto status = self.set_scheduler_timeout(timeout_mili, network_name);
VALIDATE_STATUS(status);
})
.def("set_scheduler_threshold", [](ConfiguredNetworkGroup& self, uint32_t threshold)
{
auto status = self.set_scheduler_threshold(threshold);
VALIDATE_STATUS(status);
})
.def("set_scheduler_priority", [](ConfiguredNetworkGroup& self, uint8_t priority)
{
auto status = self.set_scheduler_priority(priority);
VALIDATE_STATUS(status);
})
.def("get_networks_names", [](ConfiguredNetworkGroup& self)
{
auto network_infos = self.get_network_infos();
VALIDATE_EXPECTED(network_infos);
std::vector<std::string> result;
result.reserve(network_infos->size());
for (const auto &info : network_infos.value()) {
result.push_back(info.name);
}
return py::cast(result);
})
.def("get_sorted_output_names", [](ConfiguredNetworkGroup& self)
{
auto names_list = self.get_sorted_output_names();
VALIDATE_EXPECTED(names_list);
return py::cast(names_list.release());
})
.def("get_input_vstream_infos", [](ConfiguredNetworkGroup& self, const std::string &name)
{
auto result = self.get_input_vstream_infos(name);
VALIDATE_EXPECTED(result);
return py::cast(result.value());
})
.def("get_output_vstream_infos", [](ConfiguredNetworkGroup& self, const std::string &name)
{
auto result = self.get_output_vstream_infos(name);
VALIDATE_EXPECTED(result);
return py::cast(result.value());
})
.def("get_all_vstream_infos", [](ConfiguredNetworkGroup& self, const std::string &name)
{
auto result = self.get_all_vstream_infos(name);
VALIDATE_EXPECTED(result);
return py::cast(result.value());
})
.def("get_all_stream_infos", [](ConfiguredNetworkGroup& self, const std::string &name)
{
auto result = self.get_all_stream_infos(name);
VALIDATE_EXPECTED(result);
return py::cast(result.value());
})
.def("get_input_stream_infos", [](ConfiguredNetworkGroup& self, const std::string &name)
{
std::vector<hailo_stream_info_t> input_streams_infos;
auto all_streams = self.get_all_stream_infos(name);
VALIDATE_EXPECTED(all_streams);
for (auto &info : all_streams.value()) {
if (HAILO_H2D_STREAM == info.direction) {
input_streams_infos.push_back(std::move(info));
}
}
return py::cast(input_streams_infos);
})
.def("get_output_stream_infos", [](ConfiguredNetworkGroup& self, const std::string &name)
{
std::vector<hailo_stream_info_t> output_streams_infos;
auto all_streams = self.get_all_stream_infos(name);
VALIDATE_EXPECTED(all_streams);
for (auto &info : all_streams.value()) {
if (HAILO_D2H_STREAM == info.direction) {
output_streams_infos.push_back(std::move(info));
}
}
return py::cast(output_streams_infos);
})
.def("get_vstream_names_from_stream_name", [](ConfiguredNetworkGroup& self, const std::string &stream_name)
{
auto result = self.get_vstream_names_from_stream_name(stream_name);
VALIDATE_EXPECTED(result);
return py::cast(result.release());
})
.def("get_stream_names_from_vstream_name", [](ConfiguredNetworkGroup& self, const std::string &vstream_name)
{
auto result = self.get_stream_names_from_vstream_name(vstream_name);
VALIDATE_EXPECTED(result);
return py::cast(result.release());
})
.def("make_input_vstream_params", [](ConfiguredNetworkGroup& self, const std::string &name, bool quantized, hailo_format_type_t format_type,
uint32_t timeout_ms, uint32_t queue_size)
{
auto result = self.make_input_vstream_params(quantized, format_type, timeout_ms, queue_size, name);
VALIDATE_EXPECTED(result);
return py::cast(result.release());
})
.def("make_output_vstream_params", [](ConfiguredNetworkGroup& self, const std::string &name, bool quantized, hailo_format_type_t format_type,
uint32_t timeout_ms, uint32_t queue_size)
{
auto result = self.make_output_vstream_params(quantized, format_type, timeout_ms, queue_size, name);
VALIDATE_EXPECTED(result);
return py::cast(result.release());
})
.def(py::pickle(
[](const ConfiguredNetworkGroup &cng) { // __getstate__
auto handle = cng.get_client_handle();
VALIDATE_EXPECTED(handle);
return py::make_tuple(handle.value(), cng.name());
},
[](py::tuple t) { // __setstate__
auto handle = t[0].cast<uint32_t>();
auto net_group_name = t[1].cast<std::string>();
auto net_group = ConfiguredNetworkGroup::duplicate_network_group_client(handle, net_group_name);
VALIDATE_EXPECTED(net_group);
return net_group.value();
}
))
;
ActivatedAppContextManagerWrapper::add_to_python_module(m);
py::class_<ActivatedNetworkGroup>(m, "ActivatedNetworkGroup")
.def("get_intermediate_buffer", [](ActivatedNetworkGroup& self, uint8_t src_context_index,
uint8_t src_stream_index)
{
auto buff = self.get_intermediate_buffer(std::make_pair(src_context_index, src_stream_index));
VALIDATE_EXPECTED(buff);
return py::bytes(reinterpret_cast<char*>(buff->data()), buff->size());
})
.def("get_invalid_frames_count", [](ActivatedNetworkGroup& self)
{
return self.get_invalid_frames_count();
})
;
} }
} /* namespace hailort */ } /* namespace hailort */

View File

@@ -5,8 +5,6 @@
/** /**
* @file hef_api.hpp * @file hef_api.hpp
* @brief Defines binding to an HEF class, and network_group usage over Python. * @brief Defines binding to an HEF class, and network_group usage over Python.
*
* TODO: doc
**/ **/
#ifndef HEF_API_HPP_ #ifndef HEF_API_HPP_
@@ -72,20 +70,6 @@ private:
std::unique_ptr<Hef> hef; std::unique_ptr<Hef> hef;
}; };
class ActivatedAppContextManagerWrapper final
{
public:
ActivatedAppContextManagerWrapper(ConfiguredNetworkGroup &net_group,
const hailo_activate_network_group_params_t &network_group_params);
const ActivatedNetworkGroup& enter();
void exit();
static void add_to_python_module(py::module &m);
private:
std::unique_ptr<ActivatedNetworkGroup> m_activated_net_group;
ConfiguredNetworkGroup &m_net_group;
hailo_activate_network_group_params_t m_network_group_params;
};
} /* namespace hailort */ } /* namespace hailort */

View File

@@ -1,5 +1,8 @@
cmake_minimum_required(VERSION 3.15.0) cmake_minimum_required(VERSION 3.15.0)
include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/spdlog.cmake)
include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/readerwriterqueue.cmake)
pybind11_add_module(_pyhailort_internal SHARED pybind11_add_module(_pyhailort_internal SHARED
pyhailort_internal.cpp pyhailort_internal.cpp
control_api.cpp control_api.cpp
@@ -27,6 +30,7 @@ target_link_libraries(_pyhailort_internal PRIVATE
hef_proto hef_proto
spdlog::spdlog spdlog::spdlog
readerwriterqueue readerwriterqueue
profiler_proto
scheduler_mon_proto) scheduler_mon_proto)
if(HAILO_BUILD_SERVICE) if(HAILO_BUILD_SERVICE)
target_link_libraries(_pyhailort_internal PRIVATE grpc++_unsecure hailort_rpc_grpc_proto) target_link_libraries(_pyhailort_internal PRIVATE grpc++_unsecure hailort_rpc_grpc_proto)

View File

@@ -212,10 +212,10 @@ void PyhailortInternal::demux_output_buffer(
void PyhailortInternal::transform_input_buffer( void PyhailortInternal::transform_input_buffer(
py::array src, const hailo_format_t &src_format, const hailo_3d_image_shape_t &src_shape, py::array src, const hailo_format_t &src_format, const hailo_3d_image_shape_t &src_shape,
uintptr_t dst, size_t dst_size, const hailo_format_t &dst_format, const hailo_3d_image_shape_t &dst_shape, uintptr_t dst, size_t dst_size, const hailo_format_t &dst_format, const hailo_3d_image_shape_t &dst_shape,
const hailo_quant_info_t &dst_quant_info) const std::vector<hailo_quant_info_t> &dst_quant_infos)
{ {
auto transform_context = InputTransformContext::create(src_shape, src_format, dst_shape, dst_format, auto transform_context = InputTransformContext::create(src_shape, src_format, dst_shape, dst_format,
dst_quant_info); dst_quant_infos);
VALIDATE_EXPECTED(transform_context); VALIDATE_EXPECTED(transform_context);
MemoryView dst_buffer(reinterpret_cast<uint8_t*>(dst), dst_size); MemoryView dst_buffer(reinterpret_cast<uint8_t*>(dst), dst_size);
@@ -228,10 +228,10 @@ void PyhailortInternal::transform_input_buffer(
void PyhailortInternal::transform_output_buffer( void PyhailortInternal::transform_output_buffer(
py::bytes src, const hailo_format_t &src_format, const hailo_3d_image_shape_t &src_shape, py::bytes src, const hailo_format_t &src_format, const hailo_3d_image_shape_t &src_shape,
py::array dst, const hailo_format_t &dst_format, const hailo_3d_image_shape_t &dst_shape, py::array dst, const hailo_format_t &dst_format, const hailo_3d_image_shape_t &dst_shape,
const hailo_quant_info_t &dst_quant_info) const std::vector<hailo_quant_info_t> &dst_quant_infos)
{ {
auto transform_context = OutputTransformContext::create(src_shape, src_format, dst_shape, dst_format, auto transform_context = OutputTransformContext::create(src_shape, src_format, dst_shape, dst_format,
dst_quant_info, {}); dst_quant_infos, {});
VALIDATE_EXPECTED(transform_context); VALIDATE_EXPECTED(transform_context);
const auto src_str = static_cast<std::string>(src); const auto src_str = static_cast<std::string>(src);
@@ -244,10 +244,10 @@ void PyhailortInternal::transform_output_buffer(
void PyhailortInternal::transform_output_buffer_nms( void PyhailortInternal::transform_output_buffer_nms(
py::bytes src, const hailo_format_t &src_format, const hailo_3d_image_shape_t &src_shape, py::bytes src, const hailo_format_t &src_format, const hailo_3d_image_shape_t &src_shape,
py::array dst, const hailo_format_t &dst_format, const hailo_3d_image_shape_t &dst_shape, py::array dst, const hailo_format_t &dst_format, const hailo_3d_image_shape_t &dst_shape,
const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info) const std::vector<hailo_quant_info_t> &dst_quant_infos, const hailo_nms_info_t &nms_info)
{ {
auto transform_context = OutputTransformContext::create(src_shape, src_format, dst_shape, dst_format, auto transform_context = OutputTransformContext::create(src_shape, src_format, dst_shape, dst_format,
dst_quant_info, nms_info); dst_quant_infos, nms_info);
VALIDATE_EXPECTED(transform_context); VALIDATE_EXPECTED(transform_context);
const auto src_str = static_cast<std::string>(src); const auto src_str = static_cast<std::string>(src);
@@ -260,19 +260,25 @@ void PyhailortInternal::transform_output_buffer_nms(
bool PyhailortInternal::is_input_transformation_required( bool PyhailortInternal::is_input_transformation_required(
const hailo_3d_image_shape_t &src_shape, const hailo_format_t &src_format, const hailo_3d_image_shape_t &src_shape, const hailo_format_t &src_format,
const hailo_3d_image_shape_t &dst_shape, const hailo_format_t &dst_format, const hailo_3d_image_shape_t &dst_shape, const hailo_format_t &dst_format,
const hailo_quant_info_t &quant_info) const std::vector<hailo_quant_info_t> &quant_infos)
{ {
return InputTransformContext::is_transformation_required(src_shape, src_format, dst_shape, dst_format, auto expected_is_transforamtion_required = InputTransformContext::is_transformation_required(src_shape, src_format, dst_shape, dst_format,
quant_info); quant_infos);
VALIDATE_EXPECTED(expected_is_transforamtion_required);
return expected_is_transforamtion_required.release();
} }
bool PyhailortInternal::is_output_transformation_required( bool PyhailortInternal::is_output_transformation_required(
const hailo_3d_image_shape_t &src_shape, const hailo_format_t &src_format, const hailo_3d_image_shape_t &src_shape, const hailo_format_t &src_format,
const hailo_3d_image_shape_t &dst_shape, const hailo_format_t &dst_format, const hailo_3d_image_shape_t &dst_shape, const hailo_format_t &dst_format,
const hailo_quant_info_t &quant_info) const std::vector<hailo_quant_info_t> &quant_infos)
{ {
return OutputTransformContext::is_transformation_required(src_shape, src_format, dst_shape, dst_format, auto expected_is_transforamtion_required = OutputTransformContext::is_transformation_required(src_shape, src_format, dst_shape, dst_format,
quant_info); quant_infos);
VALIDATE_EXPECTED(expected_is_transforamtion_required);
return expected_is_transforamtion_required.release();
} }
py::list PyhailortInternal::get_all_layers_info(const HefWrapper &hef, const std::string &net_group_name) py::list PyhailortInternal::get_all_layers_info(const HefWrapper &hef, const std::string &net_group_name)
@@ -309,6 +315,9 @@ PYBIND11_MODULE(_pyhailort_internal, m) {
py::class_<LayerInfo>(m, "HailoLayerInfo", py::module_local()) py::class_<LayerInfo>(m, "HailoLayerInfo", py::module_local())
.def_readonly("is_mux", &LayerInfo::is_mux) .def_readonly("is_mux", &LayerInfo::is_mux)
.def_readonly("mux_predecessors", &LayerInfo::predecessor) .def_readonly("mux_predecessors", &LayerInfo::predecessor)
.def_readonly("is_multi_planar", &LayerInfo::is_multi_planar)
.def_readonly("planes", &LayerInfo::planes)
.def_readonly("plane_index", &LayerInfo::plane_index)
.def_readonly("is_defused_nms", &LayerInfo::is_defused_nms) .def_readonly("is_defused_nms", &LayerInfo::is_defused_nms)
.def_readonly("fused_nms_layer", &LayerInfo::fused_nms_layer) .def_readonly("fused_nms_layer", &LayerInfo::fused_nms_layer)
.def_property_readonly("shape", [](LayerInfo& self) .def_property_readonly("shape", [](LayerInfo& self)
@@ -359,7 +368,7 @@ PYBIND11_MODULE(_pyhailort_internal, m) {
.def_readonly("direction", &LayerInfo::direction) .def_readonly("direction", &LayerInfo::direction)
.def_readonly("sys_index", &LayerInfo::stream_index) .def_readonly("sys_index", &LayerInfo::stream_index)
.def_readonly("name", &LayerInfo::name) .def_readonly("name", &LayerInfo::name)
.def_readonly("quant_info", &LayerInfo::quant_info) .def_readonly("quant_infos", &LayerInfo::quant_infos)
// For backwards compatibility (accessing qp through layer_info directly) // For backwards compatibility (accessing qp through layer_info directly)
.def_property_readonly("qp_zp", [](LayerInfo& self) .def_property_readonly("qp_zp", [](LayerInfo& self)
{ {

View File

@@ -34,17 +34,17 @@ public:
std::map<std::string, py::array> dst_buffers, const LayerInfo &mux_layer_info); std::map<std::string, py::array> dst_buffers, const LayerInfo &mux_layer_info);
static void transform_input_buffer(py::array src, const hailo_format_t &src_format, const hailo_3d_image_shape_t &src_shape, static void transform_input_buffer(py::array src, const hailo_format_t &src_format, const hailo_3d_image_shape_t &src_shape,
uintptr_t dst, size_t dst_size, const hailo_format_t &dst_format, const hailo_3d_image_shape_t &dst_shape, uintptr_t dst, size_t dst_size, const hailo_format_t &dst_format, const hailo_3d_image_shape_t &dst_shape,
const hailo_quant_info_t &dst_quant_info); const std::vector<hailo_quant_info_t> &dst_quant_infos);
static void transform_output_buffer(py::bytes src, const hailo_format_t &src_format, static void transform_output_buffer(py::bytes src, const hailo_format_t &src_format,
const hailo_3d_image_shape_t &src_shape, py::array dst, const hailo_format_t &dst_format, const hailo_3d_image_shape_t &src_shape, py::array dst, const hailo_format_t &dst_format,
const hailo_3d_image_shape_t &dst_shape, const hailo_quant_info_t &dst_quant_info); const hailo_3d_image_shape_t &dst_shape, const std::vector<hailo_quant_info_t> &dst_quant_infos);
static void transform_output_buffer_nms(py::bytes src, const hailo_format_t &src_format, static void transform_output_buffer_nms(py::bytes src, const hailo_format_t &src_format,
const hailo_3d_image_shape_t &src_shape, py::array dst, const hailo_format_t &dst_format, const hailo_3d_image_shape_t &src_shape, py::array dst, const hailo_format_t &dst_format,
const hailo_3d_image_shape_t &dst_shape, const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info); const hailo_3d_image_shape_t &dst_shape, const std::vector<hailo_quant_info_t> &dst_quant_infos, const hailo_nms_info_t &nms_info);
static bool is_input_transformation_required(const hailo_3d_image_shape_t &src_shape, const hailo_format_t &src_format, static bool is_input_transformation_required(const hailo_3d_image_shape_t &src_shape, const hailo_format_t &src_format,
const hailo_3d_image_shape_t &dst_shape, const hailo_format_t &dst_format, const hailo_quant_info_t &quant_info); const hailo_3d_image_shape_t &dst_shape, const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &quant_infos);
static bool is_output_transformation_required(const hailo_3d_image_shape_t &src_shape, const hailo_format_t &src_format, static bool is_output_transformation_required(const hailo_3d_image_shape_t &src_shape, const hailo_format_t &src_format,
const hailo_3d_image_shape_t &dst_shape, const hailo_format_t &dst_format, const hailo_quant_info_t &quant_info); const hailo_3d_image_shape_t &dst_shape, const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &quant_infos);
static py::list get_all_layers_info(const HefWrapper &hef, const std::string &net_group_name); static py::list get_all_layers_info(const HefWrapper &hef, const std::string &net_group_name);
}; };

View File

@@ -0,0 +1,95 @@
/**
* Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file network_group_api.cpp
**/
#include "network_group_api.hpp"
namespace hailort
{
void ConfiguredNetworkGroupWrapper::add_to_python_module(py::module &m)
{
    // Expose the wrapper to Python as "ConfiguredNetworkGroup", held by shared_ptr.
    auto cls = py::class_<ConfiguredNetworkGroupWrapper, ConfiguredNetworkGroupWrapperPtr>(m, "ConfiguredNetworkGroup");

    // State / identification
    cls.def("is_scheduled", &ConfiguredNetworkGroupWrapper::is_scheduled);
    cls.def("get_name", &ConfiguredNetworkGroupWrapper::get_name);
    cls.def("get_default_streams_interface", &ConfiguredNetworkGroupWrapper::get_default_streams_interface);

    // Activation
    cls.def("activate", &ConfiguredNetworkGroupWrapper::activate);
    cls.def("wait_for_activation", &ConfiguredNetworkGroupWrapper::wait_for_activation);

    // VStream pipeline creation
    cls.def("InputVStreams", &ConfiguredNetworkGroupWrapper::InputVStreams);
    cls.def("OutputVStreams", &ConfiguredNetworkGroupWrapper::OutputVStreams);

    cls.def("get_udp_rates_dict", &ConfiguredNetworkGroupWrapper::get_udp_rates_dict);

    // Scheduler configuration
    cls.def("set_scheduler_timeout", &ConfiguredNetworkGroupWrapper::set_scheduler_timeout);
    cls.def("set_scheduler_threshold", &ConfiguredNetworkGroupWrapper::set_scheduler_threshold);
    cls.def("set_scheduler_priority", &ConfiguredNetworkGroupWrapper::set_scheduler_priority);

    // Introspection queries
    cls.def("get_networks_names", &ConfiguredNetworkGroupWrapper::get_networks_names);
    cls.def("get_sorted_output_names", &ConfiguredNetworkGroupWrapper::get_sorted_output_names);
    cls.def("get_input_vstream_infos", &ConfiguredNetworkGroupWrapper::get_input_vstream_infos);
    cls.def("get_output_vstream_infos", &ConfiguredNetworkGroupWrapper::get_output_vstream_infos);
    cls.def("get_all_vstream_infos", &ConfiguredNetworkGroupWrapper::get_all_vstream_infos);
    cls.def("get_all_stream_infos", &ConfiguredNetworkGroupWrapper::get_all_stream_infos);
    cls.def("get_input_stream_infos", &ConfiguredNetworkGroupWrapper::get_input_stream_infos);
    cls.def("get_output_stream_infos", &ConfiguredNetworkGroupWrapper::get_output_stream_infos);
    cls.def("get_vstream_names_from_stream_name", &ConfiguredNetworkGroupWrapper::get_vstream_names_from_stream_name);
    cls.def("get_stream_names_from_vstream_name", &ConfiguredNetworkGroupWrapper::get_stream_names_from_vstream_name);

    // VStream params builders
    cls.def("make_input_vstream_params", &ConfiguredNetworkGroupWrapper::make_input_vstream_params);
    cls.def("make_output_vstream_params", &ConfiguredNetworkGroupWrapper::make_output_vstream_params);

    // Pickle support (multi-process flow).
    cls.def(py::pickle(&ConfiguredNetworkGroupWrapper::pickle_get_state, &ConfiguredNetworkGroupWrapper::pickle_set_state));
}
// Stores the network group reference and a copy of the activation params;
// activation itself is deferred until enter() is called.
ActivatedAppContextManagerWrapper::ActivatedAppContextManagerWrapper(ConfiguredNetworkGroup &cng,
    const hailo_activate_network_group_params_t &params)
    : m_net_group(cng),
      m_network_group_params(params)
{}
// __enter__ of the Python context manager: activates the network group and returns
// the activation object by reference (owned by this wrapper until exit()).
const ActivatedNetworkGroup& ActivatedAppContextManagerWrapper::enter()
{
    auto activated = m_net_group.activate(m_network_group_params);
    // HAILO_NOT_IMPLEMENTED is deliberately tolerated — presumably some configurations
    // (e.g. scheduler-managed) do not support explicit activation; TODO confirm.
    if (activated.status() != HAILO_NOT_IMPLEMENTED) {
        VALIDATE_EXPECTED(activated);
        m_activated_net_group = activated.release();
    }
    // NOTE(review): if activate() returned HAILO_NOT_IMPLEMENTED, m_activated_net_group
    // is still null and the dereference below is undefined behavior — verify callers
    // never reach enter() in that mode.
    return std::ref(*m_activated_net_group);
}
// __exit__ of the Python context manager: discard the held activation object.
void ActivatedAppContextManagerWrapper::exit()
{
    m_activated_net_group = nullptr;
}
// Registers the "ActivatedApp" context-manager binding and the ActivatedNetworkGroup
// binding on module m.
void ActivatedAppContextManagerWrapper::add_to_python_module(py::module &m)
{
    py::class_<ActivatedAppContextManagerWrapper>(m, "ActivatedApp")
        .def("__enter__", &ActivatedAppContextManagerWrapper::enter, py::return_value_policy::reference)
        // Fix: the lambda previously used a by-reference capture-default ([&]) although
        // it captures nothing. A stateless capture list removes the risk of a dangling
        // capture being introduced silently by a later edit.
        .def("__exit__", [](ActivatedAppContextManagerWrapper &self, py::args) { self.exit(); })
        ;

    py::class_<ActivatedNetworkGroup>(m, "ActivatedNetworkGroup")
        // Returns a raw copy of an intermediate (context-to-context) buffer as bytes,
        // keyed by (source context index, source stream index).
        .def("get_intermediate_buffer", [](ActivatedNetworkGroup& self, uint8_t src_context_index,
            uint8_t src_stream_index)
        {
            auto buff = self.get_intermediate_buffer(std::make_pair(src_context_index, src_stream_index));
            VALIDATE_EXPECTED(buff);
            return py::bytes(reinterpret_cast<char*>(buff->data()), buff->size());
        })
        .def("get_invalid_frames_count", [](ActivatedNetworkGroup& self)
        {
            return self.get_invalid_frames_count();
        })
        ;
}
// Registers all network-group related Python bindings on module m.
void NetworkGroup_api_initialize_python_module(py::module &m)
{
    // "ConfiguredNetworkGroup" class bindings.
    ConfiguredNetworkGroupWrapper::add_to_python_module(m);
    // "ActivatedApp" context manager and "ActivatedNetworkGroup" bindings.
    ActivatedAppContextManagerWrapper::add_to_python_module(m);
}
} /* namespace hailort */

View File

@@ -0,0 +1,325 @@
/**
* Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
* Distributed under the MIT license (https://opensource.org/licenses/MIT)
**/
/**
* @file network_group_api.hpp
* @brief Defines binding to network group
**/
#ifndef _HAILO_NETWORK_GROUP_API_HPP_
#define _HAILO_NETWORK_GROUP_API_HPP_
#include "utils.hpp"
#include "vstream_api.hpp"
#include "common/fork_support.hpp"
#include "hailo/network_group.hpp"
#include "hailo/network_rate_calculator.hpp"
namespace hailort
{
/**
 * Python context-manager ("with" statement) helper: activates a network group on
 * __enter__ and releases the activation on __exit__.
 */
class ActivatedAppContextManagerWrapper final
{
public:
    // Stores the network group reference and a copy of the activation params;
    // activation itself is deferred to enter().
    ActivatedAppContextManagerWrapper(ConfiguredNetworkGroup &net_group,
        const hailo_activate_network_group_params_t &network_group_params);
    // Activates the network group; returns the activation object owned by this wrapper.
    const ActivatedNetworkGroup& enter();
    // Releases the held activation object.
    void exit();
    // Registers the "ActivatedApp" and "ActivatedNetworkGroup" Python bindings.
    static void add_to_python_module(py::module &m);
private:
    std::unique_ptr<ActivatedNetworkGroup> m_activated_net_group;   // null until enter()
    ConfiguredNetworkGroup &m_net_group;                            // not owned
    hailo_activate_network_group_params_t m_network_group_params;
};
class ConfiguredNetworkGroupWrapper;
using ConfiguredNetworkGroupWrapperPtr = std::shared_ptr<ConfiguredNetworkGroupWrapper>;
// Python-facing wrapper around ConfiguredNetworkGroup.
// Lifetime: the network group is held via weak_ptr (m_cng) so the owning
// Device/VDevice controls destruction order — see the member comments below.
class ConfiguredNetworkGroupWrapper final {
public:
    // Factory: wrap an existing network group in a shared wrapper.
    static ConfiguredNetworkGroupWrapperPtr create(std::shared_ptr<ConfiguredNetworkGroup> cng)
    {
        return std::make_shared<ConfiguredNetworkGroupWrapper>(cng);
    }

    // store_guard_for_multi_process: when true, the wrapper additionally keeps a
    // shared_ptr (i.e. owns the network group) — used when reconstructed via pickle.
    ConfiguredNetworkGroupWrapper(std::shared_ptr<ConfiguredNetworkGroup> cng, bool store_guard_for_multi_process = false) :
        m_cng(cng)
#ifdef HAILO_IS_FORK_SUPPORTED
        ,
        // Register per-instance fork hooks (see before_fork / after_fork_* below).
        m_atfork_guard(this, {
            .before_fork = [this]() { before_fork(); },
            .after_fork_in_parent = [this]() { after_fork_in_parent(); },
            .after_fork_in_child = [this]() { after_fork_in_child(); }
        })
#endif
    {
        if (store_guard_for_multi_process) {
            m_cng_guard_for_mt = cng;
        }
    }

    // True when the network group runs under the scheduler.
    auto is_scheduled()
    {
        return get().is_scheduled();
    }

    auto get_name()
    {
        return get().name();
    }

    auto get_default_streams_interface()
    {
        auto result = get().get_default_streams_interface();
        VALIDATE_EXPECTED(result);
        return result.value();
    }

    // Returns a context-manager object; activation happens in its __enter__.
    auto activate(const hailo_activate_network_group_params_t &network_group_params)
    {
        return ActivatedAppContextManagerWrapper(get(), network_group_params);
    }

    void wait_for_activation(uint32_t timeout_ms)
    {
        auto status = get().wait_for_activation(std::chrono::milliseconds(timeout_ms));
        // HAILO_NOT_IMPLEMENTED is deliberately ignored — presumably some modes do not
        // support waiting for activation; TODO confirm.
        if (status != HAILO_NOT_IMPLEMENTED) {
            VALIDATE_STATUS(status);
        }
    }

    auto InputVStreams(const std::map<std::string, hailo_vstream_params_t> &input_vstreams_params)
    {
        return InputVStreamsWrapper::create(get(), input_vstreams_params);
    }

    auto OutputVStreams(const std::map<std::string, hailo_vstream_params_t> &output_vstreams_params)
    {
        return OutputVStreamsWrapper::create(get(), output_vstreams_params);
    }

    // Computes recommended UDP rates for the Ethernet input streams.
    auto get_udp_rates_dict(uint32_t fps, uint32_t max_supported_rate_bytes)
    {
        auto rate_calculator = NetworkUdpRateCalculator::create(get());
        VALIDATE_EXPECTED(rate_calculator);

        auto udp_input_streams = get().get_input_streams_by_interface(HAILO_STREAM_INTERFACE_ETH);
        auto results = rate_calculator->get_udp_ports_rates_dict(udp_input_streams,
            fps, max_supported_rate_bytes);
        VALIDATE_EXPECTED(results);

        return py::cast(results.value());
    }

    // timeout is in milliseconds; empty network_name applies to the whole group.
    void set_scheduler_timeout(int timeout, const std::string &network_name="")
    {
        auto timeout_mili = std::chrono::milliseconds(timeout);
        auto status = get().set_scheduler_timeout(timeout_mili, network_name);
        VALIDATE_STATUS(status);
    }

    void set_scheduler_threshold(uint32_t threshold)
    {
        auto status = get().set_scheduler_threshold(threshold);
        VALIDATE_STATUS(status);
    }

    void set_scheduler_priority(uint8_t priority)
    {
        auto status = get().set_scheduler_priority(priority);
        VALIDATE_STATUS(status);
    }

    // Returns a Python list of the names of the networks in this group.
    auto get_networks_names()
    {
        auto network_infos = get().get_network_infos();
        VALIDATE_EXPECTED(network_infos);
        std::vector<std::string> result;
        result.reserve(network_infos->size());
        for (const auto &info : network_infos.value()) {
            result.push_back(info.name);
        }
        return py::cast(result);
    }

    auto get_sorted_output_names()
    {
        auto names_list = get().get_sorted_output_names();
        VALIDATE_EXPECTED(names_list);
        return py::cast(names_list.release());
    }

    auto get_input_vstream_infos(const std::string &name)
    {
        auto result = get().get_input_vstream_infos(name);
        VALIDATE_EXPECTED(result);
        return py::cast(result.value());
    }

    auto get_output_vstream_infos(const std::string &name)
    {
        auto result = get().get_output_vstream_infos(name);
        VALIDATE_EXPECTED(result);
        return py::cast(result.value());
    }

    auto get_all_vstream_infos(const std::string &name)
    {
        auto result = get().get_all_vstream_infos(name);
        VALIDATE_EXPECTED(result);
        return py::cast(result.value());
    }

    auto get_all_stream_infos(const std::string &name)
    {
        auto result = get().get_all_stream_infos(name);
        VALIDATE_EXPECTED(result);
        return py::cast(result.value());
    }

    // Filters get_all_stream_infos() down to host-to-device (input) streams.
    auto get_input_stream_infos(const std::string &name)
    {
        std::vector<hailo_stream_info_t> input_streams_infos;
        auto all_streams = get().get_all_stream_infos(name);
        VALIDATE_EXPECTED(all_streams);
        for (auto &info : all_streams.value()) {
            if (HAILO_H2D_STREAM == info.direction) {
                input_streams_infos.push_back(std::move(info));
            }
        }
        return py::cast(input_streams_infos);
    }

    // Filters get_all_stream_infos() down to device-to-host (output) streams.
    auto get_output_stream_infos(const std::string &name)
    {
        std::vector<hailo_stream_info_t> output_streams_infos;
        auto all_streams = get().get_all_stream_infos(name);
        VALIDATE_EXPECTED(all_streams);
        for (auto &info : all_streams.value()) {
            if (HAILO_D2H_STREAM == info.direction) {
                output_streams_infos.push_back(std::move(info));
            }
        }
        return py::cast(output_streams_infos);
    }

    auto get_vstream_names_from_stream_name(const std::string &stream_name)
    {
        auto result = get().get_vstream_names_from_stream_name(stream_name);
        VALIDATE_EXPECTED(result);
        return py::cast(result.release());
    }

    auto get_stream_names_from_vstream_name(const std::string &vstream_name)
    {
        auto result = get().get_stream_names_from_vstream_name(vstream_name);
        VALIDATE_EXPECTED(result);
        return py::cast(result.release());
    }

    auto make_input_vstream_params(const std::string &name, bool quantized, hailo_format_type_t format_type,
        uint32_t timeout_ms, uint32_t queue_size)
    {
        auto result = get().make_input_vstream_params(quantized, format_type, timeout_ms, queue_size, name);
        VALIDATE_EXPECTED(result);
        return py::cast(result.release());
    }

    auto make_output_vstream_params(const std::string &name, bool quantized, hailo_format_type_t format_type,
        uint32_t timeout_ms, uint32_t queue_size)
    {
        auto result = get().make_output_vstream_params(quantized, format_type, timeout_ms, queue_size, name);
        VALIDATE_EXPECTED(result);
        return py::cast(result.release());
    }

    // Locks the weak_ptr; fails with HAILO_INTERNAL_FAILURE if the network group
    // was already released by its owner.
    ConfiguredNetworkGroup &get()
    {
        auto cng = m_cng.lock();
        VALIDATE_NOT_NULL(cng, HAILO_INTERNAL_FAILURE);
        return *cng;
    }

    ConfiguredNetworkGroup &get() const
    {
        auto cng = m_cng.lock();
        VALIDATE_NOT_NULL(cng, HAILO_INTERNAL_FAILURE);
        return *cng;
    }

    // Fork hooks: forwarded to the network group only if it is still alive.
    void before_fork()
    {
        auto cng = m_cng.lock();
        if (cng) {
            cng->before_fork();
        }
    }

    void after_fork_in_parent()
    {
        auto cng = m_cng.lock();
        if (cng) {
            cng->after_fork_in_parent();
        }
    }

    void after_fork_in_child()
    {
        auto cng = m_cng.lock();
        if (cng) {
            cng->after_fork_in_child();
        }
    }

    // __getstate__: serializes the (client handle, vdevice handle, name) triple.
    static auto pickle_get_state(const ConfiguredNetworkGroupWrapper &self)
    {
        auto handle = self.get().get_client_handle();
        VALIDATE_EXPECTED(handle);
        auto vdevice_handle = self.get().get_vdevice_client_handle();
        VALIDATE_EXPECTED(vdevice_handle);
        return py::make_tuple(handle.value(), vdevice_handle.value(), self.get().name());
    }

    // __setstate__: duplicates the network-group client from the pickled handles.
    // The rebuilt wrapper owns the network group (store_guard_for_multi_process).
    static auto pickle_set_state(py::tuple t)
    {
        auto handle = t[0].cast<uint32_t>();
        auto vdevice_handle = t[1].cast<uint32_t>();
        auto net_group_name = t[2].cast<std::string>();
        auto net_group = ConfiguredNetworkGroup::duplicate_network_group_client(handle, vdevice_handle, net_group_name);
        VALIDATE_EXPECTED(net_group);

        const bool store_guard_for_multi_process = true;
        return std::make_shared<ConfiguredNetworkGroupWrapper>(net_group.release(), store_guard_for_multi_process);
    }

    static void add_to_python_module(py::module &m);

private:
    // Normally, the ownership of the network group is the Device/VDevice objects. We keep weak_ptr
    // to force free the network group before freeing the device/vdevice.
    std::weak_ptr<ConfiguredNetworkGroup> m_cng;
    // On multi-process, when pickling this object (the windows multi-process flow) the device/vdevice
    // doesn't own the network group object.
    // To solve this problem, we store here an optional guard for the network group that will exist
    // only when the object is constructed with pickle.
    std::shared_ptr<ConfiguredNetworkGroup> m_cng_guard_for_mt;
#ifdef HAILO_IS_FORK_SUPPORTED
    AtForkRegistry::AtForkGuard m_atfork_guard;
#endif
};
void NetworkGroup_api_initialize_python_module(py::module &m);
} /* namespace hailort */
#endif /* _HAILO_NETWORK_GROUP_API_HPP_ */

View File

@@ -15,6 +15,7 @@ using namespace std;
#include "hef_api.hpp" #include "hef_api.hpp"
#include "vstream_api.hpp" #include "vstream_api.hpp"
#include "vdevice_api.hpp" #include "vdevice_api.hpp"
#include "network_group_api.hpp"
#include "device_api.hpp" #include "device_api.hpp"
#include "quantization_api.hpp" #include "quantization_api.hpp"
@@ -164,6 +165,7 @@ PYBIND11_MODULE(_pyhailort, m) {
m.def("dequantize_output_buffer_in_place", &QuantizationBindings::dequantize_output_buffer_in_place); m.def("dequantize_output_buffer_in_place", &QuantizationBindings::dequantize_output_buffer_in_place);
m.def("dequantize_output_buffer", &QuantizationBindings::dequantize_output_buffer); m.def("dequantize_output_buffer", &QuantizationBindings::dequantize_output_buffer);
m.def("quantize_input_buffer", &QuantizationBindings::quantize_input_buffer); m.def("quantize_input_buffer", &QuantizationBindings::quantize_input_buffer);
m.def("is_qp_valid", &QuantizationBindings::is_qp_valid);
m.def("get_format_data_bytes", &HailoRTCommon::get_format_data_bytes); m.def("get_format_data_bytes", &HailoRTCommon::get_format_data_bytes);
m.def("get_dtype", &HailoRTBindingsCommon::get_dtype); m.def("get_dtype", &HailoRTBindingsCommon::get_dtype);
@@ -209,7 +211,8 @@ PYBIND11_MODULE(_pyhailort, m) {
.value("HAILO8_A0", HAILO_ARCH_HAILO8_A0) .value("HAILO8_A0", HAILO_ARCH_HAILO8_A0)
.value("HAILO8", HAILO_ARCH_HAILO8) .value("HAILO8", HAILO_ARCH_HAILO8)
.value("HAILO8L", HAILO_ARCH_HAILO8L) .value("HAILO8L", HAILO_ARCH_HAILO8L)
.value("HAILO15", HAILO_ARCH_HAILO15) .value("HAILO15H", HAILO_ARCH_HAILO15H)
.value("PLUTO", HAILO_ARCH_PLUTO)
; ;
/* TODO: SDK-15648 */ /* TODO: SDK-15648 */
@@ -462,16 +465,19 @@ PYBIND11_MODULE(_pyhailort, m) {
.def(py::init<>()) .def(py::init<>())
.def_readonly("number_of_classes", &hailo_nms_shape_t::number_of_classes) .def_readonly("number_of_classes", &hailo_nms_shape_t::number_of_classes)
.def_readonly("max_bboxes_per_class", &hailo_nms_shape_t::max_bboxes_per_class) .def_readonly("max_bboxes_per_class", &hailo_nms_shape_t::max_bboxes_per_class)
.def_readonly("max_mask_size", &hailo_nms_shape_t::max_mask_size)
.def(py::pickle( .def(py::pickle(
[](const hailo_nms_shape_t &nms_shape) { // __getstate__ [](const hailo_nms_shape_t &nms_shape) { // __getstate__
return py::make_tuple( return py::make_tuple(
nms_shape.number_of_classes, nms_shape.number_of_classes,
nms_shape.max_bboxes_per_class); nms_shape.max_bboxes_per_class,
nms_shape.max_mask_size);
}, },
[](py::tuple t) { // __setstate__ [](py::tuple t) { // __setstate__
hailo_nms_shape_t nms_shape; hailo_nms_shape_t nms_shape;
nms_shape.number_of_classes = t[0].cast<uint32_t>(); nms_shape.number_of_classes = t[0].cast<uint32_t>();
nms_shape.max_bboxes_per_class = t[1].cast<uint32_t>(); nms_shape.max_bboxes_per_class = t[1].cast<uint32_t>();
nms_shape.max_mask_size = t[2].cast<uint32_t>();
return nms_shape; return nms_shape;
} }
)) ))
@@ -513,6 +519,7 @@ PYBIND11_MODULE(_pyhailort, m) {
.value("RGB4", HAILO_FORMAT_ORDER_RGB4) .value("RGB4", HAILO_FORMAT_ORDER_RGB4)
.value("I420", HAILO_FORMAT_ORDER_I420) .value("I420", HAILO_FORMAT_ORDER_I420)
.value("YYYYUV", HAILO_FORMAT_ORDER_HAILO_YYYYUV) .value("YYYYUV", HAILO_FORMAT_ORDER_HAILO_YYYYUV)
.value("HAILO_NMS_WITH_BYTE_MASK", HAILO_FORMAT_ORDER_HAILO_NMS_WITH_BYTE_MASK)
; ;
py::enum_<hailo_format_flags_t>(m, "FormatFlags", py::arithmetic()) py::enum_<hailo_format_flags_t>(m, "FormatFlags", py::arithmetic())
@@ -1010,7 +1017,7 @@ PYBIND11_MODULE(_pyhailort, m) {
} }
}) })
.def_property_readonly("nms_shape", [](const hailo_vstream_info_t &self) { .def_property_readonly("nms_shape", [](const hailo_vstream_info_t &self) {
if (HAILO_FORMAT_ORDER_HAILO_NMS != self.format.order) { if (!HailoRTCommon::is_nms(self)) {
throw HailoRTCustomException("nms_shape is availale only on nms order vstreams"); throw HailoRTCustomException("nms_shape is availale only on nms order vstreams");
} }
return self.nms_shape; return self.nms_shape;
@@ -1025,7 +1032,7 @@ PYBIND11_MODULE(_pyhailort, m) {
}) })
.def(py::pickle( .def(py::pickle(
[](const hailo_vstream_info_t &vstream_info) { // __getstate__ [](const hailo_vstream_info_t &vstream_info) { // __getstate__
if (HAILO_FORMAT_ORDER_HAILO_NMS == vstream_info.format.order) { if (HailoRTCommon::is_nms(vstream_info)) {
return py::make_tuple( return py::make_tuple(
vstream_info.name, vstream_info.name,
vstream_info.network_name, vstream_info.network_name,
@@ -1050,7 +1057,7 @@ PYBIND11_MODULE(_pyhailort, m) {
strcpy(vstream_info.network_name, t[1].cast<std::string>().c_str()); strcpy(vstream_info.network_name, t[1].cast<std::string>().c_str());
vstream_info.direction = t[2].cast<hailo_stream_direction_t>(); vstream_info.direction = t[2].cast<hailo_stream_direction_t>();
vstream_info.format = t[3].cast<hailo_format_t>(); vstream_info.format = t[3].cast<hailo_format_t>();
if (HAILO_FORMAT_ORDER_HAILO_NMS == vstream_info.format.order) { if (HailoRTCommon::is_nms(vstream_info)) {
vstream_info.nms_shape = t[4].cast<hailo_nms_shape_t>(); vstream_info.nms_shape = t[4].cast<hailo_nms_shape_t>();
} }
else { else {
@@ -1104,6 +1111,7 @@ PYBIND11_MODULE(_pyhailort, m) {
HefWrapper::initialize_python_module(m); HefWrapper::initialize_python_module(m);
VStream_api_initialize_python_module(m); VStream_api_initialize_python_module(m);
VDevice_api_initialize_python_module(m); VDevice_api_initialize_python_module(m);
NetworkGroup_api_initialize_python_module(m);
DeviceWrapper::add_to_python_module(m); DeviceWrapper::add_to_python_module(m);
NetworkRateLimiter::add_to_python_module(m); NetworkRateLimiter::add_to_python_module(m);

View File

@@ -245,4 +245,9 @@ void QuantizationBindings::quantize_input_buffer(py::array src_buffer, py::array
} }
} }
bool QuantizationBindings::is_qp_valid(const hailo_quant_info_t &quant_info)
{
return Quantization::is_qp_valid(quant_info);
}
} /* namespace hailort */ } /* namespace hailort */

View File

@@ -30,6 +30,7 @@ public:
const hailo_format_type_t &dst_dtype, uint32_t shape_size, const hailo_quant_info_t &quant_info); const hailo_format_type_t &dst_dtype, uint32_t shape_size, const hailo_quant_info_t &quant_info);
static void dequantize_output_buffer(py::array src_buffer, py::array dst_buffer, const hailo_format_type_t &src_dtype, static void dequantize_output_buffer(py::array src_buffer, py::array dst_buffer, const hailo_format_type_t &src_dtype,
const hailo_format_type_t &dst_dtype, uint32_t shape_size, const hailo_quant_info_t &quant_info); const hailo_format_type_t &dst_dtype, uint32_t shape_size, const hailo_quant_info_t &quant_info);
static bool is_qp_valid(const hailo_quant_info_t &quant_info);
private: private:
static void dequantize_output_buffer_from_uint8(py::array src_buffer, py::array dst_buffer, static void dequantize_output_buffer_from_uint8(py::array src_buffer, py::array dst_buffer,
const hailo_format_type_t &dst_dtype, uint32_t shape_size, const hailo_quant_info_t &quant_info); const hailo_format_type_t &dst_dtype, uint32_t shape_size, const hailo_quant_info_t &quant_info);

View File

@@ -5,18 +5,18 @@
/** /**
* @file vdevice_api.hpp * @file vdevice_api.hpp
* @brief Defines binding to a VDevice class usage over Python. * @brief Defines binding to a VDevice class usage over Python.
*
* TODO: doc
**/ **/
#ifndef VDEVICE_API_HPP_ #ifndef VDEVICE_API_HPP_
#define VDEVICE_API_HPP_ #define VDEVICE_API_HPP_
#include "utils.hpp"
#include "network_group_api.hpp"
#include "hailo/hef.hpp" #include "hailo/hef.hpp"
#include "hailo/vdevice.hpp" #include "hailo/vdevice.hpp"
#include "hailo/hailort_common.hpp" #include "hailo/hailort_common.hpp"
#include "utils.hpp"
#include <iostream> #include <iostream>
#include <pybind11/pybind11.h> #include <pybind11/pybind11.h>
#include <pybind11/numpy.h> #include <pybind11/numpy.h>
@@ -36,19 +36,23 @@ struct VDeviceParamsWrapper {
std::string group_id_str; std::string group_id_str;
}; };
class VDeviceWrapper;
using VDeviceWrapperPtr = std::shared_ptr<VDeviceWrapper>;
class VDeviceWrapper { class VDeviceWrapper {
public: public:
static VDeviceWrapper create(const hailo_vdevice_params_t &params) static VDeviceWrapperPtr create(const hailo_vdevice_params_t &params)
{ {
return VDeviceWrapper(params); return std::make_shared<VDeviceWrapper>(params);
}; };
static VDeviceWrapper create(const VDeviceParamsWrapper &params) static VDeviceWrapperPtr create(const VDeviceParamsWrapper &params)
{ {
return VDeviceWrapper(params.orig_params); return std::make_shared<VDeviceWrapper>(params.orig_params);
} }
static VDeviceWrapper create(const VDeviceParamsWrapper &params, const std::vector<std::string> &device_ids) static VDeviceWrapperPtr create(const VDeviceParamsWrapper &params, const std::vector<std::string> &device_ids)
{ {
if (params.orig_params.device_ids != nullptr && (!device_ids.empty())) { if (params.orig_params.device_ids != nullptr && (!device_ids.empty())) {
std::cerr << "VDevice device_ids can be set in params or device_ids argument. Both parameters were passed to the c'tor"; std::cerr << "VDevice device_ids can be set in params or device_ids argument. Both parameters were passed to the c'tor";
@@ -58,10 +62,10 @@ public:
auto device_ids_vector = HailoRTCommon::to_device_ids_vector(device_ids); auto device_ids_vector = HailoRTCommon::to_device_ids_vector(device_ids);
VALIDATE_EXPECTED(device_ids_vector); VALIDATE_EXPECTED(device_ids_vector);
modified_params.orig_params.device_ids = device_ids_vector->data(); modified_params.orig_params.device_ids = device_ids_vector->data();
return VDeviceWrapper(modified_params.orig_params); return std::make_shared<VDeviceWrapper>(modified_params.orig_params);
} }
static VDeviceWrapper create_from_ids(const std::vector<std::string> &device_ids) static VDeviceWrapperPtr create_from_ids(const std::vector<std::string> &device_ids)
{ {
auto device_ids_vector = HailoRTCommon::to_device_ids_vector(device_ids); auto device_ids_vector = HailoRTCommon::to_device_ids_vector(device_ids);
VALIDATE_EXPECTED(device_ids_vector); VALIDATE_EXPECTED(device_ids_vector);
@@ -74,10 +78,18 @@ public:
params.device_count = static_cast<uint32_t>(device_ids_vector->size()); params.device_count = static_cast<uint32_t>(device_ids_vector->size());
params.scheduling_algorithm = HAILO_SCHEDULING_ALGORITHM_NONE; params.scheduling_algorithm = HAILO_SCHEDULING_ALGORITHM_NONE;
return VDeviceWrapper(params); return std::make_shared<VDeviceWrapper>(params);
} }
VDeviceWrapper(const hailo_vdevice_params_t &params) VDeviceWrapper(const hailo_vdevice_params_t &params)
#ifdef HAILO_IS_FORK_SUPPORTED
:
m_atfork_guard(this, {
.before_fork = [this]() { if (m_vdevice) m_vdevice->before_fork(); },
.after_fork_in_parent = [this]() { if (m_vdevice) m_vdevice->after_fork_in_parent(); },
.after_fork_in_child = [this]() { if (m_vdevice) m_vdevice->after_fork_in_child(); },
})
#endif
{ {
auto vdevice_expected = VDevice::create(params); auto vdevice_expected = VDevice::create(params);
VALIDATE_EXPECTED(vdevice_expected); VALIDATE_EXPECTED(vdevice_expected);
@@ -96,15 +108,15 @@ public:
py::list configure(const HefWrapper &hef, py::list configure(const HefWrapper &hef,
const NetworkGroupsParamsMap &configure_params={}) const NetworkGroupsParamsMap &configure_params={})
{ {
auto network_groups = m_vdevice->configure(*hef.hef_ptr(), configure_params); auto network_groups = m_vdevice->configure(*hef.hef_ptr(), configure_params);
VALIDATE_EXPECTED(network_groups); VALIDATE_EXPECTED(network_groups);
py::list results; py::list results;
m_net_groups.reserve(m_net_groups.size() + network_groups->size()); m_net_groups.reserve(m_net_groups.size() + network_groups->size());
for (const auto &network_group : network_groups.value()) { for (const auto &network_group : network_groups.value()) {
results.append(network_group.get()); auto wrapper = ConfiguredNetworkGroupWrapper::create(network_group);
m_net_groups.emplace_back(network_group); results.append(wrapper);
m_net_groups.emplace_back(wrapper);
} }
return results; return results;
@@ -116,38 +128,18 @@ public:
m_vdevice.reset(); m_vdevice.reset();
} }
void before_fork()
{
if (m_vdevice != nullptr) {
auto status = m_vdevice->before_fork();
VALIDATE_STATUS(status);
}
}
void after_fork_in_parent()
{
if (m_vdevice != nullptr) {
auto status = m_vdevice->after_fork_in_parent();
VALIDATE_STATUS(status);
}
}
void after_fork_in_child()
{
if (m_vdevice != nullptr) {
auto status = m_vdevice->after_fork_in_child();
VALIDATE_STATUS(status);
}
}
private: private:
std::unique_ptr<VDevice> m_vdevice; std::unique_ptr<VDevice> m_vdevice;
ConfiguredNetworkGroupVector m_net_groups; std::vector<ConfiguredNetworkGroupWrapperPtr> m_net_groups;
#ifdef HAILO_IS_FORK_SUPPORTED
AtForkRegistry::AtForkGuard m_atfork_guard;
#endif
}; };
void VDevice_api_initialize_python_module(py::module &m) void VDevice_api_initialize_python_module(py::module &m)
{ {
py::class_<VDeviceWrapper>(m, "VDevice") py::class_<VDeviceWrapper, VDeviceWrapperPtr>(m, "VDevice")
.def("create", py::overload_cast<const hailo_vdevice_params_t&>(&VDeviceWrapper::create)) .def("create", py::overload_cast<const hailo_vdevice_params_t&>(&VDeviceWrapper::create))
.def("create", py::overload_cast<const VDeviceParamsWrapper&>(&VDeviceWrapper::create)) .def("create", py::overload_cast<const VDeviceParamsWrapper&>(&VDeviceWrapper::create))
.def("create", py::overload_cast<const VDeviceParamsWrapper&, const std::vector<std::string>&>(&VDeviceWrapper::create)) .def("create", py::overload_cast<const VDeviceParamsWrapper&, const std::vector<std::string>&>(&VDeviceWrapper::create))
@@ -155,9 +147,6 @@ void VDevice_api_initialize_python_module(py::module &m)
.def("get_physical_devices_ids", &VDeviceWrapper::get_physical_devices_ids) .def("get_physical_devices_ids", &VDeviceWrapper::get_physical_devices_ids)
.def("configure", &VDeviceWrapper::configure) .def("configure", &VDeviceWrapper::configure)
.def("release", &VDeviceWrapper::release) .def("release", &VDeviceWrapper::release)
.def("before_fork", &VDeviceWrapper::before_fork)
.def("after_fork_in_parent", &VDeviceWrapper::after_fork_in_parent)
.def("after_fork_in_child", &VDeviceWrapper::after_fork_in_child)
; ;
} }

View File

@@ -10,6 +10,8 @@
#include "vstream_api.hpp" #include "vstream_api.hpp"
#include "bindings_common.hpp" #include "bindings_common.hpp"
#include "utils.hpp" #include "utils.hpp"
#include "network_group_api.hpp"
#include <iostream> #include <iostream>
@@ -25,36 +27,6 @@ void InputVStreamWrapper::add_to_python_module(py::module &m)
MemoryView(const_cast<void*>(reinterpret_cast<const void*>(data.data())), data.nbytes())); MemoryView(const_cast<void*>(reinterpret_cast<const void*>(data.data())), data.nbytes()));
VALIDATE_STATUS(status); VALIDATE_STATUS(status);
}) })
.def("before_fork", [](InputVStream &self)
{
#ifdef HAILO_SUPPORT_MULTI_PROCESS
auto status = self.before_fork();
VALIDATE_STATUS(status);
#else
(void)self;
#endif // HAILO_SUPPORT_MULTI_PROCESS
}
)
.def("after_fork_in_parent", [](InputVStream &self)
{
#ifdef HAILO_SUPPORT_MULTI_PROCESS
auto status = self.after_fork_in_parent();
VALIDATE_STATUS(status);
#else
(void)self;
#endif // HAILO_SUPPORT_MULTI_PROCESS
}
)
.def("after_fork_in_child", [](InputVStream &self)
{
#ifdef HAILO_SUPPORT_MULTI_PROCESS
auto status = self.after_fork_in_child();
VALIDATE_STATUS(status);
#else
(void)self;
#endif // HAILO_SUPPORT_MULTI_PROCESS
}
)
.def("flush", [](InputVStream &self) .def("flush", [](InputVStream &self)
{ {
hailo_status status = self.flush(); hailo_status status = self.flush();
@@ -76,7 +48,7 @@ void InputVStreamWrapper::add_to_python_module(py::module &m)
; ;
} }
InputVStreamsWrapper InputVStreamsWrapper::create(ConfiguredNetworkGroup &net_group, InputVStreamsWrapperPtr InputVStreamsWrapper::create(ConfiguredNetworkGroup &net_group,
const std::map<std::string, hailo_vstream_params_t> &input_vstreams_params) const std::map<std::string, hailo_vstream_params_t> &input_vstreams_params)
{ {
auto input_vstreams_expected = VStreamsBuilder::create_input_vstreams(net_group, input_vstreams_params); auto input_vstreams_expected = VStreamsBuilder::create_input_vstreams(net_group, input_vstreams_params);
@@ -87,7 +59,7 @@ InputVStreamsWrapper InputVStreamsWrapper::create(ConfiguredNetworkGroup &net_gr
auto input_name = input.name(); auto input_name = input.name();
input_vstreams.emplace(input_name, std::make_unique<InputVStream>(std::move(input))); input_vstreams.emplace(input_name, std::make_unique<InputVStream>(std::move(input)));
} }
return InputVStreamsWrapper(input_vstreams); return std::make_shared<InputVStreamsWrapper>(input_vstreams);
} }
const InputVStreamsWrapper &InputVStreamsWrapper::enter() const InputVStreamsWrapper &InputVStreamsWrapper::enter()
@@ -128,53 +100,28 @@ void InputVStreamsWrapper::clear()
VALIDATE_STATUS(status); VALIDATE_STATUS(status);
} }
void InputVStreamsWrapper::before_fork()
{
#ifdef HAILO_SUPPORT_MULTI_PROCESS
for (auto &input_vstream : m_input_vstreams) {
auto status = input_vstream.second->before_fork();
VALIDATE_STATUS(status);
}
#endif // HAILO_SUPPORT_MULTI_PROCESS
}
void InputVStreamsWrapper::after_fork_in_parent()
{
#ifdef HAILO_SUPPORT_MULTI_PROCESS
for (auto &input_vstream : m_input_vstreams) {
auto status = input_vstream.second->after_fork_in_parent();
VALIDATE_STATUS(status);
}
#endif // HAILO_SUPPORT_MULTI_PROCESS
}
void InputVStreamsWrapper::after_fork_in_child()
{
#ifdef HAILO_SUPPORT_MULTI_PROCESS
for (auto &input_vstream : m_input_vstreams) {
auto status = input_vstream.second->after_fork_in_child();
VALIDATE_STATUS(status);
}
#endif // HAILO_SUPPORT_MULTI_PROCESS
}
void InputVStreamsWrapper::add_to_python_module(py::module &m) void InputVStreamsWrapper::add_to_python_module(py::module &m)
{ {
py::class_<InputVStreamsWrapper>(m, "InputVStreams") py::class_<InputVStreamsWrapper, InputVStreamsWrapperPtr>(m, "InputVStreams")
.def(py::init(&InputVStreamsWrapper::create)) .def(py::init(&InputVStreamsWrapper::create))
.def("get_input_by_name", &InputVStreamsWrapper::get_input_by_name) .def("get_input_by_name", &InputVStreamsWrapper::get_input_by_name)
.def("get_all_inputs", &InputVStreamsWrapper::get_all_inputs) .def("get_all_inputs", &InputVStreamsWrapper::get_all_inputs)
.def("clear", &InputVStreamsWrapper::clear) .def("clear", &InputVStreamsWrapper::clear)
.def("__enter__", &InputVStreamsWrapper::enter, py::return_value_policy::reference) .def("__enter__", &InputVStreamsWrapper::enter, py::return_value_policy::reference)
.def("__exit__", [&](InputVStreamsWrapper &self, py::args) { self.exit(); }) .def("__exit__", [&](InputVStreamsWrapper &self, py::args) { self.exit(); })
.def("before_fork", &InputVStreamsWrapper::before_fork)
.def("after_fork_in_parent", &InputVStreamsWrapper::after_fork_in_parent)
.def("after_fork_in_child", &InputVStreamsWrapper::after_fork_in_child)
; ;
} }
InputVStreamsWrapper::InputVStreamsWrapper(std::unordered_map<std::string, std::shared_ptr<InputVStream>> &input_vstreams) InputVStreamsWrapper::InputVStreamsWrapper(std::unordered_map<std::string, std::shared_ptr<InputVStream>> &input_vstreams) :
: m_input_vstreams(std::move(input_vstreams)) m_input_vstreams(std::move(input_vstreams))
#ifdef HAILO_IS_FORK_SUPPORTED
,
m_atfork_guard(this, {
.before_fork = [this]() { before_fork(); },
.after_fork_in_parent = [this]() { after_fork_in_parent(); },
.after_fork_in_child = [this]() { after_fork_in_child(); }
})
#endif
{} {}
py::dtype OutputVStreamWrapper::get_dtype(OutputVStream &self) py::dtype OutputVStreamWrapper::get_dtype(OutputVStream &self)
@@ -214,36 +161,21 @@ void OutputVStreamWrapper::add_to_python_module(py::module &m)
return py::array(get_dtype(self), get_shape(self), unmanaged_addr, return py::array(get_dtype(self), get_shape(self), unmanaged_addr,
py::capsule(unmanaged_addr, [](void *p) { delete reinterpret_cast<uint8_t*>(p); })); py::capsule(unmanaged_addr, [](void *p) { delete reinterpret_cast<uint8_t*>(p); }));
}) })
.def("before_fork", [](OutputVStream &self) .def("set_nms_score_threshold", [](OutputVStream &self, float32_t threshold)
{ {
#ifdef HAILO_SUPPORT_MULTI_PROCESS hailo_status status = self.set_nms_score_threshold(threshold);
auto status = self.before_fork();
VALIDATE_STATUS(status); VALIDATE_STATUS(status);
#else })
(void)self; .def("set_nms_iou_threshold", [](OutputVStream &self, float32_t threshold)
#endif // HAILO_SUPPORT_MULTI_PROCESS
}
)
.def("after_fork_in_parent", [](OutputVStream &self)
{ {
#ifdef HAILO_SUPPORT_MULTI_PROCESS hailo_status status = self.set_nms_iou_threshold(threshold);
auto status = self.after_fork_in_parent();
VALIDATE_STATUS(status); VALIDATE_STATUS(status);
#else })
(void)self; .def("set_nms_max_proposals_per_class", [](OutputVStream &self, uint32_t max_proposals_per_class)
#endif // HAILO_SUPPORT_MULTI_PROCESS
}
)
.def("after_fork_in_child", [](OutputVStream &self)
{ {
#ifdef HAILO_SUPPORT_MULTI_PROCESS hailo_status status = self.set_nms_max_proposals_per_class(max_proposals_per_class);
auto status = self.after_fork_in_child();
VALIDATE_STATUS(status); VALIDATE_STATUS(status);
#else })
(void)self;
#endif // HAILO_SUPPORT_MULTI_PROCESS
}
)
.def_property_readonly("info", [](OutputVStream &self) .def_property_readonly("info", [](OutputVStream &self)
{ {
return self.get_info(); return self.get_info();
@@ -254,7 +186,7 @@ void OutputVStreamWrapper::add_to_python_module(py::module &m)
; ;
} }
OutputVStreamsWrapper OutputVStreamsWrapper::create(ConfiguredNetworkGroup &net_group, OutputVStreamsWrapperPtr OutputVStreamsWrapper::create(ConfiguredNetworkGroup &net_group,
const std::map<std::string, hailo_vstream_params_t> &output_vstreams_params) const std::map<std::string, hailo_vstream_params_t> &output_vstreams_params)
{ {
auto output_vstreams_expected = VStreamsBuilder::create_output_vstreams(net_group, output_vstreams_params); auto output_vstreams_expected = VStreamsBuilder::create_output_vstreams(net_group, output_vstreams_params);
@@ -265,7 +197,7 @@ OutputVStreamsWrapper OutputVStreamsWrapper::create(ConfiguredNetworkGroup &net_
auto output_name = output.name(); auto output_name = output.name();
output_vstreams.emplace(output_name, std::make_unique<OutputVStream>(std::move(output))); output_vstreams.emplace(output_name, std::make_unique<OutputVStream>(std::move(output)));
} }
return OutputVStreamsWrapper(output_vstreams); return std::make_shared<OutputVStreamsWrapper>(output_vstreams);
} }
std::shared_ptr<OutputVStream> OutputVStreamsWrapper::get_output_by_name(const std::string &name) std::shared_ptr<OutputVStream> OutputVStreamsWrapper::get_output_by_name(const std::string &name)
@@ -308,58 +240,54 @@ void OutputVStreamsWrapper::clear()
void OutputVStreamsWrapper::before_fork() void OutputVStreamsWrapper::before_fork()
{ {
#ifdef HAILO_SUPPORT_MULTI_PROCESS for (auto &vstream : m_output_vstreams) {
for (auto &output_vstream : m_output_vstreams) { vstream.second->before_fork();
auto status = output_vstream.second->before_fork();
VALIDATE_STATUS(status);
} }
#endif // HAILO_SUPPORT_MULTI_PROCESS
} }
void OutputVStreamsWrapper::after_fork_in_parent() void OutputVStreamsWrapper::after_fork_in_parent()
{ {
#ifdef HAILO_SUPPORT_MULTI_PROCESS for (auto &vstream : m_output_vstreams) {
for (auto &output_vstream : m_output_vstreams) { vstream.second->after_fork_in_parent();
auto status = output_vstream.second->after_fork_in_parent();
VALIDATE_STATUS(status);
} }
#endif // HAILO_SUPPORT_MULTI_PROCESS
} }
void OutputVStreamsWrapper::after_fork_in_child() void OutputVStreamsWrapper::after_fork_in_child()
{ {
#ifdef HAILO_SUPPORT_MULTI_PROCESS for (auto &vstream : m_output_vstreams) {
for (auto &output_vstream : m_output_vstreams) { vstream.second->after_fork_in_child();
auto status = output_vstream.second->after_fork_in_child();
VALIDATE_STATUS(status);
} }
#endif // HAILO_SUPPORT_MULTI_PROCESS
} }
void OutputVStreamsWrapper::add_to_python_module(py::module &m) void OutputVStreamsWrapper::add_to_python_module(py::module &m)
{ {
py::class_<OutputVStreamsWrapper>(m, "OutputVStreams") py::class_<OutputVStreamsWrapper, OutputVStreamsWrapperPtr>(m, "OutputVStreams")
.def(py::init(&OutputVStreamsWrapper::create)) .def(py::init(&OutputVStreamsWrapper::create))
.def("get_output_by_name", &OutputVStreamsWrapper::get_output_by_name) .def("get_output_by_name", &OutputVStreamsWrapper::get_output_by_name)
.def("get_all_outputs", &OutputVStreamsWrapper::get_all_outputs) .def("get_all_outputs", &OutputVStreamsWrapper::get_all_outputs)
.def("clear", &OutputVStreamsWrapper::clear) .def("clear", &OutputVStreamsWrapper::clear)
.def("__enter__", &OutputVStreamsWrapper::enter, py::return_value_policy::reference) .def("__enter__", &OutputVStreamsWrapper::enter, py::return_value_policy::reference)
.def("__exit__", [&](OutputVStreamsWrapper &self, py::args) { self.exit(); }) .def("__exit__", [&](OutputVStreamsWrapper &self, py::args) { self.exit(); })
.def("before_fork", &OutputVStreamsWrapper::before_fork)
.def("after_fork_in_parent", &OutputVStreamsWrapper::after_fork_in_parent)
.def("after_fork_in_child", &OutputVStreamsWrapper::after_fork_in_child)
; ;
} }
OutputVStreamsWrapper::OutputVStreamsWrapper(std::unordered_map<std::string, std::shared_ptr<OutputVStream>> &output_vstreams) OutputVStreamsWrapper::OutputVStreamsWrapper(std::unordered_map<std::string, std::shared_ptr<OutputVStream>> &output_vstreams) :
: m_output_vstreams(std::move(output_vstreams)) m_output_vstreams(std::move(output_vstreams))
#ifdef HAILO_IS_FORK_SUPPORTED
,
m_atfork_guard(this, {
.before_fork = [this]() { before_fork(); },
.after_fork_in_parent = [this]() { after_fork_in_parent(); },
.after_fork_in_child = [this]() { after_fork_in_child(); }
})
#endif
{} {}
InferVStreamsWrapper InferVStreamsWrapper::create(ConfiguredNetworkGroup &network_group, InferVStreamsWrapper InferVStreamsWrapper::create(ConfiguredNetworkGroupWrapper &network_group,
const std::map<std::string, hailo_vstream_params_t> &input_vstreams_params, const std::map<std::string, hailo_vstream_params_t> &input_vstreams_params,
const std::map<std::string, hailo_vstream_params_t> &output_vstreams_params) const std::map<std::string, hailo_vstream_params_t> &output_vstreams_params)
{ {
auto infer_pipeline = InferVStreams::create(network_group, input_vstreams_params, output_vstreams_params); auto infer_pipeline = InferVStreams::create(network_group.get(), input_vstreams_params, output_vstreams_params);
VALIDATE_EXPECTED(infer_pipeline); VALIDATE_EXPECTED(infer_pipeline);
auto infer_vstream_ptr = std::make_shared<InferVStreams>(std::move(infer_pipeline.value())); auto infer_vstream_ptr = std::make_shared<InferVStreams>(std::move(infer_pipeline.value()));
@@ -435,6 +363,25 @@ void InferVStreamsWrapper::release()
m_infer_pipeline.reset(); m_infer_pipeline.reset();
} }
void InputVStreamsWrapper::before_fork()
{
for (auto &vstream : m_input_vstreams) {
vstream.second->before_fork();
}
}
void InputVStreamsWrapper::after_fork_in_parent()
{
for (auto &vstream : m_input_vstreams) {
vstream.second->after_fork_in_parent();
}
}
void InputVStreamsWrapper::after_fork_in_child()
{
for (auto &vstream : m_input_vstreams) {
vstream.second->after_fork_in_child();
}
}
void InferVStreamsWrapper::add_to_python_module(py::module &m) void InferVStreamsWrapper::add_to_python_module(py::module &m)
{ {
py::class_<InferVStreamsWrapper>(m, "InferVStreams") py::class_<InferVStreamsWrapper>(m, "InferVStreams")
@@ -444,6 +391,18 @@ void InferVStreamsWrapper::add_to_python_module(py::module &m)
.def("get_user_buffer_format", &InferVStreamsWrapper::get_user_buffer_format) .def("get_user_buffer_format", &InferVStreamsWrapper::get_user_buffer_format)
.def("infer", &InferVStreamsWrapper::infer) .def("infer", &InferVStreamsWrapper::infer)
.def("release", [](InferVStreamsWrapper &self, py::args) { self.release(); }) .def("release", [](InferVStreamsWrapper &self, py::args) { self.release(); })
.def("set_nms_score_threshold", [](InferVStreamsWrapper &self, float32_t threshold)
{
VALIDATE_STATUS(self.m_infer_pipeline->set_nms_score_threshold(threshold));
})
.def("set_nms_iou_threshold", [](InferVStreamsWrapper &self, float32_t threshold)
{
VALIDATE_STATUS(self.m_infer_pipeline->set_nms_iou_threshold(threshold));
})
.def("set_nms_max_proposals_per_class", [](InferVStreamsWrapper &self, uint32_t max_proposals_per_class)
{
VALIDATE_STATUS(self.m_infer_pipeline->set_nms_max_proposals_per_class(max_proposals_per_class));
})
; ;
} }

View File

@@ -10,9 +10,12 @@
#ifndef _VSTREAM_API_HPP_ #ifndef _VSTREAM_API_HPP_
#define _VSTREAM_API_HPP_ #define _VSTREAM_API_HPP_
#include "utils.hpp"
#include "common/fork_support.hpp"
#include "hailo/vstream.hpp" #include "hailo/vstream.hpp"
#include "hailo/inference_pipeline.hpp" #include "hailo/inference_pipeline.hpp"
#include "utils.hpp"
#include <pybind11/pybind11.h> #include <pybind11/pybind11.h>
#include <pybind11/numpy.h> #include <pybind11/numpy.h>
@@ -23,16 +26,22 @@
namespace hailort namespace hailort
{ {
class ConfiguredNetworkGroupWrapper;
class InputVStreamWrapper final class InputVStreamWrapper final
{ {
public: public:
static void add_to_python_module(py::module &m); static void add_to_python_module(py::module &m);
}; };
class InputVStreamsWrapper;
using InputVStreamsWrapperPtr = std::shared_ptr<InputVStreamsWrapper>;
class InputVStreamsWrapper final class InputVStreamsWrapper final
{ {
public: public:
static InputVStreamsWrapper create(ConfiguredNetworkGroup &net_group, static InputVStreamsWrapperPtr create(ConfiguredNetworkGroup &net_group,
const std::map<std::string, hailo_vstream_params_t> &input_vstreams_params); const std::map<std::string, hailo_vstream_params_t> &input_vstreams_params);
const InputVStreamsWrapper &enter(); const InputVStreamsWrapper &enter();
void exit(); void exit();
@@ -42,11 +51,17 @@ public:
void before_fork(); void before_fork();
void after_fork_in_parent(); void after_fork_in_parent();
void after_fork_in_child(); void after_fork_in_child();
static void add_to_python_module(py::module &m); static void add_to_python_module(py::module &m);
private:
InputVStreamsWrapper(std::unordered_map<std::string, std::shared_ptr<InputVStream>> &input_vstreams); InputVStreamsWrapper(std::unordered_map<std::string, std::shared_ptr<InputVStream>> &input_vstreams);
std::unordered_map<std::string, std::shared_ptr<InputVStream>> m_input_vstreams; std::unordered_map<std::string, std::shared_ptr<InputVStream>> m_input_vstreams;
private:
#ifdef HAILO_IS_FORK_SUPPORTED
AtForkRegistry::AtForkGuard m_atfork_guard;
#endif
}; };
class OutputVStreamWrapper final class OutputVStreamWrapper final
@@ -58,10 +73,13 @@ public:
static void add_to_python_module(py::module &m); static void add_to_python_module(py::module &m);
}; };
class OutputVStreamsWrapper;
using OutputVStreamsWrapperPtr = std::shared_ptr<OutputVStreamsWrapper>;
class OutputVStreamsWrapper final class OutputVStreamsWrapper final
{ {
public: public:
static OutputVStreamsWrapper create(ConfiguredNetworkGroup &net_group, static OutputVStreamsWrapperPtr create(ConfiguredNetworkGroup &net_group,
const std::map<std::string, hailo_vstream_params_t> &output_vstreams_params); const std::map<std::string, hailo_vstream_params_t> &output_vstreams_params);
std::shared_ptr<OutputVStream> get_output_by_name(const std::string &name); std::shared_ptr<OutputVStream> get_output_by_name(const std::string &name);
const OutputVStreamsWrapper &enter(); const OutputVStreamsWrapper &enter();
@@ -73,15 +91,20 @@ public:
void after_fork_in_child(); void after_fork_in_child();
static void add_to_python_module(py::module &m); static void add_to_python_module(py::module &m);
private:
OutputVStreamsWrapper(std::unordered_map<std::string, std::shared_ptr<OutputVStream>> &output_vstreams); OutputVStreamsWrapper(std::unordered_map<std::string, std::shared_ptr<OutputVStream>> &output_vstreams);
std::unordered_map<std::string, std::shared_ptr<OutputVStream>> m_output_vstreams; std::unordered_map<std::string, std::shared_ptr<OutputVStream>> m_output_vstreams;
private:
#ifdef HAILO_IS_FORK_SUPPORTED
AtForkRegistry::AtForkGuard m_atfork_guard;
#endif
}; };
class InferVStreamsWrapper final class InferVStreamsWrapper final
{ {
public: public:
static InferVStreamsWrapper create(ConfiguredNetworkGroup &network_group, static InferVStreamsWrapper create(ConfiguredNetworkGroupWrapper &network_group,
const std::map<std::string, hailo_vstream_params_t> &input_vstreams_params, const std::map<std::string, hailo_vstream_params_t> &input_vstreams_params,
const std::map<std::string, hailo_vstream_params_t> &output_vstreams_params); const std::map<std::string, hailo_vstream_params_t> &output_vstreams_params);
void infer(std::map<std::string, py::array> input_data, std::map<std::string, py::array> output_data, void infer(std::map<std::string, py::array> input_data, std::map<std::string, py::array> output_data,
@@ -90,6 +113,9 @@ public:
hailo_format_t get_user_buffer_format(const std::string &stream_name); hailo_format_t get_user_buffer_format(const std::string &stream_name);
std::vector<size_t> get_shape(const std::string &stream_name); std::vector<size_t> get_shape(const std::string &stream_name);
void release(); void release();
void before_fork();
void after_fork_in_parent();
void after_fork_in_child();
static void add_to_python_module(py::module &m); static void add_to_python_module(py::module &m);
private: private:

View File

@@ -23,7 +23,7 @@
package_dest: /usr/include/aarch64-linux-gnu package_dest: /usr/include/aarch64-linux-gnu
- version: '3.9' - version: '3.9'
installation: manual installation: manual
package_name: https://launchpad.net/~deadsnakes/+archive/ubuntu/ppa/+build/26280901/+files/libpython3.9-dev_3.9.17-1+focal1_arm64.deb package_name: https://launchpad.net/~deadsnakes/+archive/ubuntu/ppa/+files/libpython3.9-dev_3.9.18-1+focal1_arm64.deb
package_dest: /usr/include/aarch64-linux-gnu package_dest: /usr/include/aarch64-linux-gnu
- version: '3.10' - version: '3.10'
installation: manual installation: manual

View File

@@ -53,7 +53,7 @@ The following examples are provided, demonstrating the HailoRT API:
- The threads will continuously initiate an async read or write operations. - The threads will continuously initiate an async read or write operations.
- The main thread will stop the async operations and the threads by deactivating the network group. - The main thread will stop the async operations and the threads by deactivating the network group.
- `multi_process_example` - Demonstrates how to work with HailoRT multi-process service and using the HailoRT Model Scheduler for network groups switching. - `multi_process_example` - Demonstrates how to work with HailoRT multi-process service and using the HailoRT Model Scheduler for network groups switching.
Using the script `multi_process_example.sh` one can specify the number of processes to run each hef, see `multi_process_example.sh -h` for more information. Using the script `multi_process_example.sh` / `multi_process_example.ps1` one can specify the number of processes to run each hef, see `multi_process_example.sh -h` / `multi_process_example.ps1 -h` for more information.
- `notification_callback_example` - Demonstrates how to work with notification callbacks, same as `notification_callback_example` C example. - `notification_callback_example` - Demonstrates how to work with notification callbacks, same as `notification_callback_example` C example.
You can find more details about each example in the HailoRT user guide. You can find more details about each example in the HailoRT user guide.
## Compiling with CMake ## Compiling with CMake

View File

@@ -1,9 +1,9 @@
cmake_minimum_required(VERSION 3.0.0) cmake_minimum_required(VERSION 3.0.0)
find_package(Threads REQUIRED)
set(THREADS_PREFER_PTHREAD_FLAG ON) set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
find_package(HailoRT 4.14.0 EXACT REQUIRED) find_package(HailoRT 4.15.0 EXACT REQUIRED)
SET_SOURCE_FILES_PROPERTIES(data_quantization_example.c PROPERTIES LANGUAGE C) SET_SOURCE_FILES_PROPERTIES(data_quantization_example.c PROPERTIES LANGUAGE C)

View File

@@ -1,9 +1,9 @@
cmake_minimum_required(VERSION 3.0.0) cmake_minimum_required(VERSION 3.0.0)
find_package(Threads REQUIRED)
set(THREADS_PREFER_PTHREAD_FLAG ON) set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
find_package(HailoRT 4.14.0 EXACT REQUIRED) find_package(HailoRT 4.15.0 EXACT REQUIRED)
SET_SOURCE_FILES_PROPERTIES(infer_pipeline_example.c PROPERTIES LANGUAGE C) SET_SOURCE_FILES_PROPERTIES(infer_pipeline_example.c PROPERTIES LANGUAGE C)

View File

@@ -1,9 +1,9 @@
cmake_minimum_required(VERSION 3.0.0) cmake_minimum_required(VERSION 3.0.0)
find_package(Threads REQUIRED)
set(THREADS_PREFER_PTHREAD_FLAG ON) set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
find_package(HailoRT 4.14.0 EXACT REQUIRED) find_package(HailoRT 4.15.0 EXACT REQUIRED)
SET_SOURCE_FILES_PROPERTIES(multi_device_example.c PROPERTIES LANGUAGE C) SET_SOURCE_FILES_PROPERTIES(multi_device_example.c PROPERTIES LANGUAGE C)

View File

@@ -3,7 +3,7 @@ cmake_minimum_required(VERSION 3.0.0)
find_package(Threads REQUIRED) find_package(Threads REQUIRED)
set(THREADS_PREFER_PTHREAD_FLAG ON) set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(HailoRT 4.14.0 EXACT REQUIRED) find_package(HailoRT 4.15.0 EXACT REQUIRED)
SET_SOURCE_FILES_PROPERTIES(multi_network_vstream_example.c PROPERTIES LANGUAGE C) SET_SOURCE_FILES_PROPERTIES(multi_network_vstream_example.c PROPERTIES LANGUAGE C)

View File

@@ -1,9 +1,9 @@
cmake_minimum_required(VERSION 3.0.0) cmake_minimum_required(VERSION 3.0.0)
find_package(Threads REQUIRED)
set(THREADS_PREFER_PTHREAD_FLAG ON) set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
find_package(HailoRT 4.14.0 EXACT REQUIRED) find_package(HailoRT 4.15.0 EXACT REQUIRED)
SET_SOURCE_FILES_PROPERTIES(notification_callback_example.c PROPERTIES LANGUAGE C) SET_SOURCE_FILES_PROPERTIES(notification_callback_example.c PROPERTIES LANGUAGE C)

View File

@@ -1,6 +1,6 @@
cmake_minimum_required(VERSION 3.0.0) cmake_minimum_required(VERSION 3.0.0)
find_package(HailoRT 4.14.0 EXACT REQUIRED) find_package(HailoRT 4.15.0 EXACT REQUIRED)
SET_SOURCE_FILES_PROPERTIES(power_measurement_example.c PROPERTIES LANGUAGE C) SET_SOURCE_FILES_PROPERTIES(power_measurement_example.c PROPERTIES LANGUAGE C)

View File

@@ -1,14 +1,11 @@
cmake_minimum_required(VERSION 3.0.0) cmake_minimum_required(VERSION 3.0.0)
find_package(Threads REQUIRED) find_package(HailoRT 4.15.0 EXACT REQUIRED)
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(HailoRT 4.14.0 EXACT REQUIRED)
SET_SOURCE_FILES_PROPERTIES(raw_async_streams_single_thread_example.c PROPERTIES LANGUAGE C) SET_SOURCE_FILES_PROPERTIES(raw_async_streams_single_thread_example.c PROPERTIES LANGUAGE C)
add_executable(c_raw_async_streams_single_thread_example raw_async_streams_single_thread_example.c) add_executable(c_raw_async_streams_single_thread_example raw_async_streams_single_thread_example.c)
target_link_libraries(c_raw_async_streams_single_thread_example PRIVATE HailoRT::libhailort Threads::Threads) target_link_libraries(c_raw_async_streams_single_thread_example PRIVATE HailoRT::libhailort)
target_include_directories(c_raw_async_streams_single_thread_example PRIVATE "${CMAKE_CURRENT_LIST_DIR}/../common") target_include_directories(c_raw_async_streams_single_thread_example PRIVATE "${CMAKE_CURRENT_LIST_DIR}/../common")
if(WIN32) if(WIN32)

View File

@@ -1,9 +1,9 @@
cmake_minimum_required(VERSION 3.0.0) cmake_minimum_required(VERSION 3.0.0)
find_package(Threads REQUIRED)
set(THREADS_PREFER_PTHREAD_FLAG ON) set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
find_package(HailoRT 4.14.0 EXACT REQUIRED) find_package(HailoRT 4.15.0 EXACT REQUIRED)
SET_SOURCE_FILES_PROPERTIES(raw_streams_example.c PROPERTIES LANGUAGE C) SET_SOURCE_FILES_PROPERTIES(raw_streams_example.c PROPERTIES LANGUAGE C)

View File

@@ -185,7 +185,7 @@ int main()
hailo_output_stream output_streams [MAX_EDGE_LAYERS] = {NULL}; hailo_output_stream output_streams [MAX_EDGE_LAYERS] = {NULL};
size_t number_input_streams = 0; size_t number_input_streams = 0;
size_t number_output_streams = 0; size_t number_output_streams = 0;
size_t index = 0; size_t i = 0;
status = hailo_scan_devices(NULL, device_ids, &actual_devices_count); status = hailo_scan_devices(NULL, device_ids, &actual_devices_count);
REQUIRE_SUCCESS(status, l_exit, "Failed to scan devices"); REQUIRE_SUCCESS(status, l_exit, "Failed to scan devices");
@@ -214,14 +214,14 @@ int main()
&number_output_streams); &number_output_streams);
REQUIRE_SUCCESS(status, l_release_hef, "Failed getting output streams infos"); REQUIRE_SUCCESS(status, l_release_hef, "Failed getting output streams infos");
for (index = 0; index < number_input_streams; index++) { for (i = 0; i < number_input_streams; i++) {
status = hailo_get_input_stream(network_group, input_streams_info[index].name, &input_streams[index]); status = hailo_get_input_stream(network_group, input_streams_info[i].name, &input_streams[i]);
REQUIRE_SUCCESS(status, l_release_hef, "Failed getting input stream %s", input_streams_info[index].name); REQUIRE_SUCCESS(status, l_release_hef, "Failed getting input stream %s", input_streams_info[i].name);
} }
for (index = 0; index < number_output_streams; index++) { for (i = 0; i < number_output_streams; i++) {
status = hailo_get_output_stream(network_group, output_streams_info[index].name, &output_streams[index]); status = hailo_get_output_stream(network_group, output_streams_info[i].name, &output_streams[i]);
REQUIRE_SUCCESS(status, l_release_hef, "Failed getting output stream %s", output_streams_info[index].name); REQUIRE_SUCCESS(status, l_release_hef, "Failed getting output stream %s", output_streams_info[i].name);
} }
status = hailo_activate_network_group(network_group, NULL, &activated_network_group); status = hailo_activate_network_group(network_group, NULL, &activated_network_group);

View File

@@ -1,9 +1,9 @@
cmake_minimum_required(VERSION 3.0.0) cmake_minimum_required(VERSION 3.0.0)
find_package(Threads REQUIRED)
set(THREADS_PREFER_PTHREAD_FLAG ON) set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
find_package(HailoRT 4.14.0 EXACT REQUIRED) find_package(HailoRT 4.15.0 EXACT REQUIRED)
SET_SOURCE_FILES_PROPERTIES(switch_network_groups_example.c PROPERTIES LANGUAGE C) SET_SOURCE_FILES_PROPERTIES(switch_network_groups_example.c PROPERTIES LANGUAGE C)

View File

@@ -192,6 +192,7 @@ int main()
read_thread_args_t read_args[HEF_COUNT][MAX_EDGE_LAYERS]; read_thread_args_t read_args[HEF_COUNT][MAX_EDGE_LAYERS];
char HEF_FILES[HEF_COUNT][MAX_HEF_PATH_LEN] = {"hefs/multi_network_shortcut_net.hef", "hefs/shortcut_net.hef"}; char HEF_FILES[HEF_COUNT][MAX_HEF_PATH_LEN] = {"hefs/multi_network_shortcut_net.hef", "hefs/shortcut_net.hef"};
// Note: default batch_size is 0, which is not used in this example
uint16_t batch_sizes[HEF_COUNT] = {BATCH_SIZE_1, BATCH_SIZE_2}; uint16_t batch_sizes[HEF_COUNT] = {BATCH_SIZE_1, BATCH_SIZE_2};
status = hailo_init_vdevice_params(&params); status = hailo_init_vdevice_params(&params);

View File

@@ -1,9 +1,9 @@
cmake_minimum_required(VERSION 3.0.0) cmake_minimum_required(VERSION 3.0.0)
find_package(Threads REQUIRED)
set(THREADS_PREFER_PTHREAD_FLAG ON) set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
find_package(HailoRT 4.14.0 EXACT REQUIRED) find_package(HailoRT 4.15.0 EXACT REQUIRED)
SET_SOURCE_FILES_PROPERTIES(switch_network_groups_manually_example.c PROPERTIES LANGUAGE C) SET_SOURCE_FILES_PROPERTIES(switch_network_groups_manually_example.c PROPERTIES LANGUAGE C)

Some files were not shown because too many files have changed in this diff Show More