From 53309baeb2f84709dd33158db407346481fa1632 Mon Sep 17 00:00:00 2001 From: hyunil park Date: Wed, 30 Oct 2024 14:11:49 +0900 Subject: [PATCH] Move node_type_e, node_info_s, and pipeline_sink_cb to ml_service - ml_service_extension and ml_service_training_offloading respectively defined node_type_e, node_info_s, and pipeline_sink_cb for using pipeline sink callback. Now these are moved to ml_service and changed to be used together. Signed-off-by: hyunil park --- c/src/ml-api-service-extension.c | 81 ++++++---------------- c/src/ml-api-service-private.h | 32 ++++++++- c/src/ml-api-service-training-offloading.c | 70 ++++--------------- c/src/ml-api-service.c | 22 +++++- 4 files changed, 86 insertions(+), 119 deletions(-) diff --git a/c/src/ml-api-service-extension.c b/c/src/ml-api-service-extension.c index dad9a228..bf453aec 100644 --- a/c/src/ml-api-service-extension.c +++ b/c/src/ml-api-service-extension.c @@ -34,30 +34,6 @@ typedef enum ML_EXTENSION_TYPE_MAX } ml_extension_type_e; -/** - * @brief Internal enumeration for the node type in pipeline. - */ -typedef enum -{ - ML_EXTENSION_NODE_TYPE_UNKNOWN = 0, - ML_EXTENSION_NODE_TYPE_INPUT = 1, - ML_EXTENSION_NODE_TYPE_OUTPUT = 2, - - ML_EXTENSION_NODE_TYPE_MAX -} ml_extension_node_type_e; - -/** - * @brief Internal structure of the node info in pipeline. - */ -typedef struct -{ - gchar *name; - ml_extension_node_type_e type; - ml_tensors_info_h info; - void *handle; - void *mls; -} ml_extension_node_info_s; - /** * @brief Internal structure of the message in ml-service extension handle. */ @@ -94,12 +70,12 @@ typedef struct /** * @brief Internal function to create node info in pipeline. 
*/ -static ml_extension_node_info_s * +static ml_service_node_info_s * _ml_extension_node_info_new (ml_service_s * mls, const gchar * name, - ml_extension_node_type_e type) + ml_service_node_type_e type) { ml_extension_s *ext = (ml_extension_s *) mls->priv; - ml_extension_node_info_s *node_info; + ml_service_node_info_s *node_info; if (!STR_IS_VALID (name)) { _ml_error_report_return (NULL, @@ -111,7 +87,7 @@ _ml_extension_node_info_new (ml_service_s * mls, const gchar * name, "Cannot add duplicated node '%s' in ml-service pipeline.", name); } - node_info = g_try_new0 (ml_extension_node_info_s, 1); + node_info = g_try_new0 (ml_service_node_info_s, 1); if (!node_info) { _ml_error_report_return (NULL, "Failed to allocate new memory for node info in ml-service pipeline. Out of memory?"); @@ -132,7 +108,7 @@ _ml_extension_node_info_new (ml_service_s * mls, const gchar * name, static void _ml_extension_node_info_free (gpointer data) { - ml_extension_node_info_s *node_info = (ml_extension_node_info_s *) data; + ml_service_node_info_s *node_info = (ml_service_node_info_s *) data; if (!node_info) return; @@ -147,7 +123,7 @@ _ml_extension_node_info_free (gpointer data) /** * @brief Internal function to get the node info in ml-service extension. */ -static ml_extension_node_info_s * +static ml_service_node_info_s * _ml_extension_node_info_get (ml_extension_s * ext, const gchar * name) { if (!STR_IS_VALID (name)) @@ -156,19 +132,6 @@ _ml_extension_node_info_get (ml_extension_s * ext, const gchar * name) return g_hash_table_lookup (ext->node_table, name); } -/** - * @brief Internal callback for sink node in pipeline description. 
- */ -static void -_ml_extension_pipeline_sink_cb (const ml_tensors_data_h data, - const ml_tensors_info_h info, void *user_data) -{ - ml_extension_node_info_s *node_info = (ml_extension_node_info_s *) user_data; - ml_service_s *mls = (ml_service_s *) node_info->mls; - - _ml_service_invoke_event_new_data (mls, node_info->name, data); -} - /** * @brief Internal function to release ml-service extension message. */ @@ -217,7 +180,7 @@ _ml_extension_msg_thread (gpointer data) status = ml_single_invoke (ext->single, msg->input, &msg->output); if (status == ML_ERROR_NONE) { - _ml_service_invoke_event_new_data (mls, NULL, msg->output); + ml_service_invoke_event_new_data (mls, NULL, msg->output); } else { _ml_error_report ("Failed to invoke the model in ml-service extension thread."); @@ -226,11 +189,11 @@ _ml_extension_msg_thread (gpointer data) } case ML_EXTENSION_TYPE_PIPELINE: { - ml_extension_node_info_s *node_info; + ml_service_node_info_s *node_info; node_info = _ml_extension_node_info_get (ext, msg->name); - if (node_info && node_info->type == ML_EXTENSION_NODE_TYPE_INPUT) { + if (node_info && node_info->type == ML_SERVICE_NODE_TYPE_INPUT) { /* The input data will be released in the pipeline. 
*/ status = ml_pipeline_src_input_data (node_info->handle, msg->input, ML_PIPELINE_BUF_POLICY_AUTO_FREE); @@ -389,7 +352,7 @@ _ml_extension_conf_parse_single (ml_service_s * mls, JsonObject * single) */ static int _ml_extension_conf_parse_pipeline_node (ml_service_s * mls, JsonNode * node, - ml_extension_node_type_e type) + ml_service_node_type_e type) { ml_extension_s *ext = (ml_extension_s *) mls->priv; JsonArray *array = NULL; @@ -405,7 +368,7 @@ _ml_extension_conf_parse_pipeline_node (ml_service_s * mls, JsonNode * node, for (i = 0; i < n; i++) { const gchar *name = NULL; - ml_extension_node_info_s *node_info; + ml_service_node_info_s *node_info; if (array) object = json_array_get_object_element (array, i); @@ -435,13 +398,13 @@ _ml_extension_conf_parse_pipeline_node (ml_service_s * mls, JsonNode * node, } switch (type) { - case ML_EXTENSION_NODE_TYPE_INPUT: + case ML_SERVICE_NODE_TYPE_INPUT: status = ml_pipeline_src_get_handle (ext->pipeline, name, &node_info->handle); break; - case ML_EXTENSION_NODE_TYPE_OUTPUT: + case ML_SERVICE_NODE_TYPE_OUTPUT: status = ml_pipeline_sink_register (ext->pipeline, name, - _ml_extension_pipeline_sink_cb, node_info, &node_info->handle); + ml_service_pipeline_sink_cb, node_info, &node_info->handle); break; default: status = ML_ERROR_INVALID_PARAMETER; @@ -499,7 +462,7 @@ _ml_extension_conf_parse_pipeline (ml_service_s * mls, JsonObject * pipe) JsonNode *node = json_object_get_member (pipe, "input_node"); status = _ml_extension_conf_parse_pipeline_node (mls, node, - ML_EXTENSION_NODE_TYPE_INPUT); + ML_SERVICE_NODE_TYPE_INPUT); if (status != ML_ERROR_NONE) { _ml_error_report_return (status, "Failed to parse configuration file, cannot get the input node."); @@ -513,7 +476,7 @@ _ml_extension_conf_parse_pipeline (ml_service_s * mls, JsonObject * pipe) JsonNode *node = json_object_get_member (pipe, "output_node"); status = _ml_extension_conf_parse_pipeline_node (mls, node, - ML_EXTENSION_NODE_TYPE_OUTPUT); + 
ML_SERVICE_NODE_TYPE_OUTPUT); if (status != ML_ERROR_NONE) { _ml_error_report_return (status, "Failed to parse configuration file, cannot get the output node."); @@ -720,11 +683,11 @@ _ml_service_extension_get_input_information (ml_service_s * mls, break; case ML_EXTENSION_TYPE_PIPELINE: { - ml_extension_node_info_s *node_info; + ml_service_node_info_s *node_info; node_info = _ml_extension_node_info_get (ext, name); - if (node_info && node_info->type == ML_EXTENSION_NODE_TYPE_INPUT) { + if (node_info && node_info->type == ML_SERVICE_NODE_TYPE_INPUT) { status = _ml_tensors_info_create_from (node_info->info, info); } else { status = ML_ERROR_INVALID_PARAMETER; @@ -755,11 +718,11 @@ _ml_service_extension_get_output_information (ml_service_s * mls, break; case ML_EXTENSION_TYPE_PIPELINE: { - ml_extension_node_info_s *node_info; + ml_service_node_info_s *node_info; node_info = _ml_extension_node_info_get (ext, name); - if (node_info && node_info->type == ML_EXTENSION_NODE_TYPE_OUTPUT) { + if (node_info && node_info->type == ML_SERVICE_NODE_TYPE_OUTPUT) { status = _ml_tensors_info_create_from (node_info->info, info); } else { status = ML_ERROR_INVALID_PARAMETER; @@ -813,7 +776,7 @@ _ml_service_extension_request (ml_service_s * mls, const char *name, int status, len; if (ext->type == ML_EXTENSION_TYPE_PIPELINE) { - ml_extension_node_info_s *node_info; + ml_service_node_info_s *node_info; if (!STR_IS_VALID (name)) { _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, @@ -822,7 +785,7 @@ _ml_service_extension_request (ml_service_s * mls, const char *name, node_info = _ml_extension_node_info_get (ext, name); - if (!node_info || node_info->type != ML_EXTENSION_NODE_TYPE_INPUT) { + if (!node_info || node_info->type != ML_SERVICE_NODE_TYPE_INPUT) { _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, "The parameter, name '%s', is invalid, cannot find the input node from pipeline.", name); diff --git a/c/src/ml-api-service-private.h b/c/src/ml-api-service-private.h index 
68ad2689..b7fdff35 100644 --- a/c/src/ml-api-service-private.h +++ b/c/src/ml-api-service-private.h @@ -48,6 +48,31 @@ typedef enum ML_SERVICE_TYPE_MAX } ml_service_type_e; +/** + * @brief Enumeration for the node type in pipeline. + */ +typedef enum +{ + ML_SERVICE_NODE_TYPE_UNKNOWN = 0, + ML_SERVICE_NODE_TYPE_INPUT = 1, + ML_SERVICE_NODE_TYPE_OUTPUT = 2, + ML_SERVICE_NODE_TYPE_TRAINING = 3, + + ML_SERVICE_NODE_TYPE_MAX +} ml_service_node_type_e; + +/** + * @brief Structure of the node info in pipeline. + */ +typedef struct +{ + gchar *name; + ml_service_node_type_e type; + ml_tensors_info_h info; + void *handle; + void *mls; +} ml_service_node_info_s; + /** * @brief Structure for ml-service event callback. */ @@ -128,7 +153,12 @@ const gchar * _ml_service_get_json_string_member (JsonObject *object, const gcha /** * @brief Invoke ml-service event for new data. */ -int _ml_service_invoke_event_new_data (ml_service_s * mls, const char *name, const ml_tensors_data_h data); +int ml_service_invoke_event_new_data (ml_service_s * mls, const char *name, const ml_tensors_data_h data); + +/** + * @brief Callback for sink node in pipeline description. + */ +void ml_service_pipeline_sink_cb (const ml_tensors_data_h data, const ml_tensors_info_h info, void *user_data); #ifdef __cplusplus } diff --git a/c/src/ml-api-service-training-offloading.c b/c/src/ml-api-service-training-offloading.c index 96c890d0..bd26847a 100644 --- a/c/src/ml-api-service-training-offloading.c +++ b/c/src/ml-api-service-training-offloading.c @@ -39,29 +39,6 @@ typedef enum ML_TRAINING_OFFLOADING_TYPE_MAX, } ml_training_offloaing_type_e; -/** - * @brief Internal enumeration for the node type in pipeline. 
- */ -typedef enum -{ - ML_TRAINING_OFFLOADING_NODE_TYPE_UNKNOWN = 0, - ML_TRAINING_OFFLOADING_NODE_TYPE_OUTPUT, - ML_TRAINING_OFFLOADING_NODE_TYPE_TRAINING, - - ML_TRAINING_OFFLOADING_NODE_TYPE_MAX -} ml_training_offloading_node_type_e; - -/** - * @brief Internal structure of the node info in pipeline. - */ -typedef struct -{ - gchar *name; - ml_training_offloading_node_type_e type; - void *handle; - void *mls; -} ml_training_offloading_node_info_s; - /** * @brief Internal structure for ml-service training offloading handle. */ @@ -110,35 +87,14 @@ _training_offloading_get_priv (ml_service_s * mls, return ML_ERROR_NONE; } -/** - * @brief Internal callback for sink node in pipeline description. - */ -static void -_pipeline_sink_cb (const ml_tensors_data_h data, - const ml_tensors_info_h tensors_info, void *user_data) -{ - ml_service_s *mls = NULL; - ml_training_offloading_node_info_s *node_info = NULL; - - g_return_if_fail (data != NULL); - - node_info = (ml_training_offloading_node_info_s *) user_data; - g_return_if_fail (node_info != NULL); - - mls = (ml_service_s *) node_info->mls; - g_return_if_fail (mls != NULL); - - _ml_service_invoke_event_new_data (mls, node_info->name, data); -} - /** * @brief Internal function to release pipeline node info. */ static void _training_offloading_node_info_free (gpointer data) { - ml_training_offloading_node_info_s *node_info = - (ml_training_offloading_node_info_s *) data; + ml_service_node_info_s *node_info = + (ml_service_node_info_s *) data; if (!node_info) return; @@ -150,11 +106,11 @@ _training_offloading_node_info_free (gpointer data) /** * @brief Internal function to create node info in pipeline. 
*/ -static ml_training_offloading_node_info_s * +static ml_service_node_info_s * _training_offloading_node_info_new (ml_service_s * mls, - const gchar * name, ml_training_offloading_node_type_e type) + const gchar * name, ml_service_node_type_e type) { - ml_training_offloading_node_info_s *node_info; + ml_service_node_info_s *node_info; ml_training_services_s *training_s = NULL; int ret; @@ -168,7 +124,7 @@ _training_offloading_node_info_new (ml_service_s * mls, "Cannot add duplicated node '%s' in ml-service pipeline.", name); } - node_info = g_try_new0 (ml_training_offloading_node_info_s, 1); + node_info = g_try_new0 (ml_service_node_info_s, 1); if (!node_info) { _ml_error_report_return (NULL, "Failed to allocate new memory for node info in ml-service pipeline. Out of memory?"); @@ -281,14 +237,14 @@ _training_offloading_conf_parse_json (ml_service_s * mls, JsonObject * object) */ static int _training_offloading_conf_parse_pipeline_node (ml_service_s * mls, - JsonNode * node, ml_training_offloading_node_type_e type) + JsonNode * node, ml_service_node_type_e type) { int ret = ML_ERROR_NONE; guint i, array_len = 1; const gchar *name; JsonArray *array = NULL; JsonObject *node_object; - ml_training_offloading_node_info_s *node_info = NULL; + ml_service_node_info_s *node_info = NULL; ml_training_services_s *training_s = NULL; g_return_val_if_fail (node != NULL, ML_ERROR_INVALID_PARAMETER); @@ -321,13 +277,13 @@ _training_offloading_conf_parse_pipeline_node (ml_service_s * mls, } switch (type) { - case ML_TRAINING_OFFLOADING_NODE_TYPE_TRAINING: + case ML_SERVICE_NODE_TYPE_TRAINING: ret = ml_pipeline_element_get_handle (training_s->pipeline_h, name, &node_info->handle); break; - case ML_TRAINING_OFFLOADING_NODE_TYPE_OUTPUT: + case ML_SERVICE_NODE_TYPE_OUTPUT: ret = ml_pipeline_sink_register (training_s->pipeline_h, name, - _pipeline_sink_cb, node_info, &node_info->handle); + ml_service_pipeline_sink_cb, node_info, &node_info->handle); break; default: ret = 
ML_ERROR_INVALID_PARAMETER; @@ -358,7 +314,7 @@ _training_offloading_conf_parse_pipeline (ml_service_s * mls, JsonObject * pipe) if (json_object_has_member (pipe, "output_node")) { node = json_object_get_member (pipe, "output_node"); ret = _training_offloading_conf_parse_pipeline_node (mls, node, - ML_TRAINING_OFFLOADING_NODE_TYPE_OUTPUT); + ML_SERVICE_NODE_TYPE_OUTPUT); if (ret != ML_ERROR_NONE) { _ml_error_report_return (ret, "Failed to parse configuration file, cannot get the input node."); @@ -368,7 +324,7 @@ _training_offloading_conf_parse_pipeline (ml_service_s * mls, JsonObject * pipe) if (json_object_has_member (pipe, "training_node")) { node = json_object_get_member (pipe, "training_node"); ret = _training_offloading_conf_parse_pipeline_node (mls, node, - ML_TRAINING_OFFLOADING_NODE_TYPE_TRAINING); + ML_SERVICE_NODE_TYPE_TRAINING); if (ret != ML_ERROR_NONE) { _ml_error_report_return (ret, "Failed to parse configuration file, cannot get the training node."); diff --git a/c/src/ml-api-service.c b/c/src/ml-api-service.c index f08afa59..776215ea 100644 --- a/c/src/ml-api-service.c +++ b/c/src/ml-api-service.c @@ -773,7 +773,7 @@ _ml_service_get_json_string_member (JsonObject * object, * @brief Invoke ml-service event for new data. */ int -_ml_service_invoke_event_new_data (ml_service_s * mls, const char *name, +ml_service_invoke_event_new_data (ml_service_s * mls, const char *name, const ml_tensors_data_h data) { ml_service_event_cb_info_s cb_info = { 0 }; @@ -813,4 +813,22 @@ _ml_service_invoke_event_new_data (ml_service_s * mls, const char *name, } return status; -} \ No newline at end of file +} + +/** + * @brief Callback for sink node in pipeline description. 
+ */ +void +ml_service_pipeline_sink_cb (const ml_tensors_data_h data, + const ml_tensors_info_h info, void *user_data) +{ + ml_service_s *mls = NULL; + ml_service_node_info_s *node_info = NULL; + + node_info = (ml_service_node_info_s *) user_data; + g_return_if_fail (node_info != NULL); + mls = (ml_service_s *) node_info->mls; + g_return_if_fail (mls != NULL); + + ml_service_invoke_event_new_data (mls, node_info->name, data); +}