Skip to content

Commit 94e9e4e

Browse files
committed
[ml service] Refactor: Decompose ml_single_open_custom function for better maintainability
This commit refactors the monolithic ml_single_open_custom function (200+ lines) into smaller, focused helper functions to improve code readability and maintainability. The main function is now structured as a clear sequence of steps, each handled by a dedicated helper function.

Signed-off-by: hyunil park <hyunil46.park@samsung.com>
1 parent 71aaab1 commit 94e9e4e

1 file changed

Lines changed: 164 additions & 100 deletions

File tree

c/src/ml-api-inference-single.c

Lines changed: 164 additions & 100 deletions
Original file line numberDiff line numberDiff line change
@@ -998,68 +998,24 @@ _ml_single_open_custom_validate_arguments (ml_single_h * single,
998998
}
999999

10001000
/**
1001-
* @brief Internal function to convert accelerator as tensor_filter property format.
1002-
* @note returned value must be freed by the caller
1003-
* @note More details on format can be found in gst_tensor_filter_install_properties() in tensor_filter_common.c.
1001+
* @brief Validate and determine NNFW type from model files
10041002
*/
1005-
char *
1006-
_ml_nnfw_to_str_prop (const ml_nnfw_hw_e hw)
1007-
{
1008-
const gchar *hw_name;
1009-
const gchar *use_accl = "true:";
1010-
gchar *str_prop = NULL;
1011-
1012-
hw_name = get_accl_hw_str (_ml_nnfw_to_accl_hw (hw));
1013-
str_prop = g_strdup_printf ("%s%s", use_accl, hw_name);
1014-
1015-
return str_prop;
1016-
}
1017-
1018-
/**
1019-
* @brief Opens an ML model with the custom options and returns the instance as a handle.
1020-
*/
1021-
int
1022-
ml_single_open_custom (ml_single_h * single, ml_single_preset * info)
1003+
static int
1004+
__validate_and_determine_nnfw (ml_single_preset * info, ml_nnfw_type_e * determined_nnfw)
10231005
{
1024-
ml_single *single_h;
1025-
GObject *filter_obj;
1026-
int status = ML_ERROR_NONE;
1027-
ml_tensors_info_s *in_tensors_info, *out_tensors_info;
1028-
ml_nnfw_type_e nnfw;
1029-
ml_nnfw_hw_e hw;
1030-
const gchar *fw_name;
1006+
ml_nnfw_type_e nnfw = info->nnfw;
10311007
g_autofree gchar *converted_models = NULL;
10321008
gchar **list_models;
10331009
guint i, num_models;
1034-
char *hw_name;
1035-
1036-
check_feature_state (ML_FEATURE_INFERENCE);
1037-
1038-
/* Validate the params */
1039-
_ml_error_report_return_continue_iferr
1040-
(_ml_single_open_custom_validate_arguments (single, info),
1041-
"The parameter, 'info' (ml_single_preset *), cannot be validated. Please provide valid information for this object.");
1042-
1043-
/* init null */
1044-
*single = NULL;
1010+
int status;
10451011

1046-
in_tensors_info = (ml_tensors_info_s *) info->input_info;
1047-
out_tensors_info = (ml_tensors_info_s *) info->output_info;
1048-
nnfw = info->nnfw;
1049-
hw = info->hw;
1050-
fw_name = _ml_get_nnfw_subplugin_name (nnfw);
10511012
converted_models = _ml_convert_predefined_entity (info->models);
1052-
1053-
/**
1054-
* 1. Determine nnfw and validate model file
1055-
*/
10561013
list_models = g_strsplit (converted_models, ",", -1);
10571014
num_models = g_strv_length (list_models);
10581015
for (i = 0; i < num_models; i++)
10591016
g_strstrip (list_models[i]);
10601017

1061-
status = _ml_validate_model_file ((const char **) list_models, num_models,
1062-
&nnfw);
1018+
status = _ml_validate_model_file ((const char **) list_models, num_models, &nnfw);
10631019
if (status != ML_ERROR_NONE) {
10641020
_ml_error_report_continue
10651021
("Cannot validate the model (1st model: %s. # models: %d). Error code: %d",
@@ -1070,36 +1026,53 @@ ml_single_open_custom (ml_single_h * single, ml_single_preset * info)
10701026

10711027
g_strfreev (list_models);
10721028

1073-
/**
1074-
* 2. Determine hw
1075-
* (Supposed CPU only) Support others later.
1076-
*/
1077-
if (!_ml_nnfw_is_available (nnfw, hw)) {
1029+
/* Check hardware availability */
1030+
if (!_ml_nnfw_is_available (nnfw, info->hw)) {
10781031
_ml_error_report_return (ML_ERROR_NOT_SUPPORTED,
10791032
"The given nnfw, '%s', is not supported. There is no corresponding tensor-filter subplugin available or the given hardware requirement is not supported for the given nnfw.",
1080-
fw_name);
1033+
_ml_get_nnfw_subplugin_name (nnfw));
10811034
}
10821035

1083-
/* Create ml_single object */
1084-
if ((single_h = ml_single_create_handle (nnfw)) == NULL) {
1085-
_ml_error_report_return_continue (ML_ERROR_OUT_OF_MEMORY,
1086-
"Cannot create handle for the given nnfw, %s", fw_name);
1087-
}
1036+
*determined_nnfw = nnfw;
1037+
return ML_ERROR_NONE;
1038+
}
10881039

1040+
/**
1041+
* @brief Configure single handle with basic settings
1042+
*/
1043+
static int
1044+
__configure_handle (ml_single * single_h, ml_single_preset * info)
1045+
{
10891046
single_h->invoke_dynamic = info->invoke_dynamic;
1047+
return ML_ERROR_NONE;
1048+
}
1049+
1050+
/**
1051+
* @brief Configure async settings for single handle
1052+
*/
1053+
static void
1054+
__configure_async_settings (ml_single * single_h, ml_single_preset * info)
1055+
{
10901056
single_h->invoke_async = info->invoke_async;
10911057
single_h->invoke_async_cb = info->invoke_async_cb;
10921058
single_h->invoke_async_pdata = info->invoke_async_pdata;
1059+
}
10931060

1094-
filter_obj = G_OBJECT (single_h->filter);
1061+
/**
1062+
* @brief Configure NNFW-specific tensor information
1063+
*/
1064+
static int
1065+
__configure_nnfw_tensors (ml_single * single_h, ml_single_preset * info,
1066+
const char *fw_name)
1067+
{
1068+
GObject *filter_obj = G_OBJECT (single_h->filter);
1069+
ml_tensors_info_s *in_tensors_info = (ml_tensors_info_s *) info->input_info;
1070+
ml_tensors_info_s *out_tensors_info = (ml_tensors_info_s *) info->output_info;
1071+
int status = ML_ERROR_NONE;
10951072

1096-
/**
1097-
* 3. Construct a direct connection with the nnfw.
1098-
* Note that we do not construct a pipeline since 2019.12.
1099-
*/
1100-
if (nnfw == ML_NNFW_TYPE_TENSORFLOW || nnfw == ML_NNFW_TYPE_SNAP ||
1101-
nnfw == ML_NNFW_TYPE_PYTORCH || nnfw == ML_NNFW_TYPE_TRIX_ENGINE ||
1102-
nnfw == ML_NNFW_TYPE_NCNN) {
1073+
if (info->nnfw == ML_NNFW_TYPE_TENSORFLOW || info->nnfw == ML_NNFW_TYPE_SNAP ||
1074+
info->nnfw == ML_NNFW_TYPE_PYTORCH || info->nnfw == ML_NNFW_TYPE_TRIX_ENGINE ||
1075+
info->nnfw == ML_NNFW_TYPE_NCNN) {
11031076
/* set input and output tensors information */
11041077
if (in_tensors_info && out_tensors_info) {
11051078
status =
@@ -1108,7 +1081,7 @@ ml_single_open_custom (ml_single_h * single, ml_single_preset * info)
11081081
_ml_error_report_continue
11091082
("Input tensors info is given; however, failed to set input tensors info. Error code: %d",
11101083
status);
1111-
goto error;
1084+
return status;
11121085
}
11131086

11141087
status =
@@ -1118,16 +1091,15 @@ ml_single_open_custom (ml_single_h * single, ml_single_preset * info)
11181091
_ml_error_report_continue
11191092
("Output tensors info is given; however, failed to set output tensors info. Error code: %d",
11201093
status);
1121-
goto error;
1094+
return status;
11221095
}
11231096
} else {
11241097
_ml_error_report
11251098
("To run the given nnfw, '%s', with a neural network model, both input and output information should be provided.",
11261099
fw_name);
1127-
status = ML_ERROR_INVALID_PARAMETER;
1128-
goto error;
1100+
return ML_ERROR_INVALID_PARAMETER;
11291101
}
1130-
} else if (nnfw == ML_NNFW_TYPE_ARMNN) {
1102+
} else if (info->nnfw == ML_NNFW_TYPE_ARMNN) {
11311103
/* set input and output tensors information, if available */
11321104
if (in_tensors_info) {
11331105
status =
@@ -1136,7 +1108,7 @@ ml_single_open_custom (ml_single_h * single, ml_single_preset * info)
11361108
_ml_error_report_continue
11371109
("With nnfw '%s', input tensors info is optional. However, the user has provided an invalid input tensors info. Error code: %d",
11381110
fw_name, status);
1139-
goto error;
1111+
return status;
11401112
}
11411113
}
11421114
if (out_tensors_info) {
@@ -1147,35 +1119,134 @@ ml_single_open_custom (ml_single_h * single, ml_single_preset * info)
11471119
_ml_error_report_continue
11481120
("With nnfw '%s', output tensors info is optional. However, the user has provided an invalid output tensors info. Error code: %d",
11491121
fw_name, status);
1150-
goto error;
1122+
return status;
11511123
}
11521124
}
11531125
}
11541126

1155-
/* set accelerator, framework, model files and custom option */
1156-
if (info->fw_name) {
1157-
fw_name = (const char *) info->fw_name;
1158-
} else {
1159-
fw_name = _ml_get_nnfw_subplugin_name (nnfw); /* retry for "auto" */
1160-
}
1161-
hw_name = _ml_nnfw_to_str_prop (hw);
1127+
return ML_ERROR_NONE;
1128+
}
11621129

1163-
g_object_set (filter_obj, "framework", fw_name, "accelerator", hw_name,
1164-
"model", converted_models, "invoke-dynamic", single_h->invoke_dynamic,
1130+
/**
1131+
* @brief Configure filter properties
1132+
*/
1133+
static void
1134+
__configure_filter_properties (ml_single * single_h, ml_single_preset * info,
1135+
const char *fw_name, char *hw_name)
1136+
{
1137+
g_autofree gchar *converted_models = NULL;
1138+
1139+
converted_models = _ml_convert_predefined_entity (info->models);
1140+
1141+
g_object_set (G_OBJECT (single_h->filter), "framework", fw_name,
1142+
"accelerator", hw_name, "model", converted_models,
1143+
"invoke-dynamic", single_h->invoke_dynamic,
11651144
"invoke-async", single_h->invoke_async, NULL);
1166-
g_free (hw_name);
11671145

11681146
if (info->custom_option) {
1169-
g_object_set (filter_obj, "custom", info->custom_option, NULL);
1147+
g_object_set (G_OBJECT (single_h->filter), "custom", info->custom_option, NULL);
11701148
}
11711149

11721150
/* Set async callback. */
11731151
if (single_h->invoke_async) {
11741152
single_h->klass->set_invoke_async_callback (single_h->filter,
11751153
ml_single_async_cb, single_h);
11761154
}
1155+
}
1156+
1157+
/**
1158+
* @brief Setup input/output tensors information
1159+
*/
1160+
static int
1161+
__setup_inout_info (ml_single * single_h, ml_single_preset * info)
1162+
{
1163+
ml_tensors_info_s *in_tensors_info = (ml_tensors_info_s *) info->input_info;
1164+
ml_tensors_info_s *out_tensors_info = (ml_tensors_info_s *) info->output_info;
1165+
1166+
if (!ml_single_set_info_in_handle (single_h, TRUE, in_tensors_info)) {
1167+
_ml_error_report
1168+
("The input tensors info is invalid. Cannot configure single_h handle with the given input tensors info.");
1169+
return ML_ERROR_INVALID_PARAMETER;
1170+
}
1171+
1172+
if (!ml_single_set_info_in_handle (single_h, FALSE, out_tensors_info)) {
1173+
_ml_error_report
1174+
("The output tensors info is invalid. Cannot configure single_h handle with the given output tensors info.");
1175+
return ML_ERROR_INVALID_PARAMETER;
1176+
}
1177+
1178+
return ML_ERROR_NONE;
1179+
}
1180+
1181+
/**
1182+
* @brief Internal function to convert accelerator as tensor_filter property format.
1183+
* @note returned value must be freed by the caller
1184+
* @note More details on format can be found in gst_tensor_filter_install_properties() in tensor_filter_common.c.
1185+
*/
1186+
char *
1187+
_ml_nnfw_to_str_prop (const ml_nnfw_hw_e hw)
1188+
{
1189+
const gchar *hw_name;
1190+
const gchar *use_accl = "true:";
1191+
gchar *str_prop = NULL;
1192+
1193+
hw_name = get_accl_hw_str (_ml_nnfw_to_accl_hw (hw));
1194+
str_prop = g_strdup_printf ("%s%s", use_accl, hw_name);
1195+
1196+
return str_prop;
1197+
}
1198+
1199+
/**
1200+
* @brief Opens an ML model with the custom options and returns the instance as a handle.
1201+
*/
1202+
int
1203+
ml_single_open_custom (ml_single_h * single, ml_single_preset * info)
1204+
{
1205+
ml_single *single_h;
1206+
int status = ML_ERROR_NONE;
1207+
ml_nnfw_type_e nnfw;
1208+
const gchar *fw_name;
1209+
char *hw_name;
1210+
1211+
check_feature_state (ML_FEATURE_INFERENCE);
1212+
1213+
/* Validate the params */
1214+
_ml_error_report_return_continue_iferr
1215+
(_ml_single_open_custom_validate_arguments (single, info),
1216+
"The parameter, 'info' (ml_single_preset *), cannot be validated. Please provide valid information for this object.");
1217+
1218+
/* init null */
1219+
*single = NULL;
1220+
1221+
status = __validate_and_determine_nnfw (info, &nnfw);
1222+
if (status != ML_ERROR_NONE) {
1223+
return status;
1224+
}
1225+
1226+
fw_name = _ml_get_nnfw_subplugin_name (nnfw);
1227+
1228+
/* Create ml_single object */
1229+
if ((single_h = ml_single_create_handle (nnfw)) == NULL) {
1230+
_ml_error_report_return_continue (ML_ERROR_OUT_OF_MEMORY,
1231+
"Cannot create handle for the given nnfw, %s", fw_name);
1232+
}
1233+
1234+
status = __configure_handle (single_h, info);
1235+
if (status != ML_ERROR_NONE) {
1236+
goto error;
1237+
}
1238+
1239+
__configure_async_settings (single_h, info);
1240+
1241+
status = __configure_nnfw_tensors (single_h, info, fw_name);
1242+
if (status != ML_ERROR_NONE) {
1243+
goto error;
1244+
}
1245+
1246+
hw_name = _ml_nnfw_to_str_prop (info->hw);
1247+
__configure_filter_properties (single_h, info, fw_name, hw_name);
1248+
g_free (hw_name);
11771249

1178-
/* 4. Start the nnfw to get inout configurations if needed */
11791250
if (!single_h->klass->start (single_h->filter)) {
11801251
_ml_error_report
11811252
("Failed to start NNFW, '%s', to get inout configurations. Subplugin class method has failed to start.",
@@ -1185,6 +1256,9 @@ ml_single_open_custom (ml_single_h * single, ml_single_preset * info)
11851256
}
11861257

11871258
if (nnfw == ML_NNFW_TYPE_NNTR_INF) {
1259+
ml_tensors_info_s *in_tensors_info = (ml_tensors_info_s *) info->input_info;
1260+
ml_tensors_info_s *out_tensors_info = (ml_tensors_info_s *) info->output_info;
1261+
11881262
if (!in_tensors_info || !out_tensors_info) {
11891263
if (!in_tensors_info) {
11901264
GstTensorsInfo in_info;
@@ -1211,18 +1285,8 @@ ml_single_open_custom (ml_single_h * single, ml_single_preset * info)
12111285
}
12121286
}
12131287

1214-
/* 5. Set in/out configs and metadata */
1215-
if (!ml_single_set_info_in_handle (single_h, TRUE, in_tensors_info)) {
1216-
_ml_error_report
1217-
("The input tensors info is invalid. Cannot configure single_h handle with the given input tensors info.");
1218-
status = ML_ERROR_INVALID_PARAMETER;
1219-
goto error;
1220-
}
1221-
1222-
if (!ml_single_set_info_in_handle (single_h, FALSE, out_tensors_info)) {
1223-
_ml_error_report
1224-
("The output tensors info is invalid. Cannot configure single_h handle with the given output tensors info.");
1225-
status = ML_ERROR_INVALID_PARAMETER;
1288+
status = __setup_inout_info (single_h, info);
1289+
if (status != ML_ERROR_NONE) {
12261290
goto error;
12271291
}
12281292

0 commit comments

Comments (0)