@@ -819,75 +819,75 @@ cpp::result<StartModelResult, std::string> ModelService::StartModel(
   constexpr const int kDefautlContextLength = 8192;
   int max_model_context_length = kDefautlContextLength;
   Json::Value json_data;
-  auto model_entry = db_service_->GetModelInfo(model_handle);
-  if (model_entry.has_error()) {
-    CTL_WRN("Error: " + model_entry.error());
-    return cpp::fail(model_entry.error());
-  }
-  yaml_handler.ModelConfigFromFile(
-      fmu::ToAbsoluteCortexDataPath(
-          fs::path(model_entry.value().path_to_model_yaml))
-          .string());
-  auto mc = yaml_handler.GetModelConfig();
-
-  // Check if Python model first
-  if (mc.engine == kPythonEngine) {
-
-    config::PythonModelConfig python_model_config;
-    python_model_config.ReadFromYaml(
-
+  // Currently we don't support downloading vision models, so we need to bypass the check
+  if (!bypass_model_check) {
+    auto model_entry = db_service_->GetModelInfo(model_handle);
+    if (model_entry.has_error()) {
+      CTL_WRN("Error: " + model_entry.error());
+      return cpp::fail(model_entry.error());
+    }
+    yaml_handler.ModelConfigFromFile(
         fmu::ToAbsoluteCortexDataPath(
             fs::path(model_entry.value().path_to_model_yaml))
             .string());
-    // Start all dependent models
-    auto depends = python_model_config.depends;
-    for (auto& depend : depends) {
-      Json::Value temp;
-      auto res = StartModel(depend, temp, false);
-      if (res.has_error()) {
-        CTL_WRN("Error: " + res.error());
-        for (auto& depend : depends) {
-          if (depend != model_handle) {
-            StopModel(depend);
+    auto mc = yaml_handler.GetModelConfig();
+
+    // Check if Python model first
+    if (mc.engine == kPythonEngine) {
+
+      config::PythonModelConfig python_model_config;
+      python_model_config.ReadFromYaml(
+
+          fmu::ToAbsoluteCortexDataPath(
+              fs::path(model_entry.value().path_to_model_yaml))
+              .string());
+      // Start all dependent models
+      auto depends = python_model_config.depends;
+      for (auto& depend : depends) {
+        Json::Value temp;
+        auto res = StartModel(depend, temp, false);
+        if (res.has_error()) {
+          CTL_WRN("Error: " + res.error());
+          for (auto& depend : depends) {
+            if (depend != model_handle) {
+              StopModel(depend);
+            }
           }
+          return cpp::fail("Model failed to start dependency '" + depend +
+                           "' : " + res.error());
         }
-        return cpp::fail("Model failed to start dependency '" + depend +
-                         "' : " + res.error());
       }
-    }

-    json_data["model"] = model_handle;
-    json_data["model_path"] =
-        fmu::ToAbsoluteCortexDataPath(
-            fs::path(model_entry.value().path_to_model_yaml))
-            .string();
-    json_data["engine"] = mc.engine;
-    assert(!!inference_svc_);
-    // Check if python engine
-
-    auto ir =
-        inference_svc_->LoadModel(std::make_shared<Json::Value>(json_data));
-    auto status = std::get<0>(ir)["status_code"].asInt();
-    auto data = std::get<1>(ir);
-
-    if (status == drogon::k200OK) {
-      return StartModelResult{.success = true, .warning = ""};
-    } else if (status == drogon::k409Conflict) {
-      CTL_INF("Model '" + model_handle + "' is already loaded");
-      return StartModelResult{.success = true, .warning = ""};
-    } else {
-      // Only report the error to the user
-      for (auto& depend : depends) {
+      json_data["model"] = model_handle;
+      json_data["model_path"] =
+          fmu::ToAbsoluteCortexDataPath(
+              fs::path(model_entry.value().path_to_model_yaml))
+              .string();
+      json_data["engine"] = mc.engine;
+      assert(!!inference_svc_);
+      // Check if python engine
+
+      auto ir =
+          inference_svc_->LoadModel(std::make_shared<Json::Value>(json_data));
+      auto status = std::get<0>(ir)["status_code"].asInt();
+      auto data = std::get<1>(ir);

-        StopModel(depend);
+      if (status == drogon::k200OK) {
+        return StartModelResult{.success = true, .warning = ""};
+      } else if (status == drogon::k409Conflict) {
+        CTL_INF("Model '" + model_handle + "' is already loaded");
+        return StartModelResult{.success = true, .warning = ""};
+      } else {
+        // Only report the error to the user
+        for (auto& depend : depends) {
+
+          StopModel(depend);
+        }
       }
+      CTL_ERR("Model failed to start with status code: " << status);
+      return cpp::fail("Model failed to start: " +
+                       data["message"].asString());
     }
-    CTL_ERR("Model failed to start with status code: " << status);
-    return cpp::fail("Model failed to start: " + data["message"].asString());
-  }
-
-  // Currently we don't support downloading vision models, so we need to bypass the check
-  if (!bypass_model_check) {

    // Running remote model
    if (engine_svc_->IsRemoteEngine(mc.engine)) {
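
The core behavior this hunk adds for Python models is the dependency bootstrap: each entry in depends is started recursively via StartModel(depend, temp, false), and the first failure triggers a rollback that stops every dependency except the parent before propagating the error. Below is a minimal, self-contained sketch of that pattern; StartOne, StopOne, and StartDependencies are hypothetical stand-ins for illustration, not the real cortex.cpp API.

// Standalone sketch of the dependency bootstrap above (hypothetical names).
#include <iostream>
#include <optional>
#include <string>
#include <vector>

// Returns an error message on failure, std::nullopt on success.
std::optional<std::string> StartOne(const std::string& name) {
  if (name == "bad-dep") return "engine not found";  // simulated failure
  std::cout << "started " << name << "\n";
  return std::nullopt;
}

void StopOne(const std::string& name) {
  std::cout << "stopped " << name << "\n";
}

// Mirrors the hunk's rollback: on the first failed dependency, stop every
// entry in the list except the parent model (including ones not started
// yet, exactly as the inner loop in the diff does) and propagate the error.
std::optional<std::string> StartDependencies(
    const std::string& parent, const std::vector<std::string>& depends) {
  for (const auto& depend : depends) {
    if (auto err = StartOne(depend)) {
      for (const auto& started : depends) {
        if (started != parent) StopOne(started);
      }
      return "Model failed to start dependency '" + depend + "' : " + *err;
    }
  }
  return std::nullopt;
}

int main() {
  if (auto err = StartDependencies("parent", {"tokenizer", "bad-dep"})) {
    std::cerr << *err << "\n";
  }
}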
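The load path at the end of the hunk is also worth noting: the engine's reply comes back as a (status, body) tuple, and both k200OK and k409Conflict are mapped to success, which makes starting an already-running model effectively idempotent. Here is a compressed sketch of that mapping, assuming a simplified (int, std::string) pair in place of the real InferenceService::LoadModel result; FakeLoadModel and Start are illustrative assumptions only.

// Sketch of the status-code mapping after LoadModel (hypothetical names).
#include <iostream>
#include <string>
#include <utility>

constexpr int k200OK = 200;        // loaded just now
constexpr int k409Conflict = 409;  // already loaded -> still a success

struct StartModelResult {
  bool success;
  std::string warning;
};

// Stand-in for inference_svc_->LoadModel(...), which in the real service
// returns a (status_code, body) tuple.
std::pair<int, std::string> FakeLoadModel(const std::string& model) {
  return model == "busy" ? std::pair{k409Conflict, std::string{}}
                         : std::pair{k200OK, std::string{}};
}

StartModelResult Start(const std::string& model) {
  auto [status, message] = FakeLoadModel(model);
  if (status == k200OK) return {true, ""};
  if (status == k409Conflict) {
    std::cout << "Model '" << model << "' is already loaded\n";
    return {true, ""};
  }
  // Any other status: the caller would roll back dependencies here and
  // surface the engine's message, as the diff does with cpp::fail.
  std::cerr << "Model failed to start: " << message << "\n";
  return {false, message};
}

int main() {
  auto r1 = Start("llama3");  // 200 -> success
  auto r2 = Start("busy");    // 409 -> success with an informational log
  std::cout << r1.success << " " << r2.success << "\n";
}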