diff --git a/engine/CMakeLists.txt b/engine/CMakeLists.txt index 4a71ec612..b5adf53df 100644 --- a/engine/CMakeLists.txt +++ b/engine/CMakeLists.txt @@ -198,10 +198,8 @@ if(CMAKE_CXX_STANDARD LESS 17) message(STATUS "use c++14") find_package(Boost 1.61.0 REQUIRED) target_include_directories(${TARGET_NAME} PRIVATE ${Boost_INCLUDE_DIRS}) -elseif(CMAKE_CXX_STANDARD LESS 20) - message(STATUS "use c++17") else() - message(STATUS "use c++20") + message(STATUS "use c++17") endif() aux_source_directory(controllers CTL_SRC) diff --git a/engine/cli/command_line_parser.cc b/engine/cli/command_line_parser.cc index 3c4b0806f..a5f0363ab 100644 --- a/engine/cli/command_line_parser.cc +++ b/engine/cli/command_line_parser.cc @@ -86,6 +86,7 @@ bool CommandLineParser::SetupCommand(int argc, char** argv) { #else CLI_LOG("default"); #endif +(void) c; }; app_.add_flag_function("-v,--version", cb, "Get Cortex version"); diff --git a/engine/cli/commands/config_get_cmd.cc b/engine/cli/commands/config_get_cmd.cc index 62d9638a5..1d0a4f72e 100644 --- a/engine/cli/commands/config_get_cmd.cc +++ b/engine/cli/commands/config_get_cmd.cc @@ -17,9 +17,10 @@ void commands::ConfigGetCmd::Exec(const std::string& host, int port) { } } auto url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"v1", "configs"}, + /* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"v1", "configs"}, + /* .queries = */ {}, }; auto get_config_result = curl_utils::SimpleGetJson(url.ToFullPath()); diff --git a/engine/cli/commands/config_upd_cmd.cc b/engine/cli/commands/config_upd_cmd.cc index 9866fbfa0..37deb0571 100644 --- a/engine/cli/commands/config_upd_cmd.cc +++ b/engine/cli/commands/config_upd_cmd.cc @@ -63,9 +63,10 @@ void commands::ConfigUpdCmd::Exec( } auto url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"v1", "configs"}, + /* .protocol = */ "http", + /* 
.host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"v1", "configs"}, + /* .queries = */ {}, }; auto json = NormalizeJson(non_null_opts); diff --git a/engine/cli/commands/cortex_upd_cmd.cc b/engine/cli/commands/cortex_upd_cmd.cc index 6c8baa1a4..e11ad4290 100644 --- a/engine/cli/commands/cortex_upd_cmd.cc +++ b/engine/cli/commands/cortex_upd_cmd.cc @@ -1,4 +1,5 @@ #include "cortex_upd_cmd.h" +#include #include "cli/commands/server_start_cmd.h" #include "server_stop_cmd.h" #include "utils/archive_utils.h" @@ -27,7 +28,8 @@ std::chrono::seconds GetTimeSinceEpochMillisec() { return duration_cast(system_clock::now().time_since_epoch()); } -std::unique_ptr GetSystemInfoWithUniversal() { +[[maybe_unused]] std::unique_ptr +GetSystemInfoWithUniversal() { auto system_info = system_info_utils::GetSystemInfo(); if (system_info->os == "mac") { CTL_INF("Change arch from " << system_info->arch << " to universal"); @@ -36,8 +38,8 @@ std::unique_ptr GetSystemInfoWithUniversal() { return system_info; } -std::string GetNightlyInstallerName(const std::string& v, - const std::string& os_arch) { +[[maybe_unused]] std::string GetNightlyInstallerName( + const std::string& v, const std::string& os_arch) { const std::string kCortex = "cortex"; // Remove 'v' in file name std::string version = v == "latest" ? 
"" : (v.substr(1) + "-"); @@ -50,7 +52,7 @@ std::string GetNightlyInstallerName(const std::string& v, #endif } -std::string GetInstallCmd(const std::string& exe_path) { +[[maybe_unused]] std::string GetInstallCmd(const std::string& exe_path) { #if defined(__APPLE__) && defined(__MACH__) return "sudo touch /var/tmp/cortex_installer_skip_postinstall_check && sudo " "installer " @@ -133,6 +135,7 @@ bool InstallNewVersion(const std::filesystem::path& dst, std::optional CheckNewUpdate( std::optional timeout) { + (void)timeout; // Get info from .cortexrc auto should_check_update = false; auto config = file_manager_utils::GetCortexConfig(); @@ -152,9 +155,10 @@ std::optional CheckNewUpdate( } auto url = url_parser::Url{ - .protocol = "https", - .host = GetHostName(), - .pathParams = GetReleasePath(), + /* .protocol = */ "https", + /* .host = */ GetHostName(), + /* .pathParams = */ GetReleasePath(), + /* .queries = */ {}, }; CTL_INF("Engine release path: " << url.ToFullPath()); @@ -264,9 +268,10 @@ bool CortexUpdCmd::GetStable(const std::string& v) { CTL_INF("OS: " << system_info->os << ", Arch: " << system_info->arch); auto url_obj = url_parser::Url{ - .protocol = "https", - .host = GetHostName(), - .pathParams = GetReleasePath(), + /* .protocol = */ "https", + /* .host = */ GetHostName(), + /* .pathParams = */ GetReleasePath(), + /* .queries = */ {}, }; CTL_INF("Engine release path: " << url_obj.ToFullPath()); @@ -318,9 +323,10 @@ bool CortexUpdCmd::GetBeta(const std::string& v) { CTL_INF("OS: " << system_info->os << ", Arch: " << system_info->arch); auto url_obj = url_parser::Url{ - .protocol = "https", - .host = GetHostName(), - .pathParams = GetReleasePath(), + /* .protocol = */ "https", + /* .host = */ GetHostName(), + /* .pathParams = */ GetReleasePath(), + /* queries = */ {}, }; CTL_INF("Engine release path: " << url_obj.ToFullPath()); auto res = curl_utils::SimpleGetJson(url_obj.ToFullPath()); @@ -410,12 +416,17 @@ std::optional CortexUpdCmd::HandleGithubRelease( 
return std::nullopt; } auto download_task{DownloadTask{ - .id = "cortex", - .type = DownloadType::Cortex, - .items = {DownloadItem{ - .id = "cortex", - .downloadUrl = download_url, - .localPath = local_path, + /* .id = */ "cortex", + /* .status = */ DownloadTask::Status::Pending, + /* .type = */ DownloadType::Cortex, + /* .items = */ + {DownloadItem{ + /* .id = */ "cortex", + /* .downloadUrl = */ download_url, + /* .localPath = */ local_path, + /* .checksum = */ std::nullopt, + /* .bytes = */ std::nullopt, + /* .downloadedBytes = */ std::nullopt, }}, }}; @@ -456,9 +467,10 @@ bool CortexUpdCmd::GetNightly(const std::string& v) { }; std::vector path_list(paths, std::end(paths)); auto url_obj = url_parser::Url{ - .protocol = "https", - .host = kNightlyHost, - .pathParams = path_list, + /* .protocol = */ "https", + /* .host = */ kNightlyHost, + /* .pathParams = */ path_list, + /* .queries = */ {}, }; CTL_INF("Cortex release path: " << url_parser::FromUrl(url_obj)); @@ -474,12 +486,17 @@ bool CortexUpdCmd::GetNightly(const std::string& v) { return false; } auto download_task = - DownloadTask{.id = "cortex", - .type = DownloadType::Cortex, - .items = {DownloadItem{ - .id = "cortex", - .downloadUrl = url_parser::FromUrl(url_obj), - .localPath = localPath, + DownloadTask{/* .id = */ "cortex", + /* .status = */ DownloadTask::Status::Pending, + /* .type = */ DownloadType::Cortex, + /* .items = */ + {DownloadItem{ + /* .id = */ "cortex", + /* .downloadUrl = */ url_parser::FromUrl(url_obj), + /* .localPath = */ localPath, + /* .checksum = */ std::nullopt, + /* .bytes = */ std::nullopt, + /* .downloadedBytes = */ std::nullopt, }}}; auto result = download_service_->AddDownloadTask( @@ -522,9 +539,10 @@ bool CortexUpdCmd::GetLinuxInstallScript(const std::string& v, "templates", "linux", "install.sh"}; } auto url_obj = url_parser::Url{ - .protocol = "https", - .host = "raw.githubusercontent.com", - .pathParams = path_list, + /* .protocol = */ "https", + /* .host = */ 
"raw.githubusercontent.com", + /* .pathParams = */ path_list, + /* .queries = */ {}, }; CTL_INF("Linux installer script path: " << url_parser::FromUrl(url_obj)); @@ -540,12 +558,17 @@ bool CortexUpdCmd::GetLinuxInstallScript(const std::string& v, return false; } auto download_task = - DownloadTask{.id = "cortex", - .type = DownloadType::Cortex, - .items = {DownloadItem{ - .id = "cortex", - .downloadUrl = url_parser::FromUrl(url_obj), - .localPath = localPath, + DownloadTask{/* .id = */ "cortex", + /* .status = */ DownloadTask::Status::Pending, + /* .type = */ DownloadType::Cortex, + /* .items = */ + {DownloadItem{ + /* .id = */ "cortex", + /* .downloadUrl = */ url_parser::FromUrl(url_obj), + /* .localPath = */ localPath, + /* .checksum = */ std::nullopt, + /* .bytes = */ std::nullopt, + /* .downloadedBytes = */ std::nullopt, }}}; auto result = download_service_->AddDownloadTask( diff --git a/engine/cli/commands/engine_get_cmd.cc b/engine/cli/commands/engine_get_cmd.cc index 3fd1fd576..30001400e 100644 --- a/engine/cli/commands/engine_get_cmd.cc +++ b/engine/cli/commands/engine_get_cmd.cc @@ -28,11 +28,10 @@ void EngineGetCmd::Exec(const std::string& host, int port, tabulate::Table table; table.add_row({"#", "Name", "Version", "Variant", "Status"}); - auto url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"v1", "engines", engine_name}, - }; + auto url = url_parser::Url{/* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"v1", "engines", engine_name}, + /* .queries = */ {}}; auto result = curl_utils::SimpleGetJson(url.ToFullPath()); if (result.has_error()) { // TODO: refactor this @@ -50,9 +49,10 @@ void EngineGetCmd::Exec(const std::string& host, int port, auto installed_variants = result.value(); for (const auto& variant : installed_variants) { output.push_back(EngineVariantResponse{ - .name = variant["name"].asString(), - .version = 
variant["version"].asString(), - .engine = engine_name, + /* .name = */ variant["name"].asString(), + /* .version = */ variant["version"].asString(), + /* .engine = */ engine_name, + /* .type = */ "", }); } diff --git a/engine/cli/commands/engine_install_cmd.cc b/engine/cli/commands/engine_install_cmd.cc index 85a5def5d..bebfdb8ce 100644 --- a/engine/cli/commands/engine_install_cmd.cc +++ b/engine/cli/commands/engine_install_cmd.cc @@ -47,9 +47,10 @@ bool EngineInstallCmd::Exec(const std::string& engine, }); auto releases_url = url_parser::Url{ - .protocol = "http", - .host = host_ + ":" + std::to_string(port_), - .pathParams = {"v1", "engines", engine, "releases"}, + /* .protocol = */ "http", + /* .host = */ host_ + ":" + std::to_string(port_), + /* .pathParams = */ {"v1", "engines", engine, "releases"}, + /* .queries = */ {}, }; auto releases_result = curl_utils::SimpleGetJson(releases_url.ToFullPath()); if (releases_result.has_error()) { @@ -70,16 +71,17 @@ bool EngineInstallCmd::Exec(const std::string& engine, std::cout << "Selected version: " << selected_release.value() << std::endl; auto variant_url = url_parser::Url{ - .protocol = "http", - .host = host_ + ":" + std::to_string(port_), - .pathParams = - { - "v1", - "engines", - engine, - "releases", - selected_release.value(), - }, + /* .protocol = */ "http", + /* .host = */ host_ + ":" + std::to_string(port_), + /* .pathParams = */ + { + "v1", + "engines", + engine, + "releases", + selected_release.value(), + }, + /* queries = */ {}, }; auto variant_result = curl_utils::SimpleGetJson(variant_url.ToFullPath()); if (variant_result.has_error()) { @@ -117,15 +119,16 @@ bool EngineInstallCmd::Exec(const std::string& engine, << selected_release.value() << std::endl; auto install_url = url_parser::Url{ - .protocol = "http", - .host = host_ + ":" + std::to_string(port_), - .pathParams = - { - "v1", - "engines", - engine, - "install", - }, + /* .protocol = */ "http", + /* .host = */ host_ + ":" + 
std::to_string(port_), + /* .pathParams = */ + { + "v1", + "engines", + engine, + "install", + }, + /* .queries = */ {}, }; Json::Value body; body["version"] = selected_release.value(); @@ -160,15 +163,16 @@ bool EngineInstallCmd::Exec(const std::string& engine, }); auto install_url = url_parser::Url{ - .protocol = "http", - .host = host_ + ":" + std::to_string(port_), - .pathParams = - { - "v1", - "engines", - engine, - "install", - }, + /* .protocol = */ "http", + /* .host = */ host_ + ":" + std::to_string(port_), + /* .pathParams = */ + { + "v1", + "engines", + engine, + "install", + }, + /* .queries = */ {}, }; Json::Value body; diff --git a/engine/cli/commands/engine_install_cmd.h b/engine/cli/commands/engine_install_cmd.h index 2f318b4d7..89b6f6a89 100644 --- a/engine/cli/commands/engine_install_cmd.h +++ b/engine/cli/commands/engine_install_cmd.h @@ -13,9 +13,13 @@ class EngineInstallCmd { host_(host), port_(port), show_menu_(show_menu), - hw_inf_{.sys_inf = system_info_utils::GetSystemInfo(), - .cuda_driver_version = - system_info_utils::GetDriverAndCudaVersion().second} {}; + hw_inf_{ + system_info_utils::GetSystemInfo(), //sysinfo + {}, //cpu_info + + system_info_utils::GetDriverAndCudaVersion() + .second //cuda_driver_version + } {}; bool Exec(const std::string& engine, const std::string& version = "latest", const std::string& src = ""); diff --git a/engine/cli/commands/engine_list_cmd.cc b/engine/cli/commands/engine_list_cmd.cc index 0abe32b28..6e14e98ce 100644 --- a/engine/cli/commands/engine_list_cmd.cc +++ b/engine/cli/commands/engine_list_cmd.cc @@ -27,9 +27,10 @@ bool EngineListCmd::Exec(const std::string& host, int port) { table.add_row({"#", "Name", "Version", "Variant", "Status"}); auto url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"v1", "engines"}, + /* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"v1", "engines"}, + /* .queries = */ 
{}, }; auto result = curl_utils::SimpleGetJson(url.ToFullPath()); if (result.has_error()) { @@ -45,18 +46,20 @@ bool EngineListCmd::Exec(const std::string& host, int port) { auto installed_variants = result.value()[engine]; for (const auto& variant : installed_variants) { engine_map[engine].push_back(EngineVariantResponse{ - .name = variant["name"].asString(), - .version = variant["version"].asString(), - .engine = engine, + /* .name = */ variant["name"].asString(), + /* .version = */ variant["version"].asString(), + /* .engine = */ engine, + /* .type = */ "", }); } } // TODO: namh support onnx and tensorrt auto default_engine_url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"v1", "engines", kLlamaEngine, "default"}, + /* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"v1", "engines", kLlamaEngine, "default"}, + /* .queries = */ {}, }; auto selected_variant_result = curl_utils::SimpleGetJson(default_engine_url.ToFullPath()); diff --git a/engine/cli/commands/engine_load_cmd.cc b/engine/cli/commands/engine_load_cmd.cc index 329d1b7e2..7048338ec 100644 --- a/engine/cli/commands/engine_load_cmd.cc +++ b/engine/cli/commands/engine_load_cmd.cc @@ -19,9 +19,10 @@ cpp::result EngineLoadCmd::Exec(const std::string& host, } auto load_engine_url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"v1", "engines", engine, "load"}, + /* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"v1", "engines", engine, "load"}, + /* .queries = */ {}, }; auto load_engine_result = curl_utils::SimplePostJson(load_engine_url.ToFullPath()); diff --git a/engine/cli/commands/engine_uninstall_cmd.cc b/engine/cli/commands/engine_uninstall_cmd.cc index ef9c95af8..075f20936 100644 --- a/engine/cli/commands/engine_uninstall_cmd.cc +++ b/engine/cli/commands/engine_uninstall_cmd.cc @@ -18,9 +18,11 
@@ void EngineUninstallCmd::Exec(const std::string& host, int port, } auto url = - url_parser::Url{.protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"v1", "engines", engine, "install"}}; + url_parser::Url{/* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"v1", "engines", engine, "install"}, + /* .queries = */ {}, + }; auto result = curl_utils::SimpleDeleteJson(url.ToFullPath()); if (result.has_error()) { diff --git a/engine/cli/commands/engine_unload_cmd.cc b/engine/cli/commands/engine_unload_cmd.cc index e36a64f3b..9d19ba050 100644 --- a/engine/cli/commands/engine_unload_cmd.cc +++ b/engine/cli/commands/engine_unload_cmd.cc @@ -18,9 +18,10 @@ cpp::result EngineUnloadCmd::Exec( } auto load_engine_url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"v1", "engines", engine, "load"}, + /* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"v1", "engines", engine, "load"}, + /* .queries = */ {}, }; auto load_engine_result = curl_utils::SimpleDeleteJson(load_engine_url.ToFullPath()); diff --git a/engine/cli/commands/engine_update_cmd.cc b/engine/cli/commands/engine_update_cmd.cc index a86106ed2..08a24419c 100644 --- a/engine/cli/commands/engine_update_cmd.cc +++ b/engine/cli/commands/engine_update_cmd.cc @@ -35,9 +35,10 @@ bool EngineUpdateCmd::Exec(const std::string& host, int port, }); auto update_url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"v1", "engines", engine, "update"}, + /* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"v1", "engines", engine, "update"}, + /* .queries = */ {}, }; auto update_result = curl_utils::SimplePostJson(update_url.ToFullPath()); if (update_result.has_error()) { diff --git a/engine/cli/commands/engine_use_cmd.cc 
b/engine/cli/commands/engine_use_cmd.cc index 50735739d..6a1b2df79 100644 --- a/engine/cli/commands/engine_use_cmd.cc +++ b/engine/cli/commands/engine_use_cmd.cc @@ -19,9 +19,10 @@ cpp::result EngineUseCmd::Exec(const std::string& host, } auto get_installed_url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"v1", "engines", engine}, + /* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"v1", "engines", engine}, + /* .queries = */ {}, }; auto installed_variants_results = curl_utils::SimpleGetJson(get_installed_url.ToFullPath()); @@ -71,9 +72,10 @@ cpp::result EngineUseCmd::Exec(const std::string& host, body["variant"] = selected_variant.value(); body["version"] = selected_version.value(); auto set_default_engine_variant = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"v1", "engines", engine, "default"}, + /* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"v1", "engines", engine, "default"}, + /* .queries = */ {}, }; auto response = curl_utils::SimplePostJson( diff --git a/engine/cli/commands/hardware_activate_cmd.cc b/engine/cli/commands/hardware_activate_cmd.cc index 77d600233..0465a3e58 100644 --- a/engine/cli/commands/hardware_activate_cmd.cc +++ b/engine/cli/commands/hardware_activate_cmd.cc @@ -51,9 +51,10 @@ bool HardwareActivateCmd::Exec( auto data_str = body.toStyledString(); auto url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"v1", "hardware", "activate"}, + /* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"v1", "hardware", "activate"}, + /* .queries = */ {}, }; auto res = curl_utils::SimplePostJson(url.ToFullPath(), data_str); diff --git a/engine/cli/commands/hardware_list_cmd.cc b/engine/cli/commands/hardware_list_cmd.cc index 
5a67cea8b..6d57c9b53 100644 --- a/engine/cli/commands/hardware_list_cmd.cc +++ b/engine/cli/commands/hardware_list_cmd.cc @@ -28,9 +28,10 @@ bool HardwareListCmd::Exec(const std::string& host, int port, } auto url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"v1", "hardware"}, + /* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"v1", "hardware"}, + /* .queries = */ {}, }; auto result = curl_utils::SimpleGetJson(url.ToFullPath()); if (result.has_error()) { diff --git a/engine/cli/commands/model_del_cmd.cc b/engine/cli/commands/model_del_cmd.cc index 2f46aa52a..c2b3538a6 100644 --- a/engine/cli/commands/model_del_cmd.cc +++ b/engine/cli/commands/model_del_cmd.cc @@ -18,9 +18,10 @@ void ModelDelCmd::Exec(const std::string& host, int port, } auto url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"v1", "models", model_handle}, + /* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"v1", "models", model_handle}, + /* .queries = */ {}, }; auto res = curl_utils::SimpleDeleteJson(url.ToFullPath()); diff --git a/engine/cli/commands/model_get_cmd.cc b/engine/cli/commands/model_get_cmd.cc index c4a400136..7cd531f56 100644 --- a/engine/cli/commands/model_get_cmd.cc +++ b/engine/cli/commands/model_get_cmd.cc @@ -19,9 +19,10 @@ void ModelGetCmd::Exec(const std::string& host, int port, } auto url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"v1", "models", model_handle}, + /* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"v1", "models", model_handle}, + /* .queries = */ {}, }; auto res = curl_utils::SimpleGetJson(url.ToFullPath()); diff --git a/engine/cli/commands/model_import_cmd.cc b/engine/cli/commands/model_import_cmd.cc index fbc01be7d..929415b72 100644 --- 
a/engine/cli/commands/model_import_cmd.cc +++ b/engine/cli/commands/model_import_cmd.cc @@ -21,9 +21,10 @@ void ModelImportCmd::Exec(const std::string& host, int port, } auto url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"v1", "models", "import"}, + /* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"v1", "models", "import"}, + /* .queries = */ {}, }; Json::Value json_data; diff --git a/engine/cli/commands/model_list_cmd.cc b/engine/cli/commands/model_list_cmd.cc index 96ff2885d..a6a3b97c0 100644 --- a/engine/cli/commands/model_list_cmd.cc +++ b/engine/cli/commands/model_list_cmd.cc @@ -53,9 +53,10 @@ void ModelListCmd::Exec(const std::string& host, int port, // Iterate through directory auto url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"v1", "models"}, + /* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"v1", "models"}, + /* .queries = */ {}, }; auto res = curl_utils::SimpleGetJson(url.ToFullPath()); diff --git a/engine/cli/commands/model_pull_cmd.cc b/engine/cli/commands/model_pull_cmd.cc index 75c0ce1a0..b20d7596e 100644 --- a/engine/cli/commands/model_pull_cmd.cc +++ b/engine/cli/commands/model_pull_cmd.cc @@ -37,9 +37,10 @@ std::optional ModelPullCmd::Exec(const std::string& host, int port, } auto model_info_url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"models", "pull", "info"}, + /* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"models", "pull", "info"}, + /* .queries = */ {}, }; Json::Value j_data; j_data["model"] = input; @@ -96,9 +97,10 @@ std::optional ModelPullCmd::Exec(const std::string& host, int port, auto data_str = json_data.toStyledString(); auto pull_url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + 
std::to_string(port), - .pathParams = {"v1", "models", "pull"}, + /* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"v1", "models", "pull"}, + /* .queries = */ {}, }; auto pull_result = @@ -149,9 +151,10 @@ bool ModelPullCmd::AbortModelPull(const std::string& host, int port, json_data["taskId"] = task_id; auto data_str = json_data.toStyledString(); auto url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"v1", "models", "pull"}, + /* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"v1", "models", "pull"}, + /* .queries = */ {}, }; auto res = curl_utils::SimpleDeleteJson(url.ToFullPath(), data_str); diff --git a/engine/cli/commands/model_source_add_cmd.cc b/engine/cli/commands/model_source_add_cmd.cc index 2fadbe8ec..390c4f1fd 100644 --- a/engine/cli/commands/model_source_add_cmd.cc +++ b/engine/cli/commands/model_source_add_cmd.cc @@ -14,9 +14,10 @@ bool ModelSourceAddCmd::Exec(const std::string& host, int port, const std::strin } auto url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"v1", "models", "sources"}, + /* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"v1", "models", "sources"}, + /* .queries = */ {}, }; Json::Value json_data; diff --git a/engine/cli/commands/model_source_del_cmd.cc b/engine/cli/commands/model_source_del_cmd.cc index c3c1694e7..cc362e144 100644 --- a/engine/cli/commands/model_source_del_cmd.cc +++ b/engine/cli/commands/model_source_del_cmd.cc @@ -15,9 +15,10 @@ bool ModelSourceDelCmd::Exec(const std::string& host, int port, const std::strin } auto url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"v1", "models", "sources"}, + /* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ 
{"v1", "models", "sources"}, + /* .queries = */ {}, }; Json::Value json_data; diff --git a/engine/cli/commands/model_source_list_cmd.cc b/engine/cli/commands/model_source_list_cmd.cc index ae69c5aef..1d92c4e53 100644 --- a/engine/cli/commands/model_source_list_cmd.cc +++ b/engine/cli/commands/model_source_list_cmd.cc @@ -29,9 +29,10 @@ bool ModelSourceListCmd::Exec(const std::string& host, int port) { table.add_row({"#", "Model Source"}); auto url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"v1", "models", "sources"}, + /* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"v1", "models", "sources"}, + /* .queries = */ {}, }; auto result = curl_utils::SimpleGetJson(url.ToFullPath()); if (result.has_error()) { diff --git a/engine/cli/commands/model_start_cmd.cc b/engine/cli/commands/model_start_cmd.cc index ef5d5c1f2..e54f17dee 100644 --- a/engine/cli/commands/model_start_cmd.cc +++ b/engine/cli/commands/model_start_cmd.cc @@ -50,9 +50,10 @@ bool ModelStartCmd::Exec( } auto url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"v1", "models", "start"}, + /* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"v1", "models", "start"}, + /* .queries = */ {}, }; Json::Value json_data; diff --git a/engine/cli/commands/model_status_cmd.cc b/engine/cli/commands/model_status_cmd.cc index e467e4353..19a615ccc 100644 --- a/engine/cli/commands/model_status_cmd.cc +++ b/engine/cli/commands/model_status_cmd.cc @@ -17,9 +17,10 @@ bool ModelStatusCmd::IsLoaded(const std::string& host, int port, } } auto url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"v1", "models", "status", model_handle}, + /* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"v1", "models", "status", 
model_handle}, + /* .queries = */ {}, }; auto res = curl_utils::SimpleGetJson(url.ToFullPath()); diff --git a/engine/cli/commands/model_stop_cmd.cc b/engine/cli/commands/model_stop_cmd.cc index 291977dc7..8b78131c9 100644 --- a/engine/cli/commands/model_stop_cmd.cc +++ b/engine/cli/commands/model_stop_cmd.cc @@ -9,9 +9,10 @@ namespace commands { void ModelStopCmd::Exec(const std::string& host, int port, const std::string& model_handle) { auto url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"v1", "models", "stop"}, + /* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"v1", "models", "stop"}, + /* .queries = */ {}, }; Json::Value json_data; diff --git a/engine/cli/commands/model_upd_cmd.cc b/engine/cli/commands/model_upd_cmd.cc index 1572581ec..f3f8fc544 100644 --- a/engine/cli/commands/model_upd_cmd.cc +++ b/engine/cli/commands/model_upd_cmd.cc @@ -23,9 +23,10 @@ void ModelUpdCmd::Exec( } auto url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"v1", "models", model_handle_}, + /* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"v1", "models", model_handle_}, + /* .queries = */ {}, }; Json::Value json_data; @@ -314,6 +315,7 @@ void ModelUpdCmd::UpdateConfig(Json::Value& data, const std::string& key, void ModelUpdCmd::UpdateVectorField( const std::string& key, const std::string& value, std::function&)> setter) { + (void) key; std::vector tokens; std::istringstream iss(value); std::string token; @@ -337,6 +339,7 @@ void ModelUpdCmd::UpdateNumericField(const std::string& key, void ModelUpdCmd::UpdateBooleanField(const std::string& key, const std::string& value, std::function setter) { + (void) key; bool boolValue = (value == "true" || value == "1"); setter(boolValue); } diff --git a/engine/cli/commands/ps_cmd.cc b/engine/cli/commands/ps_cmd.cc index 
4f83f4f42..14816b939 100644 --- a/engine/cli/commands/ps_cmd.cc +++ b/engine/cli/commands/ps_cmd.cc @@ -12,9 +12,10 @@ namespace commands { void PsCmd::Exec(const std::string& host, int port) { auto url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"inferences", "server", "models"}, + /* .protocol = */ "http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"inferences", "server", "models"}, + /* .queries = */ {}, }; auto res = curl_utils::SimpleGetJson(url.ToFullPath()); if (res.has_error()) { diff --git a/engine/cli/commands/server_start_cmd.cc b/engine/cli/commands/server_start_cmd.cc index e2b14e70e..af2d647e2 100644 --- a/engine/cli/commands/server_start_cmd.cc +++ b/engine/cli/commands/server_start_cmd.cc @@ -189,6 +189,7 @@ void ServerStartCmd::UpdateConfig(CortexConfig& data, const std::string& key, {"port", [](CortexConfig& data, const std::string& k, const std::string& v) { data.apiServerPort = v; + (void)k; }}, {"hf-token", [](CortexConfig& data, const std::string&, const std::string& v) { @@ -283,6 +284,7 @@ void ServerStartCmd::UpdateVectorField( tokens.push_back(token); } setter(tokens); + (void)key; } void ServerStartCmd::UpdateNumericField(const std::string& key, @@ -301,6 +303,7 @@ void ServerStartCmd::UpdateBooleanField(const std::string& key, std::function setter) { bool bool_value = (value == "true" || value == "1"); setter(bool_value); + (void)key; } }; // namespace commands diff --git a/engine/cli/commands/server_start_cmd.h b/engine/cli/commands/server_start_cmd.h index 8807fc1ef..179cf196c 100644 --- a/engine/cli/commands/server_start_cmd.h +++ b/engine/cli/commands/server_start_cmd.h @@ -12,9 +12,10 @@ using CortexConfig = config_yaml_utils::CortexConfig; inline bool IsServerAlive(const std::string& host, int port) { auto url = url_parser::Url{ - .protocol = "http", - .host = host + ":" + std::to_string(port), - .pathParams = {"healthz"}, + /* .protocol = */ 
"http", + /* .host = */ host + ":" + std::to_string(port), + /* .pathParams = */ {"healthz"}, + /* .queries = */ {}, }; auto res = curl_utils::SimpleGet(url.ToFullPath()); if (res.has_error()) { diff --git a/engine/cli/commands/server_stop_cmd.cc b/engine/cli/commands/server_stop_cmd.cc index 303022174..4bfaac8cf 100644 --- a/engine/cli/commands/server_stop_cmd.cc +++ b/engine/cli/commands/server_stop_cmd.cc @@ -9,9 +9,10 @@ ServerStopCmd::ServerStopCmd(std::string host, int port) void ServerStopCmd::Exec() { auto url = url_parser::Url{ - .protocol = "http", - .host = host_ + ":" + std::to_string(port_), - .pathParams = {"processManager", "destroy"}, + /* .protocol = */ "http", + /* .host = */ host_ + ":" + std::to_string(port_), + /* .pathParams = */ {"processManager", "destroy"}, + /* .queries = */ {}, }; auto res = curl_utils::SimpleDeleteJson(url.ToFullPath()); diff --git a/engine/cli/utils/download_progress.cc b/engine/cli/utils/download_progress.cc index 07f91adb4..7538fff46 100644 --- a/engine/cli/utils/download_progress.cc +++ b/engine/cli/utils/download_progress.cc @@ -121,7 +121,7 @@ bool DownloadProgress::Handle( bars->push_back(*(items.at(i.id).second)); } } - for (int i = 0; i < ev.download_task_.items.size(); i++) { + for (int i = 0; i < (int) ev.download_task_.items.size(); i++) { auto& it = ev.download_task_.items[i]; if (ev.type_ == DownloadStatus::DownloadUpdated) { uint64_t downloaded = it.downloadedBytes.value_or(0u); diff --git a/engine/cli/utils/easywsclient.cc b/engine/cli/utils/easywsclient.cc index 5c6ed38e8..2c03c6c33 100644 --- a/engine/cli/utils/easywsclient.cc +++ b/engine/cli/utils/easywsclient.cc @@ -112,15 +112,15 @@ socket_t hostname_connect(const std::string& hostname, int port) { class _DummyWebSocket : public easywsclient::WebSocket { public: - void poll(int timeout) {} - void send(const std::string& message) {} - void sendBinary(const std::string& message) {} - void sendBinary(const std::vector& message) {} + void poll(int 
timeout) { (void)timeout; } + void send(const std::string& message) { (void)message; } + void sendBinary(const std::string& message) { (void)message; } + void sendBinary(const std::vector& message) { (void)message; } void sendPing() {} void close() {} readyStateValues getReadyState() const { return CLOSED; } - void _dispatch(Callback_Imp& callable) {} - void _dispatchBinary(BytesCallback_Imp& callable) {} + void _dispatch(Callback_Imp& callable) { (void)callable; } + void _dispatchBinary(BytesCallback_Imp& callable) { (void)callable; } }; class _RealWebSocket : public easywsclient::WebSocket { @@ -591,4 +591,4 @@ WebSocket::pointer WebSocket::from_url_no_mask(const std::string& url, return ::from_url(url, false, origin); } -} // namespace easywsclient \ No newline at end of file +} // namespace easywsclient diff --git a/engine/common/api_server_configuration.h b/engine/common/api_server_configuration.h index b313f3286..9a444c34a 100644 --- a/engine/common/api_server_configuration.h +++ b/engine/common/api_server_configuration.h @@ -34,75 +34,52 @@ static const std::unordered_map CONFIGURATIONS = { {"cors", ApiConfigurationMetadata{ - .name = "cors", - .desc = "Cross-Origin Resource Sharing configuration.", - .group = "CORS", - .accept_value = "[on|off]", - .default_value = "on"}}, + "cors", "Cross-Origin Resource Sharing configuration.", "CORS", + "[on|off]", "on"}}, {"allowed_origins", ApiConfigurationMetadata{ - .name = "allowed_origins", - .desc = "Allowed origins for CORS. Comma separated. E.g. 
" - "http://localhost,https://cortex.so", - .group = "CORS", - .accept_value = "comma separated", - .default_value = "*", - .allow_empty = true}}, - {"proxy_url", ApiConfigurationMetadata{.name = "proxy_url", - .desc = "Proxy URL", - .group = "Proxy", - .accept_value = "string", - .default_value = ""}}, - {"proxy_username", ApiConfigurationMetadata{.name = "proxy_username", - .desc = "Proxy Username", - .group = "Proxy", - .accept_value = "string", - .default_value = ""}}, - {"proxy_password", ApiConfigurationMetadata{.name = "proxy_password", - .desc = "Proxy Password", - .group = "Proxy", - .accept_value = "string", - .default_value = ""}}, + "allowed_origins", + "Allowed origins for CORS. Comma separated. E.g. " + "http://localhost,https://cortex.so", + "CORS", "comma separated", "*", true}}, + {"proxy_url", ApiConfigurationMetadata{"proxy_url", "Proxy URL", + "Proxy", "string", ""}}, + {"proxy_username", + ApiConfigurationMetadata{"proxy_username", "Proxy Username", "Proxy", + "string", ""}}, + {"proxy_password", + ApiConfigurationMetadata{"proxy_password", "Proxy Password", "Proxy", + "string", ""}}, {"verify_proxy_ssl", - ApiConfigurationMetadata{.name = "verify_proxy_ssl", - .desc = "Verify SSL for proxy", - .group = "Proxy", - .accept_value = "[on|off]", - .default_value = "on"}}, + ApiConfigurationMetadata{"verify_proxy_ssl", "Verify SSL for proxy", + "Proxy", "[on|off]", "on"}}, {"verify_proxy_host_ssl", - ApiConfigurationMetadata{.name = "verify_proxy_host_ssl", - .desc = "Verify SSL for proxy", - .group = "Proxy", - .accept_value = "[on|off]", - .default_value = "on"}}, - {"no_proxy", ApiConfigurationMetadata{.name = "no_proxy", - .desc = "No proxy for hosts", - .group = "Proxy", - .accept_value = "string", - .default_value = ""}}, - {"verify_peer_ssl", ApiConfigurationMetadata{.name = "verify_peer_ssl", - .desc = "Verify peer SSL", - .group = "Proxy", - .accept_value = "[on|off]", - .default_value = "on"}}, - {"verify_host_ssl", 
ApiConfigurationMetadata{.name = "verify_host_ssl", - .desc = "Verify host SSL", - .group = "Proxy", - .accept_value = "[on|off]", - .default_value = "on"}}, + ApiConfigurationMetadata{"verify_proxy_host_ssl", + "Verify SSL for proxy", "Proxy", "[on|off]", + "on"}}, + {"no_proxy", ApiConfigurationMetadata{"no_proxy", "No proxy for hosts", + "Proxy", "string", ""}}, + {"verify_peer_ssl", + ApiConfigurationMetadata{"verify_peer_ssl", "Verify peer SSL", "Proxy", + "[on|off]", "on"}}, + {"verify_host_ssl", + ApiConfigurationMetadata{"verify_host_ssl", "Verify host SSL", "Proxy", + "[on|off]", "on"}}, {"huggingface_token", - ApiConfigurationMetadata{.name = "huggingface_token", - .desc = "HuggingFace token to pull models", - .group = "Token", - .accept_value = "string", - .default_value = "", - .allow_empty = true}}, - {"github_token", ApiConfigurationMetadata{.name = "github_token", - .desc = "Github token", - .group = "Token", - .accept_value = "string", - .default_value = "", - .allow_empty = true}}, + ApiConfigurationMetadata{ + /* .name = */ "huggingface_token", + /* .desc = */ "HuggingFace token to pull models", + /* .group = */ "Token", + /* .accept_value = */ "string", + /* .default_value = */ "", + /* .allow_empty = */ true}}, + {"github_token", + ApiConfigurationMetadata{/* .name = */ "github_token", + /* .desc = */ "Github token", + /* .group = */ "Token", + /* .accept_value = */ "string", + /* .default_value = */ "", + /* .allow_empty = */ true}}, }; class ApiServerConfiguration { diff --git a/engine/common/hardware_common.h b/engine/common/hardware_common.h index 4dc2e2c35..6115e2d98 100644 --- a/engine/common/hardware_common.h +++ b/engine/common/hardware_common.h @@ -54,11 +54,7 @@ inline CPU FromJson(const Json::Value& root) { for (auto const& i : root["instructions"]) { insts.emplace_back(i.asString()); } - return {.cores = cores, - .arch = arch, - .model = model, - .usage = usage, - .instructions = insts}; + return {cores, arch, model, usage, insts}; } 
} // namespace cpu @@ -160,8 +156,7 @@ inline Json::Value ToJson(const OS& os) { namespace os { inline OS FromJson(const Json::Value& root) { - return {.name = root["name"].asString(), - .version = root["version"].asString()}; + return {root["name"].asString(), root["version"].asString(), ""}; } } // namespace os @@ -181,14 +176,13 @@ inline Json::Value ToJson(const PowerInfo& pi) { namespace power { inline PowerInfo FromJson(const Json::Value& root) { - return {.charging_status = root["charging_status"].asString(), - .battery_life = root["battery_life"].asInt(), - .is_power_saving = root["is_power_saving"].asBool()}; + return {root["charging_status"].asString(), root["battery_life"].asInt(), + root["is_power_saving"].asBool()}; } } // namespace power namespace { -int64_t ByteToMiB(int64_t b) { +[[maybe_unused]] int64_t ByteToMiB(int64_t b) { return b / 1024 / 1024; } } // namespace @@ -208,9 +202,8 @@ inline Json::Value ToJson(const Memory& m) { namespace memory { inline Memory FromJson(const Json::Value& root) { - return {.total_MiB = root["total"].asInt64(), - .available_MiB = root["available"].asInt64(), - .type = root["type"].asString()}; + return {root["total"].asInt64(), root["available"].asInt64(), + root["type"].asString()}; } } // namespace memory @@ -230,9 +223,8 @@ inline Json::Value ToJson(const StorageInfo& si) { namespace storage { inline StorageInfo FromJson(const Json::Value& root) { - return {.type = root["type"].asString(), - .total = root["total"].asInt64(), - .available = root["available"].asInt64()}; + return {root["type"].asString(), root["total"].asInt64(), + root["available"].asInt64()}; } } // namespace storage -} // namespace cortex::hw \ No newline at end of file +} // namespace cortex::hw diff --git a/engine/config/gguf_parser.cc b/engine/config/gguf_parser.cc index 9acc97de2..e07ddecc0 100644 --- a/engine/config/gguf_parser.cc +++ b/engine/config/gguf_parser.cc @@ -560,7 +560,7 @@ void GGUFHandler::ModelConfigFromMetadata() { } try { - 
if (tokens.size() > eos_token) { + if (tokens.size() > (unsigned)eos_token) { eos_string = tokens[eos_token]; stop.push_back(std::move(eos_string)); } else { @@ -582,6 +582,7 @@ void GGUFHandler::ModelConfigFromMetadata() { model_config_.max_tokens = max_tokens; model_config_.ctx_len = max_tokens; model_config_.ngl = ngl; + (void)bos_token; } const ModelConfig& GGUFHandler::GetModelConfig() const { diff --git a/engine/config/gguf_parser.h b/engine/config/gguf_parser.h index c71a9320f..d9997c797 100644 --- a/engine/config/gguf_parser.h +++ b/engine/config/gguf_parser.h @@ -4,32 +4,32 @@ namespace config { constexpr char OPEN_CHAT_3_5_JINJA[] = - "{{ bos_token }}{\% for message in messages \%}{{ 'GPT4 Correct ' + " + R"({{ bos_token }}{% for message in messages %}{{ 'GPT4 Correct ' + " "message['role'].title() + ': ' + message['content'] + " - "'<|end_of_turn|>'}}{\% endfor \%}{\% if add_generation_prompt \%}{{ 'GPT4 " - "Correct Assistant:' }}{\% endif \%}"; + "'<|end_of_turn|>'}}{% endfor %}{% if add_generation_prompt %}{{ 'GPT4 " + "Correct Assistant:' }}{% endif %})"; constexpr char ZEPHYR_JINJA[] = - "{\% for message in messages \%}\n{\% if message['role'] == 'user' \%}\n{{ " - "'<|user|>\n' + message['content'] + eos_token }}\n{\% elif " - "message['role'] == 'system' \%}\n{{ '<|system|>\n' + message['content'] + " - "eos_token }}\n{\% elif message['role'] == 'assistant' \%}\n{{ " - "'<|assistant|>\n' + message['content'] + eos_token }}\n{\% endif " - "\%}\n{\% if loop.last and add_generation_prompt \%}\n{{ '<|assistant|>' " - "}}\n{\% endif \%}\n{\% endfor \%}"; + R"({% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ " + "'<|user|>\n' + message['content'] + eos_token }}\n{% elif " + "message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + " + "eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ " + "'<|assistant|>\n' + message['content'] + eos_token }}\n{% endif " + "%}\n{% if loop.last and add_generation_prompt 
%}\n{{ '<|assistant|>' " + "}}\n{% endif %}\n{% endfor %})"; constexpr char LLAMA_3_1_JINJA[] = - "{\% set loop_messages = messages \%}{\% for message in loop_messages " - "\%}{\% set content = '<|start_header_id|>' + message['role'] + " - "'<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' \%}{\% " - "if loop.index0 == 0 \%}{\% set content = bos_token + content \%}{\% endif " - "\%}{{ content }}{\% endfor \%}{{ " - "'<|start_header_id|>assistant<|end_header_id|>\n\n' }}"; + R"({% set loop_messages = messages %}{% for message in loop_messages " + "%}{% set content = '<|start_header_id|>' + message['role'] + " + "'<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% " + "if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif " + "%}{{ content }}{% endfor %}{{ " + "'<|start_header_id|>assistant<|end_header_id|>\n\n' }})"; constexpr char LLAMA_3_JINJA[] = - "{\% set loop_messages = messages \%}{\% for message in loop_messages " - "\%}{\% set content = '<|start_header_id|>' + message['role'] + " - "'<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' \%}{\% " - "if loop.index0 == 0 \%}{\% set content = bos_token + content \%}{\% endif " - "\%}{{ content }}{\% endfor \%}{\% if add_generation_prompt \%}{{ " - "'<|start_header_id|>assistant<|end_header_id|>\n\n' }}"; + R"({% set loop_messages = messages %}{% for message in loop_messages " + "%}{% set content = '<|start_header_id|>' + message['role'] + " + "'<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% " + "if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif " + "%}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ " + "'<|start_header_id|>assistant<|end_header_id|>\n\n' }})"; constexpr uint32_t GGUF_MAGIC_NUMBER = 1179993927; class GGUFHandler { @@ -68,4 +68,4 @@ class GGUFHandler { std::unordered_map> metadata_array_string_; }; -} \ No newline at end of file +} // namespace config diff --git 
a/engine/controllers/assistants.cc b/engine/controllers/assistants.cc index 530e180a5..f1ab9f932 100644 --- a/engine/controllers/assistants.cc +++ b/engine/controllers/assistants.cc @@ -69,6 +69,7 @@ void Assistants::RetrieveAssistantV2( callback(resp); } } + (void)req; } void Assistants::CreateAssistantV2( @@ -300,6 +301,7 @@ void Assistants::ListAssistants( auto response = cortex_utils::CreateCortexHttpJsonResponse(root); response->setStatusCode(k200OK); callback(response); + (void)req; } void Assistants::DeleteAssistant( @@ -324,4 +326,5 @@ void Assistants::DeleteAssistant( cortex_utils::CreateCortexHttpJsonResponse(response.ToJson().value()); resp->setStatusCode(k200OK); callback(resp); + (void)req; } diff --git a/engine/controllers/configs.cc b/engine/controllers/configs.cc index c2cf7cc2c..24e96afed 100644 --- a/engine/controllers/configs.cc +++ b/engine/controllers/configs.cc @@ -18,6 +18,7 @@ void Configs::GetConfigurations( get_config_result.value().ToJson()); resp->setStatusCode(drogon::k200OK); callback(resp); + (void)req; return; } diff --git a/engine/controllers/engines.cc b/engine/controllers/engines.cc index 43bc3735f..f7deb41eb 100644 --- a/engine/controllers/engines.cc +++ b/engine/controllers/engines.cc @@ -5,9 +5,9 @@ #include "utils/engine_constants.h" #include "utils/http_util.h" #include "utils/logging_utils.h" +#include "utils/normalize_engine.h" #include "utils/scope_exit.h" #include "utils/string_utils.h" -#include "utils/normalize_engine.h" void Engines::ListEngine( const HttpRequestPtr& req, @@ -42,6 +42,7 @@ void Engines::ListEngine( auto resp = cortex_utils::CreateCortexHttpJsonResponse(ret); resp->setStatusCode(k200OK); callback(resp); + (void)req; } void Engines::UninstallEngine( @@ -113,6 +114,7 @@ void Engines::GetEngineReleases( auto resp = cortex_utils::CreateCortexHttpJsonResponse(releases); resp->setStatusCode(k200OK); callback(resp); + (void)req; } void Engines::GetEngineVariants( @@ -120,6 +122,7 @@ void 
Engines::GetEngineVariants( std::function&& callback, const std::string& engine, const std::string& version, std::optional show) const { + (void)req; if (engine.empty()) { Json::Value res; res["message"] = "Engine name is required"; @@ -146,7 +149,8 @@ void Engines::GetEngineVariants( auto normalize_version = string_utils::RemoveSubstring(version, "v"); Json::Value releases(Json::arrayValue); for (const auto& release : result.value()) { - auto json = release.ToApiJson(cortex::engine::NormalizeEngine(engine), normalize_version); + auto json = release.ToApiJson(cortex::engine::NormalizeEngine(engine), + normalize_version); if (json != std::nullopt) { releases.append(json.value()); } @@ -294,6 +298,7 @@ void Engines::GetInstalledEngineVariants( const HttpRequestPtr& req, std::function&& callback, const std::string& engine) const { + (void)req; if (engine_service_->IsRemoteEngine(engine)) { auto remote_engines = engine_service_->GetEngines(); @@ -417,6 +422,7 @@ void Engines::GetLatestEngineVersion( const HttpRequestPtr& req, std::function&& callback, const std::string& engine) { + (void)req; auto result = engine_service_->GetLatestEngineVersion(engine); if (result.has_error()) { Json::Value res; @@ -487,6 +493,7 @@ void Engines::GetDefaultEngineVariant( const HttpRequestPtr& req, std::function&& callback, const std::string& engine) const { + (void)req; auto result = engine_service_->GetDefaultEngineVariant(engine); if (result.has_error()) { Json::Value res; @@ -505,6 +512,7 @@ void Engines::GetDefaultEngineVariant( void Engines::LoadEngine(const HttpRequestPtr& req, std::function&& callback, const std::string& engine) { + (void)req; auto result = engine_service_->LoadEngine(engine); if (result.has_error()) { Json::Value res; @@ -525,6 +533,7 @@ void Engines::UnloadEngine( const HttpRequestPtr& req, std::function&& callback, const std::string& engine) { + (void)req; auto result = engine_service_->UnloadEngine(engine); if (result.has_error()) { Json::Value res; diff 
--git a/engine/controllers/events.cc b/engine/controllers/events.cc index 3ad50e8f6..75402f59f 100644 --- a/engine/controllers/events.cc +++ b/engine/controllers/events.cc @@ -10,11 +10,15 @@ void Events::handleNewMessage(const WebSocketConnectionPtr& wsConnPtr, std::string&& message, const WebSocketMessageType& type) { // ignore message sent from client + (void)wsConnPtr; + (void)message; + (void)type; } void Events::handleNewConnection(const HttpRequestPtr& req, const WebSocketConnectionPtr& ws_conn_ptr) { connections_.insert(ws_conn_ptr); + (void)req; } void Events::handleConnectionClosed(const WebSocketConnectionPtr& ws_conn_ptr) { diff --git a/engine/controllers/files.cc b/engine/controllers/files.cc index ed37967b2..d4dd1bb3b 100644 --- a/engine/controllers/files.cc +++ b/engine/controllers/files.cc @@ -64,6 +64,7 @@ void Files::ListFiles(const HttpRequestPtr& req, std::optional limit, std::optional order, std::optional after) const { + (void)req; auto res = file_service_->ListFiles( purpose.value_or(""), std::stoi(limit.value_or("20")), order.value_or("desc"), after.value_or("")); @@ -97,6 +98,7 @@ void Files::RetrieveFile(const HttpRequestPtr& req, std::function&& callback, const std::string& file_id, std::optional thread_id) const { + (void)req; // this code part is for backward compatible. 
remove it later on if (thread_id.has_value()) { auto msg_res = @@ -169,6 +171,7 @@ void Files::RetrieveFile(const HttpRequestPtr& req, void Files::DeleteFile(const HttpRequestPtr& req, std::function&& callback, const std::string& file_id) { + (void)req; auto res = file_service_->DeleteFileLocal(file_id); if (res.has_error()) { Json::Value ret; @@ -193,6 +196,7 @@ void Files::RetrieveFileContent( const HttpRequestPtr& req, std::function&& callback, const std::string& file_id, std::optional thread_id) { + (void)req; if (thread_id.has_value()) { auto msg_res = message_service_->RetrieveMessage(thread_id.value(), file_id); diff --git a/engine/controllers/hardware.cc b/engine/controllers/hardware.cc index 8b7884710..8592097fc 100644 --- a/engine/controllers/hardware.cc +++ b/engine/controllers/hardware.cc @@ -5,6 +5,7 @@ void Hardware::GetHardwareInfo( const HttpRequestPtr& req, std::function&& callback) { + (void)req; auto hw_inf = hw_svc_->GetHardwareInfo(); Json::Value ret; ret["cpu"] = cortex::hw::ToJson(hw_inf.cpu); @@ -38,7 +39,7 @@ void Hardware::Activate( ahc.gpus.push_back(g.asInt()); } } - + if (!hw_svc_->IsValidConfig(ahc)) { Json::Value ret; ret["message"] = "Invalid GPU index provided."; diff --git a/engine/controllers/health.cc b/engine/controllers/health.cc index 22fc0bfd6..664f8ca68 100644 --- a/engine/controllers/health.cc +++ b/engine/controllers/health.cc @@ -2,8 +2,9 @@ #include "utils/cortex_utils.h" void health::asyncHandleHttpRequest( - const HttpRequestPtr &req, - std::function &&callback) { + const HttpRequestPtr& req, + std::function&& callback) { + (void)req; auto resp = cortex_utils::CreateCortexHttpResponse(); resp->setStatusCode(k200OK); resp->setContentTypeCode(CT_TEXT_HTML); diff --git a/engine/controllers/messages.cc b/engine/controllers/messages.cc index 27307803a..f91998d9c 100644 --- a/engine/controllers/messages.cc +++ b/engine/controllers/messages.cc @@ -14,6 +14,7 @@ void Messages::ListMessages( std::optional order, std::optional 
after, std::optional before, std::optional run_id) const { + (void)req; auto res = message_service_->ListMessages( thread_id, std::stoi(limit.value_or("20")), order.value_or("desc"), after.value_or(""), before.value_or(""), run_id.value_or("")); @@ -172,6 +173,7 @@ void Messages::RetrieveMessage( const HttpRequestPtr& req, std::function&& callback, const std::string& thread_id, const std::string& message_id) const { + (void)req; auto res = message_service_->RetrieveMessage(thread_id, message_id); if (res.has_error()) { Json::Value ret; @@ -322,6 +324,7 @@ void Messages::DeleteMessage( const HttpRequestPtr& req, std::function&& callback, const std::string& thread_id, const std::string& message_id) { + (void)req; auto res = message_service_->DeleteMessage(thread_id, message_id); if (res.has_error()) { Json::Value ret; diff --git a/engine/controllers/models.cc b/engine/controllers/models.cc index 3215da753..2071407f5 100644 --- a/engine/controllers/models.cc +++ b/engine/controllers/models.cc @@ -15,6 +15,7 @@ namespace { std::string ToJsonStringWithPrecision(Json::Value& input, int precision = 2) { + (void)precision; Json::StreamWriterBuilder wbuilder; wbuilder.settings_["precision"] = 2; return Json::writeString(wbuilder, input); @@ -60,15 +61,19 @@ void Models::PullModel(const HttpRequestPtr& req, auto model_and_branch = string_utils::SplitBy(model_handle, ":"); if (model_and_branch.size() == 3) { auto mh = url_parser::Url{ - .protocol = "https", - .host = kHuggingFaceHost, - .pathParams = { + /* .protocol = */ "https", + /* .host = */ kHuggingFaceHost, + /* .pathParams = */ + { model_and_branch[0], model_and_branch[1], "resolve", "main", model_and_branch[2], - }}.ToFullPath(); + }, + /* queries= */ {}, + } + .ToFullPath(); return model_service_->HandleDownloadUrlAsync(mh, desired_model_id, desired_model_name); } @@ -171,6 +176,7 @@ void Models::AbortPullModel( void Models::ListModel( const HttpRequestPtr& req, std::function&& callback) const { + (void)req; 
namespace fs = std::filesystem; namespace fmu = file_manager_utils; Json::Value ret; @@ -263,6 +269,7 @@ void Models::ListModel( void Models::GetModel(const HttpRequestPtr& req, std::function&& callback, const std::string& model_id) const { + (void)req; namespace fs = std::filesystem; namespace fmu = file_manager_utils; LOG_DEBUG << "GetModel, Model handle: " << model_id; @@ -324,6 +331,7 @@ void Models::GetModel(const HttpRequestPtr& req, void Models::DeleteModel(const HttpRequestPtr& req, std::function&& callback, const std::string& model_id) { + (void)req; auto result = model_service_->DeleteModel(model_id); if (result.has_error()) { Json::Value ret; @@ -420,7 +428,7 @@ void Models::ImportModel( cortex::db::ModelEntry model_entry{ modelHandle, "", "", yaml_rel_path.string(), modelHandle, "local", "imported", cortex::db::ModelStatus::Downloaded, - ""}; + "", ""}; std::filesystem::create_directories( std::filesystem::path(model_yaml_path).parent_path()); @@ -599,6 +607,7 @@ void Models::GetModelStatus( const HttpRequestPtr& req, std::function&& callback, const std::string& model_id) { + (void)req; auto result = model_service_->GetModelStatus(model_id); if (result.has_error()) { Json::Value ret; @@ -619,6 +628,7 @@ void Models::GetRemoteModels( const HttpRequestPtr& req, std::function&& callback, const std::string& engine_id) { + (void)req; if (!engine_service_->IsRemoteEngine(engine_id)) { Json::Value ret; ret["message"] = "Not a remote engine: " + engine_id; @@ -688,7 +698,7 @@ void Models::AddRemoteModel( cortex::db::ModelEntry model_entry{ model_handle, "", "", yaml_rel_path.string(), model_handle, "remote", "imported", cortex::db::ModelStatus::Remote, - engine_name}; + engine_name, ""}; std::filesystem::create_directories( std::filesystem::path(model_yaml_path).parent_path()); if (db_service_->AddModelEntry(model_entry).value()) { @@ -748,7 +758,7 @@ void Models::AddModelSource( resp->setStatusCode(k400BadRequest); callback(resp); } else { - auto const& info = 
res.value(); + /* auto const& info = res.value(); */ Json::Value ret; ret["message"] = "Model source is added successfully!"; auto resp = cortex_utils::CreateCortexHttpJsonResponse(ret); @@ -773,7 +783,7 @@ void Models::DeleteModelSource( resp->setStatusCode(k400BadRequest); callback(resp); } else { - auto const& info = res.value(); + /* auto const& info = res.value(); */ Json::Value ret; ret["message"] = "Model source is deleted successfully!"; auto resp = cortex_utils::CreateCortexHttpJsonResponse(ret); @@ -785,6 +795,7 @@ void Models::DeleteModelSource( void Models::GetModelSources( const HttpRequestPtr& req, std::function&& callback) { + (void)req; auto res = model_src_svc_->GetModelSources(); if (res.has_error()) { Json::Value ret; @@ -810,6 +821,7 @@ void Models::GetModelSource( const HttpRequestPtr& req, std::function&& callback, const std::string& src) { + (void)req; auto res = model_src_svc_->GetModelSource(src); if (res.has_error()) { Json::Value ret; @@ -829,6 +841,7 @@ void Models::GetRepositoryList( const HttpRequestPtr& req, std::function&& callback, std::optional author, std::optional tag) { + (void)req; if (!author.has_value()) author = "cortexso"; auto res = @@ -851,4 +864,4 @@ void Models::GetRepositoryList( resp->setStatusCode(k200OK); callback(resp); } -} \ No newline at end of file +} diff --git a/engine/controllers/process_manager.cc b/engine/controllers/process_manager.cc index 72b0f08d2..f245e6698 100644 --- a/engine/controllers/process_manager.cc +++ b/engine/controllers/process_manager.cc @@ -7,6 +7,7 @@ void ProcessManager::destroy( const HttpRequestPtr& req, std::function&& callback) { + (void)req; auto loaded_engines = engine_service_->GetSupportedEngineNames(); for (const auto& engine : loaded_engines.value()) { auto result = engine_service_->UnloadEngine(engine); diff --git a/engine/controllers/swagger.cc b/engine/controllers/swagger.cc index abb80b94e..b583e1606 100644 --- a/engine/controllers/swagger.cc +++ 
b/engine/controllers/swagger.cc @@ -19,6 +19,7 @@ Json::Value SwaggerController::GenerateOpenApiSpec() const { void SwaggerController::serveSwaggerUI( const drogon::HttpRequestPtr& req, std::function&& callback) const { + (void)req; auto resp = cortex_utils::CreateCortexHttpResponse(); resp->setBody(ScalarUi); resp->setContentTypeCode(drogon::CT_TEXT_HTML); @@ -28,6 +29,7 @@ void SwaggerController::serveSwaggerUI( void SwaggerController::serveOpenAPISpec( const drogon::HttpRequestPtr& req, std::function&& callback) const { + (void)req; auto spec = GenerateOpenApiSpec(); auto resp = cortex_utils::CreateCortexHttpJsonResponse(spec); callback(resp); diff --git a/engine/controllers/threads.cc b/engine/controllers/threads.cc index 4a87bc9eb..f9ff4df00 100644 --- a/engine/controllers/threads.cc +++ b/engine/controllers/threads.cc @@ -9,6 +9,7 @@ void Threads::ListThreads( std::function&& callback, std::optional limit, std::optional order, std::optional after, std::optional before) const { + (void)req; CTL_INF("ListThreads"); auto res = thread_service_->ListThreads( std::stoi(limit.value_or("20")), order.value_or("desc"), @@ -101,6 +102,7 @@ void Threads::RetrieveThread( const HttpRequestPtr& req, std::function&& callback, const std::string& thread_id) const { + (void)req; auto res = thread_service_->RetrieveThread(thread_id); if (res.has_error()) { Json::Value ret; @@ -196,8 +198,7 @@ void Threads::ModifyThread( auto json_res = res->ToJson(); json_res->removeMember("title"); json_res->removeMember("assistants"); - auto resp = - cortex_utils::CreateCortexHttpJsonResponse(json_res.value()); + auto resp = cortex_utils::CreateCortexHttpJsonResponse(json_res.value()); resp->setStatusCode(k200OK); callback(resp); } @@ -208,6 +209,7 @@ void Threads::DeleteThread( const HttpRequestPtr& req, std::function&& callback, const std::string& thread_id) { + (void)req; auto res = thread_service_->DeleteThread(thread_id); if (res.has_error()) { Json::Value ret; diff --git 
a/engine/database/engines.cc b/engine/database/engines.cc index a4d13ef79..61476fe3a 100644 --- a/engine/database/engines.cc +++ b/engine/database/engines.cc @@ -4,7 +4,9 @@ namespace cortex::db { -void CreateTable(SQLite::Database& db) {} +void CreateTable(SQLite::Database& db) { + (void)db; +} Engines::Engines() : db_(cortex::db::Database::GetInstance().db()) { CreateTable(db_); @@ -170,4 +172,4 @@ std::optional Engines::DeleteEngineById(int id) { } } -} // namespace cortex::db \ No newline at end of file +} // namespace cortex::db diff --git a/engine/extensions/remote-engine/remote_engine.cc b/engine/extensions/remote-engine/remote_engine.cc index 3924663aa..79a471f88 100644 --- a/engine/extensions/remote-engine/remote_engine.cc +++ b/engine/extensions/remote-engine/remote_engine.cc @@ -11,11 +11,11 @@ namespace remote_engine { namespace { constexpr const int k200OK = 200; constexpr const int k400BadRequest = 400; -constexpr const int k409Conflict = 409; -constexpr const int k500InternalServerError = 500; -constexpr const int kFileLoggerOption = 0; +[[maybe_unused]] constexpr const int k409Conflict = 409; +[[maybe_unused]] constexpr const int k500InternalServerError = 500; +[[maybe_unused]] constexpr const int kFileLoggerOption = 0; -constexpr const std::array kAnthropicModels = { +[[maybe_unused]]constexpr const std::array kAnthropicModels = { "claude-3-5-sonnet-20241022", "claude-3-5-haiku-20241022", "claude-3-opus-20240229", "claude-3-sonnet-20240229", "claude-3-haiku-20240307"}; @@ -285,6 +285,7 @@ CurlResponse RemoteEngine::MakeGetModelsRequest( CurlResponse RemoteEngine::MakeChatCompletionRequest( const ModelConfig& config, const std::string& body, const std::string& method) { + (void) config; CURL* curl = curl_easy_init(); CurlResponse response; @@ -391,6 +392,7 @@ void RemoteEngine::GetModels( status["status_code"] = 200; callback(std::move(status), std::move(json_resp)); CTL_INF("Running models responded"); + (void)json_body; } void 
RemoteEngine::LoadModel( @@ -734,4 +736,4 @@ Json::Value RemoteEngine::GetRemoteModels(const std::string& url, } } -} // namespace remote_engine \ No newline at end of file +} // namespace remote_engine diff --git a/engine/migrations/migration_manager.cc b/engine/migrations/migration_manager.cc index 26197115d..8f9e22b5a 100644 --- a/engine/migrations/migration_manager.cc +++ b/engine/migrations/migration_manager.cc @@ -63,6 +63,7 @@ cpp::result MigrationManager::Migrate() { if (std::filesystem::exists(cortex_tmp)) { try { auto n = std::filesystem::remove_all(cortex_tmp); + (void)n; // CTL_INF("Deleted " << n << " files or directories"); } catch (const std::exception& e) { CTL_WRN(e.what()); diff --git a/engine/repositories/file_fs_repository.cc b/engine/repositories/file_fs_repository.cc index 4ec6c1ab2..6deefcc96 100644 --- a/engine/repositories/file_fs_repository.cc +++ b/engine/repositories/file_fs_repository.cc @@ -11,18 +11,18 @@ std::filesystem::path FileFsRepository::GetFilePath() const { return data_folder_path_ / kFileContainerFolderName; } -std::filesystem::path SanitizePath(const std::filesystem::path & user_input, - const std::filesystem::path & basedir) { +std::filesystem::path SanitizePath(const std::filesystem::path& user_input, + const std::filesystem::path& basedir) { auto abs_base = std::filesystem::canonical(basedir); std::filesystem::path resolved_path = std::filesystem::weakly_canonical( std::filesystem::path(basedir) / std::filesystem::path(user_input)); - /* Ensure the resolved path is within our basedir */ + /* Ensure the resolved path is within our basedir */ for (auto p = resolved_path; !p.empty(); p = p.parent_path()) { if (std::filesystem::equivalent(p, abs_base)) { return resolved_path; } - } + } return {}; } @@ -89,6 +89,8 @@ cpp::result FileFsRepository::StoreFile( cpp::result, std::string> FileFsRepository::ListFiles( const std::string& purpose, uint8_t limit, const std::string& order, const std::string& after) const { + 
(void)purpose; + (void)after; auto res = db_service_->GetFileList(); if (res.has_error()) { return cpp::fail(res.error()); diff --git a/engine/services/download_service.cc b/engine/services/download_service.cc index d85e1f78f..c54d4b3f3 100644 --- a/engine/services/download_service.cc +++ b/engine/services/download_service.cc @@ -265,6 +265,7 @@ cpp::result DownloadService::Download( fclose(file); curl_easy_cleanup(curl); + (void)download_id; return true; } @@ -325,7 +326,7 @@ void DownloadService::Shutdown() { } void DownloadService::WorkerThread(int worker_id) { - auto& worker_data = worker_data_[worker_id]; + // auto& worker_data = worker_data_[worker_id]; while (!stop_flag_) { std::unique_lock lock(task_mutex_); @@ -383,9 +384,9 @@ void DownloadService::ProcessTask(DownloadTask& task, int worker_id) { return; } auto dl_data_ptr = std::make_shared(DownloadingData{ - .task_id = task.id, - .item_id = item.id, - .download_service = this, + task.id, + item.id, + this, }); worker_data->downloading_data_map[item.id] = dl_data_ptr; @@ -438,9 +439,9 @@ cpp::result DownloadService::ProcessMultiDownload( auto result = ProcessCompletedTransfers(multi_handle); if (result.has_error()) { return cpp::fail(ProcessDownloadFailed{ - .message = result.error(), - .task_id = task.id, - .type = DownloadEventType::DownloadError, + result.error(), + task.id, + DownloadEventType::DownloadError, }); } @@ -448,12 +449,13 @@ cpp::result DownloadService::ProcessMultiDownload( CTL_INF("IsTaskTerminated " + std::to_string(IsTaskTerminated(task.id))); CTL_INF("stop_flag_ " + std::to_string(stop_flag_)); return cpp::fail(ProcessDownloadFailed{ - .message = result.error(), - .task_id = task.id, - .type = DownloadEventType::DownloadStopped, + result.error(), + task.id, + DownloadEventType::DownloadStopped, }); } } while (still_running); + (void)handles; return {}; } @@ -510,16 +512,14 @@ void DownloadService::RemoveTaskFromStopList(const std::string& task_id) { void 
DownloadService::EmitTaskStarted(const DownloadTask& task) { event_queue_->enqueue( EventType::DownloadEvent, - DownloadEvent{.type_ = DownloadEventType::DownloadStarted, - .download_task_ = task}); + DownloadEvent{{}, DownloadEventType::DownloadStarted, task}); } void DownloadService::EmitTaskStopped(const std::string& task_id) { if (auto it = active_tasks_.find(task_id); it != active_tasks_.end()) { event_queue_->enqueue( EventType::DownloadEvent, - DownloadEvent{.type_ = DownloadEventType::DownloadStopped, - .download_task_ = *it->second}); + DownloadEvent{{}, DownloadEventType::DownloadStopped, *it->second}); } } @@ -527,8 +527,7 @@ void DownloadService::EmitTaskError(const std::string& task_id) { if (auto it = active_tasks_.find(task_id); it != active_tasks_.end()) { event_queue_->enqueue( EventType::DownloadEvent, - DownloadEvent{.type_ = DownloadEventType::DownloadError, - .download_task_ = *it->second}); + DownloadEvent{{}, DownloadEventType::DownloadError, *it->second}); } } @@ -540,8 +539,7 @@ void DownloadService::EmitTaskCompleted(const std::string& task_id) { } event_queue_->enqueue( EventType::DownloadEvent, - DownloadEvent{.type_ = DownloadEventType::DownloadSuccess, - .download_task_ = *it->second}); + DownloadEvent{{}, DownloadEventType::DownloadSuccess, *it->second}); } } diff --git a/engine/services/download_service.h b/engine/services/download_service.h index 78ebcbf73..c008e2f37 100644 --- a/engine/services/download_service.h +++ b/engine/services/download_service.h @@ -87,7 +87,7 @@ class DownloadService { explicit DownloadService(std::shared_ptr event_queue, std::shared_ptr config_service) - : config_service_{config_service}, event_queue_{event_queue} { + : config_service_{config_service}, event_queue_{event_queue} { InitializeWorkers(); }; @@ -127,10 +127,9 @@ class DownloadService { void Shutdown(); - cpp::result Download( - const std::string& download_id, - const DownloadItem& download_item, - bool show_progress = true) noexcept; + 
cpp::result Download(const std::string& download_id, + const DownloadItem& download_item, + bool show_progress = true) noexcept; std::shared_ptr event_queue_; @@ -224,8 +223,7 @@ class DownloadService { if (should_emit_event) { dl_srv->event_queue_->enqueue( EventType::DownloadEvent, - DownloadEvent{.type_ = DownloadEventType::DownloadUpdated, - .download_task_ = *task}); + DownloadEvent{{}, DownloadEventType::DownloadUpdated, *task}); dl_srv->event_emit_map_[task->id] = std::chrono::steady_clock::now(); } @@ -234,8 +232,8 @@ class DownloadService { break; } } - (void) ultotal; - (void) ulnow; + (void)ultotal; + (void)ulnow; return 0; } diff --git a/engine/services/engine_service.cc b/engine/services/engine_service.cc index 194604e5e..48cc6ff37 100644 --- a/engine/services/engine_service.cc +++ b/engine/services/engine_service.cc @@ -36,7 +36,7 @@ std::string GetSuitableCudaVersion(const std::string& engine, } else if (cuda_driver_semver.major == 12) { suitable_toolkit_version = "12.0"; } - + (void)engine; return suitable_toolkit_version; } @@ -150,6 +150,7 @@ cpp::result EngineService::UnzipEngine( CTL_INF("Set default engine variant: " << res.value().variant); } } + (void)version; return true; } @@ -338,14 +339,19 @@ cpp::result EngineService::DownloadEngine( CTL_INF("Finished!"); }; - auto downloadTask = - DownloadTask{.id = selected_variant->name, - .type = DownloadType::Engine, - .items = {DownloadItem{ - .id = selected_variant->name, - .downloadUrl = selected_variant->browser_download_url, - .localPath = variant_path, - }}}; + auto downloadTask = DownloadTask{ + /* .id = */ selected_variant->name, + /* .status = */ DownloadTask::Status::Pending, + /* .type = */ DownloadType::Engine, + /* .items = */ + {DownloadItem{ + /* .id = */ selected_variant->name, + /* .downloadUrl = */ selected_variant->browser_download_url, + /* .localPath = */ variant_path, + /* .checksum = */ std::nullopt, + /* .bytes = */ std::nullopt, + /* .downloadedBytes = */ std::nullopt, + 
}}}; auto add_task_result = download_service_->AddTask(downloadTask, on_finished); if (add_task_result.has_error()) { @@ -374,10 +380,12 @@ cpp::result EngineService::DownloadCuda( GetSuitableCudaVersion(engine, hw_inf_.cuda_driver_version); auto url_obj = url_parser::Url{ - .protocol = "https", - .host = jan_host, - .pathParams = {"dist", "cuda-dependencies", suitable_toolkit_version, - hw_inf_.sys_inf->os, cuda_toolkit_file_name}, + /* .protocol = */ "https", + /* .host = */ jan_host, + /* .pathParams = */ + {"dist", "cuda-dependencies", suitable_toolkit_version, + hw_inf_.sys_inf->os, cuda_toolkit_file_name}, + /* .queries = */ {}, }; auto cuda_toolkit_url = url_parser::FromUrl(url_obj); @@ -389,11 +397,18 @@ cpp::result EngineService::DownloadCuda( cuda_toolkit_file_name; CTL_DBG("Download to: " << cuda_toolkit_local_path.string()); auto downloadCudaToolkitTask{DownloadTask{ - .id = download_id, - .type = DownloadType::CudaToolkit, - .items = {DownloadItem{.id = download_id, - .downloadUrl = cuda_toolkit_url, - .localPath = cuda_toolkit_local_path}}, + /* .id = */ download_id, + /* .status = */ DownloadTask::Status::Pending, + /* .type = */ DownloadType::CudaToolkit, + /* .items = */ + {DownloadItem{ + /* .id = */ download_id, + /* .downloadUrl = */ cuda_toolkit_url, + /* .localPath = */ cuda_toolkit_local_path, + /* .checksum = */ std::nullopt, + /* .bytes = */ std::nullopt, + /* .downloadedBytes = */ std::nullopt, + }}, }}; auto on_finished = [engine](const DownloadTask& finishedTask) { @@ -543,9 +558,9 @@ EngineService::SetDefaultEngineVariant(const std::string& engine, } return DefaultEngineVariant{ - .engine = engine, - .version = normalized_version, - .variant = variant, + engine, //engine + normalized_version, //version + variant, //varient }; } @@ -564,8 +579,8 @@ cpp::result EngineService::IsEngineVariantReady( for (const auto& installed_engine : installed_engines.value()) { CLI_LOG("Installed: name: " + installed_engine.name + ", version: " + 
installed_engine.version); - if (installed_engine.name == variant && - installed_engine.version == normalized_version || + if ((installed_engine.name == variant && + installed_engine.version == normalized_version) || installed_engine.version == "v" + normalized_version) { return true; } @@ -591,9 +606,9 @@ EngineService::GetDefaultEngineVariant(const std::string& engine) { } return DefaultEngineVariant{ - .engine = engine, - .version = version, - .variant = variant, + engine, //engine + version, //version + variant, //varient }; } @@ -625,9 +640,10 @@ EngineService::GetInstalledEngineVariants(const std::string& engine) const { try { auto node = YAML::LoadFile(version_txt_path.string()); auto ev = EngineVariantResponse{ - .name = node["name"].as(), - .version = "v" + node["version"].as(), - .engine = engine, + node["name"].as(), // name + "v" + node["version"].as(), // version + engine, // engine + "", // type }; variants.push_back(ev); } catch (const YAML::Exception& e) { @@ -731,12 +747,12 @@ cpp::result EngineService::LoadEngine( auto func = dylib->get_function("get_engine"); auto engine_obj = func(); auto load_opts = EngineI::EngineLoadOption{ - .engine_path = engine_dir_path, - .deps_path = cuda_path, - .is_custom_engine_path = custom_engine_path, - .log_path = log_path, - .max_log_lines = config.maxLogLines, - .log_level = logging_utils_helper::global_log_level, + /* .engine_path = */ engine_dir_path, + /* .deps_path = */ cuda_path, + /* .is_custom_engine_path = */ custom_engine_path, + /* .log_path = */ log_path, + /* .max_log_lines = */ config.maxLogLines, + /* .log_level = */ logging_utils_helper::global_log_level, }; engine_obj->Load(load_opts); @@ -764,7 +780,7 @@ void EngineService::RegisterEngineLibPath() { continue; } auto engine_dir_path = engine_dir_path_res.value().first; - auto custom_engine_path = engine_dir_path_res.value().second; + //[unused] auto custom_engine_path = engine_dir_path_res.value().second; auto cuda_path = 
file_manager_utils::GetCudaToolkitPath(ne); // register deps @@ -970,10 +986,10 @@ cpp::result EngineService::UpdateEngine( auto res = InstallEngineAsync(engine, latest_version->tag_name, default_variant->variant); - return EngineUpdateResult{.engine = engine, - .variant = default_variant->variant, - .from = default_variant->version, - .to = latest_version->tag_name}; + return EngineUpdateResult{/*.engine =*/engine, + /*.variant =*/default_variant->variant, + /*.from =*/default_variant->version, + /*.to =*/latest_version->tag_name}; } cpp::result, std::string> diff --git a/engine/services/engine_service.h b/engine/services/engine_service.h index 830944aee..7e6be74c5 100644 --- a/engine/services/engine_service.h +++ b/engine/services/engine_service.h @@ -68,9 +68,13 @@ class EngineService : public EngineServiceI { std::shared_ptr db_service) : download_service_{download_service}, dylib_path_manager_{dylib_path_manager}, - hw_inf_{.sys_inf = system_info_utils::GetSystemInfo(), - .cuda_driver_version = - system_info_utils::GetDriverAndCudaVersion().second}, + hw_inf_{ + system_info_utils::GetSystemInfo(), // sys_inf. + {}, // cpu_info. + system_info_utils::GetDriverAndCudaVersion() + .second // cuda_driver_version. 
+ }, + db_service_(db_service) {} std::vector GetEngineInfoList() const; @@ -131,7 +135,6 @@ class EngineService : public EngineServiceI { cpp::result UpdateEngine( const std::string& engine); - cpp::result, std::string> GetEngines(); cpp::result GetEngineById(int id); diff --git a/engine/services/file_watcher_service.h b/engine/services/file_watcher_service.h index d15b98827..1c61ab5e3 100644 --- a/engine/services/file_watcher_service.h +++ b/engine/services/file_watcher_service.h @@ -296,7 +296,7 @@ class FileWatcherService { const int POLL_TIMEOUT_MS = 1000; // 1 second timeout char buffer[4096]; - struct pollfd pfd = {.fd = fd, .events = POLLIN, .revents = 0}; + struct pollfd pfd = {fd, POLLIN, 0}; while (running_) { // Poll will sleep until either: diff --git a/engine/services/hardware_service.cc b/engine/services/hardware_service.cc index b61987319..f0ccadb28 100644 --- a/engine/services/hardware_service.cc +++ b/engine/services/hardware_service.cc @@ -52,12 +52,12 @@ HardwareInfo HardwareService::GetHardwareInfo() { }; } - return HardwareInfo{.cpu = cpu_info_.GetCPUInfo(), - .os = cortex::hw::GetOSInfo(), - .ram = cortex::hw::GetMemoryInfo(), - .storage = cortex::hw::GetStorageInfo(), - .gpus = gpus, - .power = cortex::hw::GetPowerInfo()}; + return HardwareInfo{/* .cpu = */ cpu_info_.GetCPUInfo(), + /* .os = */ cortex::hw::GetOSInfo(), + /* .ram = */ cortex::hw::GetMemoryInfo(), + /* .storage = */ cortex::hw::GetStorageInfo(), + /* .gpus = */ gpus, + /* .power = */ cortex::hw::GetPowerInfo()}; } bool HardwareService::Restart(const std::string& host, int port) { @@ -283,7 +283,7 @@ bool HardwareService::SetActivateHardwareConfig( for (size_t i = 0; i < ahc_gpus.size(); i++) { // if activated id or priority changes if (ahc_gpus[i] != activated_ids[i].first || - i != activated_ids[i].second) + i != (uint64_t)activated_ids[i].second) need_update = true; break; } @@ -366,12 +366,12 @@ void HardwareService::UpdateHardwareInfos() { }; auto res = 
db_service_->AddHardwareEntry( - HwEntry{.uuid = gpu.uuid, - .type = "gpu", - .hardware_id = std::stoi(gpu.id), - .software_id = std::stoi(gpu.id), - .activated = activated(), - .priority = INT_MAX}); + HwEntry{/* .uuid = */ gpu.uuid, + /* .type = */ "gpu", + /* .hardware_id = */ std::stoi(gpu.id), + /* .software_id = */ std::stoi(gpu.id), + /* .activated = */ activated(), + /* .priority = */ INT_MAX}); if (res.has_error()) { CTL_WRN(res.error()); } @@ -448,7 +448,7 @@ void HardwareService::UpdateHardwareInfos() { for (auto const& p : activated_gpu_af) { gpus.push_back(p.first); } - ahc_ = {.gpus = gpus}; + ahc_ = {/* .gpus = */ gpus}; } } @@ -494,12 +494,18 @@ void HardwareService::CheckDependencies() { std::filesystem::create_directories(fmu::GetCortexDataPath() / "deps"); } auto download_task{DownloadTask{ - .id = "vulkan", - .type = DownloadType::Miscellaneous, - .items = {DownloadItem{ - .id = "vulkan", - .downloadUrl = "https://catalog.jan.ai/libvulkan.so", - .localPath = fmu::GetCortexDataPath() / "deps" / "libvulkan.so", + /* .id = */ "vulkan", + /* .status = */ DownloadTask::Status::Pending, + /* .type = */ DownloadType::Miscellaneous, + /* .items = */ + {DownloadItem{ + /* .id = */ "vulkan", + /* .downloadUrl = */ "https://catalog.jan.ai/libvulkan.so", + /* .localPath = */ fmu::GetCortexDataPath() / "deps" / + "libvulkan.so", + /* .checksum = */ std::nullopt, + /* .bytes = */ std::nullopt, + /* .downloadedBytes = */ std::nullopt, }}, }}; auto result = DownloadService().AddDownloadTask( diff --git a/engine/services/inference_service.cc b/engine/services/inference_service.cc index e4b3853e3..a1646495b 100644 --- a/engine/services/inference_service.cc +++ b/engine/services/inference_service.cc @@ -270,6 +270,7 @@ InferResult InferenceService::GetModels( for (auto r : res["data"]) { resp_data.append(r); } + (void) status; }; for (const auto& loaded_engine : loaded_engines) { if (std::holds_alternative(loaded_engine)) { diff --git 
a/engine/services/message_service.cc b/engine/services/message_service.cc index 9b57e0215..5b489a750 100644 --- a/engine/services/message_service.cc +++ b/engine/services/message_service.cc @@ -97,7 +97,7 @@ cpp::result MessageService::ModifyMessage( msg->content = std::move(content_list); } - auto ptr = &msg.value(); + /* auto ptr = &msg.value(); */ auto res = message_repository_->ModifyMessage(msg.value()); if (res.has_error()) { diff --git a/engine/services/model_service.cc b/engine/services/model_service.cc index b0a692eb5..d9359b698 100644 --- a/engine/services/model_service.cc +++ b/engine/services/model_service.cc @@ -73,12 +73,16 @@ void ParseGguf(DatabaseService& db_service, auto author_id = author.has_value() ? author.value() : "cortexso"; if (!db_service.HasModel(ggufDownloadItem.id)) { cortex::db::ModelEntry model_entry{ - .model = ggufDownloadItem.id, - .author_repo_id = author_id, - .branch_name = branch, - .path_to_model_yaml = rel.string(), - .model_alias = ggufDownloadItem.id, - .status = cortex::db::ModelStatus::Downloaded}; + /* .model = */ ggufDownloadItem.id, + /* .author_repo_id = */ author_id, + /* .branch_name = */ branch, + /* .path_to_model_yaml = */ rel.string(), + /* .model_alias = */ ggufDownloadItem.id, + "", + "", + /* .status = */ cortex::db::ModelStatus::Downloaded, + "", + ""}; auto result = db_service.AddModelEntry(model_entry); if (result.has_error()) { @@ -99,11 +103,11 @@ void ParseGguf(DatabaseService& db_service, cpp::result GetDownloadTask( const std::string& modelId, const std::string& branch = "main") { - url_parser::Url url = { - .protocol = "https", - .host = kHuggingFaceHost, - .pathParams = {"api", "models", "cortexso", modelId, "tree", branch}, - }; + url_parser::Url url = {/* .protocol = */ "https", + /* .host = */ kHuggingFaceHost, + /* .pathParams = */ + {"api", "models", "cortexso", modelId, "tree", branch}, + {}}; auto result = curl_utils::SimpleGetJsonRecursive(url.ToFullPath()); if (result.has_error()) { @@ 
-123,23 +127,30 @@ cpp::result GetDownloadTask( continue; } url_parser::Url download_url = { - .protocol = "https", - .host = kHuggingFaceHost, - .pathParams = {"cortexso", modelId, "resolve", branch, path}}; + /* .protocol = */ "https", + /* .host = */ kHuggingFaceHost, + /* .pathParams = */ {"cortexso", modelId, "resolve", branch, path}, + {}}; auto local_path = model_container_path / path; if (!std::filesystem::exists(local_path.parent_path())) { std::filesystem::create_directories(local_path.parent_path()); } - download_items.push_back( - DownloadItem{.id = path, - .downloadUrl = download_url.ToFullPath(), - .localPath = local_path}); + download_items.push_back(DownloadItem{ + /* .id = */ path, + /* .downloadUrl = */ download_url.ToFullPath(), + /* .localPath = */ local_path, + /*.checksum = */ std::nullopt, + /* .bytes = */ std::nullopt, + /* .downloadedBytes = */ std::nullopt, + }); } - return DownloadTask{.id = branch == "main" ? modelId : modelId + "-" + branch, - .type = DownloadType::Model, - .items = download_items}; + return DownloadTask{ + /* .id = */ branch == "main" ? 
modelId : modelId + "-" + branch, + /* .status = */ DownloadTask::Status::Pending, + /* .type = */ DownloadType::Model, + /* .items = */ download_items}; } } // namespace @@ -277,12 +288,17 @@ cpp::result ModelService::HandleDownloadUrlAsync( auto download_url = url_parser::FromUrl(url_obj.value()); // this assume that the model being downloaded is a single gguf file - auto downloadTask{DownloadTask{.id = model_id, - .type = DownloadType::Model, - .items = {DownloadItem{ - .id = unique_model_id, - .downloadUrl = download_url, - .localPath = local_path, + auto downloadTask{DownloadTask{/* .id = */ model_id, + DownloadTask::Status::Pending, + /* .type = */ DownloadType::Model, + /* .items = */ + {DownloadItem{ + /* .id = */ unique_model_id, + /* .downloadUrl = */ download_url, + /* .localPath = */ local_path, + /* .checksum = */ std::nullopt, + /* .bytes = */ std::nullopt, + /* .downloadedBytes = */ std::nullopt, }}}}; auto on_finished = [this, author, @@ -341,12 +357,12 @@ ModelService::EstimateModel(const std::string& model_handle, return hardware::EstimateLLaMACppRun( fmu::ToAbsoluteCortexDataPath(fs::path(mc.files[0])).string(), - {.ngl = mc.ngl, - .ctx_len = mc.ctx_len, - .n_batch = n_batch, - .n_ubatch = n_ubatch, - .kv_cache_type = kv_cache, - .free_vram_MiB = free_vram_MiB}); + {/* .ngl = */ mc.ngl, + /* .ctx_len = */ mc.ctx_len, + /* .n_batch = */ n_batch, + /* .n_ubatch = */ n_ubatch, + /* .kv_cache_type = */ kv_cache, + /* .free_vram_MiB = */ free_vram_MiB}); } catch (const std::exception& e) { return cpp::fail("Fail to get model status with ID '" + model_handle + "': " + e.what()); @@ -383,7 +399,7 @@ ModelService::DownloadModelFromCortexsoAsync( auto on_finished = [this, unique_model_id, branch](const DownloadTask& finishedTask) { const DownloadItem* model_yml_item = nullptr; - auto need_parse_gguf = true; + // [unused] auto need_parse_gguf = true; for (const auto& item : finishedTask.items) { if (item.localPath.filename().string() == "model.yml") { @@ 
-421,13 +437,16 @@ ModelService::DownloadModelFromCortexsoAsync( if (!db_service_->HasModel(unique_model_id)) { cortex::db::ModelEntry model_entry{ - .model = unique_model_id, - .author_repo_id = "cortexso", - .branch_name = branch, - .path_to_model_yaml = rel.string(), - .model_alias = unique_model_id, - .status = cortex::db::ModelStatus::Downloaded, - .engine = mc.engine}; + /* .model = */ unique_model_id, + /* .author_repo_id = */ "cortexso", + /* .branch_name = */ branch, + /* .path_to_model_yaml = */ rel.string(), + /* .model_alias = */ unique_model_id, + "", + "", + /* .status = */ cortex::db::ModelStatus::Downloaded, + /* .engine = */ mc.engine, + ""}; auto result = db_service_->AddModelEntry(model_entry); if (result.has_error()) { @@ -589,10 +608,10 @@ cpp::result ModelService::StartModel( auto status = std::get<0>(ir)["status_code"].asInt(); auto data = std::get<1>(ir); if (status == drogon::k200OK) { - return StartModelResult{.success = true, .warning = ""}; + return StartModelResult{/* .success = */ true, /* .warning = */ ""}; } else if (status == drogon::k409Conflict) { CTL_INF("Model '" + model_handle + "' is already loaded"); - return StartModelResult{.success = true, .warning = ""}; + return StartModelResult{/* .success = */ true, /* .warning = */ ""}; } else { // only report to user the error CTL_ERR("Model failed to start with status code: " << status); @@ -614,7 +633,7 @@ cpp::result ModelService::StartModel( #endif } else { LOG_WARN << "model_path is empty"; - return StartModelResult{.success = false}; + return StartModelResult{/* .success = */ false, ""}; } if (!mc.mmproj.empty()) { #if defined(_WIN32) @@ -687,12 +706,13 @@ cpp::result ModelService::StartModel( } } - return StartModelResult{.success = true, - .warning = may_fallback_res.value()}; + return StartModelResult{/* .success = */ true, + /* .warning = */ may_fallback_res.value()}; } else if (status == drogon::k409Conflict) { CTL_INF("Model '" + model_handle + "' is already loaded"); 
return StartModelResult{ - .success = true, .warning = may_fallback_res.value_or(std::nullopt)}; + /* .success = */ true, + /* .warning = */ may_fallback_res.value_or(std::nullopt)}; } else { // only report to user the error CTL_ERR("Model failed to start with status code: " << status); @@ -820,15 +840,20 @@ cpp::result ModelService::GetModelPullInfo( auto file_name{url_obj->pathParams.back()}; if (author == "cortexso") { return ModelPullInfo{ - .id = model_id + ":" + url_obj->pathParams[3], - .downloaded_models = {}, - .available_models = {}, - .download_url = url_parser::FromUrl(url_obj.value())}; + /* .id = */ model_id + ":" + url_obj->pathParams[3], + /* .default_branch = */ "main", + /* .downloaded_models = */ {}, + /* .available_models = */ {}, + /* .model_source = */ "", + /* .download_url = */ url_parser::FromUrl(url_obj.value())}; } - return ModelPullInfo{.id = author + ":" + model_id + ":" + file_name, - .downloaded_models = {}, - .available_models = {}, - .download_url = url_parser::FromUrl(url_obj.value())}; + return ModelPullInfo{ + /* .id = */ author + ":" + model_id + ":" + file_name, + /* .default_branch = */ "main", + /* .downloaded_models = */ {}, + /* .available_models = */ {}, + /* .model_source = */ "", + /* .download_url = */ url_parser::FromUrl(url_obj.value())}; } if (input.find(":") != std::string::npos) { @@ -836,10 +861,12 @@ cpp::result ModelService::GetModelPullInfo( if (parsed.size() != 2 && parsed.size() != 3) { return cpp::fail("Invalid model handle: " + input); } - return ModelPullInfo{.id = input, - .downloaded_models = {}, - .available_models = {}, - .download_url = input}; + return ModelPullInfo{/* .id = */ input, + /* .default_branch = */ "main", + /* .downloaded_models = */ {}, + /* .available_models = */ {}, + /* .model_source = */ "", + /* .download_url = */ input}; } if (input.find("/") != std::string::npos) { @@ -872,11 +899,13 @@ cpp::result ModelService::GetModelPullInfo( } return ModelPullInfo{ - .id = author + ":" + 
model_name, - .downloaded_models = {}, - .available_models = options, - .download_url = - huggingface_utils::GetDownloadableUrl(author, model_name, "")}; + /* .id = */ author + ":" + model_name, + /* .default_branch = */ "main", + /* .downloaded_models = */ {}, + /* .available_models = */ options, + /* .model_source = */ "", + /* .download_url = */ + huggingface_utils::GetDownloadableUrl(author, model_name, "")}; } } auto branches = @@ -915,11 +944,13 @@ cpp::result ModelService::GetModelPullInfo( string_utils::SortStrings(downloaded_model_ids); string_utils::SortStrings(avai_download_opts); - return ModelPullInfo{.id = model_name, - .default_branch = normalized_def_branch.value_or(""), - .downloaded_models = downloaded_model_ids, - .available_models = avai_download_opts, - .model_source = "cortexso"}; + return ModelPullInfo{ + /* .id = */ model_name, + /* .default_branch = */ normalized_def_branch.value_or(""), + /* .downloaded_models = */ downloaded_model_ids, + /* .available_models = */ avai_download_opts, + /* .model_source = */ "cortexso", + /* .download_url = */ ""}; } cpp::result ModelService::AbortDownloadModel( @@ -1007,12 +1038,12 @@ ModelService::MayFallbackToCpu(const std::string& model_path, int ngl, free_vram_MiB = free_ram_MiB; #endif - hardware::RunConfig rc = {.ngl = ngl, - .ctx_len = ctx_len, - .n_batch = n_batch, - .n_ubatch = n_ubatch, - .kv_cache_type = kv_cache_type, - .free_vram_MiB = free_vram_MiB}; + hardware::RunConfig rc = {/* .ngl = */ ngl, + /* .ctx_len = */ ctx_len, + /* .n_batch = */ n_batch, + /* .n_ubatch = */ n_ubatch, + /* .kv_cache_type = */ kv_cache_type, + /* .free_vram_MiB = */ free_vram_MiB}; auto es = hardware::EstimateLLaMACppRun(model_path, rc); if (!!es && (*es).gpu_mode.vram_MiB > free_vram_MiB && is_cuda) { @@ -1108,4 +1139,4 @@ void ModelService::ProcessBgrTasks() { auto clone = cb; task_queue_.RunInQueue(std::move(cb)); task_queue_.RunEvery(std::chrono::seconds(60), std::move(clone)); -} \ No newline at end of file +} 
diff --git a/engine/services/model_source_service.cc b/engine/services/model_source_service.cc index ea26718e2..b5979667c 100644 --- a/engine/services/model_source_service.cc +++ b/engine/services/model_source_service.cc @@ -234,7 +234,7 @@ ModelSourceService::GetRepositoryList(std::string_view hub_author, return get_repo_list(); } - const auto begin = std::chrono::high_resolution_clock::now(); + /* const auto begin = std::chrono::high_resolution_clock::now(); */ auto res = curl_utils::SimpleGet("https://huggingface.co/api/models?author=" + as); if (res.has_value()) { @@ -353,16 +353,16 @@ ModelSourceService::AddRepoSiblings(const std::string& model_source, std::string model_id = hub_author + ":" + model_name + ":" + sibling.rfilename; cortex::db::ModelEntry e = { - .model = model_id, - .author_repo_id = hub_author, - .branch_name = "main", - .path_to_model_yaml = "", - .model_alias = "", - .model_format = "hf-gguf", - .model_source = model_source, - .status = cortex::db::ModelStatus::Downloadable, - .engine = "llama-cpp", - .metadata = json_helper::DumpJsonString(meta_json)}; + /* .model = */ model_id, + /* .author_repo_id = */ hub_author, + /* .branch_name = */ "main", + /* .path_to_model_yaml = */ "", + /* .model_alias = */ "", + /* .model_format = */ "hf-gguf", + /* .model_source = */ model_source, + /* .status = */ cortex::db::ModelStatus::Downloadable, + /* .engine = */ "llama-cpp", + /* .metadata = */ json_helper::DumpJsonString(meta_json)}; if (!db_service_->HasModel(model_id)) { if (auto add_res = db_service_->AddModelEntry(e); add_res.has_error()) { CTL_INF(add_res.error()); @@ -488,9 +488,11 @@ cpp::result ModelSourceService::AddCortexsoRepoBranch( const std::string& model_name, const std::string& branch, const std::string& metadata, const std::string& desc) { url_parser::Url url = { - .protocol = "https", - .host = kHuggingFaceHost, - .pathParams = {"api", "models", "cortexso", model_name, "tree", branch}, + /* .protocol = */ "https", + /* .host = */ 
kHuggingFaceHost, + /* .pathParams = */ + {"api", "models", "cortexso", model_name, "tree", branch}, + /* .queries = */ {}, }; auto result = curl_utils::SimpleGetJson(url.ToFullPath()); @@ -516,16 +518,16 @@ cpp::result ModelSourceService::AddCortexsoRepoBranch( meta_json["description"] = desc; std::string model_id = model_name + ":" + branch; cortex::db::ModelEntry e = { - .model = model_id, - .author_repo_id = author, - .branch_name = branch, - .path_to_model_yaml = "", - .model_alias = "", - .model_format = "cortexso", - .model_source = model_source, - .status = cortex::db::ModelStatus::Downloadable, - .engine = "llama-cpp", - .metadata = json_helper::DumpJsonString(meta_json)}; + /* .model = */ model_id, + /* .author_repo_id = */ author, + /* .branch_name = */ branch, + /* .path_to_model_yaml = */ "", + /* .model_alias = */ "", + /* .model_format = */ "cortexso", + /* .model_source = */ model_source, + /* .status = */ cortex::db::ModelStatus::Downloadable, + /* .engine = */ "llama-cpp", + /* .metadata = */ json_helper::DumpJsonString(meta_json)}; if (!db_service_->HasModel(model_id)) { CTL_INF("Adding model to db: " << model_name << ":" << branch); if (auto res = db_service_->AddModelEntry(e); diff --git a/engine/test/components/test_cortex_config.cc b/engine/test/components/test_cortex_config.cc index f48b5c674..12eebc6f3 100644 --- a/engine/test/components/test_cortex_config.cc +++ b/engine/test/components/test_cortex_config.cc @@ -14,16 +14,39 @@ class CortexConfigTest : public ::testing::Test { void SetUp() override { // Set up default configuration - default_config = {"default_log_path", - "default_llamacpp_log_path", - "default_onnx_log_path", - "default_data_path", - 1000, - kDefaultHost, - kDefaultPort, - kDefaultCheckedForUpdateAt, - kDefaultCheckedForLlamacppUpdateAt, - kDefaultLatestRelease}; + default_config = { + "default_log_path", + "default_llamacpp_log_path", + "default_onnx_log_path", + "default_data_path", + 1000, + kDefaultHost, + 
kDefaultPort, + kDefaultCheckedForUpdateAt, + kDefaultCheckedForLlamacppUpdateAt, + kDefaultLatestRelease, + "", + "", + "", + "", + "", + "", + false, + {}, + "", + false, + false, + "", + "", + "", + false, + false, + "", + "", + {}, + 0, + {}, + }; } void TearDown() override { @@ -35,16 +58,39 @@ class CortexConfigTest : public ::testing::Test { }; TEST_F(CortexConfigTest, DumpYamlConfig_WritesCorrectly) { - CortexConfig config = {"log_path", - "default_llamacpp_log_path", - "default_onnx_log_path", - "data_path", - 5000, - "localhost", - "8080", - 123456789, - 123456789, - "v1.0.0"}; + CortexConfig config = { + "log_path", + "default_llamacpp_log_path", + "default_onnx_log_path", + "data_path", + 5000, + "localhost", + "8080", + 123456789, + 123456789, + "v1.0.0", + "", + "", + "", + "", + "", + "", + false, + {}, + "", + false, + false, + "", + "", + "", + false, + false, + "", + "", + {}, + 0, + {}, + }; auto result = cyu::CortexConfigMgr::GetInstance().DumpYamlConfig( config, test_file_path); @@ -64,16 +110,40 @@ TEST_F(CortexConfigTest, DumpYamlConfig_WritesCorrectly) { TEST_F(CortexConfigTest, FromYaml_ReadsCorrectly) { // First, create a valid YAML configuration file - CortexConfig config = {"log_path", - "default_llamacpp_log_path", - "default_onnx_log_path", - "data_path", - 5000, - "localhost", - "8080", - 123456789, - 123456789, - "v1.0.0"}; + CortexConfig config = { + "log_path", + "default_llamacpp_log_path", + "default_onnx_log_path", + "data_path", + 5000, + "localhost", + "8080", + 123456789, + 123456789, + "v1.0.0", + + "", + "", + "", + "", + "", + "", + false, + {}, + "", + false, + false, + "", + "", + "", + false, + false, + "", + "", + {}, + 0, + {}, + }; auto result = cyu::CortexConfigMgr::GetInstance().DumpYamlConfig( config, test_file_path); diff --git a/engine/test/components/test_download_task_queue.cc b/engine/test/components/test_download_task_queue.cc index 526371399..929885183 100644 --- 
a/engine/test/components/test_download_task_queue.cc +++ b/engine/test/components/test_download_task_queue.cc @@ -10,11 +10,11 @@ class DownloadTaskQueueTest : public ::testing::Test { DownloadTask CreateDownloadTask( const std::string& id, - DownloadTask::Status staus = DownloadTask::Status::Pending) { - return DownloadTask{.id = id, - .status = DownloadTask::Status::Pending, - .type = DownloadType::Model, - .items = {}}; + DownloadTask::Status status = DownloadTask::Status::Pending) { + return DownloadTask{/* .id = */ id, + /* .status = */ status, + /* .type = */ DownloadType::Model, + /* .items = */ {}}; } TEST_F(DownloadTaskQueueTest, PushAndPop) { @@ -107,7 +107,7 @@ TEST_F(DownloadTaskQueueTest, ConcurrentPushAndPop) { std::atomic poppedTasks{0}; for (int i = 0; i < 4; ++i) { - pushThreads.emplace_back([this, numTasks, i, &pushedTasks]() { + pushThreads.emplace_back([this, i, &pushedTasks]() { for (int j = 0; j < numTasks; ++j) { queue.push(CreateDownloadTask("task_" + std::to_string(i) + "_" + std::to_string(j))); diff --git a/engine/test/components/test_file_manager_config_yaml_utils.cc b/engine/test/components/test_file_manager_config_yaml_utils.cc index ccbc92ec8..fc457b158 100644 --- a/engine/test/components/test_file_manager_config_yaml_utils.cc +++ b/engine/test/components/test_file_manager_config_yaml_utils.cc @@ -57,11 +57,42 @@ TEST_F(FileManagerConfigTest, GetCortexConfig) { // Tests for config_yaml_utils TEST_F(FileManagerConfigTest, DumpYamlConfig) { - config_yaml_utils::CortexConfig config{.logFolderPath = "/path/to/logs", - .dataFolderPath = "/path/to/data", - .maxLogLines = 1000, - .apiServerHost = "localhost", - .apiServerPort = "8080"}; + config_yaml_utils::CortexConfig config{ + /* .logFolderPath = */ "/path/to/logs", + /* .logLlamaCppPath = */ file_manager_utils::kLogsLlamacppBaseName, + /* .logOnnxPath = */ file_manager_utils::kLogsOnnxBaseName, + /* .dataFolderPath = */ "/path/to/data", + /* .maxLogLines = */ 1000, + /* .apiServerHost = 
*/ "localhost", + /* .apiServerPort = */ "8080", + + /* .checkedForUpdateAt = */ config_yaml_utils::kDefaultCheckedForUpdateAt, + /* .checkedForLlamacppUpdateAt = */ + config_yaml_utils::kDefaultCheckedForLlamacppUpdateAt, + /* .latestRelease = */ config_yaml_utils::kDefaultLatestRelease, + /* .latestLlamacppRelease = */ config_yaml_utils::kDefaultLatestLlamacppRelease, + /* .huggingFaceToken = */ "", + /* .gitHubUserAgent = */ "", + /* .gitHubToken = */ "", + /* .llamacppVariant = */ "", + /* .llamacppVersion = */ "", + /* .enableCors = */ config_yaml_utils::kDefaultCorsEnabled, + /* .allowedOrigins = */ config_yaml_utils::kDefaultEnabledOrigins, + /* .proxyUrl = */ "", + /* .verifyProxySsl = */ true, + /* .verifyProxyHostSsl = */ true, + /* .proxyUsername = */ "", + /* .proxyPassword = */ "", + /* .noProxy = */ config_yaml_utils::kDefaultNoProxy, + /* .verifyPeerSsl = */ true, + /* .verifyHostSsl = */ true, + + /* .sslCertPath = */ "", + /* .sslKeyPath = */ "", + /* .supportedEngines = */ config_yaml_utils::kDefaultSupportedEngines, + /* .checkedForSyncHubAt = */ 0u, + /* .apiKeys = */ {}, + }; std::string test_file = "test_config.yaml"; auto result = diff --git a/engine/test/components/test_hardware.cc b/engine/test/components/test_hardware.cc index d87beb744..2545255f7 100644 --- a/engine/test/components/test_hardware.cc +++ b/engine/test/components/test_hardware.cc @@ -1,3 +1,4 @@ +#include "common/hardware_common.h" #include "gtest/gtest.h" #include "utils/hardware/cpu_info.h" #include "utils/hardware/gpu_info.h" @@ -67,23 +68,32 @@ class GpuJsonTests : public ::testing::Test { // Set up a vector of GPUs for testing cortex::hw::NvidiaAddInfo nvidia_info{"460.32.03", "6.1"}; - test_gpus.push_back({.id = "0", - .name = "NVIDIA GeForce GTX 1080", - .version = "1.0", - .add_info = nvidia_info, - .free_vram = 4096, - .total_vram = 8192, - .uuid = "GPU-12345678", - .is_activated = true}); - - test_gpus.push_back({.id = "1", - .name = "NVIDIA GeForce RTX 2080", - 
.version = "1.1", - .add_info = nvidia_info, - .free_vram = 6144, - .total_vram = 8192, - .uuid = "GPU-87654321", - .is_activated = false}); + test_gpus.push_back(cortex::hw::GPU{ + /* .id = */ "0", + /* .device_id = */ 0, + /* .name = */ "NVIDIA GeForce GTX 1080", + /* .version = */ "1.0", + /* .add_info = */ nvidia_info, + /* .free_vram = */ 4096, + /* .total_vram = */ 8192, + /* .uuid = */ "GPU-12345678", + /* .is_activated = */ true, + /* .vendor = */ "", + /* .gpu_type = */ cortex::hw::GpuType::kGpuTypeDiscrete}); + + test_gpus.push_back({ + /* .id = */ "1", + /* .device_id = */ 0, + /* .name = */ "NVIDIA GeForce RTX 2080", + /* .version = */ "1.1", + /* .add_info = */ nvidia_info, + /* .free_vram = */ 6144, + /* .total_vram = */ 8192, + /* .uuid = */ "GPU-87654321", + /* .is_activated = */ false, + /* .vendor = */ "", + /* .gpu_type = */ cortex::hw::GpuType::kGpuTypeDiscrete, + }); } std::vector test_gpus; @@ -169,30 +179,30 @@ TEST_F(GpuJsonTests, FromJson_ValidJson_Success) { } class OsJsonTests : public ::testing::Test { -protected: - cortex::hw::OS test_os; - - void SetUp() override { - test_os.name = "Ubuntu"; - test_os.version = "20.04"; - test_os.arch = "x86_64"; - } + protected: + cortex::hw::OS test_os; + + void SetUp() override { + test_os.name = "Ubuntu"; + test_os.version = "20.04"; + test_os.arch = "x86_64"; + } }; TEST_F(OsJsonTests, ToJson_ValidOS_Success) { - Json::Value json_result = cortex::hw::ToJson(test_os); + Json::Value json_result = cortex::hw::ToJson(test_os); - EXPECT_EQ(json_result["name"].asString(), test_os.name); - EXPECT_EQ(json_result["version"].asString(), test_os.version); + EXPECT_EQ(json_result["name"].asString(), test_os.name); + EXPECT_EQ(json_result["version"].asString(), test_os.version); } TEST_F(OsJsonTests, FromJson_ValidJson_Success) { - Json::Value json_input; - json_input["name"] = test_os.name; - json_input["version"] = test_os.version; + Json::Value json_input; + json_input["name"] = test_os.name; + 
json_input["version"] = test_os.version; - cortex::hw::OS os_result = cortex::hw::os::FromJson(json_input); + cortex::hw::OS os_result = cortex::hw::os::FromJson(json_input); - EXPECT_EQ(os_result.name, test_os.name); - EXPECT_EQ(os_result.version, test_os.version); -} \ No newline at end of file + EXPECT_EQ(os_result.name, test_os.name); + EXPECT_EQ(os_result.version, test_os.version); +} diff --git a/engine/test/components/test_models_db.cc b/engine/test/components/test_models_db.cc index 0cc9b0344..14adccbe5 100644 --- a/engine/test/components/test_models_db.cc +++ b/engine/test/components/test_models_db.cc @@ -44,7 +44,8 @@ class ModelsTestSuite : public ::testing::Test { "main", "/path/to/model.yaml", "test_alias", "test_format", "test_source", cortex::db::ModelStatus::Downloaded, - "test_engine"}; + "test_engine", "", + }; }; TEST_F(ModelsTestSuite, TestAddModelEntry) { diff --git a/engine/test/components/test_tool_resources.cc b/engine/test/components/test_tool_resources.cc index 2b78e6494..501882a0d 100644 --- a/engine/test/components/test_tool_resources.cc +++ b/engine/test/components/test_tool_resources.cc @@ -205,7 +205,7 @@ TEST_F(FileSearchTest, SelfAssignment) { FileSearch search; search.vector_store_ids = sample_vector_store_ids; - search = std::move(search); // Self-assignment with move + // search = std::move(search); // Self-assignment with move EXPECT_EQ(search.vector_store_ids, sample_vector_store_ids); } } // namespace diff --git a/engine/test/components/test_url_parser.cc b/engine/test/components/test_url_parser.cc index 25769bc6f..a0f4346fa 100644 --- a/engine/test/components/test_url_parser.cc +++ b/engine/test/components/test_url_parser.cc @@ -16,9 +16,10 @@ TEST_F(UrlParserTestSuite, TestParseUrlCorrectly) { TEST_F(UrlParserTestSuite, ConstructUrlCorrectly) { auto url = url_parser::Url{ - .protocol = "https", - .host = "jan.ai", - .pathParams = {"path1", "path2"}, + /* .protocol = */ "https", + /* .host = */ "jan.ai", + /* .pathParams = */ 
{"path1", "path2"}, + /* .queries = */ {}, }; auto url_str = url_parser::FromUrl(url); @@ -27,10 +28,10 @@ TEST_F(UrlParserTestSuite, ConstructUrlCorrectly) { TEST_F(UrlParserTestSuite, ConstructUrlWithQueryCorrectly) { auto url = url_parser::Url{ - .protocol = "https", - .host = "jan.ai", - .pathParams = {"path1", "path2"}, - .queries = {{"key1", "value1"}, {"key2", 2}, {"key3", true}}, + /* .protocol = */ "https", + /* .host = */ "jan.ai", + /* .pathParams = */ {"path1", "path2"}, + /* .queries = */ {{"key1", "value1"}, {"key2", 2}, {"key3", true}}, }; auto url_str = url_parser::FromUrl(url); @@ -45,9 +46,10 @@ TEST_F(UrlParserTestSuite, ConstructUrlWithQueryCorrectly) { TEST_F(UrlParserTestSuite, ConstructUrlWithEmptyPathCorrectly) { auto url = url_parser::Url{ - .protocol = "https", - .host = "jan.ai", - .pathParams = {}, + /* .protocol = */ "https", + /* .host = */ "jan.ai", + /* .pathParams = */ {}, + /* .queries = */ {}, }; auto url_str = url_parser::FromUrl(url); @@ -55,16 +57,22 @@ TEST_F(UrlParserTestSuite, ConstructUrlWithEmptyPathCorrectly) { } TEST_F(UrlParserTestSuite, GetProtocolAndHostCorrectly) { - auto url = url_parser::Url{.protocol = "https", .host = "jan.ai"}; + auto url = url_parser::Url{ + /* .protocol = */ "https", + /* .host = */ "jan.ai", + /* .pathParams = */ {}, + /* .queries= */ {}, + }; auto protocol_and_host = url.GetProtocolAndHost(); EXPECT_EQ(protocol_and_host, "https://jan.ai"); } TEST_F(UrlParserTestSuite, GetPathAndQueryCorrectly) { auto url = url_parser::Url{ - .protocol = "https", - .host = "jan.ai", - .pathParams = {"path1", "path2"}, + /* .protocol = */ "https", + /* .host = */ "jan.ai", + /* .pathParams = */ {"path1", "path2"}, + /* .queries = */ {}, }; auto path_and_query = url.GetPathAndQuery(); EXPECT_EQ(path_and_query, "/path1/path2"); diff --git a/engine/utils/cli_selection_utils.h b/engine/utils/cli_selection_utils.h index 20450ef7f..dca6fe675 100644 --- a/engine/utils/cli_selection_utils.h +++ 
b/engine/utils/cli_selection_utils.h @@ -80,7 +80,7 @@ inline std::optional PrintModelSelection( // deal with out of range numeric values std::optional numeric_value = GetNumericValue(selection); - if (!numeric_value.has_value() || numeric_value.value() > availables.size() || numeric_value.value() < 1) { + if (!numeric_value.has_value() || (unsigned) numeric_value.value() > availables.size() || numeric_value.value() < 1) { return std::nullopt; } @@ -107,7 +107,7 @@ inline std::optional PrintSelection( // deal with out of range numeric values std::optional numeric_value = GetNumericValue(selection); - if (!numeric_value.has_value() || numeric_value.value() > options.size() || numeric_value.value() < 1) { + if (!numeric_value.has_value() ||(unsigned) numeric_value.value() > options.size() || numeric_value.value() < 1) { return std::nullopt; } diff --git a/engine/utils/config_yaml_utils.cc b/engine/utils/config_yaml_utils.cc index dc47590c4..e6843c04c 100644 --- a/engine/utils/config_yaml_utils.cc +++ b/engine/utils/config_yaml_utils.cc @@ -89,98 +89,106 @@ CortexConfig CortexConfigMgr::FromYaml(const std::string& path, !node["checkedForSyncHubAt"] || !node["apiKeys"]); CortexConfig config = { - .logFolderPath = node["logFolderPath"] - ? node["logFolderPath"].as() - : default_cfg.logFolderPath, - .logLlamaCppPath = node["logLlamaCppPath"] - ? node["logLlamaCppPath"].as() - : default_cfg.logLlamaCppPath, - .logOnnxPath = node["logOnnxPath"] - ? node["logOnnxPath"].as() - : default_cfg.logOnnxPath, - .dataFolderPath = node["dataFolderPath"] - ? node["dataFolderPath"].as() - : default_cfg.dataFolderPath, - .maxLogLines = node["maxLogLines"] ? node["maxLogLines"].as() - : default_cfg.maxLogLines, - .apiServerHost = node["apiServerHost"] - ? node["apiServerHost"].as() - : default_cfg.apiServerHost, - .apiServerPort = node["apiServerPort"] - ? node["apiServerPort"].as() - : default_cfg.apiServerPort, - .checkedForUpdateAt = node["checkedForUpdateAt"] - ? 
node["checkedForUpdateAt"].as() - : default_cfg.checkedForUpdateAt, - .checkedForLlamacppUpdateAt = - node["checkedForLlamacppUpdateAt"] - ? node["checkedForLlamacppUpdateAt"].as() - : default_cfg.checkedForLlamacppUpdateAt, - .latestRelease = node["latestRelease"] - ? node["latestRelease"].as() - : default_cfg.latestRelease, - .latestLlamacppRelease = - node["latestLlamacppRelease"] - ? node["latestLlamacppRelease"].as() - : default_cfg.latestLlamacppRelease, - .huggingFaceToken = node["huggingFaceToken"] - ? node["huggingFaceToken"].as() - : default_cfg.huggingFaceToken, - .gitHubUserAgent = node["gitHubUserAgent"] - ? node["gitHubUserAgent"].as() - : default_cfg.gitHubUserAgent, - .gitHubToken = node["gitHubToken"] - ? node["gitHubToken"].as() - : default_cfg.gitHubToken, - .llamacppVariant = node["llamacppVariant"] - ? node["llamacppVariant"].as() - : default_cfg.llamacppVariant, - .llamacppVersion = node["llamacppVersion"] - ? node["llamacppVersion"].as() - : default_cfg.llamacppVersion, - .enableCors = node["enableCors"] ? node["enableCors"].as() - : default_cfg.enableCors, - .allowedOrigins = - node["allowedOrigins"] - ? node["allowedOrigins"].as>() - : default_cfg.allowedOrigins, - .proxyUrl = node["proxyUrl"] ? node["proxyUrl"].as() - : default_cfg.proxyUrl, - .verifyProxySsl = node["verifyProxySsl"] - ? node["verifyProxySsl"].as() - : default_cfg.verifyProxySsl, - .verifyProxyHostSsl = node["verifyProxyHostSsl"] - ? node["verifyProxyHostSsl"].as() - : default_cfg.verifyProxyHostSsl, - .proxyUsername = node["proxyUsername"] - ? node["proxyUsername"].as() - : default_cfg.proxyUsername, - .proxyPassword = node["proxyPassword"] - ? node["proxyPassword"].as() - : default_cfg.proxyPassword, - .noProxy = node["noProxy"] ? node["noProxy"].as() - : default_cfg.noProxy, - .verifyPeerSsl = node["verifyPeerSsl"] - ? node["verifyPeerSsl"].as() - : default_cfg.verifyPeerSsl, - .verifyHostSsl = node["verifyHostSsl"] - ? 
node["verifyHostSsl"].as() - : default_cfg.verifyHostSsl, - .sslCertPath = node["sslCertPath"] - ? node["sslCertPath"].as() - : default_cfg.sslCertPath, - .sslKeyPath = node["sslKeyPath"] ? node["sslKeyPath"].as() - : default_cfg.sslKeyPath, - .supportedEngines = - node["supportedEngines"] - ? node["supportedEngines"].as>() - : default_cfg.supportedEngines, - .checkedForSyncHubAt = node["checkedForSyncHubAt"] - ? node["checkedForSyncHubAt"].as() - : default_cfg.checkedForSyncHubAt, - .apiKeys = node["apiKeys"] - ? node["apiKeys"].as>() - : default_cfg.apiKeys, + /* .logFolderPath = */ node["logFolderPath"] + ? node["logFolderPath"].as() + : default_cfg.logFolderPath, + /* .logLlamaCppPath = */ + node["logLlamaCppPath"] ? node["logLlamaCppPath"].as() + : default_cfg.logLlamaCppPath, + /* .logOnnxPath = */ + node["logOnnxPath"] ? node["logOnnxPath"].as() + : default_cfg.logOnnxPath, + /* .dataFolderPath = */ + node["dataFolderPath"] ? node["dataFolderPath"].as() + : default_cfg.dataFolderPath, + /* .maxLogLines = */ + node["maxLogLines"] ? node["maxLogLines"].as() + : default_cfg.maxLogLines, + /* .apiServerHost = */ + node["apiServerHost"] ? node["apiServerHost"].as() + : default_cfg.apiServerHost, + /* .apiServerPort = */ + node["apiServerPort"] ? node["apiServerPort"].as() + : default_cfg.apiServerPort, + /* .checkedForUpdateAt = */ + node["checkedForUpdateAt"] + ? node["checkedForUpdateAt"].as() + : default_cfg.checkedForUpdateAt, + /* .checkedForLlamacppUpdateAt = */ + node["checkedForLlamacppUpdateAt"] + ? node["checkedForLlamacppUpdateAt"].as() + : default_cfg.checkedForLlamacppUpdateAt, + /* .latestRelease = */ + node["latestRelease"] ? node["latestRelease"].as() + : default_cfg.latestRelease, + /* .latestLlamacppRelease = */ + node["latestLlamacppRelease"] + ? node["latestLlamacppRelease"].as() + : default_cfg.latestLlamacppRelease, + /* .huggingFaceToken = */ + node["huggingFaceToken"] + ? 
node["huggingFaceToken"].as() + : default_cfg.huggingFaceToken, + /* .gitHubUserAgent = */ + node["gitHubUserAgent"] ? node["gitHubUserAgent"].as() + : default_cfg.gitHubUserAgent, + /* .gitHubToken = */ + node["gitHubToken"] ? node["gitHubToken"].as() + : default_cfg.gitHubToken, + /* .llamacppVariant = */ + node["llamacppVariant"] ? node["llamacppVariant"].as() + : default_cfg.llamacppVariant, + /* .llamacppVersion = */ + node["llamacppVersion"] ? node["llamacppVersion"].as() + : default_cfg.llamacppVersion, + /* .enableCors = */ + node["enableCors"] ? node["enableCors"].as() + : default_cfg.enableCors, + /* .allowedOrigins = */ + node["allowedOrigins"] + ? node["allowedOrigins"].as>() + : default_cfg.allowedOrigins, + /* .proxyUrl = */ + node["proxyUrl"] ? node["proxyUrl"].as() + : default_cfg.proxyUrl, + /* .verifyProxySsl = */ + node["verifyProxySsl"] ? node["verifyProxySsl"].as() + : default_cfg.verifyProxySsl, + /* .verifyProxyHostSsl = */ + node["verifyProxyHostSsl"] ? node["verifyProxyHostSsl"].as() + : default_cfg.verifyProxyHostSsl, + /* .proxyUsername = */ + node["proxyUsername"] ? node["proxyUsername"].as() + : default_cfg.proxyUsername, + /* .proxyPassword = */ + node["proxyPassword"] ? node["proxyPassword"].as() + : default_cfg.proxyPassword, + /* .noProxy = */ + node["noProxy"] ? node["noProxy"].as() + : default_cfg.noProxy, + /* .verifyPeerSsl = */ + node["verifyPeerSsl"] ? node["verifyPeerSsl"].as() + : default_cfg.verifyPeerSsl, + /* .verifyHostSsl = */ + node["verifyHostSsl"] ? node["verifyHostSsl"].as() + : default_cfg.verifyHostSsl, + /* .sslCertPath = */ + node["sslCertPath"] ? node["sslCertPath"].as() + : default_cfg.sslCertPath, + /* .sslKeyPath = */ + node["sslKeyPath"] ? node["sslKeyPath"].as() + : default_cfg.sslKeyPath, + /* .supportedEngines = */ + node["supportedEngines"] + ? node["supportedEngines"].as>() + : default_cfg.supportedEngines, + /* .checkedForSyncHubAt = */ + node["checkedForSyncHubAt"] + ? 
node["checkedForSyncHubAt"].as() + : default_cfg.checkedForSyncHubAt, + /* .apiKeys = */ + node["apiKeys"] ? node["apiKeys"].as>() + : default_cfg.apiKeys, }; if (should_update_config) { diff --git a/engine/utils/curl_utils.cc b/engine/utils/curl_utils.cc index 859c629d1..1d0be2f70 100644 --- a/engine/utils/curl_utils.cc +++ b/engine/utils/curl_utils.cc @@ -86,7 +86,7 @@ std::shared_ptr
GetHeaders(const std::string& url) { // for debug purpose auto min_token_size = 6; - if (token.size() < min_token_size) { + if (token.size() < (unsigned)min_token_size) { CTL_WRN("Hugging Face token is too short"); } else { CTL_INF("Using authentication with Hugging Face token: " + @@ -110,7 +110,7 @@ std::shared_ptr
GetHeaders(const std::string& url) { // for debug purpose auto min_token_size = 6; - if (gh_token.size() < min_token_size) { + if (gh_token.size() < (unsigned)min_token_size) { CTL_WRN("Github token is too short"); } else { CTL_INF("Using authentication with Github token: " + @@ -373,4 +373,4 @@ cpp::result SimplePatchJson(const std::string& url, return root; } -} // namespace curl_utils \ No newline at end of file +} // namespace curl_utils diff --git a/engine/utils/event_processor.h b/engine/utils/event_processor.h index 3dfb64d73..720a0f39d 100644 --- a/engine/utils/event_processor.h +++ b/engine/utils/event_processor.h @@ -29,7 +29,7 @@ class EventProcessor { running_ = false; // to prevent blocking thread on wait event_queue_->enqueue(EventType::ExitEvent, - ExitEvent{.message = "Event queue exitting.."}); + ExitEvent{{}, "Event queue exitting.."}); if (thread_.joinable()) { thread_.join(); } diff --git a/engine/utils/file_manager_utils.cc b/engine/utils/file_manager_utils.cc index c479949aa..f4ffb99db 100644 --- a/engine/utils/file_manager_utils.cc +++ b/engine/utils/file_manager_utils.cc @@ -182,43 +182,48 @@ config_yaml_utils::CortexConfig GetDefaultConfig() { return config_yaml_utils::CortexConfig{ #if defined(_WIN32) - .logFolderPath = + /* .logFolderPath = */ cortex::wc::WstringToUtf8(default_data_folder_path.wstring()), #else - .logFolderPath = default_data_folder_path.string(), + /* .logFolderPath = */ default_data_folder_path.string(), #endif - .logLlamaCppPath = kLogsLlamacppBaseName, - .logOnnxPath = kLogsOnnxBaseName, + /* .logLlamaCppPath = */ kLogsLlamacppBaseName, + /* .logOnnxPath = */ kLogsOnnxBaseName, #if defined(_WIN32) - .dataFolderPath = + /* .dataFolderPath = */ cortex::wc::WstringToUtf8(default_data_folder_path.wstring()), #else - .dataFolderPath = default_data_folder_path.string(), + /* .dataFolderPath = */ default_data_folder_path.string(), #endif - .maxLogLines = config_yaml_utils::kDefaultMaxLines, - .apiServerHost = 
config_yaml_utils::kDefaultHost, - .apiServerPort = config_yaml_utils::kDefaultPort, - .checkedForUpdateAt = config_yaml_utils::kDefaultCheckedForUpdateAt, - .checkedForLlamacppUpdateAt = + /* .maxLogLines = */ config_yaml_utils::kDefaultMaxLines, + /* .apiServerHost = */ config_yaml_utils::kDefaultHost, + /* .apiServerPort = */ config_yaml_utils::kDefaultPort, + /* .checkedForUpdateAt = */ config_yaml_utils::kDefaultCheckedForUpdateAt, + /* .checkedForLlamacppUpdateAt = */ config_yaml_utils::kDefaultCheckedForLlamacppUpdateAt, - .latestRelease = config_yaml_utils::kDefaultLatestRelease, - .latestLlamacppRelease = config_yaml_utils::kDefaultLatestLlamacppRelease, - .enableCors = config_yaml_utils::kDefaultCorsEnabled, - .allowedOrigins = config_yaml_utils::kDefaultEnabledOrigins, - .proxyUrl = "", - .verifyProxySsl = true, - .verifyProxyHostSsl = true, - .proxyUsername = "", - .proxyPassword = "", - .noProxy = config_yaml_utils::kDefaultNoProxy, - .verifyPeerSsl = true, - .verifyHostSsl = true, - - .sslCertPath = "", - .sslKeyPath = "", - .supportedEngines = config_yaml_utils::kDefaultSupportedEngines, - .checkedForSyncHubAt = 0u, - .apiKeys = {}, + /* .latestRelease = */ config_yaml_utils::kDefaultLatestRelease, + /* .latestLlamacppRelease = */ config_yaml_utils::kDefaultLatestLlamacppRelease, + /* .huggingFaceToken = */ "", + /* .gitHubUserAgent = */ "", + /* .gitHubToken = */ "", + /* .llamacppVariant = */ "", + /* .llamacppVersion = */ "", + /* .enableCors = */ config_yaml_utils::kDefaultCorsEnabled, + /* .allowedOrigins = */ config_yaml_utils::kDefaultEnabledOrigins, + /* .proxyUrl = */ "", + /* .verifyProxySsl = */ true, + /* .verifyProxyHostSsl = */ true, + /* .proxyUsername = */ "", + /* .proxyPassword = */ "", + /* .noProxy = */ config_yaml_utils::kDefaultNoProxy, + /* .verifyPeerSsl = */ true, + /* .verifyHostSsl = */ true, + + /* .sslCertPath = */ "", + /* .sslKeyPath = */ "", + /* .supportedEngines = */ config_yaml_utils::kDefaultSupportedEngines, + /* 
.checkedForSyncHubAt = */ 0u, + /* .apiKeys = */ {}, }; } diff --git a/engine/utils/github_release_utils.h b/engine/utils/github_release_utils.h index 4f5785bca..29f8a5725 100644 --- a/engine/utils/github_release_utils.h +++ b/engine/utils/github_release_utils.h @@ -28,21 +28,19 @@ struct GitHubAsset { static GitHubAsset FromJson(const Json::Value& json, const std::string& version) { - return GitHubAsset{ - .url = json["url"].asString(), - .id = json["id"].asInt(), - .node_id = json["node_id"].asString(), - .name = json["name"].asString(), - .label = json["label"].asString(), - .content_type = json["content_type"].asString(), - .state = json["state"].asString(), - .size = json["size"].asUInt64(), - .download_count = json["download_count"].asUInt(), - .created_at = json["created_at"].asString(), - .updated_at = json["updated_at"].asString(), - .browser_download_url = json["browser_download_url"].asString(), - .version = version, - }; + return GitHubAsset{json["url"].asString(), + json["id"].asInt(), + json["node_id"].asString(), + json["name"].asString(), + json["label"].asString(), + json["content_type"].asString(), + json["state"].asString(), + json["size"].asUInt64(), + json["download_count"].asUInt(), + json["created_at"].asString(), + json["updated_at"].asString(), + json["browser_download_url"].asString(), + version}; } Json::Value ToJson() const { @@ -103,15 +101,15 @@ struct GitHubRelease { } return GitHubRelease{ - .url = json["url"].asString(), - .id = json["id"].asInt(), - .tag_name = json["tag_name"].asString(), - .name = json["name"].asString(), - .draft = json["draft"].asBool(), - .prerelease = json["prerelease"].asBool(), - .created_at = json["created_at"].asString(), - .published_at = json["published_at"].asString(), - .assets = assets, + json["url"].asString(), + json["id"].asInt(), + json["tag_name"].asString(), + json["name"].asString(), + json["draft"].asBool(), + json["prerelease"].asBool(), + json["created_at"].asString(), + 
json["published_at"].asString(), + assets, }; } @@ -149,9 +147,10 @@ inline cpp::result, std::string> GetReleases( const std::string& author, const std::string& repo, const bool allow_prerelease = true) { auto url = url_parser::Url{ - .protocol = "https", - .host = kGitHubHost, - .pathParams = {"repos", author, repo, "releases"}, + /* .protocol = */ "https", + /* .host = */ kGitHubHost, + /* .pathParams = */ {"repos", author, repo, "releases"}, + /* .queries = */ {}, }; auto result = curl_utils::SimpleGetJson(url_parser::FromUrl(url)); @@ -168,7 +167,7 @@ inline cpp::result, std::string> GetReleases( for (const auto& release : result.value()) { releases.push_back(GitHubRelease::FromJson(release)); } - (void) allow_prerelease; + (void)allow_prerelease; return releases; } @@ -190,9 +189,10 @@ inline cpp::result GetReleaseByVersion( } auto url = url_parser::Url{ - .protocol = "https", - .host = kGitHubHost, - .pathParams = path_params, + /* .protocol = */ "https", + /* .host = */ kGitHubHost, + /* .pathParams = */ path_params, + /* .queries = */ {}, }; // CTL_DBG("GetReleaseByVersion: " << url.ToFullPath()); diff --git a/engine/utils/hardware/cpu_info.h b/engine/utils/hardware/cpu_info.h index ac5e1c83a..a31f6e44f 100644 --- a/engine/utils/hardware/cpu_info.h +++ b/engine/utils/hardware/cpu_info.h @@ -127,7 +127,7 @@ struct CpuInfo { std::this_thread::sleep_for(std::chrono::duration(1)); jiffies_initialized = true; } - + auto get_jiffies = [](int index) -> Jiffies { std::ifstream filestat("/proc/stat"); if (!filestat.is_open()) { @@ -188,11 +188,8 @@ struct CpuInfo { auto cpu = res[0]; cortex::cpuid::CpuInfo inst; auto usage = static_cast(GetCPUUsage()); - return CPU{.cores = cpu.numPhysicalCores(), - .arch = std::string(GetArch()), - .model = cpu.modelName(), - .usage = usage, - .instructions = inst.instructions()}; + return CPU{cpu.numPhysicalCores(), std::string(GetArch()), cpu.modelName(), + usage, inst.instructions()}; } }; -} // namespace cortex::hw \ No newline 
at end of file +} // namespace cortex::hw diff --git a/engine/utils/hardware/gguf/ggml.h b/engine/utils/hardware/gguf/ggml.h index f56fb9172..15c068019 100644 --- a/engine/utils/hardware/gguf/ggml.h +++ b/engine/utils/hardware/gguf/ggml.h @@ -176,59 +176,23 @@ struct GGMLTypeTrait { }; const std::unordered_map kGGMLTypeTraits = { - {GGML_TYPE_F32, {.block_size = 1, .type_size = 4}}, - {GGML_TYPE_F16, {.block_size = 1, .type_size = 2}}, - {GGML_TYPE_Q4_0, {.block_size = 32, .type_size = 18, .is_quantized = true}}, - {GGML_TYPE_Q4_1, {.block_size = 32, .type_size = 20, .is_quantized = true}}, - {GGML_TYPE_Q5_0, {.block_size = 32, .type_size = 22, .is_quantized = true}}, - {GGML_TYPE_Q5_1, {.block_size = 32, .type_size = 24, .is_quantized = true}}, - {GGML_TYPE_Q8_0, {.block_size = 32, .type_size = 34, .is_quantized = true}}, - {GGML_TYPE_Q8_1, {.block_size = 32, .type_size = 36, .is_quantized = true}}, - {GGML_TYPE_Q2_K, - {.block_size = 256, .type_size = 84, .is_quantized = true}}, - {GGML_TYPE_Q3_K, - {.block_size = 256, .type_size = 110, .is_quantized = true}}, - {GGML_TYPE_Q4_K, - {.block_size = 256, .type_size = 144, .is_quantized = true}}, - {GGML_TYPE_Q5_K, - {.block_size = 256, .type_size = 176, .is_quantized = true}}, - {GGML_TYPE_Q6_K, - {.block_size = 256, .type_size = 210, .is_quantized = true}}, - {GGML_TYPE_Q8_K, - {.block_size = 256, .type_size = 292, .is_quantized = true}}, - {GGML_TYPE_IQ2_XXS, - {.block_size = 256, .type_size = 66, .is_quantized = true}}, - {GGML_TYPE_IQ2_XS, - {.block_size = 256, .type_size = 74, .is_quantized = true}}, - {GGML_TYPE_IQ3_XXS, - {.block_size = 256, .type_size = 98, .is_quantized = true}}, - {GGML_TYPE_IQ1_S, - {.block_size = 256, .type_size = 50, .is_quantized = true}}, - {GGML_TYPE_IQ4_NL, - {.block_size = 32, .type_size = 18, .is_quantized = true}}, - {GGML_TYPE_IQ3_S, - {.block_size = 256, .type_size = 110, .is_quantized = true}}, - {GGML_TYPE_IQ2_S, - {.block_size = 256, .type_size = 82, .is_quantized = true}}, - 
{GGML_TYPE_IQ4_XS, - {.block_size = 256, .type_size = 136, .is_quantized = true}}, - {GGML_TYPE_I8, {.block_size = 1, .type_size = 1}}, - {GGML_TYPE_I16, {.block_size = 1, .type_size = 2}}, - {GGML_TYPE_I32, {.block_size = 1, .type_size = 4}}, - {GGML_TYPE_I64, {.block_size = 1, .type_size = 8}}, - {GGML_TYPE_F64, {.block_size = 1, .type_size = 8}}, - {GGML_TYPE_IQ1_M, - {.block_size = 256, .type_size = 56, .is_quantized = true}}, - {GGML_TYPE_BF16, {.block_size = 1, .type_size = 2}}, - {GGML_TYPE_Q4_0_4_4, - {.block_size = 32, .type_size = 18, .is_quantized = true}}, - {GGML_TYPE_Q4_0_4_8, - {.block_size = 32, .type_size = 18, .is_quantized = true}}, - {GGML_TYPE_Q4_0_8_8, - {.block_size = 32, .type_size = 18, .is_quantized = true}}, - {GGML_TYPE_TQ1_0, - {.block_size = 256, .type_size = 54, .is_quantized = true}}, - {GGML_TYPE_TQ2_0, - {.block_size = 256, .type_size = 66, .is_quantized = true}}, + {GGML_TYPE_F32, {1, 4, false}}, {GGML_TYPE_F16, {1, 2, false}}, + {GGML_TYPE_Q4_0, {32, 18, true}}, {GGML_TYPE_Q4_1, {32, 20, true}}, + {GGML_TYPE_Q5_0, {32, 22, true}}, {GGML_TYPE_Q5_1, {32, 24, true}}, + {GGML_TYPE_Q8_0, {32, 34, true}}, {GGML_TYPE_Q8_1, {32, 36, true}}, + {GGML_TYPE_Q2_K, {256, 84, true}}, {GGML_TYPE_Q3_K, {256, 110, true}}, + {GGML_TYPE_Q4_K, {256, 144, true}}, {GGML_TYPE_Q5_K, {256, 176, true}}, + {GGML_TYPE_Q6_K, {256, 210, true}}, {GGML_TYPE_Q8_K, {256, 292, true}}, + {GGML_TYPE_IQ2_XXS, {256, 66, true}}, {GGML_TYPE_IQ2_XS, {256, 74, true}}, + {GGML_TYPE_IQ3_XXS, {256, 98, true}}, {GGML_TYPE_IQ1_S, {256, 50, true}}, + {GGML_TYPE_IQ4_NL, {32, 18, true}}, {GGML_TYPE_IQ3_S, {256, 110, true}}, + {GGML_TYPE_IQ2_S, {256, 82, true}}, {GGML_TYPE_IQ4_XS, {256, 136, true}}, + {GGML_TYPE_I8, {1, 1, false}}, {GGML_TYPE_I16, {1, 2, false}}, + {GGML_TYPE_I32, {1, 4, false}}, {GGML_TYPE_I64, {1, 8, false}}, + {GGML_TYPE_F64, {1, 8, false}}, {GGML_TYPE_IQ1_M, {256, 56, true}}, + {GGML_TYPE_BF16, {1, 2, false}}, {GGML_TYPE_Q4_0_4_4, {32, 18, true}}, + 
{GGML_TYPE_Q4_0_4_8, {32, 18, true}}, {GGML_TYPE_Q4_0_8_8, {32, 18, true}}, + {GGML_TYPE_TQ1_0, {256, 54, true}}, {GGML_TYPE_TQ2_0, {256, 66, true}}, }; + } // namespace hardware diff --git a/engine/utils/hardware/gpu/vulkan/vulkan_gpu.h b/engine/utils/hardware/gpu/vulkan/vulkan_gpu.h index 15a40c97e..a46a57c3f 100644 --- a/engine/utils/hardware/gpu/vulkan/vulkan_gpu.h +++ b/engine/utils/hardware/gpu/vulkan/vulkan_gpu.h @@ -210,8 +210,7 @@ GetGpuUsage() { auto vram_total = get_vram(vram_total_path, 10) / 1024 / 1024; auto vram_usage = get_vram(vram_used_path, 10) / 1024 / 1024; auto device_id = get_vram(device_id_path, 16); - res[device_id] = AmdGpuUsage{.total_vram_MiB = vram_total, - .used_vram_MiB = vram_usage}; + res[device_id] = AmdGpuUsage{vram_total, vram_usage}; } } else { return cpp::fail("Error: Unable to open " + vendor_path.string()); @@ -456,16 +455,18 @@ class VulkanGpu { if (device_properties.vendorID == kNvidiaVendor || device_properties.vendorID == kAmdVendor) { gpus.emplace_back(cortex::hw::GPU{ - .id = std::to_string(id), - .device_id = device_properties.deviceID, - .name = device_properties.deviceName, - .version = std::to_string(device_properties.driverVersion), - .add_info = cortex::hw::AmdAddInfo{}, - .free_vram = free_vram_MiB, - .total_vram = total_vram_MiB, - .uuid = uuid_to_string(device_id_properties.deviceUUID), - .vendor = GetVendorStr(device_properties.vendorID), - .gpu_type = static_cast(device_properties.deviceType)}); + std::to_string(id), // id + device_properties.deviceID, // device_id + device_properties.deviceName, // name + std::to_string(device_properties.driverVersion), // version + cortex::hw::AmdAddInfo{}, // add_info (GPUAddInfo) + free_vram_MiB, // free_vram + total_vram_MiB, // total_vram + uuid_to_string(device_id_properties.deviceUUID), // uuid + true, // is_activated (default value) + GetVendorStr(device_properties.vendorID), // vendor + static_cast(device_properties.deviceType) // gpu_type + }); } id++; } @@ -519,4 
+520,4 @@ class VulkanGpu { return gpus_; } }; -} // namespace cortex::hw \ No newline at end of file +} // namespace cortex::hw diff --git a/engine/utils/hardware/gpu_info.h b/engine/utils/hardware/gpu_info.h index 1a2a5319c..586853a97 100644 --- a/engine/utils/hardware/gpu_info.h +++ b/engine/utils/hardware/gpu_info.h @@ -21,11 +21,11 @@ inline std::vector GetGPUInfo() { vulkan_gpus[j].version = nvidia_gpus[0].cuda_driver_version.value_or("unknown"); vulkan_gpus[j].add_info = NvidiaAddInfo{ - .driver_version = nvidia_gpus[i].driver_version.value_or("unknown"), - .compute_cap = nvidia_gpus[i].compute_cap.value_or("unknown")}; + nvidia_gpus[i].driver_version.value_or("unknown"), //driver_version + nvidia_gpus[i].compute_cap.value_or("unknown")}; //compute_cap vulkan_gpus[j].free_vram = std::stoll(nvidia_gpus[i].vram_free); vulkan_gpus[j].total_vram = std::stoll(nvidia_gpus[i].vram_total); - vulkan_gpus[j].vendor = nvidia_gpus[i].vendor; + vulkan_gpus[j].vendor = nvidia_gpus[i].vendor; } } } @@ -44,21 +44,25 @@ inline std::vector GetGPUInfo() { } else { std::vector res; for (auto& n : nvidia_gpus) { - res.emplace_back( - GPU{.id = n.id, - .name = n.name, - .version = nvidia_gpus[0].cuda_driver_version.value_or("unknown"), - .add_info = - NvidiaAddInfo{ - .driver_version = n.driver_version.value_or("unknown"), - .compute_cap = n.compute_cap.value_or("unknown")}, - .free_vram = std::stoi(n.vram_free), - .total_vram = std::stoi(n.vram_total), - .uuid = n.uuid, - .vendor = n.vendor, - .gpu_type = GpuType::kGpuTypeDiscrete}); + res.emplace_back(GPU{ + n.id, // id + 0, // device_id (not specified in original) + n.name, // name + nvidia_gpus[0].cuda_driver_version.value_or("unknown"), // version + NvidiaAddInfo{ + // add_info + n.driver_version.value_or("unknown"), // driver_version + n.compute_cap.value_or("unknown") // compute_cap + }, + std::stoi(n.vram_free), // free_vram + std::stoi(n.vram_total), // total_vram + n.uuid, // uuid + true, // is_activated (default value) 
+ n.vendor, // vendor + GpuType::kGpuTypeDiscrete // gpu_type + }); } return res; } } -} // namespace cortex::hw \ No newline at end of file +} // namespace cortex::hw diff --git a/engine/utils/hardware/os_info.h b/engine/utils/hardware/os_info.h index a87d448f5..67c53d835 100644 --- a/engine/utils/hardware/os_info.h +++ b/engine/utils/hardware/os_info.h @@ -8,8 +8,11 @@ namespace cortex::hw { inline OS GetOSInfo() { hwinfo::OS os; - return OS{.name = os.name(), - .version = os.version(), - .arch = os.is32bit() ? "32 bit" : "64 bit"}; + return OS{ + os.name(), //name + os.version(), //version + os.is32bit() ? "32 bit" : "64 bit" //arch + }; } -} // namespace cortex::hw \ No newline at end of file +} // namespace cortex::hw + diff --git a/engine/utils/hardware/ram_info.h b/engine/utils/hardware/ram_info.h index 14a48d798..f87073869 100644 --- a/engine/utils/hardware/ram_info.h +++ b/engine/utils/hardware/ram_info.h @@ -36,10 +36,13 @@ inline Memory GetMemoryInfo() { return Memory{.total_MiB = ByteToMiB(total_memory), .available_MiB = ByteToMiB(avail_memory)}; #elif defined(__linux__) || defined(_WIN32) - return Memory{.total_MiB = ByteToMiB(m.total_Bytes()), - .available_MiB = ByteToMiB(m.available_Bytes())}; + return Memory{ + ByteToMiB(m.total_Bytes()), //total_MiB + ByteToMiB(m.available_Bytes()), //available_MiB + "" //type + }; #else return Memory{}; #endif } -} // namespace cortex::hw \ No newline at end of file +} // namespace cortex::hw diff --git a/engine/utils/huggingface_utils.h b/engine/utils/huggingface_utils.h index 9c233c704..bfabf2786 100644 --- a/engine/utils/huggingface_utils.h +++ b/engine/utils/huggingface_utils.h @@ -40,7 +40,7 @@ struct HuggingFaceSiblingsFileSize { for (auto const& j : json) { if (j["type"].asString() == "file") { res.file_sizes[j["path"].asString()] = - HuggingFaceFileSize{.size_in_bytes = j["size"].asUInt64()}; + HuggingFaceFileSize{/* .size_in_bytes = */ j["size"].asUInt64()}; } } return res; @@ -69,10 +69,12 @@ 
GetSiblingsFileSize(const std::string& author, const std::string& model_name, if (author.empty() || model_name.empty()) { return cpp::fail("Author and model name cannot be empty"); } - auto url_obj = url_parser::Url{ - .protocol = "https", - .host = kHuggingFaceHost, - .pathParams = {"api", "models", author, model_name, "tree", branch}}; + auto url_obj = + url_parser::Url{/* .protocol = */ "https", + /* .host = */ kHuggingFaceHost, + /* .pathParams = */ + {"api", "models", author, model_name, "tree", branch}, + /* .queries = */ {}}; auto result = curl_utils::SimpleGetJson(url_obj.ToFullPath()); if (result.has_error()) { @@ -82,11 +84,12 @@ GetSiblingsFileSize(const std::string& author, const std::string& model_name, auto r = result.value(); for (auto const& j : result.value()) { if (j["type"].asString() == "directory") { - auto url_obj = - url_parser::Url{.protocol = "https", - .host = kHuggingFaceHost, - .pathParams = {"api", "models", author, model_name, - "tree", branch, j["path"].asString()}}; + auto url_obj = url_parser::Url{/* .protocol = */ "https", + /* .host = */ kHuggingFaceHost, + /* .pathParams = */ + {"api", "models", author, model_name, + "tree", branch, j["path"].asString()}, + /* .queries = */ {}}; auto rd = curl_utils::SimpleGetJson(url_obj.ToFullPath()); if (rd.has_value()) { @@ -105,15 +108,17 @@ inline cpp::result GetReadMe( if (author.empty() || model_name.empty()) { return cpp::fail("Author and model name cannot be empty"); } - auto url_obj = url_parser::Url{.protocol = "https", - .host = kHuggingFaceHost, - .pathParams = { + auto url_obj = url_parser::Url{/* .protocol = */ "https", + /* .host = */ kHuggingFaceHost, + /* .pathParams = */ + { author, model_name, "raw", "main", "README.md", - }}; + }, + /* .queries = */ {}}; auto result = curl_utils::SimpleGet(url_obj.ToFullPath()); if (result.has_error()) { @@ -135,8 +140,8 @@ struct HuggingFaceGgufInfo { } try { return HuggingFaceGgufInfo{ - .total = json["total"].asUInt64(), - .architecture = 
json["architecture"].asString(), + /* .total = */ json["total"].asUInt64(), + /* .architecture = */ json["architecture"].asString(), }; } catch (const std::exception& e) { return cpp::fail("Failed to parse gguf info: " + std::string(e.what())); @@ -183,31 +188,32 @@ struct HuggingFaceModelRepoInfo { auto siblings_info = body["siblings"]; for (const auto& sibling : siblings_info) { auto sibling_info = HuggingFaceFileSibling{ - .rfilename = sibling["rfilename"].asString(), + /* .rfilename = */ sibling["rfilename"].asString(), }; siblings.push_back(sibling_info); } return HuggingFaceModelRepoInfo{ - .id = body["id"].asString(), - .modelId = body["modelId"].asString(), - .author = body["author"].asString(), - .sha = body["sha"].asString(), - .lastModified = body["lastModified"].asString(), - - .isPrivate = body["private"].asBool(), - .disabled = body["disabled"].asBool(), - .gated = body["gated"].asBool(), - .tags = json_parser_utils::ParseJsonArray(body["tags"]), - .downloads = body["downloads"].asInt(), - - .likes = body["likes"].asInt(), - .gguf = gguf, - .siblings = siblings, - .spaces = - json_parser_utils::ParseJsonArray(body["spaces"]), - .createdAt = body["createdAt"].asString(), - .metadata = body.toStyledString(), + /* .id = */ body["id"].asString(), + /* .modelId = */ body["modelId"].asString(), + /* .author = */ body["author"].asString(), + /* .sha = */ body["sha"].asString(), + /* .lastModified = */ body["lastModified"].asString(), + + /* .isPrivate = */ body["private"].asBool(), + /* .disabled = */ body["disabled"].asBool(), + /* .gated = */ body["gated"].asBool(), + /* .tags = */ + json_parser_utils::ParseJsonArray(body["tags"]), + /* .downloads = */ body["downloads"].asInt(), + + /* .likes = */ body["likes"].asInt(), + /* .gguf = */ gguf, + /* .siblings = */ siblings, + /* .spaces = */ + json_parser_utils::ParseJsonArray(body["spaces"]), + /* .createdAt = */ body["createdAt"].asString(), + /* .metadata = */ body.toStyledString(), }; } @@ -226,9 +232,10 
@@ GetModelRepositoryBranches(const std::string& author, return cpp::fail("Author and model name cannot be empty"); } auto url_obj = url_parser::Url{ - .protocol = "https", - .host = kHuggingFaceHost, - .pathParams = {"api", "models", author, modelName, "refs"}}; + /* .protocol = */ "https", + /* .host = */ kHuggingFaceHost, + /* .pathParams = */ {"api", "models", author, modelName, "refs"}, + /* .queries = */ {}}; auto result = curl_utils::SimpleGetJson(url_obj.ToFullPath()); if (result.has_error()) { @@ -241,9 +248,9 @@ GetModelRepositoryBranches(const std::string& author, for (const auto& branch : branches_json) { branches[branch["name"].asString()] = HuggingFaceBranch{ - .name = branch["name"].asString(), - .ref = branch["ref"].asString(), - .targetCommit = branch["targetCommit"].asString(), + /* .name = */ branch["name"].asString(), + /* .ref = */ branch["ref"].asString(), + /* .targetCommit = */ branch["targetCommit"].asString(), }; } @@ -257,10 +264,11 @@ GetHuggingFaceModelRepoInfo(const std::string& author, if (author.empty() || modelName.empty()) { return cpp::fail("Author and model name cannot be empty"); } - auto url_obj = - url_parser::Url{.protocol = "https", - .host = kHuggingFaceHost, - .pathParams = {"api", "models", author, modelName}}; + auto url_obj = url_parser::Url{/* .protocol = */ "https", + /* .host = */ kHuggingFaceHost, + /* .pathParams = */ + {"api", "models", author, modelName}, + /* .queries = */ {}}; auto result = curl_utils::SimpleGetJson(url_obj.ToFullPath()); if (result.has_error()) { @@ -272,10 +280,12 @@ GetHuggingFaceModelRepoInfo(const std::string& author, } inline std::string GetMetadataUrl(const std::string& model_id) { - auto url_obj = url_parser::Url{ - .protocol = "https", - .host = kHuggingFaceHost, - .pathParams = {"cortexso", model_id, "resolve", "main", "metadata.yml"}}; + auto url_obj = + url_parser::Url{/* .protocol = */ "https", + /* .host = */ kHuggingFaceHost, + /* .pathParams = */ + {"cortexso", model_id, 
"resolve", "main", "metadata.yml"}, + /* .queries = */ {}}; return url_obj.ToFullPath(); } @@ -285,9 +295,10 @@ inline std::string GetDownloadableUrl(const std::string& author, const std::string& fileName, const std::string& branch = "main") { auto url_obj = url_parser::Url{ - .protocol = "https", - .host = kHuggingFaceHost, - .pathParams = {author, modelName, "resolve", branch, fileName}, + /* .protocol = */ "https", + /* .host = */ kHuggingFaceHost, + /* .pathParams = */ {author, modelName, "resolve", branch, fileName}, + /* .queries = */ {}, }; return url_parser::FromUrl(url_obj); } diff --git a/engine/utils/process/utils.cc b/engine/utils/process/utils.cc index 8cd0adc64..f63de5c5e 100644 --- a/engine/utils/process/utils.cc +++ b/engine/utils/process/utils.cc @@ -44,7 +44,7 @@ cpp::result SpawnProcess( const std::vector& command, const std::string& stdout_file, const std::string& stderr_file) { std::stringstream ss; - for (const auto item : command) { + for (const auto& item : command) { ss << item << " "; } CTL_INF("Spawning process with command: " << ss.str()); diff --git a/engine/utils/url_parser.h b/engine/utils/url_parser.h index 4496ebb2e..69e196247 100644 --- a/engine/utils/url_parser.h +++ b/engine/utils/url_parser.h @@ -88,11 +88,7 @@ inline bool SplitPathParams(const std::string& input, inline cpp::result FromUrlString( const std::string& urlString) { - Url url = { - .protocol = "", - .host = "", - .pathParams = {}, - }; + Url url{"", "", {}, {}}; int counter = 0; std::smatch url_match_result;