Commit c0aaa01
fix: check nullptr for async_file_logger_ (#273)
* fix: check nullptr for async_file_logger_

* fix: remove unused code

---------

Co-authored-by: vansangpfiev <[email protected]>
vansangpfiev and sangjanai authored Oct 30, 2024
1 parent a990689 commit c0aaa01
Showing 2 changed files with 14 additions and 66 deletions.
77 changes: 13 additions & 64 deletions src/llama_engine.cc
@@ -147,64 +147,10 @@ std::string CreateReturnJson(const std::string& id, const std::string& model,
 }
 } // namespace
 
-// derepted this function because we no longer support change log when load model
-void LlamaEngine::SetLoggerOption(const Json::Value& json_body) {
-  // if (!json_body["log_option"].isNull()) {
-  //   int log_option = json_body["log_option"].asInt();
-  //   if (log_option != kFileLoggerOption) {
-  //     // Revert to default trantor logger output function
-  //     trantor::Logger::setOutputFunction(
-  //         [](const char* msg, const uint64_t len) {
-  //           fwrite(msg, 1, static_cast<size_t>(len), stdout);
-  //         },
-  //         []() { fflush(stdout); });
-  //   } else {
-  //     std::string log_path =
-  //         json_body.get("log_path", "./logs/cortex.log").asString();
-  //     int max_log_lines = json_body.get("max_log_lines", 100000).asInt();
-  //     trantor::FileLogger asyncFileLogger;
-  //     asyncFileLogger.setFileName(log_path);
-  //     asyncFileLogger.setMaxLines(max_log_lines); // Keep last 100000 lines
-  //     // asyncFileLogger.startLogging();
-  //     trantor::Logger::setOutputFunction(
-  //         [&](const char* msg, const uint64_t len) {
-  //           asynce_file_logger_->output_(msg, len);
-  //         },
-  //         [&]() { asynce_file_logger_->flush(); });
-  //   }
-  // } else {
-  //   // For backward compatible
-  //   trantor::Logger::setOutputFunction(
-  //       [](const char* msg, const uint64_t len) {
-  //         fwrite(msg, 1, static_cast<size_t>(len), stdout);
-  //       },
-  //       []() { fflush(stdout); });
-  // }
-
-  // if (!json_body["log_level"].isNull()) {
-  //   std::string log_level = json_body["log_level"].asString();
-  //   if (log_level == "trace") {
-  //     trantor::Logger::setLogLevel(trantor::Logger::kTrace);
-  //   } else if (log_level == "debug") {
-  //     trantor::Logger::setLogLevel(trantor::Logger::kDebug);
-  //   } else if (log_level == "info") {
-  //     trantor::Logger::setLogLevel(trantor::Logger::kInfo);
-  //   } else if (log_level == "warn") {
-  //     trantor::Logger::setLogLevel(trantor::Logger::kWarn);
-  //   } else if (log_level == "fatal") {
-  //     trantor::Logger::setLogLevel(trantor::Logger::kFatal);
-  //   } else {
-  //     trantor::Logger::setLogLevel(trantor::Logger::kError);
-  //   }
-  // } else {
-  //   trantor::Logger::setLogLevel(trantor::Logger::kDebug);
-  // }
-}
-
 LlamaEngine::LlamaEngine(int log_option) {
   trantor::Logger::setLogLevel(trantor::Logger::kInfo);
   if (log_option == kFileLoggerOption) {
-    asynce_file_logger_ = std::make_unique<trantor::FileLogger>();
+    async_file_logger_ = std::make_unique<trantor::FileLogger>();
   }
 
   common_log_pause(common_log_main());
Expand Down Expand Up @@ -232,7 +178,7 @@ LlamaEngine::~LlamaEngine() {
l.ReleaseResources();
}
server_map_.clear();
asynce_file_logger_.reset();
async_file_logger_.reset();
}

void LlamaEngine::HandleChatCompletion(
Expand Down Expand Up @@ -260,7 +206,6 @@ void LlamaEngine::HandleEmbedding(
void LlamaEngine::LoadModel(
std::shared_ptr<Json::Value> json_body,
std::function<void(Json::Value&&, Json::Value&&)>&& callback) {
// SetLoggerOption(*json_body); // dont update log option when load model
if (std::exchange(print_version_, false)) {
#if defined(CORTEXLLAMA_VERSION)
LOG_INFO << "cortex.llamacpp version: " << CORTEXLLAMA_VERSION;
Expand Down Expand Up @@ -399,17 +344,21 @@ void LlamaEngine::GetModels(

void LlamaEngine::SetFileLogger(int max_log_lines,
const std::string& log_path) {
if (!asynce_file_logger_) {
asynce_file_logger_ = std::make_unique<trantor::FileLogger>();
if (!async_file_logger_) {
async_file_logger_ = std::make_unique<trantor::FileLogger>();
}
asynce_file_logger_->setFileName(log_path);
asynce_file_logger_->setMaxLines(max_log_lines); // Keep last 100000 lines
asynce_file_logger_->startLogging();
async_file_logger_->setFileName(log_path);
async_file_logger_->setMaxLines(max_log_lines); // Keep last 100000 lines
async_file_logger_->startLogging();
trantor::Logger::setOutputFunction(
[&](const char* msg, const uint64_t len) {
asynce_file_logger_->output_(msg, len);
if (async_file_logger_)
async_file_logger_->output_(msg, len);
},
[&]() { asynce_file_logger_->flush(); });
[&]() {
if (async_file_logger_)
async_file_logger_->flush();
});
llama_log_set(
[](ggml_log_level level, const char* text, void* user_data) {
(void)level;
Expand Down
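The change above is easier to read pulled out of the diff. Below is a minimal standalone sketch of the pattern the commit lands on, not the project's actual file: the class name Engine is hypothetical, the header paths are assumptions, and only the calls the diff itself exercises (setFileName, setMaxLines, startLogging, output_, flush, and trantor::Logger::setOutputFunction) appear.

#include <cstdint>
#include <memory>
#include <string>

#include <trantor/utils/FileLogger.h>  // assumed header path
#include <trantor/utils/Logger.h>

// Hypothetical stand-in for LlamaEngine, reduced to the logging members.
class Engine {
 public:
  void SetFileLogger(int max_log_lines, const std::string& log_path) {
    if (!async_file_logger_) {
      async_file_logger_ = std::make_unique<trantor::FileLogger>();
    }
    async_file_logger_->setFileName(log_path);
    async_file_logger_->setMaxLines(max_log_lines);
    async_file_logger_->startLogging();
    // The callbacks registered here outlive this call; guard every
    // dereference so a logger that has been reset (or was never
    // created) is simply skipped instead of dereferencing null.
    trantor::Logger::setOutputFunction(
        [this](const char* msg, const uint64_t len) {
          if (async_file_logger_)
            async_file_logger_->output_(msg, len);
        },
        [this]() {
          if (async_file_logger_)
            async_file_logger_->flush();
        });
  }

  ~Engine() {
    // Destruction resets the logger; the guards above keep any late
    // log write from touching the now-null unique_ptr.
    async_file_logger_.reset();
  }

 private:
  std::unique_ptr<trantor::FileLogger> async_file_logger_;
};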
3 changes: 1 addition & 2 deletions src/llama_engine.h
@@ -31,7 +31,6 @@ class LlamaEngine : public EngineI {
       std::shared_ptr<Json::Value> jsonBody,
       std::function<void(Json::Value&&, Json::Value&&)>&& callback) final;
   void SetFileLogger(int max_log_lines, const std::string& log_path) final;
-  void SetLoggerOption(const Json::Value& json_body);
 
  private:
   bool LoadModelImpl(std::shared_ptr<Json::Value> jsonBody);
@@ -71,5 +70,5 @@ class LlamaEngine : public EngineI {
   std::atomic<int> no_of_chats_ = 0;
 
   bool print_version_ = true;
-  std::unique_ptr<trantor::FileLogger> asynce_file_logger_;
+  std::unique_ptr<trantor::FileLogger> async_file_logger_;
 };
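A note on why the null checks matter: trantor keeps the registered output and flush callbacks alive after SetFileLogger returns, and both capture the engine (via [&], which for a data member behaves like capturing this; the sketch above uses [this] to make that explicit). The constructor only allocates the logger when log_option == kFileLoggerOption, and the destructor resets it, so a callback can plausibly fire while async_file_logger_ is null; checking the pointer before output_ and flush closes that window. Renaming the misspelled asynce_file_logger_ to async_file_logger_ completes the cleanup.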
