Update llama.cpp submodule to latest release b4079 (#292)
* Update submodule to latest release b4079

* fix: build

---------

Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
Co-authored-by: vansangpfiev <[email protected]>
3 people authored Nov 15, 2024
1 parent 48db294 commit 2580c85
Showing 3 changed files with 9 additions and 9 deletions.
2 changes: 1 addition & 1 deletion llama.cpp
Submodule llama.cpp updated 138 files
12 changes: 6 additions & 6 deletions patches/0001-Add-API-query-buffer-size.patch
@@ -32,19 +32,19 @@ index c466cd88..15f3102c 100644

 +const size_t llama_get_cpu_buffer(const struct llama_model * model) {
 +    size_t buffer{0};
-+    for (const auto buf : model->bufs) {
-+        if (strcmp(ggml_backend_buffer_name(buf), "CPU") == 0) {
-+            buffer += ggml_backend_buffer_get_size(buf);
++    for (const auto& buf : model->bufs) {
++        if (strcmp(ggml_backend_buffer_name(buf.get()), "CPU") == 0) {
++            buffer += ggml_backend_buffer_get_size(buf.get());
 +        }
 +    }
 +    return buffer;
 +}
 +
 +const size_t llama_get_other_buffer(const struct llama_model * model) {
 +    size_t buffer{0};
-+    for (const auto buf : model->bufs) {
-+        if (strcmp(ggml_backend_buffer_name(buf), "CPU") != 0) {
-+            buffer += ggml_backend_buffer_get_size(buf);
++    for (const auto& buf : model->bufs) {
++        if (strcmp(ggml_backend_buffer_name(buf.get()), "CPU") != 0) {
++            buffer += ggml_backend_buffer_get_size(buf.get());
 +        }
 +    }
 +    return buffer;
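The only functional change to the patch is how the model's buffer list is traversed: the loop now binds each element by reference and calls .get() before passing the buffer to the ggml_backend_buffer_* accessors. This is the usual adjustment when a container switches from raw handles to move-only smart pointers, which appears to be what happened to model->bufs in the updated llama.cpp: copying the element (const auto buf) no longer compiles, while the C-style API still expects the raw pointer. A minimal self-contained sketch of the pattern, using stand-in types and function names rather than the real llama.cpp internals:

#include <cstddef>
#include <cstring>
#include <memory>
#include <vector>

// Stand-in for a backend buffer object; the real code uses ggml's types.
struct backend_buffer {
  const char* name;
  std::size_t size;
};

// Stand-ins for the C-style accessors the patch calls.
const char* buffer_name(const backend_buffer* buf) { return buf->name; }
std::size_t buffer_size(const backend_buffer* buf) { return buf->size; }

// Sum the sizes of all buffers whose backend name is "CPU".
std::size_t cpu_buffer_total(
    const std::vector<std::unique_ptr<backend_buffer>>& bufs) {
  std::size_t total{0};
  for (const auto& buf : bufs) {  // bind by reference: unique_ptr is not copyable
    if (std::strcmp(buffer_name(buf.get()), "CPU") == 0) {  // .get() yields the raw handle
      total += buffer_size(buf.get());
    }
  }
  return total;
}

int main() {
  std::vector<std::unique_ptr<backend_buffer>> bufs;
  bufs.push_back(std::make_unique<backend_buffer>(backend_buffer{"CPU", 128}));
  bufs.push_back(std::make_unique<backend_buffer>(backend_buffer{"CUDA0", 4096}));
  return cpu_buffer_total(bufs) == 128 ? 0 : 1;  // expect only the CPU buffer to be counted
}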
4 changes: 2 additions & 2 deletions src/llama_engine.cc
@@ -476,8 +476,8 @@ void LlamaEngine::SetFileLogger(int max_log_lines,
}
},
nullptr);
freopen(log_path.c_str(), "w", stderr);
freopen(log_path.c_str(), "w", stdout);
freopen(log_path.c_str(), "a", stderr);
freopen(log_path.c_str(), "a", stdout);
}

bool LlamaEngine::LoadModelImpl(std::shared_ptr<Json::Value> json_body) {
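The llama_engine.cc change swaps the freopen mode from "w" to "a" when re-attaching stderr and stdout to the log file: "w" truncates the file on every call, so each redirection would wipe whatever had already been logged, while "a" opens the file for appending and preserves earlier output. A minimal sketch of the difference, with a hypothetical log path rather than the engine's actual configuration:

#include <cstdio>

int main() {
  const char* log_path = "/tmp/engine.log";   // hypothetical location

  std::fprintf(stderr, "before redirect\n");  // still reaches the console

  // Re-attach stderr to the log file in append mode; with "w" this call
  // would erase anything the file already contained.
  if (std::freopen(log_path, "a", stderr) == nullptr) {
    return 1;
  }
  std::fprintf(stderr, "after redirect: appended to %s\n", log_path);
  return 0;
}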
