[pull] master from mudler:master #97

Merged · 12 commits · Sep 10, 2024
2 changes: 1 addition & 1 deletion .github/workflows/bump_deps.yaml
@@ -56,7 +56,7 @@ jobs:
 rm -rfv ${{ matrix.variable }}_message.txt
 rm -rfv ${{ matrix.variable }}_commit.txt
 - name: Create Pull Request
-uses: peter-evans/create-pull-request@v6
+uses: peter-evans/create-pull-request@v7
 with:
 token: ${{ secrets.UPDATE_BOT_TOKEN }}
 push-to-fork: ci-forks/LocalAI
2 changes: 1 addition & 1 deletion .github/workflows/bump_docs.yaml
@@ -17,7 +17,7 @@ jobs:
 run: |
 bash .github/bump_docs.sh ${{ matrix.repository }}
 - name: Create Pull Request
-uses: peter-evans/create-pull-request@v6
+uses: peter-evans/create-pull-request@v7
 with:
 token: ${{ secrets.UPDATE_BOT_TOKEN }}
 push-to-fork: ci-forks/LocalAI
2 changes: 1 addition & 1 deletion .github/workflows/checksum_checker.yaml
@@ -36,7 +36,7 @@ jobs:
 sudo chmod 777 /hf_cache
 bash .github/checksum_checker.sh gallery/index.yaml
 - name: Create Pull Request
-uses: peter-evans/create-pull-request@v6
+uses: peter-evans/create-pull-request@v7
 with:
 token: ${{ secrets.UPDATE_BOT_TOKEN }}
 push-to-fork: ci-forks/LocalAI
2 changes: 1 addition & 1 deletion .github/workflows/update_swagger.yaml
@@ -25,7 +25,7 @@ jobs:
 run: |
 make protogen-go swagger
 - name: Create Pull Request
-uses: peter-evans/create-pull-request@v6
+uses: peter-evans/create-pull-request@v7
 with:
 token: ${{ secrets.UPDATE_BOT_TOKEN }}
 push-to-fork: ci-forks/LocalAI
27 changes: 19 additions & 8 deletions Dockerfile
@@ -263,14 +263,20 @@ EOT
 # In most cases, builder is the image you should be using - however, this can save build time if one just needs to copy backend-assets/grpc/stablediffusion and nothing else.
 FROM builder-base AS builder-sd
 
-COPY . .
-COPY .git .
-
-RUN make prepare
-
-
-# stablediffusion does not tolerate a newer version of abseil, build it first
-RUN GRPC_BACKENDS=backend-assets/grpc/stablediffusion make build
+# stablediffusion does not tolerate a newer version of abseil, copy only over enough elements to build it
+COPY Makefile .
+COPY go.mod .
+COPY go.sum .
+COPY backend/backend.proto ./backend/backend.proto
+COPY backend/go/image/stablediffusion ./backend/go/image/stablediffusion
+COPY pkg/grpc ./pkg/grpc
+COPY pkg/stablediffusion ./pkg/stablediffusion
+RUN git init
+RUN make sources/go-stable-diffusion
+RUN touch prepare-sources
+
+# Actually build the backend
+RUN GRPC_BACKENDS=backend-assets/grpc/stablediffusion make backend-assets/grpc/stablediffusion
 
 ###################################
 ###################################

@@ -285,6 +291,11 @@ COPY --from=grpc /opt/grpc /usr/local
 # Rebuild with defaults backends
 WORKDIR /build
 
+COPY . .
+COPY .git .
+
+RUN make prepare
+
 ## Build the binary
 ## If it's CUDA, we want to skip some of the llama-compat backends to save space
 ## We only leave the most CPU-optimized variant and the fallback for the cublas build
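
The builder-sd change above is a Docker layer-caching optimization: instead of copying the whole source tree (which invalidates every later layer whenever any file changes), the stage copies only the files the stable-diffusion backend build actually reads, so unrelated edits leave the expensive build layer cached. A minimal sketch of the same pattern follows; the base image, target name, and copied paths are illustrative assumptions, not taken from this repository.

# Sketch only: copy the narrowest set of inputs before the costly step,
# so the cached RUN layer survives edits made elsewhere in the repo.
FROM golang:1.22 AS example-backend
WORKDIR /build
# Dependency manifests and the one package this hypothetical target needs
COPY go.mod go.sum Makefile ./
COPY pkg/example ./pkg/example
# Re-runs only when one of the files copied above changes
RUN make example-backend-target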
10 changes: 5 additions & 5 deletions core/backend/transcript.go
@@ -9,7 +9,7 @@ import (
 "github.com/mudler/LocalAI/core/schema"
 
 "github.com/mudler/LocalAI/pkg/grpc/proto"
-model "github.com/mudler/LocalAI/pkg/model"
+"github.com/mudler/LocalAI/pkg/model"
 )
 
 func ModelTranscription(audio, language string, translate bool, ml *model.ModelLoader, backendConfig config.BackendConfig, appConfig *config.ApplicationConfig) (*schema.TranscriptionResult, error) {
@@ -22,16 +22,16 @@ func ModelTranscription(audio, language string, translate bool, ml *model.ModelL
 model.WithAssetDir(appConfig.AssetsDestination),
 })
 
-whisperModel, err := ml.BackendLoader(opts...)
+transcriptionModel, err := ml.BackendLoader(opts...)
 if err != nil {
 return nil, err
 }
 
-if whisperModel == nil {
-return nil, fmt.Errorf("could not load whisper model")
+if transcriptionModel == nil {
+return nil, fmt.Errorf("could not load transcription model")
 }
 
-r, err := whisperModel.AudioTranscription(context.Background(), &proto.TranscriptRequest{
+r, err := transcriptionModel.AudioTranscription(context.Background(), &proto.TranscriptRequest{
 Dst: audio,
 Language: language,
 Translate: translate,
4 changes: 2 additions & 2 deletions examples/functions/requirements.txt
@@ -1,2 +1,2 @@
-langchain==0.2.15
-openai==1.43.0
+langchain==0.2.16
+openai==1.44.0
6 changes: 3 additions & 3 deletions examples/langchain-chroma/requirements.txt
@@ -1,4 +1,4 @@
-langchain==0.2.15
-openai==1.43.0
+langchain==0.2.16
+openai==1.44.1
 chromadb==0.5.5
-llama-index==0.11.4
+llama-index==0.11.7
@@ -10,15 +10,15 @@ debugpy==1.8.2
 frozenlist==1.4.1
 greenlet==3.0.3
 idna==3.8
-langchain==0.2.15
-langchain-community==0.2.15
+langchain==0.2.16
+langchain-community==0.2.16
 marshmallow==3.22.0
 marshmallow-enum==1.5.1
 multidict==6.0.5
 mypy-extensions==1.0.0
 numexpr==2.10.1
-numpy==2.1.0
-openai==1.43.0
+numpy==2.1.1
+openai==1.44.0
 openapi-schema-pydantic==1.2.4
 packaging>=23.2
 pydantic==2.8.2