Commit e42e2da: update torch

mht-sharma committed on Aug 5, 2024 (1 parent: 5c3d2ed)

Showing 2 changed files with 21 additions and 20 deletions.
Dockerfile_amd (38 changes: 20 additions & 18 deletions)
@@ -98,24 +98,26 @@
 RUN pip uninstall -y triton && \
     cd triton/python && \
     pip install .
 
-RUN git clone --depth 1 --recursive --single-branch --branch 2.3-patched https://github.com/fxmarty/pytorch.git pytorch && cd pytorch && pip install -r requirements.txt --no-cache-dir
-
-ARG _GLIBCXX_USE_CXX11_ABI="1"
-ARG CMAKE_PREFIX_PATH="/opt/conda"
-ARG PYTORCH_ROCM_ARCH="gfx90a;gfx942"
-ARG BUILD_CAFFE2="0" \
-    BUILD_CAFFE2_OPS="0" \
-    USE_CUDA="0" \
-    USE_ROCM="1" \
-    BUILD_TEST="0" \
-    USE_FBGEMM="0" \
-    USE_NNPACK="0" \
-    USE_QNNPACK="0" \
-    USE_XNNPACK="0" \
-    USE_FLASH_ATTENTION="1" \
-    USE_MEM_EFF_ATTENTION="0"
-
-RUN cd pytorch && python tools/amd_build/build_amd.py && python setup.py install
+# RUN git clone --depth 1 --recursive --single-branch --branch 2.3-patched https://github.com/fxmarty/pytorch.git pytorch && cd pytorch && pip install -r requirements.txt --no-cache-dir
+
+# ARG _GLIBCXX_USE_CXX11_ABI="1"
+# ARG CMAKE_PREFIX_PATH="/opt/conda"
+# ARG PYTORCH_ROCM_ARCH="gfx90a;gfx942"
+# ARG BUILD_CAFFE2="0" \
+#     BUILD_CAFFE2_OPS="0" \
+#     USE_CUDA="0" \
+#     USE_ROCM="1" \
+#     BUILD_TEST="0" \
+#     USE_FBGEMM="0" \
+#     USE_NNPACK="0" \
+#     USE_QNNPACK="0" \
+#     USE_XNNPACK="0" \
+#     USE_FLASH_ATTENTION="1" \
+#     USE_MEM_EFF_ATTENTION="0"
+
+# RUN cd pytorch && python tools/amd_build/build_amd.py && python setup.py install
+
+RUN pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm6.1
 # Set AS recommended: https://github.com/ROCm/triton/wiki/A-script-to-set-program-execution-environment-in-ROCm
 ENV HIP_FORCE_DEV_KERNARG=1
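
The net effect of this hunk: PyTorch is no longer built from source (the fxmarty 2.3-patched branch) and is instead installed from the nightly ROCm 6.1 wheel index. A minimal sketch of a sanity check one might run inside the resulting image; the printed values are illustrative assumptions, not output captured from this build:

# Quick check that the nightly ROCm wheel is the torch actually installed.
# Expected values are illustrative, not taken from this commit.
import torch

print(torch.__version__)          # a dev/nightly version string
print(torch.version.hip)          # non-None on a ROCm build of torch
print(torch.cuda.is_available())  # True when a ROCm-visible GPU is present
if torch.cuda.is_available():
    # ROCm GPUs are exposed through torch's CUDA API surface.
    print(torch.cuda.get_device_name(0))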
server/text_generation_server/models/flash_causal_lm.py (3 changes: 1 addition & 2 deletions)
@@ -1150,8 +1150,7 @@ def warmup(self, batch: FlashCausalLMBatch):
             elif CUDA_GRAPHS is not None:
                 tuning_sequences = CUDA_GRAPHS
             else:
-                # For seqlen = 1, we dispatch to LLMM1 kernel.
-                tuning_sequences = [2, 3, 4, 5, 6, 7]
+                tuning_sequences = [1, 2, 3, 4, 5, 6, 7]
 
             tunableop_filepath = os.path.join(
                 HUGGINGFACE_HUB_CACHE,
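
For context: tuning_sequences drives the TunableOp warmup on ROCm, where each listed sequence length gets a dummy GEMM pass so PyTorch's TunableOp can benchmark and record the best kernel for that shape; with the nightly wheel, seqlen 1 no longer needs a dedicated LLMM1 dispatch, so it is tuned like the other lengths. A minimal standalone sketch of the same idea, assuming PyTorch's documented PYTORCH_TUNABLEOP_* environment variables; the model dimensions are illustrative, not taken from TGI:

# Standalone sketch of TunableOp-style warmup over a set of sequence lengths.
# PYTORCH_TUNABLEOP_* are PyTorch's documented TunableOp switches; the
# matmul shapes below are hypothetical.
import os

os.environ["PYTORCH_TUNABLEOP_ENABLED"] = "1"   # turn TunableOp on
os.environ["PYTORCH_TUNABLEOP_TUNING"] = "1"    # allow new solutions to be tuned
os.environ["PYTORCH_TUNABLEOP_FILENAME"] = "tunableop_results.csv"

import torch  # import after setting the env vars so they take effect

hidden, vocab = 4096, 32000  # hypothetical model dimensions
weight = torch.randn(hidden, vocab, device="cuda", dtype=torch.float16)

# After this commit, seqlen 1 is tuned too (tuning_sequences = [1, ..., 7]).
for seqlen in [1, 2, 3, 4, 5, 6, 7]:
    x = torch.randn(seqlen, hidden, device="cuda", dtype=torch.float16)
    _ = x @ weight  # each new GEMM shape is benchmarked and recorded

torch.cuda.synchronize()  # tuned results are written to the CSV file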
