Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .dev_scripts/xtuner_rl_path.pth
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
import xtuner_rl_path
18 changes: 18 additions & 0 deletions .dev_scripts/xtuner_rl_path/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
"""Conditionally prepend an inference engine's package dir to ``sys.path``.

This module is imported automatically at interpreter startup through the
companion ``xtuner_rl_path.pth`` hook.  When one of the
``XTUNER_USE_LMDEPLOY`` / ``XTUNER_USE_SGLANG`` switches is enabled, the
corresponding engine's package directory is inserted just ahead of the first
``dist-packages`` entry so its packages shadow the system-wide installs.
Note that lmdeploy takes precedence when both switches are set.
"""
import os
import sys

# Values treated as "enabled" for the XTUNER_USE_* switches (case-insensitive).
_TRUTHY = ("1", "on", "true")

# Index of the first ``dist-packages`` entry on sys.path; fall back to the
# front of the path (index 0) when no such entry exists.
dist_packages_index = next(
    (i for i, entry in enumerate(sys.path) if entry.endswith("dist-packages")),
    0,
)

if os.getenv('XTUNER_USE_LMDEPLOY', '').lower() in _TRUTHY:
    _engine_dir = os.getenv('XTUNER_LMDEPLOY_ENVS_DIR', '/envs/lmdeploy')
    if _engine_dir not in sys.path:
        sys.path.insert(dist_packages_index, _engine_dir)
elif os.getenv('XTUNER_USE_SGLANG', '').lower() in _TRUTHY:
    _engine_dir = os.getenv('XTUNER_SGLANG_ENVS_DIR', '/envs/sglang')
    if _engine_dir not in sys.path:
        sys.path.insert(dist_packages_index, _engine_dir)
2 changes: 1 addition & 1 deletion .github/workflows/unit_test.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ on:
env:
WORKSPACE_PREFIX: $(echo $GITHUB_WORKSPACE |cut -d '/' -f 1-5)
WORKSPACE_PREFIX_SHORT: $(echo $GITHUB_WORKSPACE |cut -d '/' -f 1-3)
IMAGE: registry.h.pjlab.org.cn/ailab-llmrazor/xtuner:pt28_20251216_d769950
IMAGE: registry.h.pjlab.org.cn/ailab-llmrazor/xtuner_tmp:pt29_20260324_d7fee45

concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
Expand Down
168 changes: 115 additions & 53 deletions Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -2,39 +2,38 @@
# builder
ARG BASE_IMAGE=nvcr.io/nvidia/pytorch:25.03-py3

## build args
## build base env
FROM ${BASE_IMAGE} AS setup_env

ARG TORCH_VERSION
ARG PPA_SOURCE

RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
sed -i "s@http://.*.ubuntu.com@${PPA_SOURCE}@g" /etc/apt/sources.list.d/ubuntu.sources && \
# RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
RUN sed -i "s@http://.*.ubuntu.com@${PPA_SOURCE}@g" /etc/apt/sources.list.d/ubuntu.sources && \
apt update && \
apt install --no-install-recommends ca-certificates -y && \
apt install --no-install-recommends bc wget -y && \
apt install --no-install-recommends build-essential sudo -y && \
apt install --no-install-recommends git curl pkg-config tree unzip tmux \
openssh-server openssh-client dnsutils iproute2 lsof net-tools zsh rclone \
iputils-ping telnet netcat-openbsd -y && \
iputils-ping telnet netcat-openbsd htop bubblewrap socat -y && \
apt clean && rm -rf /var/lib/apt/lists/*

RUN if [ -d /etc/pip ] && [ -f /etc/pip/constraint.txt ]; then echo > /etc/pip/constraint.txt; fi
RUN pip install pystack py-spy --no-cache-dir
RUN pip uninstall flash_attn opencv -y && rm -rf /usr/local/lib/python3.12/dist-packages/cv2
RUN git config --system --add safe.directory "*"

# torch
ARG TORCH_VERSION
ARG PYTORCH_WHEELS_URL
RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
--mount=type=secret,id=NO_PROXY,env=no_proxy \
if [ -n "${TORCH_VERSION}" ]; then \
pip install torchvision torch==${TORCH_VERSION} \
--index-url https://download.pytorch.org/whl/cu128 \
--extra-index-url https://download.pytorch.org/whl/cu126 \
-i ${PYTORCH_WHEELS_URL}/cu128 \
--extra-index-url ${PYTORCH_WHEELS_URL}/cu126 \
--no-cache-dir; \
fi

# set reasonable default for CUDA architectures when building ngc image
ENV TORCH_CUDA_ARCH_LIST="7.5 8.0 8.6 9.0 10.0"

RUN pip uninstall flash_attn opencv -y && rm -rf /usr/local/lib/python3.12/dist-packages/cv2
ENV TORCH_CUDA_ARCH_LIST="9.0 10.0"

ARG FLASH_ATTN_DIR=/tmp/flash-attn
ARG CODESPACE=/root/codespace
Expand All @@ -56,6 +55,9 @@ ARG CODESPACE
ARG FLASH_ATTN_DIR
ARG FLASH_ATTN3_DIR
ARG FLASH_ATTN_URL
# force Hopper (SM90) for now; you can change it through build args
ARG FLASH_ATTN_CUDA_ARCHS="90"
ARG FLASH_ATTENTION_DISABLE_SM80="TRUE"

RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
git clone $(echo ${FLASH_ATTN_URL} | cut -d '@' -f 1) && \
Expand Down Expand Up @@ -119,42 +121,41 @@ WORKDIR ${CODESPACE}/causal-conv1d

RUN CAUSAL_CONV1D_FORCE_BUILD=TRUE pip wheel -w ${CAUSAL_CONV1D_DIR} -v --no-deps --no-build-isolation .

# pypi install nvshmem and compile deepep
# compile nvshmem and deepep
FROM setup_env AS deep_ep

ARG CODESPACE
ARG DEEP_EP_DIR
ARG DEEP_EP_URL
# build sm90 and sm100 for deep_ep for now
ARG TORCH_CUDA_ARCH_LIST="9.0 10.0"

# RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
# curl -LO https://github.com/NVIDIA/nvshmem/releases/download/v3.4.5-0/nvshmem_src_cuda-all-all-3.4.5.tar.gz && \
# tar -zxvf nvshmem_src_cuda-all-all-3.4.5.tar.gz && \
# cd ${CODESPACE}/nvshmem_src && \
# NVSHMEM_SHMEM_SUPPORT=0 \
# NVSHMEM_UCX_SUPPORT=0 \
# NVSHMEM_USE_NCCL=0 \
# NVSHMEM_MPI_SUPPORT=0 \
# NVSHMEM_IBGDA_SUPPORT=1 \
# NVSHMEM_USE_GDRCOPY=0 \
# NVSHMEM_PMIX_SUPPORT=0 \
# NVSHMEM_TIMEOUT_DEVICE_POLLING=0 \
# NVSHMEM_BUILD_TESTS=0 \
# NVSHMEM_BUILD_EXAMPLES=0 \
# NVSHMEM_BUILD_HYDRA_LAUNCHER=0 \
# NVSHMEM_BUILD_TXZ_PACKAGE=0 \
# NVSHMEM_BUILD_PYTHON_LIB=OFF \
# cmake -S . -B build/ -DCMAKE_INSTALL_PREFIX=${NVSHMEM_PREFIX} -DMLX5_lib=/lib/x86_64-linux-gnu/libmlx5.so.1 && \
# cmake --build build --target install --parallel 32 && \
RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
curl -LO https://github.com/NVIDIA/nvshmem/releases/download/v3.4.5-0/nvshmem_src_cuda-all-all-3.4.5.tar.gz && \
tar -zxvf nvshmem_src_cuda-all-all-3.4.5.tar.gz && \
cd ${CODESPACE}/nvshmem_src && \
NVSHMEM_SHMEM_SUPPORT=0 \
NVSHMEM_UCX_SUPPORT=0 \
NVSHMEM_USE_NCCL=0 \
NVSHMEM_MPI_SUPPORT=0 \
NVSHMEM_IBGDA_SUPPORT=1 \
NVSHMEM_USE_GDRCOPY=0 \
NVSHMEM_PMIX_SUPPORT=0 \
NVSHMEM_TIMEOUT_DEVICE_POLLING=0 \
NVSHMEM_BUILD_TESTS=0 \
NVSHMEM_BUILD_EXAMPLES=0 \
NVSHMEM_BUILD_HYDRA_LAUNCHER=0 \
NVSHMEM_BUILD_TXZ_PACKAGE=0 \
NVSHMEM_BUILD_PYTHON_LIB=OFF \
cmake -S . -B build/ -DCMAKE_INSTALL_PREFIX=${NVSHMEM_PREFIX} -DMLX5_lib=/lib/x86_64-linux-gnu/libmlx5.so.1 && \
cmake --build build --target install --parallel 32 && \
cd ${CODESPACE} && git clone $(echo ${DEEP_EP_URL} | cut -d '@' -f 1) && \
cd ${CODESPACE}/DeepEP && \
git checkout $(echo ${DEEP_EP_URL} | cut -d '@' -f 2) && \
git submodule update --init --recursive --force

WORKDIR ${CODESPACE}/DeepEP

RUN NVSHMEM_DIR=${NVSHMEM_PREFIX} pip wheel -w ${DEEP_EP_DIR} -v --no-deps .
RUN pip wheel -w ${DEEP_EP_DIR} -v --no-deps .

# compile deep_gemm
FROM setup_env AS deep_gemm
Expand Down Expand Up @@ -192,7 +193,7 @@ COPY --from=flash_attn ${FLASH_ATTN_DIR} ${FLASH_ATTN_DIR}
COPY --from=adaptive_gemm ${ADAPTIVE_GEMM_DIR} ${ADAPTIVE_GEMM_DIR}
COPY --from=grouped_gemm ${GROUPED_GEMM_DIR} ${GROUPED_GEMM_DIR}
COPY --from=deep_ep ${DEEP_EP_DIR} ${DEEP_EP_DIR}
COPY --from=deep_ep ${NVSHMEM_PREFIX} ${NVSHMEM_PREFIX}
# COPY --from=deep_ep ${NVSHMEM_PREFIX} ${NVSHMEM_PREFIX}
COPY --from=deep_gemm ${DEEP_GEMM_DIR} ${DEEP_GEMM_DIR}
COPY --from=causal_conv1d ${CAUSAL_CONV1D_DIR} ${CAUSAL_CONV1D_DIR}

Expand All @@ -204,51 +205,112 @@ RUN unzip ${DEEP_EP_DIR}/*.whl -d ${PYTHON_SITE_PACKAGE_PATH}
RUN unzip ${DEEP_GEMM_DIR}/*.whl -d ${PYTHON_SITE_PACKAGE_PATH}
RUN unzip ${CAUSAL_CONV1D_DIR}/*.whl -d ${PYTHON_SITE_PACKAGE_PATH}

# install sglang and its runtime requirements
ARG SGLANG_VERSION
ARG DEFAULT_PYPI_URL

RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
pip install sglang==${SGLANG_VERSION} sgl-kernel==0.3.14.post1 pybase64 orjson uvloop setproctitle msgspec \
compressed_tensors python-multipart torch_memory_saver \
grpcio-tools==1.75.1 hf_transfer interegular llguidance==0.7.11 \
xgrammar==0.1.24 blobfile==3.0.0 flashinfer_python==0.4.0 --no-cache-dir --no-deps
# RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
RUN pip install pystack py-spy --no-cache-dir -i ${DEFAULT_PYPI_URL}

# install sglang and its runtime requirements
ENV XTUNER_SGLANG_ENVS_DIR=/envs/sglang

# RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
RUN \
pip install --target ${XTUNER_SGLANG_ENVS_DIR} \
sglang==0.5.9 sgl-kernel==0.3.21 \
apache-tvm-ffi==0.1.9 \
anthropic==0.86.0 \
build==1.4.0 \
cuda-python==12.9.0 \
decord2==3.2.0 \
flashinfer_python==0.6.3 \
flashinfer_cubin==0.6.3 \
gguf==0.18.0 \
modelscope==1.35.3 \
nvidia-cutlass-dsl==4.4.2 \
openai-harmony==0.0.4 \
openai==2.6.1 \
outlines==0.1.11 \
quack-kernels==0.2.4 \
timm==1.0.16 \
torchao==0.9.0 \
torchaudio==2.9.1 \
torchcodec==0.8.0 \
xgrammar==0.1.32 \
smg-grpc-proto==0.4.5 \
grpcio==1.78.1 \
grpcio-reflection==1.78.1 \
grpcio-health-checking==1.80.0 \
pycryptodomex==3.23.0 \
lxml==6.0.2 \
cuda-bindings==12.9.6 \
cuda-pathfinder==1.5.0 \
nvidia-cudnn-frontend==1.21.0 \
lark==1.3.1 \
pycountry==26.2.16 \
airportsdata==20260315 \
outlines_core==0.1.26 \
torch-c-dlpack-ext==0.1.5 \
pyproject_hooks==1.2.0 \
huggingface_hub==0.36.2 \
torch_memory_saver==0.0.9 \
llguidance==0.7.11 blobfile==3.0.0 \
pybase64 orjson uvloop setproctitle msgspec \
compressed_tensors python-multipart \
hf_transfer interegular --no-cache-dir --no-deps -i ${DEFAULT_PYPI_URL}

# install lmdeploy and its missing runtime requirements
ARG LMDEPLOY_VERSION
ARG LMDEPLOY_URL
ENV XTUNER_LMDEPLOY_ENVS_DIR=/envs/lmdeploy

# RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
ARG LMDEPLOY_WHEELS=https://github.com/InternLM/lmdeploy/releases/download/v${LMDEPLOY_VERSION}/lmdeploy-${LMDEPLOY_VERSION}+cu128-cp312-cp312-manylinux2014_x86_64.whl
RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
--mount=type=secret,id=NO_PROXY,env=no_proxy \
pip install fastapi fire openai outlines \
partial_json_parser ray[default] shortuuid uvicorn \
'pydantic>2' openai_harmony dlblas --no-cache-dir && \
partial_json_parser 'ray[default]<3' shortuuid uvicorn pybase64 \
'pydantic>2' openai_harmony dlblas --target ${XTUNER_LMDEPLOY_ENVS_DIR} --no-cache-dir -i ${DEFAULT_PYPI_URL} && \
pip install xgrammar==0.1.32 --no-cache-dir -i ${DEFAULT_PYPI_URL} --no-deps && \
if [ -n "${LMDEPLOY_VERSION}" ]; then \
pip install lmdeploy==${LMDEPLOY_VERSION} --no-deps --no-cache-dir; \
# pip install lmdeploy==${LMDEPLOY_VERSION} --target ${XTUNER_LMDEPLOY_ENVS_DIR} --no-deps --no-cache-dir -i ${DEFAULT_PYPI_URL}; \
echo pip install ${LMDEPLOY_WHEELS} --target ${XTUNER_LMDEPLOY_ENVS_DIR} --no-deps --no-cache-dir -i ${DEFAULT_PYPI_URL}; \
pip install ${LMDEPLOY_WHEELS} --target ${XTUNER_LMDEPLOY_ENVS_DIR} --no-deps --no-cache-dir -i ${DEFAULT_PYPI_URL}; \
else \
git clone $(echo ${LMDEPLOY_URL} | cut -d '@' -f 1) && \
cd ${CODESPACE}/lmdeploy && \
git checkout $(echo ${LMDEPLOY_URL} | cut -d '@' -f 2) && \
pip install . -v --no-deps --no-cache-dir; \
pip install . -v --target ${XTUNER_LMDEPLOY_ENVS_DIR} --no-deps --no-cache-dir -i ${DEFAULT_PYPI_URL}; \
fi

## install xtuner
ARG XTUNER_URL
ARG XTUNER_COMMIT
#RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
# RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
# git clone $(echo ${XTUNER_URL} | cut -d '@' -f 1) && \
# cd ${CODESPACE}/xtuner && \
# git checkout $(echo ${XTUNER_URL} | cut -d '@' -f 2)
COPY . ${CODESPACE}/xtuner

WORKDIR ${CODESPACE}/xtuner
RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
pip install .[all] -v --no-cache-dir

# Install custom .pth file for conditional lmdeploy and sglang path injection
RUN cp -r .dev_scripts/xtuner_rl_path* ${PYTHON_SITE_PACKAGE_PATH}/

# RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
RUN pip install .[all] -v --no-cache-dir -i ${DEFAULT_PYPI_URL}

WORKDIR ${CODESPACE}

# nccl update for torch 2.6.0
RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
if [ "x${TORCH_VERSION}" = "x2.6.0" ]; then \
pip install nvidia-nccl-cu12==2.25.1 --no-cache-dir; \
# RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
RUN if [ "x${TORCH_VERSION}" = "x2.6.0" ]; then \
pip install nvidia-nccl-cu12==2.25.1 --no-cache-dir -i ${DEFAULT_PYPI_URL}; \
fi

# cudnn update for torch 2.9.1
# RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
RUN if [ "x${TORCH_VERSION}" = "x2.9.1" ]; then \
pip install nvidia-cudnn-cu12==9.15.1.9 --no-cache-dir -i ${DEFAULT_PYPI_URL}; \
fi

# setup sysctl
Expand Down
2 changes: 1 addition & 1 deletion autotest/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ default_config:
gpus_per_task: 8
cpus_per_task: 120
memory_per_task: 512
image: registry.h.pjlab.org.cn/ailab-llmrazor/xtuner:pt28_latest
image: registry.h.pjlab.org.cn/ailab-llmrazor/xtuner_tmp:pt29_20260318_95a8ccf
envs:
- HF_HUB_CACHE=/mnt/shared-storage-user/auto-eval-pipeline/opencompass/models/hf_hub
eval:
Expand Down
12 changes: 9 additions & 3 deletions ci/scripts/CI_ENV.sh
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ export VERL_ROLLOUT_DATA_PATH=${CI_SHARE_DATA}/verl-rollout-step0.jsonl
export QWEN3_PATH=${CI_SHARE_MODEL}/Qwen3-8B
export QWEN3_VL_PATH=${CI_SHARE_MODEL}/Qwen2.5-VL-3B-Instruct
export QWEN3_MOE_PATH=${CI_SHARE_MODEL}/Qwen3-30B-A3B
export QWEN3_5_MOE_PATH=${CI_SHARE_MODEL}/Qwen3.5-35B-A3B
export QWEN3_MOE_FOPE_PATH=${CI_SHARE_MODEL}/Qwen3_30B_fope_g0.1_sephead
export INTERNS1_DENSE_PATH=${CI_SHARE_MODEL}/intern-s1-mini
export ROLLOUT_MODEL_PATH=${CI_SHARE_MODEL}/Qwen3-8B
Expand All @@ -32,7 +33,12 @@ export PYTEST_ADDOPTS='-o cache_dir=/tmp/.pytest_cache'
# Some DDP test will cost more than 300s, set it to 600 avoid timeout error.
export DISTRIBUTED_TESTS_DEFAULT_TIMEOUT=600

proxy_off
pip install -e .[all]
# proxy_off
# pip install .[all]

export PYTHONPATH=${LM_DEPLOY}:$PYTHONPATH
# TORCH_VERSION=$(python -c "import torch;print(torch.__version__.split('+')[0])")
# if [[ $TORCH_VERSION == "2.9.1" ]]; then
# pip install nvidia-cudnn-cu12==9.15.1.9
# fi

# export PYTHONPATH=${LM_DEPLOY}:$PYTHONPATH
22 changes: 12 additions & 10 deletions image_build.sh
Original file line number Diff line number Diff line change
Expand Up @@ -10,22 +10,27 @@ export DEEP_EP_URL=https://github.com/deepseek-ai/DeepEP@9af0e0d0e74f3577af1979c
export DEEP_GEMM_URL=https://github.com/deepseek-ai/DeepGEMM@c9f8b34dcdacc20aa746b786f983492c51072870 # v2.1.1.post3
export CAUSAL_CONV1D_URL=https://github.com/Dao-AILab/causal-conv1d@da6dbaa9fd5a919967f14d3fd031da1288ad5025 # v1.6.0

export TORCH_VERSION=${TORCH_VERSION:-"2.8.0"}
export LMDEPLOY_VERSION="0.11.0"
# export LMDEPLOY_URL=https://github.com/InternLM/lmdeploy@a9a24fbd8985374cb01ecb6021d1ce9668253c9c
export TORCH_VERSION=${TORCH_VERSION:-"2.9.0"}
export LMDEPLOY_VERSION="0.12.2"
# export LMDEPLOY_URL=https://github.com/InternLM/lmdeploy@9a50f1f4eaf1e4fbe45892bc8017a7359237160c
export PPA_SOURCE="https://mirrors.aliyun.com"
export SGLANG_VERSION="0.5.3"
export DEFAULT_PYPI_URL=${DEFAULT_PYPI_URL:-"https://mirrors.aliyun.com/pypi/simple"}
# mirror https://download.pytorch.org/whl
export PYTORCH_WHEELS_URL=${PYTORCH_WHEELS_URL:-"https://download.pytorch.org/whl"}

image_name=${IMAGE_NAME:-"xtuner"}
image_tag=${IMAGE_TAG:-"pt$(echo ${TORCH_VERSION} | awk -F. '{print $1$2}')_$(date +%Y%m%d)_${XTUNER_COMMIT:0:7}"}

docker build . \
-t "$image_name:$image_tag" \
--secret id=HTTPS_PROXY \
--secret id=NO_PROXY \
--build-arg TORCH_VERSION=$TORCH_VERSION\
--build-arg BASE_IMAGE=$BASE_IMAGE \
--build-arg PPA_SOURCE=$PPA_SOURCE \
--build-arg ADAPTIVE_GEMM_URL=$ADAPTIVE_GEMM_URL \
--build-arg PPA_SOURCE="$PPA_SOURCE" \
--build-arg DEFAULT_PYPI_URL="$DEFAULT_PYPI_URL" \
--build-arg PYTORCH_WHEELS_URL="$PYTORCH_WHEELS_URL" \
--build-arg ADAPTIVE_GEMM_URL="$ADAPTIVE_GEMM_URL" \
--build-arg FLASH_ATTN_URL=$FLASH_ATTN_URL \
--build-arg GROUPED_GEMM_URL=$GROUPED_GEMM_URL \
--build-arg CAUSAL_CONV1D_URL=$CAUSAL_CONV1D_URL \
Expand All @@ -34,8 +39,6 @@ docker build . \
--build-arg XTUNER_URL=$XTUNER_URL \
--build-arg XTUNER_COMMIT=$XTUNER_COMMIT \
--build-arg LMDEPLOY_VERSION=$LMDEPLOY_VERSION \
--build-arg LMDEPLOY_URL=$LMDEPLOY_URL \
--build-arg SGLANG_VERSION=$SGLANG_VERSION \
--progress=plain \
--label "BASE_IMAGE=$BASE_IMAGE" \
--label "XTUNER_URL=${XTUNER_URL/@/\/tree\/}" \
Expand All @@ -46,5 +49,4 @@ docker build . \
--label "CAUSAL_CONV1D_URL=${CAUSAL_CONV1D_URL/@/\/tree\/}" \
--label "DEEP_EP_URL=${DEEP_EP_URL/@/\/tree\/}" \
--label "DEEP_GEMM_URL=${DEEP_GEMM_URL/@/\/tree\/}" \
--label "LMDEPLOY_VERSION=$LMDEPLOY_VERSION" \
--label "SGLANG_VERSION=$SGLANG_VERSION"
--label "LMDEPLOY_VERSION=$LMDEPLOY_VERSION"
4 changes: 2 additions & 2 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ dependencies = [
"datasets<4.0.0",
"einops",
"loguru",
"mmengine==0.11.0rc0",
"mmengine==0.11.0rc2",
"openpyxl",
"peft>=0.14.0",
"scikit-image",
Expand All @@ -37,7 +37,7 @@ dependencies = [
"tiktoken",
"torch>=2.6.0",
"torchvision",
"transformers==4.57.0",
"transformers==5.2.0",
"cyclopts",
"transformers_stream_generator",
"opencv-python-headless<=4.12.0.88",
Expand Down
Loading
Loading