# bach-or-bot / Dockerfile
# Auto-deployed from GitHub by krislette
# Source commit: 7c591156b27da3e33cf2a35fbb1d3fdf593c7e3f (61f21af)
# CUDA runtime base image so the container can use NVIDIA GPUs at runtime.
# Tag is pinned to an exact CUDA/Ubuntu version for reproducible builds.
FROM nvidia/cuda:13.0.1-runtime-ubuntu22.04

# Build-time only: keep apt/tzdata non-interactive during the image build.
# ARG (not ENV) so DEBIAN_FRONTEND does not leak into the runtime environment
# of the final container.
ARG DEBIAN_FRONTEND=noninteractive
# Timezone for tzdata configuration and the running application.
ENV TZ=UTC
# Install Python and basic dependencies
RUN apt-get update && apt-get install -y \
software-properties-common \
&& add-apt-repository ppa:deadsnakes/ppa \
&& apt-get update && apt-get install -y \
python3.11 \
python3.11-dev \
python3.11-venv \
python3.11-distutils \
git \
libsndfile1 \
ffmpeg \
curl \
&& rm -rf /var/lib/apt/lists/* \
&& ln -sf /usr/bin/python3.11 /usr/bin/python3 \
&& ln -sf /usr/bin/python3.11 /usr/bin/python \
&& curl -sS https://bootstrap.pypa.io/get-pip.py | python3.11
# All subsequent paths are relative to /app (created automatically).
WORKDIR /app

# Copy only the dependency manifests before the application code so this
# expensive install layer stays cached until pyproject.toml/poetry.lock
# change. poetry.lock* tolerates a missing lock file.
COPY pyproject.toml poetry.lock* ./
# --no-cache-dir: don't persist pip's download cache into the layer.
# virtualenvs.create false: install straight into the system interpreter,
# which is what the runtime uses.
# --no-root: the project source is not copied yet, so install only the
# declared main-group dependencies, not the project package itself.
# --no-interaction/--no-ansi: non-interactive, log-friendly CI output.
RUN python3.11 -m pip install --no-cache-dir poetry && \
    poetry config virtualenvs.create false && \
    poetry install --only=main --no-root --no-interaction --no-ansi
# Create a non-root runtime user. uid 1000 follows the Hugging Face Docker
# Spaces convention; running as root in the final image is an anti-pattern.
RUN useradd -m -u 1000 appuser

# Copy application code, owned by the runtime user. Separate COPY lines keep
# cache invalidation scoped to the directory that actually changed.
COPY --chown=appuser:appuser models/ ./models/
COPY --chown=appuser:appuser config/ ./config/
COPY --chown=appuser:appuser app/ ./app/
COPY --chown=appuser:appuser scripts/ ./scripts/
COPY --chown=appuser:appuser src/ ./src/

# Cache directories writable by the runtime user. Ownership is granted to
# that user instead of a world-writable chmod -R 777.
RUN mkdir -p /app/.cache/huggingface /app/.cache/torch /tmp/numba_cache && \
    chown -R appuser:appuser /app/.cache /tmp/numba_cache

# Drop root for everything from here on (including the CMD process).
USER appuser
# Runtime environment, grouped into a single ENV instruction (one layer,
# one place to read the app's configuration surface).
#   PYTHONPATH             — make /app importable (app.*, src.*, etc.)
#   HF_HOME / TORCH_HOME   — redirect model caches into /app/.cache
#   TRANSFORMERS_CACHE     — legacy alias of HF_HOME; kept for older
#                            transformers versions (deprecated upstream)
#   NUMBA_*                — writable JIT cache dir; JIT left enabled (=0)
#   MUSICLIME_*            — app-specific explainer tuning knobs
#   PYTORCH_CUDA_ALLOC_CONF — reduce CUDA memory fragmentation
ENV PYTHONPATH="/app" \
    HF_HOME="/app/.cache/huggingface" \
    TRANSFORMERS_CACHE="/app/.cache/huggingface" \
    TORCH_HOME="/app/.cache/torch" \
    NUMBA_CACHE_DIR="/tmp/numba_cache" \
    NUMBA_DISABLE_JIT=0 \
    MUSICLIME_NUM_SAMPLES=1000 \
    MUSICLIME_NUM_FEATURES=10 \
    PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True
# Hugging Face Spaces routes traffic to port 7860. EXPOSE is documentation
# only — the port is actually bound by uvicorn below.
EXPOSE 7860
# Exec-form CMD: uvicorn runs as PID 1 and receives SIGTERM directly on
# container stop. --timeout-keep-alive 600 holds idle keep-alive connections
# open for up to 600 s (presumably to accommodate long-running inference
# clients — confirm against expected request patterns).
CMD ["uvicorn", "app.server:app", "--host", "0.0.0.0", "--port", "7860", "--timeout-keep-alive", "600"]