# Use the official Python 3.9 slim image
FROM python:3.9-slim

# Create a non-root user
RUN useradd -m appuser

# Set the working directory inside the container
WORKDIR /app

# Point the Hugging Face / sentence-transformers caches at a writable location
ENV TRANSFORMERS_CACHE=/app/cache/huggingface/transformers
ENV HF_HOME=/app/cache/huggingface
ENV SENTENCE_TRANSFORMERS_HOME=/app/cache/sentence_transformers

# Create the cache directories and make them writable by the non-root user
RUN mkdir -p /app/cache/huggingface/transformers && \
    mkdir -p /app/cache/sentence_transformers && \
    chown -R appuser:appuser /app/cache

# Copy the requirements file into the container
COPY requirements.txt ./requirements.txt

# Install system dependencies and Python packages
RUN apt-get update && \
    apt-get -y install gcc libpq-dev && \
    pip install --no-cache-dir -r requirements.txt

# Copy the entire project into the container
COPY . /app

# Change ownership of the /app directory to the non-root user
RUN chown -R appuser:appuser /app

# Switch to the non-root user
USER appuser

# Write a small script that pre-downloads the sentence-transformers models
RUN echo "from sentence_transformers import SentenceTransformer; \
SentenceTransformer('sentence-transformers/msmarco-distilbert-base-v3'); \
SentenceTransformer('sentence-transformers/all-mpnet-base-v2'); \
SentenceTransformer('sentence-transformers/paraphrase-MiniLM-L6-v2');" > load_models.py

# Download the models at build time so they are baked into the image
RUN python load_models.py

# RUN python -c "SentenceTransformer('sentence-transformers/paraphrase-MiniLM-L6-v2');"

# Start the application
# CMD ["python", "semantic_search.py"]
# CMD ["gunicorn", "-b", "0.0.0.0:7860", "semantic_search:app"]
CMD ["uvicorn", "semantic_search:app", "--host", "0.0.0.0", "--port", "7860"]
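
# Minimal build-and-run sketch for local use. The image tag "semantic-search"
# is an arbitrary example name (not defined anywhere in this repo); the host
# port simply mirrors the 7860 port used in the CMD above.
#
#   docker build -t semantic-search .
#   docker run -p 7860:7860 semantic-search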