diff --git a/docker-compose.yml b/docker-compose.yml
index dc96935b66..8370c1fd8b 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,16 +1,17 @@
+version: "3.9"
+
 services:
   gpt-engineer:
     build:
       context: .
       dockerfile: docker/Dockerfile
+    image: gpt-engineer
     stdin_open: true
     tty: true
-    # Set the API key from the .env file
     env_file:
       - .env
-    ## OR set the API key directly
+    # OR set the API key directly:
     # environment:
-    # - OPENAI_API_KEY=YOUR_API_KEY
-    image: gpt-engineer
+    #   - OPENAI_API_KEY=YOUR_API_KEY
     volumes:
       - ./projects/example:/project
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 50e3fe1811..54746f3d9a 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -1,29 +1,41 @@
 # Stage 1: Builder stage
-FROM python:3.11-slim AS builder
+FROM python:3.13-slim AS builder
 
+# Install necessary OS packages
 RUN apt-get update && apt-get install -y --no-install-recommends \
-  tk \
-  tcl \
-  curl \
-  git \
-  && rm -rf /var/lib/apt/lists/*
+    tk \
+    tcl \
+    curl \
+    git \
+    && rm -rf /var/lib/apt/lists/*
 
 WORKDIR /app
 
+# Copy source code
 COPY . .
 
+# Install dependencies; quote the version specifier so the shell does not treat ">" as a redirect
 RUN pip install --no-cache-dir -e .
+RUN pip install --no-cache-dir "openai>=1.0.0" backoff langchain-google-genai
 
 # Stage 2: Final stage
-FROM python:3.11-slim
+FROM python:3.13-slim
 
 WORKDIR /app
 
-COPY --from=builder /usr/local/lib/python3.11/site-packages /usr/local/lib/python3.11/site-packages
+# Copy installed Python packages and binaries from builder
+COPY --from=builder /usr/local/lib/python3.13/site-packages /usr/local/lib/python3.13/site-packages
 COPY --from=builder /usr/local/bin /usr/local/bin
 COPY --from=builder /usr/bin /usr/bin
+
+# Copy app code
 COPY --from=builder /app .
 
+# Copy entrypoint script
 COPY docker/entrypoint.sh .
 
+# Make entrypoint executable
+RUN chmod +x /app/entrypoint.sh
+
+# Set entrypoint
 ENTRYPOINT ["bash", "/app/entrypoint.sh"]
diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh
index 138ab50255..b4a248c993 100644
--- a/docker/entrypoint.sh
+++ b/docker/entrypoint.sh
@@ -1,10 +1,14 @@
 #!/usr/bin/env bash
 # -*- coding: utf-8 -*-
 
+set -e  # Exit immediately if a command exits with a non-zero status
+
 project_dir="/project"
 
-# Run the gpt engineer script
-gpt-engineer $project_dir "$@"
+# Run gpt-engineer with all passed arguments
+gpt-engineer "$project_dir" "$@"
 
-# Patch the permissions of the generated files to be owned by nobody except prompt file
-find "$project_dir" -mindepth 1 -maxdepth 1 ! -path "$project_dir/prompt" -exec chown -R nobody:nogroup {} + -exec chmod -R 777 {} +
+# Patch permissions of generated files (everything except the prompt file)
+find "$project_dir" -mindepth 1 -maxdepth 1 ! -path "$project_dir/prompt" \
+  -exec chown -R nobody:nogroup {} + \
+  -exec chmod -R 777 {} +
diff --git a/gpt_engineer/core/ai.py b/gpt_engineer/core/ai.py
index ae86f63364..a923d570c6 100644
--- a/gpt_engineer/core/ai.py
+++ b/gpt_engineer/core/ai.py
@@ -37,6 +37,8 @@
 )
 from langchain_anthropic import ChatAnthropic
 from langchain_openai import AzureChatOpenAI, ChatOpenAI
+from langchain_google_genai import ChatGoogleGenerativeAI
+
 
 from gpt_engineer.core.token_usage import TokenUsageLog
 
@@ -250,7 +252,8 @@ def next(
 
         return messages
 
+    # openai>=1.0.0 exposes RateLimitError at the top level; the pre-1.0 openai.error module no longer exists
     @backoff.on_exception(backoff.expo, openai.RateLimitError, max_tries=7, max_time=45)
     def backoff_inference(self, messages):
         """
         Perform inference using the language model while implementing an exponential backoff strategy.
@@ -362,6 +365,15 @@ def _create_chat_model(self) -> BaseChatModel:
                 streaming=self.streaming,
                 max_tokens_to_sample=4096,
             )
+        elif "gemini" in self.model_name:
+            # langchain_google_genai exports ChatGoogleGenerativeAI (there is no ChatGemini class)
+            return ChatGoogleGenerativeAI(
+                model=self.model_name,
+                temperature=self.temperature,
+                streaming=self.streaming,
+                callbacks=[StreamingStdOutCallbackHandler()],
+                max_output_tokens=4096,
+            )
         elif self.vision:
             return ChatOpenAI(
                 model=self.model_name,
@@ -379,6 +391,7 @@ def _create_chat_model(self) -> BaseChatModel:
         )
 
 
+
 def serialize_messages(messages: List[Message]) -> str:
     return AI.serialize_messages(messages)
 