Skip to content
This repository was archived by the owner on Sep 20, 2025. It is now read-only.

Commit 026c2ba

Browse files
authored
fix: update base image uri (#189)
1 parent d173ab7 commit 026c2ba

File tree

16 files changed

+16
-16
lines changed

16 files changed

+16
-16
lines changed

src/emd/cfn/shared/ecs_cluster.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ Parameters:
1010
APIRouterImageURI:
1111
Type: String
1212
Description: The URI of OpenAI compatible API router image. If provided, the router will be deployed.
13-
Default: "public.ecr.aws/aws-gcr-solutions/dmaa/api-router:latest"
13+
Default: "quay.io/dmaa/api-router:latest"
1414
UseSpot:
1515
Type: String
1616
Description: Use Fargate Spot capacity?

src/pipeline/backend/huggingface/embedding/Dockerfile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
FROM public.ecr.aws/aws-gcr-solutions/dmaa/huggingface/transformers-pytorch-gpu:{{VERSION}} AS huggingface-base
1+
FROM quay.io/dmaa/huggingface/transformers-pytorch-gpu:{{VERSION}} AS huggingface-base
22

33
# Create a new stage based on vllm-base
44
FROM huggingface-base AS sagemaker-serving

src/pipeline/backend/huggingface/llm/Dockerfile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
FROM public.ecr.aws/aws-gcr-solutions/dmaa/huggingface/transformers-pytorch-gpu:{{VERSION}} AS huggingface-base
1+
FROM quay.io/dmaa/huggingface/transformers-pytorch-gpu:{{VERSION}} AS huggingface-base
22

33
# Create a new stage based on vllm-base
44
FROM huggingface-base AS sagemaker-serving

src/pipeline/backend/huggingface/llm/Dockerfile_baichuan

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
FROM public.ecr.aws/aws-gcr-solutions/dmaa/huggingface/transformers-pytorch-gpu:{{VERSION}} AS huggingface-base
1+
FROM quay.io/dmaa/huggingface/transformers-pytorch-gpu:{{VERSION}} AS huggingface-base
22

33
# Create a new stage based on vllm-base
44
FROM huggingface-base AS sagemaker-serving

src/pipeline/backend/huggingface/rerank/Dockerfile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
FROM public.ecr.aws/aws-gcr-solutions/dmaa/huggingface/transformers-pytorch-gpu:{{VERSION}} AS huggingface-base
1+
FROM quay.io/dmaa/huggingface/transformers-pytorch-gpu:{{VERSION}} AS huggingface-base
22

33
# Create a new stage based on vllm-base
44
FROM huggingface-base AS sagemaker-serving

src/pipeline/backend/llama_cpp/Dockerfile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
FROM public.ecr.aws/aws-gcr-solutions/dmaa/llama-cpp:{{VERSION}}
1+
FROM quay.io/dmaa/llama-cpp:{{VERSION}}
22

33
# Ensure the serve script has executable permissions
44
# RUN chmod +x /usr/bin/serve

src/pipeline/backend/lmdeploy/Dockerfile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
FROM public.ecr.aws/aws-gcr-solutions/dmaa/openmmlab/lmdeploy:{{VERSION}} AS lmdeploy-base
1+
FROM quay.io/dmaa/openmmlab/lmdeploy:{{VERSION}} AS lmdeploy-base
22

33
# Create a new stage based on vllm-base
44
FROM lmdeploy-base AS sagemaker-serving

src/pipeline/backend/lmdeploy/Dockerfile_internvl2

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
FROM public.ecr.aws/aws-gcr-solutions/dmaa/openmmlab/lmdeploy:{{VERSION}} AS lmdeploy-base
1+
FROM quay.io/dmaa/openmmlab/lmdeploy:{{VERSION}} AS lmdeploy-base
22

33
# Create a new stage based on vllm-base
44
FROM lmdeploy-base AS sagemaker-serving

src/pipeline/backend/ollama/Dockerfile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
FROM public.ecr.aws/aws-gcr-solutions/dmaa/ollama:{{VERSION}} AS ollama-base
1+
FROM quay.io/dmaa/ollama:{{VERSION}} AS ollama-base
22

33
# Ensure the serve script has executable permissions
44
# RUN chmod +x /usr/bin/serve

src/pipeline/backend/tgi/Dockerfile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
FROM public.ecr.aws/aws-gcr-solutions/dmaa/huggingface/text-generation-inference:{{VERSION}} AS tgi-base
1+
FROM quay.io/dmaa/huggingface/text-generation-inference:{{VERSION}} AS tgi-base
22

33
# Create a new stage based on vllm-base
44
FROM tgi-base AS sagemaker-serving

0 commit comments

Comments (0)