name: Validate Azure OpenAI response

on:
  workflow_dispatch: # run on demand from the Actions tab

jobs:
  run-validation:
    runs-on: ubuntu-latest
    environment: responses # 🔑 unlocks the environment-scoped secrets
    # Expose the environment secrets as real process env-vars
    env:
      AZURE_OPENAI_API_KEY: ${{ secrets.AZURE_OPENAI_API_KEY }}
      AZURE_OPENAI_V1_API_ENDPOINT: ${{ secrets.AZURE_OPENAI_V1_API_ENDPOINT }}
      AZURE_OPENAI_API_MODEL: ${{ secrets.AZURE_OPENAI_API_MODEL }}
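      # Illustrative formats, not real values: the v1 endpoint is typically
      # https://<resource>.openai.azure.com/openai/v1/ and the model is the
      # name of a deployment, e.g. gpt-4o-mini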
    steps:
      # 1 – check out the repo so the script is available
      - uses: actions/checkout@v4

      # 2 – set up Python
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"

      # 3 – install the script's two lightweight deps
      - name: Install requirements
        run: |
          python -m pip install --upgrade pip
          pip install openai python-dotenv

      # 4 – run the script, grade the result, assemble a report
      - name: Execute script and capture outcome
        id: test
        shell: bash
        run: |
          set +e # we want to handle failures ourselves
          TIMESTAMP=$(date -u +"%Y-%m-%dT%H:%M:%SZ")

          # === run the user script ===
          python responses-basic-aoai-v1.py > out.txt 2>&1
          EXIT_CODE=$?

          # === decide pass / fail ===
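          # (the -s test below means PASS also requires non-empty output; a
          # script that exits 0 but prints nothing is graded FAIL)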
          if [[ $EXIT_CODE -eq 0 && -s out.txt ]]; then
            PASS_FAIL="PASS"
          else
            PASS_FAIL="FAIL"
          fi

          # === build JSON report ===
          jq -n \
            --arg date "$TIMESTAMP" \
            --arg output "$(tr -d '\r' < out.txt)" \
            --arg pass_fail "$PASS_FAIL" \
            --argjson code "$EXIT_CODE" \
            '{test_run_date: $date,
              output: $output,
              pass_fail: $pass_fail,
              error_code: $code}' > aoai-test-result.json
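          # Example aoai-test-result.json (illustrative values only):
          # {
          #   "test_run_date": "2025-07-01T12:00:00Z",
          #   "output": "Hello from the Responses API",
          #   "pass_fail": "PASS",
          #   "error_code": 0
          # }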
      # 5 – make the report downloadable from the run summary
      - name: Upload result artifact
        uses: actions/upload-artifact@v4
        with:
          name: aoai-response-test # artifact name shown in the run UI
          path: aoai-test-result.json
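
The workflow expects a script named responses-basic-aoai-v1.py at the repository root. That script is not part of the workflow file above; the following is a minimal sketch of what it might contain, assuming it uses the openai SDK's Responses API against the Azure OpenAI v1 endpoint and reads the same three environment variables the workflow exposes. The prompt text is illustrative.

# responses-basic-aoai-v1.py (minimal sketch, not the actual script)
import os

from dotenv import load_dotenv
from openai import OpenAI

# Loads a local .env during development; a no-op in CI, where the
# workflow injects the variables directly.
load_dotenv()

client = OpenAI(
    base_url=os.environ["AZURE_OPENAI_V1_API_ENDPOINT"],
    api_key=os.environ["AZURE_OPENAI_API_KEY"],
)

response = client.responses.create(
    model=os.environ["AZURE_OPENAI_API_MODEL"],  # deployment name
    input="Say hello in one short sentence.",  # illustrative prompt
)

# Whatever is printed ends up in out.txt; an empty file, or an exception
# raised above, makes the workflow grade the run as FAIL.
print(response.output_text)

Note that the script needs no error handling of its own: any unhandled exception produces a non-zero exit code, which trips the FAIL branch in step 4 of the workflow.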