Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions AudioQnA/Dockerfile.openEuler
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# Copyright (C) 2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: Apache-2.0

# openEuler variant of the AudioQnA megaservice image: layers the gateway
# entrypoint (audioqna.py) on top of the shared OPEA comps base image.
ARG IMAGE_REPO=opea
ARG BASE_TAG=latest
FROM $IMAGE_REPO/comps-base:$BASE_TAG-openeuler

# NOTE(review): assumes $HOME is set as ENV by the comps base image so that
# COPY can expand it at build time — confirm against comps-base Dockerfile.
COPY ./audioqna.py $HOME/audioqna.py

ENTRYPOINT ["python", "audioqna.py"]
92 changes: 92 additions & 0 deletions AudioQnA/docker_compose/intel/cpu/xeon/compose_openeuler.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
# Copyright (C) 2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: Apache-2.0

# AudioQnA pipeline on Intel Xeon built from openEuler-based images:
# whisper (ASR) + vllm (LLM) + speecht5 (TTS), orchestrated by the
# audioqna megaservice backend, with the Svelte UI in front.
services:
  whisper-service:
    image: ${REGISTRY:-opea}/whisper:${TAG:-latest}-openeuler
    container_name: whisper-service
    ports:
      # Port mappings are quoted so YAML never mis-types the scalar.
      - "${WHISPER_SERVER_PORT:-7066}:7066"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
    restart: unless-stopped
  speecht5-service:
    image: ${REGISTRY:-opea}/speecht5:${TAG:-latest}-openeuler
    container_name: speecht5-service
    ports:
      - "${SPEECHT5_SERVER_PORT:-7055}:7055"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
    restart: unless-stopped
  vllm-service:
    image: openeuler/vllm-cpu:0.9.1-oe2403lts
    container_name: vllm-service
    ports:
      # Host port LLM_SERVER_PORT maps onto the in-container listener on 80.
      - "${LLM_SERVER_PORT:-3006}:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/root/.cache/huggingface/hub"
    shm_size: 128g
    privileged: true
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HF_TOKEN: ${HF_TOKEN}
      LLM_MODEL_ID: ${LLM_MODEL_ID}
      VLLM_TORCH_PROFILER_DIR: "/mnt"
      LLM_SERVER_PORT: ${LLM_SERVER_PORT}
      VLLM_CPU_OMP_THREADS_BIND: all
      # Quoted so the value stays a string for the consumer.
      VLLM_CPU_KVCACHE_SPACE: "30"
    healthcheck:
      # Probe the in-container listener directly. The previous form
      # (http://$host_ip:${LLM_SERVER_PORT}/health) depended on host_ip being
      # exported at compose time and round-tripped through the published
      # host port; localhost:80 works regardless of the host environment.
      test: ["CMD-SHELL", "curl -f http://localhost:80/health || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 100
    command: --model ${LLM_MODEL_ID} --host 0.0.0.0 --port 80
  audioqna-xeon-backend-server:
    image: ${REGISTRY:-opea}/audioqna:${TAG:-latest}-openeuler
    container_name: audioqna-xeon-backend-server
    depends_on:
      - whisper-service
      - vllm-service
      - speecht5-service
    ports:
      - "3008:8888"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
      - WHISPER_SERVER_HOST_IP=${WHISPER_SERVER_HOST_IP}
      - WHISPER_SERVER_PORT=${WHISPER_SERVER_PORT}
      - LLM_SERVER_HOST_IP=${LLM_SERVER_HOST_IP}
      - LLM_SERVER_PORT=${LLM_SERVER_PORT}
      - LLM_MODEL_ID=${LLM_MODEL_ID}
      - SPEECHT5_SERVER_HOST_IP=${SPEECHT5_SERVER_HOST_IP}
      - SPEECHT5_SERVER_PORT=${SPEECHT5_SERVER_PORT}
    ipc: host
    restart: always
  audioqna-xeon-ui-server:
    image: ${REGISTRY:-opea}/audioqna-ui:${TAG:-latest}-openeuler
    container_name: audioqna-xeon-ui-server
    depends_on:
      - audioqna-xeon-backend-server
    ports:
      - "5173:5173"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - CHAT_URL=${BACKEND_SERVICE_ENDPOINT}
    ipc: host
    restart: always

networks:
  default:
    driver: bridge
29 changes: 29 additions & 0 deletions AudioQnA/docker_image_build/build.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -13,12 +13,29 @@ services:
context: ../
dockerfile: ./Dockerfile
image: ${REGISTRY:-opea}/audioqna:${TAG:-latest}
# openEuler variant of the AudioQnA megaservice image; built from the
# -openeuler comps base selected via the IMAGE_REPO/BASE_TAG build args.
audioqna-openeuler:
  build:
    args:
      # REGISTRY/TAG pick the comps-base image used in Dockerfile.openEuler.
      IMAGE_REPO: ${REGISTRY}
      BASE_TAG: ${TAG}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      no_proxy: ${no_proxy}
    context: ../
    dockerfile: ./Dockerfile.openEuler
  image: ${REGISTRY:-opea}/audioqna:${TAG:-latest}-openeuler
# Default (non-openEuler) UI image; extends audioqna to inherit its
# proxy-related build arguments.
audioqna-ui:
  build:
    context: ../ui
    dockerfile: ./docker/Dockerfile
  extends: audioqna
  image: ${REGISTRY:-opea}/audioqna-ui:${TAG:-latest}
# openEuler variant of the UI image; same context as audioqna-ui but
# built from Dockerfile.openEuler and tagged with the -openeuler suffix.
audioqna-ui-openeuler:
  build:
    context: ../ui
    dockerfile: ./docker/Dockerfile.openEuler
  extends: audioqna
  image: ${REGISTRY:-opea}/audioqna-ui:${TAG:-latest}-openeuler
audioqna-multilang:
build:
context: ../
Expand All @@ -37,6 +54,12 @@ services:
dockerfile: comps/third_parties/whisper/src/Dockerfile
extends: audioqna
image: ${REGISTRY:-opea}/whisper:${TAG:-latest}
# openEuler variant of the whisper (ASR) third-party image, built from the
# GenAIComps checkout cloned into docker_image_build/ by the test script.
whisper-openeuler:
  build:
    context: GenAIComps
    dockerfile: comps/third_parties/whisper/src/Dockerfile.openEuler
  extends: audioqna
  image: ${REGISTRY:-opea}/whisper:${TAG:-latest}-openeuler
asr:
build:
context: GenAIComps
Expand All @@ -61,6 +84,12 @@ services:
dockerfile: comps/third_parties/speecht5/src/Dockerfile
extends: audioqna
image: ${REGISTRY:-opea}/speecht5:${TAG:-latest}
# openEuler variant of the speecht5 (TTS) third-party image, built from the
# GenAIComps checkout cloned into docker_image_build/ by the test script.
speecht5-openeuler:
  build:
    context: GenAIComps
    dockerfile: comps/third_parties/speecht5/src/Dockerfile.openEuler
  extends: audioqna
  image: ${REGISTRY:-opea}/speecht5:${TAG:-latest}-openeuler
tts:
build:
context: GenAIComps
Expand Down
103 changes: 103 additions & 0 deletions AudioQnA/tests/test_compose_openeuler_on_xeon.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
#!/bin/bash
# Copyright (C) 2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: Apache-2.0

# CI test: build the openEuler AudioQnA images, start the Xeon compose
# stack, exercise the /v1/audioqna endpoint, then tear everything down.
set -e
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
# REGISTRY/TAG/MODEL_CACHE are consumed by build.yaml and compose_openeuler.yaml.
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}
export MODEL_CACHE=${model_cache:-"./data"}

# Script is expected to run from AudioQnA/tests, so WORKPATH is the repo's
# AudioQnA directory.
WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
# First IP of this host; used to reach the published service ports.
ip_address=$(hostname -I | awk '{print $1}')

# Build the comps base and all -openeuler service images from a fresh
# GenAIComps checkout. Logs the compose build to docker_image_build.log.
function build_docker_images() {
    opea_branch=${opea_branch:-"main"}

    cd $WORKPATH/docker_image_build
    # Remove any stale checkout first: with set -e, a re-run would otherwise
    # abort because git clone refuses to clone into an existing directory.
    rm -rf GenAIComps
    git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
    pushd GenAIComps
    echo "GenAIComps test commit is $(git rev-parse HEAD)"
    docker build --no-cache -t ${REGISTRY}/comps-base:${TAG}-openeuler --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile.openEuler .
    popd && sleep 1s

    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
    service_list="audioqna-openeuler audioqna-ui-openeuler whisper-openeuler speecht5-openeuler"
    # Capture stderr too — docker compose writes build progress there.
    docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log 2>&1

    docker images && sleep 1s
}

# Bring up the openEuler compose stack and wait for vLLM to finish loading.
function start_services() {
    cd $WORKPATH/docker_compose/intel/cpu/xeon/
    # set_env.sh reads host_ip to derive the service endpoint variables.
    export host_ip=${ip_address}
    source set_env.sh
    # sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env

    # Start Docker Containers
    docker compose -f compose_openeuler.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
    n=0
    # Poll the vLLM logs (up to 200 x 5s) for the word "complete", taken here
    # as the model-load-finished marker; if it never appears the function
    # falls through without failing and validation will catch the problem.
    until [[ "$n" -ge 200 ]]; do
        docker logs vllm-service > $LOG_PATH/vllm_service_start.log 2>&1
        if grep -q complete $LOG_PATH/vllm_service_start.log; then
            break
        fi
        sleep 5s
        n=$((n+1))
    done
}


# POST a tiny base64-encoded WAV clip to the megaservice endpoint and verify
# the response decodes to RIFF audio; dumps all service logs either way.
function validate_megaservice() {
    # http_proxy is cleared so the request goes straight to the local port.
    response=$(http_proxy="" curl http://${ip_address}:3008/v1/audioqna -XPOST -d '{"audio": "UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA", "max_tokens":64}' -H 'Content-Type: application/json')
    # always print the log
    docker logs whisper-service > $LOG_PATH/whisper-service.log
    docker logs speecht5-service > $LOG_PATH/tts-service.log
    docker logs vllm-service > $LOG_PATH/vllm-service.log
    docker logs audioqna-xeon-backend-server > $LOG_PATH/audioqna-xeon-backend-server.log
    # Response is a JSON-quoted base64 string: strip the quotes, then decode.
    # NOTE(review): the decoded payload is RIFF/WAV despite the .mp3 filename.
    echo "$response" | sed 's/^"//;s/"$//' | base64 -d > speech.mp3

    if [[ $(file speech.mp3) == *"RIFF"* ]]; then
        echo "Result correct."
    else
        echo "Result wrong."
        exit 1
    fi

}

# Stop and remove the containers started from compose_openeuler.yaml.
function stop_docker() {
    cd $WORKPATH/docker_compose/intel/cpu/xeon/
    # Pass -f to "rm" as well: the original "docker compose rm -f" used the
    # default compose file and could miss the containers from this stack.
    docker compose -f compose_openeuler.yaml stop && docker compose -f compose_openeuler.yaml rm -f
}

# Test driver: clean slate -> build -> start -> validate -> tear down.
# The ::group:: markers fold each phase in GitHub Actions logs.
function main() {

    echo "::group::stop_docker"
    stop_docker
    echo "::endgroup::"

    echo "::group::build_docker_images"
    # Images are rebuilt only for the default "opea" repo; other registries
    # are expected to supply prebuilt images.
    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
    echo "::endgroup::"

    echo "::group::start_services"
    start_services
    echo "::endgroup::"

    echo "::group::validate_megaservice"
    validate_megaservice
    echo "::endgroup::"

    echo "::group::stop_docker"
    stop_docker
    docker system prune -f
    echo "::endgroup::"

}

main
30 changes: 30 additions & 0 deletions AudioQnA/ui/docker/Dockerfile.openEuler
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
# Copyright (C) 2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: Apache-2.0

# openEuler-based image for the AudioQnA Svelte UI, served via "npm run preview".

# Use node 20.11.1 as the base image
FROM openeuler/node:20.11.1-oe2403lts

# Update package manager and install Git
# NOTE(review): git is presumably needed for npm git dependencies — confirm.
RUN yum update -y && \
    yum install -y \
    git && \
    yum clean all && \
    rm -rf /var/cache/yum

# Copy the front-end code repository
COPY svelte /home/user/svelte

# Set the working directory
WORKDIR /home/user/svelte

# Install dependencies and build in a single layer: the two separate RUN
# steps produced an extra intermediate layer for no benefit.
RUN npm install && npm run build

# Expose the port of the front-end application
EXPOSE 5173

# Run the front-end application in preview mode, bound to all interfaces
CMD ["npm", "run", "preview", "--", "--port", "5173", "--host", "0.0.0.0"]