Python script to print available execution providers and devices:
"""Print the available ONNX Runtime execution providers and OpenVINO devices."""
# Requires: pip install onnxruntime-openvino
import onnxruntime as ort


def log_onnx_info() -> None:
    """Print the available ONNX Runtime providers and, when the OpenVINO
    execution provider is present, the OpenVINO device IDs (e.g. CPU, GPU).
    """
    providers: list[str] = ort.get_available_providers()
    print(f"Onnx providers: {providers}")
    if "OpenVINOExecutionProvider" not in providers:
        return
    try:
        # Internal pybind binding — only shipped by the onnxruntime-openvino
        # wheel, so guard the import rather than the whole lookup.
        from onnxruntime.capi.onnxruntime_pybind11_state import (
            get_available_openvino_device_ids,
        )
    except ImportError:
        print("OpenVINOExecutionProvider is available but get_available_openvino_device_ids not found")
        return
    available_devices: list[str] = get_available_openvino_device_ids()
    print(f"OpenVINO devices: {available_devices}")


if __name__ == "__main__":
    log_onnx_info()
To access the iGPU on the host machine:
sudo apt install clinfo intel-opencl-icd intel-gpu-tools
sudo usermod -a -G render $LOGNAME
Re-login and you are ready to use the iGPU for inference, either through OpenVINO directly or through onnxruntime-openvino.
For docker:
requirements.txt should contain onnxruntime-openvino
# Minimal image for ONNX Runtime inference on an Intel iGPU via the
# OpenVINO execution provider (the container needs /dev/dri passed in).
FROM python:3.12-slim-bookworm

# intel-opencl-icd is required for OpenCL access to the iGPU.
# htop, clinfo and intel-gpu-tools are optional diagnostic tools.
ENV DEBIAN_FRONTEND=noninteractive
# --no-install-recommends keeps the slim image small; clean up apt
# metadata in the same layer so it does not persist in the image.
RUN apt-get update && \
    apt-get install -y --no-install-recommends htop clinfo intel-opencl-icd intel-gpu-tools && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Copy and install requirements first so this layer is cached across
# source-code changes.
COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

CMD ["python", "main.py"]
# docker-compose service exposing the host's Intel iGPU to the container.
services:
app:
build:
context: .
dockerfile: Dockerfile
# Pass the DRM device nodes (/dev/dri/card*, /dev/dri/renderD*) through.
devices:
- /dev/dri:/dev/dri
# Supplementary group so the container process may open the device nodes.
# NOTE(review): on many hosts /dev/dri/renderD* is owned by the "render"
# group, not "video" (the host setup above uses `usermod -a -G render`) —
# verify which group owns the nodes on your host.
group_add:
- video
Top comments (0)