Test examples (#306)
IlyasMoutawwakil authored Dec 10, 2024
1 parent 24f4531 commit a2700a8
Showing 56 changed files with 324 additions and 245 deletions.
17 changes: 15 additions & 2 deletions .github/workflows/test_api_cpu.yaml
@@ -47,8 +47,21 @@ jobs:
pip install -e .[testing,timm,diffusers,codecarbon]
- name: Run tests
run: |
pytest tests/test_api.py -s -k "api and cpu"
env:
HF_TOKEN: ${{ secrets.HF_TOKEN }}
PUSH_REPO_ID: optimum-benchmark/cpu
run: |
pytest tests/test_api.py -s -k "api and cpu"

# no examples for now
# - if: ${{
# (github.event_name == 'push') ||
# (github.event_name == 'workflow_dispatch') ||
# contains( github.event.pull_request.labels.*.name, 'examples')
# }}
# name: Run examples
# run: |
# pytest tests/test_examples.py -s -k "api and cpu"
# env:
# HF_TOKEN: ${{ secrets.HF_TOKEN }}
# PUSH_REPO_ID: optimum-benchmark/cpu
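
Note: the hunk above interleaves the removed and re-added lines without +/- markers. One plausible assembly of the reworked step, with the env block attached to the same "Run tests" step as its run command (the key order within the step is inferred and does not affect GitHub Actions semantics):

```yaml
# Plausible post-change shape of the step, assembled from the hunk above
# (not copied verbatim from the file); env and run are sibling keys of one step.
- name: Run tests
  env:
    HF_TOKEN: ${{ secrets.HF_TOKEN }}
    PUSH_REPO_ID: optimum-benchmark/cpu
  run: |
    pytest tests/test_api.py -s -k "api and cpu"
```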
15 changes: 14 additions & 1 deletion .github/workflows/test_api_cuda.yaml
@@ -45,8 +45,21 @@ jobs:
pip install -e .[testing,timm,diffusers,codecarbon]
- name: Run tests
run: |
pytest tests/test_api.py -x -s -k "api and cuda"
env:
HF_TOKEN: ${{ secrets.HF_TOKEN }}
PUSH_REPO_ID: optimum-benchmark/cuda

- if: ${{
(github.event_name == 'push') ||
(github.event_name == 'workflow_dispatch') ||
contains( github.event.pull_request.labels.*.name, 'examples')
}}
name: Run examples
run: |
pytest tests/test_api.py -x -s -k "api and cuda"
pip install -e .[testing,torchao,autoawq,auto-gptq]
pytest tests/test_examples.py -x -s -k "api and cuda and pytorch"
env:
HF_TOKEN: ${{ secrets.HF_TOKEN }}
PUSH_REPO_ID: optimum-benchmark/cuda
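
The core of this change is the new label-gated step. One plausible assembly of it from the hunk above (the attachment and ordering of the env block relative to run is inferred):

```yaml
# Plausible assembly of the new conditional step: it runs only on pushes, manual
# dispatches, or PRs carrying the "examples" label, installs the extra
# quantization backends, then runs the example tests for this device/backend.
- if: ${{
    (github.event_name == 'push') ||
    (github.event_name == 'workflow_dispatch') ||
    contains( github.event.pull_request.labels.*.name, 'examples')
  }}
  name: Run examples
  env:
    HF_TOKEN: ${{ secrets.HF_TOKEN }}
    PUSH_REPO_ID: optimum-benchmark/cuda
  run: |
    pip install -e .[testing,torchao,autoawq,auto-gptq]
    pytest tests/test_examples.py -x -s -k "api and cuda and pytorch"
```

The same gate (push, workflow_dispatch, or an "examples" PR label) recurs, enabled or commented out, throughout the workflows below.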
4 changes: 2 additions & 2 deletions .github/workflows/test_api_misc.yaml
@@ -58,8 +58,8 @@ jobs:
UV_SYSTEM_PYTHON: 1

- name: Run tests
run: |
pytest tests/test_api.py -s -k "api and not (cpu or cuda or rocm or mps)"
env:
HF_TOKEN: ${{ secrets.HF_TOKEN }}
PUSH_REPO_ID: optimum-benchmark/misc-${{ matrix.os }}-${{ matrix.python }}
run: |
pytest tests/test_api.py -s -k "api and not (cpu or cuda or rocm or mps)"
15 changes: 8 additions & 7 deletions .github/workflows/test_cli_cpu_ipex.yaml
@@ -36,16 +36,17 @@ jobs:
- name: Checkout
uses: actions/checkout@v4

- name: Set up Python 3.10
uses: actions/setup-python@v5
with:
python-version: "3.10"

- name: Install requirements
run: |
pip install --upgrade pip
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
pip install -e .[testing,ipex,diffusers,timm]
- name: Run tests
run: pytest tests/test_cli.py -s -k "cli and cpu and ipex"

- if: ${{
(github.event_name == 'push') ||
(github.event_name == 'workflow_dispatch') ||
contains( github.event.pull_request.labels.*.name, 'examples')
}}
name: Run examples
run: pytest tests/test_examples.py -s -k "cli and cpu and ipex"
10 changes: 9 additions & 1 deletion .github/workflows/test_cli_cpu_llama_cpp.yaml
@@ -48,4 +48,12 @@ jobs:
pip install -e .[testing,llama-cpp]
- name: Run tests
run: pytest tests/test_cli.py -s -k "llama_cpp"
run: pytest tests/test_cli.py -s -k "cli and cpu and llama_cpp"

- if: ${{
(github.event_name == 'push') ||
(github.event_name == 'workflow_dispatch') ||
contains( github.event.pull_request.labels.*.name, 'examples')
}}
name: Run examples
run: pytest tests/test_examples.py -s -k "cli and cpu and llama_cpp"
8 changes: 8 additions & 0 deletions .github/workflows/test_cli_cpu_onnxruntime.yaml
@@ -49,3 +49,11 @@ jobs:
- name: Run tests
run: pytest tests/test_cli.py -s -k "cli and cpu and onnxruntime"

- if: ${{
(github.event_name == 'push') ||
(github.event_name == 'workflow_dispatch') ||
contains( github.event.pull_request.labels.*.name, 'examples')
}}
name: Run examples
run: pytest tests/test_examples.py -s -k "cli and cpu and onnxruntime"
14 changes: 8 additions & 6 deletions .github/workflows/test_cli_cpu_openvino.yaml
@@ -36,16 +36,18 @@ jobs:
- name: Checkout
uses: actions/checkout@v4

- name: Set up Python 3.10
uses: actions/setup-python@v5
with:
python-version: "3.10"

- name: Install requirements
run: |
pip install --upgrade pip
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
pip install -e .[testing,openvino,diffusers,timm]
- name: Run tests
run: pytest tests/test_cli.py -s -k "cli and cpu and openvino"

- if: ${{
(github.event_name == 'push') ||
(github.event_name == 'workflow_dispatch') ||
contains( github.event.pull_request.labels.*.name, 'examples')
}}
name: Run examples
run: pytest tests/test_examples.py -s -k "cli and cpu and openvino"
11 changes: 10 additions & 1 deletion .github/workflows/test_cli_cpu_py_txi.yaml
@@ -45,7 +45,16 @@ jobs:
run: |
pip install --upgrade pip
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
pip install -e .[testing,py-txi]
pip install -e .[testing,py-txi] git+https://github.com/IlyasMoutawwakil/py-txi.git
- name: Run tests
run: pytest tests/test_cli.py -s -k "cli and cpu and py_txi"

# no examples for now
# - if: ${{
# (github.event_name == 'push') ||
# (github.event_name == 'workflow_dispatch') ||
# contains( github.event.pull_request.labels.*.name, 'examples')
# }}
# name: Run examples
# run: pytest tests/test_examples.py -s -k "cli and cpu and (tgi or tei)"
9 changes: 9 additions & 0 deletions .github/workflows/test_cli_cpu_pytorch.yaml
@@ -49,3 +49,12 @@ jobs:
- name: Run tests
run: pytest tests/test_cli.py -s -k "cli and cpu and pytorch"

# no examples for now
# - if: ${{
# (github.event_name == 'push') ||
# (github.event_name == 'workflow_dispatch') ||
# contains( github.event.pull_request.labels.*.name, 'examples')
# }}
# name: Run examples
# run: pytest tests/test_examples.py -s -k "cli and cpu and pytorch"
9 changes: 9 additions & 0 deletions .github/workflows/test_cli_cuda_onnxruntime.yaml
@@ -48,3 +48,12 @@ jobs:
- name: Run tests
run: |
pytest tests/test_cli.py -x -s -k "cli and cuda and onnxruntime"
# no examples for now
# - if: ${{
# (github.event_name == 'push') ||
# (github.event_name == 'workflow_dispatch') ||
# contains( github.event.pull_request.labels.*.name, 'examples')
# }}
# name: Run examples
# run: pytest tests/test_examples.py -x -s -k "cli and cuda and onnxruntime"
10 changes: 9 additions & 1 deletion .github/workflows/test_cli_cuda_py_txi.yaml
@@ -45,7 +45,15 @@ jobs:
- name: Install requirements
run: |
pip install --upgrade pip
pip install -e .[testing,py-txi]
pip install -e .[testing,py-txi] git+https://github.com/IlyasMoutawwakil/py-txi.git
- name: Run tests
run: pytest tests/test_cli.py -x -s -k "cli and cuda and py_txi"

- if: ${{
(github.event_name == 'push') ||
(github.event_name == 'workflow_dispatch') ||
contains( github.event.pull_request.labels.*.name, 'examples')
}}
name: Run examples
run: pytest tests/test_examples.py -x -s -k "cli and cuda and (tgi or tei)"
8 changes: 8 additions & 0 deletions .github/workflows/test_cli_cuda_pytorch.yaml
@@ -50,6 +50,14 @@ jobs:
run: |
pytest tests/test_cli.py -x -s -k "cli and cuda and pytorch and not (dp or ddp or device_map or deepspeed)"
- if: ${{
(github.event_name == 'push') ||
(github.event_name == 'workflow_dispatch') ||
contains( github.event.pull_request.labels.*.name, 'examples')
}}
name: Run examples
run: pytest tests/test_examples.py -x -s -k "cli and cuda and pytorch"

run_cli_cuda_pytorch_multi_gpu_tests:
if: ${{
(github.event_name == 'push') ||
10 changes: 10 additions & 0 deletions .github/workflows/test_cli_cuda_tensorrt_llm.yaml
@@ -50,6 +50,16 @@ jobs:
run: |
pytest tests/test_cli.py -x -s -k "cli and cuda and tensorrt_llm and not (tp or pp)"
- if: ${{
(github.event_name == 'push') ||
(github.event_name == 'workflow_dispatch') ||
contains( github.event.pull_request.labels.*.name, 'examples')
}}
name: Run examples
run: |
huggingface-cli delete-cache
pytest tests/test_examples.py -x -s -k "cli and cuda and trt"
cli_cuda_tensorrt_llm_multi_gpu_tests:
if: ${{
(github.event_name == 'push') ||
15 changes: 11 additions & 4 deletions .github/workflows/test_cli_cuda_torch_ort.yaml
@@ -44,13 +44,21 @@ jobs:

- name: Install dependencies
run: |
pip install -e .[testing,torch-ort,peft]
pip install optimum@git+https://github.com/huggingface/optimum.git
pip install -e .[testing,torch-ort,peft] optimum@git+https://github.com/huggingface/optimum.git@fxi-ort-trainer
- name: Run tests
run: |
pytest tests/test_cli.py -x -s -k "cli and cuda and torch_ort and not (dp or ddp or device_map) and not (peft)"
# - if: ${{
# (github.event_name == 'push') ||
# (github.event_name == 'workflow_dispatch') ||
# contains( github.event.pull_request.labels.*.name, 'examples')
# }}
# name: Run examples
# run: |
# pytest tests/test_examples.py -x -s -k "cli and cuda and torch_ort"

run_cli_cuda_torch_ort_multi_gpu_tests:
if: ${{
(github.event_name == 'push') ||
@@ -75,8 +83,7 @@

- name: Install dependencies
run: |
pip install -e .[testing,torch-ort,peft]
pip install optimum@git+https://github.com/huggingface/optimum.git
pip install -e .[testing,torch-ort,peft] optimum@git+https://github.com/huggingface/optimum.git@fxi-ort-trainer
- name: Run tests
run: |
9 changes: 9 additions & 0 deletions .github/workflows/test_cli_cuda_vllm.yaml
@@ -50,6 +50,15 @@ jobs:
run: |
FORCE_SEQUENTIAL=1 pytest tests/test_cli.py -x -s -k "cli and cuda and vllm and not (tp or pp)"
- if: ${{
(github.event_name == 'push') ||
(github.event_name == 'workflow_dispatch') ||
contains( github.event.pull_request.labels.*.name, 'examples')
}}
name: Run examples
run: |
pytest tests/test_examples.py -x -s -k "cli and cuda and vllm"
run_cli_cuda_vllm_multi_gpu_tests:
if: ${{
(github.event_name == 'push') ||
@@ -20,13 +20,11 @@ concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}

jobs:
run_cli_energy_star_tests:
run_energy_star:
if: ${{
(github.event_name == 'push') ||
(github.event_name == 'workflow_dispatch') ||
contains( github.event.pull_request.labels.*.name, 'cli') ||
contains( github.event.pull_request.labels.*.name, 'energy_star') ||
contains( github.event.pull_request.labels.*.name, 'cli_energy_star')
contains( github.event.pull_request.labels.*.name, 'energy_star')
}}

runs-on:
14 files renamed without changes.
19 changes: 10 additions & 9 deletions examples/ipex_bert.yaml → examples/cpu_ipex_bert.yaml
@@ -6,24 +6,25 @@ defaults:
- _base_
- _self_

name: ipex_bert
name: cpu_ipex_bert

launcher:
numactl: true
numactl_kwargs:
cpunodebind: 0
membind: 0

backend:
device: cpu
export: true
no_weights: false # because on multi-node machines, initializing weights could harm performance
torch_dtype: float32 # but use bfloat16 on compatible Intel CPUs
model: google-bert/bert-base-uncased

scenario:
latency: true
memory: true
latency: true

input_shapes:
batch_size: 1
sequence_length: 128

backend:
device: cpu
no_weights: false
export: true
torch_dtype: bfloat16
model: bert-base-uncased
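
As a usage sketch (not part of this commit), a renamed config like this would typically be launched through the Hydra-based optimum-benchmark CLI; the entry point and config resolution below are assumptions:

```yaml
# Hypothetical step running the renamed example locally or in CI (assumed CLI
# entry point; the commit itself only renames and retunes the config).
- name: Run cpu_ipex_bert example
  run: |
    optimum-benchmark --config-dir examples/ --config-name cpu_ipex_bert
```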
25 changes: 11 additions & 14 deletions examples/ipex_llama.yaml → examples/cpu_ipex_llama.yaml
@@ -6,32 +6,29 @@ defaults:
- _base_
- _self_

name: ipex_llama
name: cpu_ipex_llama

launcher:
numactl: true
numactl_kwargs:
cpunodebind: 0
membind: 0

backend:
device: cpu
export: true
no_weights: false # because on multi-node machines, initializing weights could harm performance
torch_dtype: float32 # but use bfloat16 on compatible Intel CPUs
model: TinyLlama/TinyLlama-1.1B-Chat-v1.0

scenario:
latency: true
memory: true
latency: true

warmup_runs: 10
iterations: 10
duration: 10

input_shapes:
batch_size: 1
sequence_length: 256
sequence_length: 64

generate_kwargs:
max_new_tokens: 32
min_new_tokens: 32

backend:
device: cpu
export: true
no_weights: false
torch_dtype: bfloat16
model: TinyLlama/TinyLlama-1.1B-Chat-v1.0
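
The inline comment suggests switching to bfloat16 on compatible Intel CPUs. A hedged sketch of doing that without editing the file, using a Hydra-style dotted override on the assumed CLI entry point:

```yaml
# Hypothetical override run (not part of this commit): keeps float32 as the
# committed default but benchmarks bfloat16 on CPUs that support it.
- name: Run cpu_ipex_llama example (bfloat16)
  run: |
    optimum-benchmark --config-dir examples/ --config-name cpu_ipex_llama backend.torch_dtype=bfloat16
```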