80 changes: 40 additions & 40 deletions .github/workflows/linux.yml
@@ -508,49 +508,49 @@ jobs:
fail-fast: false
matrix:
test:
- name: 'Whisper'
# TODO: skip some tests temporary untill https://github.com/huggingface/datasets/issues/7647 dataset is fixed
cmd: 'python -m pytest -v ./tests/python_tests/test_whisper_pipeline.py ./tests/python_tests/test_whisper_pipeline_static.py -k "not test_smoke[sample_from_dataset0 and not test_whisper_constructors[sample_from_dataset0 and not test_max_new_tokens[sample_from_dataset0 and not test_language_mode[language and not test_task_mode[sample_from_dataset0 and not test_language_autodetect[sample_from_dataset0 and not test_whisper_config_constructor and not test_language_autodetect[sample_from_dataset1 and not test_language_autodetect[sample_from_dataset2 and not test_initial_prompt_hotwords[sample_from_dataset0 and not test_random_sampling[sample_from_dataset0"'
run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).whisper.test }}
timeout: 45
- name: 'Cacheopt E2E (Part 1)'
cmd: 'python -m pytest -v ./tests/python_tests/test_kv_cache_eviction/test_kv_cache_eviction_1.py'
run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).continuous_batching.test }}
timeout: 180
- name: 'Cacheopt E2E (Part 2)'
cmd: 'python -m pytest -v ./tests/python_tests/test_kv_cache_eviction/test_kv_cache_eviction_2.py'
run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).continuous_batching.test }}
timeout: 360
- name: 'LLM & VLM'
cmd: 'python -m pytest -v ./tests/python_tests/test_llm_pipeline.py tests/python_tests/test_llm_pipeline_static.py ./tests/python_tests/test_vlm_pipeline.py tests/python_tests/test_structured_output.py --override-ini cache_dir=/mount/caches/pytest/'
run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).visual_language.test || fromJSON(needs.smart_ci.outputs.affected_components).LLM.test }}
timeout: 180
- name: 'GGUF Reader tests'
cmd: 'python -m pytest -v ./tests/python_tests/test_gguf_reader.py'
run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).GGUF.test || fromJSON(needs.smart_ci.outputs.affected_components).LLM.test }}
timeout: 360
- name: 'Tokenizer tests'
cmd: 'python -m pytest -v ./tests/python_tests/test_tokenizer.py'
run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).tokenizers.test }}
timeout: 60
- name: 'API tests'
cmd: 'python -m pytest -v ./tests/python_tests/test_continuous_batching.py ./tests/python_tests/test_generation_config.py ./tests/python_tests/test_sampling.py ./tests/python_tests/test_text_streamer.py'
run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).continuous_batching.test || fromJSON(needs.smart_ci.outputs.affected_components).sampling.test || fromJSON(needs.smart_ci.outputs.affected_components).text_streamer.test }}
timeout: 60
# - name: 'Whisper'
# # TODO: skip some tests temporary untill https://github.com/huggingface/datasets/issues/7647 dataset is fixed
# cmd: 'python -m pytest -v ./tests/python_tests/test_whisper_pipeline.py ./tests/python_tests/test_whisper_pipeline_static.py -k "not test_smoke[sample_from_dataset0 and not test_whisper_constructors[sample_from_dataset0 and not test_max_new_tokens[sample_from_dataset0 and not test_language_mode[language and not test_task_mode[sample_from_dataset0 and not test_language_autodetect[sample_from_dataset0 and not test_whisper_config_constructor and not test_language_autodetect[sample_from_dataset1 and not test_language_autodetect[sample_from_dataset2 and not test_initial_prompt_hotwords[sample_from_dataset0 and not test_random_sampling[sample_from_dataset0"'
# run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).whisper.test }}
# timeout: 45
# - name: 'Cacheopt E2E (Part 1)'
# cmd: 'python -m pytest -v ./tests/python_tests/test_kv_cache_eviction/test_kv_cache_eviction_1.py'
# run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).continuous_batching.test }}
# timeout: 180
# - name: 'Cacheopt E2E (Part 2)'
# cmd: 'python -m pytest -v ./tests/python_tests/test_kv_cache_eviction/test_kv_cache_eviction_2.py'
# run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).continuous_batching.test }}
# timeout: 360
# - name: 'LLM & VLM'
# cmd: 'python -m pytest -v ./tests/python_tests/test_llm_pipeline.py tests/python_tests/test_llm_pipeline_static.py ./tests/python_tests/test_vlm_pipeline.py tests/python_tests/test_structured_output.py --override-ini cache_dir=/mount/caches/pytest/'
# run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).visual_language.test || fromJSON(needs.smart_ci.outputs.affected_components).LLM.test }}
# timeout: 180
# - name: 'GGUF Reader tests'
# cmd: 'python -m pytest -v ./tests/python_tests/test_gguf_reader.py'
# run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).GGUF.test || fromJSON(needs.smart_ci.outputs.affected_components).LLM.test }}
# timeout: 360
# - name: 'Tokenizer tests'
# cmd: 'python -m pytest -v ./tests/python_tests/test_tokenizer.py'
# run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).tokenizers.test }}
# timeout: 60
# - name: 'API tests'
# cmd: 'python -m pytest -v ./tests/python_tests/test_continuous_batching.py ./tests/python_tests/test_generation_config.py ./tests/python_tests/test_sampling.py ./tests/python_tests/test_text_streamer.py'
# run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).continuous_batching.test || fromJSON(needs.smart_ci.outputs.affected_components).sampling.test || fromJSON(needs.smart_ci.outputs.affected_components).text_streamer.test }}
# timeout: 60
- name: 'Rag tests'
cmd: 'python -m pytest -v ./tests/python_tests/test_rag.py'
cmd: 'pip list && python -m pytest -s -v ./tests/python_tests/test_rag.py -k test_qwen3_rerank_documents'
Copilot AI commented on Nov 6, 2025:

Workflow configuration has been modified to only run specific tests. This change should be reverted before merging to ensure full test coverage in CI.

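For reference, a minimal sketch of what the 'Rag tests' entry would look like with the temporary narrowing reverted, reconstructed from the removed cmd above and the unchanged run_condition/timeout lines below:

# Sketch only: 'Rag tests' matrix entry without the temporary pip list prefix and -k filter
- name: 'Rag tests'
  cmd: 'python -m pytest -v ./tests/python_tests/test_rag.py'
  run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).RAG.test }}
  timeout: 30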
run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).RAG.test }}
timeout: 30
- name: 'WWB tests'
cmd: 'python -m pytest -v ./tools/who_what_benchmark/tests -m "not nanollava"'
run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).WWB.test }}
timeout: 120
- name: 'WWB tests (nanollava)'
cmd: |
python -m pip install transformers==4.48.0
python -m pytest -v ./tools/who_what_benchmark/tests -m nanollava
run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).WWB.test }}
timeout: 90
# - name: 'WWB tests'
# cmd: 'python -m pytest -v ./tools/who_what_benchmark/tests -m "not nanollava"'
# run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).WWB.test }}
# timeout: 120
# - name: 'WWB tests (nanollava)'
# cmd: |
# python -m pip install transformers==4.48.0
# python -m pytest -v ./tools/who_what_benchmark/tests -m nanollava
# run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).WWB.test }}
# timeout: 90
defaults:
run:
shell: bash
80 changes: 40 additions & 40 deletions .github/workflows/manylinux_2_28.yml
@@ -446,49 +446,49 @@ jobs:
fail-fast: false
matrix:
test:
- name: 'Whisper'
# TODO: skip some tests temporary untill https://github.com/huggingface/datasets/issues/7647 dataset is fixed
cmd: 'python -m pytest -v ./tests/python_tests/test_whisper_pipeline.py ./tests/python_tests/test_whisper_pipeline_static.py -k "not test_smoke[sample_from_dataset0 and not test_whisper_constructors[sample_from_dataset0 and not test_max_new_tokens[sample_from_dataset0 and not test_language_mode[language and not test_task_mode[sample_from_dataset0 and not test_language_autodetect[sample_from_dataset0 and not test_whisper_config_constructor and not test_language_autodetect[sample_from_dataset1 and not test_language_autodetect[sample_from_dataset2 and not test_initial_prompt_hotwords[sample_from_dataset0 and not test_random_sampling[sample_from_dataset0"'
run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).whisper.test }}
timeout: 120
- name: 'Cacheopt E2E (Part 1)'
cmd: 'python -m pytest -v ./tests/python_tests/test_kv_cache_eviction/test_kv_cache_eviction_1.py'
run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).continuous_batching.test }}
timeout: 180
- name: 'Cacheopt E2E (Part 2)'
cmd: 'python -m pytest -v ./tests/python_tests/test_kv_cache_eviction/test_kv_cache_eviction_2.py'
run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).continuous_batching.test }}
timeout: 360
- name: 'LLM & VLM'
cmd: 'python -m pytest -v ./tests/python_tests/test_llm_pipeline.py ./tests/python_tests/test_llm_pipeline_static.py ./tests/python_tests/test_vlm_pipeline.py ./tests/python_tests/test_structured_output.py --override-ini cache_dir=/mount/caches/pytest/'
run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).visual_language.test || fromJSON(needs.smart_ci.outputs.affected_components).LLM.test }}
timeout: 180
- name: 'GGUF Reader tests'
cmd: 'python -m pytest -v ./tests/python_tests/test_gguf_reader.py'
run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).GGUF.test || fromJSON(needs.smart_ci.outputs.affected_components).LLM.test }}
timeout: 360
- name: 'Tokenizer tests'
cmd: 'python -m pytest -v ./tests/python_tests/test_tokenizer.py'
run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).tokenizers.test }}
timeout: 60
- name: 'API tests'
cmd: 'python -m pytest -v ./tests/python_tests/test_continuous_batching.py ./tests/python_tests/test_generation_config.py ./tests/python_tests/test_sampling.py ./tests/python_tests/test_text_streamer.py'
run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).continuous_batching.test || fromJSON(needs.smart_ci.outputs.affected_components).sampling.test || fromJSON(needs.smart_ci.outputs.affected_components).text_streamer.test }}
timeout: 60
# - name: 'Whisper'
# # TODO: skip some tests temporary untill https://github.com/huggingface/datasets/issues/7647 dataset is fixed
# cmd: 'python -m pytest -v ./tests/python_tests/test_whisper_pipeline.py ./tests/python_tests/test_whisper_pipeline_static.py -k "not test_smoke[sample_from_dataset0 and not test_whisper_constructors[sample_from_dataset0 and not test_max_new_tokens[sample_from_dataset0 and not test_language_mode[language and not test_task_mode[sample_from_dataset0 and not test_language_autodetect[sample_from_dataset0 and not test_whisper_config_constructor and not test_language_autodetect[sample_from_dataset1 and not test_language_autodetect[sample_from_dataset2 and not test_initial_prompt_hotwords[sample_from_dataset0 and not test_random_sampling[sample_from_dataset0"'
# run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).whisper.test }}
# timeout: 120
# - name: 'Cacheopt E2E (Part 1)'
# cmd: 'python -m pytest -v ./tests/python_tests/test_kv_cache_eviction/test_kv_cache_eviction_1.py'
# run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).continuous_batching.test }}
# timeout: 180
# - name: 'Cacheopt E2E (Part 2)'
# cmd: 'python -m pytest -v ./tests/python_tests/test_kv_cache_eviction/test_kv_cache_eviction_2.py'
# run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).continuous_batching.test }}
# timeout: 360
# - name: 'LLM & VLM'
# cmd: 'python -m pytest -v ./tests/python_tests/test_llm_pipeline.py ./tests/python_tests/test_llm_pipeline_static.py ./tests/python_tests/test_vlm_pipeline.py ./tests/python_tests/test_structured_output.py --override-ini cache_dir=/mount/caches/pytest/'
# run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).visual_language.test || fromJSON(needs.smart_ci.outputs.affected_components).LLM.test }}
# timeout: 180
# - name: 'GGUF Reader tests'
# cmd: 'python -m pytest -v ./tests/python_tests/test_gguf_reader.py'
# run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).GGUF.test || fromJSON(needs.smart_ci.outputs.affected_components).LLM.test }}
# timeout: 360
# - name: 'Tokenizer tests'
# cmd: 'python -m pytest -v ./tests/python_tests/test_tokenizer.py'
# run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).tokenizers.test }}
# timeout: 60
# - name: 'API tests'
# cmd: 'python -m pytest -v ./tests/python_tests/test_continuous_batching.py ./tests/python_tests/test_generation_config.py ./tests/python_tests/test_sampling.py ./tests/python_tests/test_text_streamer.py'
# run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).continuous_batching.test || fromJSON(needs.smart_ci.outputs.affected_components).sampling.test || fromJSON(needs.smart_ci.outputs.affected_components).text_streamer.test }}
# timeout: 60
- name: 'Rag tests'
cmd: 'python -m pytest -v ./tests/python_tests/test_rag.py'
cmd: 'python -m pytest -s -v ./tests/python_tests/test_rag.py -k test_qwen3_rerank_documents'
Copilot AI commented on Nov 6, 2025:

Workflow configuration has been modified to only run specific tests. This change should be reverted before merging to ensure full test coverage in CI.

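As in linux.yml above, a minimal sketch of the reverted entry, reconstructed from the removed cmd and the unchanged context lines below:

# Sketch only: 'Rag tests' matrix entry restored to the full test file
- name: 'Rag tests'
  cmd: 'python -m pytest -v ./tests/python_tests/test_rag.py'
  run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).RAG.test }}
  timeout: 30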
run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).RAG.test }}
timeout: 30
- name: 'WWB tests'
cmd: 'python -m pytest -v ./tools/who_what_benchmark/tests -m "not nanollava"'
run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).WWB.test }}
timeout: 120
- name: 'WWB tests (nanollava)'
cmd: |
python -m pip install transformers==4.48.0
python -m pytest -v ./tools/who_what_benchmark/tests -m nanollava
run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).WWB.test }}
timeout: 90
# - name: 'WWB tests'
# cmd: 'python -m pytest -v ./tools/who_what_benchmark/tests -m "not nanollava"'
# run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).WWB.test }}
# timeout: 120
# - name: 'WWB tests (nanollava)'
# cmd: |
# python -m pip install transformers==4.48.0
# python -m pytest -v ./tools/who_what_benchmark/tests -m nanollava
# run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).WWB.test }}
# timeout: 90
defaults:
run:
shell: bash