
Commit fc152d2

Pre-commit test mark removal
1 parent 565e6e4 commit fc152d2

23 files changed (+9, -192 lines)

Jenkinsfile

Lines changed: 0 additions & 3 deletions
@@ -8,9 +8,6 @@ properties([
     booleanParam(defaultValue: true,
                  description: 'Whether to propagate commit status to GitHub',
                  name: 'propagateStatus'),
-    booleanParam(defaultValue: false,
-                 description: 'If true, forces running pre-commit scope',
-                 name: 'forceRunPrecommitScope'),
     string(defaultValue: '',
            description: 'Pipeline shared library version (branch/tag/commit). Determined automatically if empty',
            name: 'library_version')

pyproject.toml

Lines changed: 0 additions & 1 deletion
@@ -63,5 +63,4 @@ build-backend = "py_build_cmake.build"
 
 [tool.pytest.ini_options]
 markers = [
-    "precommit: (deselect with '-m \"precommit\"')",
 ]

tests/python_tests/README.md

Lines changed: 6 additions & 6 deletions
@@ -14,29 +14,29 @@ pip install -r tests/python_tests/requirements.txt
 ## Run Tests
 
 ```sh
-python -m pytest tests/python_tests/ -m precommit
+python -m pytest tests/python_tests/
 ```
 
 If you have built GenAI library by yourself instead of using wheel please set `PYTHONPATH` so that test could find library, e.g.
 ```sh
-PYTHONPATH=$PYTHONPATH:.../openvino.genai/build-Release/ python -m pytest tests/python_tests/ -m precommit
+PYTHONPATH=$PYTHONPATH:.../openvino.genai/build-Release/ python -m pytest tests/python_tests/
 ```
 
 ## Customize tests run
 
-Tests have `precommit` set of models. `precommit` contains lightweight models which can be quickly inferred. If you wish to run specific tests, you can use `-k` option, for example to run only multibatch and chat tests:
+Tests have different sets of models for different purposes. If you wish to run specific tests, you can use `-k` option, for example to run only multibatch and chat tests:
 ```sh
-python -m pytest tests/python_tests/ -m precommit -k "test_multibatch and test_chat"
+python -m pytest tests/python_tests/ -k "test_multibatch and test_chat"
 ```
 
 If you wish to run all tests except beam search do the following:
 ```sh
-python -m pytest tests/python_tests/ -m precommit -k "not test_beam_search"
+python -m pytest tests/python_tests/ -k "not test_beam_search"
 ```
 
 Argument `--model_ids` can be used to run tests selectively only for specific models. HF model ids should be separated by space, e.g:
 ```sh
-python -m pytest tests/python_tests/ -m precommit -k "test_multibatch" --model_ids "TinyLlama/TinyLlama-1.1B-Chat-v1.0 Qwen/Qwen2-0.5B-Instruct"
+python -m pytest tests/python_tests/ -k "test_multibatch" --model_ids "TinyLlama/TinyLlama-1.1B-Chat-v1.0 Qwen/Qwen2-0.5B-Instruct"
 ```
 
 List of currently supported models can be found in tests/python_tests/models.py:get_models_list
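For reference, a minimal sketch of how that model list could be printed when choosing values for `--model_ids` (this assumes `get_models_list()` takes no arguments and returns plain HF model id strings, and that the script is run from tests/python_tests/; the actual signature lives in models.py):

```python
# Hypothetical helper: print every model id the test suite knows about so a
# suitable value for --model_ids can be copied from the output.
# Assumes get_models_list() returns an iterable of HF model id strings.
from models import get_models_list

if __name__ == "__main__":
    for model_id in get_models_list():
        print(model_id)
```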

tests/python_tests/conftest.py

Lines changed: 1 addition & 2 deletions
@@ -53,6 +53,5 @@ def pytest_addoption(parser):
 
 
 def pytest_configure(config: pytest.Config):
-    marker = "precommit" if config.getoption("-m") == "precommit" else None
-    pytest.run_marker = marker
+    pytest.run_marker = None
     pytest.selected_model_ids = config.getoption("--model_ids", default=None)
File renamed without changes.
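For context, the `--model_ids` option read back in `pytest_configure` above is registered in `pytest_addoption`; a minimal sketch of how that pairing typically looks (the option registration and its help text are assumptions, only the `pytest_configure` body is taken from the diff):

```python
import pytest

def pytest_addoption(parser):
    # Register the CLI option later consumed in pytest_configure.
    # The option name matches the diff; the help text is illustrative.
    parser.addoption("--model_ids", default=None,
                     help="Space-separated HF model ids to restrict the run to")

def pytest_configure(config: pytest.Config):
    # After this commit the run marker is always unset; only --model_ids
    # still narrows model selection.
    pytest.run_marker = None
    pytest.selected_model_ids = config.getoption("--model_ids", default=None)
```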

tests/python_tests/pytest.ini

Lines changed: 0 additions & 4 deletions
@@ -2,7 +2,6 @@
 
 markers =
     ; The following markers are defined for categorizing tests:
-    ; precommit - Tests that should be run before committing code.
     ; real_models - Tests that involve execution of the models from models/real_models file
     ; samples - Tests related to the sample models.
     ; llm - Tests related to large language models.
@@ -12,7 +11,6 @@ markers =
     ; vlm - Tests related to the VLM model.
     ; rag - Tests related to the RAG components.
     ; speech_generation - Tests related to text-to-speech generation
-    precommit
     real_models
     samples
     llm
@@ -23,5 +21,3 @@ markers =
     agent
     rag
     speech_generation
-
-addopts = -m precommit
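With `addopts = -m precommit` removed, a plain run collects every test; the remaining registered markers still allow opt-in or opt-out selection. A small illustrative sketch (the test itself is hypothetical, only the marker name comes from the list above):

```python
import pytest

@pytest.mark.real_models
def test_generation_on_real_model():
    # Placeholder body; real tests of this kind live in tests/python_tests/.
    assert True

# Select only such tests with:  python -m pytest -m real_models
# Exclude them with:            python -m pytest -m "not real_models"
```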

tests/python_tests/samples/test_text2speech.py

Lines changed: 0 additions & 2 deletions
@@ -32,7 +32,6 @@ def teardown_class(self):
 
     @pytest.mark.speech_generation
     @pytest.mark.samples
-    @pytest.mark.precommit
     @pytest.mark.parametrize("convert_model", ["tiny-random-SpeechT5ForTextToSpeech"], indirect=True)
     @pytest.mark.parametrize("input_prompt", ["Hello everyone"])
     def test_sample_text_to_speech(self, convert_model, input_prompt):
@@ -54,7 +53,6 @@ def test_sample_text_to_speech(self, convert_model, input_prompt):
 
     @pytest.mark.speech_generation
     @pytest.mark.samples
-    @pytest.mark.precommit
     @pytest.mark.parametrize("convert_model", ["tiny-random-SpeechT5ForTextToSpeech"], indirect=True)
     @pytest.mark.parametrize("input_prompt", ["Test text to speech without speaker embedding file"])
     def test_sample_text_to_speech_no_speaker_embedding_file(self, convert_model, input_prompt):

tests/python_tests/test_continuous_batching.py

Lines changed: 2 additions & 14 deletions
@@ -36,9 +36,8 @@ def read_models_list(file_name: str):
         models.append(model_name)
     return models
 
-@pytest.mark.precommit
-@pytest.mark.parametrize("model_id", read_models_list(os.path.join(os.path.dirname(os.path.realpath(__file__)), "models", "precommit")))
-def test_e2e_precommit(model_id):
+@pytest.mark.parametrize("model_id", read_models_list(os.path.join(os.path.dirname(os.path.realpath(__file__)), "models", "lightweight")))
+def test_e2e_lightweight_models(model_id):
     prompts, generation_configs = get_test_dataset()
     generate_and_compare(prompts=prompts,
                          generation_config=generation_configs,
@@ -73,7 +72,6 @@ def test_e2e_real_models(model_id):
 ]
 @pytest.mark.parametrize("generation_config", test_configs)
 @pytest.mark.parametrize("prompt", batched_prompts[1:]) # num_beams=15 diverges on the first prompt.
-@pytest.mark.precommit
 @pytest.mark.skip(reason="CVS-162891: Fix test_continuous_batching_vs_stateful tests after we started to compare cb vs sdpa")
 def test_continuous_batching_vs_stateful(prompt, generation_config):
     model_id = "facebook/opt-125m"
@@ -93,7 +91,6 @@ def test_continuous_batching_vs_stateful(prompt, generation_config):
 
 prompts = ['The Sun is yellow because', 'Difference between Jupiter and Mars is that', 'table is made of']
 @pytest.mark.parametrize("prompt", prompts)
-@pytest.mark.precommit
 def test_cb_streamer_vs_return_vs_stateful(prompt):
     model_id = "facebook/opt-125m"
     _, _, models_path = download_and_convert_model(model_id)
@@ -124,7 +121,6 @@ def test_cb_streamer_vs_return_vs_stateful(prompt):
 @pytest.mark.parametrize("input_type", [
     GenerationChatInputsType.STRING,
     GenerationChatInputsType.CHAT_HISTORY])
-@pytest.mark.precommit
 def test_chat_scenario_vs_stateful(model_id, generation_config_kwargs: dict, pipeline_type, input_type: GenerationChatInputsType):
     _, _, models_path = download_and_convert_model(model_id)
 
@@ -175,7 +171,6 @@ def test_chat_scenario_vs_stateful(model_id, generation_config_kwargs: dict, pip
 @pytest.mark.parametrize("generation_config_kwargs", generation_configs)
 @pytest.mark.parametrize("model_id", get_chat_models_list())
 @pytest.mark.parametrize("pipeline_type", [PipelineType.CONTINUOUS_BATCHING, PipelineType.SPECULATIVE_DECODING, PipelineType.PROMPT_LOOKUP_DECODING,])
-@pytest.mark.precommit
 def test_continuous_batching_add_request_health_check(model_id, generation_config_kwargs: dict, pipeline_type):
     _, _, models_path = download_and_convert_model(model_id)
 
@@ -206,7 +201,6 @@ def test_continuous_batching_add_request_health_check(model_id, generation_confi
 @pytest.mark.parametrize("generation_config_kwargs", invalid_generation_configs)
 @pytest.mark.parametrize("model_id", get_chat_models_list())
 @pytest.mark.parametrize("pipeline_type", [PipelineType.CONTINUOUS_BATCHING, PipelineType.SPECULATIVE_DECODING, PipelineType.PROMPT_LOOKUP_DECODING,])
-@pytest.mark.precommit
 def test_continuous_batching_add_request_fails(model_id, generation_config_kwargs: dict, pipeline_type):
     _, _, models_path = download_and_convert_model(model_id)
 
@@ -228,7 +222,6 @@ def test_continuous_batching_add_request_fails(model_id, generation_config_kwarg
 #
 
 # todo: iefode: bug reproducer!!!
-@pytest.mark.precommit
 @pytest.mark.parametrize("sampling_config", [get_greedy(), get_beam_search(), get_multinomial_all_parameters()],
                          ids=["greedy", "beam_search", "multinomial_all_parameters"])
 def test_post_oom_health(sampling_config):
@@ -289,7 +282,6 @@ def get_beam_search_seq_len_300() -> GenerationConfig:
     ({"num_kv_blocks": 100, "dynamic_split_fuse": True}, get_beam_search_seq_len_300()),
     ({"num_kv_blocks": 100, "dynamic_split_fuse": False}, get_beam_search_seq_len_300())]
 @pytest.mark.parametrize("params", scheduler_params_list)
-@pytest.mark.precommit
 def test_preemption(params):
     model_id = "facebook/opt-125m"
     scheduler_params = params[0]
@@ -342,7 +334,6 @@ def test_preemption(params):
 
 # todo: Anastasiia Pnevskaya: fix the test because it is hanging according max_new_tokens = std::numeric_limits<std::size_t>::max()
 @pytest.mark.parametrize("dynamic_split_fuse", [True, False])
-@pytest.mark.precommit
 @pytest.mark.skip(reason="Random sampling results are non deterministic due to: discrete_distribution impl depends on platform, model inference results may depend on CPU. Test passes on CI but fails locally.")
 def test_preemption_with_multinomial(dynamic_split_fuse):
     generation_configs = multinomial_params.generation_config
@@ -425,7 +416,6 @@ def test_preemption_with_multinomial(dynamic_split_fuse):
 
 
 @pytest.mark.parametrize("dynamic_split_fuse", [True, False])
-@pytest.mark.precommit
 @pytest.mark.skip(reason="Random sampling results are non deterministic due to: discrete_distribution impl depends on platform, model inference results may depend on CPU. Test passes on CI but fails locally.")
 def test_preemption_with_multinomial_n_seq(dynamic_split_fuse):
     model_id : str = "facebook/opt-125m"
@@ -442,7 +432,6 @@ def test_preemption_with_multinomial_n_seq(dynamic_split_fuse):
 
 
 @pytest.mark.parametrize("pipeline_type", [PipelineType.PROMPT_LOOKUP_DECODING])
-@pytest.mark.precommit
 def test_dynamic_split_fuse_doesnt_affect_generated_text(pipeline_type):
     model_id : str = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
     _, _, models_path = download_and_convert_model(model_id)
@@ -496,7 +485,6 @@ def run_extended_perf_metrics_collection(model_id, generation_config: Generation
 
 
 @pytest.mark.parametrize("pipeline_type", [PipelineType.PAGED_ATTENTION, PipelineType.SPECULATIVE_DECODING])
-@pytest.mark.precommit
 def test_speculative_decoding_extended_perf_metrics(pipeline_type):
     import time
     start_time = time.perf_counter()
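The renamed `test_e2e_lightweight_models` at the top of this file's diff keeps the existing pattern of parametrizing over model ids read from a plain text file, now named `lightweight` instead of `precommit`. A minimal sketch of that pattern, assuming one HF model id per line with `#` comments allowed (the real parsing is in `read_models_list`, which the diff only shows in part):

```python
import os
import pytest

def read_models_list(file_name: str):
    # Assumed file format: one HF model id per line; blank lines and
    # '#' comments are skipped.
    models = []
    with open(file_name) as f:
        for line in f:
            model_name = line.strip()
            if not model_name or model_name.startswith("#"):
                continue
            models.append(model_name)
    return models

@pytest.mark.parametrize(
    "model_id",
    read_models_list(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                  "models", "lightweight")))
def test_e2e_lightweight_models(model_id):
    # Each listed model id becomes an independent test case.
    assert model_id
```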

tests/python_tests/test_generation_config.py

Lines changed: 0 additions & 4 deletions
@@ -61,7 +61,6 @@ def verify_set_values(generation_config, kwargs):
     dict(max_new_tokens=1, apply_chat_template=False),
 ]
 @pytest.mark.parametrize("generation_config_kwargs", configs)
-@pytest.mark.precommit
 def test_valid_configs(generation_config_kwargs):
     config = GenerationConfig(**generation_config_kwargs)
     verify_set_values(config, generation_config_kwargs)
@@ -107,7 +106,6 @@ def test_valid_configs(generation_config_kwargs):
     # TODO: add tests for invalid properties
 ]
 @pytest.mark.parametrize("generation_config_kwargs", invalid_configs)
-@pytest.mark.precommit
 def test_invalid_generation_configs_throws(generation_config_kwargs):
     config = GenerationConfig(**generation_config_kwargs)
     with pytest.raises(RuntimeError):
@@ -123,7 +121,6 @@ def test_invalid_generation_configs_throws(generation_config_kwargs):
     dict(eos_token_id=1), # 'stop_token_ids' does not contain 'eos_token_id'
     dict(eos_token_id=1, stop_token_ids={2}), # 'stop_token_ids' is not empty, but does not contain 'eos_token_id'
 ])
-@pytest.mark.precommit
 def test_invalid_fields_assinment_rises(fields):
     config = GenerationConfig()
     for key, val in fields.items():
@@ -147,7 +144,6 @@ def load_genai_generation_config_from_file(configs: list[tuple], temp_path):
 
     return ov_generation_config
 
-@pytest.mark.precommit
 def test_multiple_eos_are_read_as_stop_token_ids(tmp_path):
     generation_config_json = {
         "eos_token_id": [

tests/python_tests/test_gguf_reader.py

Lines changed: 0 additions & 3 deletions
@@ -18,7 +18,6 @@
 
 @pytest.mark.parametrize("pipeline_type", get_gguf_pipeline_types())
 @pytest.mark.parametrize("model_ids", get_gguf_model_list())
-@pytest.mark.precommit
 @pytest.mark.skipif(sys.platform == "win32", reason="CVS-174065")
 def test_pipelines_with_gguf_generate(pipeline_type, model_ids):
     if sys.platform == 'darwin':
@@ -69,7 +68,6 @@ def test_pipelines_with_gguf_generate(pipeline_type, model_ids):
     '<|endoftext|><|endoftext|><|im_end|>',
     '<|endoftext|> Why the Sky is Blue? <|im_end|>',
 ])
-@pytest.mark.precommit
 @pytest.mark.skipif(sys.platform == "win32", reason="CVS-174065")
 def test_full_gguf_pipeline(pipeline_type, model_ids, enable_save_ov_model, prompt):
     if sys.platform == 'darwin':
@@ -129,7 +127,6 @@ def test_full_gguf_pipeline(pipeline_type, model_ids, enable_save_ov_model, prom
 @pytest.mark.parametrize("pipeline_type", get_gguf_pipeline_types())
 @pytest.mark.parametrize("model_ids", [{"gguf_model_id": "Qwen/Qwen3-0.6B-GGUF", "gguf_filename": "Qwen3-0.6B-Q8_0.gguf"}])
 @pytest.mark.xfail(condition=(sys.platform == "darwin"), reason="Ticket - 172335")
-@pytest.mark.precommit
 @pytest.mark.skipif(sys.platform == "win32", reason="CVS-174065")
 def test_full_gguf_qwen3_pipeline(pipeline_type, model_ids):
     # Temporal testing solution until transformers starts to support qwen3 in GGUF format
