
Commit 82bca21

Pytest precommit mark removal

1 parent 3cc61a5 · commit 82bca21

26 files changed: 15 additions, 208 deletions

Jenkinsfile

Lines changed: 0 additions & 3 deletions

@@ -8,9 +8,6 @@ properties([
     booleanParam(defaultValue: true,
         description: 'Whether to propagate commit status to GitHub',
         name: 'propagateStatus'),
-    booleanParam(defaultValue: false,
-        description: 'If true, forces running pre-commit scope',
-        name: 'forceRunPrecommitScope'),
     string(defaultValue: '',
         description: 'Pipeline shared library version (branch/tag/commit). Determined automatically if empty',
         name: 'library_version')

pyproject.toml

Lines changed: 0 additions & 5 deletions

@@ -59,8 +59,3 @@ requires = [
     "cmake~=3.24.0; platform_system == 'Darwin' and platform_machine == 'arm64'",
 ]
 build-backend = "py_build_cmake.build"
-
-[tool.pytest.ini_options]
-markers = [
-    "precommit: (deselect with '-m \"precommit\"')",
-]

src/cpp/src/lora/adapter.cpp

Lines changed: 3 additions & 4 deletions

@@ -100,10 +100,9 @@ struct AutoSafetensor: public safetensors_File {
 ConstantMap safetensor_to_constant_map(const ov::Tensor& safetensor) {
     AutoSafetensor safe_tensors_file{};

-    // Intentionally discard constness as safetensors_file_init requires a non-const pointer (used as read-only)
-    auto data_ptr = const_cast<char*>(safetensor.data<char>());
-    OPENVINO_ASSERT(safetensors_file_init(data_ptr, safetensor.get_byte_size(), &safe_tensors_file) == nullptr,
-        "Cannot parse safetensor as a Safetensors file format. Safetensors file format is supported only");
+    OPENVINO_ASSERT(safetensors_file_init(safetensor.data<char>(), safetensor.get_byte_size(), &safe_tensors_file) == nullptr,
+        "Cannot parse safetensor as a Safetensors file format. Safetensors file format is supported only"
+    );

     ConstantMap tensors;
     for (int i = 0; i < safe_tensors_file.num_tensors; i++) {

src/cpp/src/speculative_decoding/speculative_decoding_stateful.cpp

Lines changed: 1 addition & 1 deletion

@@ -338,7 +338,7 @@ std::variant<int64_t, std::vector<int64_t>>

     auto sample_token = [&](const ov::Tensor& logits, std::size_t idx) {
         size_t sequence_offset = idx * vocab_size;
-        const float* logits_data = logits.data<float>() + sequence_offset;
+        float* logits_data = logits.data<float>() + sequence_offset;
         return std::max_element(logits_data, logits_data + vocab_size) - logits_data;
     };
tests/python_tests/README.md

Lines changed: 6 additions & 6 deletions

@@ -14,29 +14,29 @@ pip install -r tests/python_tests/requirements.txt
 ## Run Tests

 ```sh
-python -m pytest tests/python_tests/ -m precommit
+python -m pytest tests/python_tests/
 ```

 If you have built GenAI library by yourself instead of using wheel please set `PYTHONPATH` so that test could find library, e.g.
 ```sh
-PYTHONPATH=$PYTHONPATH:.../openvino.genai/build-Release/ python -m pytest tests/python_tests/ -m precommit
+PYTHONPATH=$PYTHONPATH:.../openvino.genai/build-Release/ python -m pytest tests/python_tests/
 ```

 ## Customize tests run

-Tests have `precommit` set of models. `precommit` contains lightweight models which can be quickly inferred. If you wish to run specific tests, you can use `-k` option, for example to run only multibatch and chat tests:
+Tests have different sets of models for different purposes. If you wish to run specific tests, you can use `-k` option, for example to run only multibatch and chat tests:
 ```sh
-python -m pytest tests/python_tests/ -m precommit -k "test_multibatch and test_chat"
+python -m pytest tests/python_tests/ -k "test_multibatch and test_chat"
 ```

 If you wish to run all tests except beam search do the following:
 ```sh
-python -m pytest tests/python_tests/ -m precommit -k "not test_beam_search"
+python -m pytest tests/python_tests/ -k "not test_beam_search"
 ```

 Argument `--model_ids` can be used to run tests selectively only for specific models. HF model ids should be separated by space, e.g:
 ```sh
-python -m pytest tests/python_tests/ -m precommit -k "test_multibatch" --model_ids "TinyLlama/TinyLlama-1.1B-Chat-v1.0 Qwen/Qwen2-0.5B-Instruct"
+python -m pytest tests/python_tests/ -k "test_multibatch" --model_ids "TinyLlama/TinyLlama-1.1B-Chat-v1.0 Qwen/Qwen2-0.5B-Instruct"
 ```

 List of currently supported models can be found in tests/python_tests/models.py:get_models_list

tests/python_tests/conftest.py

Lines changed: 0 additions & 2 deletions

@@ -53,6 +53,4 @@ def pytest_addoption(parser):


 def pytest_configure(config: pytest.Config):
-    marker = "precommit" if config.getoption("-m") == "precommit" else None
-    pytest.run_marker = marker
     pytest.selected_model_ids = config.getoption("--model_ids", default=None)
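With the marker bookkeeping removed, `pytest_configure` now only records the `--model_ids` option on the `pytest` module. A minimal sketch of how a models helper could consume that attribute; the function body and the default model list are stand-ins for illustration, not the repository's actual `get_models_list`:

```python
# Hypothetical sketch: consuming the pytest.selected_model_ids attribute
# set in pytest_configure above. DEFAULT_MODELS is an illustrative stand-in.
import pytest

DEFAULT_MODELS = ["facebook/opt-125m", "Qwen/Qwen2-0.5B-Instruct"]

def get_models_list():
    # --model_ids "id1 id2" narrows the default set; when the option is
    # absent, selected_model_ids is None and the full default list is used.
    selected = getattr(pytest, "selected_model_ids", None)
    if selected is None:
        return DEFAULT_MODELS
    requested = set(selected.split())
    return [m for m in DEFAULT_MODELS if m in requested]
```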
File renamed without changes.

tests/python_tests/pytest.ini

Lines changed: 3 additions & 3 deletions

@@ -2,8 +2,8 @@

 markers =
     ; The following markers are defined for categorizing tests:
-    ; precommit - Tests that should be run before committing code.
     ; real_models - Tests that involve execution of the models from models/real_models file
+    ; nightly - Tests that should only run in nightly builds (uses large models or long running)
     ; samples - Tests related to the sample models.
     ; llm - Tests related to large language models.
     ; whisper - Tests related to the Whisper model.

@@ -12,8 +12,8 @@ markers =
     ; vlm - Tests related to the VLM model.
     ; rag - Tests related to the RAG components.
     ; speech_generation - Tests related to text-to-speech generation
-    precommit
     real_models
+    nightly
     samples
     llm
     whisper

@@ -24,4 +24,4 @@ markers =
     rag
     speech_generation

-addopts = -m precommit
+addopts = -m "not real_models and not nightly"

tests/python_tests/samples/test_text2speech.py

Lines changed: 0 additions & 2 deletions

@@ -32,7 +32,6 @@ def teardown_class(self):

 @pytest.mark.speech_generation
 @pytest.mark.samples
-@pytest.mark.precommit
 @pytest.mark.parametrize("convert_model", ["tiny-random-SpeechT5ForTextToSpeech"], indirect=True)
 @pytest.mark.parametrize("input_prompt", ["Hello everyone"])
 def test_sample_text_to_speech(self, convert_model, input_prompt):

@@ -54,7 +53,6 @@ def test_sample_text_to_speech(self, convert_model, input_prompt):

 @pytest.mark.speech_generation
 @pytest.mark.samples
-@pytest.mark.precommit
 @pytest.mark.parametrize("convert_model", ["tiny-random-SpeechT5ForTextToSpeech"], indirect=True)
 @pytest.mark.parametrize("input_prompt", ["Test text to speech without speaker embedding file"])
 def test_sample_text_to_speech_no_speaker_embedding_file(self, convert_model, input_prompt):

tests/python_tests/test_continuous_batching.py

Lines changed: 2 additions & 14 deletions

@@ -36,9 +36,8 @@ def read_models_list(file_name: str):
         models.append(model_name)
     return models

-@pytest.mark.precommit
-@pytest.mark.parametrize("model_id", read_models_list(os.path.join(os.path.dirname(os.path.realpath(__file__)), "models", "precommit")))
-def test_e2e_precommit(model_id):
+@pytest.mark.parametrize("model_id", read_models_list(os.path.join(os.path.dirname(os.path.realpath(__file__)), "models", "lightweight")))
+def test_e2e_lightweight_models(model_id):
     prompts, generation_configs = get_test_dataset()
     generate_and_compare(prompts=prompts,
                          generation_config=generation_configs,

@@ -73,7 +72,6 @@ def test_e2e_real_models(model_id):
 ]
 @pytest.mark.parametrize("generation_config", test_configs)
 @pytest.mark.parametrize("prompt", batched_prompts[1:]) # num_beams=15 diverges on the first prompt.
-@pytest.mark.precommit
 @pytest.mark.skip(reason="CVS-162891: Fix test_continuous_batching_vs_stateful tests after we started to compare cb vs sdpa")
 def test_continuous_batching_vs_stateful(prompt, generation_config):
     model_id = "facebook/opt-125m"

@@ -93,7 +91,6 @@ def test_continuous_batching_vs_stateful(prompt, generation_config):

 prompts = ['The Sun is yellow because', 'Difference between Jupiter and Mars is that', 'table is made of']
 @pytest.mark.parametrize("prompt", prompts)
-@pytest.mark.precommit
 def test_cb_streamer_vs_return_vs_stateful(prompt):
     model_id = "facebook/opt-125m"
     _, _, models_path = download_and_convert_model(model_id)

@@ -124,7 +121,6 @@ def test_cb_streamer_vs_return_vs_stateful(prompt):
 @pytest.mark.parametrize("input_type", [
     GenerationChatInputsType.STRING,
     GenerationChatInputsType.CHAT_HISTORY])
-@pytest.mark.precommit
 def test_chat_scenario_vs_stateful(model_id, generation_config_kwargs: dict, pipeline_type, input_type: GenerationChatInputsType):
     _, _, models_path = download_and_convert_model(model_id)

@@ -175,7 +171,6 @@ def test_chat_scenario_vs_stateful(model_id, generation_config_kwargs: dict, pip
 @pytest.mark.parametrize("generation_config_kwargs", generation_configs)
 @pytest.mark.parametrize("model_id", get_chat_models_list())
 @pytest.mark.parametrize("pipeline_type", [PipelineType.CONTINUOUS_BATCHING, PipelineType.SPECULATIVE_DECODING, PipelineType.PROMPT_LOOKUP_DECODING,])
-@pytest.mark.precommit
 def test_continuous_batching_add_request_health_check(model_id, generation_config_kwargs: dict, pipeline_type):
     _, _, models_path = download_and_convert_model(model_id)

@@ -206,7 +201,6 @@ def test_continuous_batching_add_request_health_check(model_id, generation_confi
 @pytest.mark.parametrize("generation_config_kwargs", invalid_generation_configs)
 @pytest.mark.parametrize("model_id", get_chat_models_list())
 @pytest.mark.parametrize("pipeline_type", [PipelineType.CONTINUOUS_BATCHING, PipelineType.SPECULATIVE_DECODING, PipelineType.PROMPT_LOOKUP_DECODING,])
-@pytest.mark.precommit
 def test_continuous_batching_add_request_fails(model_id, generation_config_kwargs: dict, pipeline_type):
     _, _, models_path = download_and_convert_model(model_id)

@@ -228,7 +222,6 @@ def test_continuous_batching_add_request_fails(model_id, generation_config_kwarg
 #

 # todo: iefode: bug reproducer!!!
-@pytest.mark.precommit
 @pytest.mark.parametrize("sampling_config", [get_greedy(), get_beam_search(), get_multinomial_all_parameters()],
                          ids=["greedy", "beam_search", "multinomial_all_parameters"])
 def test_post_oom_health(sampling_config):

@@ -289,7 +282,6 @@ def get_beam_search_seq_len_300() -> GenerationConfig:
     ({"num_kv_blocks": 100, "dynamic_split_fuse": True}, get_beam_search_seq_len_300()),
     ({"num_kv_blocks": 100, "dynamic_split_fuse": False}, get_beam_search_seq_len_300())]
 @pytest.mark.parametrize("params", scheduler_params_list)
-@pytest.mark.precommit
 def test_preemption(params):
     model_id = "facebook/opt-125m"
     scheduler_params = params[0]

@@ -342,7 +334,6 @@ def test_preemption(params):

 # todo: Anastasiia Pnevskaya: fix the test because it is hanging according max_new_tokens = std::numeric_limits<std::size_t>::max()
 @pytest.mark.parametrize("dynamic_split_fuse", [True, False])
-@pytest.mark.precommit
 @pytest.mark.skip(reason="Random sampling results are non deterministic due to: discrete_distribution impl depends on platform, model inference results may depend on CPU. Test passes on CI but fails locally.")
 def test_preemption_with_multinomial(dynamic_split_fuse):
     generation_configs = multinomial_params.generation_config

@@ -425,7 +416,6 @@ def test_preemption_with_multinomial(dynamic_split_fuse):


 @pytest.mark.parametrize("dynamic_split_fuse", [True, False])
-@pytest.mark.precommit
 @pytest.mark.skip(reason="Random sampling results are non deterministic due to: discrete_distribution impl depends on platform, model inference results may depend on CPU. Test passes on CI but fails locally.")
 def test_preemption_with_multinomial_n_seq(dynamic_split_fuse):
     model_id : str = "facebook/opt-125m"

@@ -442,7 +432,6 @@ def test_preemption_with_multinomial_n_seq(dynamic_split_fuse):


 @pytest.mark.parametrize("pipeline_type", [PipelineType.PROMPT_LOOKUP_DECODING])
-@pytest.mark.precommit
 def test_dynamic_split_fuse_doesnt_affect_generated_text(pipeline_type):
     model_id : str = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
     _, _, models_path = download_and_convert_model(model_id)

@@ -496,7 +485,6 @@ def run_extended_perf_metrics_collection(model_id, generation_config: Generation


 @pytest.mark.parametrize("pipeline_type", [PipelineType.PAGED_ATTENTION, PipelineType.SPECULATIVE_DECODING])
-@pytest.mark.precommit
 def test_speculative_decoding_extended_perf_metrics(pipeline_type):
     import time
     start_time = time.perf_counter()
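After the rename, the old `-m precommit` entry point is gone; the lightweight e2e tests are selected like any other test, for example by name with `-k`. A sketch of an equivalent programmatic invocation, assuming it is run from the repository root:

```python
# Assumed invocation (run from the repository root): select the renamed
# lightweight e2e test by name via pytest's -k expression.
import pytest

pytest.main([
    "tests/python_tests/test_continuous_batching.py",
    "-k", "test_e2e_lightweight_models",
])
```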
