diff --git a/.lintrunner.toml b/.lintrunner.toml
index d0c9c6aef6a..396b7fde5ac 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -367,7 +367,7 @@ exclude_patterns = [
     '**/third-party/**',
     'scripts/check_binary_dependencies.py',
     'profiler/test/test_profiler_e2e.py',
-    'backends/arm/test/**',
+    'backends/arm/test/ops/*.py',
 ]
 command = [
     'python',
diff --git a/.mypy.ini b/.mypy.ini
index e01392a0dfd..0ce444e8a79 100644
--- a/.mypy.ini
+++ b/.mypy.ini
@@ -24,11 +24,14 @@ files =
     test,
     util
 
-mypy_path = executorch
+mypy_path = executorch,src
 
 [mypy-executorch.backends.*]
 follow_untyped_imports = True
 
+[mypy-backends.arm.*]
+disallow_untyped_decorators = False
+
 [mypy-executorch.codegen.*]
 follow_untyped_imports = True
 
diff --git a/backends/arm/test/models/stable_diffusion/test_SD3Transformer2DModel.py b/backends/arm/test/models/stable_diffusion/test_SD3Transformer2DModel.py
index 9506fe727db..3e1f19dd39c 100644
--- a/backends/arm/test/models/stable_diffusion/test_SD3Transformer2DModel.py
+++ b/backends/arm/test/models/stable_diffusion/test_SD3Transformer2DModel.py
@@ -7,7 +7,9 @@
 from typing import Tuple
 
 import torch
-from diffusers.models.transformers import SD3Transformer2DModel
+from diffusers.models.transformers import (  # type: ignore[import-not-found]
+    SD3Transformer2DModel,
+)
 
 from executorch.backends.arm.test import common
 from executorch.backends.arm.test.models.stable_diffusion.stable_diffusion_module_test_configs import (
diff --git a/backends/arm/test/models/stable_diffusion/test_vae_AutoencoderKL.py b/backends/arm/test/models/stable_diffusion/test_vae_AutoencoderKL.py
index a3c3a018131..5d33576a817 100644
--- a/backends/arm/test/models/stable_diffusion/test_vae_AutoencoderKL.py
+++ b/backends/arm/test/models/stable_diffusion/test_vae_AutoencoderKL.py
@@ -7,8 +7,12 @@
 from typing import Tuple
 
 import torch
-from diffusers.models.autoencoders import AutoencoderKL
-from diffusers.utils.testing_utils import floats_tensor
+from diffusers.models.autoencoders import (  # type: ignore[import-not-found]
+    AutoencoderKL,
+)
+from diffusers.utils.testing_utils import (  # type: ignore[import-not-found]
+    floats_tensor,
+)
 
 from executorch.backends.arm.test import common
 from executorch.backends.arm.test.models.stable_diffusion.stable_diffusion_module_test_configs import (
diff --git a/backends/arm/test/models/test_nss.py b/backends/arm/test/models/test_nss.py
index 82187d2b479..5f7db548109 100644
--- a/backends/arm/test/models/test_nss.py
+++ b/backends/arm/test/models/test_nss.py
@@ -19,7 +19,9 @@
 
 from huggingface_hub import hf_hub_download
 
-from ng_model_gym.usecases.nss.model.model_blocks import AutoEncoderV1
+from ng_model_gym.usecases.nss.model.model_blocks import (  # type: ignore[import-not-found,import-untyped]
+    AutoEncoderV1,
+)
 
 input_t = Tuple[torch.Tensor]  # Input x
 
diff --git a/backends/arm/test/passes/test_fuse_duplicate_users_pass.py b/backends/arm/test/passes/test_fuse_duplicate_users_pass.py
index a7e80794015..ffe56e72691 100644
--- a/backends/arm/test/passes/test_fuse_duplicate_users_pass.py
+++ b/backends/arm/test/passes/test_fuse_duplicate_users_pass.py
@@ -3,7 +3,7 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
-from typing import Tuple
+from typing import Dict, Tuple
 
 import torch
 from executorch.backends.arm._passes import FuseDuplicateUsersPass
@@ -13,7 +13,12 @@
 input_t = Tuple[torch.Tensor]  # Input x
 
 
-class FuseaAvgPool(torch.nn.Module):
+class ModuleWithOps(torch.nn.Module):
+    ops_before_pass: Dict[str, int]
+    ops_after_pass: Dict[str, int]
+
+
+class FuseaAvgPool(ModuleWithOps):
     ops_before_pass = {
         "executorch_exir_dialects_edge__ops_aten_avg_pool2d_default": 3,
     }
@@ -27,7 +32,7 @@ def forward(self, x):
         return self.avg(x) + self.avg(x) + self.avg(x)
 
 
-class FuseAvgPoolChain(torch.nn.Module):
+class FuseAvgPoolChain(ModuleWithOps):
     ops_before_pass = {
         "executorch_exir_dialects_edge__ops_aten_avg_pool2d_default": 6,
     }
@@ -44,14 +49,14 @@ def forward(self, x):
         return first + second + third
 
 
-modules = {
+modules: Dict[str, ModuleWithOps] = {
     "fuse_avg_pool": FuseaAvgPool(),
     "fuse_avg_pool_chain": FuseAvgPoolChain(),
 }
 
 
 @common.parametrize("module", modules)
-def test_fuse_duplicate_ops_FP(module: torch.nn.Module):
+def test_fuse_duplicate_ops_FP(module: ModuleWithOps):
     pipeline = PassPipeline[input_t](
         module=module,
         test_data=(torch.ones(1, 1, 1, 1),),
diff --git a/backends/arm/test/runner_utils.py b/backends/arm/test/runner_utils.py
index 2c76af1b779..5e22a54d393 100644
--- a/backends/arm/test/runner_utils.py
+++ b/backends/arm/test/runner_utils.py
@@ -36,7 +36,7 @@
 from torch.fx.node import Node
 from torch.overrides import TorchFunctionMode
 
-from tosa.TosaGraph import TosaGraph  # type: ignore[import-untyped]
+from tosa.TosaGraph import TosaGraph  # type: ignore[import-not-found, import-untyped]
 
 logger = logging.getLogger(__name__)
 
@@ -762,7 +762,7 @@ def run_tosa_graph(
     inputs_np = [torch_tensor_to_numpy(input_tensor) for input_tensor in inputs]
 
     if isinstance(tosa_version, Tosa_1_00):
-        import tosa_reference_model as reference_model  # type: ignore[import-untyped]
+        import tosa_reference_model as reference_model  # type: ignore[import-not-found, import-untyped]
 
         debug_mode = "ALL" if logger.getEffectiveLevel() <= logging.DEBUG else None
         outputs_np, status = reference_model.run(