-
Notifications
You must be signed in to change notification settings - Fork 2.7k
Description
Is there an existing issue for this problem?
- I have searched the existing issues
Install method
Invoke's Launcher
Operating system
Windows
GPU vendor
Nvidia (CUDA)
GPU model
RTX 4070 Ti Super
GPU VRAM
16GB
Version number
v6.9.0
Browser
Arc (Chromium)
System Information
{
"version": "6.9.0",
"dependencies": {
"absl-py" : "2.3.1",
"accelerate" : "1.10.1",
"annotated-types" : "0.7.0",
"anyio" : "4.11.0",
"asttokens" : "3.0.0",
"attrs" : "25.4.0",
"bidict" : "0.23.1",
"bitsandbytes" : "0.48.1",
"blake3" : "1.0.8",
"certifi" : "2022.12.7",
"cffi" : "2.0.0",
"charset-normalizer" : "2.1.1",
"click" : "8.3.0",
"colorama" : "0.4.6",
"coloredlogs" : "15.0.1",
"compel" : "2.1.1",
"contourpy" : "1.3.3",
"CUDA" : "12.8",
"cycler" : "0.12.1",
"decorator" : "5.2.1",
"Deprecated" : "1.2.18",
"diffusers" : "0.33.0",
"diskcache" : "5.6.3",
"dnspython" : "2.8.0",
"dynamicprompts" : "0.31.0",
"einops" : "0.8.1",
"executing" : "2.2.0",
"fastapi" : "0.118.3",
"fastapi-events" : "0.12.2",
"filelock" : "3.19.1",
"flatbuffers" : "25.9.23",
"fonttools" : "4.60.1",
"fsspec" : "2025.9.0",
"gguf" : "0.17.1",
"h11" : "0.16.0",
"httptools" : "0.7.1",
"huggingface-hub" : "0.35.3",
"humanfriendly" : "10.0",
"idna" : "3.4",
"importlib_metadata" : "7.1.0",
"inquirerpy" : "0.3.4",
"invisible-watermark" : "0.2.0",
"InvokeAI" : "6.9.0",
"ipython" : "9.1.0",
"ipython_pygments_lexers": "1.1.1",
"jax" : "0.7.1",
"jaxlib" : "0.7.1",
"jedi" : "0.19.2",
"Jinja2" : "3.1.6",
"kiwisolver" : "1.4.9",
"linkify-it-py" : "2.0.3",
"llama_cpp_python" : "0.3.9",
"markdown-it-py" : "3.0.0",
"MarkupSafe" : "2.1.5",
"matplotlib" : "3.10.7",
"matplotlib-inline" : "0.1.7",
"mdit-py-plugins" : "0.4.2",
"mdurl" : "0.1.2",
"mediapipe" : "0.10.14",
"ml_dtypes" : "0.5.3",
"mpmath" : "1.3.0",
"networkx" : "3.5",
"numpy" : "1.26.3",
"onnx" : "1.16.1",
"onnxruntime" : "1.19.2",
"opencv-contrib-python" : "4.11.0.86",
"opencv-python" : "4.11.0.86",
"opt_einsum" : "3.4.0",
"packaging" : "24.1",
"parso" : "0.8.4",
"pfzy" : "0.3.4",
"picklescan" : "0.0.31",
"pillow" : "11.3.0",
"platformdirs" : "4.3.8",
"prompt_toolkit" : "3.0.52",
"protobuf" : "4.25.8",
"psutil" : "7.1.1",
"pure_eval" : "0.2.3",
"pycparser" : "2.23",
"pydantic" : "2.12.3",
"pydantic-settings" : "2.11.0",
"pydantic_core" : "2.41.4",
"Pygments" : "2.19.1",
"pyparsing" : "3.2.5",
"PyPatchMatch" : "1.0.2",
"pyreadline3" : "3.5.4",
"python-dateutil" : "2.9.0.post0",
"python-dotenv" : "1.1.1",
"python-engineio" : "4.12.3",
"python-multipart" : "0.0.20",
"python-socketio" : "5.14.2",
"PyWavelets" : "1.9.0",
"PyYAML" : "6.0.3",
"regex" : "2025.9.18",
"requests" : "2.28.1",
"rich" : "14.0.0",
"safetensors" : "0.6.2",
"scipy" : "1.16.2",
"semver" : "3.0.4",
"sentencepiece" : "0.2.0",
"setuptools" : "70.2.0",
"simple-websocket" : "1.1.0",
"six" : "1.17.0",
"sniffio" : "1.3.1",
"sounddevice" : "0.5.3",
"spandrel" : "0.4.1",
"stack-data" : "0.6.3",
"starlette" : "0.48.0",
"sympy" : "1.14.0",
"textual" : "3.3.0",
"tokenizers" : "0.22.1",
"torch" : "2.7.1+cu128",
"torchsde" : "0.2.6",
"torchvision" : "0.22.1+cu128",
"tqdm" : "4.66.5",
"traitlets" : "5.14.3",
"trampoline" : "0.1.2",
"transformers" : "4.57.1",
"typing-inspection" : "0.4.2",
"typing_extensions" : "4.15.0",
"uc-micro-py" : "1.0.3",
"urllib3" : "1.26.13",
"uvicorn" : "0.38.0",
"watchfiles" : "1.1.1",
"wcwidth" : "0.2.14",
"websockets" : "15.0.1",
"wrapt" : "1.17.3",
"wsproto" : "1.2.0",
"zipp" : "3.19.2"
},
"config": {
"schema_version": "4.0.2",
"legacy_models_yaml_path": null,
"host": "0.0.0.0",
"port": 9090,
"allow_origins": [],
"allow_credentials": true,
"allow_methods": ["*"],
"allow_headers": ["*"],
"ssl_certfile": null,
"ssl_keyfile": null,
"log_tokenization": true,
"patchmatch": true,
"models_dir": "models",
"convert_cache_dir": "models\\.convert_cache",
"download_cache_dir": "models\\.download_cache",
"legacy_conf_dir": "configs",
"db_dir": "databases",
"outputs_dir": "outputs",
"custom_nodes_dir": "nodes",
"style_presets_dir": "style_presets",
"workflow_thumbnails_dir": "workflow_thumbnails",
"log_handlers": ["console"],
"log_format": "color",
"log_level": "info",
"log_sql": false,
"log_level_network": "warning",
"use_memory_db": false,
"dev_reload": false,
"profile_graphs": false,
"profile_prefix": null,
"profiles_dir": "profiles",
"max_cache_ram_gb": null,
"max_cache_vram_gb": null,
"log_memory_usage": true,
"device_working_mem_gb": 4,
"enable_partial_loading": true,
"keep_ram_copy_of_weights": true,
"ram": null,
"vram": null,
"lazy_offload": true,
"pytorch_cuda_alloc_conf": "backend:cudaMallocAsync",
"device": "auto",
"precision": "auto",
"sequential_guidance": false,
"attention_type": "auto",
"attention_slice_size": "auto",
"force_tiled_decode": false,
"pil_compress_level": 6,
"max_queue_size": 10000,
"clear_queue_on_startup": false,
"allow_nodes": null,
"deny_nodes": null,
"node_cache_size": 512,
"hashing_algorithm": "random",
"remote_api_tokens": [
{"url_regex": "civitai", "token": "REDACTED"},
{"url_regex": "huggingface", "token": "REDACTED"}
],
"scan_models_on_startup": false,
"unsafe_disable_picklescan": false,
"allow_unknown_models": true
},
"set_config_fields": [
"pytorch_cuda_alloc_conf", "pil_compress_level", "legacy_models_yaml_path", "hashing_algorithm",
"enable_partial_loading", "remote_api_tokens", "log_tokenization", "host",
"log_memory_usage", "device_working_mem_gb"
]
}
What happened
Currently, it seems that models cannot be installed, with single file and diffusers style models failing in different ways:
- Single file (Flux GGUF model, in this case) - after hashing, the model install fails while attempting to unlink the temp file:
[2025-10-25 16:27:05,197]::[InvokeAI]::INFO --> Invoke running on http://0.0.0.0:9090 (Press CTRL+C to quit)
C:\Users\gogurt_disliker\Documents\invokeai\.venv\Lib\site-packages\huggingface_hub\utils\_deprecation.py:131: FutureWarning: 'get_token_permission' (from 'huggingface_hub.hf_api') is deprecated and will be removed from version '1.0'. Permissions are more complex than when `get_token_permission` was first introduced. OAuth and fine-grain tokens allows for more detailed permissions. If you need to know the permissions associated with a token, please use `whoami` and check the `'auth'` key.
warnings.warn(warning_message, FutureWarning)
[2025-10-25 16:27:21,642]::[ModelInstallService]::INFO --> Queueing model install: https://civitai.com/api/download/models/1719520?type=Model&format=GGUF&size=pruned&fp=fp8 (1 file)
[2025-10-25 16:27:21,642]::[InvokeAI]::INFO --> Started installation of https://civitai.com/api/download/models/1719520?type=Model&format=GGUF&size=pruned&fp=fp8
[2025-10-25 16:27:22,942]::[DownloadQueueService]::INFO --> File download started: https://civitai.com/api/download/models/1719520?type=Model&format=GGUF&size=pruned&fp=fp8
[2025-10-25 16:30:39,152]::[DownloadQueueService]::INFO --> Download complete: https://civitai.com/api/download/models/1719520?type=Model&format=GGUF&size=pruned&fp=fp8
[2025-10-25 16:30:39,152]::[ModelInstallService]::INFO --> Model download complete: https://civitai.com/api/download/models/1719520?type=Model&format=GGUF&size=pruned&fp=fp8
[2025-10-25 16:30:39,154]::[ModelInstallService]::INFO --> Model install started: https://civitai.com/api/download/models/1719520?type=Model&format=GGUF&size=pruned&fp=fp8
Hashing pandoraFLUX_pandoraProAlphaQ4KM.gguf: 100%|████████████████████████████████████████████████| 1/1 [00:00<?, ?file/s]
[2025-10-25 16:30:43,748]::[ModelInstallService]::ERROR --> Model install error: https://civitai.com/api/download/models/1719520?type=Model&format=GGUF&size=pruned&fp=fp8
PermissionError: [WinError 32] The process cannot access the file because it is being used by another process: 'C:\\Users\\gogurt_disliker\\Documents\\invokeai\\models\\tmpinstall_cegf5k_1\\pandoraFLUX_pandoraProAlphaQ4KM.gguf'
Exception in thread Thread-3 (_install_next_item):
Traceback (most recent call last):
File "C:\Users\gogurt_disliker\AppData\Roaming\uv\python\cpython-3.12.9-windows-x86_64-none\Lib\threading.py", line 1075, in _bootstrap_inner
self.run()
File "C:\Users\gogurt_disliker\AppData\Roaming\uv\python\cpython-3.12.9-windows-x86_64-none\Lib\threading.py", line 1012, in run
self._target(*self._args, **self._kwargs)
File "C:\Users\gogurt_disliker\Documents\invokeai\.venv\Lib\site-packages\invokeai\app\services\model_install\model_install_default.py", line 524, in _install_next_item
rmtree(job._install_tmpdir)
File "C:\Users\gogurt_disliker\AppData\Roaming\uv\python\cpython-3.12.9-windows-x86_64-none\Lib\shutil.py", line 781, in rmtree
return _rmtree_unsafe(path, onexc)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\gogurt_disliker\AppData\Roaming\uv\python\cpython-3.12.9-windows-x86_64-none\Lib\shutil.py", line 635, in _rmtree_unsafe
onexc(os.unlink, fullname, err)
File "C:\Users\gogurt_disliker\AppData\Roaming\uv\python\cpython-3.12.9-windows-x86_64-none\Lib\shutil.py", line 633, in _rmtree_unsafe
os.unlink(fullname)
PermissionError: [WinError 32] The process cannot access the file because it is being used by another process: 'C:\\Users\\gogurt_disliker\\Documents\\invokeai\\models\\tmpinstall_cegf5k_1\\pandoraFLUX_pandoraProAlphaQ4KM.gguf'
C:\Users\gogurt_disliker\Documents\invokeai\.venv\Lib\site-packages\huggingface_hub\utils\_http.py:200: ResourceWarning: unclosed <ssl.SSLSocket fd=3736, family=2, type=1, proto=0, laddr=('192.168.235.126', 62002), raddr=('18.244.202.118', 443)>
_get_session_from_cache.cache_clear()
ResourceWarning: Enable tracemalloc to get the object allocation traceback
The same 'file in use' error is also raised by the OS itself (outside of Invoke) when manually attempting to delete this temporary file.
- With multi-file diffusers-style models, instead of an error, the install just hangs and the UI reports it as still 'In Progress'. In this case, there are no errors in the console, with the final line being
[2025-10-25 16:32:22,430]::[ModelInstallService]::INFO --> Model download complete: Lykon/dreamshaper-8
This affects URL or HF-based downloads as well as models in Starter Models.
What you expected to happen
Models should be installable as in prior versions of Invoke AI.
How to reproduce the problem
In Invoke's model manager, install any model you'd like via any of the available download methods: URL (civitai, etc), HuggingFace repo, or any model from Starter Models that uses any of these methods.
Additional context
I'm guessing that this is the result of recent changes with regard to model folder reorganization or model identification?
Discord username
gogurtenjoyer