Skip to content

Commit ad9349a

Browse files
authored
Merge branch 'main' into dependabot/cargo/notify-8.2.0
2 parents d3658fc + 48b3b79 commit ad9349a

File tree

240 files changed

+60049
-4722
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

240 files changed

+60049
-4722
lines changed

.codegraph.toml.example

Lines changed: 100 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,100 @@
1+
# CodeGraph Configuration File
2+
# Copy this to .codegraph.toml or ~/.codegraph/config.toml and customize
3+
4+
# ============================================================================
5+
# Embedding Configuration
6+
# ============================================================================
7+
[embedding]
8+
# Provider: "auto", "onnx", "ollama", "openai", or "lmstudio"
9+
# "auto" will detect available models automatically
10+
# "lmstudio" recommended for MLX + Flash Attention 2 (macOS)
11+
provider = "lmstudio"
12+
13+
# Model path or identifier
14+
# For ONNX: Absolute path to model directory (auto-detected from HuggingFace cache)
15+
# For Ollama: Model name (e.g., "all-minilm:latest")
16+
# For LM Studio: Model name (e.g., "jinaai/jina-embeddings-v3")
17+
# For OpenAI: Model name (e.g., "text-embedding-3-small")
18+
# Recommended: jinaai/jina-embeddings-v3 (1536-dim, optimized for code)
19+
model = "jinaai/jina-embeddings-v3"
20+
21+
# LM Studio URL (default port 1234)
22+
lmstudio_url = "http://localhost:1234"
23+
24+
# Ollama URL (only used if provider is "ollama")
25+
ollama_url = "http://localhost:11434"
26+
27+
# OpenAI API key (only used if provider is "openai")
28+
# Can also be set via OPENAI_API_KEY environment variable
29+
# openai_api_key = "sk-..."
30+
31+
# Embedding dimension (1536 for jinaai/jina-embeddings-v3 as configured above, 384 for all-MiniLM)
32+
dimension = 1536
33+
34+
# Batch size for embedding generation (GPU optimization)
35+
batch_size = 64
36+
37+
# ============================================================================
38+
# LLM Configuration (for insights generation)
39+
# ============================================================================
40+
[llm]
41+
# Enable LLM insights (false = context-only mode for agents like Claude/GPT-4)
42+
# Set to false for maximum speed if using an external agent
43+
enabled = false
44+
45+
# LLM provider: "ollama" or "lmstudio"
46+
# "lmstudio" recommended for MLX + Flash Attention 2 (macOS)
47+
provider = "lmstudio"
48+
49+
# LLM model identifier
50+
# For LM Studio: lmstudio-community/DeepSeek-Coder-V2-Lite-Instruct-GGUF/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M.gguf
51+
# For Ollama: Model name (e.g., "qwen2.5-coder:14b", "codellama:13b")
52+
# Recommended: DeepSeek Coder v2 Lite Instruct Q4_K_M (superior performance)
53+
model = "lmstudio-community/DeepSeek-Coder-V2-Lite-Instruct-GGUF"
54+
55+
# LM Studio URL (default port 1234)
56+
lmstudio_url = "http://localhost:1234"
57+
58+
# Ollama URL
59+
ollama_url = "http://localhost:11434"
60+
61+
# Context window size (tokens)
62+
# DeepSeek Coder v2 Lite supports up to 32768 tokens; 32000 leaves a small safety margin
63+
context_window = 32000
64+
65+
# Temperature for generation (0.0 = deterministic, 1.0 = creative)
66+
temperature = 0.1
67+
68+
# Insights mode: "context-only", "balanced", or "deep"
69+
# - context-only: Return context only (fastest, for agents)
70+
# - balanced: Process top 10 files with LLM (good speed/quality)
71+
# - deep: Process all reranked files (comprehensive)
72+
insights_mode = "context-only"
73+
74+
# ============================================================================
75+
# Performance Configuration
76+
# ============================================================================
77+
[performance]
78+
# Number of worker threads (defaults to CPU count)
79+
num_threads = 0 # 0 = auto-detect
80+
81+
# Cache size in MB
82+
cache_size_mb = 512
83+
84+
# Enable GPU acceleration (requires CUDA/Metal support)
85+
enable_gpu = false
86+
87+
# Maximum concurrent requests for embedding/LLM
88+
max_concurrent_requests = 4
89+
90+
# ============================================================================
91+
# Logging Configuration
92+
# ============================================================================
93+
[logging]
94+
# Log level: "trace", "debug", "info", "warn", "error"
95+
# Use "warn" during indexing for clean TUI output (recommended)
96+
# Use "info" for development/debugging
97+
level = "warn"
98+
99+
# Log format: "pretty", "json", "compact"
100+
format = "pretty"

.env.example

Lines changed: 70 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,73 @@
1-
# CodeGraph Security Configuration
1+
# CodeGraph Configuration Example
22
# Copy this file to .env and update the values for your environment
33

4+
# ============================================================================
5+
# CodeGraph Core Configuration (Simplified Setup)
6+
# ============================================================================
7+
8+
# Minimal Setup - Auto-detect embedding provider (ONNX, Ollama, or OpenAI)
9+
CODEGRAPH_EMBEDDING_PROVIDER=auto
10+
11+
# That's it for basic usage! CodeGraph will auto-detect everything else.
12+
# Uncomment and customize the settings below if you need more control.
13+
14+
# Embedding Provider Configuration
15+
# ----------------------------------
16+
# Provider options: "auto", "onnx", "ollama", "openai", or "lmstudio"
17+
# CODEGRAPH_EMBEDDING_PROVIDER=auto
18+
19+
# ONNX: Specify model path (or leave empty for auto-detection from HuggingFace cache)
20+
# CODEGRAPH_LOCAL_MODEL=/path/to/your/onnx/model
21+
22+
# Ollama: Specify embedding model name
23+
# CODEGRAPH_EMBEDDING_MODEL=all-minilm:latest
24+
# CODEGRAPH_OLLAMA_URL=http://localhost:11434
25+
26+
# LM Studio: Best for MLX + Flash Attention 2 (recommended on macOS)
27+
# Default: jinaai/jina-embeddings-v3 (1536 dimensions, as configured below)
28+
# CODEGRAPH_EMBEDDING_PROVIDER=lmstudio
29+
# CODEGRAPH_EMBEDDING_MODEL=jinaai/jina-embeddings-v3
30+
# CODEGRAPH_LMSTUDIO_URL=http://localhost:1234
31+
# CODEGRAPH_EMBEDDING_DIMENSION=1536
32+
33+
# OpenAI: Model name (API key configured below in Security section)
34+
# CODEGRAPH_EMBEDDING_MODEL=text-embedding-3-small
35+
36+
# LLM Configuration (for local insights generation)
37+
# --------------------------------------------------
38+
# Leave empty to use context-only mode (fastest, recommended for agents like Claude/GPT-4)
39+
# Set to enable local LLM insights generation
40+
41+
# LM Studio with DeepSeek Coder v2 Lite Instruct (recommended)
42+
# Superior MLX support and Flash Attention 2 on macOS
43+
# CODEGRAPH_LLM_PROVIDER=lmstudio
44+
# CODEGRAPH_MODEL=lmstudio-community/DeepSeek-Coder-V2-Lite-Instruct-GGUF/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M.gguf
45+
# CODEGRAPH_LMSTUDIO_URL=http://localhost:1234
46+
# CODEGRAPH_CONTEXT_WINDOW=32000
47+
# CODEGRAPH_TEMPERATURE=0.1
48+
49+
# Ollama (alternative)
50+
# LLM model (e.g., "qwen2.5-coder:14b", "codellama:13b")
51+
# CODEGRAPH_MODEL=qwen2.5-coder:14b
52+
# CODEGRAPH_OLLAMA_URL=http://localhost:11434
53+
54+
# LLM context window size (tokens)
55+
# CODEGRAPH_CONTEXT_WINDOW=32000
56+
57+
# LLM temperature (0.0 = deterministic, 1.0 = creative)
58+
# CODEGRAPH_TEMPERATURE=0.1
59+
60+
# Logging
61+
# -------
62+
# Log level: trace, debug, info, warn, error
63+
# Use "warn" during indexing for clean TUI output (recommended)
64+
# Use "info" for development/debugging
65+
RUST_LOG=warn
66+
67+
# ============================================================================
68+
# Security Configuration (for production deployments)
69+
# ============================================================================
70+
471
# JWT Authentication
572
JWT_SECRET=replace_with_secure_random_secret_minimum_32_characters_long
673
JWT_EXPIRY_HOURS=24
@@ -33,8 +100,8 @@ MAX_REQUEST_SIZE=10485760 # 10MB
33100
SESSION_TIMEOUT_HOURS=24
34101
PASSWORD_MIN_LENGTH=12
35102

36-
# Logging
37-
LOG_LEVEL=info
103+
# Logging (see RUST_LOG above for CodeGraph core logging)
104+
# LOG_LEVEL=info # For application-level logging
38105
SECURITY_LOG_LEVEL=warn
39106
LOG_FORMAT=json
40107

.github/workflows/docs.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -436,7 +436,7 @@ jobs:
436436
path: docs-site
437437

438438
- name: Setup Node.js
439-
uses: actions/setup-node@v4
439+
uses: actions/setup-node@v6
440440
with:
441441
node-version: '18'
442442

.gitignore

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -168,4 +168,5 @@ CLAUDE.md
168168
AGENTS.md
169169
CRUSH.md
170170
OUROBOROS.md
171-
.codegraph/
171+
.codegraph/
172+
SESSION-MEMORY.md

.gitignore.codegraph

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
# CodeGraph generated files
2+
.codegraph/db/
3+
.codegraph/cache/
4+
.codegraph/vectors/
5+
.codegraph/logs/

0 commit comments

Comments
 (0)