@@ -47,12 +47,62 @@ jobs:
console.log('🤖 Initializing Gemini AI...');

const genAI = new GoogleGenerativeAI(apiKey);

// Candidate models ordered quality-first; later entries have higher RPM
// limits and serve as fallbacks when earlier ones are rate limited.
const modelNames = [
  "gemini-2.5-pro",                // 5 RPM, 250K TPM - Highest quality
  "gemini-2.5-flash",              // 10 RPM, 250K TPM - Best 2.5 balance
  "gemini-2.5-flash-preview",      // 10 RPM, 250K TPM - Latest 2.5 features
  "gemini-2.5-flash-lite",         // 15 RPM, 250K TPM - Faster 2.5
  "gemini-2.5-flash-lite-preview", // 15 RPM, 250K TPM - Latest 2.5 lite
  "gemini-2.0-flash",              // 15 RPM, 1M TPM - Good 2.0 balance
  "gemini-2.0-flash-lite",         // 30 RPM, 1M TPM - Highest RPM fallback
  "gemini-1.5-flash",              // 15 RPM, 250K TPM - DEPRECATED fallback
  "gemini-pro"                     // Legacy final fallback
];

let model = null;
let modelUsed = null;

for (const modelName of modelNames) {
  try {
    console.log('🔧 Trying model:', modelName);
    model = genAI.getGenerativeModel({ model: modelName });

    // Probe with a tiny request: getGenerativeModel() alone does not hit
    // the API, so only a real call reveals 404s and rate limits.
    // NOTE: this consumes one request of the candidate model's quota.
    console.log('🧪 Testing model availability...');
    await model.generateContent("test");

    modelUsed = modelName;
    console.log('✅ Successfully initialized and tested model:', modelName);
    break;
  } catch (modelError) {
    // BUGFIX: clear the handle. getGenerativeModel() succeeds even for
    // unusable models, so without this a failed probe would still satisfy
    // the `if (!model)` guard below and the script would continue with a
    // model that cannot serve requests.
    model = null;
    console.log('❌ Model', modelName, 'failed:', modelError.message);

    // BUGFIX: check the numeric status independently of the message text.
    // Previously `modelError.status === 429` sat inside a
    // `modelError.message && (...)` guard, so a 429 response with an
    // empty/undefined message was never classified as a rate limit.
    if (
      modelError.status === 429 ||
      modelError.message?.includes('rate limit') ||
      modelError.message?.includes('quota') ||
      modelError.message?.includes('429')
    ) {
      console.log('⚠️ Rate limit detected, trying next model with higher RPM...');
    } else if (modelError.message?.includes('404')) {
      console.log('⚠️ Model not found, trying next available model...');
    }
    // fall through to the next candidate (explicit `continue` was redundant)
  }
}

if (!model) {
  throw new Error('No supported Gemini model could be initialized');
}

const prompt = fs.readFileSync('analysis_prompt.txt', 'utf8');
console.log('📝 Prompt loaded, size:', prompt.length, 'characters');

console.log('🚀 Generating analysis with model:', modelUsed);
const result = await model.generateContent(prompt);
const response = await result.response;
const text = response.text();
0 commit comments