@@ -49,31 +49,32 @@ export async function initApi(key: KeyConfig, chatModel: CHATMODEL) {
     messageStore: undefined,
     getMessageById,
   }
-
-  // Set the token limits based on the model's type. This is because different models have different token limits.
-  // The token limit includes the token count from both the message array sent and the model response.
-  // 'gpt-35-turbo' has a limit of 4096 tokens, 'gpt-4' and 'gpt-4-32k' have limits of 8192 and 32768 tokens respectively.
-
-  // Check if the model type includes '16k'
-  if (model.toLowerCase().includes('16k')) {
-    // If it's a '16k' model, set the maxModelTokens to 16384 and maxResponseTokens to 4096
-    options.maxModelTokens = 16384;
-    options.maxResponseTokens = 4096;
-  } else if (model.toLowerCase().includes('32k')) {
-    // If it's a '32k' model, set the maxModelTokens to 32768 and maxResponseTokens to 8192
-    options.maxModelTokens = 32768;
-    options.maxResponseTokens = 8192;
-  } else if (model.toLowerCase().includes('gpt-4')) {
-    // If it's a 'gpt-4' model, set the maxModelTokens and maxResponseTokens to 8192 and 2048 respectively
-    options.maxModelTokens = 8192;
-    options.maxResponseTokens = 2048;
-  } else {
-    // If none of the above, use the default values: set the maxModelTokens and maxResponseTokens to 4096 and 1024 respectively
-    options.maxModelTokens = 4096;
-    options.maxResponseTokens = 1024;
-  }
 
+  // Set the token limits based on the model's type. This is because different models have different token limits.
+  // The token limit includes the token count from both the message array sent and the model response.
+  // 'gpt-35-turbo' has a limit of 4096 tokens, 'gpt-4' and 'gpt-4-32k' have limits of 8192 and 32768 tokens respectively.
 
+  // Check if the model type includes '16k'
+  if (model.toLowerCase().includes('16k')) {
+    // If it's a '16k' model, set the maxModelTokens to 16384 and maxResponseTokens to 4096
+    options.maxModelTokens = 16384
+    options.maxResponseTokens = 4096
+  }
+  else if (model.toLowerCase().includes('32k')) {
+    // If it's a '32k' model, set the maxModelTokens to 32768 and maxResponseTokens to 8192
+    options.maxModelTokens = 32768
+    options.maxResponseTokens = 8192
+  }
+  else if (model.toLowerCase().includes('gpt-4')) {
+    // If it's a 'gpt-4' model, set the maxModelTokens and maxResponseTokens to 8192 and 2048 respectively
+    options.maxModelTokens = 8192
+    options.maxResponseTokens = 2048
+  }
+  else {
+    // If none of the above, use the default values: set the maxModelTokens and maxResponseTokens to 4096 and 1024 respectively
+    options.maxModelTokens = 4096
+    options.maxResponseTokens = 1024
+  }
 
   if (isNotEmptyString(OPENAI_API_BASE_URL))
     options.apiBaseUrl = `${OPENAI_API_BASE_URL}/v1`
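
The substring checks in this hunk are order-sensitive: a model name like 'gpt-4-32k' contains both '32k' and 'gpt-4', so the '32k' branch must be tested before the 'gpt-4' one. A table-driven lookup makes that ordering explicit. The sketch below is one possible refactor under the same semantics, not part of this commit; the `ModelTokenLimits` type, `TOKEN_LIMITS` table, and `tokenLimitsFor` helper are illustrative names only.

```ts
// A minimal sketch, assuming the same maxModelTokens/maxResponseTokens semantics as the chain above.
interface ModelTokenLimits {
  maxModelTokens: number
  maxResponseTokens: number
}

// Ordered from most to least specific, so 'gpt-4-32k' matches '32k' before the 'gpt-4' entry.
const TOKEN_LIMITS: Array<[string, ModelTokenLimits]> = [
  ['16k', { maxModelTokens: 16384, maxResponseTokens: 4096 }],
  ['32k', { maxModelTokens: 32768, maxResponseTokens: 8192 }],
  ['gpt-4', { maxModelTokens: 8192, maxResponseTokens: 2048 }],
]

function tokenLimitsFor(model: string): ModelTokenLimits {
  const match = TOKEN_LIMITS.find(([needle]) => model.toLowerCase().includes(needle))
  // Fall back to the 4096/1024 defaults used above when nothing matches.
  return match ? match[1] : { maxModelTokens: 4096, maxResponseTokens: 1024 }
}

// Usage: Object.assign(options, tokenLimitsFor(model))
```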
@@ -262,12 +263,21 @@ async function fetchBalance() {
     console.error('Your account has been banned. Please log in to OpenAI to check.')
     return
   }
-  const subscriptionData = await response.json()
+  interface SubscriptionData {
+    hard_limit_usd?: number
+    // Other possible properties can be added here
+  }
+  const subscriptionData: SubscriptionData = await response.json()
   const totalAmount = subscriptionData.hard_limit_usd
 
+  interface UsageData {
+    total_usage?: number
+    // Other possible properties can be added here
+  }
+
   // Get the amount already used
   response = await fetch(urlUsage, { agent: socksAgent === undefined ? httpsAgent : socksAgent, headers })
-  const usageData = await response.json()
+  const usageData: UsageData = await response.json()
   const totalUsage = usageData.total_usage / 100
 
   // Calculate the remaining quota
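
One caveat on the typing added in this hunk: annotating the result of `response.json()` only asserts a shape at compile time; nothing validates the payload at runtime, and because `total_usage` is declared optional, `usageData.total_usage / 100` would be flagged under `strictNullChecks`. Below is a minimal sketch of a runtime guard, assuming the same response shapes; the `readNumber` helper is hypothetical and not part of this commit.

```ts
// Hypothetical helper: safely pull a numeric field out of an untyped JSON payload.
function readNumber(payload: unknown, key: string): number | undefined {
  if (typeof payload === 'object' && payload !== null) {
    const value = (payload as Record<string, unknown>)[key]
    if (typeof value === 'number')
      return value
  }
  return undefined
}

// Usage against the parsed bodies above, with explicit fallbacks:
// const totalAmount = readNumber(subscriptionData, 'hard_limit_usd') ?? 0
// const totalUsage = (readNumber(usageData, 'total_usage') ?? 0) / 100
```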