feat: support accesstoken token statistic (Close Chanzhaoyu#275)
Kerwin committed Jul 9, 2023
1 parent 6665170 commit d067727
Showing 3 changed files with 252 additions and 271 deletions.
1 change: 1 addition & 0 deletions service/package.json
@@ -31,6 +31,7 @@
     "esno": "^0.16.3",
     "express": "^4.18.2",
     "express-rate-limit": "^6.7.0",
+    "gpt-token": "^0.0.5",
     "https-proxy-agent": "^5.0.1",
     "isomorphic-fetch": "^3.0.0",
     "jwt-decode": "^3.1.2",
507 changes: 238 additions & 269 deletions service/pnpm-lock.yaml
15 changes: 13 additions & 2 deletions service/src/index.ts
@@ -2,13 +2,14 @@ import express from 'express'
 import jwt from 'jsonwebtoken'
 import * as dotenv from 'dotenv'
 import { ObjectId } from 'mongodb'
+import { textTokens } from 'gpt-token'
 import type { RequestProps } from './types'
 import type { ChatMessage } from './chatgpt'
 import { abortChatProcess, chatConfig, chatReplyProcess, containsSensitiveWords, initAuditService } from './chatgpt'
 import { auth, getUserId } from './middleware/auth'
 import { clearApiKeyCache, clearConfigCache, getApiKeys, getCacheApiKeys, getCacheConfig, getOriginConfig } from './storage/config'
-import type { AuditConfig, CHATMODEL, ChatInfo, ChatOptions, Config, KeyConfig, MailConfig, SiteConfig, UsageResponse, UserInfo } from './storage/model'
-import { Status, UserRole, chatModelOptions } from './storage/model'
+import type { AuditConfig, CHATMODEL, ChatInfo, ChatOptions, Config, KeyConfig, MailConfig, SiteConfig, UserInfo } from './storage/model'
+import { Status, UsageResponse, UserRole, chatModelOptions } from './storage/model'
 import {
   clearChat,
   createChatRoom,
@@ -395,6 +396,16 @@ router.post('/chat-process', [auth, limiter], async (req, res) => {
       room,
     })
     // return the whole response including usage
+    if (!result.data.detail?.usage) {
+      if (!result.data.detail)
+        result.data.detail = {}
+      result.data.detail.usage = new UsageResponse()
+      // Since the tokens themselves aren't counted, default to gpt-3.5's tokenizer here as a pseudo-count
+      result.data.detail.usage.prompt_tokens = textTokens(prompt, 'gpt-3.5-turbo-0613')
+      result.data.detail.usage.completion_tokens = textTokens(result.data.text, 'gpt-3.5-turbo-0613')
+      result.data.detail.usage.total_tokens = result.data.detail.usage.prompt_tokens + result.data.detail.usage.completion_tokens
+      result.data.detail.usage.estimated = true
+    }
     res.write(`\n${JSON.stringify(result.data)}`)
   }
   catch (error) {
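
The added block fills a UsageResponse imported from service/src/storage/model.ts, whose definition is not part of this diff. A hypothetical sketch of the shape it implies (only the field names prompt_tokens, completion_tokens, total_tokens and estimated are confirmed by the commit; the types and defaults are assumptions):

// Hypothetical sketch, not the actual contents of storage/model.ts.
export class UsageResponse {
  prompt_tokens = 0 // tokens counted for the user prompt
  completion_tokens = 0 // tokens counted for the model reply
  total_tokens = 0 // prompt_tokens + completion_tokens
  estimated = false // set to true when counts are computed locally via gpt-token rather than returned by the API
}

Marking the usage as estimated lets downstream statistics distinguish real API-reported token counts from this local pseudo-count.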
