mirror of https://github.com/labring/FastGPT.git (synced 2025-12-25 20:02:47 +00:00)
134 lines · 3.9 KiB · TypeScript
import { type EmbeddingModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getAIApi } from '../config';
import { countPromptTokens } from '../../../common/string/tiktoken/index';
import { EmbeddingTypeEnm } from '@fastgpt/global/core/ai/constants';
import { addLog } from '../../../common/system/log';

type GetVectorProps = {
  model: EmbeddingModelItemType;
  input: string[] | string;
  type?: `${EmbeddingTypeEnm}`;
  headers?: Record<string, string>;
};
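/**
 * Convert text into embedding vectors.
 *
 * The input is split into batches of at most 20 strings, each batch is sent to the
 * embedding API sequentially, and every returned vector is forced to 1536 dimensions.
 * Token usage is taken from the API response when available, otherwise counted locally.
 */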
export async function getVectorsByText({ model, input, type, headers }: GetVectorProps) {
  if (!input) {
    return Promise.reject({
      code: 500,
      message: 'input is empty'
    });
  }

  const ai = getAIApi();

  const formatInput = Array.isArray(input) ? input : [input];

  // Send at most 20 inputs per embedding request
  const chunkSize = 20;
  const chunks: string[][] = [];
  for (let i = 0; i < formatInput.length; i += chunkSize) {
    chunks.push(formatInput.slice(i, i + chunkSize));
  }

  try {
    // Process chunks sequentially and accumulate token usage and vectors
    let totalTokens = 0;
    const allVectors: number[][] = [];

    for (const chunk of chunks) {
      // Convert this chunk of input texts to vectors
      const result = await ai.embeddings
        .create(
          {
            ...model.defaultConfig,
            ...(type === EmbeddingTypeEnm.db && model.dbConfig),
            ...(type === EmbeddingTypeEnm.query && model.queryConfig),
            model: model.model,
            input: chunk
          },
          // Route the call to a custom request URL with its own auth header when configured
          model.requestUrl
            ? {
                path: model.requestUrl,
                headers: {
                  ...(model.requestAuth ? { Authorization: `Bearer ${model.requestAuth}` } : {}),
                  ...headers
                }
              }
            : { headers }
        )
        .then(async (res) => {
          if (!res.data) {
            addLog.error('Embedding API is not responding', res);
            return Promise.reject('Embedding API is not responding');
          }
          if (!res?.data?.[0]?.embedding) {
            console.log(res);
            // @ts-ignore
            return Promise.reject(res.data?.err?.message || 'Embedding API Error');
          }

          const [tokens, vectors] = await Promise.all([
            (async () => {
              // Prefer the usage reported by the provider; otherwise count tokens locally
              if (res.usage) return res.usage.total_tokens;

              const tokens = await Promise.all(chunk.map((item) => countPromptTokens(item)));
              return tokens.reduce((sum, item) => sum + item, 0);
            })(),
            Promise.all(
              res.data
                .map((item) => unityDimensional(item.embedding))
                .map((item) => {
                  if (model.normalization) return normalization(item);
                  return item;
                })
            )
          ]);

          return {
            tokens,
            vectors
          };
        });

      totalTokens += result.tokens;
      allVectors.push(...result.vectors);
    }

    return {
      tokens: totalTokens,
      vectors: allVectors
    };
  } catch (error) {
    addLog.error('Embedding Error', error);

    return Promise.reject(error);
  }
}
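// Force every embedding to exactly 1536 dimensions: longer vectors are truncated to
// their first 1536 values, shorter ones are zero-padded.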
function unityDimensional(vector: number[]) {
  if (vector.length > 1536) {
    console.log(
      `The current vector dimension is ${vector.length}. Dimensions above 1536 are not supported, so the vector is truncated to its first 1536 values.`
    );
    return vector.slice(0, 1536);
  }

  const zeroVector = new Array(1536 - vector.length).fill(0);

  return vector.concat(zeroVector);
}
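// L2-normalize embeddings that come back unnormalized (detected by any component > 1);
// vectors whose components are all <= 1 are returned unchanged. Only called when
// model.normalization is enabled.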
function normalization(vector: number[]) {
  if (vector.some((item) => item > 1)) {
    // Calculate the Euclidean norm (L2 norm)
    const norm = Math.sqrt(vector.reduce((sum, val) => sum + val * val, 0));

    // Normalize the vector by dividing each component by the norm
    return vector.map((val) => val / norm);
  }

  return vector;
}
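/*
 * Example usage (illustrative sketch only, not part of this module). It assumes an
 * embedding model item has already been loaded from the system model config; the
 * `embeddingModel` variable below is a placeholder for however that lookup is done.
 *
 *   const { tokens, vectors } = await getVectorsByText({
 *     model: embeddingModel,
 *     input: ['hello world', 'FastGPT embedding test'],
 *     type: EmbeddingTypeEnm.query
 *   });
 *   // tokens  -> total token usage across all batches
 *   // vectors -> one 1536-dimension number[] per input string
 */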