Skip to content

Commit

Permalink
4.6.4-alpha (#582)
Browse files Browse the repository at this point in the history
  • Loading branch information
c121914yu authored Dec 8, 2023
1 parent 54d52d8 commit b58249f
Show file tree
Hide file tree
Showing 66 changed files with 964 additions and 529 deletions.
3 changes: 2 additions & 1 deletion files/deploy/fastgpt/docker-compose.yml
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
# 非 host 版本, 不使用本机代理
# (不懂 Docker 的,只需要关心 OPENAI_BASE_URL 和 CHAT_API_KEY 即可!)
version: '3.3'
services:
pg:
Expand Down Expand Up @@ -47,7 +48,7 @@ services:
environment:
# root 密码,用户名为: root
- DEFAULT_ROOT_PSW=1234
# 中转地址,如果是用官方号,不需要管
# 中转地址,如果是用官方号,不需要管。务必加 /v1
- OPENAI_BASE_URL=https://api.openai.com/v1
- CHAT_API_KEY=sk-xxxx
- DB_MAX_LINK=5 # database max link
Expand Down
24 changes: 24 additions & 0 deletions packages/global/common/error/code/common.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
import { ErrType } from '../errorCode';

/* common: 507000 */
const startCode = 507000;

export enum CommonErrEnum {
  fileNotFound = 'fileNotFound'
}

// Common (cross-domain) errors. `message` is an i18n key resolved by the client.
// NOTE: renamed from `datasetErr` — this module is the *common* error list; the
// old name was a copy-paste leftover from code/dataset.ts.
const commonErrList = [
  {
    statusText: CommonErrEnum.fileNotFound,
    message: 'error.fileNotFound'
  }
];

// Build the error map keyed by statusText; each entry gets a sequential
// numeric code starting at startCode (507000, 507001, ...).
export default commonErrList.reduce((acc, cur, index) => {
  return {
    ...acc,
    [cur.statusText]: {
      code: startCode + index,
      statusText: cur.statusText,
      message: cur.message,
      data: null
    }
  };
}, {} as ErrType<`${CommonErrEnum}`>);
10 changes: 5 additions & 5 deletions packages/global/common/error/code/dataset.ts
Original file line number Diff line number Diff line change
Expand Up @@ -13,23 +13,23 @@ export enum DatasetErrEnum {
const datasetErr = [
{
statusText: DatasetErrEnum.unAuthDataset,
message: '无权操作该知识库'
message: 'core.dataset.error.unAuthDataset'
},
{
statusText: DatasetErrEnum.unAuthDatasetCollection,
message: '无权操作该数据集'
message: 'core.dataset.error.unAuthDatasetCollection'
},
{
statusText: DatasetErrEnum.unAuthDatasetData,
message: '无权操作该数据'
message: 'core.dataset.error.unAuthDatasetData'
},
{
statusText: DatasetErrEnum.unAuthDatasetFile,
message: '无权操作该文件'
message: 'core.dataset.error.unAuthDatasetFile'
},
{
statusText: DatasetErrEnum.unCreateCollection,
message: '无权创建数据集'
message: 'core.dataset.error.unCreateCollection'
},
{
statusText: DatasetErrEnum.unLinkCollection,
Expand Down
4 changes: 3 additions & 1 deletion packages/global/common/error/errorCode.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ import pluginErr from './code/plugin';
import outLinkErr from './code/outLink';
import teamErr from './code/team';
import userErr from './code/user';
import commonErr from './code/common';

export const ERROR_CODE: { [key: number]: string } = {
400: '请求失败',
Expand Down Expand Up @@ -96,5 +97,6 @@ export const ERROR_RESPONSE: Record<
...outLinkErr,
...teamErr,
...userErr,
...pluginErr
...pluginErr,
...commonErr
};
7 changes: 7 additions & 0 deletions packages/global/common/file/api.d.ts
Original file line number Diff line number Diff line change
@@ -1,3 +1,10 @@
// Payload for uploading an image to the server.
export type UploadImgProps = {
  base64Img: string; // base64-encoded image data — TODO confirm whether callers send a data-URL or bare base64
  expiredTime?: Date; // optional expiry; presumably the image may be cleaned up after this time — verify against consumer
  metadata?: Record<string, any>; // arbitrary metadata persisted alongside the image
  shareId?: string; // NOTE(review): looks like it ties the upload to a share/outLink — confirm against caller
};

export type UrlFetchParams = {
urlList: string[];
selector?: string;
Expand Down
9 changes: 8 additions & 1 deletion packages/global/common/file/tools.ts
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,14 @@ export const cheerioToHtml = ({
}
});

return $(selector || 'body').html();
const html = $(selector || 'body')
.map((item, dom) => {
return $(dom).html();
})
.get()
.join('\n');

return html;
};
export const urlsFetch = async ({
urlList,
Expand Down
8 changes: 6 additions & 2 deletions packages/global/common/string/markdown.ts
Original file line number Diff line number Diff line change
Expand Up @@ -26,10 +26,14 @@ export const simpleMarkdownText = (rawText: string) => {
rawText = rawText.replace(/\\\\n/g, '\\n');

// Remove headings and code blocks front spaces
['####', '###', '##', '#', '```', '~~~'].forEach((item) => {
['####', '###', '##', '#', '```', '~~~'].forEach((item, i) => {
const isMarkdown = i <= 3;
const reg = new RegExp(`\\n\\s*${item}`, 'g');
if (reg.test(rawText)) {
rawText = rawText.replace(new RegExp(`\\n\\s*(${item})`, 'g'), '\n$1');
rawText = rawText.replace(
new RegExp(`(\\n)\\s*(${item})`, 'g'),
isMarkdown ? '\n$1$2' : '$1$2'
);
}
});

Expand Down
66 changes: 41 additions & 25 deletions packages/global/common/string/textSplitter.ts
Original file line number Diff line number Diff line change
Expand Up @@ -12,12 +12,13 @@ export const splitText2Chunks = (props: {
text: string;
chunkLen: number;
overlapRatio?: number;
customReg?: string[];
}): {
chunks: string[];
tokens: number;
overlapRatio?: number;
} => {
let { text = '', chunkLen, overlapRatio = 0.2 } = props;
let { text = '', chunkLen, overlapRatio = 0.2, customReg = [] } = props;
const splitMarker = 'SPLIT_HERE_SPLIT_HERE';
const codeBlockMarker = 'CODE_BLOCK_LINE_MARKER';
const overlapLen = Math.round(chunkLen * overlapRatio);
Expand All @@ -29,22 +30,29 @@ export const splitText2Chunks = (props: {

// The larger maxLen is, the next sentence is less likely to trigger splitting
const stepReges: { reg: RegExp; maxLen: number }[] = [
{ reg: /^(#\s[^\n]+)\n/gm, maxLen: chunkLen * 1.4 },
{ reg: /^(##\s[^\n]+)\n/gm, maxLen: chunkLen * 1.4 },
{ reg: /^(###\s[^\n]+)\n/gm, maxLen: chunkLen * 1.4 },
{ reg: /^(####\s[^\n]+)\n/gm, maxLen: chunkLen * 1.4 },

{ reg: /([\n](`))/g, maxLen: chunkLen * 4 }, // code block
{ reg: /([\n](?![\*\-|>0-9]))/g, maxLen: chunkLen * 1.8 }, // (?![\*\-|>`0-9]): markdown special char
{ reg: /([\n])/g, maxLen: chunkLen * 1.4 },

{ reg: /([。]|([a-zA-Z])\.\s)/g, maxLen: chunkLen * 1.4 },
{ reg: /([!]|!\s)/g, maxLen: chunkLen * 1.4 },
{ reg: /([?]|\?\s)/g, maxLen: chunkLen * 1.6 },
{ reg: /([;]|;\s)/g, maxLen: chunkLen * 1.8 },
...customReg.map((text) => ({ reg: new RegExp(`([${text}])`, 'g'), maxLen: chunkLen * 1.4 })),
{ reg: /^(#\s[^\n]+)\n/gm, maxLen: chunkLen * 1.2 },
{ reg: /^(##\s[^\n]+)\n/gm, maxLen: chunkLen * 1.2 },
{ reg: /^(###\s[^\n]+)\n/gm, maxLen: chunkLen * 1.2 },
{ reg: /^(####\s[^\n]+)\n/gm, maxLen: chunkLen * 1.2 },

{ reg: /([\n]([`~]))/g, maxLen: chunkLen * 4 }, // code block
{ reg: /([\n](?!\s*[\*\-|>0-9]))/g, maxLen: chunkLen * 2 }, // (?![\*\-|>`0-9]): markdown special char
{ reg: /([\n])/g, maxLen: chunkLen * 1.2 },

{ reg: /([。]|([a-zA-Z])\.\s)/g, maxLen: chunkLen * 1.2 },
{ reg: /([!]|!\s)/g, maxLen: chunkLen * 1.2 },
{ reg: /([?]|\?\s)/g, maxLen: chunkLen * 1.4 },
{ reg: /([;]|;\s)/g, maxLen: chunkLen * 1.6 },
{ reg: /([,]|,\s)/g, maxLen: chunkLen * 2 }
];

const customRegLen = customReg.length;
const checkIsCustomStep = (step: number) => step < customRegLen;
const checkIsMarkdownSplit = (step: number) => step >= customRegLen && step <= 3 + customRegLen;
const checkIndependentChunk = (step: number) => step >= customRegLen && step <= 4 + customRegLen;
const checkForbidOverlap = (step: number) => step <= 6 + customRegLen;

// if use markdown title split, Separate record title title
const getSplitTexts = ({ text, step }: { text: string; step: number }) => {
if (step >= stepReges.length) {
Expand All @@ -55,11 +63,13 @@ export const splitText2Chunks = (props: {
}
];
}
const isMarkdownSplit = step <= 3;
const isMarkdownSplit = checkIsMarkdownSplit(step);
const independentChunk = checkIndependentChunk(step);

const { reg } = stepReges[step];

const splitTexts = text
.replace(reg, isMarkdownSplit ? `${splitMarker}$1` : `$1${splitMarker}`)
.replace(reg, independentChunk ? `${splitMarker}$1` : `$1${splitMarker}`)
.split(`${splitMarker}`)
.filter((part) => part.trim());

Expand All @@ -76,7 +86,7 @@ export const splitText2Chunks = (props: {
};

const getOneTextOverlapText = ({ text, step }: { text: string; step: number }): string => {
const forbidOverlap = step <= 6;
const forbidOverlap = checkForbidOverlap(step);
const maxOverlapLen = chunkLen * 0.4;

// step >= stepReges.length: Do not overlap incomplete sentences
Expand Down Expand Up @@ -114,7 +124,8 @@ export const splitText2Chunks = (props: {
lastText: string;
mdTitle: string;
}): string[] => {
const isMarkdownSplit = step <= 3;
const independentChunk = checkIndependentChunk(step);
const isCustomStep = checkIsCustomStep(step);

// mini text
if (text.length <= chunkLen) {
Expand All @@ -134,12 +145,13 @@ export const splitText2Chunks = (props: {
return chunks;
}

const { maxLen } = stepReges[step];
const minChunkLen = chunkLen * 0.7;

// split text by special char
const splitTexts = getSplitTexts({ text, step });

const maxLen = splitTexts.length > 1 ? stepReges[step].maxLen : chunkLen;
const minChunkLen = chunkLen * 0.7;
const miniChunkLen = 30;

const chunks: string[] = [];
for (let i = 0; i < splitTexts.length; i++) {
const item = splitTexts[i];
Expand Down Expand Up @@ -170,8 +182,8 @@ export const splitText2Chunks = (props: {
mdTitle: currentTitle
});
const lastChunk = innerChunks[innerChunks.length - 1];
// last chunk is too small, concat it to lastText
if (!isMarkdownSplit && lastChunk.length < minChunkLen) {
// last chunk is too small, concat it to lastText(next chunk start)
if (!independentChunk && lastChunk.length < minChunkLen) {
chunks.push(...innerChunks.slice(0, -1));
lastText = lastChunk;
} else {
Expand All @@ -189,10 +201,14 @@ export const splitText2Chunks = (props: {
lastText = newText;

// markdown paragraph block: Direct addition; If the chunk size reaches, add a chunk
if (isMarkdownSplit || newTextLen >= chunkLen) {
if (
isCustomStep ||
(independentChunk && newTextLen > miniChunkLen) ||
newTextLen >= chunkLen
) {
chunks.push(`${currentTitle}${lastText}`);

lastText = isMarkdownSplit ? '' : getOneTextOverlapText({ text: lastText, step });
lastText = getOneTextOverlapText({ text: lastText, step });
}
}

Expand Down
2 changes: 1 addition & 1 deletion packages/global/core/app/utils.ts
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ export const getDefaultAppForm = (templateId = 'fastgpt-universal'): AppSimpleEd
dataset: {
datasets: [],
similarity: 0.4,
limit: 5,
limit: 1500,
searchEmptyText: '',
searchMode: DatasetSearchModeEnum.embedding
},
Expand Down
2 changes: 2 additions & 0 deletions packages/global/core/chat/constants.ts
Original file line number Diff line number Diff line change
Expand Up @@ -55,3 +55,5 @@ export const LOGO_ICON = `/icon/logo.svg`;

// Marker keys delimiting embedded image / file blocks inside chat message text.
export const IMG_BLOCK_KEY = 'img-block';
export const FILE_BLOCK_KEY = 'file-block';

// Sentinel string used to mark quoted content in markdown — TODO confirm exact usage at render site.
export const MARKDOWN_QUOTE_SIGN = 'QUOTE SIGN';
13 changes: 3 additions & 10 deletions packages/global/core/module/template/system/datasetSearch.ts
Original file line number Diff line number Diff line change
Expand Up @@ -54,17 +54,10 @@ export const DatasetSearchModule: FlowModuleTemplateType = {
{
key: ModuleInputKeyEnum.datasetLimit,
type: FlowNodeInputTypeEnum.hidden,
label: '单次搜索上限',
description: '最多取 n 条记录作为本次问题引用',
value: 5,
label: '引用上限',
description: '单次搜索最大的 Tokens 数量,中文约1字=1.7Tokens,英文约1字=1Tokens',
value: 1500,
valueType: ModuleDataTypeEnum.number,
min: 1,
max: 20,
step: 1,
markList: [
{ label: '1', value: 1 },
{ label: '20', value: 20 }
],
showTargetInApp: false,
showTargetInPlugin: false
},
Expand Down
60 changes: 51 additions & 9 deletions packages/service/common/file/gridfs/controller.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ import { BucketNameEnum } from '@fastgpt/global/common/file/constants';
import fsp from 'fs/promises';
import fs from 'fs';
import { DatasetFileSchema } from '@fastgpt/global/core/dataset/type';
import { delImgByFileIdList } from '../image/controller';

export function getGFSCollection(bucket: `${BucketNameEnum}`) {
return connectionMongo.connection.db.collection(`${bucket}.files`);
Expand Down Expand Up @@ -69,24 +70,65 @@ export async function getFileById({
_id: new Types.ObjectId(fileId)
});

if (!file) {
return Promise.reject('File not found');
}
// if (!file) {
// return Promise.reject('File not found');
// }

return file;
return file || undefined;
}

export async function delFileById({
export async function delFileByFileIdList({
bucketName,
fileId
fileIdList,
retry = 3
}: {
bucketName: `${BucketNameEnum}`;
fileId: string;
fileIdList: string[];
retry?: number;
}): Promise<any> {
try {
const bucket = getGridBucket(bucketName);

await Promise.all(fileIdList.map((id) => bucket.delete(new Types.ObjectId(id))));
} catch (error) {
if (retry > 0) {
return delFileByFileIdList({ bucketName, fileIdList, retry: retry - 1 });
}
}
}
// delete file by metadata(datasetId)
/**
 * Delete every GridFS file in a bucket whose metadata matches the given
 * datasetId, plus any images referencing those files.
 *
 * NOTE(review): when `datasetId` is undefined the filter is `{}`, which
 * matches — and deletes — EVERY file in the bucket. Confirm callers always
 * pass a datasetId unless a full wipe is intended.
 */
export async function delFileByMetadata({
  bucketName,
  datasetId
}: {
  bucketName: `${BucketNameEnum}`;
  datasetId?: string;
}) {
  const bucket = getGridBucket(bucketName);

  // Only project _id: we just need the ids for the deletes below.
  const files = await bucket
    .find(
      {
        ...(datasetId && { 'metadata.datasetId': datasetId })
      },
      {
        projection: {
          _id: 1
        }
      }
    )
    .toArray();

  const idList = files.map((item) => String(item._id));

  // delete img
  await delImgByFileIdList(idList);

  // delete file
  await delFileByFileIdList({
    bucketName,
    fileIdList: idList
  });
}

export async function getDownloadStream({
Expand Down
Loading

0 comments on commit b58249f

Please sign in to comment.