fix: dataset data list api adapt (#2878)

* fix: dataset data list api adapt

* update doc version

* perf: fedomain env

* add fedomain env
This commit is contained in:
Archer
2024-10-10 18:10:19 +08:00
committed by GitHub
parent 3878a50d0f
commit 27291faa66
10 changed files with 99 additions and 17 deletions

View File

@@ -1093,6 +1093,22 @@ A2:
{{< tab tabName="请求示例" >}}
{{< markdownify >}}
**4.8.11+**
```bash
curl --location --request POST 'http://localhost:3000/api/core/dataset/data/v2/list' \
--header 'Authorization: Bearer {{authorization}}' \
--header 'Content-Type: application/json' \
--data-raw '{
"offset": 0,
"pageSize": 10,
"collectionId":"65abd4ac9d1448617cba6171",
"searchText":""
}'
```
**4.6.7+**
```bash
curl --location --request POST 'http://localhost:3000/api/core/dataset/data/list' \
--header 'Authorization: Bearer {{authorization}}' \
@@ -1112,10 +1128,13 @@ curl --location --request POST 'http://localhost:3000/api/core/dataset/data/list
{{< markdownify >}}
{{% alert icon=" " context="success" %}}
- pageNum: 偏移量(选填)
- pageNum: 页码(选填)
- pageSize: 每页数量,最大30(选填)
- collectionId: 集合的ID(必填)
- searchText: 模糊搜索词(选填)
{{% /alert %}}
{{< /markdownify >}}

View File

@@ -10,4 +10,5 @@ weight: 812
## 更新说明
1. 新增 - 全局变量支持更多数据类型
2. 修复 - 文件后缀判断,去除 query 影响。
2. 新增 - FE_DOMAIN 环境变量,配置该环境变量后,上传文件/图片会补全后缀后得到完整地址。(可解决 docx 文件图片链接,有时会无法被模型识别问题)
3. 修复 - 文件后缀判断,去除 query 影响。

View File

@@ -154,6 +154,8 @@ services:
- MILVUS_TOKEN=none
# sandbox 地址
- SANDBOX_URL=http://sandbox:3000
# 前端地址
- FE_DOMAIN=
# 日志等级: debug, info, warn, error
- LOG_LEVEL=info
- STORE_LOG_LEVEL=warn

View File

@@ -111,6 +111,8 @@ services:
- PG_URL=postgresql://username:password@pg:5432/postgres
# sandbox 地址
- SANDBOX_URL=http://sandbox:3000
# 前端地址
- FE_DOMAIN=
# 日志等级: debug, info, warn, error
- LOG_LEVEL=info
- STORE_LOG_LEVEL=warn

View File

@@ -92,6 +92,8 @@ services:
- MILVUS_TOKEN=zilliz_cloud_token
# sandbox 地址
- SANDBOX_URL=http://sandbox:3000
# 前端地址
- FE_DOMAIN=
# 日志等级: debug, info, warn, error
- LOG_LEVEL=info
- STORE_LOG_LEVEL=warn

View File

@@ -5,10 +5,6 @@ import { ClientSession } from '../../../common/mongo';
import { guessBase64ImageType } from '../utils';
import { readFromSecondary } from '../../mongo/utils';
export function getMongoImgUrl(id: string, extension: string) {
return `${imageBaseUrl}${id}.${extension}`;
}
export const maxImgSize = 1024 * 1024 * 12;
const base64MimeRegex = /data:image\/([^\)]+);base64/;
export async function uploadMongoImg({
@@ -39,7 +35,7 @@ export async function uploadMongoImg({
shareId
});
return getMongoImgUrl(String(_id), extension);
return `${process.env.FE_DOMAIN || ''}${imageBaseUrl}${String(_id)}.${extension}`;
}
export async function readMongoImg({ id }: { id: string }) {

View File

@@ -31,7 +31,7 @@ SANDBOX_URL=http://localhost:3001
# 商业版地址
PRO_URL=
# 页面的地址,用于自动补全相对路径资源的 domain
# FE_DOMAIN=http://localhost:3000
FE_DOMAIN=http://localhost:3000
# 日志等级: debug, info, warn, error
LOG_LEVEL=debug

View File

@@ -3,20 +3,19 @@ import { MongoDatasetData } from '@fastgpt/service/core/dataset/data/schema';
import { replaceRegChars } from '@fastgpt/global/common/string/tools';
import { NextAPI } from '@/service/middleware/entry';
import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant';
import { PagingData, RequestPaging } from '@/types';
import { ApiRequestProps } from '@fastgpt/service/type/next';
import { DatasetDataListItemType } from '@/global/core/dataset/type';
import { PaginationProps, PaginationResponse } from '@fastgpt/web/common/fetch/type';
export type GetDatasetDataListProps = PaginationProps & {
export type GetDatasetDataListProps = RequestPaging & {
searchText?: string;
collectionId: string;
};
export type GetDatasetDataListRes = PaginationResponse<DatasetDataListItemType>;
async function handler(
req: ApiRequestProps<GetDatasetDataListProps>
): Promise<GetDatasetDataListRes> {
let { offset, pageSize = 10, searchText = '', collectionId } = req.body;
): Promise<PagingData<DatasetDataListItemType>> {
let { pageNum = 1, pageSize = 10, searchText = '', collectionId } = req.body;
pageSize = Math.min(pageSize, 30);
@@ -41,17 +40,19 @@ async function handler(
: {})
};
const [list, total] = await Promise.all([
const [data, total] = await Promise.all([
MongoDatasetData.find(match, '_id datasetId collectionId q a chunkIndex')
.sort({ chunkIndex: 1, updateTime: -1 })
.skip(offset)
.skip((pageNum - 1) * pageSize)
.limit(pageSize)
.lean(),
MongoDatasetData.countDocuments(match)
]);
return {
list,
pageNum,
pageSize,
data,
total
};
}

View File

@@ -0,0 +1,59 @@
import { authDatasetCollection } from '@fastgpt/service/support/permission/dataset/auth';
import { MongoDatasetData } from '@fastgpt/service/core/dataset/data/schema';
import { replaceRegChars } from '@fastgpt/global/common/string/tools';
import { NextAPI } from '@/service/middleware/entry';
import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant';
import { ApiRequestProps } from '@fastgpt/service/type/next';
import { DatasetDataListItemType } from '@/global/core/dataset/type';
import { PaginationProps, PaginationResponse } from '@fastgpt/web/common/fetch/type';
export type GetDatasetDataListProps = PaginationProps & {
searchText?: string;
collectionId: string;
};
export type GetDatasetDataListRes = PaginationResponse<DatasetDataListItemType>;
/**
 * v2 dataset-data list endpoint: returns one page of data items belonging to a
 * dataset collection, with optional fuzzy search over the q/a fields.
 *
 * Request body (GetDatasetDataListProps): offset-based pagination
 * (`offset`, `pageSize`) plus `collectionId` (required) and `searchText`
 * (optional). Responds with `{ list, total }` (GetDatasetDataListRes).
 */
async function handler(
  req: ApiRequestProps<GetDatasetDataListProps>
): Promise<GetDatasetDataListRes> {
  // Default offset to 0: the original destructuring left it undefined when a
  // client omitted it, which then reached Mongoose's .skip(undefined).
  let { offset = 0, pageSize = 10, searchText = '', collectionId } = req.body;

  // Hard cap the page size so a client cannot request arbitrarily large reads.
  pageSize = Math.min(pageSize, 30);

  // 凭证校验 — caller must hold read permission on the target collection.
  const { teamId, collection } = await authDatasetCollection({
    req,
    authToken: true,
    authApiKey: true,
    collectionId,
    per: ReadPermissionVal
  });

  // Escape regex metacharacters in the user-supplied search text before
  // building a case-insensitive matcher (prevents regex injection).
  const queryReg = new RegExp(`${replaceRegChars(searchText)}`, 'i');
  const match = {
    teamId,
    datasetId: collection.datasetId._id,
    collectionId,
    // Only add the $or clause when there is a non-blank search term.
    ...(searchText.trim()
      ? {
          $or: [{ q: queryReg }, { a: queryReg }]
        }
      : {})
  };

  // Fetch the requested page and the total count in parallel.
  const [list, total] = await Promise.all([
    MongoDatasetData.find(match, '_id datasetId collectionId q a chunkIndex')
      .sort({ chunkIndex: 1, updateTime: -1 })
      .skip(offset)
      .limit(pageSize)
      .lean(),
    MongoDatasetData.countDocuments(match)
  ]);

  return {
    list,
    total
  };
}
export default NextAPI(handler);

View File

@@ -51,7 +51,7 @@ import type { UpdateDatasetCollectionParams } from '@/pages/api/core/dataset/col
import type {
GetDatasetDataListProps,
GetDatasetDataListRes
} from '@/pages/api/core/dataset/data/list';
} from '@/pages/api/core/dataset/data/v2/list';
import type { UpdateDatasetDataProps } from '@fastgpt/global/core/dataset/controller';
import type { DatasetFolderCreateBody } from '@/pages/api/core/dataset/folder/create';
import type { PaginationProps, PaginationResponse } from '@fastgpt/web/common/fetch/type';
@@ -159,7 +159,7 @@ export const getScrollCollectionList = (data: GetScrollCollectionsProps) =>
/* =============================== data ==================================== */
/* get dataset list */
export const getDatasetDataList = (data: GetDatasetDataListProps) =>
POST<GetDatasetDataListRes>(`/core/dataset/data/list`, data);
POST<GetDatasetDataListRes>(`/core/dataset/data/v2/list`, data);
export const getDatasetDataItemById = (id: string) =>
GET<DatasetDataItemType>(`/core/dataset/data/detail`, { id });