diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000000000..47e2453f4147d2
--- /dev/null
+++ b/.github/CODE_OF_CONDUCT.md
@@ -0,0 +1,43 @@
+# Dify Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, caste, color, religion, or sexual identity
+and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+ and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the
+ overall community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or
+ advances of any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email
+ address, without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Language Policy
+
+To facilitate clear and effective communication, all discussions, comments, documentation, and pull requests in this project should be conducted in English. This ensures that all contributors can participate and collaborate effectively.
+
+
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index 2b516f9b15a5be..2a01311deb2e51 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -1,5 +1,5 @@
name: "🕷️ Bug report"
-description: Report errors or unexpected behavior
+description: Report errors or unexpected behavior [please use English :)]
labels:
- bug
body:
diff --git a/.github/ISSUE_TEMPLATE/document_issue.yml b/.github/ISSUE_TEMPLATE/document_issue.yml
index 24c8b5d2ddad77..d21ba88390c84f 100644
--- a/.github/ISSUE_TEMPLATE/document_issue.yml
+++ b/.github/ISSUE_TEMPLATE/document_issue.yml
@@ -1,5 +1,5 @@
name: "📚 Documentation Issue"
-description: Report issues in our documentation
+description: Report issues in our documentation [please use English :)]
labels:
- ducumentation
body:
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
index d7ebf0661c886f..2819479a3b7f16 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.yml
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -1,5 +1,5 @@
name: "⭐ Feature or enhancement request"
-description: Propose something new.
+description: Propose something new. [please use English :)]
labels:
- enhancement
body:
diff --git a/.github/ISSUE_TEMPLATE/help_wanted.yml b/.github/ISSUE_TEMPLATE/help_wanted.yml
index 63c3dea4c18b12..03fac2f3e83c96 100644
--- a/.github/ISSUE_TEMPLATE/help_wanted.yml
+++ b/.github/ISSUE_TEMPLATE/help_wanted.yml
@@ -1,5 +1,5 @@
name: "🤝 Help Wanted"
-description: "Request help from the community"
description: "Request help from the community [please use English :)]"
labels:
- help-wanted
body:
diff --git a/.github/ISSUE_TEMPLATE/translation_issue.yml b/.github/ISSUE_TEMPLATE/translation_issue.yml
index 5d8e7814cc83a9..51e4adcc08bc31 100644
--- a/.github/ISSUE_TEMPLATE/translation_issue.yml
+++ b/.github/ISSUE_TEMPLATE/translation_issue.yml
@@ -1,5 +1,5 @@
name: "🌐 Localization/Translation issue"
-description: Report incorrect translations.
+description: Report incorrect translations. [please use English :)]
labels:
- translation
body:
diff --git a/.github/workflows/build-api-image.yml b/.github/workflows/build-api-image.yml
index 1b5bf764ecc13c..d1dcdcdbe30432 100644
--- a/.github/workflows/build-api-image.yml
+++ b/.github/workflows/build-api-image.yml
@@ -34,9 +34,7 @@ jobs:
type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/') }}
type=ref,event=branch
type=sha,enable=true,priority=100,prefix=,suffix=,format=long
- type=semver,pattern={{major}}.{{minor}}.{{patch}}
- type=semver,pattern={{major}}.{{minor}}
- type=semver,pattern={{major}}
+ type=raw,value=${{ github.ref_name }},enable=${{ startsWith(github.ref, 'refs/tags/') }}
- name: Build and push
uses: docker/build-push-action@v4
diff --git a/.github/workflows/build-web-image.yml b/.github/workflows/build-web-image.yml
index 32685d50924ad2..b9928bbd7f4086 100644
--- a/.github/workflows/build-web-image.yml
+++ b/.github/workflows/build-web-image.yml
@@ -34,9 +34,7 @@ jobs:
type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/') }}
type=ref,event=branch
type=sha,enable=true,priority=100,prefix=,suffix=,format=long
- type=semver,pattern={{major}}.{{minor}}.{{patch}}
- type=semver,pattern={{major}}.{{minor}}
- type=semver,pattern={{major}}
+ type=raw,value=${{ github.ref_name }},enable=${{ startsWith(github.ref, 'refs/tags/') }}
- name: Build and push
uses: docker/build-push-action@v4
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index 40ae6ece95fe14..b3bd0b165b551a 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -20,11 +20,11 @@ jobs:
steps:
- uses: actions/stale@v5
with:
- days-before-issue-stale: 30
+ days-before-issue-stale: 15
days-before-issue-close: 3
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: "Close due to it's no longer active, if you have any questions, you can reopen it."
stale-pr-message: "Close due to it's no longer active, if you have any questions, you can reopen it."
stale-issue-label: 'no-issue-activity'
stale-pr-label: 'no-pr-activity'
- any-of-labels: 'duplicate,question,invalid,wontfix,no-issue-activity,no-pr-activity,enhancement'
+ any-of-labels: 'duplicate,question,invalid,wontfix,no-issue-activity,no-pr-activity,enhancement,cant-reproduce,help-wanted'
diff --git a/README.md b/README.md
index ed4589bde7a9f2..5c16aa6ceb9b8a 100644
--- a/README.md
+++ b/README.md
@@ -11,10 +11,8 @@
- [] 支持自定义的OpenAI的**API服务**
- [] 支持**文件夹上传**和新的上传文件类型检查(不受限于单纯的文件后缀名)
- [] 提高QA分割时候的最大分段数量
-
# 路线图
-
## QA分割需要更多针对性改进
一般情况下文档分段设置为384~512之间为embedding模型所容乃的范围内最好,但是QA分割是将文档发送给LLM后得出N个问答对,可LLM目前Token上限已经在8K~32K之间了,大大超过了Embedding模型的范围
所以对于QA分割最好的方式是:
@@ -33,4 +31,3 @@
不仅是chatGPT4的多模态,而是利用多模态模型跟LLM模型一起工作
比如 [图像<=>文本模型BLIP](https://github.com/salesforce/BLIP)
这样我们可以在对话中上传多媒体文件(知识库也可以考虑将多模态纳入,但保存多媒体文件是一件很麻烦的事情)
-
diff --git a/README_CN.md b/README_CN.md
index 29d0e9ad458b0d..679c980303de72 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -1,60 +1,63 @@
-![](./images/describe-cn.jpg)
+[![](./images/describe.png)](https://dify.ai)
English |
简体中文 |
日本語 |
- Español
+ Español |
+ Klingon
+
+
+
+
+
+
+
+
+
+
+
+Dify 是一个 LLM 应用开发平台,已经有超过 10 万个应用基于 Dify.AI 构建。它融合了 Backend as Service 和 LLMOps 的理念,涵盖了构建生成式 AI 原生应用所需的核心技术栈,包括一个内置 RAG 引擎。使用 Dify,你可以基于任何模型自部署类似 Assistants API 和 GPTs 的能力。
+
+![](./images/demo.png)
-#### [官方网站](https://dify.ai) • [使用文档](https://docs.dify.ai/v/zh-hans) · [部署文档](https://docs.dify.ai/v/zh-hans/getting-started/install-self-hosted) · [FAQ](https://docs.dify.ai/v/zh-hans/getting-started/faq) • [Twitter](https://twitter.com/dify_ai) • [Discord](https://discord.gg/FngNHpbcY7)
+## 为什么选择 Dify
-**Dify** 是一个易用的 LLMOps 平台,基于不同的大型语言模型能力,让更多人可以简易地创建可持续运营的原生 AI 应用。Dify 提供多种类型应用的可视化编排,应用可开箱即用,也能以“后端即服务”的 API 提供服务。
+Dify 具有模型中立性,相较 LangChain 等硬编码开发库 Dify 是一个完整的、工程化的技术栈,而相较于 OpenAI 的 Assistants API 你可以完全将服务部署在本地。
-通过 Dify 创建的应用包含了:
+| 功能 | Dify.AI | Assistants API | LangChain |
+| --- | --- | --- | --- |
+| 编程方式 | 面向 API | 面向 API | 面向 Python 代码 |
+| 生态策略 | 开源 | 封闭且商用 | 开源 |
+| RAG 引擎 | 支持 | 支持 | 不支持 |
+| Prompt IDE | 包含 | 包含 | 没有 |
+| 支持的 LLMs | 丰富 | 仅 GPT | 丰富 |
+| 本地部署 | 支持 | 不支持 | 不适用 |
-- 开箱即用的的 Web 站点,支持表单模式和聊天对话模式
-- 一套 API 即可包含插件、上下文增强等能力,替你省下了后端代码的编写工作
-- 可视化的对应用进行数据分析,查阅日志或进行标注
-https://github.com/langgenius/dify/assets/100913391/f6e658d5-31b3-4c16-a0af-9e191da4d0f6
+## 特点
-## 核心能力
-1. **模型支持:** 你可以在 Dify 上选择基于不同模型的能力来开发你的 AI 应用。Dify 兼容 Langchain,这意味着我们将逐步支持多种 LLMs ,目前支持的模型供应商:
+![](./images/models.png)
-- [x] **OpenAI**:GPT4、GPT3.5-turbo、GPT3.5-turbo-16k、text-davinci-003
-- [x] **Azure OpenAI Service**
-- [x] **Anthropic**:Claude2、Claude-instant
-- [x] **Replicate**
-- [x] **Hugging Face Hub**
-- [x] **ChatGLM**
-- [x] **Llama2**
-- [x] **MiniMax**
-- [x] **讯飞星火大模型**
-- [x] **文心一言**
-- [x] **通义千问**
+**1. LLM支持**:与 OpenAI 的 GPT 系列模型集成,或者与开源的 Llama2 系列模型集成。事实上,Dify支持主流的商业模型和开源模型(本地部署或基于 MaaS)。
+**2. Prompt IDE**:和团队一起在 Dify 协作,通过可视化的 Prompt 和应用编排工具开发 AI 应用。 支持无缝切换多种大型语言模型。
-我们为所有注册云端版的用户免费提供以下资源(登录 [dify.ai](https://cloud.dify.ai) 即可使用):
-* 200 次 OpenAI 模型的消息调用额度,用于创建基于 OpenAI 模型的 AI 应用
-* 300 万 讯飞星火大模型 Token 的调用额度,用于创建基于讯飞星火大模型的 AI 应用
-* 100 万 MiniMax Token 的调用额度,用于创建基于 MiniMax 模型的 AI 应用
-2. **可视化编排 Prompt:** 通过界面化编写 prompt 并调试,只需几分钟即可发布一个 AI 应用。
-3. **文本 Embedding 处理(数据集)**:全自动完成文本预处理,使用你的数据作为上下文,无需理解晦涩的概念和技术处理。支持 PDF、txt 等文件格式,支持从 Notion、网页、API 同步数据。
-4. **基于 API 开发:** 后端即服务。您可以直接访问网页应用,也可以接入 API 集成到您的应用中,无需关注复杂的后端架构和部署过程。
-5. **插件能力:** Dify 「智聊」平台已支持网页浏览、Google 搜索、Wikipedia 查询等第一方插件,可在对话中实现联网搜索、分析网页内容、展示 AI 的推理过程。
-6. **团队 Workspace:** 团队成员可加入 Workspace 编辑、管理和使用团队内的 AI 应用。
-6. **数据标注与改进:** 可视化查阅 AI 应用日志并对数据进行改进标注,观测 AI 的推理过程,不断提高其性能。(Coming soon)
- -----------------------------
- ## Use cases
- * [几分钟创建一个带有业务数据的官网 AI 智能客服](https://docs.dify.ai/v/zh-hans/use-cases/create-an-ai-chatbot-with-business-data-in-minutes)
- * [构建一个 Notion AI 助手](https://docs.dify.ai/v/zh-hans/use-cases/build-an-notion-ai-assistant)
- * [创建 Midjoureny 提示词机器人](https://docs.dify.ai/v/zh-hans/use-cases/create-a-midjoureny-prompt-word-robot-with-zero-code)
+**3. RAG引擎**:包括各种基于全文索引或向量数据库嵌入的 RAG 能力,允许直接上传 PDF、TXT 等各种文本格式。
+**4. Agent**:基于函数调用的 Agent框架,允许用户自定义配置,所见即所得。Dify 提供了基本的插件能力,如谷歌搜索。
-## 使用云服务
+**5. 持续运营**:监控和分析应用日志和性能,使用生产数据持续改进 Prompt、数据集或模型。
-访问 [Dify.ai](https://cloud.dify.ai) 使用云端版。
+## 在开始之前
+
+- [网站](https://dify.ai)
+- [文档](https://docs.dify.ai)
+- [部署文档](https://docs.dify.ai/getting-started/install-self-hosted)
+- [常见问题](https://docs.dify.ai/getting-started/faq)
## 安装社区版
@@ -83,80 +86,29 @@ docker compose up -d
### 配置
-需要自定义配置,请参考我们的 [docker-compose.yml](docker/docker-compose.yaml) 文件中的注释,并手动设置环境配置,修改完毕后,请再次执行 `docker-compose up -d`。
-
-## Roadmap
-
-我们正在开发中的功能:
-
-- **数据集**,支持更多的数据集,通过网页、API 同步内容。用户可以根据自己的数据源构建 AI 应用程序。
-- **插件**,我们将发布符合 ChatGPT 标准的插件,支持更多 Dify 自己的插件,支持用户自定义插件能力,以在应用程序中启用更多功能,例如以支持以目标为导向的分解推理任务。
-
-## Q&A
-
-**Q: 我能用 Dify 做什么?**
-
-A: Dify 是一个简单且能力丰富的 LLM 开发和运营工具。你可以用它搭建商用级应用,个人助理。如果你想自己开发应用,Dify 也能为你省下接入 OpenAI 的后端工作,使用我们逐步提供的可视化运营能力,你可以持续的改进和训练你的 GPT 模型。
-
-**Q: 如何使用 Dify “训练”自己的模型?**
-
-A: 一个有价值的应用由 Prompt Engineering、上下文增强和 Fine-tune 三个环节组成。我们创造了一种 Prompt 结合编程语言的 Hybrid 编程方式(类似一个模版引擎),你可以轻松的完成长文本嵌入,或抓取用户输入的一个 Youtube 视频的字幕——这些都将作为上下文提交给 LLMs 进行计算。我们十分注重应用的可运营性,你的用户在使用 App 期间产生的数据,可进行分析、标记和持续训练。以上环节如果没有好的工具支持,可能会消耗你大量的时间。
-
-**Q: 如果要创建一个自己的应用,我需要准备什么?**
-
-A: 我们假定你已经有了 OpenAI 或 Claude 等模型的 API Key,如果没有请去注册一个。如果你已经有了一些内容可以作为训练上下文,就太好了。
-
-**Q: 提供哪些界面语言?**
-
-A: 支持英文、中文,你可以为我们贡献语言包并提供维护支持。
+如果您需要自定义配置,请参考我们的 [docker-compose.yml](docker/docker-compose.yaml) 文件中的注释,并手动设置环境配置。更改后,请再次运行 `docker-compose up -d`。您可以在我们的[文档](https://docs.dify.ai/getting-started/install-self-hosted/environments)中查看所有环境变量的完整列表。
## Star History
[![Star History Chart](https://api.star-history.com/svg?repos=langgenius/dify&type=Date)](https://star-history.com/#langgenius/dify&Date)
-## 贡献
-
-我们欢迎你为 Dify 作出贡献帮助 Dify 变得更好。我们欢迎各种方式的贡献,提交代码、问题、新想法、或者分享你基于 Dify 创建出的各种有趣有用的 AI 应用。同时,我们也欢迎你在不同的活动、研讨会、社交媒体上分享 Dify。
-
-### 贡献代码
-为了确保正确审查,所有代码贡献 - 包括来自具有直接提交更改权限的贡献者 - 都必须提交 PR 请求并在合并分支之前得到核心开发人员的批准。
-
-我们欢迎所有人提交 PR!如果您愿意提供帮助,可以在 [贡献指南](CONTRIBUTING_CN.md) 中了解有关如何为项目做出代码贡献的更多信息。
-
-### 提交问题或想法
-你可以通过 Dify 代码仓库新增 issues 来提交你的问题或想法。如遇到问题,请尽可能描述你遇到问题的操作步骤,以便我们更好地发现它。如果你对我们的产品有任何新想法,也欢迎向我们反馈,请尽可能多地分享你的见解,以便我们在社区中获得更多反馈和进一步讨论。
-
-### 分享你的应用
-我们鼓励所有社区成员分享他们基于 Dify 创造出的 AI 应用,它们可以是应用于不同情景或不同用户,这将有助于为希望基于 AI 能力创造的人们提供强大灵感!你可以通过 [Dify-user-case 仓库项目提交 issue](https://github.com/langgenius/dify-user-case) 来分享你的应用案例。
-
-### 向别人分享 Dify
-我们鼓励社区贡献者们积极展示你使用 Dify 的不同角度。你可以通过线下研讨会、博客或社交媒体上谈论或分享你使用 Dify 的任意功能,相信你独特的使用分享会给别人带来非常大的帮助!如果你需要任何指导帮助,欢迎联系我们 support@dify.ai ,你也可以在 twitter @Dify.AI 或在 [Discord 社区](https://discord.gg/FngNHpbcY7)交流来帮助你传播信息。
+## 社区与支持
-### 帮助别人
-你还可以在 Discord、GitHub issues或其他社交平台上帮助需要帮助的人,指导别人解决使用过程中遇到的问题和分享使用经验。这也是个非常了不起的贡献!如果你希望成为 Dify 社区的维护者,请通过[Discord 社区](https://discord.gg/FngNHpbcY7) 联系官方团队或邮件联系我们 support@dify.ai.
+我们欢迎您为 Dify 做出贡献,以帮助改善 Dify。包括:提交代码、问题、新想法,或分享您基于 Dify 创建的有趣且有用的 AI 应用程序。同时,我们也欢迎您在不同的活动、会议和社交媒体上分享 Dify。
+- [GitHub Issues](https://github.com/langgenius/dify/issues)。👉:使用 Dify.AI 时遇到的错误和问题,请参阅[贡献指南](CONTRIBUTING.md)。
+- [电子邮件支持](mailto:hello@dify.ai?subject=[GitHub]Questions%20About%20Dify)。👉:关于使用 Dify.AI 的问题。
+- [Discord](https://discord.gg/FngNHpbcY7)。👉:分享您的应用程序并与社区交流。
+- [Twitter](https://twitter.com/dify_ai)。👉:分享您的应用程序并与社区交流。
+- [商业许可](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry)。👉:有关商业用途许可 Dify.AI 的商业咨询。
+ - [微信]() 👉:扫描下方二维码,添加微信好友,备注 Dify,我们将邀请您加入 Dify 社区。
+
-## 联系我们
-
-如果您有任何问题、建议或合作意向,欢迎通过以下方式联系我们:
-
-- 在我们的 [GitHub Repo](https://github.com/langgenius/dify) 上提交 Issue 或 PR
-- 在我们的 [Discord 社区](https://discord.gg/FngNHpbcY7) 上加入讨论
-- 发送邮件至 hello@dify.ai
-
-## 安全
+## 安全问题
为了保护您的隐私,请避免在 GitHub 上发布安全问题。发送问题至 security@dify.ai,我们将为您做更细致的解答。
-## Citation
-
-本软件使用了以下开源软件:
-
-- Chase, H. (2022). LangChain [Computer software]. https://github.com/hwchase17/langchain
-
-更多信息,请参考相应软件的官方网站或许可证文本。
-
## License
本仓库遵循 [Dify Open Source License](LICENSE) 开源协议。
diff --git a/README_ES.md b/README_ES.md
index 7a52b272847828..11cf312050113c 100644
--- a/README_ES.md
+++ b/README_ES.md
@@ -1,4 +1,4 @@
-![](./images/describe-en.png)
+[![](./images/describe.png)](https://dify.ai)
English |
简体中文 |
@@ -6,118 +6,107 @@
Español
-[Sitio web](https://dify.ai) • [Documentación](https://docs.dify.ai) • [Twitter](https://twitter.com/dify_ai) • [Discord](https://discord.gg/FngNHpbcY7)
-
-**Dify** es una plataforma LLMOps fácil de usar diseñada para capacitar a más personas para que creen aplicaciones sostenibles basadas en IA. Con orquestación visual para varios tipos de aplicaciones, Dify ofrece aplicaciones listas para usar que también pueden funcionar como APIs de Backend-as-a-Service. Unifica tu proceso de desarrollo con una API para la integración de complementos y conjuntos de datos, y agiliza tus operaciones utilizando una interfaz única para la ingeniería de indicaciones, análisis visual y mejora continua.
-
-Las aplicaciones creadas con Dify incluyen:
-
-- Sitios web listos para usar que admiten el modo de formulario y el modo de conversación por chat.
-- Una API única que abarca capacidades de complementos, mejora de contexto y más, lo que te ahorra esfuerzo de programación en el backend.
-- Análisis visual de datos, revisión de registros y anotación para aplicaciones.
-
-Dify es compatible con Langchain, lo que significa que gradualmente admitiremos múltiples LLMs, actualmente compatibles con:
-
-- GPT 3 (text-davinci-003)
-- GPT 3.5 Turbo (ChatGPT)
-- GPT-4
-
-## Usar servicios en la nube
-
-Visita [Dify.ai](https://dify.ai)
-
-## Instalar la Edición Comunitaria
+
+
+
+
+
+
+
+
+
+
-### Requisitos del sistema
+**Dify** es una plataforma de desarrollo de aplicaciones para modelos de lenguaje de gran tamaño (LLM) que ya ha visto la creación de más de **100,000** aplicaciones basadas en Dify.AI. Integra los conceptos de Backend como Servicio y LLMOps, cubriendo el conjunto de tecnologías esenciales requerido para construir aplicaciones nativas de inteligencia artificial generativa, incluyendo un motor RAG incorporado. Con Dify, **puedes auto-desplegar capacidades similares a las de Assistants API y GPTs basadas en cualquier LLM.**
-Antes de instalar Dify, asegúrate de que tu máquina cumple con los siguientes requisitos mínimos del sistema:
+![](./images/demo.png)
-- CPU >= 2 Core
-- RAM >= 4GB
+## Utilizar Servicios en la Nube
-### Inicio rápido
+Usar [Dify.AI Cloud](https://dify.ai) proporciona todas las capacidades de la versión de código abierto, e incluye un complemento de 200 créditos de prueba para GPT.
-La forma más sencilla de iniciar el servidor de Dify es ejecutar nuestro archivo [docker-compose.yml](docker/docker-compose.yaml). Antes de ejecutar el comando de instalación, asegúrate de que [Docker](https://docs.docker.com/get-docker/) y [Docker Compose](https://docs.docker.com/compose/install/) estén instalados en tu máquina:
+## Por qué Dify
-```bash
-cd docker
-docker compose up -d
-```
+Dify se caracteriza por su neutralidad de modelo y es un conjunto tecnológico completo e ingenierizado, en comparación con las bibliotecas de desarrollo codificadas como LangChain. A diferencia de la API de Assistants de OpenAI, Dify permite el despliegue local completo de los servicios.
-Después de ejecutarlo, puedes acceder al panel de control de Dify en tu navegador desde [http://localhost/install](http://localhost/install) y comenzar el proceso de instalación de inicialización.
+| Característica | Dify.AI | API de Assistants | LangChain |
+|----------------|---------|------------------|-----------|
+| **Enfoque de Programación** | Orientado a API | Orientado a API | Orientado a Código en Python |
+| **Estrategia del Ecosistema** | Código Abierto | Cerrado y Comercial | Código Abierto |
+| **Motor RAG** | Soportado | Soportado | No Soportado |
+| **IDE de Prompts** | Incluido | Incluido | Ninguno |
+| **LLMs Soportados** | Gran Variedad | Solo GPT | Gran Variedad |
+| **Despliegue Local** | Soportado | No Soportado | No Aplicable |
-### Helm Chart
+## Características
-Un gran agradecimiento a @BorisPolonsky por proporcionarnos una versión de [Helm Chart](https://helm.sh/), que permite desplegar Dify en Kubernetes.
-Puede ir a https://github.com/BorisPolonsky/dify-helm para obtener información de despliegue.
+![](./images/models.png)
-### Configuración
+**1. Soporte LLM**: Integración con la familia de modelos GPT de OpenAI, o los modelos de la familia Llama2 de código abierto. De hecho, Dify soporta modelos comerciales convencionales y modelos de código abierto (desplegados localmente o basados en MaaS).
-Si necesitas personalizar la configuración, consulta los comentarios en nuestro archivo [docker-compose.yml](docker/docker-compose.yaml) y configura manualmente la configuración del entorno. Después de realizar los cambios, ejecuta nuevamente 'docker-compose up -d'.
+**2. IDE de Prompts**: Orquestación visual de aplicaciones y servicios basados en LLMs con tu equipo.
-## Hoja de ruta
+**3. Motor RAG**: Incluye varias capacidades RAG basadas en indexación de texto completo o incrustaciones de base de datos vectoriales, permitiendo la carga directa de PDFs, TXTs y otros formatos de texto.
-Funciones en desarrollo:
+**4. Agentes**: Un marco de Agentes basado en Llamadas de Función que permite a los usuarios configurar lo que ven es lo que obtienen. Dify incluye capacidades básicas de plugins como la Búsqueda de Google.
-- **Conjuntos de datos**, admitiendo más conjuntos de datos, por ejemplo, sincronización de contenido desde Notion o páginas web.
-Admitiremos más conjuntos de datos, incluidos texto, páginas web e incluso contenido de Notion. Los usuarios pueden construir aplicaciones de IA basadas en sus propias fuentes de datos
-- **Complementos**, introduciendo complementos estándar de ChatGPT para aplicaciones, o utilizando complementos producidos por Dify.
-Lanzaremos complementos que cumplan con el estándar de ChatGPT, o nuestros propios complementos de Dify para habilitar más capacidades en las aplicaciones.
-- **Modelos de código abierto**, por ejemplo, adoptar Llama como proveedor de modelos o para un ajuste adicional.
-Trabajaremos con excelentes modelos de código abierto como Llama, proporcionándolos como opciones de modelos en nuestra plataforma o utilizándolos para un ajuste adicional.
+**5. Operaciones Continuas**: Monitorear y analizar registros de aplicaciones y rendimiento, mejorando continuamente Prompts, conjuntos de datos o modelos usando datos de producción.
-## Preguntas y respuestas
+## Antes de Empezar
-**P: ¿Qué puedo hacer con Dify?**
+- [Sitio web](https://dify.ai)
+- [Documentación](https://docs.dify.ai)
+- [Documentación de Implementación](https://docs.dify.ai/getting-started/install-self-hosted)
+- [Preguntas Frecuentes](https://docs.dify.ai/getting-started/faq)
-R: Dify es una herramienta de desarrollo y operaciones de LLM, simple pero poderosa. Puedes usarla para construir aplicaciones de calidad comercial y asistentes personales. Si deseas desarrollar tus propias aplicaciones, LangDifyGenius puede ahorrarte trabajo en el backend al integrar con OpenAI y ofrecer capacidades de operaciones visuales, lo que te permite mejorar y entrenar continuamente tu modelo GPT.
+## Instalar la Edición Comunitaria
-**P: ¿Cómo uso Dify para "entrenar" mi propio modelo?**
+### Requisitos del Sistema
-R: Una aplicación valiosa consta de Ingeniería de indicaciones, mejora de contexto y ajuste fino. Hemos creado un enfoque de programación híbrida que combina las indicaciones con lenguajes de programación (similar a un motor de plantillas), lo que facilita la incorporación de texto largo o la captura de subtítulos de un video de YouTube ingresado por el usuario, todo lo cual se enviará como contexto para que los LLM lo procesen. Damos gran importancia a la operabilidad de la aplicación, con los datos generados por los usuarios durante el uso de la aplicación disponibles para análisis, anotación y entrenamiento continuo. Sin las herramientas adecuadas, estos pasos pueden llevar mucho tiempo.
+Antes de instalar Dify, asegúrate de que tu máquina cumpla con los siguientes requisitos mínimos del sistema:
-**P: ¿Qué necesito preparar si quiero crear mi propia aplicación?**
+- CPU >= 2 núcleos
+- RAM >= 4GB
-R: Suponemos que ya tienes una clave de API de OpenAI; si no la tienes, por favor regístrate. ¡Si ya tienes contenido que pueda servir como contexto de entrenamiento, eso es genial!
+### Inicio Rápido
-**P: ¿Qué idiomas de interfaz están disponibles?**
+La forma más sencilla de iniciar el servidor de Dify es ejecutar nuestro archivo [docker-compose.yml](docker/docker-compose.yaml). Antes de ejecutar el comando de instalación, asegúrate de que [Docker](https://docs.docker.com/get-docker/) y [Docker Compose](https://docs.docker.com/compose/install/) estén instalados en tu máquina:
-R: Actualmente se admiten inglés y chino, y puedes contribuir con paquetes de idiomas.
+```bash
+cd docker
+docker compose up -d
+```
-## Historial de estrellas
+Después de ejecutarlo, puedes acceder al panel de control de Dify en tu navegador en [http://localhost/install](http://localhost/install) y comenzar el proceso de instalación de inicialización.
-[![Gráfico de historial de estrellas](https://api.star-history.com/svg?repos=langgenius/dify&type=Date)](https://star-history.com/#langgenius/dify&Date)
+### Gráfico Helm
-## Contáctanos
+Un gran agradecimiento a @BorisPolonsky por proporcionarnos una versión del [Gráfico Helm](https://helm.sh/), que permite implementar Dify en Kubernetes. Puedes visitar https://github.com/BorisPolonsky/dify-helm para obtener información sobre la implementación.
-Si tienes alguna pregunta, sugerencia o consulta sobre asociación, no dudes en contactarnos a través de los siguientes canales:
+### Configuración
-- Presentar un problema o una solicitud de extracción en nuestro repositorio de GitHub.
-- Únete a la discusión en nuestra comunidad de [Discord](https://discord.gg/FngNHpbcY7).
-- Envía un correo electrónico a hello@dify.ai.
+Si necesitas personalizar la configuración, consulta los comentarios en nuestro archivo [docker-compose.yml](docker/docker-compose.yaml) y configura manualmente la configuración del entorno. Después de realizar los cambios, ejecuta nuevamente `docker-compose up -d`. Puedes ver la lista completa de variables de entorno en nuestra [documentación](https://docs.dify.ai/getting-started/install-self-hosted/environments).
-¡Estamos ansiosos por ayudarte y crear juntos aplicaciones de IA más divertidas y útiles!
+## Historial de Estrellas
-## Contribuciones
+[![Gráfico de Historial de Estrellas](https://api.star-history.com/svg?repos=langgenius/dify&type=Date)](https://star-history.com/#langgenius/dify&Date)
-Para garantizar una revisión adecuada, todas las contribuciones de código, incluidas las de los colaboradores con acceso directo a los compromisos, deben enviarse mediante solicitudes de extracción y ser aprobadas por el equipo principal de
+## Comunidad y Soporte
- desarrollo antes de fusionarse.
+Te damos la bienvenida a contribuir a Dify para ayudar a hacer que Dify sea mejor de diversas maneras, enviando código, informando problemas, proponiendo nuevas ideas o compartiendo las aplicaciones de inteligencia artificial interesantes y útiles que hayas creado basadas en Dify. Al mismo tiempo, también te invitamos a compartir Dify en diferentes eventos, conferencias y redes sociales.
-¡Agradecemos todas las solicitudes de extracción! Si deseas ayudar, consulta la [Guía de Contribución](CONTRIBUTING.md) para obtener más información sobre cómo comenzar.
+- [Problemas en GitHub](https://github.com/langgenius/dify/issues). Lo mejor para: errores y problemas que encuentres al usar Dify.AI, consulta la [Guía de Contribución](CONTRIBUTING.md).
+- [Soporte por Correo Electrónico](mailto:hello@dify.ai?subject=[GitHub]Preguntas%20sobre%20Dify). Lo mejor para: preguntas que tengas sobre el uso de Dify.AI.
+- [Discord](https://discord.gg/FngNHpbcY7). Lo mejor para: compartir tus aplicaciones y socializar con la comunidad.
+- [Twitter](https://twitter.com/dify_ai). Lo mejor para: compartir tus aplicaciones y socializar con la comunidad.
+- [Licencia Comercial](mailto:business@dify.ai?subject=[GitHub]Consulta%20de%20Licencia%20Comercial). Lo mejor para: consultas comerciales sobre la licencia de Dify.AI para uso comercial.
-## Seguridad
+## Divulgación de Seguridad
Para proteger tu privacidad, evita publicar problemas de seguridad en GitHub. En su lugar, envía tus preguntas a security@dify.ai y te proporcionaremos una respuesta más detallada.
-## Citación
-
-Este software utiliza el siguiente software de código abierto:
-
-- Chase, H. (2022). LangChain [Software de computadora]. https://github.com/hwchase17/langchain
-
-Para obtener más información, consulta el sitio web oficial o el texto de la licencia del software correspondiente.
-
## Licencia
Este repositorio está disponible bajo la [Licencia de código abierto de Dify](LICENSE).
diff --git a/README_JA.md b/README_JA.md
index 8f7e983d6ead72..9eedd82efd391d 100644
--- a/README_JA.md
+++ b/README_JA.md
@@ -1,122 +1,103 @@
-![](./images/describe-en.png)
+[![](./images/describe.png)](https://dify.ai)
English |
简体中文 |
日本語 |
- Español
+ Español |
+ Klingon
-[Web サイト](https://dify.ai) • [ドキュメント](https://docs.dify.ai) • [Twitter](https://twitter.com/dify_ai) • [Discord](https://discord.gg/FngNHpbcY7)
+
+
+
+
+
+
+
+
+
+
+Difyは、既にDify.AI上で10万以上のアプリケーションが構築されているLLMアプリケーション開発プラットフォームです。バックエンド・アズ・ア・サービスとLLMOpsの概念を統合し、組み込みのRAGエンジンを含む、生成AIネイティブアプリケーションを構築するためのコアテックスタックをカバーしています。Difyを使用すると、どのLLMに基づいても、Assistants APIやGPTのような機能を自己デプロイすることができます。
-**Dify** は、より多くの人々が持続可能な AI ネイティブアプリケーションを作成できるように設計された、使いやすい LLMOps プラットフォームです。様々なアプリケーションタイプに対応したビジュアルオーケストレーションにより Dify は Backend-as-a-Service API としても機能する、すぐに使えるアプリケーションを提供します。プラグインやデータセットを統合するための1つの API で開発プロセスを統一し、プロンプトエンジニアリング、ビジュアル分析、継続的な改善のための1つのインターフェイスを使って業務を合理化します。
+なお、専門用語の翻訳では、言語ごとのニュアンスの違いにより、意味が多少異なる場合があります。
-Difyで作成したアプリケーションは以下の通りです:
+## クラウドサービスの利用
-フォームモードとチャット会話モードをサポートする、すぐに使える Web サイト
-プラグイン機能、コンテキストの強化などを網羅する単一の API により、バックエンドのコーディングの手間を省きます。
-アプリケーションの視覚的なデータ分析、ログレビュー、アノテーションが可能です。
-Dify は LangChain と互換性があり、複数の LLM を徐々にサポートします:
+[Dify.AI Cloud](https://dify.ai) を使用すると、オープンソース版の全機能を利用でき、さらに200GPTのトライアルクレジットが無料で提供されます。
-- GPT 3 (text-davinci-003)
-- GPT 3.5 Turbo(ChatGPT)
-- GPT-4
+## Difyの利点
-## クラウドサービスの利用
+Difyはモデルニュートラルであり、LangChainのようなハードコードされた開発ライブラリと比較して、完全にエンジニアリングされた技術スタックを特徴としています。OpenAIのAssistants APIとは異なり、Difyではサービスの完全なローカルデプロイメントが可能です。
+
+| 機能 | Dify.AI | Assistants API | LangChain |
+|---------|---------|----------------|-----------|
+| **プログラミングアプローチ** | API指向 | API指向 | Pythonコード指向 |
+| **エコシステム戦略** | オープンソース | 閉鎖的かつ商業的 | オープンソース |
+| **RAGエンジン** | サポート済み | サポート済み | 非サポート |
+| **プロンプトIDE** | 含まれる | 含まれる | なし |
+| **サポートされるLLMs** | 豊富な種類 | GPTのみ | 豊富な種類 |
+| **ローカルデプロイメント** | サポート済み | 非サポート | 該当なし |
+
+
+## 開始する前に
-[Dify.ai](https://dify.ai) をご覧ください
+- [Website](https://dify.ai)
+- [Docs](https://docs.dify.ai)
+- [Deployment Docs](https://docs.dify.ai/getting-started/install-self-hosted)
+- [FAQ](https://docs.dify.ai/getting-started/faq)
-## Community Edition のインストール
+
+## コミュニティエディションのインストール
### システム要件
-Dify をインストールする前に、お使いのマシンが以下の最低システム要件を満たしていることを確認してください:
+Difyをインストールする前に、以下の最低限のシステム要件を満たしていることを確認してください:
-- CPU >= 1 Core
+- CPU >= 2コア
- RAM >= 4GB
### クイックスタート
-Dify サーバーを起動する最も簡単な方法は、[docker-compose.yml](docker/docker-compose.yaml) ファイルを実行することです。インストールコマンドを実行する前に、[Docker](https://docs.docker.com/get-docker/) と [Docker Compose](https://docs.docker.com/compose/install/) がお使いのマシンにインストールされていることを確認してください:
+Difyサーバーを始める最も簡単な方法は、[docker-compose.yml](docker/docker-compose.yaml) ファイルを実行することです。インストールコマンドを実行する前に、マシンに [Docker](https://docs.docker.com/get-docker/) と [Docker Compose](https://docs.docker.com/compose/install/) がインストールされていることを確認してください:
```bash
cd docker
docker compose up -d
```
-実行後、ブラウザで [http://localhost/install](http://localhost/install) にアクセスし、初期化インストール作業を開始することができます。
+実行後、ブラウザで [http://localhost/install](http://localhost/install) にアクセスし、初期化インストールプロセスを開始できます。
### Helm Chart
-@BorisPolonsky に大感謝します。彼は Dify を Kubernetes 上にデプロイするための [Helm Chart](https://helm.sh/) バージョンを提供してくれました。
+@BorisPolonskyによる[Helm Chart](https://helm.sh/) バージョンを提供してくれて、大変感謝しています。これにより、DifyはKubernetes上にデプロイすることができます。
デプロイ情報については、https://github.com/BorisPolonsky/dify-helm をご覧ください。
-### 構成
-
-カスタマイズが必要な場合は、[docker-compose.yml](docker/docker-compose.yaml) ファイルのコメントを参照し、手動で環境設定をお願いします。変更後、再度 'docker-compose up -d' を実行してください。
-
-## ロードマップ
-
-開発中の機能:
-
-- **データセット**, Notionやウェブページからのコンテンツ同期など、より多くのデータセットをサポートします
-テキスト、ウェブページ、さらには Notion コンテンツなど、より多くのデータセットをサポートする予定です。ユーザーは、自分のデータソースをもとに AI アプリケーションを構築することができます。
-- **プラグイン**, アプリケーションに ChatGPT プラグイン標準のプラグインを導入する、または Dify 制作のプラグインを利用する
-今後、ChatGPT 規格に準拠したプラグインや、ディファイ独自のプラグインを公開し、より多くの機能をアプリケーションで実現できるようにします。
-- **オープンソースモデル**, 例えばモデルプロバイダーとして Llama を採用したり、さらにファインチューニングを行う
-Llama のような優れたオープンソースモデルを、私たちのプラットフォームのモデルオプションとして提供したり、さらなる微調整のために使用したりすることで、協力していきます。
-
-
-## Q&A
+### 設定
-**Q: Dify で何ができるのか?**
+設定をカスタマイズする必要がある場合は、[docker-compose.yml](docker/docker-compose.yaml) ファイルのコメントを参照し、環境設定を手動で行ってください。変更を行った後は、もう一度 `docker-compose up -d` を実行してください。環境変数の完全なリストは、[ドキュメント](https://docs.dify.ai/getting-started/install-self-hosted/environments)で確認できます。
-A: Dify はシンプルでパワフルな LLM 開発・運用ツールです。商用グレードのアプリケーション、パーソナルアシスタントを構築するために使用することができます。独自のアプリケーションを開発したい場合、LangDifyGenius は OpenAI と統合する際のバックエンド作業を省き、視覚的な操作機能を提供し、GPT モデルを継続的に改善・訓練することが可能です。
-**Q: Dify を使って、自分のモデルを「トレーニング」するにはどうすればいいのでしょうか?**
-
-A: プロンプトエンジニアリング、コンテキスト拡張、ファインチューニングからなる価値あるアプリケーションです。プロンプトとプログラミング言語を組み合わせたハイブリッドプログラミングアプローチ(テンプレートエンジンのようなもの)で、長文の埋め込みやユーザー入力の YouTube 動画からの字幕取り込みなどを簡単に実現し、これらはすべて LLM が処理するコンテキストとして提出される予定です。また、アプリケーションの操作性を重視し、ユーザーがアプリケーションを使用する際に生成したデータを分析、アノテーション、継続的なトレーニングに利用できるようにしました。適切なツールがなければ、これらのステップに時間がかかることがあります。
-
-**Q: 自分でアプリケーションを作りたい場合、何を準備すればよいですか?**
-
-A: すでに OpenAI API Key をお持ちだと思いますが、お持ちでない場合はご登録ください。もし、すでにトレーニングのコンテキストとなるコンテンツをお持ちでしたら、それは素晴らしいことです!
-
-**Q: インターフェイスにどの言語が使えますか?**
-
-A: 現在、英語と中国語に対応しており、言語パックを寄贈することも可能です。
-
-## Star ヒストリー
+## スターヒストリー
[![Star History Chart](https://api.star-history.com/svg?repos=langgenius/dify&type=Date)](https://star-history.com/#langgenius/dify&Date)
-## お問合せ
-
-ご質問、ご提案、パートナーシップに関するお問い合わせは、以下のチャンネルからお気軽にご連絡ください:
+## コミュニティとサポート
-- GitHub Repo で Issue や PR を提出する
-- [Discord](https://discord.gg/FngNHpbcY7) コミュニティで議論に参加する。
-- hello@dify.ai にメールを送信します
+Difyに貢献していただき、コードの提出、問題の報告、新しいアイデアの提供、またはDifyを基に作成した興味深く有用なAIアプリケーションの共有により、Difyをより良いものにするお手伝いを歓迎します。同時に、さまざまなイベント、会議、ソーシャルメディアでDifyを共有することも歓迎します。
-私たちは、皆様のお手伝いをさせていただき、より楽しく、より便利な AI アプリケーションを一緒に作っていきたいと思っています!
-
-## コントリビュート
-
-適切なレビューを行うため、コミットへの直接アクセスが可能なコントリビュータを含むすべてのコードコントリビュータは、プルリクエストで提出し、マージされる前にコア開発チームによって承認される必要があります。
-
-私たちはすべてのプルリクエストを歓迎します!協力したい方は、[コントリビューションガイド](CONTRIBUTING.md) をチェックしてみてください。
+- [GitHub Issues](https://github.com/langgenius/dify/issues)。最適な使用法:Dify.AIの使用中に遭遇するバグやエラー、[貢献ガイド](CONTRIBUTING.md)を参照。
+- [Email サポート](mailto:hello@dify.ai?subject=[GitHub]Questions%20About%20Dify)。最適な使用法:Dify.AIの使用に関する質問。
+- [Discord](https://discord.gg/FngNHpbcY7)。最適な使用法:アプリケーションの共有とコミュニティとの交流。
+- [Twitter](https://twitter.com/dify_ai)。最適な使用法:アプリケーションの共有とコミュニティとの交流。
+- [ビジネスライセンス](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry)。最適な使用法:Dify.AIを商業利用するためのビジネス関連の問い合わせ。
## セキュリティ
プライバシー保護のため、GitHub へのセキュリティ問題の投稿は避けてください。代わりに、あなたの質問を security@dify.ai に送ってください。より詳細な回答を提供します。
-## 引用
-
-本ソフトウェアは、以下のオープンソースソフトウェアを使用しています:
-
-- Chase, H. (2022). LangChain [Computer software]. https://github.com/hwchase17/langchain
-
-詳しくは、各ソフトウェアの公式サイトまたはライセンス文をご参照ください。
-
## ライセンス
このリポジトリは、[Dify Open Source License](LICENSE) のもとで利用できます。
diff --git a/README_KL.md b/README_KL.md
new file mode 100644
index 00000000000000..d492368e3f2b18
--- /dev/null
+++ b/README_KL.md
@@ -0,0 +1,114 @@
+[![](./images/describe.png)](https://dify.ai)
+
+ English |
+ 简体中文 |
+ 日本語 |
+ Español |
+ Klingon
+
+
+
+
+
+
+
+
+
+
+
+
+
+**Dify** Hoch LLM qorwI' pIqoDvam pagh laHta' je **100,000** pIqoDvamvam Dify.AI De'wI'. Dify leghpu' Backend chu' a Service teH LLMOps vItlhutlh, generative AI-native pIqoD teq wa'vam, vIyoD Built-in RAG engine. Dify, **'ej chenmoHmoH Hoch 'oHna' Assistant API 'ej GPTmey HoStaHbogh LLMmey.**
+
+![](./images/demo.png)
+
+## ngIl QaQ
+
+[Dify.AI ngIl](https://dify.ai) pIm neHlaH 'ej ghaH. cha'logh wa' DIvI' 200 GPT trial credits.
+
+## Dify WovmoH
+
+Dify Daq rIn neutrality 'ej Hoch, LangChain tInHar HubwI'. maH Daqbe'law' Qawqar, OpenAI's Assistant API Daq local neH deployment.
+
+| Qo'logh | Dify.AI | Assistants API | LangChain |
+|---------|---------|----------------|-----------|
+| **qet QaS** | API-oriented | API-oriented | Python Code-oriented |
+| **Ecosystem Strategy** | Open Source | Closed and Commercial | Open Source |
+| **RAG Engine** | Ha'qu' | Ha'qu' | ghoS Ha'qu' |
+| **Prompt IDE** | jaH Include | jaH Include | qeylIS qaq |
+| **qet LLMmey** | bo'Degh Hoch | GPTmey tIn | bo'Degh Hoch |
+| **local deployment** | Ha'qu' | tInHa'qu' | tInHa'qu' ghogh |
+
+## ruch
+
+![](./images/models.png)
+
+**1. LLM tIq**: OpenAI's GPT Hur nISmoHvam neH vIngeH, wa' Llama2 Hur nISmoHvam. Heghlu'lu'pu' Dify mIw 'oH choH qay'be'.Daq commercial Hurmey 'ej Open Source Hurmey (maqtaHvIS pagh locally neH neH deployment HoSvam).
+
+**2. Prompt IDE**: cha'logh wa' LLMmey Hoch janlu'pu' 'ej lughpu' choH qay'be'.
+
+**3. RAG Engine**: RAG vaD tIqpu' lo'taH indexing qor neH vector database wa' embeddings wIj, PDFs, TXTs, 'ej ghojmoHmoH HIq qorlIj je upload.
+
+**4. jenSuvpu'**: jenbe' SuDqang naQ moDwu' jenSuvpu' porgh cha'logh choHvam. Dify Google Search Hur vItlhutlh plugin choH.
+
+**5. QaS muDHa'wI'**: cha'logh wa' pIq mI' logs 'ej quv yIn, vItlhutlh tIq 'e'wIj lo'taHmoHmoH Prompts, vItlhutlh, Hurmey ghaH production data jatlh.
+
+## Do'wI' qabmey lo'taH
+
+- [Website](https://dify.ai)
+- [Docs](https://docs.dify.ai)
+- [lo'taHmoH Docs](https://docs.dify.ai/getting-started/install-self-hosted)
+- [FAQ](https://docs.dify.ai/getting-started/faq)
+
+## Community Edition tu' yo'
+
+### System Qab
+
+Dify yo' yo' qaqmeH SuS chenmoH 'oH qech!
+
+- CPU >= 2 Cores
+- RAM >= 4GB
+
+### Quick Start
+
+Dify server luHoHtaHlu' vIngeH lo'laHbe'chugh vIyoD [docker-compose.yml](docker/docker-compose.yaml) QorwI'ghach. toH yItlhutlh chenmoH luH!chugh 'ay' vaj vIneHmeH, 'ej [Docker](https://docs.docker.com/get-docker/) 'ej [Docker Compose](https://docs.docker.com/compose/install/) vaj 'oH 'e' vIneHmeH:
+
+```bash
+cd docker
+docker compose up -d
+```
+
+luHoHtaHmeH HoHtaHvIS, Dify dashboard vIneHmeH vIngeH lI'wI' [http://localhost/install](http://localhost/install) 'ej 'oH initialization 'e' vIneHmeH.
+
+### Helm Chart
+
+@BorisPolonsky Dify wIq tIq ['ay'var (Helm Chart)](https://helm.sh/) version Hur yIn chu' Dify luHoHchu'. Heghlu'lu' vIneHmeH [https://github.com/BorisPolonsky/dify-helm](https://github.com/BorisPolonsky/dify-helm) 'ej vaj QaS deployment information.
+
+### veS config
+
+chenmoHDI' config lo'taH ghaH, vItlhutlh HIq wIgharghbe'lu'pu'. toH lo'taHvIS pagh vay' vIneHmeH, 'ej `docker-compose up -d` wa'DIch. tIqmoHmeH list full wa' lo'taHvo'lu'pu' ghaH [docs](https://docs.dify.ai/getting-started/install-self-hosted/environments).
+
+## tIng qem
+
+[![tIng qem Hur Chart](https://api.star-history.com/svg?repos=langgenius/dify&type=Date)](https://star-history.com/#langgenius/dify&Date)
+
+## choHmoH 'ej vItlhutlh
+
+Dify choHmoH je mIw Dify puqloD, Dify ghaHta'bogh vItlhutlh, HurDI' code, ghItlh, ghItlh qo'lu'pu'pu' qej. tIqmeH, Hurmey je, Dify Hur tIqDI' woDDaj, DuD QangmeH 'ej HInobDaq vItlhutlh HImej Dify'e'.
+
+- [GitHub vItlhutlh](https://github.com/langgenius/dify/issues). Hurmey: bugs 'ej errors Dify.AI tIqmeH. yImej [Contribution Guide](CONTRIBUTING.md).
+- [Email QaH](mailto:hello@dify.ai?subject=[GitHub]Questions%20About%20Dify). Hurmey: questions vItlhutlh Dify.AI chaw'.
+- [Discord](https://discord.gg/FngNHpbcY7). Hurmey: jIpuv 'ej jImej mIw Dify vItlhutlh.
+- [Twitter](https://twitter.com/dify_ai). Hurmey: jIpuv 'ej jImej mIw Dify vItlhutlh.
+- [Business License](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry). Hurmey: qurgh vItlhutlh Hurmey Dify.AI tIqbe'law'.
+
+## bIQDaqmey bom
+
+taghlI' vIngeH'a'? pong security 'oH posting GitHub. yItlhutlh, toH security@dify.ai 'ej vIngeH'a'.
+
+## License
+
+ghItlh puqloD chenmoH [Dify vItlhutlh Hur](LICENSE), ghaH nIvbogh Apache 2.0.
+
diff --git a/api/app.py b/api/app.py
index 0a4381320128e7..a0a09994dfdbb9 100644
--- a/api/app.py
+++ b/api/app.py
@@ -10,6 +10,7 @@
import grpc.experimental.gevent
grpc.experimental.gevent.init_gevent()
+import time
import logging
import json
import threading
@@ -36,6 +37,13 @@
import warnings
warnings.simplefilter("ignore", ResourceWarning)
+# fix windows platform
+if os.name == "nt":
+ os.system('tzutil /s "UTC"')
+else:
+ os.environ['TZ'] = 'UTC'
+ time.tzset()
+
class DifyApp(Flask):
pass
diff --git a/api/commands.py b/api/commands.py
index 105f936562c5e0..35b5c5d5f8a257 100644
--- a/api/commands.py
+++ b/api/commands.py
@@ -8,6 +8,8 @@
import uuid
import click
+import qdrant_client
+from qdrant_client.http.models import TextIndexParams, TextIndexType, TokenizerType
from tqdm import tqdm
from flask import current_app, Flask
from langchain.embeddings import OpenAIEmbeddings
@@ -484,6 +486,38 @@ def normalization_collections():
click.echo(click.style('Congratulations! restore {} dataset indexes.'.format(len(normalization_count)), fg='green'))
+@click.command('add-qdrant-full-text-index', help='add qdrant full text index')
+def add_qdrant_full_text_index():
+ click.echo(click.style('Start add full text index.', fg='green'))
+ binds = db.session.query(DatasetCollectionBinding).all()
+ if binds and current_app.config['VECTOR_STORE'] == 'qdrant':
+ qdrant_url = current_app.config['QDRANT_URL']
+ qdrant_api_key = current_app.config['QDRANT_API_KEY']
+ client = qdrant_client.QdrantClient(
+ qdrant_url,
+ api_key=qdrant_api_key, # For Qdrant Cloud, None for local instance
+ )
+ for bind in binds:
+ try:
+ text_index_params = TextIndexParams(
+ type=TextIndexType.TEXT,
+ tokenizer=TokenizerType.MULTILINGUAL,
+ min_token_len=2,
+ max_token_len=20,
+ lowercase=True
+ )
+ client.create_payload_index(bind.collection_name, 'page_content',
+ field_schema=text_index_params)
+ except Exception as e:
+ click.echo(
+ click.style('Create full text index error: {} {}'.format(e.__class__.__name__, str(e)),
+ fg='red'))
+ click.echo(
+ click.style(
+ 'Congratulations! add collection {} full text index successful.'.format(bind.collection_name),
+ fg='green'))
+
+
def deal_dataset_vector(flask_app: Flask, dataset: Dataset, normalization_count: list):
with flask_app.app_context():
try:
@@ -647,10 +681,10 @@ def update_app_model_configs(batch_size):
pbar.update(len(data_batch))
+
@click.command('migrate_default_input_to_dataset_query_variable')
@click.option("--batch-size", default=500, help="Number of records to migrate in each batch.")
def migrate_default_input_to_dataset_query_variable(batch_size):
-
click.secho("Starting...", fg='green')
total_records = db.session.query(AppModelConfig) \
@@ -658,13 +692,13 @@ def migrate_default_input_to_dataset_query_variable(batch_size):
.filter(App.mode == 'completion') \
.filter(AppModelConfig.dataset_query_variable == None) \
.count()
-
+
if total_records == 0:
click.secho("No data to migrate.", fg='green')
return
num_batches = (total_records + batch_size - 1) // batch_size
-
+
with tqdm(total=total_records, desc="Migrating Data") as pbar:
for i in range(num_batches):
offset = i * batch_size
@@ -697,14 +731,14 @@ def migrate_default_input_to_dataset_query_variable(batch_size):
for form in user_input_form:
paragraph = form.get('paragraph')
if paragraph \
- and paragraph.get('variable') == 'query':
- data.dataset_query_variable = 'query'
- break
-
+ and paragraph.get('variable') == 'query':
+ data.dataset_query_variable = 'query'
+ break
+
if paragraph \
- and paragraph.get('variable') == 'default_input':
- data.dataset_query_variable = 'default_input'
- break
+ and paragraph.get('variable') == 'default_input':
+ data.dataset_query_variable = 'default_input'
+ break
db.session.commit()
@@ -712,7 +746,7 @@ def migrate_default_input_to_dataset_query_variable(batch_size):
click.secho(f"Error while migrating data: {e}, app_id: {data.app_id}, app_model_config_id: {data.id}",
fg='red')
continue
-
+
click.secho(f"Successfully migrated batch {i + 1}/{num_batches}.", fg='green')
pbar.update(len(data_batch))
@@ -731,3 +765,4 @@ def register_commands(app):
app.cli.add_command(update_app_model_configs)
app.cli.add_command(normalization_collections)
app.cli.add_command(migrate_default_input_to_dataset_query_variable)
+ app.cli.add_command(add_qdrant_full_text_index)
diff --git a/api/config.py b/api/config.py
index ea800c3f98b01e..25c6fb895f216d 100644
--- a/api/config.py
+++ b/api/config.py
@@ -61,6 +61,7 @@
'UPLOAD_IMAGE_FILE_SIZE_LIMIT': 10,
'OUTPUT_MODERATION_BUFFER_SIZE': 300,
'MULTIMODAL_SEND_IMAGE_FORMAT': 'base64',
+ 'INVITE_EXPIRY_HOURS': 72,
'QA_MODEL_CONCURRENCY':1
}
@@ -91,7 +92,7 @@ def __init__(self):
# ------------------------
# General Configurations.
# ------------------------
- self.CURRENT_VERSION = "0.3.30"
+ self.CURRENT_VERSION = "0.3.31"
self.COMMIT_SHA = get_env('COMMIT_SHA')
self.EDITION = "SELF_HOSTED"
self.DEPLOY_ENV = get_env('DEPLOY_ENV')
@@ -219,6 +220,11 @@ def __init__(self):
self.MAIL_TYPE = get_env('MAIL_TYPE')
self.MAIL_DEFAULT_SEND_FROM = get_env('MAIL_DEFAULT_SEND_FROM')
self.RESEND_API_KEY = get_env('RESEND_API_KEY')
+
+ # ------------------------
+ # Workspace Configurations.
+ # ------------------------
+ self.INVITE_EXPIRY_HOURS = int(get_env('INVITE_EXPIRY_HOURS'))
# ------------------------
# Sentry Configurations.
diff --git a/api/controllers/console/app/statistic.py b/api/controllers/console/app/statistic.py
index a0c9553977bbde..1a9c50db732519 100644
--- a/api/controllers/console/app/statistic.py
+++ b/api/controllers/console/app/statistic.py
@@ -62,16 +62,15 @@ def get(self, app_id):
sql_query += ' GROUP BY date order by date'
- with db.engine.begin() as conn:
- rs = conn.execute(db.text(sql_query), arg_dict)
-
response_data = []
- for i in rs:
- response_data.append({
- 'date': str(i.date),
- 'conversation_count': i.conversation_count
- })
+ with db.engine.begin() as conn:
+ rs = conn.execute(db.text(sql_query), arg_dict)
+ for i in rs:
+ response_data.append({
+ 'date': str(i.date),
+ 'conversation_count': i.conversation_count
+ })
return jsonify({
'data': response_data
@@ -124,16 +123,15 @@ def get(self, app_id):
sql_query += ' GROUP BY date order by date'
- with db.engine.begin() as conn:
- rs = conn.execute(db.text(sql_query), arg_dict)
-
response_data = []
- for i in rs:
- response_data.append({
- 'date': str(i.date),
- 'terminal_count': i.terminal_count
- })
+ with db.engine.begin() as conn:
+ rs = conn.execute(db.text(sql_query), arg_dict)
+ for i in rs:
+ response_data.append({
+ 'date': str(i.date),
+ 'terminal_count': i.terminal_count
+ })
return jsonify({
'data': response_data
@@ -187,18 +185,17 @@ def get(self, app_id):
sql_query += ' GROUP BY date order by date'
- with db.engine.begin() as conn:
- rs = conn.execute(db.text(sql_query), arg_dict)
-
response_data = []
- for i in rs:
- response_data.append({
- 'date': str(i.date),
- 'token_count': i.token_count,
- 'total_price': i.total_price,
- 'currency': 'USD'
- })
+ with db.engine.begin() as conn:
+ rs = conn.execute(db.text(sql_query), arg_dict)
+ for i in rs:
+ response_data.append({
+ 'date': str(i.date),
+ 'token_count': i.token_count,
+ 'total_price': i.total_price,
+ 'currency': 'USD'
+ })
return jsonify({
'data': response_data
@@ -256,16 +253,15 @@ def get(self, app_id):
GROUP BY date
ORDER BY date"""
+ response_data = []
+
with db.engine.begin() as conn:
rs = conn.execute(db.text(sql_query), arg_dict)
-
- response_data = []
-
- for i in rs:
- response_data.append({
- 'date': str(i.date),
- 'interactions': float(i.interactions.quantize(Decimal('0.01')))
- })
+ for i in rs:
+ response_data.append({
+ 'date': str(i.date),
+ 'interactions': float(i.interactions.quantize(Decimal('0.01')))
+ })
return jsonify({
'data': response_data
@@ -320,20 +316,19 @@ def get(self, app_id):
sql_query += ' GROUP BY date order by date'
- with db.engine.begin() as conn:
- rs = conn.execute(db.text(sql_query), arg_dict)
-
response_data = []
- for i in rs:
- response_data.append({
- 'date': str(i.date),
- 'rate': round((i.feedback_count * 1000 / i.message_count) if i.message_count > 0 else 0, 2),
- })
+ with db.engine.begin() as conn:
+ rs = conn.execute(db.text(sql_query), arg_dict)
+ for i in rs:
+ response_data.append({
+ 'date': str(i.date),
+ 'rate': round((i.feedback_count * 1000 / i.message_count) if i.message_count > 0 else 0, 2),
+ })
return jsonify({
- 'data': response_data
- })
+ 'data': response_data
+ })
class AverageResponseTimeStatistic(Resource):
@@ -383,16 +378,15 @@ def get(self, app_id):
sql_query += ' GROUP BY date order by date'
- with db.engine.begin() as conn:
- rs = conn.execute(db.text(sql_query), arg_dict)
-
response_data = []
- for i in rs:
- response_data.append({
- 'date': str(i.date),
- 'latency': round(i.latency * 1000, 4)
- })
+ with db.engine.begin() as conn:
+ rs = conn.execute(db.text(sql_query), arg_dict)
+ for i in rs:
+ response_data.append({
+ 'date': str(i.date),
+ 'latency': round(i.latency * 1000, 4)
+ })
return jsonify({
'data': response_data
@@ -447,16 +441,15 @@ def get(self, app_id):
sql_query += ' GROUP BY date order by date'
- with db.engine.begin() as conn:
- rs = conn.execute(db.text(sql_query), arg_dict)
-
response_data = []
- for i in rs:
- response_data.append({
- 'date': str(i.date),
- 'tps': round(i.tokens_per_second, 4)
- })
+ with db.engine.begin() as conn:
+ rs = conn.execute(db.text(sql_query), arg_dict)
+ for i in rs:
+ response_data.append({
+ 'date': str(i.date),
+ 'tps': round(i.tokens_per_second, 4)
+ })
return jsonify({
'data': response_data
diff --git a/api/controllers/console/datasets/datasets.py b/api/controllers/console/datasets/datasets.py
index 7aa2a7bfc46d93..9a417b36606bf3 100644
--- a/api/controllers/console/datasets/datasets.py
+++ b/api/controllers/console/datasets/datasets.py
@@ -170,6 +170,7 @@ def patch(self, dataset_id):
help='Invalid indexing technique.')
parser.add_argument('permission', type=str, location='json', choices=(
'only_me', 'all_team_members'), help='Invalid permission.')
+ parser.add_argument('retrieval_model', type=dict, location='json', help='Invalid retrieval model.')
args = parser.parse_args()
# The role of the current user in the ta table must be admin or owner
@@ -401,6 +402,7 @@ def post(self):
class DatasetApiDeleteApi(Resource):
resource_type = 'dataset'
+
@setup_required
@login_required
@account_initialization_required
@@ -436,6 +438,50 @@ def get(self):
}
+class DatasetRetrievalSettingApi(Resource):
+ @setup_required
+ @login_required
+ @account_initialization_required
+ def get(self):
+ vector_type = current_app.config['VECTOR_STORE']
+ if vector_type == 'milvus':
+ return {
+ 'retrieval_method': [
+ 'semantic_search'
+ ]
+ }
+ elif vector_type == 'qdrant' or vector_type == 'weaviate':
+ return {
+ 'retrieval_method': [
+ 'semantic_search', 'full_text_search', 'hybrid_search'
+ ]
+ }
+ else:
+ raise ValueError("Unsupported vector db type.")
+
+
+class DatasetRetrievalSettingMockApi(Resource):
+ @setup_required
+ @login_required
+ @account_initialization_required
+ def get(self, vector_type):
+
+ if vector_type == 'milvus':
+ return {
+ 'retrieval_method': [
+ 'semantic_search'
+ ]
+ }
+ elif vector_type == 'qdrant' or vector_type == 'weaviate':
+ return {
+ 'retrieval_method': [
+ 'semantic_search', 'full_text_search', 'hybrid_search'
+ ]
+ }
+ else:
+ raise ValueError("Unsupported vector db type.")
+
+
api.add_resource(DatasetListApi, '/datasets')
api.add_resource(DatasetApi, '/datasets/')
api.add_resource(DatasetQueryApi, '/datasets//queries')
@@ -445,3 +491,5 @@ def get(self):
api.add_resource(DatasetApiKeyApi, '/datasets/api-keys')
api.add_resource(DatasetApiDeleteApi, '/datasets/api-keys/')
api.add_resource(DatasetApiBaseUrlApi, '/datasets/api-base-info')
+api.add_resource(DatasetRetrievalSettingApi, '/datasets/retrieval-setting')
+api.add_resource(DatasetRetrievalSettingMockApi, '/datasets/retrieval-setting/')
diff --git a/api/controllers/console/datasets/datasets_document.py b/api/controllers/console/datasets/datasets_document.py
index e85433c83ff591..0f5634de4d5970 100644
--- a/api/controllers/console/datasets/datasets_document.py
+++ b/api/controllers/console/datasets/datasets_document.py
@@ -221,6 +221,8 @@ def post(self, dataset_id):
parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False, location='json')
parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False,
location='json')
+ parser.add_argument('retrieval_model', type=dict, required=False, nullable=False,
+ location='json')
args = parser.parse_args()
if not dataset.indexing_technique and not args['indexing_technique']:
@@ -263,6 +265,8 @@ def post(self):
parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False, location='json')
parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False,
location='json')
+ parser.add_argument('retrieval_model', type=dict, required=False, nullable=False,
+ location='json')
args = parser.parse_args()
if args['indexing_technique'] == 'high_quality':
try:
diff --git a/api/controllers/console/datasets/hit_testing.py b/api/controllers/console/datasets/hit_testing.py
index 6d3397e16f1b59..ce4897100d9df9 100644
--- a/api/controllers/console/datasets/hit_testing.py
+++ b/api/controllers/console/datasets/hit_testing.py
@@ -42,19 +42,18 @@ def post(self, dataset_id):
parser = reqparse.RequestParser()
parser.add_argument('query', type=str, location='json')
+ parser.add_argument('retrieval_model', type=dict, required=False, location='json')
args = parser.parse_args()
- query = args['query']
-
- if not query or len(query) > 250:
- raise ValueError('Query is required and cannot exceed 250 characters')
+ HitTestingService.hit_testing_args_check(args)
try:
response = HitTestingService.retrieve(
dataset=dataset,
- query=query,
+ query=args['query'],
account=current_user,
- limit=10,
+ retrieval_model=args['retrieval_model'],
+ limit=10
)
return {"query": response['query'], 'records': marshal(response['records'], hit_testing_record_fields)}
@@ -68,7 +67,7 @@ def post(self, dataset_id):
raise ProviderModelCurrentlyNotSupportError()
except LLMBadRequestError:
raise ProviderNotInitializeError(
- f"No Embedding Model available. Please configure a valid provider "
+ f"No Embedding Model or Reranking Model available. Please configure a valid provider "
f"in the Settings -> Model Provider.")
except ValueError as e:
raise ValueError(str(e))
diff --git a/api/controllers/console/workspace/model_providers.py b/api/controllers/console/workspace/model_providers.py
index bdc08ae6dd86af..749ecd64229d47 100644
--- a/api/controllers/console/workspace/model_providers.py
+++ b/api/controllers/console/workspace/model_providers.py
@@ -21,8 +21,12 @@ class ModelProviderListApi(Resource):
def get(self):
tenant_id = current_user.current_tenant_id
+ parser = reqparse.RequestParser()
+ parser.add_argument('model_type', type=str, required=False, nullable=True, location='args')
+ args = parser.parse_args()
+
provider_service = ProviderService()
- provider_list = provider_service.get_provider_list(tenant_id)
+ provider_list = provider_service.get_provider_list(tenant_id=tenant_id, model_type=args.get('model_type'))
return provider_list
diff --git a/api/controllers/console/workspace/models.py b/api/controllers/console/workspace/models.py
index 7099b8f23dbfc4..6df99ebe1bda4f 100644
--- a/api/controllers/console/workspace/models.py
+++ b/api/controllers/console/workspace/models.py
@@ -1,3 +1,5 @@
+import logging
+
from flask_login import current_user
from libs.login import login_required
from flask_restful import Resource, reqparse
@@ -19,7 +21,7 @@ class DefaultModelApi(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('model_type', type=str, required=True, nullable=False,
- choices=['text-generation', 'embeddings', 'speech2text'], location='args')
+ choices=['text-generation', 'embeddings', 'speech2text', 'reranking'], location='args')
args = parser.parse_args()
tenant_id = current_user.current_tenant_id
@@ -71,19 +73,21 @@ def get(self):
@account_initialization_required
def post(self):
parser = reqparse.RequestParser()
- parser.add_argument('model_name', type=str, required=True, nullable=False, location='json')
- parser.add_argument('model_type', type=str, required=True, nullable=False,
- choices=['text-generation', 'embeddings', 'speech2text'], location='json')
- parser.add_argument('provider_name', type=str, required=True, nullable=False, location='json')
+ parser.add_argument('model_settings', type=list, required=True, nullable=False, location='json')
args = parser.parse_args()
provider_service = ProviderService()
- provider_service.update_default_model_of_model_type(
- tenant_id=current_user.current_tenant_id,
- model_type=args['model_type'],
- provider_name=args['provider_name'],
- model_name=args['model_name']
- )
+ model_settings = args['model_settings']
+ for model_setting in model_settings:
+ try:
+ provider_service.update_default_model_of_model_type(
+ tenant_id=current_user.current_tenant_id,
+ model_type=model_setting['model_type'],
+ provider_name=model_setting['provider_name'],
+ model_name=model_setting['model_name']
+ )
+ except Exception:
+ logging.warning(f"{model_setting['model_type']} save error")
return {'result': 'success'}
diff --git a/api/controllers/service_api/app/conversation.py b/api/controllers/service_api/app/conversation.py
index 2fdddef8a10081..3e9aa07da6e5c5 100644
--- a/api/controllers/service_api/app/conversation.py
+++ b/api/controllers/service_api/app/conversation.py
@@ -67,7 +67,7 @@ def post(self, app_model, end_user, c_id):
parser = reqparse.RequestParser()
parser.add_argument('name', type=str, required=False, location='json')
parser.add_argument('user', type=str, location='json')
- parser.add_argument('auto_generate', type=bool, required=False, default='False', location='json')
+ parser.add_argument('auto_generate', type=bool, required=False, default=False, location='json')
args = parser.parse_args()
if end_user is None and args['user'] is not None:
diff --git a/api/controllers/service_api/app/file.py b/api/controllers/service_api/app/file.py
index f928e083a5d933..b2cb7a05f902c5 100644
--- a/api/controllers/service_api/app/file.py
+++ b/api/controllers/service_api/app/file.py
@@ -26,6 +26,9 @@ def post(self, app_model, end_user):
if 'file' not in request.files:
raise NoFileUploadedError()
+ if not file.mimetype:
+ raise UnsupportedFileTypeError()
+
if len(request.files) > 1:
raise TooManyFilesError()
diff --git a/api/controllers/service_api/dataset/document.py b/api/controllers/service_api/dataset/document.py
index 28545a36ab4677..e900e84a01e29d 100644
--- a/api/controllers/service_api/dataset/document.py
+++ b/api/controllers/service_api/dataset/document.py
@@ -36,6 +36,8 @@ def post(self, tenant_id, dataset_id):
location='json')
parser.add_argument('indexing_technique', type=str, choices=Dataset.INDEXING_TECHNIQUE_LIST, nullable=False,
location='json')
+ parser.add_argument('retrieval_model', type=dict, required=False, nullable=False,
+ location='json')
args = parser.parse_args()
dataset_id = str(dataset_id)
tenant_id = str(tenant_id)
@@ -95,6 +97,8 @@ def post(self, tenant_id, dataset_id, document_id):
parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False, location='json')
parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False,
location='json')
+ parser.add_argument('retrieval_model', type=dict, required=False, nullable=False,
+ location='json')
args = parser.parse_args()
dataset_id = str(dataset_id)
tenant_id = str(tenant_id)
diff --git a/api/core/agent/agent/multi_dataset_router_agent.py b/api/core/agent/agent/multi_dataset_router_agent.py
index 16b4a2ab248786..4fb71211270df1 100644
--- a/api/core/agent/agent/multi_dataset_router_agent.py
+++ b/api/core/agent/agent/multi_dataset_router_agent.py
@@ -14,7 +14,6 @@
from core.model_providers.models.entity.message import to_prompt_messages
from core.model_providers.models.llm.base import BaseLLM
from core.third_party.langchain.llms.fake import FakeLLM
-from core.tool.dataset_retriever_tool import DatasetRetrieverTool
class MultiDatasetRouterAgent(OpenAIFunctionsAgent):
@@ -60,7 +59,6 @@ def plan(
return AgentFinish(return_values={"output": ''}, log='')
elif len(self.tools) == 1:
tool = next(iter(self.tools))
- tool = cast(DatasetRetrieverTool, tool)
rst = tool.run(tool_input={'query': kwargs['input']})
# output = ''
# rst_json = json.loads(rst)
diff --git a/api/core/agent/agent/output_parser/retriever_dataset_agent.py b/api/core/agent/agent/output_parser/retriever_dataset_agent.py
new file mode 100644
index 00000000000000..16b4a2ab248786
--- /dev/null
+++ b/api/core/agent/agent/output_parser/retriever_dataset_agent.py
@@ -0,0 +1,158 @@
+import json
+from typing import Tuple, List, Any, Union, Sequence, Optional, cast
+
+from langchain.agents import OpenAIFunctionsAgent, BaseSingleActionAgent
+from langchain.agents.openai_functions_agent.base import _format_intermediate_steps, _parse_ai_message
+from langchain.callbacks.base import BaseCallbackManager
+from langchain.callbacks.manager import Callbacks
+from langchain.prompts.chat import BaseMessagePromptTemplate
+from langchain.schema import AgentAction, AgentFinish, SystemMessage, Generation, LLMResult, AIMessage
+from langchain.schema.language_model import BaseLanguageModel
+from langchain.tools import BaseTool
+from pydantic import root_validator
+
+from core.model_providers.models.entity.message import to_prompt_messages
+from core.model_providers.models.llm.base import BaseLLM
+from core.third_party.langchain.llms.fake import FakeLLM
+from core.tool.dataset_retriever_tool import DatasetRetrieverTool
+
+
+class MultiDatasetRouterAgent(OpenAIFunctionsAgent):
+ """
+ An Multi Dataset Retrieve Agent driven by Router.
+ """
+ model_instance: BaseLLM
+
+ class Config:
+ """Configuration for this pydantic object."""
+
+ arbitrary_types_allowed = True
+
+ @root_validator
+ def validate_llm(cls, values: dict) -> dict:
+ return values
+
+ def should_use_agent(self, query: str):
+ """
+ return should use agent
+
+ :param query:
+ :return:
+ """
+ return True
+
+ def plan(
+ self,
+ intermediate_steps: List[Tuple[AgentAction, str]],
+ callbacks: Callbacks = None,
+ **kwargs: Any,
+ ) -> Union[AgentAction, AgentFinish]:
+ """Given input, decided what to do.
+
+ Args:
+ intermediate_steps: Steps the LLM has taken to date, along with observations
+ **kwargs: User inputs.
+
+ Returns:
+ Action specifying what tool to use.
+ """
+ if len(self.tools) == 0:
+ return AgentFinish(return_values={"output": ''}, log='')
+ elif len(self.tools) == 1:
+ tool = next(iter(self.tools))
+ tool = cast(DatasetRetrieverTool, tool)
+ rst = tool.run(tool_input={'query': kwargs['input']})
+ # output = ''
+ # rst_json = json.loads(rst)
+ # for item in rst_json:
+ # output += f'{item["content"]}\n'
+ return AgentFinish(return_values={"output": rst}, log=rst)
+
+ if intermediate_steps:
+ _, observation = intermediate_steps[-1]
+ return AgentFinish(return_values={"output": observation}, log=observation)
+
+ try:
+ agent_decision = self.real_plan(intermediate_steps, callbacks, **kwargs)
+ if isinstance(agent_decision, AgentAction):
+ tool_inputs = agent_decision.tool_input
+ if isinstance(tool_inputs, dict) and 'query' in tool_inputs and 'chat_history' not in kwargs:
+ tool_inputs['query'] = kwargs['input']
+ agent_decision.tool_input = tool_inputs
+ else:
+ agent_decision.return_values['output'] = ''
+ return agent_decision
+ except Exception as e:
+ new_exception = self.model_instance.handle_exceptions(e)
+ raise new_exception
+
+ def real_plan(
+ self,
+ intermediate_steps: List[Tuple[AgentAction, str]],
+ callbacks: Callbacks = None,
+ **kwargs: Any,
+ ) -> Union[AgentAction, AgentFinish]:
+ """Given input, decided what to do.
+
+ Args:
+ intermediate_steps: Steps the LLM has taken to date, along with observations
+ **kwargs: User inputs.
+
+ Returns:
+ Action specifying what tool to use.
+ """
+ agent_scratchpad = _format_intermediate_steps(intermediate_steps)
+ selected_inputs = {
+ k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad"
+ }
+ full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad)
+ prompt = self.prompt.format_prompt(**full_inputs)
+ messages = prompt.to_messages()
+ prompt_messages = to_prompt_messages(messages)
+ result = self.model_instance.run(
+ messages=prompt_messages,
+ functions=self.functions,
+ )
+
+ ai_message = AIMessage(
+ content=result.content,
+ additional_kwargs={
+ 'function_call': result.function_call
+ }
+ )
+
+ agent_decision = _parse_ai_message(ai_message)
+ return agent_decision
+
+ async def aplan(
+ self,
+ intermediate_steps: List[Tuple[AgentAction, str]],
+ callbacks: Callbacks = None,
+ **kwargs: Any,
+ ) -> Union[AgentAction, AgentFinish]:
+ raise NotImplementedError()
+
+ @classmethod
+ def from_llm_and_tools(
+ cls,
+ model_instance: BaseLLM,
+ tools: Sequence[BaseTool],
+ callback_manager: Optional[BaseCallbackManager] = None,
+ extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]] = None,
+ system_message: Optional[SystemMessage] = SystemMessage(
+ content="You are a helpful AI assistant."
+ ),
+ **kwargs: Any,
+ ) -> BaseSingleActionAgent:
+ prompt = cls.create_prompt(
+ extra_prompt_messages=extra_prompt_messages,
+ system_message=system_message,
+ )
+ return cls(
+ model_instance=model_instance,
+ llm=FakeLLM(response=''),
+ prompt=prompt,
+ tools=tools,
+ callback_manager=callback_manager,
+ **kwargs,
+ )
diff --git a/api/core/agent/agent/structed_multi_dataset_router_agent.py b/api/core/agent/agent/structed_multi_dataset_router_agent.py
index 84c0553625b8ff..115ed69d17879d 100644
--- a/api/core/agent/agent/structed_multi_dataset_router_agent.py
+++ b/api/core/agent/agent/structed_multi_dataset_router_agent.py
@@ -89,7 +89,6 @@ def plan(
return AgentFinish(return_values={"output": ''}, log='')
elif len(self.dataset_tools) == 1:
tool = next(iter(self.dataset_tools))
- tool = cast(DatasetRetrieverTool, tool)
rst = tool.run(tool_input={'query': kwargs['input']})
return AgentFinish(return_values={"output": rst}, log=rst)
diff --git a/api/core/agent/agent_executor.py b/api/core/agent/agent_executor.py
index 05c4b632ffb2ed..579f3d5d90e4f4 100644
--- a/api/core/agent/agent_executor.py
+++ b/api/core/agent/agent_executor.py
@@ -18,6 +18,7 @@
from core.helper import moderation
from core.model_providers.error import LLMError
from core.model_providers.models.llm.base import BaseLLM
+from core.tool.dataset_multi_retriever_tool import DatasetMultiRetrieverTool
from core.tool.dataset_retriever_tool import DatasetRetrieverTool
@@ -78,7 +79,7 @@ def _init_agent(self) -> Union[BaseSingleActionAgent | BaseMultiActionAgent]:
verbose=True
)
elif self.configuration.strategy == PlanningStrategy.ROUTER:
- self.configuration.tools = [t for t in self.configuration.tools if isinstance(t, DatasetRetrieverTool)]
+ self.configuration.tools = [t for t in self.configuration.tools if isinstance(t, DatasetRetrieverTool) or isinstance(t, DatasetMultiRetrieverTool)]
agent = MultiDatasetRouterAgent.from_llm_and_tools(
model_instance=self.configuration.model_instance,
tools=self.configuration.tools,
@@ -86,7 +87,7 @@ def _init_agent(self) -> Union[BaseSingleActionAgent | BaseMultiActionAgent]:
verbose=True
)
elif self.configuration.strategy == PlanningStrategy.REACT_ROUTER:
- self.configuration.tools = [t for t in self.configuration.tools if isinstance(t, DatasetRetrieverTool)]
+ self.configuration.tools = [t for t in self.configuration.tools if isinstance(t, DatasetRetrieverTool) or isinstance(t, DatasetMultiRetrieverTool)]
agent = StructuredMultiDatasetRouterAgent.from_llm_and_tools(
model_instance=self.configuration.model_instance,
tools=self.configuration.tools,
diff --git a/api/core/callback_handler/index_tool_callback_handler.py b/api/core/callback_handler/index_tool_callback_handler.py
index ec02bdae9e85f1..ec91d67290d6a1 100644
--- a/api/core/callback_handler/index_tool_callback_handler.py
+++ b/api/core/callback_handler/index_tool_callback_handler.py
@@ -10,8 +10,7 @@
class DatasetIndexToolCallbackHandler:
"""Callback handler for dataset tool."""
- def __init__(self, dataset_id: str, conversation_message_task: ConversationMessageTask) -> None:
- self.dataset_id = dataset_id
+ def __init__(self, conversation_message_task: ConversationMessageTask) -> None:
self.conversation_message_task = conversation_message_task
def on_tool_end(self, documents: List[Document]) -> None:
@@ -21,7 +20,6 @@ def on_tool_end(self, documents: List[Document]) -> None:
# add hit count to document segment
db.session.query(DocumentSegment).filter(
- DocumentSegment.dataset_id == self.dataset_id,
DocumentSegment.index_node_id == doc_id
).update(
{DocumentSegment.hit_count: DocumentSegment.hit_count + 1},
diff --git a/api/core/completion.py b/api/core/completion.py
index ac700169e4cb2b..d4d7ec69eb4ba8 100644
--- a/api/core/completion.py
+++ b/api/core/completion.py
@@ -127,6 +127,7 @@ def generate(cls, task_id: str, app: App, app_model_config: AppModelConfig, quer
memory=memory,
rest_tokens=rest_tokens_for_context_and_memory,
chain_callback=chain_callback,
+ tenant_id=app.tenant_id,
retriever_from=retriever_from
)
diff --git a/api/core/data_loader/file_extractor.py b/api/core/data_loader/file_extractor.py
index a603bee7490c6b..40f0c1f20155e5 100644
--- a/api/core/data_loader/file_extractor.py
+++ b/api/core/data_loader/file_extractor.py
@@ -3,7 +3,7 @@
from typing import List, Union, Optional
import requests
-from langchain.document_loaders import TextLoader, Docx2txtLoader
+from langchain.document_loaders import TextLoader, Docx2txtLoader, UnstructuredFileLoader, UnstructuredAPIFileLoader
from langchain.schema import Document
from core.data_loader.loader.csv_loader import CSVLoader
@@ -20,13 +20,13 @@
class FileExtractor:
@classmethod
- def load(cls, upload_file: UploadFile, return_text: bool = False) -> Union[List[Document] | str]:
+ def load(cls, upload_file: UploadFile, return_text: bool = False, is_automatic: bool = False) -> Union[List[Document] | str]:
with tempfile.TemporaryDirectory() as temp_dir:
suffix = Path(upload_file.key).suffix
file_path = f"{temp_dir}/{next(tempfile._get_candidate_names())}{suffix}"
storage.download(upload_file.key, file_path)
- return cls.load_from_file(file_path, return_text, upload_file)
+ return cls.load_from_file(file_path, return_text, upload_file, is_automatic)
@classmethod
def load_from_url(cls, url: str, return_text: bool = False) -> Union[List[Document] | str]:
@@ -44,24 +44,34 @@ def load_from_url(cls, url: str, return_text: bool = False) -> Union[List[Docume
@classmethod
def load_from_file(cls, file_path: str, return_text: bool = False,
- upload_file: Optional[UploadFile] = None) -> Union[List[Document] | str]:
+ upload_file: Optional[UploadFile] = None,
+ is_automatic: bool = False) -> Union[List[Document] | str]:
input_file = Path(file_path)
delimiter = '\n'
file_extension = input_file.suffix.lower()
- if file_extension == '.xlsx':
- loader = ExcelLoader(file_path)
- elif file_extension == '.pdf':
- loader = PdfLoader(file_path, upload_file=upload_file)
- elif file_extension in ['.md', '.markdown']:
- loader = MarkdownLoader(file_path, autodetect_encoding=True)
- elif file_extension in ['.htm', '.html']:
- loader = HTMLLoader(file_path)
- elif file_extension == '.docx':
- loader = Docx2txtLoader(file_path)
- elif file_extension == '.csv':
- loader = CSVLoader(file_path, autodetect_encoding=True)
+ if is_automatic:
+ loader = UnstructuredFileLoader(
+ file_path, strategy="hi_res", mode="elements"
+ )
+ # loader = UnstructuredAPIFileLoader(
+ # file_path=filenames[0],
+ # api_key="FAKE_API_KEY",
+ # )
else:
- # txt
- loader = TextLoader(file_path, autodetect_encoding=True)
+ if file_extension == '.xlsx':
+ loader = ExcelLoader(file_path)
+ elif file_extension == '.pdf':
+ loader = PdfLoader(file_path, upload_file=upload_file)
+ elif file_extension in ['.md', '.markdown']:
+ loader = MarkdownLoader(file_path, autodetect_encoding=True)
+ elif file_extension in ['.htm', '.html']:
+ loader = HTMLLoader(file_path)
+ elif file_extension == '.docx':
+ loader = Docx2txtLoader(file_path)
+ elif file_extension == '.csv':
+ loader = CSVLoader(file_path, autodetect_encoding=True)
+ else:
+ # txt
+ loader = TextLoader(file_path, autodetect_encoding=True)
return delimiter.join([document.page_content for document in loader.load()]) if return_text else loader.load()
diff --git a/api/core/index/vector_index/base.py b/api/core/index/vector_index/base.py
index 60f092d409d8a2..bc7811a0e2a2ee 100644
--- a/api/core/index/vector_index/base.py
+++ b/api/core/index/vector_index/base.py
@@ -40,6 +40,13 @@ def _get_vector_store(self) -> VectorStore:
def _get_vector_store_class(self) -> type:
raise NotImplementedError
+ @abstractmethod
+ def search_by_full_text_index(
+ self, query: str,
+ **kwargs: Any
+ ) -> List[Document]:
+ raise NotImplementedError
+
def search(
self, query: str,
**kwargs: Any
diff --git a/api/core/index/vector_index/milvus_vector_index.py b/api/core/index/vector_index/milvus_vector_index.py
index d0a9c19ea0c472..a8bba763d4f905 100644
--- a/api/core/index/vector_index/milvus_vector_index.py
+++ b/api/core/index/vector_index/milvus_vector_index.py
@@ -1,16 +1,14 @@
-from typing import Optional, cast
+from typing import cast, Any, List
from langchain.embeddings.base import Embeddings
-from langchain.schema import Document, BaseRetriever
-from langchain.vectorstores import VectorStore, milvus
+from langchain.schema import Document
+from langchain.vectorstores import VectorStore
from pydantic import BaseModel, root_validator
from core.index.base import BaseIndex
from core.index.vector_index.base import BaseVectorIndex
from core.vector_store.milvus_vector_store import MilvusVectorStore
-from core.vector_store.weaviate_vector_store import WeaviateVectorStore
-from extensions.ext_database import db
-from models.dataset import Dataset, DatasetCollectionBinding
+from models.dataset import Dataset
class MilvusConfig(BaseModel):
@@ -74,7 +72,7 @@ def create(self, texts: list[Document], **kwargs) -> BaseIndex:
index_params = {
'metric_type': 'IP',
'index_type': "HNSW",
- 'params': {"M": 8, "efConstruction": 64}
+ 'params': {"M": 8, "efConstruction": 64}
}
self._vector_store = MilvusVectorStore.from_documents(
texts,
@@ -152,3 +150,7 @@ def delete(self) -> None:
),
],
))
+
+ def search_by_full_text_index(self, query: str, **kwargs: Any) -> List[Document]:
+ # milvus/zilliz doesn't support bm25 search
+ return []
diff --git a/api/core/index/vector_index/qdrant_vector_index.py b/api/core/index/vector_index/qdrant_vector_index.py
index 732a10b0ae2471..dbadab118eaaa2 100644
--- a/api/core/index/vector_index/qdrant_vector_index.py
+++ b/api/core/index/vector_index/qdrant_vector_index.py
@@ -191,3 +191,21 @@ def _is_origin(self):
return True
return False
+
+ def search_by_full_text_index(self, query: str, **kwargs: Any) -> List[Document]:
+ vector_store = self._get_vector_store()
+ vector_store = cast(self._get_vector_store_class(), vector_store)
+
+ from qdrant_client.http import models
+ return vector_store.similarity_search_by_bm25(models.Filter(
+ must=[
+ models.FieldCondition(
+ key="group_id",
+ match=models.MatchValue(value=self.dataset.id),
+ ),
+ models.FieldCondition(
+ key="page_content",
+ match=models.MatchText(text=query),
+ )
+ ],
+ ), kwargs.get('top_k', 2))
diff --git a/api/core/index/vector_index/weaviate_vector_index.py b/api/core/index/vector_index/weaviate_vector_index.py
index 1432a707079e3f..1305e576c35dc1 100644
--- a/api/core/index/vector_index/weaviate_vector_index.py
+++ b/api/core/index/vector_index/weaviate_vector_index.py
@@ -1,4 +1,4 @@
-from typing import Optional, cast
+from typing import Optional, cast, Any, List
import requests
import weaviate
@@ -26,6 +26,7 @@ def validate_config(cls, values: dict) -> dict:
class WeaviateVectorIndex(BaseVectorIndex):
+
def __init__(self, dataset: Dataset, config: WeaviateConfig, embeddings: Embeddings):
super().__init__(dataset, embeddings)
self._client = self._init_client(config)
@@ -110,7 +111,7 @@ def _get_vector_store(self) -> VectorStore:
if self._vector_store:
return self._vector_store
- attributes = ['doc_id', 'dataset_id', 'document_id']
+ attributes = ['doc_id', 'dataset_id', 'document_id', 'doc_hash']
if self._is_origin():
attributes = ['doc_id']
@@ -148,3 +149,9 @@ def _is_origin(self):
return True
return False
+
+ def search_by_full_text_index(self, query: str, **kwargs: Any) -> List[Document]:
+ vector_store = self._get_vector_store()
+ vector_store = cast(self._get_vector_store_class(), vector_store)
+ return vector_store.similarity_search_by_bm25(query, kwargs.get('top_k', 2), **kwargs)
+
diff --git a/api/core/indexing_runner.py b/api/core/indexing_runner.py
index e86f9fc78cbd8e..609e757f7c05d4 100644
--- a/api/core/indexing_runner.py
+++ b/api/core/indexing_runner.py
@@ -49,14 +49,14 @@ def run(self, dataset_documents: List[DatasetDocument]):
if not dataset:
raise ValueError("no dataset found")
- # load file
- text_docs = self._load_data(dataset_document)
-
# get the process rule
processing_rule = db.session.query(DatasetProcessRule). \
filter(DatasetProcessRule.id == dataset_document.dataset_process_rule_id). \
first()
+ # load file
+ text_docs = self._load_data(dataset_document)
+
# get splitter
splitter = self._get_splitter(processing_rule)
@@ -390,7 +390,7 @@ def notion_indexing_estimate(self, tenant_id: str, notion_info_list: list, tmp_p
"preview": preview_texts
}
- def _load_data(self, dataset_document: DatasetDocument) -> List[Document]:
+ def _load_data(self, dataset_document: DatasetDocument, automatic: bool = False) -> List[Document]:
# load file
if dataset_document.data_source_type not in ["upload_file", "notion_import"]:
return []
@@ -406,7 +406,7 @@ def _load_data(self, dataset_document: DatasetDocument) -> List[Document]:
one_or_none()
if file_detail:
- text_docs = FileExtractor.load(file_detail)
+ text_docs = FileExtractor.load(file_detail, is_automatic=False)
elif dataset_document.data_source_type == 'notion_import':
loader = NotionLoader.from_document(dataset_document)
text_docs = loader.load()
diff --git a/api/core/model_providers/model_factory.py b/api/core/model_providers/model_factory.py
index f7577b392fe792..f663707b075553 100644
--- a/api/core/model_providers/model_factory.py
+++ b/api/core/model_providers/model_factory.py
@@ -9,6 +9,7 @@
from core.model_providers.models.entity.model_params import ModelKwargs, ModelType
from core.model_providers.models.llm.base import BaseLLM
from core.model_providers.models.moderation.base import BaseModeration
+from core.model_providers.models.reranking.base import BaseReranking
from core.model_providers.models.speech2text.base import BaseSpeech2Text
from extensions.ext_database import db
from models.provider import TenantDefaultModel
@@ -140,6 +141,44 @@ def get_embedding_model(cls,
name=model_name
)
+
+ @classmethod
+ def get_reranking_model(cls,
+ tenant_id: str,
+ model_provider_name: Optional[str] = None,
+ model_name: Optional[str] = None) -> Optional[BaseReranking]:
+ """
+ get reranking model.
+
+ :param tenant_id: a string representing the ID of the tenant.
+ :param model_provider_name:
+ :param model_name:
+ :return:
+ """
+ if (model_provider_name is None or len(model_provider_name) == 0) and (model_name is None or len(model_name) == 0):
+ default_model = cls.get_default_model(tenant_id, ModelType.RERANKING)
+
+ if not default_model:
+ raise LLMBadRequestError(f"Default model is not available. "
+ f"Please configure a Default Reranking Model "
+ f"in the Settings -> Model Provider.")
+
+ model_provider_name = default_model.provider_name
+ model_name = default_model.model_name
+
+ # get model provider
+ model_provider = ModelProviderFactory.get_preferred_model_provider(tenant_id, model_provider_name)
+
+ if not model_provider:
+ raise ProviderTokenNotInitError(f"Model {model_name} provider credentials is not initialized.")
+
+ # init reranking model
+ model_class = model_provider.get_model_class(model_type=ModelType.RERANKING)
+ return model_class(
+ model_provider=model_provider,
+ name=model_name
+ )
+
@classmethod
def get_speech2text_model(cls,
tenant_id: str,
diff --git a/api/core/model_providers/model_provider_factory.py b/api/core/model_providers/model_provider_factory.py
index aa0b1efe1f74b6..86df0137387639 100644
--- a/api/core/model_providers/model_provider_factory.py
+++ b/api/core/model_providers/model_provider_factory.py
@@ -72,6 +72,9 @@ def get_model_provider_class(cls, provider_name: str) -> Type[BaseModelProvider]
elif provider_name == 'localai':
from core.model_providers.providers.localai_provider import LocalAIProvider
return LocalAIProvider
+ elif provider_name == 'cohere':
+ from core.model_providers.providers.cohere_provider import CohereProvider
+ return CohereProvider
elif provider_name == 'custom':
from core.model_providers.providers.custom_provider import CustomProvider
return CustomProvider
diff --git a/api/core/model_providers/models/entity/model_params.py b/api/core/model_providers/models/entity/model_params.py
index 225a5cc674c6e5..0effa75e6e4aa5 100644
--- a/api/core/model_providers/models/entity/model_params.py
+++ b/api/core/model_providers/models/entity/model_params.py
@@ -17,7 +17,7 @@ class ModelType(enum.Enum):
IMAGE = 'image'
VIDEO = 'video'
MODERATION = 'moderation'
-
+ RERANKING = 'reranking'
@staticmethod
def value_of(value):
for member in ModelType:
diff --git a/api/core/model_providers/models/reranking/__init__.py b/api/core/model_providers/models/reranking/__init__.py
new file mode 100644
index 00000000000000..e69de29bb2d1d6
diff --git a/api/core/model_providers/models/reranking/base.py b/api/core/model_providers/models/reranking/base.py
new file mode 100644
index 00000000000000..85863895f4208d
--- /dev/null
+++ b/api/core/model_providers/models/reranking/base.py
@@ -0,0 +1,36 @@
+from abc import abstractmethod
+from typing import Any, Optional, List
+from langchain.schema import Document
+
+from core.model_providers.models.base import BaseProviderModel
+from core.model_providers.models.entity.model_params import ModelType
+from core.model_providers.providers.base import BaseModelProvider
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class BaseReranking(BaseProviderModel):
+ name: str
+ type: ModelType = ModelType.RERANKING
+
+ def __init__(self, model_provider: BaseModelProvider, client: Any, name: str):
+ super().__init__(model_provider, client)
+ self.name = name
+
+ @property
+ def base_model_name(self) -> str:
+ """
+ get base model name
+
+ :return: str
+ """
+ return self.name
+
+ @abstractmethod
+ def rerank(self, query: str, documents: List[Document], score_threshold: Optional[float], top_k: Optional[int]) -> Optional[List[Document]]:
+ raise NotImplementedError
+
+ @abstractmethod
+ def handle_exceptions(self, ex: Exception) -> Exception:
+ raise NotImplementedError
diff --git a/api/core/model_providers/models/reranking/cohere_reranking.py b/api/core/model_providers/models/reranking/cohere_reranking.py
new file mode 100644
index 00000000000000..3119caeae1afaf
--- /dev/null
+++ b/api/core/model_providers/models/reranking/cohere_reranking.py
@@ -0,0 +1,73 @@
+import logging
+from typing import Optional, List
+
+import cohere
+import openai
+from langchain.schema import Document
+
+from core.model_providers.error import LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError, \
+ LLMRateLimitError, LLMAuthorizationError
+from core.model_providers.models.reranking.base import BaseReranking
+from core.model_providers.providers.base import BaseModelProvider
+
+
+class CohereReranking(BaseReranking):
+
+ def __init__(self, model_provider: BaseModelProvider, name: str):
+ self.credentials = model_provider.get_model_credentials(
+ model_name=name,
+ model_type=self.type
+ )
+
+ client = cohere.Client(self.credentials.get('api_key'))
+
+ super().__init__(model_provider, client, name)
+
+ def rerank(self, query: str, documents: List[Document], score_threshold: Optional[float], top_k: Optional[int]) -> Optional[List[Document]]:
+ docs = []
+ doc_id = []
+ for document in documents:
+ if document.metadata['doc_id'] not in doc_id:
+ doc_id.append(document.metadata['doc_id'])
+ docs.append(document.page_content)
+ results = self.client.rerank(query=query, documents=docs, model=self.name, top_n=top_k)
+ rerank_documents = []
+
+ for idx, result in enumerate(results):
+ # format document
+ rerank_document = Document(
+ page_content=result.document['text'],
+ metadata={
+ "doc_id": documents[result.index].metadata['doc_id'],
+ "doc_hash": documents[result.index].metadata['doc_hash'],
+ "document_id": documents[result.index].metadata['document_id'],
+ "dataset_id": documents[result.index].metadata['dataset_id'],
+ 'score': result.relevance_score
+ }
+ )
+ # score threshold check
+ if score_threshold is not None:
+ if result.relevance_score >= score_threshold:
+ rerank_documents.append(rerank_document)
+ else:
+ rerank_documents.append(rerank_document)
+ return rerank_documents
+
+ def handle_exceptions(self, ex: Exception) -> Exception:
+ if isinstance(ex, openai.error.InvalidRequestError):
+ logging.warning("Invalid request to OpenAI API.")
+ return LLMBadRequestError(str(ex))
+ elif isinstance(ex, openai.error.APIConnectionError):
+ logging.warning("Failed to connect to OpenAI API.")
+ return LLMAPIConnectionError(ex.__class__.__name__ + ":" + str(ex))
+ elif isinstance(ex, (openai.error.APIError, openai.error.ServiceUnavailableError, openai.error.Timeout)):
+ logging.warning("OpenAI service unavailable.")
+ return LLMAPIUnavailableError(ex.__class__.__name__ + ":" + str(ex))
+ elif isinstance(ex, openai.error.RateLimitError):
+ return LLMRateLimitError(str(ex))
+ elif isinstance(ex, openai.error.AuthenticationError):
+ return LLMAuthorizationError(str(ex))
+ elif isinstance(ex, openai.error.OpenAIError):
+ return LLMBadRequestError(ex.__class__.__name__ + ":" + str(ex))
+ else:
+ return ex
diff --git a/api/core/model_providers/providers/anthropic_provider.py b/api/core/model_providers/providers/anthropic_provider.py
index 6ca344bd51a9bb..eb39468610c0bf 100644
--- a/api/core/model_providers/providers/anthropic_provider.py
+++ b/api/core/model_providers/providers/anthropic_provider.py
@@ -32,9 +32,12 @@ def _get_fixed_model_list(self, model_type: ModelType) -> list[dict]:
if model_type == ModelType.TEXT_GENERATION:
return [
{
- 'id': 'claude-instant-1',
- 'name': 'claude-instant-1',
+ 'id': 'claude-2.1',
+ 'name': 'claude-2.1',
'mode': ModelMode.CHAT.value,
+ 'features': [
+ ModelFeature.AGENT_THOUGHT.value
+ ]
},
{
'id': 'claude-2',
@@ -44,6 +47,11 @@ def _get_fixed_model_list(self, model_type: ModelType) -> list[dict]:
ModelFeature.AGENT_THOUGHT.value
]
},
+ {
+ 'id': 'claude-instant-1',
+ 'name': 'claude-instant-1',
+ 'mode': ModelMode.CHAT.value,
+ },
]
else:
return []
@@ -73,12 +81,18 @@ def get_model_parameter_rules(self, model_name: str, model_type: ModelType) -> M
:param model_type:
:return:
"""
+ model_max_tokens = {
+ 'claude-instant-1': 100000,
+ 'claude-2': 100000,
+ 'claude-2.1': 200000,
+ }
+
return ModelKwargsRules(
temperature=KwargRule[float](min=0, max=1, default=1, precision=2),
top_p=KwargRule[float](min=0, max=1, default=0.7, precision=2),
presence_penalty=KwargRule[float](enabled=False),
frequency_penalty=KwargRule[float](enabled=False),
- max_tokens=KwargRule[int](alias="max_tokens_to_sample", min=10, max=100000, default=256, precision=0),
+ max_tokens=KwargRule[int](alias="max_tokens_to_sample", min=10, max=model_max_tokens.get(model_name, 100000), default=256, precision=0),
)
@classmethod
diff --git a/api/core/model_providers/providers/cohere_provider.py b/api/core/model_providers/providers/cohere_provider.py
new file mode 100644
index 00000000000000..9fa77dfff2a5c4
--- /dev/null
+++ b/api/core/model_providers/providers/cohere_provider.py
@@ -0,0 +1,152 @@
+import json
+from json import JSONDecodeError
+from typing import Type
+
+from langchain.schema import HumanMessage
+
+from core.helper import encrypter
+from core.model_providers.models.base import BaseProviderModel
+from core.model_providers.models.entity.model_params import ModelKwargsRules, KwargRule, ModelType, ModelMode
+from core.model_providers.models.reranking.cohere_reranking import CohereReranking
+from core.model_providers.providers.base import BaseModelProvider, CredentialsValidateFailedError
+from models.provider import ProviderType
+
+
+class CohereProvider(BaseModelProvider):
+
+ @property
+ def provider_name(self):
+ """
+ Returns the name of a provider.
+ """
+ return 'cohere'
+
+ def _get_text_generation_model_mode(self, model_name) -> str:
+ return ModelMode.CHAT.value
+
+ def _get_fixed_model_list(self, model_type: ModelType) -> list[dict]:
+ if model_type == ModelType.RERANKING:
+ return [
+ {
+ 'id': 'rerank-english-v2.0',
+ 'name': 'rerank-english-v2.0'
+ },
+ {
+ 'id': 'rerank-multilingual-v2.0',
+ 'name': 'rerank-multilingual-v2.0'
+ }
+ ]
+ else:
+ return []
+
+ def get_model_class(self, model_type: ModelType) -> Type[BaseProviderModel]:
+ """
+ Returns the model class.
+
+ :param model_type:
+ :return:
+ """
+ if model_type == ModelType.RERANKING:
+ model_class = CohereReranking
+ else:
+ raise NotImplementedError
+
+ return model_class
+
+ def get_model_parameter_rules(self, model_name: str, model_type: ModelType) -> ModelKwargsRules:
+ """
+ get model parameter rules.
+
+ :param model_name:
+ :param model_type:
+ :return:
+ """
+ return ModelKwargsRules(
+ temperature=KwargRule[float](min=0, max=1, default=0.3, precision=2),
+ top_p=KwargRule[float](min=0, max=0.99, default=0.85, precision=2),
+ presence_penalty=KwargRule[float](enabled=False),
+ frequency_penalty=KwargRule[float](enabled=False),
+ max_tokens=KwargRule[int](enabled=False),
+ )
+
+ @classmethod
+ def is_provider_credentials_valid_or_raise(cls, credentials: dict):
+ """
+ Validates the given credentials.
+ """
+ if 'api_key' not in credentials:
+ raise CredentialsValidateFailedError('Cohere api_key must be provided.')
+
+ try:
+ credential_kwargs = {
+ 'api_key': credentials['api_key'],
+ }
+ # todo validate
+ except Exception as ex:
+ raise CredentialsValidateFailedError(str(ex))
+
+ @classmethod
+ def encrypt_provider_credentials(cls, tenant_id: str, credentials: dict) -> dict:
+ credentials['api_key'] = encrypter.encrypt_token(tenant_id, credentials['api_key'])
+ return credentials
+
+ def get_provider_credentials(self, obfuscated: bool = False) -> dict:
+ if self.provider.provider_type == ProviderType.CUSTOM.value:
+ try:
+ credentials = json.loads(self.provider.encrypted_config)
+ except JSONDecodeError:
+ credentials = {
+ 'api_key': None,
+ }
+
+ if credentials['api_key']:
+ credentials['api_key'] = encrypter.decrypt_token(
+ self.provider.tenant_id,
+ credentials['api_key']
+ )
+
+ if obfuscated:
+ credentials['api_key'] = encrypter.obfuscated_token(credentials['api_key'])
+
+ return credentials
+ else:
+ return {}
+
+ def should_deduct_quota(self):
+ return True
+
+ @classmethod
+ def is_model_credentials_valid_or_raise(cls, model_name: str, model_type: ModelType, credentials: dict):
+ """
+ check model credentials valid.
+
+ :param model_name:
+ :param model_type:
+ :param credentials:
+ """
+ return
+
+ @classmethod
+ def encrypt_model_credentials(cls, tenant_id: str, model_name: str, model_type: ModelType,
+ credentials: dict) -> dict:
+ """
+ encrypt model credentials for save.
+
+ :param tenant_id:
+ :param model_name:
+ :param model_type:
+ :param credentials:
+ :return:
+ """
+ return {}
+
+ def get_model_credentials(self, model_name: str, model_type: ModelType, obfuscated: bool = False) -> dict:
+ """
+ get credentials for llm use.
+
+ :param model_name:
+ :param model_type:
+ :param obfuscated:
+ :return:
+ """
+ return self.get_provider_credentials(obfuscated)
diff --git a/api/core/model_providers/rules/_providers.json b/api/core/model_providers/rules/_providers.json
index 92d56be824df21..0e549828bb2870 100644
--- a/api/core/model_providers/rules/_providers.json
+++ b/api/core/model_providers/rules/_providers.json
@@ -13,5 +13,6 @@
"huggingface_hub",
"xinference",
"openllm",
- "localai"
+ "localai",
+ "cohere"
]
diff --git a/api/core/model_providers/rules/anthropic.json b/api/core/model_providers/rules/anthropic.json
index e617842b94d9c0..bb02ce845be4dd 100644
--- a/api/core/model_providers/rules/anthropic.json
+++ b/api/core/model_providers/rules/anthropic.json
@@ -12,6 +12,9 @@
"quota_limit": 0
},
"model_flexibility": "fixed",
+ "supported_model_types": [
+ "text-generation"
+ ],
"price_config": {
"claude-instant-1": {
"prompt": "1.63",
@@ -20,8 +23,14 @@
"currency": "USD"
},
"claude-2": {
- "prompt": "11.02",
- "completion": "32.68",
+ "prompt": "8.00",
+ "completion": "24.00",
+ "unit": "0.000001",
+ "currency": "USD"
+ },
+ "claude-2.1": {
+ "prompt": "8.00",
+ "completion": "24.00",
"unit": "0.000001",
"currency": "USD"
}
diff --git a/api/core/model_providers/rules/azure_openai.json b/api/core/model_providers/rules/azure_openai.json
index fe4dc10c56421e..05a8007855002b 100644
--- a/api/core/model_providers/rules/azure_openai.json
+++ b/api/core/model_providers/rules/azure_openai.json
@@ -4,6 +4,10 @@
],
"system_config": null,
"model_flexibility": "configurable",
+ "supported_model_types": [
+ "text-generation",
+ "embeddings"
+ ],
"price_config":{
"gpt-4": {
"prompt": "0.03",
diff --git a/api/core/model_providers/rules/baichuan.json b/api/core/model_providers/rules/baichuan.json
index 237b0d24d29d10..70b847cd8ad245 100644
--- a/api/core/model_providers/rules/baichuan.json
+++ b/api/core/model_providers/rules/baichuan.json
@@ -4,6 +4,9 @@
],
"system_config": null,
"model_flexibility": "fixed",
+ "supported_model_types": [
+ "text-generation"
+ ],
"price_config": {
"baichuan2-53b": {
"prompt": "0.01",
diff --git a/api/core/model_providers/rules/chatglm.json b/api/core/model_providers/rules/chatglm.json
index 0af3e61ec7af61..3ddfb8cf53b047 100644
--- a/api/core/model_providers/rules/chatglm.json
+++ b/api/core/model_providers/rules/chatglm.json
@@ -3,5 +3,8 @@
"custom"
],
"system_config": null,
- "model_flexibility": "fixed"
+ "model_flexibility": "fixed",
+ "supported_model_types": [
+ "text-generation"
+ ]
}
\ No newline at end of file
diff --git a/api/core/model_providers/rules/cohere.json b/api/core/model_providers/rules/cohere.json
new file mode 100644
index 00000000000000..5ce0c9cc5b8177
--- /dev/null
+++ b/api/core/model_providers/rules/cohere.json
@@ -0,0 +1,10 @@
+{
+ "support_provider_types": [
+ "custom"
+ ],
+ "system_config": null,
+ "model_flexibility": "fixed",
+ "supported_model_types": [
+ "reranking"
+ ]
+}
\ No newline at end of file
diff --git a/api/core/model_providers/rules/huggingface_hub.json b/api/core/model_providers/rules/huggingface_hub.json
index 5badb071786bf8..3f1ee225f16b3c 100644
--- a/api/core/model_providers/rules/huggingface_hub.json
+++ b/api/core/model_providers/rules/huggingface_hub.json
@@ -3,5 +3,9 @@
"custom"
],
"system_config": null,
- "model_flexibility": "configurable"
+ "model_flexibility": "configurable",
+ "supported_model_types": [
+ "text-generation",
+ "embeddings"
+ ]
}
\ No newline at end of file
diff --git a/api/core/model_providers/rules/localai.json b/api/core/model_providers/rules/localai.json
index 5badb071786bf8..3f1ee225f16b3c 100644
--- a/api/core/model_providers/rules/localai.json
+++ b/api/core/model_providers/rules/localai.json
@@ -3,5 +3,9 @@
"custom"
],
"system_config": null,
- "model_flexibility": "configurable"
+ "model_flexibility": "configurable",
+ "supported_model_types": [
+ "text-generation",
+ "embeddings"
+ ]
}
\ No newline at end of file
diff --git a/api/core/model_providers/rules/minimax.json b/api/core/model_providers/rules/minimax.json
index 765d6712e1ea37..0348ec3dfbc92d 100644
--- a/api/core/model_providers/rules/minimax.json
+++ b/api/core/model_providers/rules/minimax.json
@@ -10,6 +10,10 @@
"quota_unit": "tokens"
},
"model_flexibility": "fixed",
+ "supported_model_types": [
+ "text-generation",
+ "embeddings"
+ ],
"price_config": {
"abab5.5-chat": {
"prompt": "0.015",
diff --git a/api/core/model_providers/rules/openai.json b/api/core/model_providers/rules/openai.json
index 17a3db72b889be..4f1f39b792fdbc 100644
--- a/api/core/model_providers/rules/openai.json
+++ b/api/core/model_providers/rules/openai.json
@@ -11,6 +11,12 @@
"quota_limit": 200
},
"model_flexibility": "fixed",
+ "supported_model_types": [
+ "text-generation",
+ "embeddings",
+ "speech2text",
+ "moderation"
+ ],
"price_config": {
"gpt-4": {
"prompt": "0.03",
diff --git a/api/core/model_providers/rules/openllm.json b/api/core/model_providers/rules/openllm.json
index 5badb071786bf8..3f1ee225f16b3c 100644
--- a/api/core/model_providers/rules/openllm.json
+++ b/api/core/model_providers/rules/openllm.json
@@ -3,5 +3,9 @@
"custom"
],
"system_config": null,
- "model_flexibility": "configurable"
+ "model_flexibility": "configurable",
+ "supported_model_types": [
+ "text-generation",
+ "embeddings"
+ ]
}
\ No newline at end of file
diff --git a/api/core/model_providers/rules/replicate.json b/api/core/model_providers/rules/replicate.json
index 5badb071786bf8..3f1ee225f16b3c 100644
--- a/api/core/model_providers/rules/replicate.json
+++ b/api/core/model_providers/rules/replicate.json
@@ -3,5 +3,9 @@
"custom"
],
"system_config": null,
- "model_flexibility": "configurable"
+ "model_flexibility": "configurable",
+ "supported_model_types": [
+ "text-generation",
+ "embeddings"
+ ]
}
\ No newline at end of file
diff --git a/api/core/model_providers/rules/spark.json b/api/core/model_providers/rules/spark.json
index 24133107f86edd..4fa4d9a5698c8e 100644
--- a/api/core/model_providers/rules/spark.json
+++ b/api/core/model_providers/rules/spark.json
@@ -10,6 +10,9 @@
"quota_unit": "tokens"
},
"model_flexibility": "fixed",
+ "supported_model_types": [
+ "text-generation"
+ ],
"price_config": {
"spark": {
"prompt": "0.18",
diff --git a/api/core/model_providers/rules/tongyi.json b/api/core/model_providers/rules/tongyi.json
index c431f50b3fc44d..319fbcaf9f7a92 100644
--- a/api/core/model_providers/rules/tongyi.json
+++ b/api/core/model_providers/rules/tongyi.json
@@ -4,6 +4,9 @@
],
"system_config": null,
"model_flexibility": "fixed",
+ "supported_model_types": [
+ "text-generation"
+ ],
"price_config": {
"qwen-turbo": {
"prompt": "0.012",
diff --git a/api/core/model_providers/rules/wenxin.json b/api/core/model_providers/rules/wenxin.json
index dbb692fb42c443..193dccc411a6d5 100644
--- a/api/core/model_providers/rules/wenxin.json
+++ b/api/core/model_providers/rules/wenxin.json
@@ -4,6 +4,9 @@
],
"system_config": null,
"model_flexibility": "fixed",
+ "supported_model_types": [
+ "text-generation"
+ ],
"price_config": {
"ernie-bot-4": {
"prompt": "0",
diff --git a/api/core/model_providers/rules/xinference.json b/api/core/model_providers/rules/xinference.json
index 5badb071786bf8..3f1ee225f16b3c 100644
--- a/api/core/model_providers/rules/xinference.json
+++ b/api/core/model_providers/rules/xinference.json
@@ -3,5 +3,9 @@
"custom"
],
"system_config": null,
- "model_flexibility": "configurable"
+ "model_flexibility": "configurable",
+ "supported_model_types": [
+ "text-generation",
+ "embeddings"
+ ]
}
\ No newline at end of file
diff --git a/api/core/model_providers/rules/zhipuai.json b/api/core/model_providers/rules/zhipuai.json
index af0e5debbab485..07badcc31312ff 100644
--- a/api/core/model_providers/rules/zhipuai.json
+++ b/api/core/model_providers/rules/zhipuai.json
@@ -10,6 +10,10 @@
"quota_unit": "tokens"
},
"model_flexibility": "fixed",
+ "supported_model_types": [
+ "text-generation",
+ "embeddings"
+ ],
"price_config": {
"chatglm_turbo": {
"prompt": "0.005",
diff --git a/api/core/orchestrator_rule_parser.py b/api/core/orchestrator_rule_parser.py
index d13282419a06d4..9b31fa69c9d1f5 100644
--- a/api/core/orchestrator_rule_parser.py
+++ b/api/core/orchestrator_rule_parser.py
@@ -1,11 +1,17 @@
-from typing import Optional
+import json
+import threading
+from typing import Optional, List
+from flask import Flask
from langchain import WikipediaAPIWrapper
from langchain.callbacks.manager import Callbacks
from langchain.memory.chat_memory import BaseChatMemory
from langchain.tools import BaseTool, Tool, WikipediaQueryRun
from pydantic import BaseModel, Field
+from core.agent.agent.multi_dataset_router_agent import MultiDatasetRouterAgent
+from core.agent.agent.output_parser.structured_chat import StructuredChatOutputParser
+from core.agent.agent.structed_multi_dataset_router_agent import StructuredMultiDatasetRouterAgent
from core.agent.agent_executor import AgentExecutor, PlanningStrategy, AgentConfiguration
from core.callback_handler.agent_loop_gather_callback_handler import AgentLoopGatherCallbackHandler
from core.callback_handler.dataset_tool_callback_handler import DatasetToolCallbackHandler
@@ -17,6 +23,7 @@
from core.model_providers.models.entity.model_params import ModelKwargs, ModelMode
from core.model_providers.models.llm.base import BaseLLM
from core.tool.current_datetime_tool import DatetimeTool
+from core.tool.dataset_multi_retriever_tool import DatasetMultiRetrieverTool
from core.tool.dataset_retriever_tool import DatasetRetrieverTool
from core.tool.provider.serpapi_provider import SerpAPIToolProvider
from core.tool.serpapi_wrapper import OptimizedSerpAPIWrapper, OptimizedSerpAPIInput
@@ -25,6 +32,16 @@
from models.dataset import Dataset, DatasetProcessRule
from models.model import AppModelConfig
+default_retrieval_model = {
+ 'search_method': 'semantic_search',
+ 'reranking_enable': False,
+ 'reranking_model': {
+ 'reranking_provider_name': '',
+ 'reranking_model_name': ''
+ },
+ 'top_k': 2,
+ 'score_threshold_enable': False
+}
class OrchestratorRuleParser:
"""Parse the orchestrator rule to entities."""
@@ -34,7 +51,7 @@ def __init__(self, tenant_id: str, app_model_config: AppModelConfig):
self.app_model_config = app_model_config
def to_agent_executor(self, conversation_message_task: ConversationMessageTask, memory: Optional[BaseChatMemory],
- rest_tokens: int, chain_callback: MainChainGatherCallbackHandler,
+ rest_tokens: int, chain_callback: MainChainGatherCallbackHandler, tenant_id: str,
retriever_from: str = 'dev') -> Optional[AgentExecutor]:
if not self.app_model_config.agent_mode_dict:
return None
@@ -101,7 +118,8 @@ def to_agent_executor(self, conversation_message_task: ConversationMessageTask,
rest_tokens=rest_tokens,
return_resource=return_resource,
retriever_from=retriever_from,
- dataset_configs=dataset_configs
+ dataset_configs=dataset_configs,
+ tenant_id=tenant_id
)
if len(tools) == 0:
@@ -123,7 +141,7 @@ def to_agent_executor(self, conversation_message_task: ConversationMessageTask,
return chain
- def to_tools(self, tool_configs: list, callbacks: Callbacks = None, **kwargs) -> list[BaseTool]:
+ def to_tools(self, tool_configs: list, callbacks: Callbacks = None, **kwargs) -> list[BaseTool]:
"""
Convert app agent tool configs to tools
@@ -132,6 +150,7 @@ def to_tools(self, tool_configs: list, callbacks: Callbacks = None, **kwargs) ->
:return:
"""
tools = []
+ dataset_tools = []
for tool_config in tool_configs:
tool_type = list(tool_config.keys())[0]
tool_val = list(tool_config.values())[0]
@@ -140,7 +159,7 @@ def to_tools(self, tool_configs: list, callbacks: Callbacks = None, **kwargs) ->
tool = None
if tool_type == "dataset":
- tool = self.to_dataset_retriever_tool(tool_config=tool_val, **kwargs)
+ dataset_tools.append(tool_config)
elif tool_type == "web_reader":
tool = self.to_web_reader_tool(tool_config=tool_val, **kwargs)
elif tool_type == "google_search":
@@ -156,57 +175,81 @@ def to_tools(self, tool_configs: list, callbacks: Callbacks = None, **kwargs) ->
else:
tool.callbacks = callbacks
tools.append(tool)
-
+ # format dataset tool
+ if len(dataset_tools) > 0:
+ dataset_retriever_tools = self.to_dataset_retriever_tool(tool_configs=dataset_tools, **kwargs)
+ if dataset_retriever_tools:
+ tools.extend(dataset_retriever_tools)
return tools
- def to_dataset_retriever_tool(self, tool_config: dict, conversation_message_task: ConversationMessageTask,
- dataset_configs: dict, rest_tokens: int,
+ def to_dataset_retriever_tool(self, tool_configs: List, conversation_message_task: ConversationMessageTask,
return_resource: bool = False, retriever_from: str = 'dev',
**kwargs) \
- -> Optional[BaseTool]:
+ -> Optional[List[BaseTool]]:
"""
A dataset tool is a tool that can be used to retrieve information from a dataset
- :param rest_tokens:
- :param tool_config:
- :param dataset_configs:
+ :param tool_configs:
:param conversation_message_task:
:param return_resource:
:param retriever_from:
:return:
"""
- # get dataset from dataset id
- dataset = db.session.query(Dataset).filter(
- Dataset.tenant_id == self.tenant_id,
- Dataset.id == tool_config.get("id")
- ).first()
-
- if not dataset:
- return None
-
- if dataset and dataset.available_document_count == 0 and dataset.available_document_count == 0:
- return None
-
- top_k = dataset_configs.get("top_k", 2)
-
- # dynamically adjust top_k when the remaining token number is not enough to support top_k
- top_k = self._dynamic_calc_retrieve_k(dataset=dataset, top_k=top_k, rest_tokens=rest_tokens)
+ dataset_configs = kwargs['dataset_configs']
+ retrieval_model = dataset_configs.get('retrieval_model', 'single')
+ tools = []
+ dataset_ids = []
+ tenant_id = None
+ for tool_config in tool_configs:
+ # get dataset from dataset id
+ dataset = db.session.query(Dataset).filter(
+ Dataset.tenant_id == self.tenant_id,
+ Dataset.id == tool_config.get('dataset').get("id")
+ ).first()
- score_threshold = None
- score_threshold_config = dataset_configs.get("score_threshold")
- if score_threshold_config and score_threshold_config.get("enable"):
- score_threshold = score_threshold_config.get("value")
+ if not dataset:
+ continue
- tool = DatasetRetrieverTool.from_dataset(
- dataset=dataset,
- top_k=top_k,
- score_threshold=score_threshold,
- callbacks=[DatasetToolCallbackHandler(conversation_message_task)],
- conversation_message_task=conversation_message_task,
- return_resource=return_resource,
- retriever_from=retriever_from
- )
+ if dataset and dataset.available_document_count == 0 and dataset.available_document_count == 0:
+ continue
+ dataset_ids.append(dataset.id)
+ if retrieval_model == 'single':
+ retrieval_model = dataset.retrieval_model if dataset.retrieval_model else default_retrieval_model
+ top_k = retrieval_model['top_k']
+
+ # dynamically adjust top_k when the remaining token number is not enough to support top_k
+ # top_k = self._dynamic_calc_retrieve_k(dataset=dataset, top_k=top_k, rest_tokens=rest_tokens)
+
+ score_threshold = None
+ score_threshold_enable = retrieval_model.get("score_threshold_enable")
+ if score_threshold_enable:
+ score_threshold = retrieval_model.get("score_threshold")
+
+ tool = DatasetRetrieverTool.from_dataset(
+ dataset=dataset,
+ top_k=top_k,
+ score_threshold=score_threshold,
+ callbacks=[DatasetToolCallbackHandler(conversation_message_task)],
+ conversation_message_task=conversation_message_task,
+ return_resource=return_resource,
+ retriever_from=retriever_from
+ )
+ tools.append(tool)
+ if retrieval_model == 'multiple':
+ tool = DatasetMultiRetrieverTool.from_dataset(
+ dataset_ids=dataset_ids,
+ tenant_id=kwargs['tenant_id'],
+ top_k=dataset_configs.get('top_k', 2),
+ score_threshold=dataset_configs.get('score_threshold', 0.5) if dataset_configs.get('score_threshold_enable', False) else None,
+ callbacks=[DatasetToolCallbackHandler(conversation_message_task)],
+ conversation_message_task=conversation_message_task,
+ return_resource=return_resource,
+ retriever_from=retriever_from,
+ reranking_provider_name=dataset_configs.get('reranking_model').get('reranking_provider_name'),
+ reranking_model_name=dataset_configs.get('reranking_model').get('reranking_model_name')
+ )
+ tools.append(tool)
- return tool
+ return tools
def to_web_reader_tool(self, tool_config: dict, agent_model_instance: BaseLLM, **kwargs) -> Optional[BaseTool]:
"""
diff --git a/api/core/third_party/langchain/llms/anthropic_llm.py b/api/core/third_party/langchain/llms/anthropic_llm.py
index 3513bbbe7df299..9dfce8e4355660 100644
--- a/api/core/third_party/langchain/llms/anthropic_llm.py
+++ b/api/core/third_party/langchain/llms/anthropic_llm.py
@@ -1,7 +1,7 @@
from typing import Dict
-from httpx import Limits
from langchain.chat_models import ChatAnthropic
+from langchain.schema import ChatMessage, BaseMessage, HumanMessage, AIMessage, SystemMessage
from langchain.utils import get_from_dict_or_env, check_package_version
from pydantic import root_validator
@@ -29,8 +29,7 @@ def validate_environment(cls, values: Dict) -> Dict:
base_url=values["anthropic_api_url"],
api_key=values["anthropic_api_key"],
timeout=values["default_request_timeout"],
- max_retries=0,
- connection_pool_limits=Limits(max_connections=200, max_keepalive_connections=100),
+ max_retries=0
)
values["async_client"] = anthropic.AsyncAnthropic(
base_url=values["anthropic_api_url"],
@@ -46,3 +45,16 @@ def validate_environment(cls, values: Dict) -> Dict:
"Please it install it with `pip install anthropic`."
)
return values
+
+ def _convert_one_message_to_text(self, message: BaseMessage) -> str:
+ if isinstance(message, ChatMessage):
+ message_text = f"\n\n{message.role.capitalize()}: {message.content}"
+ elif isinstance(message, HumanMessage):
+ message_text = f"{self.HUMAN_PROMPT} {message.content}"
+ elif isinstance(message, AIMessage):
+ message_text = f"{self.AI_PROMPT} {message.content}"
+ elif isinstance(message, SystemMessage):
+ message_text = f"{message.content}"
+ else:
+ raise ValueError(f"Got unknown type {message}")
+ return message_text
diff --git a/api/core/third_party/langchain/llms/openllm.py b/api/core/third_party/langchain/llms/openllm.py
index 6151fe3f1c2231..d83f54da6e4158 100644
--- a/api/core/third_party/langchain/llms/openllm.py
+++ b/api/core/third_party/langchain/llms/openllm.py
@@ -51,7 +51,8 @@ def _call(
) -> str:
params = {
"prompt": prompt,
- "llm_config": self.llm_kwargs
+ "llm_config": self.llm_kwargs,
+ "stop": stop,
}
headers = {"Content-Type": "application/json"}
@@ -65,11 +66,11 @@ def _call(
raise ValueError(f"OpenLLM HTTP {response.status_code} error: {response.text}")
json_response = response.json()
- completion = json_response["responses"][0]
+ completion = json_response["outputs"][0]['text']
completion = completion.lstrip(prompt)
- if stop is not None:
- completion = enforce_stop_tokens(completion, stop)
+ # if stop is not None:
+ # completion = enforce_stop_tokens(completion, stop)
return completion
diff --git a/api/core/tool/dataset_multi_retriever_tool.py b/api/core/tool/dataset_multi_retriever_tool.py
new file mode 100644
index 00000000000000..5cf120b63b81f6
--- /dev/null
+++ b/api/core/tool/dataset_multi_retriever_tool.py
@@ -0,0 +1,232 @@
+import json
+import threading
+from typing import Type, Optional, List
+
+from flask import current_app, Flask
+from langchain.tools import BaseTool
+from pydantic import Field, BaseModel
+
+from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCallbackHandler
+from core.conversation_message_task import ConversationMessageTask
+from core.embedding.cached_embedding import CacheEmbedding
+from core.index.keyword_table_index.keyword_table_index import KeywordTableIndex, KeywordTableConfig
+from core.model_providers.error import LLMBadRequestError, ProviderTokenNotInitError
+from core.model_providers.model_factory import ModelFactory
+from extensions.ext_database import db
+from models.dataset import Dataset, DocumentSegment, Document
+from services.retrieval_service import RetrievalService
+
+default_retrieval_model = {
+ 'search_method': 'semantic_search',
+ 'reranking_enable': False,
+ 'reranking_model': {
+ 'reranking_provider_name': '',
+ 'reranking_model_name': ''
+ },
+ 'top_k': 2,
+ 'score_threshold_enable': False
+}
+
+
+class DatasetMultiRetrieverToolInput(BaseModel):
+ query: str = Field(..., description="dataset multi retriever and rerank")
+
+
+class DatasetMultiRetrieverTool(BaseTool):
+ """Tool for querying multi dataset."""
+ name: str = "dataset-"
+ args_schema: Type[BaseModel] = DatasetMultiRetrieverToolInput
+ description: str = "dataset multi retriever and rerank. "
+ tenant_id: str
+ dataset_ids: List[str]
+ top_k: int = 2
+ score_threshold: Optional[float] = None
+ reranking_provider_name: str
+ reranking_model_name: str
+ conversation_message_task: ConversationMessageTask
+ return_resource: bool
+ retriever_from: str
+
+ @classmethod
+ def from_dataset(cls, dataset_ids: List[str], tenant_id: str, **kwargs):
+ return cls(
+ name=f'dataset-{tenant_id}',
+ tenant_id=tenant_id,
+ dataset_ids=dataset_ids,
+ **kwargs
+ )
+
+ def _run(self, query: str) -> str:
+ threads = []
+ all_documents = []
+ for dataset_id in self.dataset_ids:
+ retrieval_thread = threading.Thread(target=self._retriever, kwargs={
+ 'flask_app': current_app._get_current_object(),
+ 'dataset_id': dataset_id,
+ 'query': query,
+ 'all_documents': all_documents
+ })
+ threads.append(retrieval_thread)
+ retrieval_thread.start()
+ for thread in threads:
+ thread.join()
+ # do rerank for searched documents
+ rerank = ModelFactory.get_reranking_model(
+ tenant_id=self.tenant_id,
+ model_provider_name=self.reranking_provider_name,
+ model_name=self.reranking_model_name
+ )
+ all_documents = rerank.rerank(query, all_documents, self.score_threshold, self.top_k)
+
+ hit_callback = DatasetIndexToolCallbackHandler(self.conversation_message_task)
+ hit_callback.on_tool_end(all_documents)
+ document_score_list = {}
+ for item in all_documents:
+ document_score_list[item.metadata['doc_id']] = item.metadata['score']
+
+ document_context_list = []
+ index_node_ids = [document.metadata['doc_id'] for document in all_documents]
+ segments = DocumentSegment.query.filter(
+ DocumentSegment.completed_at.isnot(None),
+ DocumentSegment.status == 'completed',
+ DocumentSegment.enabled == True,
+ DocumentSegment.index_node_id.in_(index_node_ids)
+ ).all()
+
+ if segments:
+ index_node_id_to_position = {id: position for position, id in enumerate(index_node_ids)}
+ sorted_segments = sorted(segments,
+ key=lambda segment: index_node_id_to_position.get(segment.index_node_id,
+ float('inf')))
+ for segment in sorted_segments:
+ if segment.answer:
+ document_context_list.append(f'question:{segment.content} answer:{segment.answer}')
+ else:
+ document_context_list.append(segment.content)
+ if self.return_resource:
+ context_list = []
+ resource_number = 1
+ for segment in sorted_segments:
+ dataset = Dataset.query.filter_by(
+ id=segment.dataset_id
+ ).first()
+ document = Document.query.filter(Document.id == segment.document_id,
+ Document.enabled == True,
+ Document.archived == False,
+ ).first()
+ if dataset and document:
+ source = {
+ 'position': resource_number,
+ 'dataset_id': dataset.id,
+ 'dataset_name': dataset.name,
+ 'document_id': document.id,
+ 'document_name': document.name,
+ 'data_source_type': document.data_source_type,
+ 'segment_id': segment.id,
+ 'retriever_from': self.retriever_from,
+ 'score': document_score_list.get(segment.index_node_id, None)
+ }
+
+ if self.retriever_from == 'dev':
+ source['hit_count'] = segment.hit_count
+ source['word_count'] = segment.word_count
+ source['segment_position'] = segment.position
+ source['index_node_hash'] = segment.index_node_hash
+ if segment.answer:
+ source['content'] = f'question:{segment.content} \nanswer:{segment.answer}'
+ else:
+ source['content'] = segment.content
+ context_list.append(source)
+ resource_number += 1
+ hit_callback.return_retriever_resource_info(context_list)
+
+ return str("\n".join(document_context_list))
+
+ async def _arun(self, tool_input: str) -> str:
+ raise NotImplementedError()
+
+ def _retriever(self, flask_app: Flask, dataset_id: str, query: str, all_documents: List):
+ with flask_app.app_context():
+ dataset = db.session.query(Dataset).filter(
+ Dataset.tenant_id == self.tenant_id,
+ Dataset.id == dataset_id
+ ).first()
+
+ if not dataset:
+ return []
+ # get retrieval model , if the model is not setting , using default
+ retrieval_model = dataset.retrieval_model if dataset.retrieval_model else default_retrieval_model
+
+ if dataset.indexing_technique == "economy":
+ # use keyword table query
+ kw_table_index = KeywordTableIndex(
+ dataset=dataset,
+ config=KeywordTableConfig(
+ max_keywords_per_chunk=5
+ )
+ )
+
+ documents = kw_table_index.search(query, search_kwargs={'k': self.top_k})
+ if documents:
+ all_documents.extend(documents)
+ else:
+
+ try:
+ embedding_model = ModelFactory.get_embedding_model(
+ tenant_id=dataset.tenant_id,
+ model_provider_name=dataset.embedding_model_provider,
+ model_name=dataset.embedding_model
+ )
+ except LLMBadRequestError:
+ return []
+ except ProviderTokenNotInitError:
+ return []
+
+ embeddings = CacheEmbedding(embedding_model)
+
+ documents = []
+ threads = []
+ if self.top_k > 0:
+ # retrieval_model source with semantic
+ if retrieval_model['search_method'] == 'semantic_search' or retrieval_model[
+ 'search_method'] == 'hybrid_search':
+ embedding_thread = threading.Thread(target=RetrievalService.embedding_search, kwargs={
+ 'flask_app': current_app._get_current_object(),
+ 'dataset_id': str(dataset.id),
+ 'query': query,
+ 'top_k': self.top_k,
+ 'score_threshold': self.score_threshold,
+ 'reranking_model': None,
+ 'all_documents': documents,
+ 'search_method': 'hybrid_search',
+ 'embeddings': embeddings
+ })
+ threads.append(embedding_thread)
+ embedding_thread.start()
+
+ # retrieval_model source with full text
+ if retrieval_model['search_method'] == 'full_text_search' or retrieval_model[
+ 'search_method'] == 'hybrid_search':
+ full_text_index_thread = threading.Thread(target=RetrievalService.full_text_index_search,
+ kwargs={
+ 'flask_app': current_app._get_current_object(),
+ 'dataset_id': str(dataset.id),
+ 'query': query,
+ 'search_method': 'hybrid_search',
+ 'embeddings': embeddings,
+ 'score_threshold': retrieval_model[
+ 'score_threshold'] if retrieval_model[
+ 'score_threshold_enable'] else None,
+ 'top_k': self.top_k,
+ 'reranking_model': retrieval_model[
+ 'reranking_model'] if retrieval_model[
+ 'reranking_enable'] else None,
+ 'all_documents': documents
+ })
+ threads.append(full_text_index_thread)
+ full_text_index_thread.start()
+
+ for thread in threads:
+ thread.join()
+
+ all_documents.extend(documents)
diff --git a/api/core/tool/dataset_retriever_tool.py b/api/core/tool/dataset_retriever_tool.py
index 2c14f40d15bfd1..822a6562be511c 100644
--- a/api/core/tool/dataset_retriever_tool.py
+++ b/api/core/tool/dataset_retriever_tool.py
@@ -1,5 +1,6 @@
import json
-from typing import Type, Optional
+import threading
+from typing import Type, Optional, List
from flask import current_app
from langchain.tools import BaseTool
@@ -14,6 +15,18 @@
from core.model_providers.model_factory import ModelFactory
from extensions.ext_database import db
from models.dataset import Dataset, DocumentSegment, Document
+from services.retrieval_service import RetrievalService
+
+default_retrieval_model = {
+ 'search_method': 'semantic_search',
+ 'reranking_enable': False,
+ 'reranking_model': {
+ 'reranking_provider_name': '',
+ 'reranking_model_name': ''
+ },
+ 'top_k': 2,
+ 'score_threshold_enable': False
+}
class DatasetRetrieverToolInput(BaseModel):
@@ -56,7 +69,9 @@ def _run(self, query: str) -> str:
).first()
if not dataset:
- return f'[{self.name} failed to find dataset with id {self.dataset_id}.]'
+ return ''
+ # get retrieval model , if the model is not setting , using default
+ retrieval_model = dataset.retrieval_model if dataset.retrieval_model else default_retrieval_model
if dataset.indexing_technique == "economy":
# use keyword table query
@@ -83,28 +98,62 @@ def _run(self, query: str) -> str:
return ''
embeddings = CacheEmbedding(embedding_model)
- vector_index = VectorIndex(
- dataset=dataset,
- config=current_app.config,
- embeddings=embeddings
- )
+ documents = []
+ threads = []
if self.top_k > 0:
- documents = vector_index.search(
- query,
- search_type='similarity_score_threshold',
- search_kwargs={
- 'k': self.top_k,
- 'score_threshold': self.score_threshold,
- 'filter': {
- 'group_id': [dataset.id]
- }
- }
- )
+ # retrieval source with semantic
+ if retrieval_model['search_method'] == 'semantic_search' or retrieval_model['search_method'] == 'hybrid_search':
+ embedding_thread = threading.Thread(target=RetrievalService.embedding_search, kwargs={
+ 'flask_app': current_app._get_current_object(),
+ 'dataset_id': str(dataset.id),
+ 'query': query,
+ 'top_k': self.top_k,
+ 'score_threshold': retrieval_model['score_threshold'] if retrieval_model[
+ 'score_threshold_enable'] else None,
+ 'reranking_model': retrieval_model['reranking_model'] if retrieval_model[
+ 'reranking_enable'] else None,
+ 'all_documents': documents,
+ 'search_method': retrieval_model['search_method'],
+ 'embeddings': embeddings
+ })
+ threads.append(embedding_thread)
+ embedding_thread.start()
+
+ # retrieval_model source with full text
+ if retrieval_model['search_method'] == 'full_text_search' or retrieval_model['search_method'] == 'hybrid_search':
+ full_text_index_thread = threading.Thread(target=RetrievalService.full_text_index_search, kwargs={
+ 'flask_app': current_app._get_current_object(),
+ 'dataset_id': str(dataset.id),
+ 'query': query,
+ 'search_method': retrieval_model['search_method'],
+ 'embeddings': embeddings,
+ 'score_threshold': retrieval_model['score_threshold'] if retrieval_model[
+ 'score_threshold_enable'] else None,
+ 'top_k': self.top_k,
+ 'reranking_model': retrieval_model['reranking_model'] if retrieval_model[
+ 'reranking_enable'] else None,
+ 'all_documents': documents
+ })
+ threads.append(full_text_index_thread)
+ full_text_index_thread.start()
+
+ for thread in threads:
+ thread.join()
+ # hybrid search: rerank after all documents have been searched
+ if retrieval_model['search_method'] == 'hybrid_search':
+ hybrid_rerank = ModelFactory.get_reranking_model(
+ tenant_id=dataset.tenant_id,
+ model_provider_name=retrieval_model['reranking_model']['reranking_provider_name'],
+ model_name=retrieval_model['reranking_model']['reranking_model_name']
+ )
+ documents = hybrid_rerank.rerank(query, documents,
+ retrieval_model['score_threshold'] if retrieval_model['score_threshold_enable'] else None,
+ self.top_k)
else:
documents = []
- hit_callback = DatasetIndexToolCallbackHandler(dataset.id, self.conversation_message_task)
+ hit_callback = DatasetIndexToolCallbackHandler(self.conversation_message_task)
hit_callback.on_tool_end(documents)
document_score_list = {}
if dataset.indexing_technique != "economy":
@@ -147,10 +196,10 @@ def _run(self, query: str) -> str:
'document_name': document.name,
'data_source_type': document.data_source_type,
'segment_id': segment.id,
- 'retriever_from': self.retriever_from
+ 'retriever_from': self.retriever_from,
+ 'score': document_score_list.get(segment.index_node_id, None)
+
}
- if dataset.indexing_technique != "economy":
- source['score'] = document_score_list.get(segment.index_node_id)
if self.retriever_from == 'dev':
source['hit_count'] = segment.hit_count
source['word_count'] = segment.word_count
diff --git a/api/core/vector_store/milvus_vector_store.py b/api/core/vector_store/milvus_vector_store.py
index a70445dd4c3b1f..0055d76c94177d 100644
--- a/api/core/vector_store/milvus_vector_store.py
+++ b/api/core/vector_store/milvus_vector_store.py
@@ -1,4 +1,4 @@
-from core.index.vector_index.milvus import Milvus
+from core.vector_store.vector.milvus import Milvus
class MilvusVectorStore(Milvus):
diff --git a/api/core/vector_store/qdrant_vector_store.py b/api/core/vector_store/qdrant_vector_store.py
index dc92b8cb249563..e4f6c2c78f2d98 100644
--- a/api/core/vector_store/qdrant_vector_store.py
+++ b/api/core/vector_store/qdrant_vector_store.py
@@ -4,7 +4,7 @@
from qdrant_client.http.models import Filter, PointIdsList, FilterSelector
from qdrant_client.local.qdrant_local import QdrantLocal
-from core.index.vector_index.qdrant import Qdrant
+from core.vector_store.vector.qdrant import Qdrant
class QdrantVectorStore(Qdrant):
@@ -73,3 +73,4 @@ def _reload_if_needed(self):
if isinstance(self.client, QdrantLocal):
self.client = cast(QdrantLocal, self.client)
self.client._load()
+
diff --git a/api/core/index/vector_index/milvus.py b/api/core/vector_store/vector/milvus.py
similarity index 100%
rename from api/core/index/vector_index/milvus.py
rename to api/core/vector_store/vector/milvus.py
diff --git a/api/core/index/vector_index/qdrant.py b/api/core/vector_store/vector/qdrant.py
similarity index 97%
rename from api/core/index/vector_index/qdrant.py
rename to api/core/vector_store/vector/qdrant.py
index 5b9736a0b51f32..33ba0908dd3683 100644
--- a/api/core/index/vector_index/qdrant.py
+++ b/api/core/vector_store/vector/qdrant.py
@@ -28,7 +28,7 @@
from langchain.embeddings.base import Embeddings
from langchain.vectorstores import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
-from qdrant_client.http.models import PayloadSchemaType
+from qdrant_client.http.models import PayloadSchemaType, FilterSelector, TextIndexParams, TokenizerType, TextIndexType
if TYPE_CHECKING:
from qdrant_client import grpc # noqa
@@ -189,14 +189,25 @@ def add_texts(
texts, metadatas, ids, batch_size
):
self.client.upsert(
- collection_name=self.collection_name, points=points, **kwargs
+ collection_name=self.collection_name, points=points
)
added_ids.extend(batch_ids)
# if is new collection, create payload index on group_id
if self.is_new_collection:
+ # create payload index
self.client.create_payload_index(self.collection_name, self.group_payload_key,
field_schema=PayloadSchemaType.KEYWORD,
field_type=PayloadSchemaType.KEYWORD)
+ # creat full text index
+ text_index_params = TextIndexParams(
+ type=TextIndexType.TEXT,
+ tokenizer=TokenizerType.MULTILINGUAL,
+ min_token_len=2,
+ max_token_len=20,
+ lowercase=True
+ )
+ self.client.create_payload_index(self.collection_name, self.content_payload_key,
+ field_schema=text_index_params)
return added_ids
@sync_call_fallback
@@ -600,7 +611,7 @@ def similarity_search_with_score_by_vector(
limit=k,
offset=offset,
with_payload=True,
- with_vectors=True, # Langchain does not expect vectors to be returned
+ with_vectors=True,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
@@ -615,6 +626,39 @@ def similarity_search_with_score_by_vector(
for result in results
]
+ def similarity_search_by_bm25(
+ self,
+ filter: Optional[MetadataFilter] = None,
+ k: int = 4
+ ) -> List[Document]:
+ """Return docs most similar by bm25.
+
+ Args:
+ embedding: Embedding vector to look up documents similar to.
+ k: Number of Documents to return. Defaults to 4.
+ filter: Filter by metadata. Defaults to None.
+ search_params: Additional search params
+ Returns:
+ List of documents most similar to the query text and distance for each.
+ """
+ response = self.client.scroll(
+ collection_name=self.collection_name,
+ scroll_filter=filter,
+ limit=k,
+ with_payload=True,
+ with_vectors=True
+
+ )
+ results = response[0]
+ documents = []
+ for result in results:
+ if result:
+ documents.append(self._document_from_scored_point(
+ result, self.content_payload_key, self.metadata_payload_key
+ ))
+
+ return documents
+
@sync_call_fallback
async def asimilarity_search_with_score_by_vector(
self,
diff --git a/api/core/vector_store/vector/weaviate.py b/api/core/vector_store/vector/weaviate.py
new file mode 100644
index 00000000000000..7da85ed9a0fa64
--- /dev/null
+++ b/api/core/vector_store/vector/weaviate.py
@@ -0,0 +1,506 @@
+"""Wrapper around weaviate vector database."""
+from __future__ import annotations
+
+import datetime
+from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type
+from uuid import uuid4
+
+import numpy as np
+
+from langchain.docstore.document import Document
+from langchain.embeddings.base import Embeddings
+from langchain.utils import get_from_dict_or_env
+from langchain.vectorstores.base import VectorStore
+from langchain.vectorstores.utils import maximal_marginal_relevance
+
+
+def _default_schema(index_name: str) -> Dict:
+ return {
+ "class": index_name,
+ "properties": [
+ {
+ "name": "text",
+ "dataType": ["text"],
+ }
+ ],
+ }
+
+
+def _create_weaviate_client(**kwargs: Any) -> Any:
+ client = kwargs.get("client")
+ if client is not None:
+ return client
+
+ weaviate_url = get_from_dict_or_env(kwargs, "weaviate_url", "WEAVIATE_URL")
+
+ try:
+ # the weaviate api key param should not be mandatory
+ weaviate_api_key = get_from_dict_or_env(
+ kwargs, "weaviate_api_key", "WEAVIATE_API_KEY", None
+ )
+ except ValueError:
+ weaviate_api_key = None
+
+ try:
+ import weaviate
+ except ImportError:
+ raise ValueError(
+ "Could not import weaviate python package. "
+ "Please install it with `pip install weaviate-client`"
+ )
+
+ auth = (
+ weaviate.auth.AuthApiKey(api_key=weaviate_api_key)
+ if weaviate_api_key is not None
+ else None
+ )
+ client = weaviate.Client(weaviate_url, auth_client_secret=auth)
+
+ return client
+
+
+def _default_score_normalizer(val: float) -> float:
+ return 1 - val
+
+
+def _json_serializable(value: Any) -> Any:
+ if isinstance(value, datetime.datetime):
+ return value.isoformat()
+ return value
+
+
+class Weaviate(VectorStore):
+ """Wrapper around Weaviate vector database.
+
+ To use, you should have the ``weaviate-client`` python package installed.
+
+ Example:
+ .. code-block:: python
+
+ import weaviate
+ from langchain.vectorstores import Weaviate
+ client = weaviate.Client(url=os.environ["WEAVIATE_URL"], ...)
+ weaviate = Weaviate(client, index_name, text_key)
+
+ """
+
+ def __init__(
+ self,
+ client: Any,
+ index_name: str,
+ text_key: str,
+ embedding: Optional[Embeddings] = None,
+ attributes: Optional[List[str]] = None,
+ relevance_score_fn: Optional[
+ Callable[[float], float]
+ ] = _default_score_normalizer,
+ by_text: bool = True,
+ ):
+ """Initialize with Weaviate client."""
+ try:
+ import weaviate
+ except ImportError:
+ raise ValueError(
+ "Could not import weaviate python package. "
+ "Please install it with `pip install weaviate-client`."
+ )
+ if not isinstance(client, weaviate.Client):
+ raise ValueError(
+ f"client should be an instance of weaviate.Client, got {type(client)}"
+ )
+ self._client = client
+ self._index_name = index_name
+ self._embedding = embedding
+ self._text_key = text_key
+ self._query_attrs = [self._text_key]
+ self.relevance_score_fn = relevance_score_fn
+ self._by_text = by_text
+ if attributes is not None:
+ self._query_attrs.extend(attributes)
+
+ @property
+ def embeddings(self) -> Optional[Embeddings]:
+ return self._embedding
+
+ def _select_relevance_score_fn(self) -> Callable[[float], float]:
+ return (
+ self.relevance_score_fn
+ if self.relevance_score_fn
+ else _default_score_normalizer
+ )
+
+ def add_texts(
+ self,
+ texts: Iterable[str],
+ metadatas: Optional[List[dict]] = None,
+ **kwargs: Any,
+ ) -> List[str]:
+ """Upload texts with metadata (properties) to Weaviate."""
+ from weaviate.util import get_valid_uuid
+
+ ids = []
+ embeddings: Optional[List[List[float]]] = None
+ if self._embedding:
+ if not isinstance(texts, list):
+ texts = list(texts)
+ embeddings = self._embedding.embed_documents(texts)
+
+ with self._client.batch as batch:
+ for i, text in enumerate(texts):
+ data_properties = {self._text_key: text}
+ if metadatas is not None:
+ for key, val in metadatas[i].items():
+ data_properties[key] = _json_serializable(val)
+
+ # Allow for ids (consistent w/ other methods)
+ # # Or uuids (backwards compatible w/ existing arg)
+ # If the UUID of one of the objects already exists
+ # then the existing object will be replaced by the new object.
+ _id = get_valid_uuid(uuid4())
+ if "uuids" in kwargs:
+ _id = kwargs["uuids"][i]
+ elif "ids" in kwargs:
+ _id = kwargs["ids"][i]
+
+ batch.add_data_object(
+ data_object=data_properties,
+ class_name=self._index_name,
+ uuid=_id,
+ vector=embeddings[i] if embeddings else None,
+ )
+ ids.append(_id)
+ return ids
+
+ def similarity_search(
+ self, query: str, k: int = 4, **kwargs: Any
+ ) -> List[Document]:
+ """Return docs most similar to query.
+
+ Args:
+ query: Text to look up documents similar to.
+ k: Number of Documents to return. Defaults to 4.
+
+ Returns:
+ List of Documents most similar to the query.
+ """
+ if self._by_text:
+ return self.similarity_search_by_text(query, k, **kwargs)
+ else:
+ if self._embedding is None:
+ raise ValueError(
+ "_embedding cannot be None for similarity_search when "
+ "_by_text=False"
+ )
+ embedding = self._embedding.embed_query(query)
+ return self.similarity_search_by_vector(embedding, k, **kwargs)
+
+ def similarity_search_by_text(
+ self, query: str, k: int = 4, **kwargs: Any
+ ) -> List[Document]:
+ """Return docs most similar to query.
+
+ Args:
+ query: Text to look up documents similar to.
+ k: Number of Documents to return. Defaults to 4.
+
+ Returns:
+ List of Documents most similar to the query.
+ """
+ content: Dict[str, Any] = {"concepts": [query]}
+ if kwargs.get("search_distance"):
+ content["certainty"] = kwargs.get("search_distance")
+ query_obj = self._client.query.get(self._index_name, self._query_attrs)
+ if kwargs.get("where_filter"):
+ query_obj = query_obj.with_where(kwargs.get("where_filter"))
+ if kwargs.get("additional"):
+ query_obj = query_obj.with_additional(kwargs.get("additional"))
+ result = query_obj.with_near_text(content).with_limit(k).do()
+ if "errors" in result:
+ raise ValueError(f"Error during query: {result['errors']}")
+ docs = []
+ for res in result["data"]["Get"][self._index_name]:
+ text = res.pop(self._text_key)
+ docs.append(Document(page_content=text, metadata=res))
+ return docs
+
+ def similarity_search_by_bm25(
+ self, query: str, k: int = 4, **kwargs: Any
+ ) -> List[Document]:
+ """Return docs using BM25F.
+
+ Args:
+ query: Text to look up documents similar to.
+ k: Number of Documents to return. Defaults to 4.
+
+ Returns:
+ List of Documents most similar to the query.
+ """
+ content: Dict[str, Any] = {"concepts": [query]}
+ if kwargs.get("search_distance"):
+ content["certainty"] = kwargs.get("search_distance")
+ query_obj = self._client.query.get(self._index_name, self._query_attrs)
+ if kwargs.get("where_filter"):
+ query_obj = query_obj.with_where(kwargs.get("where_filter"))
+ if kwargs.get("additional"):
+ query_obj = query_obj.with_additional(kwargs.get("additional"))
+ properties = ['text', 'dataset_id', 'doc_hash', 'doc_id', 'document_id']
+ result = query_obj.with_bm25(query=query, properties=properties).with_limit(k).do()
+ if "errors" in result:
+ raise ValueError(f"Error during query: {result['errors']}")
+ docs = []
+ for res in result["data"]["Get"][self._index_name]:
+ text = res.pop(self._text_key)
+ docs.append(Document(page_content=text, metadata=res))
+ return docs
+
+ def similarity_search_by_vector(
+ self, embedding: List[float], k: int = 4, **kwargs: Any
+ ) -> List[Document]:
+ """Look up similar documents by embedding vector in Weaviate."""
+ vector = {"vector": embedding}
+ query_obj = self._client.query.get(self._index_name, self._query_attrs)
+ if kwargs.get("where_filter"):
+ query_obj = query_obj.with_where(kwargs.get("where_filter"))
+ if kwargs.get("additional"):
+ query_obj = query_obj.with_additional(kwargs.get("additional"))
+ result = query_obj.with_near_vector(vector).with_limit(k).do()
+ if "errors" in result:
+ raise ValueError(f"Error during query: {result['errors']}")
+ docs = []
+ for res in result["data"]["Get"][self._index_name]:
+ text = res.pop(self._text_key)
+ docs.append(Document(page_content=text, metadata=res))
+ return docs
+
+ def max_marginal_relevance_search(
+ self,
+ query: str,
+ k: int = 4,
+ fetch_k: int = 20,
+ lambda_mult: float = 0.5,
+ **kwargs: Any,
+ ) -> List[Document]:
+ """Return docs selected using the maximal marginal relevance.
+
+ Maximal marginal relevance optimizes for similarity to query AND diversity
+ among selected documents.
+
+ Args:
+ query: Text to look up documents similar to.
+ k: Number of Documents to return. Defaults to 4.
+ fetch_k: Number of Documents to fetch to pass to MMR algorithm.
+ lambda_mult: Number between 0 and 1 that determines the degree
+ of diversity among the results with 0 corresponding
+ to maximum diversity and 1 to minimum diversity.
+ Defaults to 0.5.
+
+ Returns:
+ List of Documents selected by maximal marginal relevance.
+ """
+ if self._embedding is not None:
+ embedding = self._embedding.embed_query(query)
+ else:
+ raise ValueError(
+ "max_marginal_relevance_search requires a suitable Embeddings object"
+ )
+
+ return self.max_marginal_relevance_search_by_vector(
+ embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, **kwargs
+ )
+
+ def max_marginal_relevance_search_by_vector(
+ self,
+ embedding: List[float],
+ k: int = 4,
+ fetch_k: int = 20,
+ lambda_mult: float = 0.5,
+ **kwargs: Any,
+ ) -> List[Document]:
+ """Return docs selected using the maximal marginal relevance.
+
+ Maximal marginal relevance optimizes for similarity to query AND diversity
+ among selected documents.
+
+ Args:
+ embedding: Embedding to look up documents similar to.
+ k: Number of Documents to return. Defaults to 4.
+ fetch_k: Number of Documents to fetch to pass to MMR algorithm.
+ lambda_mult: Number between 0 and 1 that determines the degree
+ of diversity among the results with 0 corresponding
+ to maximum diversity and 1 to minimum diversity.
+ Defaults to 0.5.
+
+ Returns:
+ List of Documents selected by maximal marginal relevance.
+ """
+ vector = {"vector": embedding}
+ query_obj = self._client.query.get(self._index_name, self._query_attrs)
+ if kwargs.get("where_filter"):
+ query_obj = query_obj.with_where(kwargs.get("where_filter"))
+ results = (
+ query_obj.with_additional("vector")
+ .with_near_vector(vector)
+ .with_limit(fetch_k)
+ .do()
+ )
+
+ payload = results["data"]["Get"][self._index_name]
+ embeddings = [result["_additional"]["vector"] for result in payload]
+ mmr_selected = maximal_marginal_relevance(
+ np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult
+ )
+
+ docs = []
+ for idx in mmr_selected:
+ text = payload[idx].pop(self._text_key)
+ payload[idx].pop("_additional")
+ meta = payload[idx]
+ docs.append(Document(page_content=text, metadata=meta))
+ return docs
+
+ def similarity_search_with_score(
+ self, query: str, k: int = 4, **kwargs: Any
+ ) -> List[Tuple[Document, float]]:
+ """
+ Return list of documents most similar to the query
+ text and cosine distance in float for each.
+ Lower score represents more similarity.
+ """
+ if self._embedding is None:
+ raise ValueError(
+ "_embedding cannot be None for similarity_search_with_score"
+ )
+ content: Dict[str, Any] = {"concepts": [query]}
+ if kwargs.get("search_distance"):
+ content["certainty"] = kwargs.get("search_distance")
+ query_obj = self._client.query.get(self._index_name, self._query_attrs)
+
+ embedded_query = self._embedding.embed_query(query)
+ if not self._by_text:
+ vector = {"vector": embedded_query}
+ result = (
+ query_obj.with_near_vector(vector)
+ .with_limit(k)
+ .with_additional(["vector", "distance"])
+ .do()
+ )
+ else:
+ result = (
+ query_obj.with_near_text(content)
+ .with_limit(k)
+ .with_additional(["vector", "distance"])
+ .do()
+ )
+
+ if "errors" in result:
+ raise ValueError(f"Error during query: {result['errors']}")
+
+ docs_and_scores = []
+ for res in result["data"]["Get"][self._index_name]:
+ text = res.pop(self._text_key)
+ score = res["_additional"]["distance"]
+ docs_and_scores.append((Document(page_content=text, metadata=res), score))
+ return docs_and_scores
+
+ @classmethod
+ def from_texts(
+ cls: Type[Weaviate],
+ texts: List[str],
+ embedding: Embeddings,
+ metadatas: Optional[List[dict]] = None,
+ **kwargs: Any,
+ ) -> Weaviate:
+ """Construct Weaviate wrapper from raw documents.
+
+ This is a user-friendly interface that:
+ 1. Embeds documents.
+ 2. Creates a new index for the embeddings in the Weaviate instance.
+ 3. Adds the documents to the newly created Weaviate index.
+
+ This is intended to be a quick way to get started.
+
+ Example:
+ .. code-block:: python
+
+ from langchain.vectorstores.weaviate import Weaviate
+ from langchain.embeddings import OpenAIEmbeddings
+ embeddings = OpenAIEmbeddings()
+ weaviate = Weaviate.from_texts(
+ texts,
+ embeddings,
+ weaviate_url="http://localhost:8080"
+ )
+ """
+
+ client = _create_weaviate_client(**kwargs)
+
+ from weaviate.util import get_valid_uuid
+
+ index_name = kwargs.get("index_name", f"LangChain_{uuid4().hex}")
+ embeddings = embedding.embed_documents(texts) if embedding else None
+ text_key = "text"
+ schema = _default_schema(index_name)
+ attributes = list(metadatas[0].keys()) if metadatas else None
+
+ # check whether the index already exists
+ if not client.schema.contains(schema):
+ client.schema.create_class(schema)
+
+ with client.batch as batch:
+ for i, text in enumerate(texts):
+ data_properties = {
+ text_key: text,
+ }
+ if metadatas is not None:
+ for key in metadatas[i].keys():
+ data_properties[key] = metadatas[i][key]
+
+ # If the UUID of one of the objects already exists
+ # then the existing object will be replaced by the new object.
+ if "uuids" in kwargs:
+ _id = kwargs["uuids"][i]
+ else:
+ _id = get_valid_uuid(uuid4())
+
+ # if an embedding strategy is not provided, we let
+ # weaviate create the embedding. Note that this will only
+ # work if weaviate has been installed with a vectorizer module
+ # like text2vec-contextionary for example
+ params = {
+ "uuid": _id,
+ "data_object": data_properties,
+ "class_name": index_name,
+ }
+ if embeddings is not None:
+ params["vector"] = embeddings[i]
+
+ batch.add_data_object(**params)
+
+ batch.flush()
+
+ relevance_score_fn = kwargs.get("relevance_score_fn")
+ by_text: bool = kwargs.get("by_text", False)
+
+ return cls(
+ client,
+ index_name,
+ text_key,
+ embedding=embedding,
+ attributes=attributes,
+ relevance_score_fn=relevance_score_fn,
+ by_text=by_text,
+ )
+
+ def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
+ """Delete by vector IDs.
+
+ Args:
+ ids: List of ids to delete.
+ """
+
+ if ids is None:
+ raise ValueError("No ids provided to delete.")
+
+ # TODO: Check if this can be done in bulk
+ for id in ids:
+ self._client.data_object.delete(uuid=id)
diff --git a/api/core/vector_store/weaviate_vector_store.py b/api/core/vector_store/weaviate_vector_store.py
index 6dae5688270a7c..b5b3d84a9af36c 100644
--- a/api/core/vector_store/weaviate_vector_store.py
+++ b/api/core/vector_store/weaviate_vector_store.py
@@ -1,4 +1,4 @@
-from langchain.vectorstores import Weaviate
+from core.vector_store.vector.weaviate import Weaviate
class WeaviateVectorStore(Weaviate):
diff --git a/api/fields/dataset_fields.py b/api/fields/dataset_fields.py
index 90af9e1fdd9b17..d7be65be012f15 100644
--- a/api/fields/dataset_fields.py
+++ b/api/fields/dataset_fields.py
@@ -12,6 +12,21 @@
'created_at': TimestampField,
}
+reranking_model_fields = {
+ 'reranking_provider_name': fields.String,
+ 'reranking_model_name': fields.String
+}
+
+dataset_retrieval_model_fields = {
+ 'search_method': fields.String,
+ 'reranking_enable': fields.Boolean,
+ 'reranking_model': fields.Nested(reranking_model_fields),
+ 'top_k': fields.Integer,
+ 'score_threshold_enable': fields.Boolean,
+ 'score_threshold': fields.Float
+}
+
+
dataset_detail_fields = {
'id': fields.String,
'name': fields.String,
@@ -29,7 +44,8 @@
'updated_at': TimestampField,
'embedding_model': fields.String,
'embedding_model_provider': fields.String,
- 'embedding_available': fields.Boolean
+ 'embedding_available': fields.Boolean,
+ 'retrieval_model_dict': fields.Nested(dataset_retrieval_model_fields)
}
dataset_query_detail_fields = {
@@ -41,3 +57,5 @@
"created_by": fields.String,
"created_at": TimestampField
}
+
+
diff --git a/api/migrations/versions/fca025d3b60f_add_dataset_retrival_model.py b/api/migrations/versions/fca025d3b60f_add_dataset_retrival_model.py
new file mode 100644
index 00000000000000..c16781c15de75c
--- /dev/null
+++ b/api/migrations/versions/fca025d3b60f_add_dataset_retrival_model.py
@@ -0,0 +1,43 @@
+"""add-dataset-retrival-model
+
+Revision ID: fca025d3b60f
+Revises: b3a09c049e8e
+Create Date: 2023-11-03 13:08:23.246396
+
+"""
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision = 'fca025d3b60f'
+down_revision = '8fe468ba0ca5'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('sessions')
+ with op.batch_alter_table('datasets', schema=None) as batch_op:
+ batch_op.add_column(sa.Column('retrieval_model', postgresql.JSONB(astext_type=sa.Text()), nullable=True))
+ batch_op.create_index('retrieval_model_idx', ['retrieval_model'], unique=False, postgresql_using='gin')
+
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ with op.batch_alter_table('datasets', schema=None) as batch_op:
+ batch_op.drop_index('retrieval_model_idx', postgresql_using='gin')
+ batch_op.drop_column('retrieval_model')
+
+ op.create_table('sessions',
+ sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
+ sa.Column('session_id', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
+ sa.Column('data', postgresql.BYTEA(), autoincrement=False, nullable=True),
+ sa.Column('expiry', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),
+ sa.PrimaryKeyConstraint('id', name='sessions_pkey'),
+ sa.UniqueConstraint('session_id', name='sessions_session_id_key')
+ )
+ # ### end Alembic commands ###
diff --git a/api/models/dataset.py b/api/models/dataset.py
index a9a33cc1a7b470..5fbf035f848928 100644
--- a/api/models/dataset.py
+++ b/api/models/dataset.py
@@ -3,7 +3,7 @@
from json import JSONDecodeError
from sqlalchemy import func
-from sqlalchemy.dialects.postgresql import UUID
+from sqlalchemy.dialects.postgresql import UUID, JSONB
from extensions.ext_database import db
from models.account import Account
@@ -15,6 +15,7 @@ class Dataset(db.Model):
__table_args__ = (
db.PrimaryKeyConstraint('id', name='dataset_pkey'),
db.Index('dataset_tenant_idx', 'tenant_id'),
+ db.Index('retrieval_model_idx', "retrieval_model", postgresql_using='gin')
)
INDEXING_TECHNIQUE_LIST = ['high_quality', 'economy']
@@ -39,7 +40,7 @@ class Dataset(db.Model):
embedding_model = db.Column(db.String(255), nullable=True)
embedding_model_provider = db.Column(db.String(255), nullable=True)
collection_binding_id = db.Column(UUID, nullable=True)
-
+ retrieval_model = db.Column(JSONB, nullable=True)
@property
def dataset_keyword_table(self):
@@ -93,6 +94,20 @@ def word_count(self):
return Document.query.with_entities(func.coalesce(func.sum(Document.word_count))) \
.filter(Document.dataset_id == self.id).scalar()
+ @property
+ def retrieval_model_dict(self):
+ default_retrieval_model = {
+ 'search_method': 'semantic_search',
+ 'reranking_enable': False,
+ 'reranking_model': {
+ 'reranking_provider_name': '',
+ 'reranking_model_name': ''
+ },
+ 'top_k': 2,
+ 'score_threshold_enable': False
+ }
+ return self.retrieval_model if self.retrieval_model else default_retrieval_model
+
class DatasetProcessRule(db.Model):
__tablename__ = 'dataset_process_rules'
@@ -120,7 +135,7 @@ class DatasetProcessRule(db.Model):
],
'segmentation': {
'delimiter': '\n',
- 'max_tokens': 1000
+ 'max_tokens': 512
}
}
@@ -462,4 +477,3 @@ class DatasetCollectionBinding(db.Model):
model_name = db.Column(db.String(40), nullable=False)
collection_name = db.Column(db.String(64), nullable=False)
created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
-
diff --git a/api/models/model.py b/api/models/model.py
index b7cd428839f554..b3570f7f4272d8 100644
--- a/api/models/model.py
+++ b/api/models/model.py
@@ -160,7 +160,13 @@ def completion_prompt_config_dict(self) -> dict:
@property
def dataset_configs_dict(self) -> dict:
- return json.loads(self.dataset_configs) if self.dataset_configs else {"top_k": 2, "score_threshold": {"enable": False}}
+ if self.dataset_configs:
+ dataset_configs = json.loads(self.dataset_configs)
+ if 'retrieval_model' not in dataset_configs:
+ return {'retrieval_model': 'single'}
+ else:
+ return dataset_configs
+ return {'retrieval_model': 'single'}
@property
def file_upload_dict(self) -> dict:
diff --git a/api/requirements.txt b/api/requirements.txt
index 91b373d4062933..7b5ed73f8c893d 100644
--- a/api/requirements.txt
+++ b/api/requirements.txt
@@ -23,7 +23,6 @@ boto3==1.28.17
tenacity==8.2.2
cachetools~=5.3.0
weaviate-client~=3.21.0
-qdrant_client~=1.1.6
mailchimp-transactional~=1.0.50
scikit-learn==1.2.2
sentry-sdk[flask]~=1.21.1
@@ -36,7 +35,7 @@ docx2txt==0.8
pypdfium2==4.16.0
resend~=0.5.1
pyjwt~=2.6.0
-anthropic~=0.3.4
+anthropic~=0.7.2
newspaper3k==0.2.8
google-api-python-client==2.90.0
wikipedia==1.4.0
@@ -53,4 +52,6 @@ xinference-client~=0.5.4
safetensors==0.3.2
zhipuai==1.0.7
werkzeug==2.3.7
-pymilvus==2.3.0
\ No newline at end of file
+pymilvus==2.3.0
+qdrant-client==1.6.4
+cohere~=4.32
\ No newline at end of file
diff --git a/api/services/account_service.py b/api/services/account_service.py
index 58c315c4249b55..c1f5dcdc1bd2a4 100644
--- a/api/services/account_service.py
+++ b/api/services/account_service.py
@@ -489,9 +489,10 @@ def generate_invite_token(cls, tenant: Tenant, account: Account) -> str:
'email': account.email,
'workspace_id': tenant.id,
}
+ expiryHours = current_app.config['INVITE_EXPIRY_HOURS']
redis_client.setex(
cls._get_invitation_token_key(token),
- 3600,
+ expiryHours * 60 * 60,
json.dumps(invitation_data)
)
return token
diff --git a/api/services/app_model_config_service.py b/api/services/app_model_config_service.py
index be7947d7f69582..3ffd8b0431e9ae 100644
--- a/api/services/app_model_config_service.py
+++ b/api/services/app_model_config_service.py
@@ -470,7 +470,16 @@ def is_advanced_prompt_valid(cls, config: dict, app_mode: str) -> None:
# dataset_configs
if 'dataset_configs' not in config or not config["dataset_configs"]:
- config["dataset_configs"] = {"top_k": 2, "score_threshold": {"enable": False}}
+ config["dataset_configs"] = {'retrieval_model': 'single'}
+
+ if not isinstance(config["dataset_configs"], dict):
+ raise ValueError("dataset_configs must be of object type")
+
+ if config["dataset_configs"]['retrieval_model'] == 'multiple':
+ if not config["dataset_configs"]['reranking_model']:
+ raise ValueError("reranking_model has not been set")
+ if not isinstance(config["dataset_configs"]['reranking_model'], dict):
+ raise ValueError("reranking_model must be of object type")
if not isinstance(config["dataset_configs"], dict):
raise ValueError("dataset_configs must be of object type")
diff --git a/api/services/completion_service.py b/api/services/completion_service.py
index 280bdf76963b01..249766236f61c2 100644
--- a/api/services/completion_service.py
+++ b/api/services/completion_service.py
@@ -232,7 +232,7 @@ def generate_worker(cls, flask_app: Flask, generate_task_id: str, detached_app_m
logging.exception("Unknown Error in completion")
PubHandler.pub_error(user, generate_task_id, e)
finally:
- db.session.commit()
+ db.session.remove()
@classmethod
def countdown_and_close(cls, flask_app: Flask, worker_thread, pubsub, detached_user,
@@ -242,22 +242,25 @@ def countdown_and_close(cls, flask_app: Flask, worker_thread, pubsub, detached_u
def close_pubsub():
with flask_app.app_context():
- user = db.session.merge(detached_user)
-
- sleep_iterations = 0
- while sleep_iterations < timeout and worker_thread.is_alive():
- if sleep_iterations > 0 and sleep_iterations % 10 == 0:
- PubHandler.ping(user, generate_task_id)
-
- time.sleep(1)
- sleep_iterations += 1
-
- if worker_thread.is_alive():
- PubHandler.stop(user, generate_task_id)
- try:
- pubsub.close()
- except Exception:
- pass
+ try:
+ user = db.session.merge(detached_user)
+
+ sleep_iterations = 0
+ while sleep_iterations < timeout and worker_thread.is_alive():
+ if sleep_iterations > 0 and sleep_iterations % 10 == 0:
+ PubHandler.ping(user, generate_task_id)
+
+ time.sleep(1)
+ sleep_iterations += 1
+
+ if worker_thread.is_alive():
+ PubHandler.stop(user, generate_task_id)
+ try:
+ pubsub.close()
+ except Exception:
+ pass
+ finally:
+ db.session.remove()
countdown_thread = threading.Thread(target=close_pubsub)
countdown_thread.start()
@@ -394,7 +397,7 @@ def compact_response(cls, pubsub: PubSub, streaming: bool = False) -> Union[dict
logging.exception(e)
raise
finally:
- db.session.commit()
+ db.session.remove()
try:
pubsub.unsubscribe(generate_channel)
@@ -436,7 +439,7 @@ def generate() -> Generator:
logging.exception(e)
raise
finally:
- db.session.commit()
+ db.session.remove()
try:
pubsub.unsubscribe(generate_channel)
diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py
index ede31246945f94..defe539ae920cc 100644
--- a/api/services/dataset_service.py
+++ b/api/services/dataset_service.py
@@ -173,6 +173,9 @@ def update_dataset(dataset_id, data, user):
filtered_data['updated_by'] = user.id
filtered_data['updated_at'] = datetime.datetime.now()
+ # update Retrieval model
+ filtered_data['retrieval_model'] = data['retrieval_model']
+
dataset.query.filter_by(id=dataset_id).update(filtered_data)
db.session.commit()
@@ -473,7 +476,19 @@ def save_document_with_dataset_id(dataset: Dataset, document_data: dict,
embedding_model.name
)
dataset.collection_binding_id = dataset_collection_binding.id
+ if not dataset.retrieval_model:
+ default_retrieval_model = {
+ 'search_method': 'semantic_search',
+ 'reranking_enable': False,
+ 'reranking_model': {
+ 'reranking_provider_name': '',
+ 'reranking_model_name': ''
+ },
+ 'top_k': 2,
+ 'score_threshold_enable': False
+ }
+ dataset.retrieval_model = document_data.get('retrieval_model') if document_data.get('retrieval_model') else default_retrieval_model
documents = []
batch = time.strftime('%Y%m%d%H%M%S') + str(random.randint(100000, 999999))
@@ -733,6 +748,7 @@ def save_document_without_dataset_id(tenant_id: str, document_data: dict, accoun
raise ValueError(f"All your documents have overed limit {tenant_document_count}.")
embedding_model = None
dataset_collection_binding_id = None
+ retrieval_model = None
if document_data['indexing_technique'] == 'high_quality':
embedding_model = ModelFactory.get_embedding_model(
tenant_id=tenant_id
@@ -742,6 +758,20 @@ def save_document_without_dataset_id(tenant_id: str, document_data: dict, accoun
embedding_model.name
)
dataset_collection_binding_id = dataset_collection_binding.id
+ if 'retrieval_model' in document_data and document_data['retrieval_model']:
+ retrieval_model = document_data['retrieval_model']
+ else:
+ default_retrieval_model = {
+ 'search_method': 'semantic_search',
+ 'reranking_enable': False,
+ 'reranking_model': {
+ 'reranking_provider_name': '',
+ 'reranking_model_name': ''
+ },
+ 'top_k': 2,
+ 'score_threshold_enable': False
+ }
+ retrieval_model = default_retrieval_model
# save dataset
dataset = Dataset(
tenant_id=tenant_id,
@@ -751,7 +781,8 @@ def save_document_without_dataset_id(tenant_id: str, document_data: dict, accoun
created_by=account.id,
embedding_model=embedding_model.name if embedding_model else None,
embedding_model_provider=embedding_model.model_provider.provider_name if embedding_model else None,
- collection_binding_id=dataset_collection_binding_id
+ collection_binding_id=dataset_collection_binding_id,
+ retrieval_model=retrieval_model
)
db.session.add(dataset)
@@ -768,7 +799,7 @@ def save_document_without_dataset_id(tenant_id: str, document_data: dict, accoun
return dataset, documents, batch
@classmethod
- def document_create_args_validate(cls, args: dict):
+ def document_create_args_validate(cls, args: dict):
if 'original_document_id' not in args or not args['original_document_id']:
DocumentService.data_source_args_validate(args)
DocumentService.process_rule_args_validate(args)
diff --git a/api/services/hit_testing_service.py b/api/services/hit_testing_service.py
index 063292969cd55a..831a37d670aa45 100644
--- a/api/services/hit_testing_service.py
+++ b/api/services/hit_testing_service.py
@@ -1,4 +1,6 @@
+import json
import logging
+import threading
import time
from typing import List
@@ -9,16 +11,26 @@
from sklearn.manifold import TSNE
from core.embedding.cached_embedding import CacheEmbedding
-from core.index.vector_index.vector_index import VectorIndex
from core.model_providers.model_factory import ModelFactory
from extensions.ext_database import db
from models.account import Account
from models.dataset import Dataset, DocumentSegment, DatasetQuery
-
+from services.retrieval_service import RetrievalService
+
+default_retrieval_model = {
+ 'search_method': 'semantic_search',
+ 'reranking_enable': False,
+ 'reranking_model': {
+ 'reranking_provider_name': '',
+ 'reranking_model_name': ''
+ },
+ 'top_k': 2,
+ 'score_threshold_enable': False
+}
class HitTestingService:
@classmethod
- def retrieve(cls, dataset: Dataset, query: str, account: Account, limit: int = 10) -> dict:
+ def retrieve(cls, dataset: Dataset, query: str, account: Account, retrieval_model: dict, limit: int = 10) -> dict:
if dataset.available_document_count == 0 or dataset.available_segment_count == 0:
return {
"query": {
@@ -28,31 +40,68 @@ def retrieve(cls, dataset: Dataset, query: str, account: Account, limit: int = 1
"records": []
}
+ start = time.perf_counter()
+
+ # get retrieval model; if the model is not set, use the default
+ if not retrieval_model:
+ retrieval_model = dataset.retrieval_model if dataset.retrieval_model else default_retrieval_model
+
+ # get embedding model
embedding_model = ModelFactory.get_embedding_model(
tenant_id=dataset.tenant_id,
model_provider_name=dataset.embedding_model_provider,
model_name=dataset.embedding_model
)
-
embeddings = CacheEmbedding(embedding_model)
- vector_index = VectorIndex(
- dataset=dataset,
- config=current_app.config,
- embeddings=embeddings
- )
+ all_documents = []
+ threads = []
+
+ # retrieval_model source with semantic
+ if retrieval_model['search_method'] == 'semantic_search' or retrieval_model['search_method'] == 'hybrid_search':
+ embedding_thread = threading.Thread(target=RetrievalService.embedding_search, kwargs={
+ 'flask_app': current_app._get_current_object(),
+ 'dataset_id': str(dataset.id),
+ 'query': query,
+ 'top_k': retrieval_model['top_k'],
+ 'score_threshold': retrieval_model['score_threshold'] if retrieval_model['score_threshold_enable'] else None,
+ 'reranking_model': retrieval_model['reranking_model'] if retrieval_model['reranking_enable'] else None,
+ 'all_documents': all_documents,
+ 'search_method': retrieval_model['search_method'],
+ 'embeddings': embeddings
+ })
+ threads.append(embedding_thread)
+ embedding_thread.start()
+
+ # retrieval source with full text
+ if retrieval_model['search_method'] == 'full_text_search' or retrieval_model['search_method'] == 'hybrid_search':
+ full_text_index_thread = threading.Thread(target=RetrievalService.full_text_index_search, kwargs={
+ 'flask_app': current_app._get_current_object(),
+ 'dataset_id': str(dataset.id),
+ 'query': query,
+ 'search_method': retrieval_model['search_method'],
+ 'embeddings': embeddings,
+ 'score_threshold': retrieval_model['score_threshold'] if retrieval_model['score_threshold_enable'] else None,
+ 'top_k': retrieval_model['top_k'],
+ 'reranking_model': retrieval_model['reranking_model'] if retrieval_model['reranking_enable'] else None,
+ 'all_documents': all_documents
+ })
+ threads.append(full_text_index_thread)
+ full_text_index_thread.start()
+
+ for thread in threads:
+ thread.join()
+
+ if retrieval_model['search_method'] == 'hybrid_search':
+ hybrid_rerank = ModelFactory.get_reranking_model(
+ tenant_id=dataset.tenant_id,
+ model_provider_name=retrieval_model['reranking_model']['reranking_provider_name'],
+ model_name=retrieval_model['reranking_model']['reranking_model_name']
+ )
+ all_documents = hybrid_rerank.rerank(query, all_documents,
+ retrieval_model['score_threshold'] if retrieval_model['score_threshold_enable'] else None,
+ retrieval_model['top_k'])
- start = time.perf_counter()
- documents = vector_index.search(
- query,
- search_type='similarity_score_threshold',
- search_kwargs={
- 'k': 10,
- 'filter': {
- 'group_id': [dataset.id]
- }
- }
- )
end = time.perf_counter()
logging.debug(f"Hit testing retrieve in {end - start:0.4f} seconds")
@@ -67,7 +116,7 @@ def retrieve(cls, dataset: Dataset, query: str, account: Account, limit: int = 1
db.session.add(dataset_query)
db.session.commit()
- return cls.compact_retrieve_response(dataset, embeddings, query, documents)
+ return cls.compact_retrieve_response(dataset, embeddings, query, all_documents)
@classmethod
def compact_retrieve_response(cls, dataset: Dataset, embeddings: Embeddings, query: str, documents: List[Document]):
@@ -99,7 +148,7 @@ def compact_retrieve_response(cls, dataset: Dataset, embeddings: Embeddings, que
record = {
"segment": segment,
- "score": document.metadata['score'],
+ "score": document.metadata.get('score', None),
"tsne_position": tsne_position_data[i]
}
@@ -136,3 +185,11 @@ def get_tsne_positions_from_embeddings(cls, embeddings: list):
tsne_position_data.append({'x': float(data_tsne[i][0]), 'y': float(data_tsne[i][1])})
return tsne_position_data
+
+ @classmethod
+ def hit_testing_args_check(cls, args):
+ query = args['query']
+
+ if not query or len(query) > 250:
+ raise ValueError('Query is required and cannot exceed 250 characters')
+
diff --git a/api/services/provider_service.py b/api/services/provider_service.py
index 0cfd5194e4a787..95b4e0f81c6074 100644
--- a/api/services/provider_service.py
+++ b/api/services/provider_service.py
@@ -17,11 +17,12 @@
class ProviderService:
- def get_provider_list(self, tenant_id: str):
+ def get_provider_list(self, tenant_id: str, model_type: Optional[str] = None) -> list:
"""
get provider list of tenant.
- :param tenant_id:
+ :param tenant_id: workspace id
+ :param model_type: filter by model type
:return:
"""
# get rules for all providers
@@ -86,7 +87,10 @@ def get_provider_list(self, tenant_id: str):
providers_list = {}
# 遍历每个模型提供商和它们的规则
for model_provider_name, model_provider_rule in model_provider_rules.items():
- # 获取租户首选的提供商类型
+ if model_type and model_type not in model_provider_rule.get('supported_model_types', []):
+ continue
+
+ # get preferred provider type
preferred_model_provider = provider_name_to_preferred_provider_type_dict.get(model_provider_name)
preferred_provider_type = ModelProviderFactory.get_preferred_type_by_preferred_model_provider(
tenant_id,
@@ -98,6 +102,7 @@ def get_provider_list(self, tenant_id: str):
provider_config_dict = {
"preferred_provider_type": preferred_provider_type,
"model_flexibility": model_provider_rule['model_flexibility'],
+ "supported_model_types": model_provider_rule.get("supported_model_types", []),
}
# 记录每个模型供应商的系统和自定义提供程序类型的参数。
diff --git a/api/services/retrieval_service.py b/api/services/retrieval_service.py
new file mode 100644
index 00000000000000..f12533f2b024bb
--- /dev/null
+++ b/api/services/retrieval_service.py
@@ -0,0 +1,95 @@
+
+from typing import Optional
+from flask import current_app, Flask
+from langchain.embeddings.base import Embeddings
+from core.index.vector_index.vector_index import VectorIndex
+from core.model_providers.model_factory import ModelFactory
+from extensions.ext_database import db
+from models.dataset import Dataset
+
+default_retrieval_model = {
+ 'search_method': 'semantic_search',
+ 'reranking_enable': False,
+ 'reranking_model': {
+ 'reranking_provider_name': '',
+ 'reranking_model_name': ''
+ },
+ 'top_k': 2,
+ 'score_threshold_enable': False
+}
+
+
+class RetrievalService:
+
+ @classmethod
+ def embedding_search(cls, flask_app: Flask, dataset_id: str, query: str,
+ top_k: int, score_threshold: Optional[float], reranking_model: Optional[dict],
+ all_documents: list, search_method: str, embeddings: Embeddings):
+ with flask_app.app_context():
+ dataset = db.session.query(Dataset).filter(
+ Dataset.id == dataset_id
+ ).first()
+
+ vector_index = VectorIndex(
+ dataset=dataset,
+ config=current_app.config,
+ embeddings=embeddings
+ )
+
+ documents = vector_index.search(
+ query,
+ search_type='similarity_score_threshold',
+ search_kwargs={
+ 'k': top_k,
+ 'score_threshold': score_threshold,
+ 'filter': {
+ 'group_id': [dataset.id]
+ }
+ }
+ )
+
+ if documents:
+ if reranking_model and search_method == 'semantic_search':
+ rerank = ModelFactory.get_reranking_model(
+ tenant_id=dataset.tenant_id,
+ model_provider_name=reranking_model['reranking_provider_name'],
+ model_name=reranking_model['reranking_model_name']
+ )
+ all_documents.extend(rerank.rerank(query, documents, score_threshold, len(documents)))
+ else:
+ all_documents.extend(documents)
+
+ @classmethod
+ def full_text_index_search(cls, flask_app: Flask, dataset_id: str, query: str,
+ top_k: int, score_threshold: Optional[float], reranking_model: Optional[dict],
+ all_documents: list, search_method: str, embeddings: Embeddings):
+ with flask_app.app_context():
+ dataset = db.session.query(Dataset).filter(
+ Dataset.id == dataset_id
+ ).first()
+
+ vector_index = VectorIndex(
+ dataset=dataset,
+ config=current_app.config,
+ embeddings=embeddings
+ )
+
+ documents = vector_index.search_by_full_text_index(
+ query,
+ search_type='similarity_score_threshold',
+ top_k=top_k
+ )
+ if documents:
+ if reranking_model and search_method == 'full_text_search':
+ rerank = ModelFactory.get_reranking_model(
+ tenant_id=dataset.tenant_id,
+ model_provider_name=reranking_model['reranking_provider_name'],
+ model_name=reranking_model['reranking_model_name']
+ )
+ all_documents.extend(rerank.rerank(query, documents, score_threshold, len(documents)))
+ else:
+ all_documents.extend(documents)
+
+
+
+
diff --git a/api/tests/unit_tests/model_providers/test_anthropic_provider.py b/api/tests/unit_tests/model_providers/test_anthropic_provider.py
index ea4b62a20a6e6d..d4cc9beaaa3230 100644
--- a/api/tests/unit_tests/model_providers/test_anthropic_provider.py
+++ b/api/tests/unit_tests/model_providers/test_anthropic_provider.py
@@ -31,12 +31,12 @@ def mock_chat_generate_invalid(messages: List[BaseMessage],
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any):
raise anthropic.APIStatusError('Invalid credentials',
- request=httpx._models.Request(
- method='POST',
- url='https://api.anthropic.com/v1/completions',
- ),
response=httpx._models.Response(
status_code=401,
+ request=httpx._models.Request(
+ method='POST',
+ url='https://api.anthropic.com/v1/completions',
+ )
),
body=None
)
diff --git a/docker/docker-compose.middleware.yaml b/docker/docker-compose.middleware.yaml
index 559f5f6b5b4eb5..7213a38a0dca18 100644
--- a/docker/docker-compose.middleware.yaml
+++ b/docker/docker-compose.middleware.yaml
@@ -30,7 +30,7 @@ services:
# The Weaviate vector store.
weaviate:
- image: semitechnologies/weaviate:1.18.4
+ image: semitechnologies/weaviate:1.19.0
restart: always
volumes:
# Mount the Weaviate data directory to the container.
@@ -63,4 +63,4 @@ services:
# environment:
# QDRANT__API_KEY: 'difyai123456'
# ports:
-# - "6333:6333"
\ No newline at end of file
+# - "6333:6333"
diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml
index adf0fe55979202..34401129e7ac41 100644
--- a/docker/docker-compose.yaml
+++ b/docker/docker-compose.yaml
@@ -253,6 +253,9 @@ services:
command: redis-server --requirepass difyai123456
healthcheck:
test: ["CMD", "redis-cli","ping"]
+# uncomment to expose redis port to host
+# ports:
+# - "6379:6379"
# The nginx reverse proxy.
# used for reverse proxying the API service and Web service.
@@ -267,7 +270,6 @@ services:
- web
ports:
- "80:80"
-
# about milvus
etcd:
container_name: milvus-etcd
diff --git a/images/demo.png b/images/demo.png
new file mode 100644
index 00000000000000..ad0cbe0fb5b9d0
Binary files /dev/null and b/images/demo.png differ
diff --git a/images/describe-cn.jpg b/images/describe-cn.jpg
deleted file mode 100644
index 10bdc286579020..00000000000000
Binary files a/images/describe-cn.jpg and /dev/null differ
diff --git a/images/describe-en.png b/images/describe-en.png
deleted file mode 100644
index bf2bd226757515..00000000000000
Binary files a/images/describe-en.png and /dev/null differ
diff --git a/images/describe.png b/images/describe.png
new file mode 100644
index 00000000000000..5f179a35e086a7
Binary files /dev/null and b/images/describe.png differ
diff --git a/images/models.png b/images/models.png
new file mode 100644
index 00000000000000..d30ddcac91b950
Binary files /dev/null and b/images/models.png differ
diff --git a/images/wechat.png b/images/wechat.png
new file mode 100644
index 00000000000000..3cc6077edcd437
Binary files /dev/null and b/images/wechat.png differ
diff --git a/sdks/nodejs-client/README.md b/sdks/nodejs-client/README.md
index a9cf3edacc299c..50303b48675f06 100644
--- a/sdks/nodejs-client/README.md
+++ b/sdks/nodejs-client/README.md
@@ -14,27 +14,33 @@ import { DifyClient, ChatClient, CompletionClient } from 'dify-client'
const API_KEY = 'your-api-key-here'
const user = `random-user-id`
-const inputs = {
- name: 'test name a'
-}
-const query = "Please tell me a short story in 10 words or less."
+const query = 'Please tell me a short story in 10 words or less.'
+const remote_url_files = [{
+ type: 'image',
+ transfer_method: 'remote_url',
+ url: 'your_url_addresss'
+}]
// Create a completion client
const completionClient = new CompletionClient(API_KEY)
// Create a completion message
-completionClient.createCompletionMessage(inputs, query, responseMode, user)
+completionClient.createCompletionMessage({'query': query}, user)
+// Create a completion message with vision model
+completionClient.createCompletionMessage({'query': 'Describe the picture.'}, user, false, remote_url_files)
// Create a chat client
const chatClient = new ChatClient(API_KEY)
// Create a chat message in stream mode
-const response = await chatClient.createChatMessage(inputs, query, user, true, null)
+const response = await chatClient.createChatMessage({}, query, user, true, null)
const stream = response.data;
stream.on('data', data => {
console.log(data);
});
stream.on('end', () => {
- console.log("stream done");
+ console.log('stream done');
});
+// Create a chat message with vision model
+chatClient.createChatMessage({}, 'Describe the picture.', user, false, null, remote_url_files)
// Fetch conversations
chatClient.getConversations(user)
// Fetch conversation messages
diff --git a/sdks/nodejs-client/index.js b/sdks/nodejs-client/index.js
index 8aa69e9bbdd799..584ee8743b46fa 100644
--- a/sdks/nodejs-client/index.js
+++ b/sdks/nodejs-client/index.js
@@ -34,6 +34,10 @@ export const routes = {
method: "DELETE",
url: (conversation_id) => `/conversations/${conversation_id}`,
},
+ fileUpload: {
+ method: "POST",
+ url: () => `/files/upload`,
+ }
};
export class DifyClient {
@@ -51,11 +55,15 @@ export class DifyClient {
endpoint,
data = null,
params = null,
- stream = false
+ stream = false,
+ headerParams = {}
) {
const headers = {
- Authorization: `Bearer ${this.apiKey}`,
- "Content-Type": "application/json",
+ ...{
+ Authorization: `Bearer ${this.apiKey}`,
+ "Content-Type": "application/json",
+ },
+ ...headerParams
};
const url = `${this.baseUrl}${endpoint}`;
@@ -104,15 +112,28 @@ export class DifyClient {
params
);
}
+
+ fileUpload(data) {
+ return this.sendRequest(
+ routes.fileUpload.method,
+ routes.fileUpload.url(),
+ data,
+ null,
+ false,
+ {
+ "Content-Type": 'multipart/form-data'
+ }
+ );
+ }
}
export class CompletionClient extends DifyClient {
- createCompletionMessage(inputs, query, user, stream = false) {
+ createCompletionMessage(inputs, user, stream = false, files = null) {
const data = {
inputs,
- query,
user,
response_mode: stream ? "streaming" : "blocking",
+ files,
};
return this.sendRequest(
routes.createCompletionMessage.method,
@@ -130,13 +151,15 @@ export class ChatClient extends DifyClient {
query,
user,
stream = false,
- conversation_id = null
+ conversation_id = null,
+ files = null
) {
const data = {
inputs,
query,
user,
response_mode: stream ? "streaming" : "blocking",
+ files,
};
if (conversation_id) data.conversation_id = conversation_id;
diff --git a/sdks/nodejs-client/package.json b/sdks/nodejs-client/package.json
index a59b766a153cb3..30de613f3020ec 100644
--- a/sdks/nodejs-client/package.json
+++ b/sdks/nodejs-client/package.json
@@ -1,6 +1,6 @@
{
"name": "dify-client",
- "version": "2.0.0",
+ "version": "2.1.0",
"description": "This is the Node.js SDK for the Dify.AI API, which allows you to easily integrate Dify.AI into your Node.js applications.",
"main": "index.js",
"type": "module",
diff --git a/sdks/php-client/README.md b/sdks/php-client/README.md
index a44abc0f8061dd..b0a435bbaf20b3 100644
--- a/sdks/php-client/README.md
+++ b/sdks/php-client/README.md
@@ -11,7 +11,7 @@ This is the PHP SDK for the Dify API, which allows you to easily integrate Dify
After installing the SDK, you can use it in your project like this:
-```
+```php
create_completion_message($inputs, $query, $response_mode, $user);
+$response = $completionClient->create_completion_message(array("query" => "Who are you?"), "blocking", "user_id");
// Create a chat client
$chatClient = new ChatClient($apiKey);
-$response = $chatClient->create_chat_message($inputs, $query, $user, $response_mode, $conversation_id);
+$response = $chatClient->create_chat_message(array(), "Who are you?", "user_id", "blocking", $conversation_id);
+
+$fileForVision = [
+ [
+ "type" => "image",
+ "transfer_method" => "remote_url",
+ "url" => "your_image_url"
+ ]
+];
+
+// $fileForVision = [
+// [
+// "type" => "image",
+// "transfer_method" => "local_file",
+// "url" => "your_file_id"
+// ]
+// ];
+
+// Create a completion client with vision model like gpt-4-vision
+$response = $completionClient->create_completion_message(array("query" => "Describe this image."), "blocking", "user_id", $fileForVision);
+
+// Create a chat client with vision model like gpt-4-vision
+$response = $chatClient->create_chat_message(array(), "Describe this image.", "user_id", "blocking", $conversation_id, $fileForVision);
+
+// File Upload
+$fileForUpload = [
+ [
+ 'tmp_name' => '/path/to/file/filename.jpg',
+ 'name' => 'filename.jpg'
+ ]
+];
+$response = $difyClient->file_upload("user_id", $fileForUpload);
+$result = json_decode($response->getBody(), true);
+echo 'upload_file_id: ' . $result['id'];
// Fetch application parameters
-$response = $difyClient->get_application_parameters($user);
+$response = $difyClient->get_application_parameters("user_id");
// Provide feedback for a message
-$response = $difyClient->message_feedback($message_id, $rating, $user);
+$response = $difyClient->message_feedback($message_id, $rating, "user_id");
// Other available methods:
// - get_conversation_messages()
diff --git a/sdks/php-client/dify-client.php b/sdks/php-client/dify-client.php
index cc2e854775042d..c5ddcc3a87ffbe 100644
--- a/sdks/php-client/dify-client.php
+++ b/sdks/php-client/dify-client.php
@@ -19,6 +19,13 @@ public function __construct($api_key) {
'Content-Type' => 'application/json',
],
]);
+ $this->file_client = new Client([
+ 'base_uri' => $this->base_url,
+ 'headers' => [
+ 'Authorization' => 'Bearer ' . $this->api_key,
+ 'Content-Type' => 'multipart/form-data',
+ ],
+ ]);
}
protected function send_request($method, $endpoint, $data = null, $params = null, $stream = false) {
@@ -44,27 +51,57 @@ public function get_application_parameters($user) {
$params = ['user' => $user];
return $this->send_request('GET', 'parameters', null, $params);
}
+
+ public function file_upload($user, $files) {
+ $data = ['user' => $user];
+ $options = [
+ 'multipart' => $this->prepareMultipart($data, $files)
+ ];
+
+ return $this->file_client->request('POST', 'files/upload', $options);
+ }
+
+ protected function prepareMultipart($data, $files) {
+ $multipart = [];
+ foreach ($data as $key => $value) {
+ $multipart[] = [
+ 'name' => $key,
+ 'contents' => $value
+ ];
+ }
+
+ foreach ($files as $file) {
+ $multipart[] = [
+ 'name' => 'file',
+ 'contents' => fopen($file['tmp_name'], 'r'),
+ 'filename' => $file['name']
+ ];
+ }
+
+ return $multipart;
+ }
}
class CompletionClient extends DifyClient {
- public function create_completion_message($inputs, $query, $response_mode, $user) {
+ public function create_completion_message($inputs, $response_mode, $user, $files = null) {
$data = [
'inputs' => $inputs,
- 'query' => $query,
'response_mode' => $response_mode,
'user' => $user,
+ 'files' => $files,
];
return $this->send_request('POST', 'completion-messages', $data, null, $response_mode === 'streaming');
}
}
class ChatClient extends DifyClient {
- public function create_chat_message($inputs, $query, $user, $response_mode = 'blocking', $conversation_id = null) {
+ public function create_chat_message($inputs, $query, $user, $response_mode = 'blocking', $conversation_id = null, $files = null) {
$data = [
'inputs' => $inputs,
'query' => $query,
'user' => $user,
'response_mode' => $response_mode,
+ 'files' => $files,
];
if ($conversation_id) {
$data['conversation_id'] = $conversation_id;
diff --git a/sdks/python-client/README.md b/sdks/python-client/README.md
index 0997d326328895..8949ef08fa6991 100644
--- a/sdks/python-client/README.md
+++ b/sdks/python-client/README.md
@@ -14,8 +14,27 @@ Write your code with sdk:
- completion generate with `blocking` response_mode
+```python
+from dify_client import CompletionClient
+
+api_key = "your_api_key"
+
+# Initialize CompletionClient
+completion_client = CompletionClient(api_key)
+
+# Create Completion Message using CompletionClient
+completion_response = completion_client.create_completion_message(inputs={"query": "What's the weather like today?"},
+ response_mode="blocking", user="user_id")
+completion_response.raise_for_status()
+
+result = completion_response.json()
+
+print(result.get('answer'))
```
-import json
+
+- completion using vision model, like gpt-4-vision
+
+```python
from dify_client import CompletionClient
api_key = "your_api_key"
@@ -23,19 +42,31 @@ api_key = "your_api_key"
# Initialize CompletionClient
completion_client = CompletionClient(api_key)
+files = [{
+ "type": "image",
+ "transfer_method": "remote_url",
+ "url": "your_image_url"
+}]
+
+# files = [{
+# "type": "image",
+# "transfer_method": "local_file",
+# "upload_file_id": "your_file_id"
+# }]
+
# Create Completion Message using CompletionClient
-completion_response = completion_client.create_completion_message(inputs={}, query="Hello", response_mode="blocking", user="user_id")
+completion_response = completion_client.create_completion_message(inputs={"query": "Describe the picture."},
+ response_mode="blocking", user="user_id", files=files)
completion_response.raise_for_status()
-result = completion_response.text
-result = json.loads(result)
+result = completion_response.json()
print(result.get('answer'))
```
- chat generate with `streaming` response_mode
-```
+```python
import json
from dify_client import ChatClient
@@ -55,10 +86,67 @@ for line in chat_response.iter_lines(decode_unicode=True):
print(line.get('answer'))
```
-- Others
+- chat using vision model, like gpt-4-vision
+
+```python
+from dify_client import ChatClient
+api_key = "your_api_key"
+
+# Initialize ChatClient
+chat_client = ChatClient(api_key)
+
+files = [{
+ "type": "image",
+ "transfer_method": "remote_url",
+ "url": "your_image_url"
+}]
+
+# files = [{
+# "type": "image",
+# "transfer_method": "local_file",
+# "upload_file_id": "your_file_id"
+# }]
+
+# Create Chat Message using ChatClient
+chat_response = chat_client.create_chat_message(inputs={}, query="Describe the picture.", user="user_id",
+ response_mode="blocking", files=files)
+chat_response.raise_for_status()
+
+result = chat_response.json()
+
+print(result.get("answer"))
```
-import json
+
+- upload file when using vision model
+
+```python
+from dify_client import DifyClient
+
+api_key = "your_api_key"
+
+# Initialize Client
+dify_client = DifyClient(api_key)
+
+file_path = "your_image_file_path"
+file_name = "panda.jpeg"
+mime_type = "image/jpeg"
+
+with open(file_path, "rb") as file:
+ files = {
+ "file": (file_name, file, mime_type)
+ }
+ response = dify_client.file_upload("user_id", files)
+
+ result = response.json()
+ print(f'upload_file_id: {result.get("id")}')
+```
+
+
+
+- Others
+
+```python
from dify_client import ChatClient
api_key = "your_api_key"
@@ -69,32 +157,29 @@ client = ChatClient(api_key)
# Get App parameters
parameters = client.get_application_parameters(user="user_id")
parameters.raise_for_status()
-parameters = json.loads(parameters.text)
print('[parameters]')
-print(parameters)
+print(parameters.json())
# Get Conversation List (only for chat)
conversations = client.get_conversations(user="user_id")
conversations.raise_for_status()
-conversations = json.loads(conversations.text)
print('[conversations]')
-print(conversations)
+print(conversations.json())
# Get Message List (only for chat)
messages = client.get_conversation_messages(user="user_id", conversation_id="conversation_id")
messages.raise_for_status()
-messages = json.loads(messages.text)
print('[messages]')
-print(messages)
+print(messages.json())
# Rename Conversation (only for chat)
-rename_conversation_response = client.rename_conversation(conversation_id="conversation_id", name="new_name", user="user_id")
+rename_conversation_response = client.rename_conversation(conversation_id="conversation_id",
+ name="new_name", user="user_id")
rename_conversation_response.raise_for_status()
-rename_conversation_result = json.loads(rename_conversation_response.text)
print('[rename result]')
-print(rename_conversation_result)
+print(rename_conversation_response.json())
```
diff --git a/sdks/python-client/dify_client/__init__.py b/sdks/python-client/dify_client/__init__.py
index 471b8d199036b1..6fa9d190e534eb 100644
--- a/sdks/python-client/dify_client/__init__.py
+++ b/sdks/python-client/dify_client/__init__.py
@@ -1 +1 @@
-from dify_client.client import ChatClient, CompletionClient
\ No newline at end of file
+from dify_client.client import ChatClient, CompletionClient, DifyClient
diff --git a/sdks/python-client/dify_client/client.py b/sdks/python-client/dify_client/client.py
index 23f9b9c3ab8e84..53880c10004418 100644
--- a/sdks/python-client/dify_client/client.py
+++ b/sdks/python-client/dify_client/client.py
@@ -6,14 +6,24 @@ def __init__(self, api_key):
self.api_key = api_key
self.base_url = "https://api.dify.ai/v1"
- def _send_request(self, method, endpoint, data=None, params=None, stream=False):
+ def _send_request(self, method, endpoint, json=None, params=None, stream=False):
headers = {
"Authorization": f"Bearer {self.api_key}",
"Content-Type": "application/json"
}
url = f"{self.base_url}{endpoint}"
- response = requests.request(method, url, json=data, params=params, headers=headers, stream=stream)
+ response = requests.request(method, url, json=json, params=params, headers=headers, stream=stream)
+
+ return response
+
+ def _send_request_with_files(self, method, endpoint, data, files):
+ headers = {
+ "Authorization": f"Bearer {self.api_key}"
+ }
+
+ url = f"{self.base_url}{endpoint}"
+ response = requests.request(method, url, data=data, headers=headers, files=files)
return response
@@ -28,30 +38,39 @@ def get_application_parameters(self, user):
params = {"user": user}
return self._send_request("GET", "/parameters", params=params)
+ def file_upload(self, user, files):
+ data = {
+ "user": user
+ }
+ return self._send_request_with_files("POST", "/files/upload", data=data, files=files)
+
class CompletionClient(DifyClient):
- def create_completion_message(self, inputs, query, response_mode, user):
+ def create_completion_message(self, inputs, response_mode, user, files=None):
data = {
"inputs": inputs,
- "query": query,
"response_mode": response_mode,
- "user": user
+ "user": user,
+ "files": files
}
- return self._send_request("POST", "/completion-messages", data, stream=True if response_mode == "streaming" else False)
+ return self._send_request("POST", "/completion-messages", data,
+ stream=True if response_mode == "streaming" else False)
class ChatClient(DifyClient):
- def create_chat_message(self, inputs, query, user, response_mode="blocking", conversation_id=None):
+ def create_chat_message(self, inputs, query, user, response_mode="blocking", conversation_id=None, files=None):
data = {
"inputs": inputs,
"query": query,
"user": user,
- "response_mode": response_mode
+ "response_mode": response_mode,
+ "files": files
}
if conversation_id:
data["conversation_id"] = conversation_id
- return self._send_request("POST", "/chat-messages", data, stream=True if response_mode == "streaming" else False)
+ return self._send_request("POST", "/chat-messages", data,
+ stream=True if response_mode == "streaming" else False)
def get_conversation_messages(self, user, conversation_id=None, first_id=None, limit=None):
params = {"user": user}
diff --git a/sdks/python-client/setup.py b/sdks/python-client/setup.py
index 3d4e0b0bbd176e..e74748377eb9f3 100644
--- a/sdks/python-client/setup.py
+++ b/sdks/python-client/setup.py
@@ -5,7 +5,7 @@
setup(
name="dify-client",
- version="0.1.8",
+ version="0.1.10",
author="Dify",
author_email="hello@dify.ai",
description="A package for interacting with the Dify Service-API",
diff --git a/sdks/python-client/tests/test_client.py b/sdks/python-client/tests/test_client.py
index f123c1882b2143..ac954ff8311eb1 100644
--- a/sdks/python-client/tests/test_client.py
+++ b/sdks/python-client/tests/test_client.py
@@ -12,15 +12,33 @@ def setUp(self):
def test_create_chat_message(self):
response = self.chat_client.create_chat_message({}, "Hello, World!", "test_user")
- self.assertIn("message_id", response)
+ self.assertIn("answer", response.text)
+
+ def test_create_chat_message_with_vision_model_by_remote_url(self):
+ files = [{
+ "type": "image",
+ "transfer_method": "remote_url",
+ "url": "your_image_url"
+ }]
+ response = self.chat_client.create_chat_message({}, "Describe the picture.", "test_user", files=files)
+ self.assertIn("answer", response.text)
+
+ def test_create_chat_message_with_vision_model_by_local_file(self):
+ files = [{
+ "type": "image",
+ "transfer_method": "local_file",
+ "upload_file_id": "your_file_id"
+ }]
+ response = self.chat_client.create_chat_message({}, "Describe the picture.", "test_user", files=files)
+ self.assertIn("answer", response.text)
def test_get_conversation_messages(self):
- response = self.chat_client.get_conversation_messages("test_user")
- self.assertIsInstance(response, list)
+ response = self.chat_client.get_conversation_messages("test_user", "your_conversation_id")
+ self.assertIn("answer", response.text)
def test_get_conversations(self):
response = self.chat_client.get_conversations("test_user")
- self.assertIsInstance(response, list)
+ self.assertIn("data", response.text)
class TestCompletionClient(unittest.TestCase):
@@ -28,8 +46,29 @@ def setUp(self):
self.completion_client = CompletionClient(API_KEY)
def test_create_completion_message(self):
- response = self.completion_client.create_completion_message({}, "What's the weather like today?", "blocking", "test_user")
- self.assertIn("message_id", response)
+ response = self.completion_client.create_completion_message({"query": "What's the weather like today?"},
+ "blocking", "test_user")
+ self.assertIn("answer", response.text)
+
+ def test_create_completion_message_with_vision_model_by_remote_url(self):
+ files = [{
+ "type": "image",
+ "transfer_method": "remote_url",
+ "url": "your_image_url"
+ }]
+ response = self.completion_client.create_completion_message(
+ {"query": "Describe the picture."}, "blocking", "test_user", files)
+ self.assertIn("answer", response.text)
+
+ def test_create_completion_message_with_vision_model_by_local_file(self):
+ files = [{
+ "type": "image",
+ "transfer_method": "local_file",
+ "upload_file_id": "your_file_id"
+ }]
+ response = self.completion_client.create_completion_message(
+ {"query": "Describe the picture."}, "blocking", "test_user", files)
+ self.assertIn("answer", response.text)
class TestDifyClient(unittest.TestCase):
@@ -37,12 +76,24 @@ def setUp(self):
self.dify_client = DifyClient(API_KEY)
def test_message_feedback(self):
- response = self.dify_client.message_feedback("test_message_id", 5, "test_user")
- self.assertIn("success", response)
+ response = self.dify_client.message_feedback("your_message_id", 'like', "test_user")
+ self.assertIn("success", response.text)
def test_get_application_parameters(self):
response = self.dify_client.get_application_parameters("test_user")
- self.assertIsInstance(response, dict)
+ self.assertIn("user_input_form", response.text)
+
+ def test_file_upload(self):
+ file_path = "your_image_file_path"
+ file_name = "panda.jpeg"
+ mime_type = "image/jpeg"
+
+ with open(file_path, "rb") as file:
+ files = {
+ "file": (file_name, file, mime_type)
+ }
+ response = self.dify_client.file_upload("test_user", files)
+ self.assertIn("name", response.text)
if __name__ == "__main__":
diff --git a/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/layout.tsx b/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/layout.tsx
index 835fac462d3ce7..3458b14a628ebf 100644
--- a/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/layout.tsx
+++ b/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/layout.tsx
@@ -153,7 +153,7 @@ const DatasetDetailLayout: FC = (props) => {
return
return (
-
+
{!hideSideBar &&
= (props) => {
dataset: datasetRes,
mutateDatasetRes: () => mutateDatasetRes(),
}}>
- {children}
+ {children}
)
diff --git a/web/app/(commonLayout)/plugins-coming-soon/assets/coming-soon.png b/web/app/(commonLayout)/plugins-coming-soon/assets/coming-soon.png
deleted file mode 100644
index a1c48b508df133..00000000000000
Binary files a/web/app/(commonLayout)/plugins-coming-soon/assets/coming-soon.png and /dev/null differ
diff --git a/web/app/(commonLayout)/plugins-coming-soon/assets/plugins-bg.png b/web/app/(commonLayout)/plugins-coming-soon/assets/plugins-bg.png
deleted file mode 100644
index 9be76acc5206af..00000000000000
Binary files a/web/app/(commonLayout)/plugins-coming-soon/assets/plugins-bg.png and /dev/null differ
diff --git a/web/app/(commonLayout)/plugins-coming-soon/page.module.css b/web/app/(commonLayout)/plugins-coming-soon/page.module.css
deleted file mode 100644
index 73aab949c9840a..00000000000000
--- a/web/app/(commonLayout)/plugins-coming-soon/page.module.css
+++ /dev/null
@@ -1,32 +0,0 @@
-.bg {
- position: relative;
- width: 750px;
- height: 450px;
- background: #fff url(./assets/plugins-bg.png) center center no-repeat;
- background-size: contain;
- box-shadow: 0px 12px 16px -4px rgba(16, 24, 40, 0.08), 0px 4px 6px -2px rgba(16, 24, 40, 0.03);
- border-radius: 16px;
-}
-
-.text {
- position: absolute;
- top: 40px;
- left: 48px;
- width: 526px;
- background: linear-gradient(91.92deg, #104AE1 -1.74%, #0098EE 75.74%);
- background-clip: text;
- color: transparent;
- font-size: 24px;
- font-weight: 700;
- line-height: 32px;
-}
-
-.tag {
- position: absolute;
- width: 116.74px;
- height: 69.3px;
- left: -18.37px;
- top: -11.48px;
- background: url(./assets/coming-soon.png) center center no-repeat;
- background-size: contain;
-}
\ No newline at end of file
diff --git a/web/app/(commonLayout)/plugins-coming-soon/page.tsx b/web/app/(commonLayout)/plugins-coming-soon/page.tsx
deleted file mode 100644
index 285b0189a33157..00000000000000
--- a/web/app/(commonLayout)/plugins-coming-soon/page.tsx
+++ /dev/null
@@ -1,19 +0,0 @@
-import s from './page.module.css'
-import { getLocaleOnServer } from '@/i18n/server'
-import { useTranslation } from '@/i18n/i18next-serverside-config'
-
-const PluginsComingSoon = async () => {
- const locale = getLocaleOnServer()
- const { t } = await useTranslation(locale, 'common')
-
- return (
-
-
-
-
{t('menus.pluginsTips')}
-
-
- )
-}
-
-export default PluginsComingSoon
diff --git a/web/app/components/app/chat/answer/index.tsx b/web/app/components/app/chat/answer/index.tsx
index 9865ff3c2bfe8d..3387780f6a58f1 100644
--- a/web/app/components/app/chat/answer/index.tsx
+++ b/web/app/components/app/chat/answer/index.tsx
@@ -194,7 +194,7 @@ const Answer: FC
= ({
)
}
-
+
@@ -280,7 +280,7 @@ const Answer: FC = ({
{!feedbackDisabled && renderFeedbackRating(feedback?.rating, !isHideFeedbackEdit, displayScene !== 'console')}
- {more &&
}
+ {more &&
}
diff --git a/web/app/components/app/chat/citation/popup.tsx b/web/app/components/app/chat/citation/popup.tsx
index 20e04df63f04a1..ea2e8a9b24fded 100644
--- a/web/app/components/app/chat/citation/popup.tsx
+++ b/web/app/components/app/chat/citation/popup.tsx
@@ -100,7 +100,11 @@ const Popup: FC = ({
data={source.index_node_hash.substring(0, 7)}
icon={ }
/>
-
+ {
+ source.score && (
+
+ )
+ }
)
}
diff --git a/web/app/components/app/chat/more-info/index.tsx b/web/app/components/app/chat/more-info/index.tsx
index 4b18a0f37475a6..83cc5be7c8c2e4 100644
--- a/web/app/components/app/chat/more-info/index.tsx
+++ b/web/app/components/app/chat/more-info/index.tsx
@@ -5,11 +5,15 @@ import { useTranslation } from 'react-i18next'
import type { MessageMore } from '../type'
import { formatNumber } from '@/utils/format'
-export type IMoreInfoProps = { more: MessageMore; isQuestion: boolean }
+export type IMoreInfoProps = {
+ more: MessageMore
+ isQuestion: boolean
+ className?: string
+}
-const MoreInfo: FC = ({ more, isQuestion }) => {
+const MoreInfo: FC = ({ more, isQuestion, className }) => {
const { t } = useTranslation()
- return (
+ return (
{`${t('appLog.detail.timeConsuming')} ${more.latency}${t('appLog.detail.second')}`}
{`${t('appLog.detail.tokenCost')} ${formatNumber(more.tokens)}`}
·
diff --git a/web/app/components/app/configuration/dataset-config/card-item/index.tsx b/web/app/components/app/configuration/dataset-config/card-item/index.tsx
index 650d717553fe2b..767943303aefe1 100644
--- a/web/app/components/app/configuration/dataset-config/card-item/index.tsx
+++ b/web/app/components/app/configuration/dataset-config/card-item/index.tsx
@@ -16,7 +16,7 @@ export type ICardItemProps = {
onRemove: (id: string) => void
readonly?: boolean
}
-
+// used in universal-chat
const CardItem: FC
= ({
className,
config,
diff --git a/web/app/components/app/configuration/dataset-config/card-item/style.module.css b/web/app/components/app/configuration/dataset-config/card-item/style.module.css
index 9113f795bcfb42..4ddec9ea37ec2d 100644
--- a/web/app/components/app/configuration/dataset-config/card-item/style.module.css
+++ b/web/app/components/app/configuration/dataset-config/card-item/style.module.css
@@ -1,16 +1,22 @@
.card {
box-shadow: 0px 1px 2px rgba(16, 24, 40, 0.05);
- width: calc(50% - 4px);
+ width: 100%;
}
.card:hover {
box-shadow: 0px 4px 8px -2px rgba(16, 24, 40, 0.1), 0px 2px 4px -2px rgba(16, 24, 40, 0.06);
}
-.deleteBtn {
+.btnWrap {
+ padding-left: 64px;
visibility: hidden;
+ background: linear-gradient(270deg, #FFF 49.99%, rgba(255, 255, 255, 0.00) 98.1%);
}
-.card:hover .deleteBtn {
+.card:hover .btnWrap {
visibility: visible;
+}
+
+.settingBtn:hover {
+ background-color: rgba(0, 0, 0, 0.05);
}
\ No newline at end of file
diff --git a/web/app/components/app/configuration/dataset-config/index.tsx b/web/app/components/app/configuration/dataset-config/index.tsx
index e7d6ead7934393..9b4dbf2b781abe 100644
--- a/web/app/components/app/configuration/dataset-config/index.tsx
+++ b/web/app/components/app/configuration/dataset-config/index.tsx
@@ -105,7 +105,6 @@ const DatasetConfig: FC = () => {
onChange={handleSelectContextVar}
/>
)}
-
)
}
diff --git a/web/app/components/app/configuration/dataset-config/params-config/index.tsx b/web/app/components/app/configuration/dataset-config/params-config/index.tsx
index 101bf15a1062bf..d166a5b457b9a9 100644
--- a/web/app/components/app/configuration/dataset-config/params-config/index.tsx
+++ b/web/app/components/app/configuration/dataset-config/params-config/index.tsx
@@ -4,96 +4,23 @@ import { memo, useState } from 'react'
import { useTranslation } from 'react-i18next'
import { useContext } from 'use-context-selector'
import cn from 'classnames'
-import { HelpCircle, Settings04 } from '@/app/components/base/icons/src/vender/line/general'
-import {
- PortalToFollowElem,
- PortalToFollowElemContent,
- PortalToFollowElemTrigger,
-} from '@/app/components/base/portal-to-follow-elem'
-import Tooltip from '@/app/components/base/tooltip-plus'
-import Slider from '@/app/components/base/slider'
-import Switch from '@/app/components/base/switch'
+import { Settings04 } from '@/app/components/base/icons/src/vender/line/general'
import ConfigContext from '@/context/debug-configuration'
-
-// TODO
-const PARAMS_KEY = [
- 'top_k',
- 'score_threshold',
-]
-const PARAMS = {
- top_k: {
- default: 2,
- step: 1,
- min: 1,
- max: 10,
- },
- score_threshold: {
- default: 0.7,
- step: 0.01,
- min: 0,
- max: 1,
- },
-} as any
-
-export type IParamItemProps = {
- id: string
- name: string
- tip: string
- value: number
- enable: boolean
- step?: number
- min?: number
- max: number
- onChange: (key: string, value: number) => void
- onSwitchChange: (key: string, enable: boolean) => void
-}
-
-const ParamItem: FC = ({ id, name, tip, step = 0.1, min = 0, max, value, enable, onChange, onSwitchChange }) => {
- return (
-
-
-
- {id === 'score_threshold' && (
- {
- onSwitchChange(id, val)
- }}
- />
- )}
- {name}
- {tip}
}>
-
-
-
-
-
-
-
-
- onChange(id, value / (max < 5 ? 100 : 1))}
- />
-
-
-
- {
- const value = parseFloat(e.target.value)
- if (value < min || value > max)
- return
-
- onChange(id, value)
- }} />
-
-
-
- )
-}
+import TopKItem from '@/app/components/base/param-item/top-k-item'
+import ScoreThresholdItem from '@/app/components/base/param-item/score-threshold-item'
+import Modal from '@/app/components/base/modal'
+import Button from '@/app/components/base/button'
+import RadioCard from '@/app/components/base/radio-card/simple'
+import { RETRIEVE_TYPE } from '@/types/app'
+import ModelSelector from '@/app/components/header/account-setting/model-page/model-selector'
+import { useProviderContext } from '@/context/provider-context'
+import { ModelType } from '@/app/components/header/account-setting/model-page/declarations'
+import Toast from '@/app/components/base/toast'
+import { DATASET_DEFAULT } from '@/config'
+import {
+ MultiPathRetrieval,
+ NTo1Retrieval,
+} from '@/app/components/base/icons/src/public/common'
const ParamsConfig: FC = () => {
const { t } = useTranslation()
@@ -102,24 +29,47 @@ const ParamsConfig: FC = () => {
datasetConfigs,
setDatasetConfigs,
} = useContext(ConfigContext)
+ const [tempDataSetConfigs, setTempDataSetConfigs] = useState(datasetConfigs)
+
+ const type = tempDataSetConfigs.retrieval_model
+ const setType = (value: RETRIEVE_TYPE) => {
+ setTempDataSetConfigs({
+ ...tempDataSetConfigs,
+ retrieval_model: value,
+ })
+ }
+
+ const {
+ rerankDefaultModel,
+ isRerankDefaultModelVaild,
+ } = useProviderContext()
+
+ const rerankModel = (() => {
+ if (tempDataSetConfigs.reranking_model) {
+ return {
+ provider_name: tempDataSetConfigs.reranking_model.reranking_provider_name,
+ model_name: tempDataSetConfigs.reranking_model.reranking_model_name,
+ }
+ }
+ else if (rerankDefaultModel) {
+ return {
+ provider_name: rerankDefaultModel.model_provider.provider_name,
+ model_name: rerankDefaultModel.model_name,
+ }
+ }
+ })()
const handleParamChange = (key: string, value: number) => {
- let notOutRangeValue = parseFloat(value.toFixed(2))
- notOutRangeValue = Math.max(PARAMS[key].min, notOutRangeValue)
- notOutRangeValue = Math.min(PARAMS[key].max, notOutRangeValue)
if (key === 'top_k') {
- setDatasetConfigs({
- ...datasetConfigs,
- top_k: notOutRangeValue,
+ setTempDataSetConfigs({
+ ...tempDataSetConfigs,
+ top_k: value,
})
}
else if (key === 'score_threshold') {
- setDatasetConfigs({
- ...datasetConfigs,
- [key]: {
- enable: datasetConfigs.score_threshold.enable,
- value: notOutRangeValue,
- },
+ setTempDataSetConfigs({
+ ...tempDataSetConfigs,
+ score_threshold: value,
})
}
}
@@ -128,54 +78,133 @@ const ParamsConfig: FC = () => {
if (key === 'top_k')
return
- setDatasetConfigs({
- ...datasetConfigs,
- [key]: {
- enable,
- value: (datasetConfigs as any)[key].value,
- },
+ setTempDataSetConfigs({
+ ...tempDataSetConfigs,
+ score_threshold_enabled: enable,
})
}
+ const isValid = () => {
+ let errMsg = ''
+ if (tempDataSetConfigs.retrieval_model === RETRIEVE_TYPE.multiWay) {
+ if (!tempDataSetConfigs.reranking_model?.reranking_model_name && (!rerankDefaultModel && isRerankDefaultModelVaild))
+ errMsg = t('appDebug.datasetConfig.rerankModelRequired')
+ }
+ if (errMsg) {
+ Toast.notify({
+ type: 'error',
+ message: errMsg,
+ })
+ }
+ return !errMsg
+ }
+ const handleSave = () => {
+ if (!isValid())
+ return
+
+ const config = { ...tempDataSetConfigs }
+ if (config.retrieval_model === RETRIEVE_TYPE.multiWay && !config.reranking_model) {
+ config.reranking_model = {
+ reranking_provider_name: rerankDefaultModel?.model_provider.provider_name,
+ reranking_model_name: rerankDefaultModel?.model_name,
+ } as any
+ }
+ setDatasetConfigs(config)
+ setOpen(false)
+ }
return (
-
- setOpen(v => !v)}>
-
-
-
- {t('appDebug.datasetConfig.params')}
-
+
+
{
+ setTempDataSetConfigs({
+ ...datasetConfigs,
+ top_k: datasetConfigs.top_k || DATASET_DEFAULT.top_k,
+ score_threshold: datasetConfigs.score_threshold || DATASET_DEFAULT.score_threshold,
+ })
+ setOpen(true)
+ }}
+ >
+
+
+ {t('appDebug.datasetConfig.params')}
-
-
-
- {PARAMS_KEY.map((key: string) => {
- const currentValue = key === 'top_k' ? datasetConfigs[key] : (datasetConfigs as any)[key].value
- const currentEnableState = key === 'top_k' ? true : (datasetConfigs as any)[key].enable
- return (
-
+ {
+ open && (
+ {
+ setOpen(false)
+ }}
+ className='min-w-[528px]'
+ wrapperClassName='z-50'
+ title={t('appDebug.datasetConfig.settingTitle')}
+ >
+
+ }
+ title={t('appDebug.datasetConfig.retrieveOneWay.title')}
+ description={t('appDebug.datasetConfig.retrieveOneWay.description')}
+ isChosen={type === RETRIEVE_TYPE.oneWay}
+ onChosen={() => { setType(RETRIEVE_TYPE.oneWay) }}
/>
- )
- })}
-
-
-
+ }
+ title={t('appDebug.datasetConfig.retrieveMultiWay.title')}
+ description={t('appDebug.datasetConfig.retrieveMultiWay.description')}
+ isChosen={type === RETRIEVE_TYPE.multiWay}
+ onChosen={() => { setType(RETRIEVE_TYPE.multiWay) }}
+ />
+
+ {type === RETRIEVE_TYPE.multiWay && (
+ <>
+
+
{t('common.modelProvider.rerankModel.key')}
+
+ {
+ setTempDataSetConfigs({
+ ...tempDataSetConfigs,
+ reranking_model: {
+ reranking_provider_name: v.model_provider.provider_name,
+ reranking_model_name: v.model_name,
+ },
+ })
+ }}
+ />
+
+
+
+
+
+
+ >
+ )}
+
+ {
+ setOpen(false)
+ }}>{t('common.operation.cancel')}
+ {t('common.operation.save')}
+
+
+ )
+ }
+
+
)
}
export default memo(ParamsConfig)
diff --git a/web/app/components/app/configuration/dataset-config/settings-modal/index.tsx b/web/app/components/app/configuration/dataset-config/settings-modal/index.tsx
index f75cdcf5966645..8019011f7ecf24 100644
--- a/web/app/components/app/configuration/dataset-config/settings-modal/index.tsx
+++ b/web/app/components/app/configuration/dataset-config/settings-modal/index.tsx
@@ -1,8 +1,11 @@
import type { FC } from 'react'
-import { useState } from 'react'
+import { useRef, useState } from 'react'
+import { useClickAway } from 'ahooks'
import { useTranslation } from 'react-i18next'
+import { isEqual } from 'lodash-es'
+import cn from 'classnames'
+import { BookOpenIcon } from '@heroicons/react/24/outline'
import IndexMethodRadio from '@/app/components/datasets/settings/index-method-radio'
-import Modal from '@/app/components/base/modal'
import Button from '@/app/components/base/button'
import ModelSelector from '@/app/components/header/account-setting/model-page/model-selector'
import type { ProviderEnum } from '@/app/components/header/account-setting/model-page/declarations'
@@ -11,12 +14,29 @@ import type { DataSet } from '@/models/datasets'
import { useToastContext } from '@/app/components/base/toast'
import { updateDatasetSetting } from '@/service/datasets'
import { useModalContext } from '@/context/modal-context'
+import { XClose } from '@/app/components/base/icons/src/vender/line/general'
+import type { RetrievalConfig } from '@/types/app'
+import RetrievalMethodConfig from '@/app/components/datasets/common/retrieval-method-config'
+import EconomicalRetrievalMethodConfig from '@/app/components/datasets/common/economical-retrieval-method-config'
+import { useProviderContext } from '@/context/provider-context'
+import { ensureRerankModelSelected, isReRankModelSelected } from '@/app/components/datasets/common/check-rerank-model'
+import { AlertTriangle } from '@/app/components/base/icons/src/vender/solid/alertsAndFeedback'
+import PermissionsRadio from '@/app/components/datasets/settings/permissions-radio'
type SettingsModalProps = {
currentDataset: DataSet
onCancel: () => void
onSave: (newDataset: DataSet) => void
}
+
+const rowClass = `
+ flex justify-between py-4
+`
+
+const labelClass = `
+ flex w-[168px] shrink-0
+`
+
const SettingsModal: FC
= ({
currentDataset,
onCancel,
@@ -24,13 +44,29 @@ const SettingsModal: FC = ({
}) => {
const { t } = useTranslation()
const { notify } = useToastContext()
+ const ref = useRef(null)
+ useClickAway(() => {
+ if (ref)
+ onCancel()
+ }, ref)
+
const { setShowAccountSettingModal } = useModalContext()
const [loading, setLoading] = useState(false)
const [localeCurrentDataset, setLocaleCurrentDataset] = useState({ ...currentDataset })
+ const [indexMethod, setIndexMethod] = useState(currentDataset.indexing_technique)
+ const [retrievalConfig, setRetrievalConfig] = useState(localeCurrentDataset?.retrieval_model_dict as RetrievalConfig)
+
+ const {
+ rerankDefaultModel,
+ isRerankDefaultModelVaild,
+ rerankModelList,
+ } = useProviderContext()
const handleValueChange = (type: string, value: string) => {
setLocaleCurrentDataset({ ...localeCurrentDataset, [type]: value })
}
+ const [isHideChangedTip, setIsHideChangedTip] = useState(false)
+ const isRetrievalChanged = !isEqual(retrievalConfig, localeCurrentDataset?.retrieval_model_dict) || indexMethod !== localeCurrentDataset?.indexing_technique
const handleSave = async () => {
if (loading)
@@ -39,19 +75,42 @@ const SettingsModal: FC = ({
notify({ type: 'error', message: t('datasetSettings.form.nameError') })
return
}
+ if (
+ !isReRankModelSelected({
+ rerankDefaultModel,
+ isRerankDefaultModelVaild,
+ rerankModelList,
+ retrievalConfig,
+ indexMethod,
+ })
+ ) {
+ notify({ type: 'error', message: t('appDebug.datasetConfig.rerankModelRequired') })
+ return
+ }
+ const postRetrievalConfig = ensureRerankModelSelected({
+ rerankDefaultModel: rerankDefaultModel!,
+ retrievalConfig,
+ indexMethod,
+ })
try {
setLoading(true)
- const { id, name, description, indexing_technique } = localeCurrentDataset
+ const { id, name, description, permission } = localeCurrentDataset
await updateDatasetSetting({
datasetId: id,
body: {
name,
description,
- indexing_technique,
+ permission,
+ indexing_technique: indexMethod,
+ retrieval_model: postRetrievalConfig,
},
})
notify({ type: 'success', message: t('common.actionMsg.modifiedSuccessfully') })
- onSave(localeCurrentDataset)
+ onSave({
+ ...localeCurrentDataset,
+ indexing_technique: indexMethod,
+ retrieval_model_dict: postRetrievalConfig,
+ })
}
catch (e) {
notify({ type: 'error', message: t('common.actionMsg.modifiedUnsuccessfully') })
@@ -62,74 +121,162 @@ const SettingsModal: FC = ({
}
return (
- {}}
- className='!p-8 !pb-6 !max-w-none !w-[640px]'
+
-
- {t('datasetSettings.title')}
-
-
-
- {t('datasetSettings.form.name')}
+
+
+
{t('datasetSettings.title')}
-
handleValueChange('name', e.target.value)}
- className='block px-3 w-full h-9 bg-gray-100 rounded-lg text-sm text-gray-900 outline-none appearance-none'
- placeholder={t('datasetSettings.form.namePlaceholder') || ''}
- />
-
-
-
- {t('datasetSettings.form.desc')}
-
-
-
-
- {t('datasetSettings.form.indexMethod')}
-
-
-
handleValueChange('indexing_technique', v!)}
- itemClassName='!w-[282px]'
+ {/* Body */}
+
+
+
+ {t('datasetSettings.form.name')}
+
+
handleValueChange('name', e.target.value)}
+ className='block px-3 w-full h-9 bg-gray-100 rounded-lg text-sm text-gray-900 outline-none appearance-none'
+ placeholder={t('datasetSettings.form.namePlaceholder') || ''}
/>
-
-
-
- {t('datasetSettings.form.embeddingModel')}
+
+
+ {t('datasetSettings.form.desc')}
+
+
-
-
{}}
- />
+
+
+
{t('datasetSettings.form.permissions')}
+
+
+
handleValueChange('permission', v!)}
+ itemClassName='!w-[227px]'
+ />
+
-
- {t('datasetSettings.form.embeddingModelTip')}
-
setShowAccountSettingModal({ payload: 'provider' })}>{t('datasetSettings.form.embeddingModelTipLink')}
+
+
+
+ {t('datasetSettings.form.indexMethod')}
+
+
+ setIndexMethod(v!)}
+ itemClassName='!w-[227px]'
+ />
+
+
+ {indexMethod === 'high_quality' && (
+
+
+ {t('datasetSettings.form.embeddingModel')}
+
+
+
+ {}}
+ />
+
+
+ {t('datasetSettings.form.embeddingModelTip')}
+ setShowAccountSettingModal({ payload: 'provider' })}>{t('datasetSettings.form.embeddingModelTipLink')}
+
+
+
+ )}
+
+ {/* Retrieval Method Config */}
+
+
+
+
{t('datasetSettings.form.retrievalSetting.title')}
+
+
+
+
+ {indexMethod === 'high_quality'
+ ? (
+
+ )
+ : (
+
+ )}
+
-
-
+ {isRetrievalChanged && !isHideChangedTip && (
+
+
+
+
{t('appDebug.datasetConfig.retrieveChangeTip')}
+
+
{
+ setIsHideChangedTip(true)
+ e.stopPropagation()
+ e.nativeEvent.stopImmediatePropagation()
+ }}>
+
+
+
+ )}
+
+
= ({
{t('common.operation.save')}
-
+
)
}
diff --git a/web/app/components/app/configuration/index.tsx b/web/app/components/app/configuration/index.tsx
index bd40787518e81b..9fa9d3033287bc 100644
--- a/web/app/components/app/configuration/index.tsx
+++ b/web/app/components/app/configuration/index.tsx
@@ -37,7 +37,7 @@ import { fetchAppDetail, updateAppModelConfig } from '@/service/apps'
import { promptVariablesToUserInputsForm, userInputsFormToPromptVariables } from '@/utils/model-config'
import { fetchDatasets } from '@/service/datasets'
import { useProviderContext } from '@/context/provider-context'
-import { AppType, ModelModeType, Resolution, TransferMethod } from '@/types/app'
+import { AppType, ModelModeType, RETRIEVE_TYPE, Resolution, TransferMethod } from '@/types/app'
import { FlipBackward } from '@/app/components/base/icons/src/vender/line/arrows'
import { PromptMode } from '@/models/debug'
import { DEFAULT_CHAT_PROMPT_CONFIG, DEFAULT_COMPLETION_PROMPT_CONFIG } from '@/config'
@@ -127,11 +127,14 @@ const Configuration: FC = () => {
})
const [datasetConfigs, setDatasetConfigs] = useState({
- top_k: 2,
- score_threshold: {
- enable: false,
- value: 0.7,
+ retrieval_model: RETRIEVE_TYPE.oneWay,
+ reranking_model: {
+ reranking_provider_name: '',
+ reranking_model_name: '',
},
+ top_k: 2,
+ score_threshold_enabled: false,
+ score_threshold: 0.7,
})
const setModelConfig = (newModelConfig: ModelConfig) => {
@@ -391,7 +394,10 @@ const Configuration: FC = () => {
syncToPublishedConfig(config)
setPublishedConfig(config)
- setDatasetConfigs(modelConfig.dataset_configs)
+ setDatasetConfigs({
+ retrieval_model: RETRIEVE_TYPE.oneWay,
+ ...modelConfig.dataset_configs,
+ })
setHasFetchedDetail(true)
})
}, [appId])
diff --git a/web/app/components/app/configuration/prompt-value-panel/index.tsx b/web/app/components/app/configuration/prompt-value-panel/index.tsx
index 4617027f7f0e13..ee2dacaac46bc2 100644
--- a/web/app/components/app/configuration/prompt-value-panel/index.tsx
+++ b/web/app/components/app/configuration/prompt-value-panel/index.tsx
@@ -161,7 +161,7 @@ const PromptValuePanel: FC = ({
{
appType === AppType.completion && visionConfig?.enabled && (
-
Image Upload
+
{t('common.imageUploader.imageUpload')}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/web/app/components/base/icons/assets/public/common/n-to-1-retrieval.svg b/web/app/components/base/icons/assets/public/common/n-to-1-retrieval.svg
new file mode 100644
index 00000000000000..886282973351f5
--- /dev/null
+++ b/web/app/components/base/icons/assets/public/common/n-to-1-retrieval.svg
@@ -0,0 +1,18 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/web/app/components/base/icons/assets/public/llm/cohere-text.svg b/web/app/components/base/icons/assets/public/llm/cohere-text.svg
new file mode 100644
index 00000000000000..9c176896fe872c
--- /dev/null
+++ b/web/app/components/base/icons/assets/public/llm/cohere-text.svg
@@ -0,0 +1,11 @@
+
+
+
+
+
+
+
+
+
+
+
diff --git a/web/app/components/base/icons/assets/public/llm/cohere.svg b/web/app/components/base/icons/assets/public/llm/cohere.svg
new file mode 100644
index 00000000000000..28fe96d390245c
--- /dev/null
+++ b/web/app/components/base/icons/assets/public/llm/cohere.svg
@@ -0,0 +1,16 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/web/app/components/base/icons/assets/vender/solid/arrows/high-priority.svg b/web/app/components/base/icons/assets/vender/solid/arrows/high-priority.svg
new file mode 100644
index 00000000000000..cfb965c47647ab
--- /dev/null
+++ b/web/app/components/base/icons/assets/vender/solid/arrows/high-priority.svg
@@ -0,0 +1,6 @@
+
+
+
+
+
+
diff --git a/web/app/components/base/icons/assets/vender/solid/development/pattern-recognition.svg b/web/app/components/base/icons/assets/vender/solid/development/pattern-recognition.svg
new file mode 100644
index 00000000000000..f026dffbed08a8
--- /dev/null
+++ b/web/app/components/base/icons/assets/vender/solid/development/pattern-recognition.svg
@@ -0,0 +1,11 @@
+
+
+
+
+
+
+
+
+
+
+
diff --git a/web/app/components/base/icons/assets/vender/solid/development/semantic.svg b/web/app/components/base/icons/assets/vender/solid/development/semantic.svg
new file mode 100644
index 00000000000000..9b30e1cce4455a
--- /dev/null
+++ b/web/app/components/base/icons/assets/vender/solid/development/semantic.svg
@@ -0,0 +1,6 @@
+
+
+
+
+
+
diff --git a/web/app/components/base/icons/src/public/common/MultiPathRetrieval.json b/web/app/components/base/icons/src/public/common/MultiPathRetrieval.json
new file mode 100644
index 00000000000000..d37b2636889c81
--- /dev/null
+++ b/web/app/components/base/icons/src/public/common/MultiPathRetrieval.json
@@ -0,0 +1,153 @@
+{
+ "icon": {
+ "type": "element",
+ "isRootNode": true,
+ "name": "svg",
+ "attributes": {
+ "width": "36",
+ "height": "36",
+ "viewBox": "0 0 36 36",
+ "fill": "none",
+ "xmlns": "http://www.w3.org/2000/svg"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "g",
+ "attributes": {
+ "clip-path": "url(#clip0_13429_43710)"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "rect",
+ "attributes": {
+ "width": "36",
+ "height": "36",
+ "rx": "8",
+ "fill": "#FFF6ED"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "opacity": "0.7",
+ "d": "M22.25 28C22.25 29.7949 20.7949 31.25 19 31.25C17.2051 31.25 15.75 29.7949 15.75 28C15.75 26.2051 17.2051 24.75 19 24.75C20.7949 24.75 22.25 26.2051 22.25 28Z",
+ "stroke": "#FB6514",
+ "stroke-width": "1.5"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M19 12C21.2091 12 23 10.2091 23 8C23 5.79086 21.2091 4 19 4C16.7909 4 15 5.79086 15 8C15 10.2091 16.7909 12 19 12Z",
+ "fill": "#FB6514"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M15 22C17.2091 22 19 20.2091 19 18C19 15.7909 17.2091 14 15 14C12.7909 14 11 15.7909 11 18C11 20.2091 12.7909 22 15 22Z",
+ "fill": "#FB6514"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M36 23C38.7614 23 41 20.7614 41 18C41 15.2386 38.7614 13 36 13C33.2386 13 31 15.2386 31 18C31 20.7614 33.2386 23 36 23Z",
+ "fill": "#FB6514"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M0 18H10",
+ "stroke": "#FB6514",
+ "stroke-width": "1.5"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M20 18L30 18",
+ "stroke": "#FB6514",
+ "stroke-width": "1.5"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M0.00112438 15C0.00112438 15 -5.64364 15 0.851673 15C7.34699 15 7.84654 8 14 8",
+ "stroke": "#FB6514",
+ "stroke-width": "1.5"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M23.75 9.28125C26.5688 10.1847 27.699 13.2045 30.625 15.0312",
+ "stroke": "#FB6514",
+ "stroke-width": "1.5"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "opacity": "0.7",
+ "d": "M-0.000543833 21C-0.000543833 21 -5.57819 21 0.893635 21C7.36546 21 7.8688 28 14 28",
+ "stroke": "#FB6514",
+ "stroke-width": "1.5"
+ },
+ "children": []
+ }
+ ]
+ },
+ {
+ "type": "element",
+ "name": "defs",
+ "attributes": {},
+ "children": [
+ {
+ "type": "element",
+ "name": "clipPath",
+ "attributes": {
+ "id": "clip0_13429_43710"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "rect",
+ "attributes": {
+ "width": "36",
+ "height": "36",
+ "rx": "8",
+ "fill": "white"
+ },
+ "children": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "name": "MultiPathRetrieval"
+}
\ No newline at end of file
diff --git a/web/app/components/base/icons/src/public/common/MultiPathRetrieval.tsx b/web/app/components/base/icons/src/public/common/MultiPathRetrieval.tsx
new file mode 100644
index 00000000000000..7eafeb8715c739
--- /dev/null
+++ b/web/app/components/base/icons/src/public/common/MultiPathRetrieval.tsx
@@ -0,0 +1,16 @@
+// GENERATE BY script
+// DON NOT EDIT IT MANUALLY
+
+import * as React from 'react'
+import data from './MultiPathRetrieval.json'
+import IconBase from '@/app/components/base/icons/IconBase'
+import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'
+
+const Icon = React.forwardRef, Omit>((
+ props,
+ ref,
+) => )
+
+Icon.displayName = 'MultiPathRetrieval'
+
+export default Icon
diff --git a/web/app/components/base/icons/src/public/common/NTo1Retrieval.json b/web/app/components/base/icons/src/public/common/NTo1Retrieval.json
new file mode 100644
index 00000000000000..086522046fbda1
--- /dev/null
+++ b/web/app/components/base/icons/src/public/common/NTo1Retrieval.json
@@ -0,0 +1,146 @@
+{
+ "icon": {
+ "type": "element",
+ "isRootNode": true,
+ "name": "svg",
+ "attributes": {
+ "width": "36",
+ "height": "36",
+ "viewBox": "0 0 36 36",
+ "fill": "none",
+ "xmlns": "http://www.w3.org/2000/svg"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "g",
+ "attributes": {
+ "clip-path": "url(#clip0_13429_43700)"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "rect",
+ "attributes": {
+ "width": "36",
+ "height": "36",
+ "rx": "8",
+ "fill": "#EEF4FF"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "opacity": "0.7",
+ "d": "M23.25 28C23.25 29.7949 21.7949 31.25 20 31.25C18.2051 31.25 16.75 29.7949 16.75 28C16.75 26.2051 18.2051 24.75 20 24.75C21.7949 24.75 23.25 26.2051 23.25 28Z",
+ "stroke": "#444CE7",
+ "stroke-width": "1.5"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "opacity": "0.7",
+ "d": "M23.25 8C23.25 9.79493 21.7949 11.25 20 11.25C18.2051 11.25 16.75 9.79493 16.75 8C16.75 6.20507 18.2051 4.75 20 4.75C21.7949 4.75 23.25 6.20507 23.25 8Z",
+ "stroke": "#444CE7",
+ "stroke-width": "1.5"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M16 22C18.2091 22 20 20.2091 20 18C20 15.7909 18.2091 14 16 14C13.7909 14 12 15.7909 12 18C12 20.2091 13.7909 22 16 22Z",
+ "fill": "#444CE7"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M36 23C38.7614 23 41 20.7614 41 18C41 15.2386 38.7614 13 36 13C33.2386 13 31 15.2386 31 18C31 20.7614 33.2386 23 36 23Z",
+ "fill": "#444CE7"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M0 18L11 18",
+ "stroke": "#444CE7",
+ "stroke-width": "1.5"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M21 18L30 18",
+ "stroke": "#444CE7",
+ "stroke-width": "1.5"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "opacity": "0.7",
+ "d": "M-0.00160408 15C-0.00160408 15 -6.00089 15 1.12411 15C8.24911 15 8.24908 8.25 14.9991 8.25",
+ "stroke": "#444CE7",
+ "stroke-width": "1.5"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "opacity": "0.7",
+ "d": "M0.000488281 21C0.000488281 21 -5.92692 21 1.17228 21C8.27148 21 8.27423 27.75 14.9998 27.75",
+ "stroke": "#444CE7",
+ "stroke-width": "1.5"
+ },
+ "children": []
+ }
+ ]
+ },
+ {
+ "type": "element",
+ "name": "defs",
+ "attributes": {},
+ "children": [
+ {
+ "type": "element",
+ "name": "clipPath",
+ "attributes": {
+ "id": "clip0_13429_43700"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "rect",
+ "attributes": {
+ "width": "36",
+ "height": "36",
+ "rx": "8",
+ "fill": "white"
+ },
+ "children": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "name": "NTo1Retrieval"
+}
\ No newline at end of file
diff --git a/web/app/components/base/icons/src/public/common/NTo1Retrieval.tsx b/web/app/components/base/icons/src/public/common/NTo1Retrieval.tsx
new file mode 100644
index 00000000000000..ea48b5e8a30d0c
--- /dev/null
+++ b/web/app/components/base/icons/src/public/common/NTo1Retrieval.tsx
@@ -0,0 +1,16 @@
+// GENERATE BY script
+// DON NOT EDIT IT MANUALLY
+
+import * as React from 'react'
+import data from './NTo1Retrieval.json'
+import IconBase from '@/app/components/base/icons/IconBase'
+import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'
+
+const Icon = React.forwardRef, Omit>((
+ props,
+ ref,
+) => )
+
+Icon.displayName = 'NTo1Retrieval'
+
+export default Icon
diff --git a/web/app/components/base/icons/src/public/common/index.ts b/web/app/components/base/icons/src/public/common/index.ts
index 81fc5bec1dd4fb..2f38fae3d042bb 100644
--- a/web/app/components/base/icons/src/public/common/index.ts
+++ b/web/app/components/base/icons/src/public/common/index.ts
@@ -1,4 +1,6 @@
export { default as Dify } from './Dify'
export { default as Github } from './Github'
export { default as MessageChatSquare } from './MessageChatSquare'
+export { default as MultiPathRetrieval } from './MultiPathRetrieval'
+export { default as NTo1Retrieval } from './NTo1Retrieval'
export { default as Notion } from './Notion'
diff --git a/web/app/components/base/icons/src/public/llm/Cohere.json b/web/app/components/base/icons/src/public/llm/Cohere.json
new file mode 100644
index 00000000000000..255514e8b03cd5
--- /dev/null
+++ b/web/app/components/base/icons/src/public/llm/Cohere.json
@@ -0,0 +1,112 @@
+{
+ "icon": {
+ "type": "element",
+ "isRootNode": true,
+ "name": "svg",
+ "attributes": {
+ "width": "22",
+ "height": "22",
+ "viewBox": "0 0 22 22",
+ "fill": "none",
+ "xmlns": "http://www.w3.org/2000/svg"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "g",
+ "attributes": {
+ "id": "Clip path group"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "mask",
+ "attributes": {
+ "id": "mask0_13224_9519",
+ "style": "mask-type:luminance",
+ "maskUnits": "userSpaceOnUse",
+ "x": "0",
+ "y": "0",
+ "width": "22",
+ "height": "22"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "g",
+ "attributes": {
+ "id": "clip0_2207_90691"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "id": "Vector",
+ "d": "M21.5 0.5H0.5V21.5H21.5V0.5Z",
+ "fill": "white"
+ },
+ "children": []
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "type": "element",
+ "name": "g",
+ "attributes": {
+ "mask": "url(#mask0_13224_9519)"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "g",
+ "attributes": {
+ "id": "Group"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "id": "Vector_2",
+ "fill-rule": "evenodd",
+ "clip-rule": "evenodd",
+ "d": "M7.30367 13.0035C7.8689 13.0035 8.99327 12.9725 10.5474 12.3326C12.3585 11.587 15.9617 10.2334 18.561 8.84305C20.3788 7.8706 21.1757 6.58448 21.1757 4.85248C21.1757 2.44869 19.2271 0.5 16.8233 0.5H6.75176C3.299 0.5 0.5 3.299 0.5 6.75176C0.5 10.2045 3.12069 13.0035 7.30367 13.0035Z",
+ "fill": "#39594D"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "id": "Vector_3",
+ "fill-rule": "evenodd",
+ "clip-rule": "evenodd",
+ "d": "M9.00732 17.3086C9.00732 15.6162 10.0262 14.0902 11.5894 13.4414L14.7612 12.1251C17.9694 10.7936 21.5006 13.1513 21.5006 16.6249C21.5006 19.316 19.3185 21.4974 16.6273 21.4967L13.1933 21.4958C10.8813 21.4952 9.00732 19.6207 9.00732 17.3086Z",
+ "fill": "#D18EE2"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "id": "Vector_4",
+ "d": "M4.10396 13.8277C2.11358 13.8277 0.5 15.4411 0.5 17.4315V17.8984C0.5 19.8887 2.11352 21.5022 4.1039 21.5022C6.09428 21.5022 7.70785 19.8887 7.70785 17.8984V17.4315C7.70785 15.4411 6.09434 13.8277 4.10396 13.8277Z",
+ "fill": "#FF7759"
+ },
+ "children": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "name": "Cohere"
+}
\ No newline at end of file
diff --git a/web/app/components/base/icons/src/public/llm/Cohere.tsx b/web/app/components/base/icons/src/public/llm/Cohere.tsx
new file mode 100644
index 00000000000000..76d8bfbecb52e5
--- /dev/null
+++ b/web/app/components/base/icons/src/public/llm/Cohere.tsx
@@ -0,0 +1,16 @@
+// GENERATE BY script
+// DON NOT EDIT IT MANUALLY
+
+import * as React from 'react'
+import data from './Cohere.json'
+import IconBase from '@/app/components/base/icons/IconBase'
+import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'
+
+const Icon = React.forwardRef, Omit>((
+ props,
+ ref,
+) => )
+
+Icon.displayName = 'Cohere'
+
+export default Icon
diff --git a/web/app/components/base/icons/src/public/llm/CohereText.json b/web/app/components/base/icons/src/public/llm/CohereText.json
new file mode 100644
index 00000000000000..588b3458149206
--- /dev/null
+++ b/web/app/components/base/icons/src/public/llm/CohereText.json
@@ -0,0 +1,90 @@
+{
+ "icon": {
+ "type": "element",
+ "isRootNode": true,
+ "name": "svg",
+ "attributes": {
+ "width": "120",
+ "height": "24",
+ "viewBox": "0 0 120 24",
+ "fill": "none",
+ "xmlns": "http://www.w3.org/2000/svg"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M34.4917 21.9129C37.4378 21.9129 40.0162 20.4398 41.0355 17.4656C41.2334 16.8701 40.9496 16.4743 40.384 16.4743H39.2787C38.7689 16.4743 38.4292 16.7002 38.2013 17.1818C37.3239 18.9108 36.1047 19.5324 34.5757 19.5324C31.8553 19.5324 30.1844 17.6335 30.1844 14.4616C30.1844 11.2896 31.9133 9.39083 34.5177 9.39083C36.1046 9.39083 37.4079 10.0704 38.2293 11.6854C38.4852 12.1671 38.795 12.3929 39.3067 12.3929H40.412C40.9776 12.3929 41.2614 12.0251 41.0635 11.4855C39.8742 8.25556 37.2099 7.01035 34.4917 7.01035C30.3843 7.01035 27.3242 10.0424 27.3242 14.4616C27.3242 18.8808 30.2424 21.9129 34.4917 21.9129ZM108.627 13.1584C108.995 10.75 110.638 9.24892 112.876 9.24892C115.115 9.24892 116.786 10.7779 116.983 13.1584H108.627ZM112.99 21.9129C115.596 21.9129 118.203 20.6956 119.478 17.9474C119.79 17.2958 119.506 16.8421 118.94 16.8421H117.892C117.383 16.8421 117.071 17.0679 116.816 17.5216C115.966 19.0227 114.493 19.6463 112.992 19.6463C110.414 19.6463 108.743 17.8894 108.545 15.0292H118.943C119.508 15.0292 119.878 14.7174 119.878 14.1219C119.764 9.67465 116.876 7.01235 112.88 7.01235C108.885 7.01235 105.713 9.90251 105.713 14.4636C105.713 19.0247 108.801 21.9148 112.994 21.9148L112.99 21.9129ZM96.5025 14.8313H97.4378C98.0035 14.8313 98.3152 14.5196 98.4012 13.9239C98.9409 10.0964 101.182 9.5887 103.564 9.70264C104.074 9.72661 104.491 9.33487 104.491 8.82319V7.94575C104.491 7.38012 104.208 7.03833 103.642 7.01035C101.533 6.9304 99.6525 7.65393 98.5651 9.70264C98.5052 9.81455 98.3373 9.78458 98.3233 9.65866L98.1474 8.11365C98.0915 7.54801 97.7796 7.26418 97.212 7.26418H92.9347C92.435 7.26418 92.0272 7.66993 92.0272 8.17161V8.6533C92.0272 9.15298 92.433 9.56072 92.9347 9.56072H94.6916C95.1912 9.56072 95.599 9.96646 95.599 10.4681V13.9239C95.599 14.4236 96.0048 14.8313 96.5064 14.8313H96.5025ZM92.6788 21.631H101.545C102.111 21.631 102.453 21.2913 102.453 20.7236V20.2418C102.453 19.6762 102.113 19.3345 101.545 19.3345H99.2787C98.7131 19.3345 98.3712 18.9947 98.3712 
18.4271V16.8681C98.3712 16.3024 98.0315 15.9606 97.4638 15.9606H96.5005C95.9348 15.9606 95.593 16.3004 95.593 16.8681V18.4271C95.593 18.9927 95.2532 19.3345 94.6856 19.3345H92.6749C92.1092 19.3345 91.7674 19.6743 91.7674 20.2418V20.7236C91.7674 21.2893 92.1073 21.631 92.6749 21.631H92.6788ZM78.9955 13.1604C79.3633 10.752 81.0062 9.25092 83.2449 9.25092C85.4834 9.25092 87.1544 10.7799 87.3522 13.1604H78.9955ZM83.3587 21.9148C85.9651 21.9148 88.5714 20.6977 89.8466 17.9493C90.1585 17.2978 89.8746 16.844 89.309 16.844H88.2617C87.7519 16.844 87.4402 17.0699 87.1844 17.5236C86.3349 19.0247 84.8618 19.6482 83.3607 19.6482C80.7824 19.6482 79.1115 17.8914 78.9136 15.0313H89.311C89.8766 15.0313 90.2464 14.7194 90.2464 14.1238C90.1324 9.67665 87.2443 7.01434 83.2488 7.01434C79.2533 7.01434 76.0814 9.9045 76.0814 14.4656C76.0814 19.0266 79.1694 21.9168 83.3628 21.9168L83.3587 21.9148ZM50.5835 21.9148C54.8329 21.9148 57.8649 18.7708 57.8649 14.4636C57.8649 10.1563 54.8329 7.01235 50.5835 7.01235C46.3342 7.01235 43.3022 10.2143 43.3022 14.4636C43.3022 15.455 43.472 16.5602 43.9816 17.7775C44.2375 18.3731 44.7192 18.4571 45.2289 18.0892L46.0504 17.4936C46.4761 17.1818 46.588 16.8141 46.4461 16.2765C46.2202 15.5689 46.1623 14.9453 46.1623 14.4076C46.1623 11.4335 47.9472 9.39283 50.5815 9.39283C53.2159 9.39283 55.0007 11.4035 55.0007 14.4636C55.0007 17.5236 53.2439 19.5344 50.6375 19.5344C49.7301 19.5344 48.8806 19.3645 47.8612 18.5989C47.4355 18.2592 47.0397 18.2032 46.586 18.5429L45.9624 18.9967C45.4527 19.3645 45.3968 19.8741 45.8764 20.2718C47.3496 21.4611 49.0485 21.9148 50.5795 21.9148H50.5835ZM61.4606 21.631H62.3961C62.8957 21.631 63.3035 21.2252 63.3035 20.7236V13.9539C63.3035 11.0937 64.8324 9.39283 67.213 9.39283C69.3656 9.39283 70.6128 10.8099 70.6128 13.4163V20.7255C70.6128 21.2252 71.0186 21.633 71.5203 21.633H72.4836C72.9833 21.633 73.391 21.2272 73.391 20.7255V12.9625C73.391 9.13899 71.4363 7.01434 68.1224 7.01434C65.8659 7.01434 64.5327 7.93776 63.5373 
9.22294C63.4613 9.32088 63.3075 9.26691 63.3075 9.14499V2.99092C63.3014 2.48924 62.8957 2.0835 62.3961 2.0835H61.4606C60.9609 2.0835 60.5532 2.48924 60.5532 2.99092V20.7236C60.5532 21.2232 60.959 21.631 61.4606 21.631Z",
+ "fill": "#39594D"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "mask",
+ "attributes": {
+ "id": "mask0_13223_52628",
+ "style": "mask-type:luminance",
+ "maskUnits": "userSpaceOnUse",
+ "x": "1",
+ "y": "2",
+ "width": "20",
+ "height": "20"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M20.8354 2.08319H1.00195V21.9165H20.8354V2.08319Z",
+ "fill": "white"
+ },
+ "children": []
+ }
+ ]
+ },
+ {
+ "type": "element",
+ "name": "g",
+ "attributes": {
+ "mask": "url(#mask0_13223_52628)"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "fill-rule": "evenodd",
+ "clip-rule": "evenodd",
+ "d": "M7.42768 13.8921C7.96151 13.8921 9.02342 13.8628 10.4912 13.2585C12.2017 12.5542 15.6047 11.2758 18.0597 9.96274C19.7765 9.04432 20.5291 7.82964 20.5291 6.19387C20.5291 3.92362 18.6887 2.08319 16.4185 2.08319H6.90643C3.64547 2.08319 1.00195 4.72669 1.00195 7.98763C1.00195 11.2486 3.47706 13.8921 7.42768 13.8921Z",
+ "fill": "#39594D"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "fill-rule": "evenodd",
+ "clip-rule": "evenodd",
+ "d": "M9.03711 17.958C9.03711 16.3596 9.99942 14.9184 11.4758 14.3057L14.4713 13.0625C17.5013 11.805 20.8364 14.0316 20.8364 17.3123C20.8364 19.8539 18.7755 21.9141 16.2338 21.9134L12.9906 21.9126C10.807 21.912 9.03711 20.1417 9.03711 17.958Z",
+ "fill": "#D18EE2"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M4.40571 14.6705C2.5259 14.6705 1.00195 16.1943 1.00195 18.0741V18.515C1.00195 20.3947 2.52584 21.9186 4.40565 21.9186C6.28547 21.9186 7.80941 20.3947 7.80941 18.515V18.0741C7.80941 16.1943 6.28552 14.6705 4.40571 14.6705Z",
+ "fill": "#FF7759"
+ },
+ "children": []
+ }
+ ]
+ }
+ ]
+ },
+ "name": "CohereText"
+}
\ No newline at end of file
diff --git a/web/app/components/base/icons/src/public/llm/CohereText.tsx b/web/app/components/base/icons/src/public/llm/CohereText.tsx
new file mode 100644
index 00000000000000..efd37b6f8336e3
--- /dev/null
+++ b/web/app/components/base/icons/src/public/llm/CohereText.tsx
@@ -0,0 +1,16 @@
+// GENERATE BY script
+// DON NOT EDIT IT MANUALLY
+
+import * as React from 'react'
+import data from './CohereText.json'
+import IconBase from '@/app/components/base/icons/IconBase'
+import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'
+
+const Icon = React.forwardRef, Omit>((
+ props,
+ ref,
+) => )
+
+Icon.displayName = 'CohereText'
+
+export default Icon
diff --git a/web/app/components/base/icons/src/public/llm/index.ts b/web/app/components/base/icons/src/public/llm/index.ts
index 9144e6fa052448..e35c3838d19894 100644
--- a/web/app/components/base/icons/src/public/llm/index.ts
+++ b/web/app/components/base/icons/src/public/llm/index.ts
@@ -8,6 +8,8 @@ export { default as BaichuanText } from './BaichuanText'
export { default as Baichuan } from './Baichuan'
export { default as ChatglmText } from './ChatglmText'
export { default as Chatglm } from './Chatglm'
+export { default as CohereText } from './CohereText'
+export { default as Cohere } from './Cohere'
export { default as Gpt3 } from './Gpt3'
export { default as Gpt4 } from './Gpt4'
export { default as HuggingfaceTextHub } from './HuggingfaceTextHub'
diff --git a/web/app/components/base/icons/src/vender/solid/arrows/HighPriority.json b/web/app/components/base/icons/src/vender/solid/arrows/HighPriority.json
new file mode 100644
index 00000000000000..6710fd8109f779
--- /dev/null
+++ b/web/app/components/base/icons/src/vender/solid/arrows/HighPriority.json
@@ -0,0 +1,53 @@
+{
+ "icon": {
+ "type": "element",
+ "isRootNode": true,
+ "name": "svg",
+ "attributes": {
+ "width": "24",
+ "height": "24",
+ "viewBox": "0 0 24 24",
+ "fill": "none",
+ "xmlns": "http://www.w3.org/2000/svg"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M9.01488 2.54553C8.91549 2.45869 8.79321 2.40229 8.66264 2.38306C8.53206 2.36384 8.39872 2.38261 8.27852 2.43712C8.15833 2.49164 8.05636 2.5796 7.98481 2.6905C7.91325 2.8014 7.87513 2.93055 7.875 3.06253V6.50003C6.05164 6.50003 4.30295 7.22436 3.01364 8.51367C1.72433 9.80299 1 11.5517 1 13.375C1 15.1984 1.72433 16.9471 3.01364 18.2364C4.30295 19.5257 6.05164 20.25 7.875 20.25H12C12.3647 20.25 12.7144 20.1052 12.9723 19.8473C13.2301 19.5894 13.375 19.2397 13.375 18.875C13.375 18.5104 13.2301 18.1606 12.9723 17.9028C12.7144 17.6449 12.3647 17.5 12 17.5H7.875C6.78098 17.5 5.73177 17.0654 4.95818 16.2919C4.1846 15.5183 3.75 14.4691 3.75 13.375C3.75 12.281 4.1846 11.2318 4.95818 10.4582C5.73177 9.68463 6.78098 9.25003 7.875 9.25003V12.6875C7.87513 12.8195 7.91325 12.9487 7.98481 13.0596C8.05636 13.1705 8.15833 13.2584 8.27852 13.3129C8.39872 13.3675 8.53206 13.3862 8.66264 13.367C8.79321 13.3478 8.91549 13.2914 9.01488 13.2045L14.5149 8.39203C14.5885 8.32751 14.6475 8.24801 14.6879 8.15885C14.7283 8.06969 14.7492 7.97292 14.7492 7.87503C14.7492 7.77714 14.7283 7.68038 14.6879 7.59122C14.6475 7.50206 14.5885 7.42256 14.5149 7.35803L9.01488 2.54553Z",
+ "fill": "currentColor"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M21.625 17.5H17.5C17.1353 17.5 16.7856 17.6449 16.5277 17.9028C16.2699 18.1606 16.125 18.5104 16.125 18.875C16.125 19.2397 16.2699 19.5894 16.5277 19.8473C16.7856 20.1052 17.1353 20.25 17.5 20.25H21.625C21.9897 20.25 22.3394 20.1052 22.5973 19.8473C22.8551 19.5894 23 19.2397 23 18.875C23 18.5104 22.8551 18.1606 22.5973 17.9028C22.3394 17.6449 21.9897 17.5 21.625 17.5Z",
+ "fill": "currentColor"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M21.625 12H17.5C17.1353 12 16.7856 12.1449 16.5277 12.4028C16.2699 12.6606 16.125 13.0104 16.125 13.375C16.125 13.7397 16.2699 14.0894 16.5277 14.3473C16.7856 14.6052 17.1353 14.75 17.5 14.75H21.625C21.9897 14.75 22.3394 14.6052 22.5973 14.3473C22.8551 14.0894 23 13.7397 23 13.375C23 13.0104 22.8551 12.6606 22.5973 12.4028C22.3394 12.1449 21.9897 12 21.625 12Z",
+ "fill": "currentColor"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M17.5 9.25003H21.625C21.9897 9.25003 22.3394 9.10517 22.5973 8.8473C22.8551 8.58944 23 8.23971 23 7.87503C23 7.51036 22.8551 7.16062 22.5973 6.90276C22.3394 6.6449 21.9897 6.50003 21.625 6.50003H17.5C17.1353 6.50003 16.7856 6.6449 16.5277 6.90276C16.2699 7.16062 16.125 7.51036 16.125 7.87503C16.125 8.23971 16.2699 8.58944 16.5277 8.8473C16.7856 9.10517 17.1353 9.25003 17.5 9.25003Z",
+ "fill": "currentColor"
+ },
+ "children": []
+ }
+ ]
+ },
+ "name": "HighPriority"
+}
\ No newline at end of file
diff --git a/web/app/components/base/icons/src/vender/solid/arrows/HighPriority.tsx b/web/app/components/base/icons/src/vender/solid/arrows/HighPriority.tsx
new file mode 100644
index 00000000000000..1facf6f8001ee1
--- /dev/null
+++ b/web/app/components/base/icons/src/vender/solid/arrows/HighPriority.tsx
@@ -0,0 +1,16 @@
+// GENERATE BY script
+// DON NOT EDIT IT MANUALLY
+
+import * as React from 'react'
+import data from './HighPriority.json'
+import IconBase from '@/app/components/base/icons/IconBase'
+import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'
+
+const Icon = React.forwardRef, Omit>((
+ props,
+ ref,
+) => )
+
+Icon.displayName = 'HighPriority'
+
+export default Icon
diff --git a/web/app/components/base/icons/src/vender/solid/arrows/index.ts b/web/app/components/base/icons/src/vender/solid/arrows/index.ts
new file mode 100644
index 00000000000000..3a0650f7f55568
--- /dev/null
+++ b/web/app/components/base/icons/src/vender/solid/arrows/index.ts
@@ -0,0 +1 @@
+export { default as HighPriority } from './HighPriority'
diff --git a/web/app/components/base/icons/src/vender/solid/development/PatternRecognition.json b/web/app/components/base/icons/src/vender/solid/development/PatternRecognition.json
new file mode 100644
index 00000000000000..3d13c32b873a64
--- /dev/null
+++ b/web/app/components/base/icons/src/vender/solid/development/PatternRecognition.json
@@ -0,0 +1,98 @@
+{
+ "icon": {
+ "type": "element",
+ "isRootNode": true,
+ "name": "svg",
+ "attributes": {
+ "width": "24",
+ "height": "24",
+ "viewBox": "0 0 24 24",
+ "fill": "none",
+ "xmlns": "http://www.w3.org/2000/svg"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M4.72727 22C4.18787 22 3.66058 21.84 3.21208 21.5404C2.76359 21.2407 2.41402 20.8148 2.2076 20.3164C2.00118 19.8181 1.94717 19.2697 2.05241 18.7407C2.15764 18.2116 2.41739 17.7257 2.7988 17.3443C3.18022 16.9628 3.66617 16.7031 4.19521 16.5979C4.72425 16.4926 5.27261 16.5466 5.77096 16.7531C6.2693 16.9595 6.69524 17.309 6.99492 17.7575C7.2946 18.206 7.45455 18.7333 7.45455 19.2727C7.45455 19.996 7.16721 20.6897 6.65575 21.2012C6.14429 21.7127 5.45059 22 4.72727 22Z",
+ "fill": "currentColor"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M12 9.27273C11.4606 9.27273 10.9333 9.43268 10.4848 9.73236C10.0363 10.032 9.68675 10.458 9.48033 10.9563C9.27391 11.4547 9.2199 12.003 9.32513 12.5321C9.43036 13.0611 9.69011 13.5471 10.0715 13.9285C10.4529 14.3099 10.9389 14.5696 11.4679 14.6749C11.997 14.7801 12.5453 14.7261 13.0437 14.5197C13.542 14.3133 13.968 13.9637 14.2676 13.5152C14.5673 13.0667 14.7273 12.5394 14.7273 12C14.7273 11.2767 14.4399 10.583 13.9285 10.0715C13.417 9.56006 12.7233 9.27273 12 9.27273Z",
+ "fill": "currentColor"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M4.72727 2C4.18787 2 3.66058 2.15995 3.21208 2.45963C2.76358 2.7593 2.41402 3.18525 2.2076 3.68359C2.00118 4.18193 1.94717 4.7303 2.05241 5.25934C2.15764 5.78838 2.41738 6.27433 2.7988 6.65575C3.18022 7.03716 3.66617 7.29691 4.19521 7.40214C4.72425 7.50737 5.27261 7.45336 5.77096 7.24694C6.2693 7.04052 6.69524 6.69096 6.99492 6.24246C7.29459 5.79397 7.45455 5.26668 7.45455 4.72727C7.45455 4.00395 7.16721 3.31026 6.65575 2.7988C6.14428 2.28734 5.45059 2 4.72727 2Z",
+ "fill": "currentColor"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M19.2727 2C18.7333 2 18.206 2.15995 17.7575 2.45963C17.309 2.75931 16.9595 3.18525 16.7531 3.68359C16.5466 4.18194 16.4926 4.7303 16.5979 5.25934C16.7031 5.78838 16.9628 6.27433 17.3443 6.65575C17.7257 7.03716 18.2116 7.29691 18.7407 7.40214C19.2697 7.50737 19.8181 7.45337 20.3164 7.24694C20.8148 7.04052 21.2407 6.69096 21.5404 6.24247C21.84 5.79397 22 5.26668 22 4.72727C22 4.00396 21.7127 3.31026 21.2012 2.7988C20.6897 2.28734 19.996 2 19.2727 2Z",
+ "fill": "currentColor"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M19.2727 16.5455C18.7333 16.5455 18.206 16.7054 17.7575 17.0051C17.309 17.3048 16.9595 17.7307 16.7531 18.229C16.5466 18.7274 16.4926 19.2758 16.5979 19.8048C16.7031 20.3338 16.9628 20.8198 17.3443 21.2012C17.7257 21.5826 18.2116 21.8424 18.7407 21.9476C19.2697 22.0528 19.8181 21.9988 20.3164 21.7924C20.8148 21.586 21.2407 21.2364 21.5404 20.7879C21.84 20.3394 22 19.8121 22 19.2727C22 18.5494 21.7127 17.8557 21.2012 17.3443C20.6897 16.8328 19.996 16.5455 19.2727 16.5455Z",
+ "fill": "currentColor"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M7.45455 9.27273H2V14.7273H7.45455V9.27273Z",
+ "fill": "currentColor"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M22 9.27273H16.5455V14.7273H22V9.27273Z",
+ "fill": "currentColor"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M14.7273 2H9.27273V7.45455H14.7273V2Z",
+ "fill": "currentColor"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M14.7273 16.5455H9.27273V22H14.7273V16.5455Z",
+ "fill": "currentColor"
+ },
+ "children": []
+ }
+ ]
+ },
+ "name": "PatternRecognition"
+}
\ No newline at end of file
diff --git a/web/app/components/base/icons/src/vender/solid/development/PatternRecognition.tsx b/web/app/components/base/icons/src/vender/solid/development/PatternRecognition.tsx
new file mode 100644
index 00000000000000..74cdf2f62f79e8
--- /dev/null
+++ b/web/app/components/base/icons/src/vender/solid/development/PatternRecognition.tsx
@@ -0,0 +1,16 @@
+// GENERATE BY script
+// DON NOT EDIT IT MANUALLY
+
+import * as React from 'react'
+import data from './PatternRecognition.json'
+import IconBase from '@/app/components/base/icons/IconBase'
+import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'
+
+const Icon = React.forwardRef, Omit>((
+ props,
+ ref,
+) => )
+
+Icon.displayName = 'PatternRecognition'
+
+export default Icon
diff --git a/web/app/components/base/icons/src/vender/solid/development/Semantic.json b/web/app/components/base/icons/src/vender/solid/development/Semantic.json
new file mode 100644
index 00000000000000..333b3fa1c1cb44
--- /dev/null
+++ b/web/app/components/base/icons/src/vender/solid/development/Semantic.json
@@ -0,0 +1,53 @@
+{
+ "icon": {
+ "type": "element",
+ "isRootNode": true,
+ "name": "svg",
+ "attributes": {
+ "width": "24",
+ "height": "24",
+ "viewBox": "0 0 24 24",
+ "fill": "none",
+ "xmlns": "http://www.w3.org/2000/svg"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M16.5833 12.945C16.4856 13.3276 16.2038 14.272 15.7382 15.7784H17.4432C17.0038 14.3674 16.7569 13.5692 16.7025 13.3841C16.6493 13.1998 16.609 13.0532 16.5833 12.945Z",
+ "fill": "currentColor"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M21.1667 9.33333H12C11.5138 9.33333 11.0475 9.52649 10.7036 9.87031C10.3598 10.2141 10.1667 10.6804 10.1667 11.1667V19.4167C10.1667 19.9029 10.3598 20.3692 10.7036 20.713C11.0475 21.0568 11.5138 21.25 12 21.25H17.5L21.1667 24V21.25C21.6529 21.25 22.1192 21.0568 22.463 20.713C22.8068 20.3692 23 19.9029 23 19.4167V11.1667C23 10.6804 22.8068 10.2141 22.463 9.87031C22.1192 9.52649 21.6529 9.33333 21.1667 9.33333ZM18.2507 18.5L17.775 16.9417H15.3917L14.9159 18.5H13.4208L15.7308 11.9293H17.4267L19.7458 18.5H18.2507Z",
+ "fill": "currentColor"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M12 2H2.83333C2.3471 2 1.88079 2.19315 1.53697 2.53697C1.19315 2.88079 1 3.3471 1 3.83333V12.0833C1 12.5696 1.19315 13.0359 1.53697 13.3797C1.88079 13.7235 2.3471 13.9167 2.83333 13.9167V16.6667L6.5 13.9167H9.25V11.1667C9.25381 11.0459 9.26606 10.9255 9.28667 10.8064C8.64229 10.5527 8.0315 10.2208 7.468 9.81825C6.5802 10.4316 5.59355 10.8877 4.55117 11.1667C4.394 10.6965 4.15573 10.2575 3.84717 9.86958C4.76378 9.70375 5.64426 9.37861 6.44867 8.90892C6.07755 8.50417 5.75993 8.05346 5.50358 7.56783C5.29175 7.16889 5.12217 6.74892 4.99758 6.31475C4.56583 6.31475 4.3165 6.32942 3.94983 6.35875V5.03233C4.30266 5.0703 4.65741 5.08744 5.01225 5.08367H6.63292V4.64367C6.63379 4.48979 6.61904 4.33623 6.58892 4.18533H8.05833C8.02877 4.33229 8.01403 4.48185 8.01433 4.63175V5.07908H9.756C10.1108 5.08303 10.4656 5.06589 10.8184 5.02775V6.35875C10.4958 6.32942 10.2098 6.31475 9.778 6.31475C9.67623 6.80565 9.51074 7.28115 9.28575 7.72917C9.06864 8.16083 8.79489 8.56159 8.47175 8.92083C8.89523 9.17057 9.34617 9.37051 9.81558 9.51667C10.0695 9.17655 10.399 8.90012 10.7781 8.70922C11.1573 8.51831 11.5755 8.41816 12 8.41667H13.8333V3.83333C13.8333 3.3471 13.6402 2.88079 13.2964 2.53697C12.9525 2.19315 12.4862 2 12 2Z",
+ "fill": "currentColor"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M7.43133 8.0885C7.87722 7.58102 8.19195 6.97201 8.348 6.31475H6.40833C6.59708 6.98164 6.94861 7.59116 7.43133 8.0885Z",
+ "fill": "currentColor"
+ },
+ "children": []
+ }
+ ]
+ },
+ "name": "Semantic"
+}
\ No newline at end of file
diff --git a/web/app/components/base/icons/src/vender/solid/development/Semantic.tsx b/web/app/components/base/icons/src/vender/solid/development/Semantic.tsx
new file mode 100644
index 00000000000000..8b934026e485df
--- /dev/null
+++ b/web/app/components/base/icons/src/vender/solid/development/Semantic.tsx
@@ -0,0 +1,16 @@
+// GENERATE BY script
+// DON NOT EDIT IT MANUALLY
+
+import * as React from 'react'
+import data from './Semantic.json'
+import IconBase from '@/app/components/base/icons/IconBase'
+import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'
+
+const Icon = React.forwardRef, Omit>((
+ props,
+ ref,
+) => )
+
+Icon.displayName = 'Semantic'
+
+export default Icon
diff --git a/web/app/components/base/icons/src/vender/solid/development/index.ts b/web/app/components/base/icons/src/vender/solid/development/index.ts
index 55319821035bdd..ed2a62b8486541 100644
--- a/web/app/components/base/icons/src/vender/solid/development/index.ts
+++ b/web/app/components/base/icons/src/vender/solid/development/index.ts
@@ -1,5 +1,7 @@
export { default as Container } from './Container'
export { default as Database02 } from './Database02'
export { default as Database03 } from './Database03'
+export { default as PatternRecognition } from './PatternRecognition'
export { default as PuzzlePiece01 } from './PuzzlePiece01'
+export { default as Semantic } from './Semantic'
export { default as TerminalSquare } from './TerminalSquare'
diff --git a/web/app/components/base/image-uploader/hooks.ts b/web/app/components/base/image-uploader/hooks.ts
index 242e5aff066177..24a5af1b867ff4 100644
--- a/web/app/components/base/image-uploader/hooks.ts
+++ b/web/app/components/base/image-uploader/hooks.ts
@@ -1,4 +1,4 @@
-import { useState } from 'react'
+import { useMemo, useRef, useState } from 'react'
import { useParams } from 'next/navigation'
import { useTranslation } from 'react-i18next'
import { imageUpload } from './utils'
@@ -10,38 +10,59 @@ export const useImageFiles = () => {
const { t } = useTranslation()
const { notify } = useToastContext()
const [files, setFiles] = useState([])
+ const filesRef = useRef([])
const handleUpload = (imageFile: ImageFile) => {
- const newFiles = [...files, imageFile]
- setFiles(newFiles)
+ const files = filesRef.current
+ const index = files.findIndex(file => file._id === imageFile._id)
+
+ if (index > -1) {
+ const currentFile = files[index]
+ const newFiles = [...files.slice(0, index), { ...currentFile, ...imageFile }, ...files.slice(index + 1)]
+ setFiles(newFiles)
+ filesRef.current = newFiles
+ }
+ else {
+ const newFiles = [...files, imageFile]
+ setFiles(newFiles)
+ filesRef.current = newFiles
+ }
}
const handleRemove = (imageFileId: string) => {
+ const files = filesRef.current
const index = files.findIndex(file => file._id === imageFileId)
if (index > -1) {
- const newFiles = [...files.slice(0, index), ...files.slice(index + 1)]
+ const currentFile = files[index]
+ const newFiles = [...files.slice(0, index), { ...currentFile, deleted: true }, ...files.slice(index + 1)]
setFiles(newFiles)
+ filesRef.current = newFiles
}
}
const handleImageLinkLoadError = (imageFileId: string) => {
+ const files = filesRef.current
const index = files.findIndex(file => file._id === imageFileId)
if (index > -1) {
const currentFile = files[index]
const newFiles = [...files.slice(0, index), { ...currentFile, progress: -1 }, ...files.slice(index + 1)]
+ filesRef.current = newFiles
setFiles(newFiles)
}
}
const handleImageLinkLoadSuccess = (imageFileId: string) => {
+ const files = filesRef.current
const index = files.findIndex(file => file._id === imageFileId)
if (index > -1) {
const currentImageFile = files[index]
const newFiles = [...files.slice(0, index), { ...currentImageFile, progress: 100 }, ...files.slice(index + 1)]
+ filesRef.current = newFiles
setFiles(newFiles)
}
}
const handleReUpload = (imageFileId: string) => {
+ const files = filesRef.current
const index = files.findIndex(file => file._id === imageFileId)
if (index > -1) {
@@ -50,15 +71,18 @@ export const useImageFiles = () => {
file: currentImageFile.file!,
onProgressCallback: (progress) => {
const newFiles = [...files.slice(0, index), { ...currentImageFile, progress }, ...files.slice(index + 1)]
+ filesRef.current = newFiles
setFiles(newFiles)
},
onSuccessCallback: (res) => {
const newFiles = [...files.slice(0, index), { ...currentImageFile, fileId: res.id, progress: 100 }, ...files.slice(index + 1)]
+ filesRef.current = newFiles
setFiles(newFiles)
},
onErrorCallback: () => {
notify({ type: 'error', message: t('common.imageUploader.uploadFromComputerUploadError') })
const newFiles = [...files.slice(0, index), { ...currentImageFile, progress: -1 }, ...files.slice(index + 1)]
+ filesRef.current = newFiles
setFiles(newFiles)
},
}, !!params.token)
@@ -67,10 +91,15 @@ export const useImageFiles = () => {
const handleClear = () => {
setFiles([])
+ filesRef.current = []
}
+ const filteredFiles = useMemo(() => {
+ return files.filter(file => !file.deleted)
+ }, [files])
+
return {
- files,
+ files: filteredFiles,
onUpload: handleUpload,
onRemove: handleRemove,
onImageLinkLoadError: handleImageLinkLoadError,
diff --git a/web/app/components/base/image-uploader/uploader.tsx b/web/app/components/base/image-uploader/uploader.tsx
index 5c6ee50a6fd641..a0e420e7bf2903 100644
--- a/web/app/components/base/image-uploader/uploader.tsx
+++ b/web/app/components/base/image-uploader/uploader.tsx
@@ -85,9 +85,10 @@ const Uploader: FC = ({
{children(hovering)}
(e.target as HTMLInputElement).value = ''}
type='file'
accept='.png, .jpg, .jpeg, .webp, .gif'
onChange={handleChange}
diff --git a/web/app/components/base/param-item/score-threshold-item.tsx b/web/app/components/base/param-item/score-threshold-item.tsx
index 6aec7eba05c8b2..947961a4506aeb 100644
--- a/web/app/components/base/param-item/score-threshold-item.tsx
+++ b/web/app/components/base/param-item/score-threshold-item.tsx
@@ -41,7 +41,7 @@ const ScoreThresholdItem: FC = ({
className={className}
id={key}
name={t(`appDebug.datasetConfig.${key}`)}
- tip={t(`appDebug.datasetConfig.${key}Tip`)}
+ tip={t(`appDebug.datasetConfig.${key}Tip`) as string}
{...VALUE_LIMIT}
value={value}
enable={enable}
diff --git a/web/app/components/base/param-item/top-k-item.tsx b/web/app/components/base/param-item/top-k-item.tsx
index edbdcc33750d82..3ff801f2ae8cdf 100644
--- a/web/app/components/base/param-item/top-k-item.tsx
+++ b/web/app/components/base/param-item/top-k-item.tsx
@@ -37,7 +37,7 @@ const TopKItem: FC = ({
className={className}
id={key}
name={t(`appDebug.datasetConfig.${key}`)}
- tip={t(`appDebug.datasetConfig.${key}Tip`)}
+ tip={t(`appDebug.datasetConfig.${key}Tip`) as string}
{...VALUE_LIMIT}
value={value}
enable={enable}
diff --git a/web/app/components/base/radio-card/index.tsx b/web/app/components/base/radio-card/index.tsx
new file mode 100644
index 00000000000000..616e55aedffb3f
--- /dev/null
+++ b/web/app/components/base/radio-card/index.tsx
@@ -0,0 +1,55 @@
+'use client'
+import type { FC } from 'react'
+import React from 'react'
+import cn from 'classnames'
+import s from './style.module.css'
+
+type Props = {
+ className?: string
+ icon: React.ReactNode
+ iconBgClassName?: string
+ title: React.ReactNode
+ description: string
+ noRadio?: boolean
+ isChosen?: boolean
+ onChosen?: () => void
+ chosenConfig?: React.ReactNode
+ chosenConfigWrapClassName?: string
+}
+
+const RadioCard: FC = ({
+ icon,
+ iconBgClassName = 'bg-[#F5F3FF]',
+ title,
+ description,
+ noRadio,
+ isChosen,
+ onChosen = () => {},
+ chosenConfig,
+ chosenConfigWrapClassName,
+}) => {
+ return (
+
+
+
+ {icon}
+
+
+
{title}
+
{description}
+
+ {!noRadio && (
+
+ )}
+
+ {((isChosen && chosenConfig) || noRadio) && (
+
+ {chosenConfig}
+
+ )}
+
+ )
+}
+export default React.memo(RadioCard)
diff --git a/web/app/components/base/radio-card/simple/index.tsx b/web/app/components/base/radio-card/simple/index.tsx
new file mode 100644
index 00000000000000..f739552e51191c
--- /dev/null
+++ b/web/app/components/base/radio-card/simple/index.tsx
@@ -0,0 +1,40 @@
+'use client'
+import type { FC } from 'react'
+import React from 'react'
+import cn from 'classnames'
+import s from './style.module.css'
+
+type Props = {
+ className?: string
+ title: string
+ description: string
+ isChosen: boolean
+ onChosen: () => void
+ chosenConfig?: React.ReactNode
+ icon?: JSX.Element
+}
+
+const RadioCard: FC = ({
+ title,
+ description,
+ isChosen,
+ onChosen,
+ icon,
+}) => {
+ return (
+
+ )
+}
+export default React.memo(RadioCard)
diff --git a/web/app/components/base/radio-card/simple/style.module.css b/web/app/components/base/radio-card/simple/style.module.css
new file mode 100644
index 00000000000000..7b9871cc17c40f
--- /dev/null
+++ b/web/app/components/base/radio-card/simple/style.module.css
@@ -0,0 +1,25 @@
+.item {
+ @apply relative p-4 rounded-xl border border-gray-100 cursor-pointer;
+ background-color: #fcfcfd;
+}
+
+.item.active {
+ border-width: 1.5px;
+ border-color: #528BFF;
+ box-shadow: 0px 1px 3px rgba(16, 24, 40, 0.1), 0px 1px 2px rgba(16, 24, 40, 0.06);
+}
+
+.item:hover {
+ background-color: #ffffff;
+ border-color: #B2CCFF;
+ box-shadow: 0px 12px 16px -4px rgba(16, 24, 40, 0.08), 0px 4px 6px -2px rgba(16, 24, 40, 0.03);
+}
+
+.radio {
+ @apply w-4 h-4 border-[2px] border-gray-200 rounded-full;
+}
+
+.item.active .radio {
+ border-width: 5px;
+ border-color: #155EEF;
+}
\ No newline at end of file
diff --git a/web/app/components/base/radio-card/style.module.css b/web/app/components/base/radio-card/style.module.css
new file mode 100644
index 00000000000000..93a0f43c2b1ce4
--- /dev/null
+++ b/web/app/components/base/radio-card/style.module.css
@@ -0,0 +1,25 @@
+.item {
+ @apply relative rounded-xl border border-gray-100 cursor-pointer;
+ background-color: #fcfcfd;
+}
+
+.item.active {
+ border-width: 1.5px;
+ border-color: #528BFF;
+ box-shadow: 0px 1px 3px rgba(16, 24, 40, 0.1), 0px 1px 2px rgba(16, 24, 40, 0.06);
+}
+
+.item:hover {
+ background-color: #ffffff;
+ border-color: #B2CCFF;
+ box-shadow: 0px 12px 16px -4px rgba(16, 24, 40, 0.08), 0px 4px 6px -2px rgba(16, 24, 40, 0.03);
+}
+
+.radio {
+ @apply w-4 h-4 border-[2px] border-gray-200 rounded-full;
+}
+
+.item.active .radio {
+ border-width: 5px;
+ border-color: #155EEF;
+}
diff --git a/web/app/components/base/slider/index.tsx b/web/app/components/base/slider/index.tsx
index da8c7f29d2f558..0586a78152b13b 100644
--- a/web/app/components/base/slider/index.tsx
+++ b/web/app/components/base/slider/index.tsx
@@ -1,6 +1,7 @@
import ReactSlider from 'react-slider'
import cn from 'classnames'
import './style.css'
+
type ISliderProps = {
className?: string
value: number
diff --git a/web/app/components/datasets/common/check-rerank-model.ts b/web/app/components/datasets/common/check-rerank-model.ts
new file mode 100644
index 00000000000000..615bbdc3f3e298
--- /dev/null
+++ b/web/app/components/datasets/common/check-rerank-model.ts
@@ -0,0 +1,61 @@
+import type { BackendModel } from '../../header/account-setting/model-page/declarations'
+import { RETRIEVE_METHOD, type RetrievalConfig } from '@/types/app'
+
+export const isReRankModelSelected = ({
+ rerankDefaultModel,
+ isRerankDefaultModelVaild,
+ retrievalConfig,
+ rerankModelList,
+ indexMethod,
+}: {
+ rerankDefaultModel?: BackendModel
+ isRerankDefaultModelVaild: boolean
+ retrievalConfig: RetrievalConfig
+ rerankModelList: BackendModel[]
+ indexMethod?: string
+}) => {
+ const rerankModelSelected = (() => {
+ if (retrievalConfig.reranking_model?.reranking_model_name)
+ return !!rerankModelList.find(({ model_name }) => model_name === retrievalConfig.reranking_model?.reranking_model_name)
+
+ if (isRerankDefaultModelVaild)
+ return !!rerankDefaultModel
+
+ return false
+ })()
+
+ if (
+ indexMethod === 'high_quality'
+ && (retrievalConfig.reranking_enable || retrievalConfig.search_method === RETRIEVE_METHOD.hybrid)
+ && !rerankModelSelected
+ )
+ return false
+
+ return true
+}
+
+export const ensureRerankModelSelected = ({
+ rerankDefaultModel,
+ indexMethod,
+ retrievalConfig,
+}: {
+ rerankDefaultModel: BackendModel
+ retrievalConfig: RetrievalConfig
+ indexMethod?: string
+}) => {
+ const rerankModel = retrievalConfig.reranking_model?.reranking_model_name ? retrievalConfig.reranking_model : undefined
+ if (
+ indexMethod === 'high_quality'
+ && (retrievalConfig.reranking_enable || retrievalConfig.search_method === RETRIEVE_METHOD.hybrid)
+ && !rerankModel
+ ) {
+ return {
+ ...retrievalConfig,
+ reranking_model: {
+ reranking_provider_name: rerankDefaultModel.model_provider.provider_name,
+ reranking_model_name: rerankDefaultModel.model_name,
+ },
+ }
+ }
+ return retrievalConfig
+}
diff --git a/web/app/components/datasets/common/economical-retrieval-method-config/index.tsx b/web/app/components/datasets/common/economical-retrieval-method-config/index.tsx
new file mode 100644
index 00000000000000..f3da67b92cc5e9
--- /dev/null
+++ b/web/app/components/datasets/common/economical-retrieval-method-config/index.tsx
@@ -0,0 +1,40 @@
+'use client'
+import type { FC } from 'react'
+import React from 'react'
+import { useTranslation } from 'react-i18next'
+import RetrievalParamConfig from '../retrieval-param-config'
+import { RETRIEVE_METHOD } from '@/types/app'
+import RadioCard from '@/app/components/base/radio-card'
+import { HighPriority } from '@/app/components/base/icons/src/vender/solid/arrows'
+import type { RetrievalConfig } from '@/types/app'
+
+type Props = {
+ value: RetrievalConfig
+ onChange: (value: RetrievalConfig) => void
+}
+
+const EconomicalRetrievalMethodConfig: FC = ({
+ value,
+ onChange,
+}) => {
+ const { t } = useTranslation()
+
+ return (
+
+ }
+ title={t('dataset.retrieval.invertedIndex.title')}
+ description={t('dataset.retrieval.invertedIndex.description')}
+ noRadio
+ chosenConfig={
+
+ }
+ />
+
+ )
+}
+export default React.memo(EconomicalRetrievalMethodConfig)
diff --git a/web/app/components/datasets/common/retrieval-method-config/index.tsx b/web/app/components/datasets/common/retrieval-method-config/index.tsx
new file mode 100644
index 00000000000000..b49cea2b349eef
--- /dev/null
+++ b/web/app/components/datasets/common/retrieval-method-config/index.tsx
@@ -0,0 +1,103 @@
+'use client'
+import type { FC } from 'react'
+import React from 'react'
+import { useTranslation } from 'react-i18next'
+import RetrievalParamConfig from '../retrieval-param-config'
+import type { RetrievalConfig } from '@/types/app'
+import { RETRIEVE_METHOD } from '@/types/app'
+import RadioCard from '@/app/components/base/radio-card'
+import { PatternRecognition, Semantic } from '@/app/components/base/icons/src/vender/solid/development'
+import { FileSearch02 } from '@/app/components/base/icons/src/vender/solid/files'
+import { useProviderContext } from '@/context/provider-context'
+
+type Props = {
+ value: RetrievalConfig
+ onChange: (value: RetrievalConfig) => void
+}
+
+const RetrievalMethodConfig: FC = ({
+ value: passValue,
+ onChange,
+}) => {
+ const { t } = useTranslation()
+ const { supportRetrievalMethods, rerankDefaultModel } = useProviderContext()
+ const value = (() => {
+ if (!passValue.reranking_model.reranking_model_name) {
+ return {
+ ...passValue,
+ reranking_model: {
+ reranking_provider_name: rerankDefaultModel?.model_provider.provider_name || '',
+ reranking_model_name: rerankDefaultModel?.model_name || '',
+ },
+ }
+ }
+ return passValue
+ })()
+ return (
+
+ {supportRetrievalMethods.includes(RETRIEVE_METHOD.semantic) && (
+
}
+ title={t('dataset.retrieval.semantic_search.title')}
+ description={t('dataset.retrieval.semantic_search.description')}
+ isChosen={value.search_method === RETRIEVE_METHOD.semantic}
+ onChosen={() => onChange({
+ ...value,
+ search_method: RETRIEVE_METHOD.semantic,
+ })}
+ chosenConfig={
+
+ }
+ />
+ )}
+ {supportRetrievalMethods.includes(RETRIEVE_METHOD.semantic) && (
+
}
+ title={t('dataset.retrieval.full_text_search.title')}
+ description={t('dataset.retrieval.full_text_search.description')}
+ isChosen={value.search_method === RETRIEVE_METHOD.fullText}
+ onChosen={() => onChange({
+ ...value,
+ search_method: RETRIEVE_METHOD.fullText,
+ })}
+ chosenConfig={
+
+ }
+ />
+ )}
+ {supportRetrievalMethods.includes(RETRIEVE_METHOD.semantic) && (
+
}
+ title={
+
+
{t('dataset.retrieval.hybrid_search.title')}
+
{t('dataset.retrieval.hybrid_search.recommend')}
+
+ }
+ description={t('dataset.retrieval.hybrid_search.description')}
+ isChosen={value.search_method === RETRIEVE_METHOD.hybrid}
+ onChosen={() => onChange({
+ ...value,
+ search_method: RETRIEVE_METHOD.hybrid,
+ })}
+ chosenConfig={
+
+ }
+ />
+ )}
+
+ )
+}
+export default React.memo(RetrievalMethodConfig)
diff --git a/web/app/components/datasets/common/retrieval-method-info/index.tsx b/web/app/components/datasets/common/retrieval-method-info/index.tsx
new file mode 100644
index 00000000000000..7d9b999c53941f
--- /dev/null
+++ b/web/app/components/datasets/common/retrieval-method-info/index.tsx
@@ -0,0 +1,64 @@
+'use client'
+import type { FC } from 'react'
+import React from 'react'
+import { useTranslation } from 'react-i18next'
+import type { RetrievalConfig } from '@/types/app'
+import { RETRIEVE_METHOD } from '@/types/app'
+import RadioCard from '@/app/components/base/radio-card'
+import { HighPriority } from '@/app/components/base/icons/src/vender/solid/arrows'
+import { PatternRecognition, Semantic } from '@/app/components/base/icons/src/vender/solid/development'
+import { FileSearch02 } from '@/app/components/base/icons/src/vender/solid/files'
+
+type Props = {
+ value: RetrievalConfig
+}
+
+export const getIcon = (type: RETRIEVE_METHOD) => {
+ return ({
+ [RETRIEVE_METHOD.semantic]: Semantic,
+ [RETRIEVE_METHOD.fullText]: FileSearch02,
+ [RETRIEVE_METHOD.hybrid]: PatternRecognition,
+ [RETRIEVE_METHOD.invertedIndex]: HighPriority,
+ })[type] || FileSearch02
+}
+
+const EconomicalRetrievalMethodConfig: FC = ({
+ // type,
+ value,
+}) => {
+ const { t } = useTranslation()
+ const type = value.search_method
+ const Icon = getIcon(type)
+ return (
+
+
}
+ title={t(`dataset.retrieval.${type}.title`)}
+ description={t(`dataset.retrieval.${type}.description`)}
+ noRadio
+ chosenConfigWrapClassName='!pb-3'
+ chosenConfig={
+
+ {value.reranking_model.reranking_model_name && (
+
+
{t('common.modelProvider.rerankModel.key')}
+
{value.reranking_model.reranking_model_name}
+
+ )}
+
+
+
{t('appDebug.datasetConfig.top_k')}
+
{value.top_k}
+
+
+
+
{t('appDebug.datasetConfig.score_threshold')}
+
{value.score_threshold}
+
+
+ }
+ />
+
+ )
+}
+export default React.memo(EconomicalRetrievalMethodConfig)
diff --git a/web/app/components/datasets/common/retrieval-param-config/index.tsx b/web/app/components/datasets/common/retrieval-param-config/index.tsx
new file mode 100644
index 00000000000000..e824dac76747e0
--- /dev/null
+++ b/web/app/components/datasets/common/retrieval-param-config/index.tsx
@@ -0,0 +1,131 @@
+'use client'
+import type { FC } from 'react'
+import React from 'react'
+import { useTranslation } from 'react-i18next'
+import cn from 'classnames'
+import TopKItem from '@/app/components/base/param-item/top-k-item'
+import ScoreThresholdItem from '@/app/components/base/param-item/score-threshold-item'
+import { RETRIEVE_METHOD } from '@/types/app'
+import Switch from '@/app/components/base/switch'
+import Tooltip from '@/app/components/base/tooltip-plus'
+import { HelpCircle } from '@/app/components/base/icons/src/vender/line/general'
+import ModelSelector from '@/app/components/header/account-setting/model-page/model-selector'
+import { ModelType } from '@/app/components/header/account-setting/model-page/declarations'
+import type { RetrievalConfig } from '@/types/app'
+import { useProviderContext } from '@/context/provider-context'
+
+type Props = {
+ type: RETRIEVE_METHOD
+ value: RetrievalConfig
+ onChange: (value: RetrievalConfig) => void
+}
+
+const RetrievalParamConfig: FC = ({
+ type,
+ value,
+ onChange,
+}) => {
+ const { t } = useTranslation()
+ const canToggleRerankModalEnable = type !== RETRIEVE_METHOD.hybrid
+ const isEconomical = type === RETRIEVE_METHOD.invertedIndex
+ const {
+ rerankDefaultModel,
+ } = useProviderContext()
+
+ const rerankModel = (() => {
+ if (value.reranking_model) {
+ return {
+ provider_name: value.reranking_model.reranking_provider_name,
+ model_name: value.reranking_model.reranking_model_name,
+ }
+ }
+ else if (rerankDefaultModel) {
+ return {
+ provider_name: rerankDefaultModel.model_provider.provider_name,
+ model_name: rerankDefaultModel.model_name,
+ }
+ }
+ })()
+
+ return (
+
+ {!isEconomical && (
+
+
+ {canToggleRerankModalEnable && (
+
{
+ onChange({
+ ...value,
+ reranking_enable: v,
+ })
+ }}
+ />
+ )}
+
+ {t('common.modelProvider.rerankModel.key')}
+ {t('common.modelProvider.rerankModel.tip')}
}>
+
+
+
+
+
+ {
+ onChange({
+ ...value,
+ reranking_model: {
+ reranking_provider_name: v.model_provider.provider_name,
+ reranking_model_name: v.model_name,
+ },
+ })
+ }}
+ />
+
+
+ )}
+
+
+ {
+ onChange({
+ ...value,
+ top_k: v,
+ })
+ }}
+ enable={true}
+ />
+ {(!isEconomical && !(value.search_method === RETRIEVE_METHOD.fullText && !value.reranking_enable)) && (
+ {
+ onChange({
+ ...value,
+ score_threshold: v,
+ })
+ }}
+ enable={value.score_threshold_enable}
+ hasSwitch={true}
+ onSwitchChange={(_key, v) => {
+ onChange({
+ ...value,
+ score_threshold_enable: v,
+ })
+ }}
+ />
+ )}
+
+
+ )
+}
+export default React.memo(RetrievalParamConfig)
diff --git a/web/app/components/datasets/create/step-two/index.tsx b/web/app/components/datasets/create/step-two/index.tsx
index a4422ebc3c381c..639d8631dc59a4 100644
--- a/web/app/components/datasets/create/step-two/index.tsx
+++ b/web/app/components/datasets/create/step-two/index.tsx
@@ -7,6 +7,7 @@ import { XMarkIcon } from '@heroicons/react/20/solid'
import cn from 'classnames'
import Link from 'next/link'
import { groupBy } from 'lodash-es'
+import RetrievalMethodInfo from '../../common/retrieval-method-info'
import PreviewItem, { PreviewType } from './preview-item'
import LanguageSelect from './language-select'
import s from './index.module.css'
@@ -19,7 +20,10 @@ import {
} from '@/service/datasets'
import Button from '@/app/components/base/button'
import Loading from '@/app/components/base/loading'
-
+import RetrievalMethodConfig from '@/app/components/datasets/common/retrieval-method-config'
+import EconomicalRetrievalMethodConfig from '@/app/components/datasets/common/economical-retrieval-method-config'
+import { type RetrievalConfig } from '@/types/app'
+import { ensureRerankModelSelected, isReRankModelSelected } from '@/app/components/datasets/common/check-rerank-model'
import Toast from '@/app/components/base/toast'
import { formatNumber } from '@/utils/format'
import type { NotionPage } from '@/models/common'
@@ -31,6 +35,8 @@ import { XClose } from '@/app/components/base/icons/src/vender/line/general'
import { useDatasetDetailContext } from '@/context/dataset-detail'
import I18n from '@/context/i18n'
import { IS_CE_EDITION } from '@/config'
+import { RETRIEVE_METHOD } from '@/types/app'
+import { useProviderContext } from '@/context/provider-context'
type ValueOf
= T[keyof T]
type StepTwoProps = {
@@ -78,7 +84,7 @@ const StepTwo = ({
const { t } = useTranslation()
const { locale } = useContext(I18n)
- const { mutateDatasetRes } = useDatasetDetailContext()
+ const { dataset: currentDataset, mutateDatasetRes } = useDatasetDetailContext()
const scrollRef = useRef(null)
const [scrolled, setScrolled] = useState(false)
const previewScrollRef = useRef(null)
@@ -254,7 +260,11 @@ const StepTwo = ({
}
}
}
-
+ const {
+ rerankDefaultModel,
+ isRerankDefaultModelVaild,
+ rerankModelList,
+ } = useProviderContext()
const getCreationParams = () => {
let params
if (isSetting) {
@@ -263,9 +273,31 @@ const StepTwo = ({
doc_form: docForm,
doc_language: docLanguage,
process_rule: getProcessRule(),
+ // eslint-disable-next-line @typescript-eslint/no-use-before-define
+ retrieval_model: retrievalConfig, // Readonly. If want to changed, just go to settings page.
} as CreateDocumentReq
}
- else {
+ else { // create
+ const indexMethod = getIndexing_technique()
+ if (
+ !isReRankModelSelected({
+ rerankDefaultModel,
+ isRerankDefaultModelVaild,
+ rerankModelList,
+ // eslint-disable-next-line @typescript-eslint/no-use-before-define
+ retrievalConfig,
+ indexMethod: indexMethod as string,
+ })
+ ) {
+ Toast.notify({ type: 'error', message: t('appDebug.datasetConfig.rerankModelRequired') })
+ return
+ }
+ const postRetrievalConfig = ensureRerankModelSelected({
+ rerankDefaultModel: rerankDefaultModel!,
+ // eslint-disable-next-line @typescript-eslint/no-use-before-define
+ retrievalConfig,
+ indexMethod: indexMethod as string,
+ })
params = {
data_source: {
type: dataSourceType,
@@ -277,6 +309,8 @@ const StepTwo = ({
process_rule: getProcessRule(),
doc_form: docForm,
doc_language: docLanguage,
+
+ retrieval_model: postRetrievalConfig,
} as CreateDocumentReq
if (dataSourceType === DataSourceType.FILE) {
params.data_source.info_list.file_info_list = {
@@ -327,10 +361,13 @@ const StepTwo = ({
try {
let res
const params = getCreationParams()
+ if (!params)
+ return false
+
setIsCreating(true)
if (!datasetId) {
res = await createFirstDocument({
- body: params,
+ body: params as CreateDocumentReq,
})
updateIndexingTypeCache && updateIndexingTypeCache(indexType as string)
updateResultCache && updateResultCache(res)
@@ -338,7 +375,7 @@ const StepTwo = ({
else {
res = await createDocument({
datasetId,
- body: params,
+ body: params as CreateDocumentReq,
})
updateIndexingTypeCache && updateIndexingTypeCache(indexType as string)
updateResultCache && updateResultCache(res)
@@ -441,6 +478,18 @@ const StepTwo = ({
}
}, [segmentationType, indexType])
+ const [retrievalConfig, setRetrievalConfig] = useState(currentDataset?.retrieval_model_dict || {
+ search_method: RETRIEVE_METHOD.semantic,
+ reranking_enable: false,
+ reranking_model: {
+ reranking_provider_name: rerankDefaultModel?.model_provider.provider_name,
+ reranking_model_name: rerankDefaultModel?.model_name,
+ },
+ top_k: 3,
+ score_threshold_enable: false,
+ score_threshold: 0.5,
+ } as RetrievalConfig)
+
return (
@@ -626,6 +675,56 @@ const StepTwo = ({
)}
)}
+ {/* Retrieval Method Config */}
+
+ {!datasetId
+ ? (
+
+ {t('datasetSettings.form.retrievalSetting.title')}
+
+
+ )
+ : (
+
+
{t('datasetSettings.form.retrievalSetting.title')}
+
+ )}
+
+
+ {!datasetId
+ ? (<>
+ {getIndexing_technique() === IndexingType.QUALIFIED
+ ? (
+
+ )
+ : (
+
+ )}
+ >)
+ : (
+
+
+
+ {t('datasetCreation.stepTwo.retrivalSettedTip')}
+ {t('datasetCreation.stepTwo.datasetSettingLink')}
+
+
+ )}
+
+
+
+
{dataSourceType === DataSourceType.FILE && (
diff --git a/web/app/components/datasets/documents/detail/completed/SegmentCard.tsx b/web/app/components/datasets/documents/detail/completed/SegmentCard.tsx
index 787bf1a79c49ec..d90a42b8d642ec 100644
--- a/web/app/components/datasets/documents/detail/completed/SegmentCard.tsx
+++ b/web/app/components/datasets/documents/detail/completed/SegmentCard.tsx
@@ -141,10 +141,16 @@ const SegmentCard: FC = ({
)}
>
- :
}
+ : (
+ score !== null
+ ? (
+
+ )
+ : null
+ )}
{loading
? (
diff --git a/web/app/components/datasets/hit-testing/index.tsx b/web/app/components/datasets/hit-testing/index.tsx
index 683d00b1f1759d..2d996b20f82f3c 100644
--- a/web/app/components/datasets/hit-testing/index.tsx
+++ b/web/app/components/datasets/hit-testing/index.tsx
@@ -6,16 +6,20 @@ import useSWR from 'swr'
import { omit } from 'lodash-es'
import cn from 'classnames'
import dayjs from 'dayjs'
+import { useContext } from 'use-context-selector'
import SegmentCard from '../documents/detail/completed/SegmentCard'
import docStyle from '../documents/detail/completed/style.module.css'
import Textarea from './textarea'
import s from './style.module.css'
import HitDetail from './hit-detail'
+import ModifyRetrievalModal from './modify-retrieval-modal'
import type { HitTestingResponse, HitTesting as HitTestingType } from '@/models/datasets'
import Loading from '@/app/components/base/loading'
import Modal from '@/app/components/base/modal'
import Pagination from '@/app/components/base/pagination'
import { fetchTestingRecords } from '@/service/datasets'
+import DatasetDetailContext from '@/context/dataset-detail'
+import type { RetrievalConfig } from '@/types/app'
const limit = 10
@@ -55,6 +59,11 @@ const HitTesting: FC
= ({ datasetId }: Props) => {
setCurrParagraph({ paraInfo: detail, showModal: true })
}
+ const { dataset: currentDataset } = useContext(DatasetDetailContext)
+
+ const [retrievalConfig, setRetrievalConfig] = useState(currentDataset?.retrieval_model_dict as RetrievalConfig)
+ const [isShowModifyRetrievalModal, setIsShowModifyRetrievalModal] = useState(false)
+
return (
@@ -70,6 +79,9 @@ const HitTesting: FC
= ({ datasetId }: Props) => {
setLoading={setSubmitLoading}
setText={setText}
text={text}
+ onClickRetrievalMethod={() => setIsShowModifyRetrievalModal(true)}
+ retrievalConfig={retrievalConfig}
+ isEconomy={currentDataset?.indexing_technique === 'economy'}
/>
{t('datasetHitTesting.recents')}
{(!recordsRes && !error)
@@ -178,6 +190,18 @@ const HitTesting: FC = ({ datasetId }: Props) => {
}}
/>}
+ {isShowModifyRetrievalModal && (
+ setIsShowModifyRetrievalModal(false)}
+ onSave={(value) => {
+ setRetrievalConfig(value)
+ setIsShowModifyRetrievalModal(false)
+ }}
+ />
+ )}
)
}
diff --git a/web/app/components/datasets/hit-testing/modify-retrieval-modal.tsx b/web/app/components/datasets/hit-testing/modify-retrieval-modal.tsx
new file mode 100644
index 00000000000000..31524ad1dcc9fc
--- /dev/null
+++ b/web/app/components/datasets/hit-testing/modify-retrieval-modal.tsx
@@ -0,0 +1,123 @@
+'use client'
+import type { FC } from 'react'
+import React, { useRef, useState } from 'react'
+import { useTranslation } from 'react-i18next'
+import Toast from '../../base/toast'
+import { XClose } from '@/app/components/base/icons/src/vender/line/general'
+import type { RetrievalConfig } from '@/types/app'
+import RetrievalMethodConfig from '@/app/components/datasets/common/retrieval-method-config'
+import EconomicalRetrievalMethodConfig from '@/app/components/datasets/common/economical-retrieval-method-config'
+import Button from '@/app/components/base/button'
+import { useProviderContext } from '@/context/provider-context'
+import { ensureRerankModelSelected, isReRankModelSelected } from '@/app/components/datasets/common/check-rerank-model'
+
+type Props = {
+ indexMethod: string
+ value: RetrievalConfig
+ isShow: boolean
+ onHide: () => void
+ onSave: (value: RetrievalConfig) => void
+}
+
+const ModifyRetrievalModal: FC
= ({
+ indexMethod,
+ value,
+ isShow,
+ onHide,
+ onSave,
+}) => {
+ const ref = useRef(null)
+ const { t } = useTranslation()
+ const [retrievalConfig, setRetrievalConfig] = useState(value)
+
+ // useClickAway(() => {
+ // if (ref)
+ // onHide()
+ // }, ref)
+
+ const {
+ rerankDefaultModel,
+ isRerankDefaultModelVaild,
+ rerankModelList,
+ } = useProviderContext()
+
+ const handleSave = () => {
+ if (
+ !isReRankModelSelected({
+ rerankDefaultModel,
+ isRerankDefaultModelVaild,
+ rerankModelList,
+ retrievalConfig,
+ indexMethod,
+ })
+ ) {
+ Toast.notify({ type: 'error', message: t('appDebug.datasetConfig.rerankModelRequired') })
+ return
+ }
+ onSave(ensureRerankModelSelected({
+ rerankDefaultModel: rerankDefaultModel!,
+ retrievalConfig,
+ indexMethod,
+ }))
+ }
+
+ if (!isShow)
+ return null
+
+ return (
+
+
+
+
{t('datasetSettings.form.retrievalSetting.title')}
+
+
+
+
+
+
+ {indexMethod === 'high_quality'
+ ? (
+
+ )
+ : (
+
+ )}
+
+
+ {t('common.operation.cancel')}
+ {t('common.operation.save')}
+
+
+ )
+}
+export default React.memo(ModifyRetrievalModal)
diff --git a/web/app/components/datasets/hit-testing/style.module.css b/web/app/components/datasets/hit-testing/style.module.css
index 4c05f2be3cc830..71629eda4ccc6d 100644
--- a/web/app/components/datasets/hit-testing/style.module.css
+++ b/web/app/components/datasets/hit-testing/style.module.css
@@ -20,8 +20,8 @@
@apply text-sm font-normal text-gray-500;
}
.textarea {
- min-height: 96px;
- @apply border-none resize-none font-normal caret-primary-600 text-gray-700 text-sm w-full bg-gray-25 focus-visible:outline-none placeholder:text-gray-300 placeholder:text-sm placeholder:font-normal !important;
+ height: 220px;
+ @apply border-none resize-none font-normal caret-primary-600 text-gray-700 text-sm w-full focus-visible:outline-none placeholder:text-gray-300 placeholder:text-sm placeholder:font-normal !important;
}
.table {
@apply text-[13px] text-gray-500;
@@ -46,7 +46,7 @@
}
.wrapper {
- @apply relative border border-primary-600 min-h-[200px] rounded-xl pt-3 pb-14 px-4 bg-gray-25;
+ @apply relative border border-primary-600 rounded-xl;
}
.cardWrapper {
diff --git a/web/app/components/datasets/hit-testing/textarea.tsx b/web/app/components/datasets/hit-testing/textarea.tsx
index 4dcacf11f66abf..824136a3cee1da 100644
--- a/web/app/components/datasets/hit-testing/textarea.tsx
+++ b/web/app/components/datasets/hit-testing/textarea.tsx
@@ -1,26 +1,30 @@
-import type { FC } from "react";
+import type { FC } from 'react'
import { useContext } from 'use-context-selector'
-import { DocumentTextIcon } from "@heroicons/react/24/solid";
-import { useTranslation } from "react-i18next";
-import { hitTesting } from "@/service/datasets";
+import { useTranslation } from 'react-i18next'
+import cn from 'classnames'
+import Button from '../../base/button'
+import Tag from '../../base/tag'
+import Tooltip from '../../base/tooltip'
+import { getIcon } from '../common/retrieval-method-info'
+import s from './style.module.css'
import DatasetDetailContext from '@/context/dataset-detail'
-import { HitTestingResponse } from "@/models/datasets";
-import cn from "classnames";
-import Button from "../../base/button";
-import Tag from "../../base/tag";
-import Tooltip from "../../base/tooltip";
-import s from "./style.module.css";
-import { asyncRunSafe } from "@/utils";
+import type { HitTestingResponse } from '@/models/datasets'
+import { hitTesting } from '@/service/datasets'
+import { asyncRunSafe } from '@/utils'
+import { RETRIEVE_METHOD, type RetrievalConfig } from '@/types/app'
type Props = {
- datasetId: string;
- onUpdateList: () => void;
- setHitResult: (res: HitTestingResponse) => void;
- loading: boolean;
- setLoading: (v: boolean) => void;
- text: string;
- setText: (v: string) => void;
-};
+ datasetId: string
+ onUpdateList: () => void
+ setHitResult: (res: HitTestingResponse) => void
+ loading: boolean
+ setLoading: (v: boolean) => void
+ text: string
+ setText: (v: string) => void
+ onClickRetrievalMethod: () => void
+ retrievalConfig: RetrievalConfig
+ isEconomy: boolean
+}
const TextAreaWithButton: FC = ({
datasetId,
@@ -30,87 +34,109 @@ const TextAreaWithButton: FC = ({
loading,
text,
setText,
+ onClickRetrievalMethod,
+ retrievalConfig,
+ isEconomy,
}) => {
- const { t } = useTranslation();
+ const { t } = useTranslation()
const { indexingTechnique } = useContext(DatasetDetailContext)
- // 处理文本框内容变化的函数
function handleTextChange(event: any) {
- setText(event.target.value);
+ setText(event.target.value)
}
- // 处理按钮点击的函数
const onSubmit = async () => {
- setLoading(true);
+ setLoading(true)
const [e, res] = await asyncRunSafe(
- hitTesting({ datasetId, queryText: text }) as Promise
- );
+ hitTesting({ datasetId, queryText: text, retrieval_model: retrievalConfig }) as Promise,
+ )
if (!e) {
- setHitResult(res);
- onUpdateList?.();
+ setHitResult(res)
+ onUpdateList?.()
}
- setLoading(false);
- };
+ setLoading(false)
+ }
+ const retrievalMethod = isEconomy ? RETRIEVE_METHOD.invertedIndex : retrievalConfig.search_method
+ const Icon = getIcon(retrievalMethod)
return (
<>
-
-
-
- {t("datasetHitTesting.input.title")}
-
-
-
-
- {text?.length > 200 ? (
+
+
+
+ {t('datasetHitTesting.input.title')}
+
-
-
- {text?.length}
- /
+
+
+
{t(`dataset.retrieval.${retrievalMethod}.title`)}
+
+
+
+
+
+
+
+
+ {text?.length > 200
+ ? (
+
+
+
+ {text?.length}
+ /
200
+
+
+
+ )
+ : (
+
+ {text?.length}
+ /
+ 200
+ )}
+
+
+ 200)}
+ >
+ {t('datasetHitTesting.input.testing')}
+
- ) : (
-
- {text?.length}
- /
- 200
-
- )}
-
-
- 200)}
- >
- {t("datasetHitTesting.input.testing")}
-
-
-
+
+
>
- );
-};
+ )
+}
-export default TextAreaWithButton;
+export default TextAreaWithButton
diff --git a/web/app/components/datasets/settings/form/index.tsx b/web/app/components/datasets/settings/form/index.tsx
index 9aac9e145ea823..4afddb82241a4d 100644
--- a/web/app/components/datasets/settings/form/index.tsx
+++ b/web/app/components/datasets/settings/form/index.tsx
@@ -9,6 +9,8 @@ import { useSWRConfig } from 'swr'
import { unstable_serialize } from 'swr/infinite'
import PermissionsRadio from '../permissions-radio'
import IndexMethodRadio from '../index-method-radio'
+import RetrievalMethodConfig from '@/app/components/datasets/common/retrieval-method-config'
+import EconomicalRetrievalMethodConfig from '@/app/components/datasets/common/economical-retrieval-method-config'
import { ToastContext } from '@/app/components/base/toast'
import Button from '@/app/components/base/button'
import { updateDatasetSetting } from '@/service/datasets'
@@ -17,8 +19,10 @@ import ModelSelector from '@/app/components/header/account-setting/model-page/mo
import type { ProviderEnum } from '@/app/components/header/account-setting/model-page/declarations'
import { ModelType } from '@/app/components/header/account-setting/model-page/declarations'
import DatasetDetailContext from '@/context/dataset-detail'
+import { type RetrievalConfig } from '@/types/app'
import { useModalContext } from '@/context/modal-context'
-
+import { useProviderContext } from '@/context/provider-context'
+import { ensureRerankModelSelected, isReRankModelSelected } from '@/app/components/datasets/common/check-rerank-model'
const rowClass = `
flex justify-between py-4
`
@@ -51,6 +55,13 @@ const Form = () => {
const [description, setDescription] = useState(currentDataset?.description ?? '')
const [permission, setPermission] = useState(currentDataset?.permission)
const [indexMethod, setIndexMethod] = useState(currentDataset?.indexing_technique)
+ const [retrievalConfig, setRetrievalConfig] = useState(currentDataset?.retrieval_model_dict as RetrievalConfig)
+ const {
+ rerankDefaultModel,
+ isRerankDefaultModelVaild,
+ rerankModelList,
+ } = useProviderContext()
+
const handleSave = async () => {
if (loading)
return
@@ -58,6 +69,23 @@ const Form = () => {
notify({ type: 'error', message: t('datasetSettings.form.nameError') })
return
}
+ if (
+ !isReRankModelSelected({
+ rerankDefaultModel,
+ isRerankDefaultModelVaild,
+ rerankModelList,
+ retrievalConfig,
+ indexMethod,
+ })
+ ) {
+ notify({ type: 'error', message: t('appDebug.datasetConfig.rerankModelRequired') })
+ return
+ }
+ const postRetrievalConfig = ensureRerankModelSelected({
+ rerankDefaultModel: rerankDefaultModel!,
+ retrievalConfig,
+ indexMethod,
+ })
try {
setLoading(true)
await updateDatasetSetting({
@@ -67,6 +95,7 @@ const Form = () => {
description,
permission,
indexing_technique: indexMethod,
+ retrieval_model: postRetrievalConfig,
},
})
notify({ type: 'success', message: t('common.actionMsg.modifiedSuccessfully') })
@@ -172,6 +201,33 @@ const Form = () => {
)}
+ {/* Retrieval Method Config */}
+
+
+
+
{t('datasetSettings.form.retrievalSetting.title')}
+
+
+
+
+ {indexMethod === 'high_quality'
+ ? (
+
+ )
+ : (
+
+ )}
+
+
{currentDataset?.embedding_available && (
diff --git a/web/app/components/datasets/settings/permissions-radio/index.tsx b/web/app/components/datasets/settings/permissions-radio/index.tsx
index 3f83f32377363f..edd5f6888d3033 100644
--- a/web/app/components/datasets/settings/permissions-radio/index.tsx
+++ b/web/app/components/datasets/settings/permissions-radio/index.tsx
@@ -13,12 +13,14 @@ const radioClass = `
type IPermissionsRadioProps = {
value?: DataSet['permission']
onChange: (v?: DataSet['permission']) => void
+ itemClassName?: string
disable?: boolean
}
const PermissionsRadio = ({
value,
onChange,
+ itemClassName,
disable,
}: IPermissionsRadioProps) => {
const { t } = useTranslation()
@@ -41,6 +43,7 @@ const PermissionsRadio = ({
key={option.key}
className={classNames(
itemClass,
+ itemClassName,
s.item,
option.key === value && s['item-active'],
disable && s.disable,
diff --git a/web/app/components/explore/universal-chat/index.tsx b/web/app/components/explore/universal-chat/index.tsx
index 5fb4b7d932722d..1ac6dfaeaa916c 100644
--- a/web/app/components/explore/universal-chat/index.tsx
+++ b/web/app/components/explore/universal-chat/index.tsx
@@ -688,6 +688,7 @@ const Main: FC
= () => {
onUnpin={handleUnpin}
controlUpdateList={controlUpdateConversationList}
onDelete={handleDelete}
+ onStartChat={() => handleConversationIdChange('-1')}
/>
)
}
diff --git a/web/app/components/header/account-setting/model-page/configs/cohere.tsx b/web/app/components/header/account-setting/model-page/configs/cohere.tsx
new file mode 100644
index 00000000000000..11bad8a9d7afe5
--- /dev/null
+++ b/web/app/components/header/account-setting/model-page/configs/cohere.tsx
@@ -0,0 +1,57 @@
+import { ProviderEnum } from '../declarations'
+import type { ProviderConfig } from '../declarations'
+import { Cohere, CohereText } from '@/app/components/base/icons/src/public/llm'
+
+const config: ProviderConfig = {
+ selector: {
+ name: {
+ 'en': 'cohere',
+ 'zh-Hans': 'cohere',
+ },
+ icon: ,
+ },
+ item: {
+ key: ProviderEnum.cohere,
+ titleIcon: {
+ 'en': ,
+ 'zh-Hans': ,
+ },
+ hit: {
+ 'en': 'Rerank Model Supported',
+ 'zh-Hans': '支持 Rerank 模型',
+ },
+ },
+ modal: {
+ key: ProviderEnum.cohere,
+ title: {
+ 'en': 'Rerank Model',
+ 'zh-Hans': 'Rerank 模型',
+ },
+ icon: ,
+ link: {
+ href: 'https://dashboard.cohere.com/api-keys',
+ label: {
+ 'en': 'Get your API key from cohere',
+ 'zh-Hans': '从 cohere 获取 API Key',
+ },
+ },
+ validateKeys: ['api_key'],
+ fields: [
+ {
+ type: 'text',
+ key: 'api_key',
+ required: true,
+ label: {
+ 'en': 'API Key',
+ 'zh-Hans': 'API Key',
+ },
+ placeholder: {
+ 'en': 'Enter your API key here',
+ 'zh-Hans': '在此输入您的 API Key',
+ },
+ },
+ ],
+ },
+}
+
+export default config
diff --git a/web/app/components/header/account-setting/model-page/configs/index.ts b/web/app/components/header/account-setting/model-page/configs/index.ts
index ef36eff0309ad5..cee0ad8214e0f6 100644
--- a/web/app/components/header/account-setting/model-page/configs/index.ts
+++ b/web/app/components/header/account-setting/model-page/configs/index.ts
@@ -13,6 +13,7 @@ import openllm from './openllm'
import localai from './localai'
import zhipuai from './zhipuai'
import baichuan from './baichuan'
+import cohere from './cohere'
export default {
openai,
@@ -30,4 +31,5 @@ export default {
localai,
zhipuai,
baichuan,
+ cohere,
}
diff --git a/web/app/components/header/account-setting/model-page/declarations.ts b/web/app/components/header/account-setting/model-page/declarations.ts
index 924d20024aaf59..ba44844eaad60f 100644
--- a/web/app/components/header/account-setting/model-page/declarations.ts
+++ b/web/app/components/header/account-setting/model-page/declarations.ts
@@ -45,6 +45,7 @@ export enum ProviderEnum {
'localai' = 'localai',
'zhipuai' = 'zhipuai',
'baichuan' = 'baichuan',
+ 'cohere' = 'cohere',
}
export type ProviderConfigItem = {
@@ -67,6 +68,7 @@ export enum ModelType {
textGeneration = 'text-generation',
embeddings = 'embeddings',
speech2text = 'speech2text',
+ reranking = 'reranking',
}
export enum ModelFeature {
diff --git a/web/app/components/header/account-setting/model-page/index.tsx b/web/app/components/header/account-setting/model-page/index.tsx
index d7cc899fa3177a..dce2005cf638d6 100644
--- a/web/app/components/header/account-setting/model-page/index.tsx
+++ b/web/app/components/header/account-setting/model-page/index.tsx
@@ -3,47 +3,36 @@ import useSWR from 'swr'
import { useTranslation } from 'react-i18next'
import { useContext } from 'use-context-selector'
import type {
- BackendModel,
FormValue,
ProviderConfigModal,
ProviderEnum,
} from './declarations'
-import ModelSelector from './model-selector'
import ModelCard from './model-card'
import ModelItem from './model-item'
import ModelModal from './model-modal'
+import SystemModel from './system-model'
import config from './configs'
import { ConfigurableProviders } from './utils'
-import { HelpCircle } from '@/app/components/base/icons/src/vender/line/general'
import {
changeModelProviderPriority,
deleteModelProvider,
deleteModelProviderModel,
- fetchDefaultModal,
fetchModelProviders,
setModelProvider,
- updateDefaultModel,
} from '@/service/common'
import { useToastContext } from '@/app/components/base/toast'
import Confirm from '@/app/components/base/confirm/common'
import { ModelType } from '@/app/components/header/account-setting/model-page/declarations'
import { useEventEmitterContextContext } from '@/context/event-emitter'
import { useProviderContext } from '@/context/provider-context'
-import Tooltip from '@/app/components/base/tooltip'
import I18n from '@/context/i18n'
+import { AlertTriangle } from '@/app/components/base/icons/src/vender/solid/alertsAndFeedback'
const MODEL_CARD_LIST = [
config.openai,
config.anthropic,
]
-const titleClassName = `
-flex items-center h-9 text-sm font-medium text-gray-900
-`
-const tipClassName = `
-ml-0.5 w-[14px] h-[14px] text-gray-400
-`
-
type DeleteModel = {
model_name: string
model_type: string
@@ -54,13 +43,12 @@ const ModelPage = () => {
const { locale } = useContext(I18n)
const {
updateModelList,
+ textGenerationDefaultModel,
embeddingsDefaultModel,
- mutateEmbeddingsDefaultModel,
speech2textDefaultModel,
- mutateSpeech2textDefaultModel,
+ rerankDefaultModel,
} = useProviderContext()
const { data: providers, mutate: mutateProviders } = useSWR('/workspaces/current/model-providers', fetchModelProviders)
- const { data: textGenerationDefaultModel, mutate: mutateTextGenerationDefaultModel } = useSWR('/workspaces/current/default-model?model_type=text-generation', fetchDefaultModal)
const [showModal, setShowModal] = useState(false)
const { notify } = useToastContext()
const { eventEmitter } = useEventEmitterContextContext()
@@ -76,6 +64,7 @@ const ModelPage = () => {
config.azure_openai,
config.replicate,
config.huggingface_hub,
+ config.cohere,
config.zhipuai,
config.baichuan,
config.spark,
@@ -91,6 +80,7 @@ const ModelPage = () => {
else {
modelList = [
config.huggingface_hub,
+ config.cohere,
config.zhipuai,
config.spark,
config.baichuan,
@@ -127,6 +117,7 @@ const ModelPage = () => {
updateModelList(ModelType.textGeneration)
updateModelList(ModelType.embeddings)
updateModelList(ModelType.speech2text)
+ updateModelList(ModelType.reranking)
mutateProviders()
}
const handleSave = async (originValue?: FormValue) => {
@@ -210,95 +201,23 @@ const ModelPage = () => {
}
}
- const mutateDefaultModel = (type: ModelType) => {
- if (type === ModelType.textGeneration)
- mutateTextGenerationDefaultModel()
- if (type === ModelType.embeddings)
- mutateEmbeddingsDefaultModel()
- if (type === ModelType.speech2text)
- mutateSpeech2textDefaultModel()
- }
- const handleChangeDefaultModel = async (type: ModelType, v: BackendModel) => {
- const res = await updateDefaultModel({
- url: '/workspaces/current/default-model',
- body: {
- model_type: type,
- provider_name: v.model_provider.provider_name,
- model_name: v.model_name,
- },
- })
- if (res.result === 'success') {
- notify({ type: 'success', message: t('common.actionMsg.modifiedSuccessfully') })
- mutateDefaultModel(type)
- }
- }
+ const defaultModelNotConfigured = !textGenerationDefaultModel && !embeddingsDefaultModel && !speech2textDefaultModel && !rerankDefaultModel
return (
-
-
-
- {t('common.modelProvider.systemReasoningModel.key')}
- {t('common.modelProvider.systemReasoningModel.tip')}
- }
- >
-
-
-
-
- handleChangeDefaultModel(ModelType.textGeneration, v)}
- />
-
-
-
-
- {t('common.modelProvider.embeddingModel.key')}
- {t('common.modelProvider.embeddingModel.tip')}
- }
- >
-
-
-
-
- handleChangeDefaultModel(ModelType.embeddings, v)}
- />
-
-
-
-
- {t('common.modelProvider.speechToTextModel.key')}
- {t('common.modelProvider.speechToTextModel.tip')}
- }
- >
-
-
-
-
- handleChangeDefaultModel(ModelType.speech2text, v)}
- />
-
-
+
+ {
+ defaultModelNotConfigured
+ ? (
+
+
+ {t('common.modelProvider.notConfigured')}
+
+ )
+ :
{t('common.modelProvider.models')}
+ }
+
mutateProviders()} />
-
- {t('common.modelProvider.models')}
{
MODEL_CARD_LIST.map((model, index) => (
diff --git a/web/app/components/header/account-setting/model-page/model-modal/index.tsx b/web/app/components/header/account-setting/model-page/model-modal/index.tsx
index 385e064ffb6e91..88c60c8c13b9b4 100644
--- a/web/app/components/header/account-setting/model-page/model-modal/index.tsx
+++ b/web/app/components/header/account-setting/model-page/model-modal/index.tsx
@@ -2,7 +2,6 @@ import { useCallback, useState } from 'react'
import type { FC } from 'react'
import { useTranslation } from 'react-i18next'
import { useContext } from 'use-context-selector'
-import { Portal } from '@headlessui/react'
import type { FormValue, ProviderConfigModal } from '../declarations'
import { ConfigurableProviders } from '../utils'
import Form from './Form'
@@ -12,6 +11,10 @@ import { Lock01 } from '@/app/components/base/icons/src/vender/solid/security'
import { LinkExternal02 } from '@/app/components/base/icons/src/vender/line/general'
import { AlertCircle } from '@/app/components/base/icons/src/vender/solid/alertsAndFeedback'
import { useEventEmitterContextContext } from '@/context/event-emitter'
+import {
+ PortalToFollowElem,
+ PortalToFollowElemContent,
+} from '@/app/components/base/portal-to-follow-elem'
type ModelModalProps = {
isShow: boolean
@@ -90,75 +93,77 @@ const ModelModal: FC
= ({
return null
return (
-
-
-
-
-
-
{renderTitlePrefix()}
- {modelModal?.icon}
-
-
+
+
+ {t('common.modelProvider.speechToTextModel.key')}
+ {t('common.modelProvider.speechToTextModel.tip')}
+ }
+ >
+
+
+
+
+ handleChangeDefaultModel(ModelType.speech2text, v)}
+ />
+
+
+
+ setOpen(false)}
+ >
+ {t('common.operation.cancel')}
+
+
+ {t('common.operation.save')}
+
+
+
+
+
+ )
+}
+
+export default SystemModel
diff --git a/web/app/components/header/index.tsx b/web/app/components/header/index.tsx
index 0500c86aef704e..a81c6c6f6ca03e 100644
--- a/web/app/components/header/index.tsx
+++ b/web/app/components/header/index.tsx
@@ -7,7 +7,6 @@ import DatasetNav from './dataset-nav'
import EnvNav from './env-nav'
import ExploreNav from './explore-nav'
import GithubStar from './github-star'
-import PluginNav from './plugin-nav'
import { WorkspaceProvider } from '@/context/workspace-context'
import { useAppContext } from '@/context/app-context'
import LogoSite from '@/app/components/base/logo/logo-site'
@@ -31,7 +30,6 @@ const Header = () => {
-
{isCurrentWorkspaceManager &&
}
diff --git a/web/app/components/header/plugin-nav/index.tsx b/web/app/components/header/plugin-nav/index.tsx
deleted file mode 100644
index 7a838540496483..00000000000000
--- a/web/app/components/header/plugin-nav/index.tsx
+++ /dev/null
@@ -1,37 +0,0 @@
-'use client'
-
-import { useTranslation } from 'react-i18next'
-import Link from 'next/link'
-import { useSelectedLayoutSegment } from 'next/navigation'
-import classNames from 'classnames'
-import { PuzzlePiece01 } from '@/app/components/base/icons/src/vender/line/development'
-import { PuzzlePiece01 as PuzzlePiece01Solid } from '@/app/components/base/icons/src/vender/solid/development'
-
-type PluginNavProps = {
- className?: string
-}
-
-const PluginNav = ({
- className,
-}: PluginNavProps) => {
- const { t } = useTranslation()
- const selectedSegment = useSelectedLayoutSegment()
- const isPluginsComingSoon = selectedSegment === 'plugins-coming-soon'
-
- return (
-
- {
- isPluginsComingSoon
- ?
- :
- }
- {t('common.menus.plugins')}
-
- )
-}
-
-export default PluginNav
diff --git a/web/app/components/share/chat/index.tsx b/web/app/components/share/chat/index.tsx
index 39a1a733336c94..58d495a37faca3 100644
--- a/web/app/components/share/chat/index.tsx
+++ b/web/app/components/share/chat/index.tsx
@@ -668,6 +668,7 @@ const Main: FC
= ({
onUnpin={handleUnpin}
controlUpdateList={controlUpdateConversationList}
onDelete={handleDelete}
+ onStartChat={() => handleConversationIdChange('-1')}
/>
)
}
diff --git a/web/app/components/share/chat/sidebar/index.tsx b/web/app/components/share/chat/sidebar/index.tsx
index d1f69272fbe9b3..51b564ace1b340 100644
--- a/web/app/components/share/chat/sidebar/index.tsx
+++ b/web/app/components/share/chat/sidebar/index.tsx
@@ -35,6 +35,7 @@ export type ISidebarProps = {
onUnpin: (id: string) => void
controlUpdateList: number
onDelete: (id: string) => void
+ onStartChat: (inputs: Record) => void
}
const Sidebar: FC = ({
@@ -59,6 +60,7 @@ const Sidebar: FC = ({
onUnpin,
controlUpdateList,
onDelete,
+ onStartChat,
}) => {
const { t } = useTranslation()
const [hasPinned, setHasPinned] = useState(false)
@@ -104,7 +106,7 @@ const Sidebar: FC = ({
)}
{ onCurrentIdChange('-1') }}
+ onClick={() => onStartChat({})}
className="group block w-full flex-shrink-0 !justify-start !h-9 text-primary-600 items-center text-sm">
{t('share.chat.newChat')}
diff --git a/web/app/components/share/text-generation/run-once/index.tsx b/web/app/components/share/text-generation/run-once/index.tsx
index e51466f74aa448..77eb63a8ee306c 100644
--- a/web/app/components/share/text-generation/run-once/index.tsx
+++ b/web/app/components/share/text-generation/run-once/index.tsx
@@ -82,7 +82,7 @@ const RunOnce: FC
= ({
{
visionConfig?.enabled && (
-
Image Upload
+
{t('common.imageUploader.imageUpload')}
{
- © {new Date().getFullYear()} Dify, Inc. All rights reserved.
+ © {new Date().getFullYear()} LangGenius, Inc. All rights reserved.
diff --git a/web/config/index.ts b/web/config/index.ts
index fc5bad967786ac..b3db24d281e1fb 100644
--- a/web/config/index.ts
+++ b/web/config/index.ts
@@ -141,3 +141,8 @@ export const VAR_ITEM_TEMPLATE = {
export const appDefaultIconBackground = '#D5F5F6'
export const NEED_REFRESH_APP_LIST_KEY = 'needRefreshAppList'
+
+export const DATASET_DEFAULT = {
+ top_k: 2,
+ score_threshold: 0.5,
+}
diff --git a/web/context/debug-configuration.ts b/web/context/debug-configuration.ts
index 0c0d00e3de835e..9fc629e1ed1c3a 100644
--- a/web/context/debug-configuration.ts
+++ b/web/context/debug-configuration.ts
@@ -20,7 +20,7 @@ import type {
import type { ExternalDataTool } from '@/models/common'
import type { DataSet } from '@/models/datasets'
import type { VisionSettings } from '@/types/app'
-import { ModelModeType, Resolution, TransferMethod } from '@/types/app'
+import { ModelModeType, RETRIEVE_TYPE, Resolution, TransferMethod } from '@/types/app'
import { DEFAULT_CHAT_PROMPT_CONFIG, DEFAULT_COMPLETION_PROMPT_CONFIG } from '@/config'
type IDebugConfiguration = {
@@ -180,11 +180,14 @@ const DebugConfigurationContext = createContext
({
showSelectDataSet: () => { },
setDataSets: () => { },
datasetConfigs: {
- top_k: 2,
- score_threshold: {
- enable: false,
- value: 0.7,
+ retrieval_model: RETRIEVE_TYPE.oneWay,
+ reranking_model: {
+ reranking_provider_name: '',
+ reranking_model_name: '',
},
+ top_k: 2,
+ score_threshold_enabled: false,
+ score_threshold: 0.7,
},
setDatasetConfigs: () => {},
hasSetContextVar: false,
diff --git a/web/context/provider-context.tsx b/web/context/provider-context.tsx
index 46161fe6715672..26f3f9a712178c 100644
--- a/web/context/provider-context.tsx
+++ b/web/context/provider-context.tsx
@@ -2,29 +2,44 @@
import { createContext, useContext } from 'use-context-selector'
import useSWR from 'swr'
-import { fetchDefaultModal, fetchModelList } from '@/service/common'
+import { fetchDefaultModal, fetchModelList, fetchSupportRetrievalMethods } from '@/service/common'
import { ModelFeature, ModelType } from '@/app/components/header/account-setting/model-page/declarations'
import type { BackendModel } from '@/app/components/header/account-setting/model-page/declarations'
+import type { RETRIEVE_METHOD } from '@/types/app'
const ProviderContext = createContext<{
textGenerationModelList: BackendModel[]
embeddingsModelList: BackendModel[]
speech2textModelList: BackendModel[]
+ rerankModelList: BackendModel[]
agentThoughtModelList: BackendModel[]
updateModelList: (type: ModelType) => void
+ textGenerationDefaultModel?: BackendModel
+ mutateTextGenerationDefaultModel: () => void
embeddingsDefaultModel?: BackendModel
mutateEmbeddingsDefaultModel: () => void
speech2textDefaultModel?: BackendModel
mutateSpeech2textDefaultModel: () => void
+ rerankDefaultModel?: BackendModel
+ isRerankDefaultModelVaild: boolean
+ mutateRerankDefaultModel: () => void
+ supportRetrievalMethods: RETRIEVE_METHOD[]
}>({
textGenerationModelList: [],
embeddingsModelList: [],
speech2textModelList: [],
+ rerankModelList: [],
agentThoughtModelList: [],
updateModelList: () => {},
+ textGenerationDefaultModel: undefined,
+ mutateTextGenerationDefaultModel: () => {},
speech2textDefaultModel: undefined,
mutateSpeech2textDefaultModel: () => {},
embeddingsDefaultModel: undefined,
mutateEmbeddingsDefaultModel: () => {},
+ rerankDefaultModel: undefined,
+ isRerankDefaultModelVaild: false,
+ mutateRerankDefaultModel: () => {},
+ supportRetrievalMethods: [],
})
export const useProviderContext = () => useContext(ProviderContext)
@@ -35,21 +50,34 @@ type ProviderContextProviderProps = {
export const ProviderContextProvider = ({
children,
}: ProviderContextProviderProps) => {
+ const { data: textGenerationDefaultModel, mutate: mutateTextGenerationDefaultModel } = useSWR('/workspaces/current/default-model?model_type=text-generation', fetchDefaultModal)
const { data: embeddingsDefaultModel, mutate: mutateEmbeddingsDefaultModel } = useSWR('/workspaces/current/default-model?model_type=embeddings', fetchDefaultModal)
const { data: speech2textDefaultModel, mutate: mutateSpeech2textDefaultModel } = useSWR('/workspaces/current/default-model?model_type=speech2text', fetchDefaultModal)
+ const { data: rerankDefaultModel, mutate: mutateRerankDefaultModel } = useSWR('/workspaces/current/default-model?model_type=reranking', fetchDefaultModal)
const fetchModelListUrlPrefix = '/workspaces/current/models/model-type/'
const { data: textGenerationModelList, mutate: mutateTextGenerationModelList } = useSWR(`${fetchModelListUrlPrefix}${ModelType.textGeneration}`, fetchModelList)
const { data: embeddingsModelList, mutate: mutateEmbeddingsModelList } = useSWR(`${fetchModelListUrlPrefix}${ModelType.embeddings}`, fetchModelList)
- const { data: speech2textModelList } = useSWR(`${fetchModelListUrlPrefix}${ModelType.speech2text}`, fetchModelList)
+ const { data: speech2textModelList, mutate: mutateSpeech2textModelList } = useSWR(`${fetchModelListUrlPrefix}${ModelType.speech2text}`, fetchModelList)
+ const { data: rerankModelList, mutate: mutateRerankModelList } = useSWR(`${fetchModelListUrlPrefix}${ModelType.reranking}`, fetchModelList)
+ const { data: supportRetrievalMethods } = useSWR('/datasets/retrieval-setting', fetchSupportRetrievalMethods)
+
const agentThoughtModelList = textGenerationModelList?.filter((item) => {
return item.features?.includes(ModelFeature.agentThought)
})
+ const isRerankDefaultModelVaild = !!rerankModelList?.find(
+ item => item.model_name === rerankDefaultModel?.model_name && item.model_provider.provider_name === rerankDefaultModel?.model_provider.provider_name,
+ )
+
const updateModelList = (type: ModelType) => {
if (type === ModelType.textGeneration)
mutateTextGenerationModelList()
if (type === ModelType.embeddings)
mutateEmbeddingsModelList()
+ if (type === ModelType.speech2text)
+ mutateSpeech2textModelList()
+ if (type === ModelType.reranking)
+ mutateRerankModelList()
}
return (
@@ -57,12 +85,19 @@ export const ProviderContextProvider = ({
textGenerationModelList: textGenerationModelList || [],
embeddingsModelList: embeddingsModelList || [],
speech2textModelList: speech2textModelList || [],
+ rerankModelList: rerankModelList || [],
agentThoughtModelList: agentThoughtModelList || [],
updateModelList,
+ textGenerationDefaultModel,
+ mutateTextGenerationDefaultModel,
embeddingsDefaultModel,
mutateEmbeddingsDefaultModel,
speech2textDefaultModel,
mutateSpeech2textDefaultModel,
+ rerankDefaultModel,
+ isRerankDefaultModelVaild,
+ mutateRerankDefaultModel,
+ supportRetrievalMethods: supportRetrievalMethods?.retrieval_method || [],
}}>
{children}
diff --git a/web/global.d.ts b/web/global.d.ts
index 0dff35ce0dd0a1..b81cdddc77b562 100644
--- a/web/global.d.ts
+++ b/web/global.d.ts
@@ -1,2 +1,3 @@
declare module 'lamejs';
-declare module 'react-18-input-autosize';
\ No newline at end of file
+declare module 'react-18-input-autosize';
+
diff --git a/web/i18n/lang/app-debug.en.ts b/web/i18n/lang/app-debug.en.ts
index 69ac120674daa0..a6f23817779783 100644
--- a/web/i18n/lang/app-debug.en.ts
+++ b/web/i18n/lang/app-debug.en.ts
@@ -305,11 +305,22 @@ const translation = {
},
result: 'Output Text',
datasetConfig: {
+ settingTitle: 'Retrieval settings',
+ retrieveOneWay: {
+ title: 'N-to-1 retrieval',
+ description: 'Based on user intent and dataset descriptions, the Agent autonomously selects the best dataset for querying. Best for applications with distinct, limited datasets.',
+ },
+ retrieveMultiWay: {
+ title: 'Multi-path retrieval',
+ description: 'Based on user intent, queries across all datasets, retrieves relevant text from multi-sources, and selects the best results matching the user query after reranking. Configuration of the Rerank model API is required.',
+ },
+ rerankModelRequired: 'Rerank model is required',
params: 'Params',
top_k: 'Top K',
top_kTip: 'Used to filter segments that are most similar to user questions. The system will also dynamically adjust the value of Top K, according to max_tokens of the selected model.',
score_threshold: 'Score Threshold',
score_thresholdTip: 'Used to set the similarity threshold for segment filtering.',
+ retrieveChangeTip: 'Modifying the index mode and retrieval mode may affect applications associated with this dataset.',
},
}
diff --git a/web/i18n/lang/app-debug.zh.ts b/web/i18n/lang/app-debug.zh.ts
index 4b42516072802c..f14f739149789e 100644
--- a/web/i18n/lang/app-debug.zh.ts
+++ b/web/i18n/lang/app-debug.zh.ts
@@ -299,11 +299,22 @@ const translation = {
},
result: '结果',
datasetConfig: {
+ settingTitle: '召回设置',
+ retrieveOneWay: {
+ title: 'N选1召回',
+ description: '根据用户意图和数据集描述,由 Agent 自主判断选择最匹配的单个数据集来查询相关文本,适合数据集区分度大且数据集数量偏少的应用。',
+ },
+ retrieveMultiWay: {
+ title: '多路召回',
+ description: '根据用户意图同时匹配所有数据集,从多路数据集查询相关文本片段,经过重排序步骤,从多路查询结果中选择匹配用户问题的最佳结果,需配置 Rerank 模型 API。',
+ },
+ rerankModelRequired: '请选择 Rerank 模型',
params: '参数设置',
top_k: 'Top K',
top_kTip: '用于筛选与用户问题相似度最高的文本片段。系统同时会根据选用模型上下文窗口大小动态调整分段数量。',
score_threshold: 'Score 阈值',
score_thresholdTip: '用于设置文本片段筛选的相似度阈值。',
+ retrieveChangeTip: '修改索引模式和检索模式可能会影响与该数据集关联的应用程序。',
},
}
diff --git a/web/i18n/lang/common.en.ts b/web/i18n/lang/common.en.ts
index 664f480224cf6f..2e2be7f36e443f 100644
--- a/web/i18n/lang/common.en.ts
+++ b/web/i18n/lang/common.en.ts
@@ -95,7 +95,7 @@ const translation = {
settings: 'Settings',
workspace: 'Workspace',
createWorkspace: 'Create Workspace',
- helpCenter: 'Help Document',
+ helpCenter: 'Help',
about: 'About',
logout: 'Log out',
},
@@ -223,6 +223,9 @@ const translation = {
},
},
modelProvider: {
+ notConfigured: 'The system model has not yet been fully configured, and some functions may be unavailable.',
+ systemModelSettings: 'System Model Settings',
+ systemModelSettingsLink: 'Why is it necessary to set up a system model?',
selectModel: 'Select your model',
setupModelFirst: 'Please set up your model first',
systemReasoningModel: {
@@ -237,6 +240,10 @@ const translation = {
key: 'Speech-to-Text Model',
tip: 'Set the default model for speech-to-text input in conversation.',
},
+ rerankModel: {
+ key: 'Rerank Model',
+ tip: 'Rerank model will reorder the candidate document list based on the semantic match with user query, improving the results of semantic ranking',
+ },
quota: 'Quota',
searchModel: 'Search model',
noModelFound: 'No model found for {{model}}',
@@ -244,6 +251,9 @@ const translation = {
showMoreModelProvider: 'Show more model provider',
selector: {
tip: 'This model has been removed. Please add a model or select another model.',
+ emptyTip: 'No available models',
+ emptySetting: 'Please go to settings to configure',
+ rerankTip: 'Please set up the Rerank model',
},
card: {
quota: 'QUOTA',
@@ -351,7 +361,7 @@ const translation = {
},
datasetMenus: {
documents: 'Documents',
- hitTesting: 'Hit Testing',
+ hitTesting: 'Retrieval Testing',
settings: 'Settings',
emptyTip: 'The data set has not been associated, please go to the application or plug-in to complete the association.',
viewDoc: 'View documentation',
@@ -382,9 +392,9 @@ const translation = {
title: 'CITATIONS',
linkToDataset: 'Link to dataset',
characters: 'Characters:',
- hitCount: 'Hit count:',
+ hitCount: 'Retrieval count:',
vectorHash: 'Vector hash:',
- hitScore: 'Hit Score:',
+ hitScore: 'Retrieval Score:',
},
},
promptEditor: {
@@ -438,6 +448,7 @@ const translation = {
pasteImageLink: 'Paste image link',
pasteImageLinkInputPlaceholder: 'Paste image link here',
pasteImageLinkInvalid: 'Invalid image link',
+ imageUpload: 'Image Upload',
},
}
diff --git a/web/i18n/lang/common.zh.ts b/web/i18n/lang/common.zh.ts
index b0964380594b00..ed473514e1db2d 100644
--- a/web/i18n/lang/common.zh.ts
+++ b/web/i18n/lang/common.zh.ts
@@ -223,6 +223,9 @@ const translation = {
},
},
modelProvider: {
+ notConfigured: '系统模型尚未完全配置,部分功能可能无法使用。',
+ systemModelSettings: '系统模型设置',
+ systemModelSettingsLink: '为什么需要设置系统模型?',
selectModel: '选择您的模型',
setupModelFirst: '请先设置您的模型',
systemReasoningModel: {
@@ -237,6 +240,10 @@ const translation = {
key: '语音转文本模型',
tip: '设置对话中语音转文字输入的默认使用模型。',
},
+ rerankModel: {
+ key: 'Rerank 模型',
+ tip: '重排序模型将根据候选文档列表与用户问题语义匹配度进行重新排序,从而改进语义排序的结果',
+ },
quota: '额度',
searchModel: '搜索模型',
noModelFound: '找不到模型 {{model}}',
@@ -244,6 +251,9 @@ const translation = {
showMoreModelProvider: '显示更多模型提供商',
selector: {
tip: '该模型已被删除。请添模型或选择其他模型。',
+ emptyTip: '无可用模型',
+ emptySetting: '请前往设置进行配置',
+ rerankTip: '请设置 Rerank 模型',
},
card: {
quota: '额度',
@@ -351,7 +361,7 @@ const translation = {
},
datasetMenus: {
documents: '文档',
- hitTesting: '命中测试',
+ hitTesting: '召回测试',
settings: '设置',
emptyTip: ' 数据集尚未关联,请前往应用程序或插件完成关联。',
viewDoc: '查看文档',
@@ -382,9 +392,9 @@ const translation = {
title: '引用',
linkToDataset: '跳转至数据集',
characters: '字符:',
- hitCount: '命中次数:',
+ hitCount: '召回次数:',
vectorHash: '向量哈希:',
- hitScore: '命中得分:',
+ hitScore: '召回得分:',
},
},
promptEditor: {
@@ -438,6 +448,7 @@ const translation = {
pasteImageLink: '粘贴图片链接',
pasteImageLinkInputPlaceholder: '将图像链接粘贴到此处',
pasteImageLinkInvalid: '图片链接无效',
+ imageUpload: '图片上传',
},
}
diff --git a/web/i18n/lang/dataset-creation.en.ts b/web/i18n/lang/dataset-creation.en.ts
index 1f8919e878bd87..172468ed548a54 100644
--- a/web/i18n/lang/dataset-creation.en.ts
+++ b/web/i18n/lang/dataset-creation.en.ts
@@ -101,6 +101,7 @@ const translation = {
previewSwitchTipEnd: ' consume additional tokens',
characters: 'characters',
indexSettedTip: 'To change the index method, please go to the ',
+ retrivalSettedTip: 'To change the retrieval method, please go to the ',
datasetSettingLink: 'dataset settings.',
},
stepThree: {
diff --git a/web/i18n/lang/dataset-creation.zh.ts b/web/i18n/lang/dataset-creation.zh.ts
index 013161a6018716..9b3303896199be 100644
--- a/web/i18n/lang/dataset-creation.zh.ts
+++ b/web/i18n/lang/dataset-creation.zh.ts
@@ -101,6 +101,7 @@ const translation = {
previewSwitchTipEnd: '消耗额外的 token',
characters: '字符',
indexSettedTip: '要更改索引方法,请转到',
+ retrivalSettedTip: '要更改检索方法,请转到',
datasetSettingLink: '数据集设置。',
},
stepThree: {
diff --git a/web/i18n/lang/dataset-documents.en.ts b/web/i18n/lang/dataset-documents.en.ts
index c5fdcc4007f613..aeda1992af29c8 100644
--- a/web/i18n/lang/dataset-documents.en.ts
+++ b/web/i18n/lang/dataset-documents.en.ts
@@ -8,7 +8,7 @@ const translation = {
header: {
fileName: 'FILE NAME',
words: 'WORDS',
- hitCount: 'HIT COUNT',
+ hitCount: 'RETRIEVAL COUNT',
uploadTime: 'UPLOAD TIME',
status: 'STATUS',
action: 'ACTION',
@@ -216,7 +216,7 @@ const translation = {
segmentLength: 'Segment length',
avgParagraphLength: 'Avg. paragraph length',
paragraphs: 'Paragraphs',
- hitCount: 'Hit count',
+ hitCount: 'Retrieval count',
embeddingTime: 'Embedding time',
embeddedSpend: 'Embedded spend',
},
@@ -332,7 +332,7 @@ const translation = {
addKeyWord: 'Add key word',
keywordError: 'The maximum length of keyword is 20',
characters: 'characters',
- hitCount: 'hit count',
+ hitCount: 'Retrieval count',
vectorHash: 'Vector hash: ',
questionPlaceholder: 'add question here',
questionEmpty: 'Question can not be empty',
diff --git a/web/i18n/lang/dataset-documents.zh.ts b/web/i18n/lang/dataset-documents.zh.ts
index 0ea31d190b2800..0b1a9ec90a71e5 100644
--- a/web/i18n/lang/dataset-documents.zh.ts
+++ b/web/i18n/lang/dataset-documents.zh.ts
@@ -8,7 +8,7 @@ const translation = {
header: {
fileName: '文件名',
words: '字符数',
- hitCount: '命中次数',
+ hitCount: '召回次数',
uploadTime: '上传时间',
status: '状态',
action: '操作',
@@ -215,7 +215,7 @@ const translation = {
segmentLength: '段落长度',
avgParagraphLength: '平均段落长度',
paragraphs: '段落数量',
- hitCount: '命中次数',
+ hitCount: '召回次数',
embeddingTime: '嵌入时间',
embeddedSpend: '嵌入花费',
},
@@ -331,7 +331,7 @@ const translation = {
addKeyWord: '添加关键词',
keywordError: '关键词最大长度为 20',
characters: '字符',
- hitCount: '命中次数',
+ hitCount: '召回次数',
vectorHash: '向量哈希:',
questionPlaceholder: '在这里添加问题',
questionEmpty: '问题不能为空',
diff --git a/web/i18n/lang/dataset-hit-testing.en.ts b/web/i18n/lang/dataset-hit-testing.en.ts
index 4946b59d83c1e5..fa05810c57adad 100644
--- a/web/i18n/lang/dataset-hit-testing.en.ts
+++ b/web/i18n/lang/dataset-hit-testing.en.ts
@@ -1,13 +1,13 @@
const translation = {
- title: "Hit Testing",
- desc: "Test the hitting effect of the dataset based on the given query text.",
+ title: 'Retrieval Testing',
+ desc: 'Test the retrieval effect of the dataset based on the given query text.',
dateTimeFormat: 'MM/DD/YYYY hh:mm A',
recents: 'Recents',
table: {
header: {
- source: "Source",
- text: "Text",
- time: "Time",
+ source: 'Source',
+ text: 'Text',
+ time: 'Time',
},
},
input: {
@@ -18,11 +18,11 @@ const translation = {
testing: 'Testing',
},
hit: {
- title: "HIT PARAGRAPHS",
- emptyTip: 'Hit Testing results will show here',
+ title: 'RETRIEVAL PARAGRAPHS',
+ emptyTip: 'Retrieval Testing results will show here',
},
noRecentTip: 'No recent query results here',
viewChart: 'View VECTOR CHART',
-};
+}
-export default translation;
+export default translation
diff --git a/web/i18n/lang/dataset-hit-testing.zh.ts b/web/i18n/lang/dataset-hit-testing.zh.ts
index 0ef23cbd9bd8ff..cbda48482de8b8 100644
--- a/web/i18n/lang/dataset-hit-testing.zh.ts
+++ b/web/i18n/lang/dataset-hit-testing.zh.ts
@@ -1,13 +1,13 @@
const translation = {
- title: '命中测试',
- desc: '基于给定的查询文本测试数据集的命中效果。',
+ title: '召回测试',
+ desc: '基于给定的查询文本测试数据集的召回效果。',
dateTimeFormat: 'YYYY-MM-DD HH:mm',
recents: '最近查询',
table: {
header: {
- source: "数据源",
- text: "文本",
- time: "时间",
+ source: '数据源',
+ text: '文本',
+ time: '时间',
},
},
input: {
@@ -18,8 +18,8 @@ const translation = {
testing: '测试',
},
hit: {
- title: "命中段落",
- emptyTip: '命中测试结果将展示在这里',
+ title: '召回段落',
+ emptyTip: '召回测试结果将展示在这里',
},
noRecentTip: '最近无查询结果',
viewChart: '查看向量图表',
diff --git a/web/i18n/lang/dataset-settings.en.ts b/web/i18n/lang/dataset-settings.en.ts
index 9f37c9b37efa4c..2cc13eeb6b4aea 100644
--- a/web/i18n/lang/dataset-settings.en.ts
+++ b/web/i18n/lang/dataset-settings.en.ts
@@ -20,6 +20,12 @@ const translation = {
embeddingModel: 'Embedding Model',
embeddingModelTip: 'Change the embedded model, please go to ',
embeddingModelTipLink: 'Settings',
+ retrievalSetting: {
+ title: 'Retrieval setting',
+ learnMore: 'Learn more',
+ description: ' about retrieval method.',
+ longDescription: ' about retrieval method, you can change this at any time in the dataset settings.',
+ },
save: 'Save',
},
}
diff --git a/web/i18n/lang/dataset-settings.zh.ts b/web/i18n/lang/dataset-settings.zh.ts
index 66667894354613..3d2b68fc989665 100644
--- a/web/i18n/lang/dataset-settings.zh.ts
+++ b/web/i18n/lang/dataset-settings.zh.ts
@@ -20,6 +20,12 @@ const translation = {
embeddingModel: 'Embedding 模型',
embeddingModelTip: '修改 Embedding 模型,请去',
embeddingModelTipLink: '设置',
+ retrievalSetting: {
+ title: '检索设置',
+ learnMore: '了解更多',
+ description: '关于检索方法。',
+ longDescription: '关于检索方法,您可以随时在数据集设置中更改此设置。',
+ },
save: '保存',
},
}
diff --git a/web/i18n/lang/dataset.en.ts b/web/i18n/lang/dataset.en.ts
index b352d4419f3c6e..5667c37e6db0f0 100644
--- a/web/i18n/lang/dataset.en.ts
+++ b/web/i18n/lang/dataset.en.ts
@@ -4,7 +4,7 @@ const translation = {
appCount: ' linked apps',
createDataset: 'Create Dataset',
createDatasetIntro: 'Import your own text data or write data in real-time via Webhook for LLM context enhancement.',
- deleteDatasetConfirmTitle: 'Delete this app?',
+ deleteDatasetConfirmTitle: 'Delete this dataset?',
deleteDatasetConfirmContent:
'Deleting the dataset is irreversible. Users will no longer be able to access your dataset, and all prompt configurations and logs will be permanently deleted.',
datasetDeleted: 'Dataset deleted',
@@ -20,6 +20,27 @@ const translation = {
unavailableTip: 'Embedding model is not available, the default embedding model needs to be configured',
datasets: 'DATASETS',
datasetsApi: 'API',
+ retrieval: {
+ semantic_search: {
+ title: 'Vector Search',
+ description: 'Generate query embeddings and search for the text chunk most similar to its vector representation.',
+ },
+ full_text_search: {
+ title: 'Full-Text Search',
+ description: 'Index all terms in the document, allowing users to search any term and retrieve relevant text chunk containing those terms.',
+ },
+ hybrid_search: {
+ title: 'Hybrid Search',
+ description: 'Execute full-text search and vector searches simultaneously, re-rank to select the best match for the user\'s query. Configuration of the Rerank model API is necessary.',
+ recommend: 'Recommend',
+ },
+ invertedIndex: {
+ title: 'Inverted Index',
+ description: 'Inverted Index is a structure used for efficient retrieval. Organized by terms, each term points to documents or web pages containing it.',
+ },
+ change: 'Change',
+ changeRetrievalMethod: 'Change retrieval method',
+ },
}
export default translation
diff --git a/web/i18n/lang/dataset.zh.ts b/web/i18n/lang/dataset.zh.ts
index eef5f2670f92f1..294db3795b5474 100644
--- a/web/i18n/lang/dataset.zh.ts
+++ b/web/i18n/lang/dataset.zh.ts
@@ -20,6 +20,27 @@ const translation = {
unavailableTip: '由于 embedding 模型不可用,需要配置默认 embedding 模型',
datasets: '数据集',
datasetsApi: 'API',
+ retrieval: {
+ semantic_search: {
+ title: '向量检索',
+ description: '通过生成查询嵌入并查询与其向量表示最相似的文本分段',
+ },
+ full_text_search: {
+ title: '全文检索',
+ description: '索引文档中的所有词汇,从而允许用户查询任意词汇,并返回包含这些词汇的文本片段',
+ },
+ hybrid_search: {
+ title: '混合检索',
+ description: '同时执行全文检索和向量检索,并应用重排序步骤,从两类查询结果中选择匹配用户问题的最佳结果,需配置 Rerank 模型 API',
+ recommend: '推荐',
+ },
+ invertedIndex: {
+ title: '倒排索引',
+ description: '倒排索引是一种用于高效检索的结构。按术语组织,每个术语指向包含它的文档或网页',
+ },
+ change: '更改',
+ changeRetrievalMethod: '更改检索方法',
+ },
}
export default translation
diff --git a/web/models/datasets.ts b/web/models/datasets.ts
index be0b281e69e490..8940af00ba238d 100644
--- a/web/models/datasets.ts
+++ b/web/models/datasets.ts
@@ -1,5 +1,6 @@
import type { AppMode } from './app'
import type { DataSourceNotionPage } from './common'
+import type { RetrievalConfig } from '@/types/app'
export enum DataSourceType {
FILE = 'upload_file',
@@ -25,6 +26,8 @@ export type DataSet = {
embedding_model: string
embedding_model_provider: string
embedding_available: boolean
+ retrieval_model_dict: RetrievalConfig
+ retrieval_model: RetrievalConfig
}
export type CustomFile = File & {
@@ -193,6 +196,7 @@ export type DocumentReq = {
export type CreateDocumentReq = DocumentReq & {
data_source: DataSource
+ retrieval_model: RetrievalConfig
}
export type IndexingEstimateParams = DocumentReq & Partial & {
diff --git a/web/models/debug.ts b/web/models/debug.ts
index 19d2687bbcb133..92e2cb3d94e523 100644
--- a/web/models/debug.ts
+++ b/web/models/debug.ts
@@ -1,4 +1,4 @@
-import type { ModelModeType } from '@/types/app'
+import type { ModelModeType, RETRIEVE_TYPE } from '@/types/app'
export type Inputs = Record
export enum PromptMode {
@@ -107,9 +107,16 @@ export type DatasetConfigItem = {
enable: boolean
value: number
}
+
export type DatasetConfigs = {
+ retrieval_model: RETRIEVE_TYPE
+ reranking_model: {
+ reranking_provider_name: string
+ reranking_model_name: string
+ }
top_k: number
- score_threshold: DatasetConfigItem
+ score_threshold_enabled: boolean
+ score_threshold: number
}
export type DebugRequestBody = {
diff --git a/web/package.json b/web/package.json
index 725c283545a5aa..f6c33231d99e8d 100644
--- a/web/package.json
+++ b/web/package.json
@@ -1,6 +1,6 @@
{
"name": "dify-web",
- "version": "0.3.30",
+ "version": "0.3.31",
"private": true,
"scripts": {
"dev": "next dev",
diff --git a/web/service/common.ts b/web/service/common.ts
index edee3fc4b5f8dd..b43f37c69efe20 100644
--- a/web/service/common.ts
+++ b/web/service/common.ts
@@ -27,6 +27,7 @@ import type {
ValidateOpenAIKeyResponse,
} from '@/models/app'
import type { BackendModel, ProviderMap } from '@/app/components/header/account-setting/model-page/declarations'
+import type { RETRIEVE_METHOD } from '@/types/app'
export const login: Fetcher }> = ({ url, body }) => {
return post(url, { body }) as Promise
@@ -237,3 +238,10 @@ export const fetchCodeBasedExtensionList: Fetcher =
export const moderate = (url: string, body: { app_id: string; text: string }) => {
return post(url, { body }) as Promise
}
+
+type RetrievalMethodsRes = {
+ 'retrieval_method': RETRIEVE_METHOD[]
+}
+export const fetchSupportRetrievalMethods: Fetcher = (url) => {
+ return get(url)
+}
diff --git a/web/service/datasets.ts b/web/service/datasets.ts
index 63120d39dfb658..fcfc1539311993 100644
--- a/web/service/datasets.ts
+++ b/web/service/datasets.ts
@@ -27,6 +27,7 @@ import type {
ApikeysListResponse,
CreateApiKeyResponse,
} from '@/models/app'
+import type { RetrievalConfig } from '@/types/app'
// apis for documents in a dataset
@@ -48,7 +49,7 @@ export const fetchDatasetDetail: Fetcher = (datasetId: string)
return get(`/datasets/${datasetId}`)
}
-export const updateDatasetSetting: Fetcher> }> = ({ datasetId, body }) => {
+export const updateDatasetSetting: Fetcher> }> = ({ datasetId, body }) => {
return patch(`/datasets/${datasetId}`, { body })
}
@@ -182,8 +183,8 @@ export const checkSegmentBatchImportProgress: Fetcher<{ job_id: string; job_stat
}
// hit testing
-export const hitTesting: Fetcher = ({ datasetId, queryText }) => {
- return post(`/datasets/${datasetId}/hit-testing`, { body: { query: queryText } })
+export const hitTesting: Fetcher = ({ datasetId, queryText, retrieval_model }) => {
+ return post(`/datasets/${datasetId}/hit-testing`, { body: { query: queryText, retrieval_model } })
}
export const fetchTestingRecords: Fetcher = ({ datasetId, params }) => {
diff --git a/web/types/app.ts b/web/types/app.ts
index 80a4ff588d070c..fea0d5933a0d60 100644
--- a/web/types/app.ts
+++ b/web/types/app.ts
@@ -22,6 +22,18 @@ export enum ModelModeType {
'unset' = '',
}
+export enum RETRIEVE_TYPE {
+ oneWay = 'single',
+ multiWay = 'multiple',
+}
+
+export enum RETRIEVE_METHOD {
+ semantic = 'semantic_search',
+ fullText = 'full_text_search',
+ hybrid = 'hybrid_search',
+ invertedIndex = 'invertedIndex',
+}
+
export type VariableInput = {
key: string
name: string
@@ -301,6 +313,7 @@ export type ImageFile = {
progress: number
url: string
base64Url?: string
+ deleted?: boolean
}
export type VisionFile = {
@@ -310,3 +323,15 @@ export type VisionFile = {
url: string
upload_file_id: string
}
+
+export type RetrievalConfig = {
+ search_method: RETRIEVE_METHOD
+ reranking_enable: boolean
+ reranking_model: {
+ reranking_provider_name: string
+ reranking_model_name: string
+ }
+ top_k: number
+ score_threshold_enable: boolean
+ score_threshold: number
+}
diff --git a/web/yarn.lock b/web/yarn.lock
index bfd38d6af5e3a1..665c901a08b767 100644
--- a/web/yarn.lock
+++ b/web/yarn.lock
@@ -149,6 +149,13 @@
dependencies:
"@floating-ui/utils" "^0.1.1"
+"@floating-ui/dom@1.1.1":
+ version "1.1.1"
+ resolved "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.1.1.tgz"
+ integrity sha512-TpIO93+DIujg3g7SykEAGZMDtbJRrmnYRCNYSjJlvIbGhBjRSNTLVbNeDQBrzy9qDgUbiWdc7KA0uZHZ2tJmiw==
+ dependencies:
+ "@floating-ui/core" "^1.1.0"
+
"@floating-ui/dom@^1.5.1":
version "1.5.1"
resolved "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.5.1.tgz"
@@ -157,13 +164,6 @@
"@floating-ui/core" "^1.4.1"
"@floating-ui/utils" "^0.1.1"
-"@floating-ui/dom@1.1.1":
- version "1.1.1"
- resolved "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.1.1.tgz"
- integrity sha512-TpIO93+DIujg3g7SykEAGZMDtbJRrmnYRCNYSjJlvIbGhBjRSNTLVbNeDQBrzy9qDgUbiWdc7KA0uZHZ2tJmiw==
- dependencies:
- "@floating-ui/core" "^1.1.0"
-
"@floating-ui/react-dom@^2.0.1":
version "2.0.2"
resolved "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.0.2.tgz"
@@ -223,7 +223,7 @@
resolved "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-1.2.1.tgz"
integrity sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==
-"@jridgewell/gen-mapping@^0.3.0", "@jridgewell/gen-mapping@^0.3.2":
+"@jridgewell/gen-mapping@^0.3.2":
version "0.3.3"
resolved "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz"
integrity sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==
@@ -242,25 +242,17 @@
resolved "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz"
integrity sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==
-"@jridgewell/source-map@^0.3.3":
- version "0.3.3"
- resolved "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.3.tgz"
- integrity sha512-b+fsZXeLYi9fEULmfBrhxn4IrPlINf8fiNarzTof004v3lFdntdwa9PF7vFJqm3mg7s+ScJMxXaE3Acp1irZcg==
- dependencies:
- "@jridgewell/gen-mapping" "^0.3.0"
- "@jridgewell/trace-mapping" "^0.3.9"
+"@jridgewell/sourcemap-codec@1.4.14":
+ version "1.4.14"
+ resolved "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz"
+ integrity sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==
"@jridgewell/sourcemap-codec@^1.4.10":
version "1.4.15"
resolved "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz"
integrity sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==
-"@jridgewell/sourcemap-codec@1.4.14":
- version "1.4.14"
- resolved "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz"
- integrity sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==
-
-"@jridgewell/trace-mapping@^0.3.17", "@jridgewell/trace-mapping@^0.3.9":
+"@jridgewell/trace-mapping@^0.3.9":
version "0.3.18"
resolved "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.18.tgz"
integrity sha512-w+niJYzMHdd7USdiH2U6869nqhD2nbfZXND5Yp93qIbEmnDNk7PD48o+YchRVpzMU7M6jVCbenTR7PA1FLQ9pA==
@@ -422,7 +414,7 @@
dependencies:
"@lexical/offset" "0.12.2"
-"@mdx-js/loader@^2.3.0", "@mdx-js/loader@>=0.15.0":
+"@mdx-js/loader@^2.3.0":
version "2.3.0"
resolved "https://registry.npmjs.org/@mdx-js/loader/-/loader-2.3.0.tgz"
integrity sha512-IqsscXh7Q3Rzb+f5DXYk0HU71PK+WuFsEhf+mSV3fOhpLcEpgsHvTQ2h0T6TlZ5gHOaBeFjkXwB52by7ypMyNg==
@@ -453,7 +445,7 @@
unist-util-visit "^4.0.0"
vfile "^5.0.0"
-"@mdx-js/react@^2.3.0", "@mdx-js/react@>=0.15.0":
+"@mdx-js/react@^2.3.0":
version "2.3.0"
resolved "https://registry.npmjs.org/@mdx-js/react/-/react-2.3.0.tgz"
integrity sha512-zQH//gdOmuu7nt2oJR29vFhDv88oGPmVw6BggmrHeMI+xgEkp1B2dX9/bMBSYtK0dyLX/aOmesKS09g222K1/g==
@@ -490,6 +482,46 @@
resolved "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-13.3.1.tgz"
integrity sha512-UXPtriEc/pBP8luSLSCZBcbzPeVv+SSjs9cH/KygTbhmACye8/OOXRZO13Z2Wq1G0gLmEAIHQAOuF+vafPd2lw==
+"@next/swc-darwin-x64@13.3.1":
+ version "13.3.1"
+ resolved "https://registry.yarnpkg.com/@next/swc-darwin-x64/-/swc-darwin-x64-13.3.1.tgz#0be90342c89e53a390ccd9bece15f7f5cd480049"
+ integrity sha512-lT36yYxosCfLtplFzJWgo0hrPu6/do8+msgM7oQkPeohDNdhjtjFUgOOwdSnPublLR6Mo2Ym4P/wl5OANuD2bw==
+
+"@next/swc-linux-arm64-gnu@13.3.1":
+ version "13.3.1"
+ resolved "https://registry.yarnpkg.com/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-13.3.1.tgz#a7353265839f8b8569a346a444dc3ab3770d297e"
+ integrity sha512-wRb76nLWJhonH8s3kxC/1tFguEkeOPayIwe9mkaz1G/yeS3OrjeyKMJsb4+Kdg0zbTo53bNCOl59NNtDM7yyyw==
+
+"@next/swc-linux-arm64-musl@13.3.1":
+ version "13.3.1"
+ resolved "https://registry.yarnpkg.com/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-13.3.1.tgz#24552e6102c350e372f83f505a1d93c880551a50"
+ integrity sha512-qz3BzjJRZ16Iq/jrp+pjiYOc0jTjHlfmxQmZk9x/+5uhRP6/eWQSTAPVJ33BMo6oK5O5N4644OgTAbzXzorecg==
+
+"@next/swc-linux-x64-gnu@13.3.1":
+ version "13.3.1"
+ resolved "https://registry.yarnpkg.com/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-13.3.1.tgz#5f335a683b6eafa52307b12af97782993b6c45ff"
+ integrity sha512-6mgkLmwlyWlomQmpl21I3hxgqE5INoW4owTlcLpNsd1V4wP+J46BlI/5zV5KWWbzjfncIqzXoeGs5Eg+1GHODA==
+
+"@next/swc-linux-x64-musl@13.3.1":
+ version "13.3.1"
+ resolved "https://registry.yarnpkg.com/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-13.3.1.tgz#58e5aad6f97203a0788783f66324456c8f9cdb50"
+ integrity sha512-uqm5sielhQmKJM+qayIhgZv1KlS5pqTdQ99b+Z7hMWryXS96qE0DftTmMZowBcUL6x7s2vSXyH5wPtO1ON7LBg==
+
+"@next/swc-win32-arm64-msvc@13.3.1":
+ version "13.3.1"
+ resolved "https://registry.yarnpkg.com/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-13.3.1.tgz#f8ed1badab57ed4503969758754e6fb0cf326753"
+ integrity sha512-WomIiTj/v3LevltlibNQKmvrOymNRYL+a0dp5R73IwPWN5FvXWwSELN/kiNALig/+T3luc4qHNTyvMCp9L6U5Q==
+
+"@next/swc-win32-ia32-msvc@13.3.1":
+ version "13.3.1"
+ resolved "https://registry.yarnpkg.com/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-13.3.1.tgz#7f599c8975b09ee5527cc49b9e5a4d13be50635a"
+ integrity sha512-M+PoH+0+q658wRUbs285RIaSTYnGBSTdweH/0CdzDgA6Q4rBM0sQs4DHmO3BPP0ltCO/vViIoyG7ks66XmCA5g==
+
+"@next/swc-win32-x64-msvc@13.3.1":
+ version "13.3.1"
+ resolved "https://registry.yarnpkg.com/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-13.3.1.tgz#192d43ab44ebb98bd4f5865d0e1d7ce62703182f"
+ integrity sha512-Sl1F4Vp5Z1rNXWZYqJwMuWRRol4bqOB6+/d7KqkgQ4AcafKPN1PZmpkCoxv4UFHtFNIB7EotnuIhtXu3zScicQ==
+
"@nodelib/fs.scandir@2.1.5":
version "2.1.5"
resolved "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz"
@@ -498,7 +530,7 @@
"@nodelib/fs.stat" "2.0.5"
run-parallel "^1.1.9"
-"@nodelib/fs.stat@^2.0.2", "@nodelib/fs.stat@2.0.5":
+"@nodelib/fs.stat@2.0.5", "@nodelib/fs.stat@^2.0.2":
version "2.0.5"
resolved "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz"
integrity sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==
@@ -589,7 +621,7 @@
resolved "https://registry.npmjs.org/@sentry/types/-/types-7.54.0.tgz"
integrity sha512-D+i9xogBeawvQi2r0NOrM7zYcUaPuijeME4O9eOTrDF20tj71hWtJLilK+KTGLYFtpGg1h+9bPaz7OHEIyVopg==
-"@sentry/utils@^7.54.0", "@sentry/utils@7.54.0":
+"@sentry/utils@7.54.0", "@sentry/utils@^7.54.0":
version "7.54.0"
resolved "https://registry.npmjs.org/@sentry/utils/-/utils-7.54.0.tgz"
integrity sha512-3Yf5KlKjIcYLddOexSt2ovu2TWlR4Fi7M+aCK8yUTzwNzf/xwFSWOstHlD/WiDy9HvfhWAOB/ukNTuAeJmtasw==
@@ -655,22 +687,6 @@
dependencies:
"@types/ms" "*"
-"@types/eslint-scope@^3.7.3":
- version "3.7.4"
- resolved "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.4.tgz"
- integrity sha512-9K4zoImiZc3HlIp6AVUDE4CWYx22a+lhSZMYNpbjW04+YF0KWj4pJXnEMjdnFTiQibFFmElcsasJXDbdI/EPhA==
- dependencies:
- "@types/eslint" "*"
- "@types/estree" "*"
-
-"@types/eslint@*":
- version "8.40.0"
- resolved "https://registry.npmjs.org/@types/eslint/-/eslint-8.40.0.tgz"
- integrity sha512-nbq2mvc/tBrK9zQQuItvjJl++GTN5j06DaPtp3hZCpngmG6Q3xoyEmd0TwZI0gAy/G1X0zhGBbr2imsGFdFV0g==
- dependencies:
- "@types/estree" "*"
- "@types/json-schema" "*"
-
"@types/estree-jsx@^1.0.0":
version "1.0.0"
resolved "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.0.tgz"
@@ -700,7 +716,7 @@
resolved "https://registry.npmjs.org/@types/js-cookie/-/js-cookie-3.0.3.tgz"
integrity sha512-Xe7IImK09HP1sv2M/aI+48a20VX+TdRJucfq4vfRVy6nWN8PYPOEnlMRSgxJAgYQIXJVL8dZ4/ilAM7dWNaOww==
-"@types/json-schema@*", "@types/json-schema@^7.0.8", "@types/json-schema@^7.0.9":
+"@types/json-schema@^7.0.9":
version "7.0.12"
resolved "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.12.tgz"
integrity sha512-Hr5Jfhc9eYOQNPYO5WLDq/n4jqijdHNlDXjuAQkkt+mWdQR+XJToOHrsD4cPaMXpn6KO7y2+wM8AZEs8VpBLVA==
@@ -817,7 +833,7 @@
dependencies:
"@types/react" "*"
-"@types/react@*", "@types/react@>=16", "@types/react@18.0.28":
+"@types/react@*", "@types/react@18.0.28", "@types/react@>=16":
version "18.0.28"
resolved "https://registry.npmjs.org/@types/react/-/react-18.0.28.tgz"
integrity sha512-RD0ivG1kEztNBdoAK7lekI9M+azSnitIn85h4iOiaLjaTrMjzslhaqCGaI4IyCJ1RljWiLCEu4jyrLLgqxBTew==
@@ -841,7 +857,7 @@
resolved "https://registry.npmjs.org/@types/semver/-/semver-7.5.0.tgz"
integrity sha512-G8hZ6XJiHnuhQKR7ZmysCeJWE08o8T0AXtk5darsCaTVsYZhhgUrq53jizaR2FvsoeCwJhlmwTjkXBY5Pn/ZHw==
-"@types/sortablejs@^1.15.1", "@types/sortablejs@1":
+"@types/sortablejs@^1.15.1":
version "1.15.1"
resolved "https://registry.npmjs.org/@types/sortablejs/-/sortablejs-1.15.1.tgz"
integrity sha512-g/JwBNToh6oCTAwNS8UGVmjO7NLDKsejVhvE4x1eWiPTC3uCuNsa/TD4ssvX3du+MLiM+SHPNDuijp8y76JzLQ==
@@ -851,7 +867,7 @@
resolved "https://registry.npmjs.org/@types/unist/-/unist-2.0.6.tgz"
integrity sha512-PBjIUxZHOuj0R15/xuwJYjFi+KZdNFrehocChv4g5hu6aFroHue8m0lBP0POdK2nKzbw0cgV1mws8+V/JAcEkQ==
-"@typescript-eslint/eslint-plugin@^5.0.0", "@typescript-eslint/eslint-plugin@^5.53.0":
+"@typescript-eslint/eslint-plugin@^5.53.0":
version "5.59.9"
resolved "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.59.9.tgz"
integrity sha512-4uQIBq1ffXd2YvF7MAvehWKW3zVv/w+mSfRAu+8cKbfj3nwzyqJLNcZJpQ/WZ1HLbJDiowwmQ6NO+63nCA+fqA==
@@ -867,7 +883,7 @@
semver "^7.3.7"
tsutils "^3.21.0"
-"@typescript-eslint/parser@^5.0.0", "@typescript-eslint/parser@^5.42.0", "@typescript-eslint/parser@^5.53.0":
+"@typescript-eslint/parser@^5.42.0", "@typescript-eslint/parser@^5.53.0":
version "5.59.9"
resolved "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.59.9.tgz"
integrity sha512-FsPkRvBtcLQ/eVK1ivDiNYBjn3TGJdXy2fhXX+rc7czWl4ARwnpArwbihSOHI2Peg9WbtGHrbThfBUkZZGTtvQ==
@@ -913,7 +929,7 @@
semver "^7.3.7"
tsutils "^3.21.0"
-"@typescript-eslint/utils@^5.10.0", "@typescript-eslint/utils@^5.53.0", "@typescript-eslint/utils@5.59.9":
+"@typescript-eslint/utils@5.59.9", "@typescript-eslint/utils@^5.10.0", "@typescript-eslint/utils@^5.53.0":
version "5.59.9"
resolved "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.59.9.tgz"
integrity sha512-1PuMYsju/38I5Ggblaeb98TOoUvjhRvLpLa1DoTOFaLWqaXl/1iQ1eGurTXgBY58NUdtfTXKP5xBq7q9NDaLKg==
@@ -935,148 +951,12 @@
"@typescript-eslint/types" "5.59.9"
eslint-visitor-keys "^3.3.0"
-"@webassemblyjs/ast@^1.11.5", "@webassemblyjs/ast@1.11.6":
- version "1.11.6"
- resolved "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.11.6.tgz"
- integrity sha512-IN1xI7PwOvLPgjcf180gC1bqn3q/QaOCwYUahIOhbYUu8KA/3tw2RT/T0Gidi1l7Hhj5D/INhJxiICObqpMu4Q==
- dependencies:
- "@webassemblyjs/helper-numbers" "1.11.6"
- "@webassemblyjs/helper-wasm-bytecode" "1.11.6"
-
-"@webassemblyjs/floating-point-hex-parser@1.11.6":
- version "1.11.6"
- resolved "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.6.tgz"
- integrity sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw==
-
-"@webassemblyjs/helper-api-error@1.11.6":
- version "1.11.6"
- resolved "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.6.tgz"
- integrity sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q==
-
-"@webassemblyjs/helper-buffer@1.11.6":
- version "1.11.6"
- resolved "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.11.6.tgz"
- integrity sha512-z3nFzdcp1mb8nEOFFk8DrYLpHvhKC3grJD2ardfKOzmbmJvEf/tPIqCY+sNcwZIY8ZD7IkB2l7/pqhUhqm7hLA==
-
-"@webassemblyjs/helper-numbers@1.11.6":
- version "1.11.6"
- resolved "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.6.tgz"
- integrity sha512-vUIhZ8LZoIWHBohiEObxVm6hwP034jwmc9kuq5GdHZH0wiLVLIPcMCdpJzG4C11cHoQ25TFIQj9kaVADVX7N3g==
- dependencies:
- "@webassemblyjs/floating-point-hex-parser" "1.11.6"
- "@webassemblyjs/helper-api-error" "1.11.6"
- "@xtuc/long" "4.2.2"
-
-"@webassemblyjs/helper-wasm-bytecode@1.11.6":
- version "1.11.6"
- resolved "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.6.tgz"
- integrity sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA==
-
-"@webassemblyjs/helper-wasm-section@1.11.6":
- version "1.11.6"
- resolved "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.11.6.tgz"
- integrity sha512-LPpZbSOwTpEC2cgn4hTydySy1Ke+XEu+ETXuoyvuyezHO3Kjdu90KK95Sh9xTbmjrCsUwvWwCOQQNta37VrS9g==
- dependencies:
- "@webassemblyjs/ast" "1.11.6"
- "@webassemblyjs/helper-buffer" "1.11.6"
- "@webassemblyjs/helper-wasm-bytecode" "1.11.6"
- "@webassemblyjs/wasm-gen" "1.11.6"
-
-"@webassemblyjs/ieee754@1.11.6":
- version "1.11.6"
- resolved "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.6.tgz"
- integrity sha512-LM4p2csPNvbij6U1f19v6WR56QZ8JcHg3QIJTlSwzFcmx6WSORicYj6I63f9yU1kEUtrpG+kjkiIAkevHpDXrg==
- dependencies:
- "@xtuc/ieee754" "^1.2.0"
-
-"@webassemblyjs/leb128@1.11.6":
- version "1.11.6"
- resolved "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.6.tgz"
- integrity sha512-m7a0FhE67DQXgouf1tbN5XQcdWoNgaAuoULHIfGFIEVKA6tu/edls6XnIlkmS6FrXAquJRPni3ZZKjw6FSPjPQ==
- dependencies:
- "@xtuc/long" "4.2.2"
-
-"@webassemblyjs/utf8@1.11.6":
- version "1.11.6"
- resolved "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.6.tgz"
- integrity sha512-vtXf2wTQ3+up9Zsg8sa2yWiQpzSsMyXj0qViVP6xKGCUT8p8YJ6HqI7l5eCnWx1T/FYdsv07HQs2wTFbbof/RA==
-
-"@webassemblyjs/wasm-edit@^1.11.5":
- version "1.11.6"
- resolved "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.11.6.tgz"
- integrity sha512-Ybn2I6fnfIGuCR+Faaz7YcvtBKxvoLV3Lebn1tM4o/IAJzmi9AWYIPWpyBfU8cC+JxAO57bk4+zdsTjJR+VTOw==
- dependencies:
- "@webassemblyjs/ast" "1.11.6"
- "@webassemblyjs/helper-buffer" "1.11.6"
- "@webassemblyjs/helper-wasm-bytecode" "1.11.6"
- "@webassemblyjs/helper-wasm-section" "1.11.6"
- "@webassemblyjs/wasm-gen" "1.11.6"
- "@webassemblyjs/wasm-opt" "1.11.6"
- "@webassemblyjs/wasm-parser" "1.11.6"
- "@webassemblyjs/wast-printer" "1.11.6"
-
-"@webassemblyjs/wasm-gen@1.11.6":
- version "1.11.6"
- resolved "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.11.6.tgz"
- integrity sha512-3XOqkZP/y6B4F0PBAXvI1/bky7GryoogUtfwExeP/v7Nzwo1QLcq5oQmpKlftZLbT+ERUOAZVQjuNVak6UXjPA==
- dependencies:
- "@webassemblyjs/ast" "1.11.6"
- "@webassemblyjs/helper-wasm-bytecode" "1.11.6"
- "@webassemblyjs/ieee754" "1.11.6"
- "@webassemblyjs/leb128" "1.11.6"
- "@webassemblyjs/utf8" "1.11.6"
-
-"@webassemblyjs/wasm-opt@1.11.6":
- version "1.11.6"
- resolved "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.11.6.tgz"
- integrity sha512-cOrKuLRE7PCe6AsOVl7WasYf3wbSo4CeOk6PkrjS7g57MFfVUF9u6ysQBBODX0LdgSvQqRiGz3CXvIDKcPNy4g==
- dependencies:
- "@webassemblyjs/ast" "1.11.6"
- "@webassemblyjs/helper-buffer" "1.11.6"
- "@webassemblyjs/wasm-gen" "1.11.6"
- "@webassemblyjs/wasm-parser" "1.11.6"
-
-"@webassemblyjs/wasm-parser@^1.11.5", "@webassemblyjs/wasm-parser@1.11.6":
- version "1.11.6"
- resolved "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.6.tgz"
- integrity sha512-6ZwPeGzMJM3Dqp3hCsLgESxBGtT/OeCvCZ4TA1JUPYgmhAx38tTPR9JaKy0S5H3evQpO/h2uWs2j6Yc/fjkpTQ==
- dependencies:
- "@webassemblyjs/ast" "1.11.6"
- "@webassemblyjs/helper-api-error" "1.11.6"
- "@webassemblyjs/helper-wasm-bytecode" "1.11.6"
- "@webassemblyjs/ieee754" "1.11.6"
- "@webassemblyjs/leb128" "1.11.6"
- "@webassemblyjs/utf8" "1.11.6"
-
-"@webassemblyjs/wast-printer@1.11.6":
- version "1.11.6"
- resolved "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.11.6.tgz"
- integrity sha512-JM7AhRcE+yW2GWYaKeHL5vt4xqee5N2WcezptmgyhNS+ScggqcT1OtXykhAb13Sn5Yas0j2uv9tHgrjwvzAP4A==
- dependencies:
- "@webassemblyjs/ast" "1.11.6"
- "@xtuc/long" "4.2.2"
-
-"@xtuc/ieee754@^1.2.0":
- version "1.2.0"
- resolved "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz"
- integrity sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==
-
-"@xtuc/long@4.2.2":
- version "4.2.2"
- resolved "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz"
- integrity sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==
-
-acorn-import-assertions@^1.9.0:
- version "1.9.0"
- resolved "https://registry.npmjs.org/acorn-import-assertions/-/acorn-import-assertions-1.9.0.tgz"
- integrity sha512-cmMwop9x+8KFhxvKrKfPYmN6/pKTYYHBqLa0DfvVZcKMJWNyWLnaqND7dx/qn66R7ewM1UX5XMaDVP5wlVTaVA==
-
acorn-jsx@^5.0.0, acorn-jsx@^5.3.2:
version "5.3.2"
resolved "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz"
integrity sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==
-"acorn@^6.0.0 || ^7.0.0 || ^8.0.0", acorn@^8, acorn@^8.0.0, acorn@^8.5.0, acorn@^8.7.1, acorn@^8.8.0, acorn@^8.8.2:
+acorn@^8.0.0, acorn@^8.5.0, acorn@^8.8.0:
version "8.8.2"
resolved "https://registry.npmjs.org/acorn/-/acorn-8.8.2.tgz"
integrity sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw==
@@ -1110,12 +990,7 @@ ahooks@^3.7.5:
screenfull "^5.0.0"
tslib "^2.4.1"
-ajv-keywords@^3.5.2:
- version "3.5.2"
- resolved "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz"
- integrity sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==
-
-ajv@^6.10.0, ajv@^6.12.4, ajv@^6.12.5, ajv@^6.9.1:
+ajv@^6.10.0, ajv@^6.12.4:
version "6.12.6"
resolved "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz"
integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==
@@ -1337,7 +1212,7 @@ braces@^3.0.2, braces@~3.0.2:
dependencies:
fill-range "^7.0.1"
-browserslist@^4.14.5, browserslist@^4.21.5, "browserslist@>= 4.21.0":
+browserslist@^4.21.5:
version "4.21.7"
resolved "https://registry.npmjs.org/browserslist/-/browserslist-4.21.7.tgz"
integrity sha512-BauCXrQ7I2ftSqd2mvKHGo85XR0u7Ru3C/Hxsy/0TkfCtjrmAbPdzLGasmoiBxplpDXlPvdjX9u7srIMfgasNA==
@@ -1347,11 +1222,6 @@ browserslist@^4.14.5, browserslist@^4.21.5, "browserslist@>= 4.21.0":
node-releases "^2.0.12"
update-browserslist-db "^1.0.11"
-buffer-from@^1.0.0:
- version "1.1.2"
- resolved "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz"
- integrity sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==
-
builtin-modules@^3.3.0:
version "3.3.0"
resolved "https://registry.npmjs.org/builtin-modules/-/builtin-modules-3.3.0.tgz"
@@ -1406,6 +1276,11 @@ ccount@^2.0.0:
resolved "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz"
integrity sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==
+chalk@5.2.0:
+ version "5.2.0"
+ resolved "https://registry.npmjs.org/chalk/-/chalk-5.2.0.tgz"
+ integrity sha512-ree3Gqw/nazQAPuJJEy+avdl7QfZMcUvmHIKgEZkGL+xOBzRvup5Hxo6LHuMceSxOabuJLJm5Yp/92R9eMmMvA==
+
chalk@^2.0.0:
version "2.4.2"
resolved "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz"
@@ -1423,11 +1298,6 @@ chalk@^4.0.0:
ansi-styles "^4.1.0"
supports-color "^7.1.0"
-chalk@5.2.0:
- version "5.2.0"
- resolved "https://registry.npmjs.org/chalk/-/chalk-5.2.0.tgz"
- integrity sha512-ree3Gqw/nazQAPuJJEy+avdl7QfZMcUvmHIKgEZkGL+xOBzRvup5Hxo6LHuMceSxOabuJLJm5Yp/92R9eMmMvA==
-
character-entities-html4@^2.0.0:
version "2.1.0"
resolved "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz"
@@ -1463,7 +1333,7 @@ character-reference-invalid@^2.0.0:
resolved "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz"
integrity sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==
-chokidar@^3.5.3, "chokidar@>=3.0.0 <4.0.0":
+"chokidar@>=3.0.0 <4.0.0", chokidar@^3.5.3:
version "3.5.3"
resolved "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz"
integrity sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==
@@ -1478,26 +1348,21 @@ chokidar@^3.5.3, "chokidar@>=3.0.0 <4.0.0":
optionalDependencies:
fsevents "~2.3.2"
-chrome-trace-event@^1.0.2:
- version "1.0.3"
- resolved "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz"
- integrity sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==
-
ci-info@^3.6.1:
version "3.8.0"
resolved "https://registry.npmjs.org/ci-info/-/ci-info-3.8.0.tgz"
integrity sha512-eXTggHWSooYhq49F2opQhuHWgzucfF2YgODK4e1566GQs5BIfP30B0oenwBJHfWxAs2fyPB1s7Mg949zLf61Yw==
-classnames@^2.2.1, classnames@^2.3.2:
- version "2.3.2"
- resolved "https://registry.npmjs.org/classnames/-/classnames-2.3.2.tgz"
- integrity sha512-CSbhY4cFEJRe6/GQzIk5qXZ4Jeg5pcsP7b5peFSDpffpe1cqjASH/n9UTjBwOp6XpMSTwQ8Za2K5V02ueA7Tmw==
-
classnames@2.3.1:
version "2.3.1"
resolved "https://registry.npmjs.org/classnames/-/classnames-2.3.1.tgz"
integrity sha512-OlQdbZ7gLfGarSqxesMesDa5uz7KFbID8Kpq/SxIoNGDqY8lSYs0D+hhtBXhcdB3rcbXArFr7vlHheLk1voeNA==
+classnames@^2.2.1, classnames@^2.3.2:
+ version "2.3.2"
+ resolved "https://registry.npmjs.org/classnames/-/classnames-2.3.2.tgz"
+ integrity sha512-CSbhY4cFEJRe6/GQzIk5qXZ4Jeg5pcsP7b5peFSDpffpe1cqjASH/n9UTjBwOp6XpMSTwQ8Za2K5V02ueA7Tmw==
+
clean-regexp@^1.0.0:
version "1.0.0"
resolved "https://registry.npmjs.org/clean-regexp/-/clean-regexp-1.0.0.tgz"
@@ -1533,7 +1398,7 @@ cli-truncate@^3.1.0:
slice-ansi "^5.0.0"
string-width "^5.0.0"
-client-only@^0.0.1, client-only@0.0.1:
+client-only@0.0.1, client-only@^0.0.1:
version "0.0.1"
resolved "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz"
integrity sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==
@@ -1552,16 +1417,16 @@ color-convert@^2.0.1:
dependencies:
color-name "~1.1.4"
-color-name@~1.1.4:
- version "1.1.4"
- resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz"
- integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==
-
color-name@1.1.3:
version "1.1.3"
resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz"
integrity sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==
+color-name@~1.1.4:
+ version "1.1.4"
+ resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz"
+ integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==
+
colorette@^2.0.19:
version "2.0.20"
resolved "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz"
@@ -1577,16 +1442,16 @@ comma-separated-tokens@^2.0.0:
resolved "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz"
integrity sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==
+commander@7:
+ version "7.2.0"
+ resolved "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz"
+ integrity sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==
+
commander@^10.0.0:
version "10.0.1"
resolved "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz"
integrity sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==
-commander@^2.20.0:
- version "2.20.3"
- resolved "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz"
- integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==
-
commander@^4.0.0:
version "4.1.1"
resolved "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz"
@@ -1597,11 +1462,6 @@ commander@^8.3.0:
resolved "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz"
integrity sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==
-commander@7:
- version "7.2.0"
- resolved "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz"
- integrity sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==
-
concat-map@0.0.1:
version "0.0.1"
resolved "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz"
@@ -1673,7 +1533,7 @@ cytoscape-fcose@^2.1.0:
dependencies:
cose-base "^2.2.0"
-cytoscape@^3.2.0, cytoscape@^3.23.0:
+cytoscape@^3.23.0:
version "3.26.0"
resolved "https://registry.npmjs.org/cytoscape/-/cytoscape-3.26.0.tgz"
integrity sha512-IV+crL+KBcrCnVVUCZW+zRRRFUZQcrtdOPXki+o4CFUWLdAEYvuZLcBSJC9EBK++suamERKzeY7roq2hdovV3w==
@@ -1681,13 +1541,6 @@ cytoscape@^3.2.0, cytoscape@^3.23.0:
heap "^0.2.6"
lodash "^4.17.21"
-d3-array@^3.2.0, "d3-array@2 - 3", "d3-array@2.10.0 - 3", "d3-array@2.5.0 - 3", d3-array@3:
- version "3.2.4"
- resolved "https://registry.npmjs.org/d3-array/-/d3-array-3.2.4.tgz"
- integrity sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==
- dependencies:
- internmap "1 - 2"
-
"d3-array@1 - 2":
version "2.12.1"
resolved "https://registry.npmjs.org/d3-array/-/d3-array-2.12.1.tgz"
@@ -1695,6 +1548,13 @@ d3-array@^3.2.0, "d3-array@2 - 3", "d3-array@2.10.0 - 3", "d3-array@2.5.0 - 3",
dependencies:
internmap "^1.0.0"
+"d3-array@2 - 3", "d3-array@2.10.0 - 3", "d3-array@2.5.0 - 3", d3-array@3, d3-array@^3.2.0:
+ version "3.2.4"
+ resolved "https://registry.npmjs.org/d3-array/-/d3-array-3.2.4.tgz"
+ integrity sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==
+ dependencies:
+ internmap "1 - 2"
+
d3-axis@3:
version "3.0.0"
resolved "https://registry.npmjs.org/d3-axis/-/d3-axis-3.0.0.tgz"
@@ -1804,16 +1664,16 @@ d3-hierarchy@3:
dependencies:
d3-color "1 - 3"
-d3-path@^3.1.0, "d3-path@1 - 3", d3-path@3:
- version "3.1.0"
- resolved "https://registry.npmjs.org/d3-path/-/d3-path-3.1.0.tgz"
- integrity sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==
-
d3-path@1:
version "1.0.9"
resolved "https://registry.npmjs.org/d3-path/-/d3-path-1.0.9.tgz"
integrity sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg==
+"d3-path@1 - 3", d3-path@3, d3-path@^3.1.0:
+ version "3.1.0"
+ resolved "https://registry.npmjs.org/d3-path/-/d3-path-3.1.0.tgz"
+ integrity sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==
+
d3-polygon@3:
version "3.0.1"
resolved "https://registry.npmjs.org/d3-polygon/-/d3-polygon-3.0.1.tgz"
@@ -1861,13 +1721,6 @@ d3-scale@4:
resolved "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz"
integrity sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==
-d3-shape@^1.2.0:
- version "1.3.7"
- resolved "https://registry.npmjs.org/d3-shape/-/d3-shape-1.3.7.tgz"
- integrity sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw==
- dependencies:
- d3-path "1"
-
d3-shape@3:
version "3.2.0"
resolved "https://registry.npmjs.org/d3-shape/-/d3-shape-3.2.0.tgz"
@@ -1875,6 +1728,13 @@ d3-shape@3:
dependencies:
d3-path "^3.1.0"
+d3-shape@^1.2.0:
+ version "1.3.7"
+ resolved "https://registry.npmjs.org/d3-shape/-/d3-shape-1.3.7.tgz"
+ integrity sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw==
+ dependencies:
+ d3-path "1"
+
"d3-time-format@2 - 4", d3-time-format@4:
version "4.1.0"
resolved "https://registry.npmjs.org/d3-time-format/-/d3-time-format-4.1.0.tgz"
@@ -2147,7 +2007,7 @@ echarts-for-react@^3.0.2:
fast-deep-equal "^3.1.3"
size-sensor "^1.0.1"
-"echarts@^3.0.0 || ^4.0.0 || ^5.0.0", echarts@^5.4.1:
+echarts@^5.4.1:
version "5.4.2"
resolved "https://registry.npmjs.org/echarts/-/echarts-5.4.2.tgz"
integrity sha512-2W3vw3oI2tWJdyAz+b8DuWS0nfXtSDqlDmqgin/lfzbkB01cuMEN66KWBlmur3YMp5nEDEEt5s23pllnAzB4EA==
@@ -2180,7 +2040,7 @@ emoji-regex@^9.2.2:
resolved "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz"
integrity sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==
-enhanced-resolve@^5.12.0, enhanced-resolve@^5.14.1:
+enhanced-resolve@^5.12.0:
version "5.14.1"
resolved "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.14.1.tgz"
integrity sha512-Vklwq2vDKtl0y/vtwjSesgJ5MYS7Etuk5txS8VdKL4AOS1aUlD96zqIfsOSLQsdv3xgMRbtkWM8eG9XDfKUPow==
@@ -2255,11 +2115,6 @@ es-get-iterator@^1.1.3:
isarray "^2.0.5"
stop-iteration-iterator "^1.0.0"
-es-module-lexer@^1.2.1:
- version "1.2.1"
- resolved "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.2.1.tgz"
- integrity sha512-9978wrXM50Y4rTMmW5kXIC09ZdXQZqkE4mxhwkd8VbzsGkXGPgV4zWuqQJgCEzYngdo2dYDa0l8xhX4fkSwJSg==
-
es-set-tostringtag@^2.0.1:
version "2.0.1"
resolved "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.0.1.tgz"
@@ -2380,7 +2235,7 @@ eslint-plugin-html@^7.1.0:
dependencies:
htmlparser2 "^8.0.1"
-eslint-plugin-import@*, eslint-plugin-import@^2.26.0, eslint-plugin-import@^2.27.5:
+eslint-plugin-import@^2.26.0, eslint-plugin-import@^2.27.5:
version "2.27.5"
resolved "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.27.5.tgz"
integrity sha512-LmEt3GVofgiGuiE+ORpnvP+kAm3h6MLZJ4Q5HCyHADofsb4VzXFsRiWj3c0OFiV+3DWFh0qg3v9gcPlfc3zRow==
@@ -2553,7 +2408,7 @@ eslint-rule-composer@^0.3.0:
resolved "https://registry.npmjs.org/eslint-rule-composer/-/eslint-rule-composer-0.3.0.tgz"
integrity sha512-bt+Sh8CtDmn2OajxvNO+BX7Wn4CIWMpTRm3MaiKPCQcnnlm0CS2mhui6QaoeQugs+3Kj2ESKEEGJUdVafwhiCg==
-eslint-scope@^5.1.1, eslint-scope@5.1.1:
+eslint-scope@^5.1.1:
version "5.1.1"
resolved "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz"
integrity sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==
@@ -2598,7 +2453,7 @@ eslint-visitor-keys@^3.0.0, eslint-visitor-keys@^3.3.0, eslint-visitor-keys@^3.4
resolved "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.1.tgz"
integrity sha512-pZnmmLwYzf+kWaM/Qgrvpen51upAktaaiI01nsJD/Yr3lMOdNtq0cxkrrg16w64VtisN6okbs7Q8AfGqj4c9fA==
-eslint@*, "eslint@^2 || ^3 || ^4 || ^5 || ^6 || ^7.2.0 || ^8", "eslint@^3 || ^4 || ^5 || ^6 || ^7 || ^8", "eslint@^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0", "eslint@^6.0.0 || ^7.0.0 || ^8.0.0", "eslint@^6.0.0 || ^7.0.0 || >=8.0.0", "eslint@^6.2.0 || ^7.0.0 || ^8.0.0", "eslint@^7.0.0 || ^8.0.0", "eslint@^7.23.0 || ^8.0.0", eslint@^8.0.0, eslint@>=4.19.1, eslint@>=5, eslint@>=6.0.0, eslint@>=7.0.0, eslint@>=7.4.0, eslint@>=8.28.0, eslint@8.36.0:
+eslint@8.36.0:
version "8.36.0"
resolved "https://registry.npmjs.org/eslint/-/eslint-8.36.0.tgz"
integrity sha512-Y956lmS7vDqomxlaaQAHVmeb4tNMp2FWIvU/RnU5BD3IKMD/MJPr76xdyr68P8tV1iNMvN2mRK0yy3c+UjL+bw==
@@ -2727,11 +2582,6 @@ esutils@^2.0.2:
resolved "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz"
integrity sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==
-events@^3.2.0:
- version "3.3.0"
- resolved "https://registry.npmjs.org/events/-/events-3.3.0.tgz"
- integrity sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==
-
execa@^5.0.0:
version "5.1.1"
resolved "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz"
@@ -2932,7 +2782,7 @@ get-tsconfig@^4.5.0:
dependencies:
resolve-pkg-maps "^1.0.0"
-glob-parent@^5.1.2:
+glob-parent@^5.1.2, glob-parent@~5.1.2:
version "5.1.2"
resolved "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz"
integrity sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==
@@ -2946,30 +2796,6 @@ glob-parent@^6.0.2:
dependencies:
is-glob "^4.0.3"
-glob-parent@~5.1.2:
- version "5.1.2"
- resolved "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz"
- integrity sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==
- dependencies:
- is-glob "^4.0.1"
-
-glob-to-regexp@^0.4.1:
- version "0.4.1"
- resolved "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz"
- integrity sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==
-
-glob@^7.1.3:
- version "7.2.3"
- resolved "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz"
- integrity sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==
- dependencies:
- fs.realpath "^1.0.0"
- inflight "^1.0.4"
- inherits "2"
- minimatch "^3.1.1"
- once "^1.3.0"
- path-is-absolute "^1.0.0"
-
glob@7.1.6:
version "7.1.6"
resolved "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz"
@@ -2994,6 +2820,18 @@ glob@7.1.7:
once "^1.3.0"
path-is-absolute "^1.0.0"
+glob@^7.1.3:
+ version "7.2.3"
+ resolved "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz"
+ integrity sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==
+ dependencies:
+ fs.realpath "^1.0.0"
+ inflight "^1.0.4"
+ inherits "2"
+ minimatch "^3.1.1"
+ once "^1.3.0"
+ path-is-absolute "^1.0.0"
+
globals@^13.19.0:
version "13.20.0"
resolved "https://registry.npmjs.org/globals/-/globals-13.20.0.tgz"
@@ -3038,7 +2876,7 @@ gopd@^1.0.1:
dependencies:
get-intrinsic "^1.1.3"
-graceful-fs@^4.1.2, graceful-fs@^4.2.4, graceful-fs@^4.2.9:
+graceful-fs@^4.2.4:
version "4.2.11"
resolved "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz"
integrity sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==
@@ -3275,7 +3113,7 @@ i18next-resources-to-backend@^1.1.3:
dependencies:
"@babel/runtime" "^7.21.5"
-i18next@^22.4.13, "i18next@>= 19.0.0":
+i18next@^22.4.13:
version "22.5.1"
resolved "https://registry.npmjs.org/i18next/-/i18next-22.5.1.tgz"
integrity sha512-8TGPgM3pAD+VRsMtUMNknRz3kzqwp/gPALrWMsDnmC1mKqJwpWyooQRLMcbTwq8z8YwSmuj+ZYvc+xCuEpkssA==
@@ -3354,16 +3192,16 @@ internal-slot@^1.0.3, internal-slot@^1.0.4, internal-slot@^1.0.5:
has "^1.0.3"
side-channel "^1.0.4"
-internmap@^1.0.0:
- version "1.0.1"
- resolved "https://registry.npmjs.org/internmap/-/internmap-1.0.1.tgz"
- integrity sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw==
-
"internmap@1 - 2":
version "2.0.3"
resolved "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz"
integrity sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==
+internmap@^1.0.0:
+ version "1.0.1"
+ resolved "https://registry.npmjs.org/internmap/-/internmap-1.0.1.tgz"
+ integrity sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw==
+
intersection-observer@^0.12.0:
version "0.12.2"
resolved "https://registry.npmjs.org/intersection-observer/-/intersection-observer-0.12.2.tgz"
@@ -3660,20 +3498,6 @@ isexe@^2.0.0:
resolved "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz"
integrity sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==
-isomorphic.js@^0.2.4:
- version "0.2.5"
- resolved "https://registry.npmjs.org/isomorphic.js/-/isomorphic.js-0.2.5.tgz"
- integrity sha512-PIeMbHqMt4DnUP3MA/Flc0HElYjMXArsw1qwJZcm9sqR8mq3l8NYizFMty0pWwE/tzIGH3EKK5+jes5mAr85yw==
-
-jest-worker@^27.4.5:
- version "27.5.1"
- resolved "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz"
- integrity sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==
- dependencies:
- "@types/node" "*"
- merge-stream "^2.0.0"
- supports-color "^8.0.0"
-
jiti@^1.18.2:
version "1.18.2"
resolved "https://registry.npmjs.org/jiti/-/jiti-1.18.2.tgz"
@@ -3721,7 +3545,7 @@ jsesc@~0.5.0:
resolved "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz"
integrity sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==
-json-parse-even-better-errors@^2.3.0, json-parse-even-better-errors@^2.3.1:
+json-parse-even-better-errors@^2.3.0:
version "2.3.1"
resolved "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz"
integrity sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==
@@ -3815,19 +3639,12 @@ levn@^0.4.1:
prelude-ls "^1.2.1"
type-check "~0.4.0"
-lexical@^0.12.2, lexical@0.12.2:
+lexical@^0.12.2:
version "0.12.2"
resolved "https://registry.npmjs.org/lexical/-/lexical-0.12.2.tgz"
integrity sha512-Kxavd+ETjxtVwG/hvPd6WZfXD44sLOKe9Vlkwxy7lBQ1qZArS+rZfs+u5iXwXe6tX9f2PIM0u3RHsrCEDDE0fw==
-lib0@^0.2.74:
- version "0.2.85"
- resolved "https://registry.npmjs.org/lib0/-/lib0-0.2.85.tgz"
- integrity sha512-vtAhVttLXCu3ps2OIsTz8CdKYKdcMo7ds1MNBIcSXz6vrY8sxASqpTi4vmsAIn7xjWvyT7haKcWW6woP6jebjQ==
- dependencies:
- isomorphic.js "^0.2.4"
-
-lilconfig@^2.0.5, lilconfig@^2.1.0, lilconfig@2.1.0:
+lilconfig@2.1.0, lilconfig@^2.0.5, lilconfig@^2.1.0:
version "2.1.0"
resolved "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz"
integrity sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==
@@ -3870,11 +3687,6 @@ listr2@^5.0.7:
through "^2.3.8"
wrap-ansi "^7.0.0"
-loader-runner@^4.2.0:
- version "4.3.0"
- resolved "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz"
- integrity sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==
-
local-pkg@^0.4.3:
version "0.4.3"
resolved "https://registry.npmjs.org/local-pkg/-/local-pkg-0.4.3.tgz"
@@ -4106,43 +3918,7 @@ mdast-util-from-markdown@^0.8.5:
parse-entities "^2.0.0"
unist-util-stringify-position "^2.0.0"
-mdast-util-from-markdown@^1.0.0:
- version "1.3.1"
- resolved "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-1.3.1.tgz"
- integrity sha512-4xTO/M8c82qBcnQc1tgpNtubGUW/Y1tBQ1B0i5CtSoelOLKFYlElIr3bvgREYYO5iRqbMY1YuqZng0GVOI8Qww==
- dependencies:
- "@types/mdast" "^3.0.0"
- "@types/unist" "^2.0.0"
- decode-named-character-reference "^1.0.0"
- mdast-util-to-string "^3.1.0"
- micromark "^3.0.0"
- micromark-util-decode-numeric-character-reference "^1.0.0"
- micromark-util-decode-string "^1.0.0"
- micromark-util-normalize-identifier "^1.0.0"
- micromark-util-symbol "^1.0.0"
- micromark-util-types "^1.0.0"
- unist-util-stringify-position "^3.0.0"
- uvu "^0.5.0"
-
-mdast-util-from-markdown@^1.1.0:
- version "1.3.1"
- resolved "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-1.3.1.tgz"
- integrity sha512-4xTO/M8c82qBcnQc1tgpNtubGUW/Y1tBQ1B0i5CtSoelOLKFYlElIr3bvgREYYO5iRqbMY1YuqZng0GVOI8Qww==
- dependencies:
- "@types/mdast" "^3.0.0"
- "@types/unist" "^2.0.0"
- decode-named-character-reference "^1.0.0"
- mdast-util-to-string "^3.1.0"
- micromark "^3.0.0"
- micromark-util-decode-numeric-character-reference "^1.0.0"
- micromark-util-decode-string "^1.0.0"
- micromark-util-normalize-identifier "^1.0.0"
- micromark-util-symbol "^1.0.0"
- micromark-util-types "^1.0.0"
- unist-util-stringify-position "^3.0.0"
- uvu "^0.5.0"
-
-mdast-util-from-markdown@^1.3.0:
+mdast-util-from-markdown@^1.0.0, mdast-util-from-markdown@^1.1.0, mdast-util-from-markdown@^1.3.0:
version "1.3.1"
resolved "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-1.3.1.tgz"
integrity sha512-4xTO/M8c82qBcnQc1tgpNtubGUW/Y1tBQ1B0i5CtSoelOLKFYlElIr3bvgREYYO5iRqbMY1YuqZng0GVOI8Qww==
@@ -4327,14 +4103,7 @@ mdast-util-to-string@^2.0.0:
resolved "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-2.0.0.tgz"
integrity sha512-AW4DRS3QbBayY/jJmD8437V1Gombjf8RSOUCMFBuo5iHi58AGEgVCKQ+ezHkZZDpAQS75hcBMpLqjpJTjtUL7w==
-mdast-util-to-string@^3.0.0:
- version "3.2.0"
- resolved "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-3.2.0.tgz"
- integrity sha512-V4Zn/ncyN1QNSqSBxTrMOLpjr+IKdHl2v3KVLoWmDPscP4r9GcCi71gjgvUV1SFSKh92AjAG4peFuBl2/YgCJg==
- dependencies:
- "@types/mdast" "^3.0.0"
-
-mdast-util-to-string@^3.1.0:
+mdast-util-to-string@^3.0.0, mdast-util-to-string@^3.1.0:
version "3.2.0"
resolved "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-3.2.0.tgz"
integrity sha512-V4Zn/ncyN1QNSqSBxTrMOLpjr+IKdHl2v3KVLoWmDPscP4r9GcCi71gjgvUV1SFSKh92AjAG4peFuBl2/YgCJg==
@@ -4778,18 +4547,6 @@ micromatch@^4.0.4, micromatch@^4.0.5:
braces "^3.0.2"
picomatch "^2.3.1"
-mime-db@1.52.0:
- version "1.52.0"
- resolved "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz"
- integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==
-
-mime-types@^2.1.27:
- version "2.1.35"
- resolved "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz"
- integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==
- dependencies:
- mime-db "1.52.0"
-
mimic-fn@^2.1.0:
version "2.1.0"
resolved "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz"
@@ -4854,7 +4611,7 @@ mri@^1.1.0:
resolved "https://registry.npmjs.org/mri/-/mri-1.2.0.tgz"
integrity sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==
-ms@^2.1.1, ms@2.1.2:
+ms@2.1.2, ms@^2.1.1:
version "2.1.2"
resolved "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz"
integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==
@@ -4888,11 +4645,6 @@ negotiator@^0.6.3:
resolved "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz"
integrity sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==
-neo-async@^2.6.2:
- version "2.6.2"
- resolved "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz"
- integrity sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==
-
next@13.3.1:
version "13.3.1"
resolved "https://registry.npmjs.org/next/-/next-13.3.1.tgz"
@@ -5046,14 +4798,7 @@ once@^1.3.0:
dependencies:
wrappy "1"
-onetime@^5.1.0:
- version "5.1.2"
- resolved "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz"
- integrity sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==
- dependencies:
- mimic-fn "^2.1.0"
-
-onetime@^5.1.2:
+onetime@^5.1.0, onetime@^5.1.2:
version "5.1.2"
resolved "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz"
integrity sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==
@@ -5284,18 +5029,18 @@ postcss-nested@^6.0.1:
dependencies:
postcss-selector-parser "^6.0.11"
-postcss-selector-parser@^6.0.11:
- version "6.0.13"
- resolved "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.13.tgz"
- integrity sha512-EaV1Gl4mUEV4ddhDnv/xtj7sxwrwxdetHdWUGnT4VJQf+4d05v6lHYZr8N573k5Z0BViss7BDhfWtKS3+sfAqQ==
+postcss-selector-parser@6.0.10, postcss-selector-parser@^6.0.9:
+ version "6.0.10"
+ resolved "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.10.tgz"
+ integrity sha512-IQ7TZdoaqbT+LCpShg46jnZVlhWD2w6iQYAcYXfHARZ7X1t/UGhhceQDs5X0cGqKvYlHNOuv7Oa1xmb0oQuA3w==
dependencies:
cssesc "^3.0.0"
util-deprecate "^1.0.2"
-postcss-selector-parser@^6.0.9, postcss-selector-parser@6.0.10:
- version "6.0.10"
- resolved "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.10.tgz"
- integrity sha512-IQ7TZdoaqbT+LCpShg46jnZVlhWD2w6iQYAcYXfHARZ7X1t/UGhhceQDs5X0cGqKvYlHNOuv7Oa1xmb0oQuA3w==
+postcss-selector-parser@^6.0.11:
+ version "6.0.13"
+ resolved "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.13.tgz"
+ integrity sha512-EaV1Gl4mUEV4ddhDnv/xtj7sxwrwxdetHdWUGnT4VJQf+4d05v6lHYZr8N573k5Z0BViss7BDhfWtKS3+sfAqQ==
dependencies:
cssesc "^3.0.0"
util-deprecate "^1.0.2"
@@ -5305,15 +5050,6 @@ postcss-value-parser@^4.0.0, postcss-value-parser@^4.2.0:
resolved "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz"
integrity sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==
-postcss@^8.0.0, postcss@^8.1.0, postcss@^8.2.14, postcss@^8.4.21, postcss@^8.4.23, postcss@^8.4.31, postcss@>=8.0.9:
- version "8.4.31"
- resolved "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz"
- integrity sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==
- dependencies:
- nanoid "^3.3.6"
- picocolors "^1.0.0"
- source-map-js "^1.0.2"
-
postcss@8.4.14:
version "8.4.14"
resolved "https://registry.npmjs.org/postcss/-/postcss-8.4.14.tgz"
@@ -5323,6 +5059,15 @@ postcss@8.4.14:
picocolors "^1.0.0"
source-map-js "^1.0.2"
+postcss@^8.4.23, postcss@^8.4.31:
+ version "8.4.31"
+ resolved "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz"
+ integrity sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==
+ dependencies:
+ nanoid "^3.3.6"
+ picocolors "^1.0.0"
+ source-map-js "^1.0.2"
+
prelude-ls@^1.2.1:
version "1.2.1"
resolved "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz"
@@ -5389,13 +5134,6 @@ queue-microtask@^1.2.2:
resolved "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz"
integrity sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==
-randombytes@^2.1.0:
- version "2.1.0"
- resolved "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz"
- integrity sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==
- dependencies:
- safe-buffer "^5.1.0"
-
rc-input@~1.3.5:
version "1.3.6"
resolved "https://registry.npmjs.org/rc-input/-/rc-input-1.3.6.tgz"
@@ -5441,7 +5179,7 @@ react-18-input-autosize@^3.0.0:
dependencies:
prop-types "^15.5.8"
-react-dom@*, "react-dom@^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0", "react-dom@^15.3.0 || ^16.0.0-alpha || ^17.0.0 || ^18.0.0", "react-dom@^16 || ^17 || ^18", react-dom@^18.2.0, react-dom@>=16.0.0, react-dom@>=16.14.0, react-dom@>=16.8.0, react-dom@>=16.9.0, react-dom@>=17.x:
+react-dom@^18.2.0:
version "18.2.0"
resolved "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz"
integrity sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==
@@ -5490,12 +5228,7 @@ react-is@^16.13.1, react-is@^16.7.0:
resolved "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz"
integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==
-react-is@^18.0.0:
- version "18.2.0"
- resolved "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz"
- integrity sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==
-
-react-is@^18.2.0:
+react-is@^18.0.0, react-is@^18.2.0:
version "18.2.0"
resolved "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz"
integrity sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==
@@ -5581,7 +5314,7 @@ react-window@^1.8.9:
"@babel/runtime" "^7.0.0"
memoize-one ">=3.1.1 <6"
-"react@^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0", "react@^15.0.0 || >=16.0.0", "react@^15.3.0 || ^16.0.0-alpha || ^17.0.0 || ^18.0.0", "react@^16 || ^17 || ^18", "react@^16.11.0 || ^17.0.0 || ^18.0.0", "react@^16.3.0 || ^17.0.0 || ^18.0.0", "react@^16.8.0 || ^17.0.0 || ^18.0.0", react@^18.2.0, "react@>= 0.14.0", "react@>= 16", "react@>= 16.8.0", "react@>= 16.8.0 || 17.x.x || ^18.0.0-0", react@>=16, react@>=16.0.0, react@>=16.13.1, react@>=16.14.0, react@>=16.8.0, react@>=16.9.0, react@>=17.x, "react@15.x || 16.x || 17.x || 18.x":
+react@^18.2.0:
version "18.2.0"
resolved "https://registry.npmjs.org/react/-/react-18.2.0.tgz"
integrity sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==
@@ -5835,11 +5568,6 @@ sade@^1.7.3:
dependencies:
mri "^1.1.0"
-safe-buffer@^5.1.0:
- version "5.2.1"
- resolved "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz"
- integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==
-
safe-regex-test@^1.0.0:
version "1.0.0"
resolved "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.0.0.tgz"
@@ -5861,7 +5589,7 @@ safe-regex@^2.1.1:
resolved "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz"
integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==
-sass@^1.3.0, sass@^1.61.0:
+sass@^1.61.0:
version "1.62.1"
resolved "https://registry.npmjs.org/sass/-/sass-1.62.1.tgz"
integrity sha512-NHpxIzN29MXvWiuswfc1W3I0N8SXBd8UR26WntmDlRYf0bSADnwnOjsyMZ3lMezSlArD33Vs3YFhp7dWvL770A==
@@ -5870,27 +5598,23 @@ sass@^1.3.0, sass@^1.61.0:
immutable "^4.0.0"
source-map-js ">=0.6.2 <2.0.0"
-scheduler@^0.23.0, scheduler@>=0.19.0:
+scheduler@^0.23.0:
version "0.23.0"
resolved "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz"
integrity sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==
dependencies:
loose-envify "^1.1.0"
-schema-utils@^3.1.1, schema-utils@^3.1.2:
- version "3.1.2"
- resolved "https://registry.npmjs.org/schema-utils/-/schema-utils-3.1.2.tgz"
- integrity sha512-pvjEHOgWc9OWA/f/DE3ohBWTD6EleVLf7iFUkoSwAxttdBhB9QUebQgxER2kWueOvRJXPHNnyrvvh9eZINB8Eg==
- dependencies:
- "@types/json-schema" "^7.0.8"
- ajv "^6.12.5"
- ajv-keywords "^3.5.2"
-
screenfull@^5.0.0:
version "5.2.0"
resolved "https://registry.npmjs.org/screenfull/-/screenfull-5.2.0.tgz"
integrity sha512-9BakfsO2aUQN2K9Fdbj87RJIEZ82Q9IGim7FqM5OsebfoFC6ZHXgDq/KvniuLTPdeM8wY2o6Dj3WQ7KeQCj3cA==
+"semver@2 || 3 || 4 || 5":
+ version "5.7.1"
+ resolved "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz"
+ integrity sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==
+
semver@^6.3.0:
version "6.3.0"
resolved "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz"
@@ -5903,18 +5627,6 @@ semver@^7.0.0, semver@^7.3.5, semver@^7.3.6, semver@^7.3.7, semver@^7.3.8:
dependencies:
lru-cache "^6.0.0"
-"semver@2 || 3 || 4 || 5":
- version "5.7.1"
- resolved "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz"
- integrity sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==
-
-serialize-javascript@^6.0.1:
- version "6.0.1"
- resolved "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.1.tgz"
- integrity sha512-owoXEFjWRllis8/M1Q+Cw5k8ZH40e3zhp/ovX+Xr/vi1qj6QesbyXXViFbpNvWvPNAD62SutwEXavefrLJWj7w==
- dependencies:
- randombytes "^2.1.0"
-
server-only@^0.0.1:
version "0.0.1"
resolved "https://registry.npmjs.org/server-only/-/server-only-0.0.1.tgz"
@@ -5987,29 +5699,16 @@ slice-ansi@^5.0.0:
ansi-styles "^6.0.0"
is-fullwidth-code-point "^4.0.0"
-sortablejs@^1.15.0, sortablejs@1:
+sortablejs@^1.15.0:
version "1.15.0"
resolved "https://registry.npmjs.org/sortablejs/-/sortablejs-1.15.0.tgz"
integrity sha512-bv9qgVMjUMf89wAvM6AxVvS/4MX3sPeN0+agqShejLU5z5GX4C75ow1O2e5k4L6XItUyAK3gH6AxSbXrOM5e8w==
-source-map-js@^1.0.2, "source-map-js@>=0.6.2 <2.0.0":
+"source-map-js@>=0.6.2 <2.0.0", source-map-js@^1.0.2:
version "1.0.2"
resolved "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz"
integrity sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==
-source-map-support@~0.5.20:
- version "0.5.21"
- resolved "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz"
- integrity sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==
- dependencies:
- buffer-from "^1.0.0"
- source-map "^0.6.0"
-
-source-map@^0.6.0:
- version "0.6.1"
- resolved "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz"
- integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==
-
source-map@^0.7.0:
version "0.7.4"
resolved "https://registry.npmjs.org/source-map/-/source-map-0.7.4.tgz"
@@ -6068,16 +5767,7 @@ string-argv@^0.3.1:
resolved "https://registry.npmjs.org/string-argv/-/string-argv-0.3.2.tgz"
integrity sha512-aqD2Q0144Z+/RqG52NeHEkZauTAUWJO8c6yTftGJKO3Tja5tUgIfmIl6kExvhtxSDP7fXB6DvzkfMpCd/F3G+Q==
-string-width@^4.1.0:
- version "4.2.3"
- resolved "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz"
- integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==
- dependencies:
- emoji-regex "^8.0.0"
- is-fullwidth-code-point "^3.0.0"
- strip-ansi "^6.0.1"
-
-string-width@^4.2.0:
+string-width@^4.1.0, string-width@^4.2.0:
version "4.2.3"
resolved "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz"
integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==
@@ -6231,13 +5921,6 @@ supports-color@^7.1.0:
dependencies:
has-flag "^4.0.0"
-supports-color@^8.0.0:
- version "8.1.1"
- resolved "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz"
- integrity sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==
- dependencies:
- has-flag "^4.0.0"
-
supports-preserve-symlinks-flag@^1.0.0:
version "1.0.0"
resolved "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz"
@@ -6263,7 +5946,7 @@ tabbable@^6.0.1:
resolved "https://registry.npmjs.org/tabbable/-/tabbable-6.2.0.tgz"
integrity sha512-Cat63mxsVJlzYvN51JmVXIgNoUokrIaT2zLclCXjRd8boZ0004U4KCs/sToJ75C6sdlByWxpYnb5Boif1VSFew==
-tailwindcss@^3.3.3, "tailwindcss@>=2.0.0 || >=3.0.0 || >=3.0.0-alpha.1", "tailwindcss@>=3.0.0 || insiders":
+tailwindcss@^3.3.3:
version "3.3.3"
resolved "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.3.3.tgz"
integrity sha512-A0KgSkef7eE4Mf+nKJ83i75TMyq8HqY3qmFIJSWy8bNt0v1lG7jUcpGpoTFxAwYcWOphcTBLPPJg+bDfhDf52w==
@@ -6291,32 +5974,11 @@ tailwindcss@^3.3.3, "tailwindcss@>=2.0.0 || >=3.0.0 || >=3.0.0-alpha.1", "tailwi
resolve "^1.22.2"
sucrase "^3.32.0"
-tapable@^2.1.1, tapable@^2.2.0:
+tapable@^2.2.0:
version "2.2.1"
resolved "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz"
integrity sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==
-terser-webpack-plugin@^5.3.7:
- version "5.3.9"
- resolved "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.9.tgz"
- integrity sha512-ZuXsqE07EcggTWQjXUj+Aot/OMcD0bMKGgF63f7UxYcu5/AJF53aIpK1YoP5xR9l6s/Hy2b+t1AM0bLNPRuhwA==
- dependencies:
- "@jridgewell/trace-mapping" "^0.3.17"
- jest-worker "^27.4.5"
- schema-utils "^3.1.1"
- serialize-javascript "^6.0.1"
- terser "^5.16.8"
-
-terser@^5.16.8:
- version "5.17.7"
- resolved "https://registry.npmjs.org/terser/-/terser-5.17.7.tgz"
- integrity sha512-/bi0Zm2C6VAexlGgLlVxA0P2lru/sdLyfCVaRMfKVo9nWxbmz7f/sD8VPybPeSUJaJcwmCJis9pBIhcVcG1QcQ==
- dependencies:
- "@jridgewell/source-map" "^0.3.3"
- acorn "^8.8.2"
- commander "^2.20.0"
- source-map-support "~0.5.20"
-
text-table@^0.2.0:
version "0.2.0"
resolved "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz"
@@ -6398,12 +6060,12 @@ tsconfig-paths@^3.14.1:
minimist "^1.2.6"
strip-bom "^3.0.0"
-tslib@^1.8.1:
- version "1.14.1"
- resolved "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz"
- integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==
+tslib@2.3.0:
+ version "2.3.0"
+ resolved "https://registry.npmjs.org/tslib/-/tslib-2.3.0.tgz"
+ integrity sha512-N82ooyxVNm6h1riLCoyS9e3fuJ3AMG2zIZs2Gd1ATcSFjSA23Q0fzjjZeh0jbJvWVDZ0cJT8yaNNaaXHzueNjg==
-tslib@^1.9.3:
+tslib@^1.8.1, tslib@^1.9.3:
version "1.14.1"
resolved "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz"
integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==
@@ -6413,11 +6075,6 @@ tslib@^2.1.0, tslib@^2.4.0, tslib@^2.4.1, tslib@^2.5.0:
resolved "https://registry.npmjs.org/tslib/-/tslib-2.5.3.tgz"
integrity sha512-mSxlJJwl3BMEQCUNnxXBU9jP4JBktcEGhURcPR6VQVlnP0FdDEsIaz0C35dXNGLyRfrATNofF0F5p2KPxQgB+w==
-tslib@2.3.0:
- version "2.3.0"
- resolved "https://registry.npmjs.org/tslib/-/tslib-2.3.0.tgz"
- integrity sha512-N82ooyxVNm6h1riLCoyS9e3fuJ3AMG2zIZs2Gd1ATcSFjSA23Q0fzjjZeh0jbJvWVDZ0cJT8yaNNaaXHzueNjg==
-
tsutils@^3.21.0:
version "3.21.0"
resolved "https://registry.npmjs.org/tsutils/-/tsutils-3.21.0.tgz"
@@ -6461,7 +6118,7 @@ typed-array-length@^1.0.4:
for-each "^0.3.3"
is-typed-array "^1.1.9"
-"typescript@>=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta", typescript@>=3.3.1, typescript@>=3.9, typescript@4.9.5:
+typescript@4.9.5:
version "4.9.5"
resolved "https://registry.npmjs.org/typescript/-/typescript-4.9.5.tgz"
integrity sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==
@@ -6674,14 +6331,6 @@ vue-eslint-parser@^9.3.0:
lodash "^4.17.21"
semver "^7.3.6"
-watchpack@^2.4.0:
- version "2.4.0"
- resolved "https://registry.npmjs.org/watchpack/-/watchpack-2.4.0.tgz"
- integrity sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==
- dependencies:
- glob-to-regexp "^0.4.1"
- graceful-fs "^4.1.2"
-
web-namespaces@^2.0.0:
version "2.0.1"
resolved "https://registry.npmjs.org/web-namespaces/-/web-namespaces-2.0.1.tgz"
@@ -6692,41 +6341,6 @@ web-worker@^1.2.0:
resolved "https://registry.npmjs.org/web-worker/-/web-worker-1.2.0.tgz"
integrity sha512-PgF341avzqyx60neE9DD+XS26MMNMoUQRz9NOZwW32nPQrF6p77f1htcnjBSEV8BGMKZ16choqUG4hyI0Hx7mA==
-webpack-sources@^3.2.3:
- version "3.2.3"
- resolved "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz"
- integrity sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==
-
-webpack@^5.1.0, webpack@>=4:
- version "5.85.1"
- resolved "https://registry.npmjs.org/webpack/-/webpack-5.85.1.tgz"
- integrity sha512-xTb7MRf4LY8Z5rzn7aIx4TDrwYJrjcHnIfU1TqtyZOoObyuGSpAUwIvVuqq5wPnv7WEgQr8UvO1q/dgoGG4HjA==
- dependencies:
- "@types/eslint-scope" "^3.7.3"
- "@types/estree" "^1.0.0"
- "@webassemblyjs/ast" "^1.11.5"
- "@webassemblyjs/wasm-edit" "^1.11.5"
- "@webassemblyjs/wasm-parser" "^1.11.5"
- acorn "^8.7.1"
- acorn-import-assertions "^1.9.0"
- browserslist "^4.14.5"
- chrome-trace-event "^1.0.2"
- enhanced-resolve "^5.14.1"
- es-module-lexer "^1.2.1"
- eslint-scope "5.1.1"
- events "^3.2.0"
- glob-to-regexp "^0.4.1"
- graceful-fs "^4.2.9"
- json-parse-even-better-errors "^2.3.1"
- loader-runner "^4.2.0"
- mime-types "^2.1.27"
- neo-async "^2.6.2"
- schema-utils "^3.1.2"
- tapable "^2.1.1"
- terser-webpack-plugin "^5.3.7"
- watchpack "^2.4.0"
- webpack-sources "^3.2.3"
-
which-boxed-primitive@^1.0.2:
version "1.0.2"
resolved "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz"
@@ -6824,13 +6438,6 @@ yaml@^2.0.0, yaml@^2.1.1, yaml@^2.2.2:
resolved "https://registry.npmjs.org/yaml/-/yaml-2.3.1.tgz"
integrity sha512-2eHWfjaoXgTBC2jNM1LRef62VQa0umtvRiDSk6HSzW7RvS5YtkabJrwYLLEKWBc8a5U2PTSCs+dJjUTJdlHsWQ==
-yjs@>=13.5.22:
- version "13.6.7"
- resolved "https://registry.npmjs.org/yjs/-/yjs-13.6.7.tgz"
- integrity sha512-mCZTh4kjvUS2DnaktsYN6wLH3WZCJBLqrTdkWh1bIDpA/sB/GNFaLA/dyVJj2Hc7KwONuuoC/vWe9bwBBosZLQ==
- dependencies:
- lib0 "^0.2.74"
-
yocto-queue@^0.1.0:
version "0.1.0"
resolved "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz"