diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 000000000000..352c2766ae16 --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,29 @@ +# Source: https://github.com/a5chin/python-uv +FROM debian:bookworm-slim AS builder + +ENV CARGO_HOME="/opt/.cargo" + +SHELL [ "/bin/bash", "-o", "pipefail", "-c" ] + +WORKDIR /opt + +# The installer requires curl (and certificates) to download the release archive +# hadolint ignore=DL3008 +RUN apt-get update && \ + apt-get install -y --no-install-recommends ca-certificates curl + +# Run uv installer +RUN curl -LsSf https://astral.sh/uv/install.sh | sh + + +FROM mcr.microsoft.com/vscode/devcontainers/base:bookworm + + +ENV CARGO_HOME="/opt/.cargo" +ENV PATH="$CARGO_HOME/bin/:$PATH" +ENV PYTHONUNBUFFERED=True +ENV UV_LINK_MODE=copy + +WORKDIR /opt + +COPY --from=builder --chown=vscode: $CARGO_HOME $CARGO_HOME \ No newline at end of file diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 4df8df96b0f2..88c16501327c 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -2,13 +2,14 @@ // README at: https://github.com/devcontainers/templates/tree/main/src/universal { "name": "Langflow Dev Container", - // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile - "image": "mcr.microsoft.com/devcontainers/python:3.10", - + "build": { + "context": "..", + "dockerfile": "Dockerfile" + }, // Features to add to the dev container. More info: https://containers.dev/features. "features": { "ghcr.io/devcontainers/features/node": {}, - "ghcr.io/devcontainers-contrib/features/poetry": {} + "ghcr.io/dhoeric/features/hadolint:1": {} }, // Use 'forwardPorts' to make a list of ports inside the container available locally. @@ -16,26 +17,35 @@ // Use 'postCreateCommand' to run commands after the container is created. 
"postCreateCommand": "make install_frontend && make install_backend", - - "containerEnv": { - "POETRY_VIRTUALENVS_IN_PROJECT": "true" - }, + "postStartCommand": "make init", // Configure tool-specific properties. "customizations": { "vscode": { "extensions": [ + "charliermarsh.ruff", + "njpwerner.autodocstring", + "oderwat.indent-rainbow", + "exiasr.hadolint", "actboy168.tasks", "GitHub.copilot", "ms-python.python", - "sourcery.sourcery", "eamodio.gitlens", "ms-vscode.makefile-tools", "GitHub.vscode-pull-request-github" - ] + ], + "settings": { + "terminal.integrated.defaultProfile.linux": "zsh", + "terminal.integrated.profiles.linux": { + "zsh": { + "path": "/bin/zsh" + } + } + } } - } + }, // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. // "remoteUser": "root" + "remoteUser": "vscode" } diff --git a/.github/actions/setup-uv/action.yml b/.github/actions/setup-uv/action.yml new file mode 100644 index 000000000000..1b75878f3f83 --- /dev/null +++ b/.github/actions/setup-uv/action.yml @@ -0,0 +1,25 @@ +name: "Setup uv" +description: "Checks out code, installs uv, and sets up Python environment" + +runs: + using: "composite" + steps: + - name: Install uv + uses: astral-sh/setup-uv@v3 + with: + enable-cache: true + cache-dependency-glob: "uv.lock" + + - name: "Set up Python" + uses: actions/setup-python@v5 + with: + python-version-file: "pyproject.toml" + + - name: Restore uv cache + uses: actions/cache@v4 + with: + path: /tmp/.uv-cache + key: uv-${{ runner.os }}-${{ hashFiles('uv.lock') }} + restore-keys: | + uv-${{ runner.os }}-${{ hashFiles('uv.lock') }} + uv-${{ runner.os }} diff --git a/.github/changes-filter.yaml b/.github/changes-filter.yaml index f3df3db58882..36eb19a62a1b 100644 --- a/.github/changes-filter.yaml +++ b/.github/changes-filter.yaml @@ -5,8 +5,11 @@ python: - "pyproject.toml" - "poetry.lock" - "**/python_test.yml" -tests: - - "tests/**" +components-changes: + - "src/backend/base/langflow/components/**" 
+starter-projects-changes: + - "src/backend/base/langflow/initial_setup/**" +frontend-tests: - "src/frontend/tests/**" frontend: - "src/frontend/**" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f09031e6aae7..a53481d2ba8d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -12,7 +12,7 @@ on: description: "Frontend Tests Folder" required: false type: string - default: "tests/end-to-end" + default: "tests/core" workflow_dispatch: inputs: branch: @@ -35,7 +35,6 @@ on: pull_request: types: [synchronize, labeled] - concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true @@ -55,14 +54,15 @@ jobs: path-filter: needs: set-ci-condition if: ${{ needs.set-ci-condition.outputs.should-run-ci == 'true' }} - name: Filter Paths runs-on: ubuntu-latest outputs: python: ${{ steps.filter.outputs.python }} frontend: ${{ steps.filter.outputs.frontend }} docs: ${{ steps.filter.outputs.docs }} - tests: ${{ steps.filter.outputs.tests }} + frontend-tests: ${{ steps.filter.outputs.frontend-tests }} + components-changes: ${{ steps.filter.outputs.components-changes }} + starter-projects-changes: ${{ steps.filter.outputs.starter-projects-changes }} steps: - name: Checkout code uses: actions/checkout@v4 @@ -77,7 +77,7 @@ jobs: test-backend: needs: path-filter name: Run Backend Tests - if: ${{ needs.path-filter.outputs.python == 'true' || needs.path-filter.outputs.tests == 'true' }} + if: ${{ needs.path-filter.outputs.python == 'true'}} uses: ./.github/workflows/python_test.yml with: python-versions: ${{ inputs.python-versions || '["3.10"]' }} @@ -85,7 +85,7 @@ jobs: test-frontend: needs: path-filter name: Run Frontend Tests - if: ${{ needs.path-filter.outputs.python == 'true' || needs.path-filter.outputs.frontend == 'true' || needs.path-filter.outputs.tests == 'true' }} + if: ${{ needs.path-filter.outputs.frontend == 'true' || needs.path-filter.outputs.frontend-tests == 'true' || 
needs.path-filter.outputs.components-changes == 'true' || needs.path-filter.outputs.starter-projects-changes == 'true' }} uses: ./.github/workflows/typescript_test.yml with: tests_folder: ${{ inputs.frontend-tests-folder }} @@ -95,10 +95,9 @@ jobs: lint-backend: needs: path-filter - if: ${{ needs.path-filter.outputs.python == 'true' || needs.path-filter.outputs.tests == 'true' }} + if: ${{ needs.path-filter.outputs.python == 'true'}} name: Lint Backend uses: ./.github/workflows/lint-py.yml - # Run only if there are python files changed test-docs-build: needs: path-filter @@ -109,7 +108,14 @@ jobs: # https://github.com/langchain-ai/langchain/blob/master/.github/workflows/check_diffs.yml ci_success: name: "CI Success" - needs: [test-backend,test-frontend,lint-backend,test-docs-build,set-ci-condition] + needs: + [ + test-backend, + test-frontend, + lint-backend, + test-docs-build, + set-ci-condition, + ] if: always() runs-on: ubuntu-latest @@ -123,4 +129,4 @@ jobs: echo $JOBS_JSON echo $RESULTS_JSON echo "Exiting with $EXIT_CODE" - exit $EXIT_CODE \ No newline at end of file + exit $EXIT_CODE diff --git a/.github/workflows/codspeed.yml b/.github/workflows/codspeed.yml new file mode 100644 index 000000000000..34909e84d2ce --- /dev/null +++ b/.github/workflows/codspeed.yml @@ -0,0 +1,45 @@ +name: Run benchmarks + +on: + push: + paths: + - "src/backend/base/**" + - "src/backend/tests/performance/**" + branches: + - "main" # or "master" + pull_request: + paths: + - "src/backend/base/**" + - "src/backend/tests/performance/**" + - "!src/backend/base/langflow/components/**" + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + codspeed: + name: Run benchmarks + runs-on: ubuntu-latest + strategy: + matrix: + python-version: + - "3.12" + steps: + - name: Check out the code at a specific ref + uses: actions/checkout@v4 + - name: "Setup Environment" + uses: ./.github/actions/setup-uv + - name: Run benchmarks 
+ uses: CodSpeedHQ/action@v3 + with: + token: ${{ secrets.CODSPEED_TOKEN }} + run: | + uv run pytest src/backend/tests \ + --ignore=src/backend/tests/integration \ + --codspeed \ + -m "not api_key_required" \ + -n auto + - name: Minimize uv cache + run: uv cache prune --ci diff --git a/.github/workflows/conventional-labels.yml b/.github/workflows/conventional-labels.yml index 676c87564817..06c9fc46064b 100644 --- a/.github/workflows/conventional-labels.yml +++ b/.github/workflows/conventional-labels.yml @@ -15,7 +15,6 @@ jobs: uses: Namchee/conventional-pr@v0.15.4 with: access_token: ${{ secrets.GITHUB_TOKEN }} - verbose: true issue: false label: diff --git a/.github/workflows/create-release.yml b/.github/workflows/create-release.yml index e1b806ccf074..98329a3ec047 100644 --- a/.github/workflows/create-release.yml +++ b/.github/workflows/create-release.yml @@ -6,51 +6,23 @@ on: description: "Version to release" required: true type: string - release_type: - description: "Type of release (base or main)" + ref: + description: "Commit to tag the release" required: true - type: choice - options: - - base - - main + type: string + pre_release: + description: "Pre-release tag" + required: true + type: boolean -env: - POETRY_VERSION: "1.8.2" jobs: - release: - name: Build Langflow - runs-on: ubuntu-latest - outputs: - version: ${{ steps.check-version.outputs.version }} - steps: - - uses: actions/checkout@v4 - - name: Install poetry - run: pipx install poetry==${{ env.POETRY_VERSION }} - - name: Set up Python 3.12 - uses: actions/setup-python@v5 - with: - python-version: "3.12" - cache: "poetry" - - name: Build project for distribution - run: | - if [ "${{ inputs.release_type }}" == "base" ]; then - make build base=true - else - make build main=true - fi - - name: Upload Artifact - uses: actions/upload-artifact@v4 - with: - name: dist${{ inputs.release_type }} - path: ${{ inputs.release_type == 'base' && 'src/backend/base/dist' || 'dist' }} create_release: name: Create 
Release Job runs-on: ubuntu-latest - needs: release steps: - uses: actions/download-artifact@v4 with: - name: dist${{ inputs.release_type }} + name: dist-main path: dist - name: Create Release Notes uses: ncipollo/release-action@v1 @@ -59,6 +31,6 @@ jobs: token: ${{ secrets.GITHUB_TOKEN }} draft: false generateReleaseNotes: true - prerelease: true + prerelease: ${{ inputs.pre_release }} tag: v${{ inputs.version }} - commit: dev + commit: ${{ inputs.ref }} \ No newline at end of file diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml index 9c77e9813c3f..acd6d1aacc82 100644 --- a/.github/workflows/docker-build.yml +++ b/.github/workflows/docker-build.yml @@ -2,32 +2,51 @@ name: Docker Build and Push on: workflow_call: inputs: - version: + main_version: + required: true + type: string + description: "Main version to tag images with. Required for both main and base releases." + base_version: required: false type: string + description: "Base version to tag images with. Required for base release type." release_type: required: true type: string + description: "Release type. One of 'main', 'main-ep', 'base', 'nightly-main', 'nightly-base'." pre_release: required: false type: boolean default: false + ref: + required: false + type: string + description: "Ref to check out. If not specified, will default to the main version or current branch." workflow_dispatch: inputs: - version: - required: true + main_version: + description: "Main version to tag images with. Required for both main and base releases." + required: false + type: string + base_version: + description: "Base version to tag images with. Required for base release type." + required: false type: string release_type: + description: "Type of release. One of 'main', 'main-ep', 'base', 'nightly-main', 'nightly-base'." 
required: true - type: choice - options: - - base - - main + type: string pre_release: required: false type: boolean - default: true + default: false + ref: + required: false + type: string + description: "Ref to check out. If not specified, will default to the main version or current branch." + + env: POETRY_VERSION: "1.8.2" TEST_TAG: "langflowai/langflow:test" @@ -36,111 +55,239 @@ jobs: get-version: name: Get Version runs-on: ubuntu-latest - outputs: version: ${{ steps.get-version-input.outputs.version || steps.get-version-base.outputs.version || steps.get-version-main.outputs.version }} steps: - - uses: actions/checkout@v4 - - name: Set up Python 3.12 + Poetry ${{ env.POETRY_VERSION }} - uses: "./.github/actions/poetry_caching" + - name: Verify a main version exists + if: ${{ inputs.main_version == '' }} + run: | + # due to our how we split packages, we need to have a main version to check out. + echo "Must specify a main version to check out." + exit 1 + + - name: Check out the code at a specific ref + uses: actions/checkout@v4 with: - python-version: "3.12" - poetry-version: ${{ env.POETRY_VERSION }} - cache-key: ${{ runner.os }}-poetry-${{ env.POETRY_VERSION }}-${{ hashFiles('**/poetry.lock') }} - - name: Get Version from Input - if : ${{ inputs.version != '' }} + ref: ${{ inputs.ref || inputs.main_version || github.ref }} + persist-credentials: true + + - name: Get Version to Tag + if: ${{ inputs.main_version != '' }} id: get-version-input run: | - version=${{ inputs.version }} - echo version=$version >> $GITHUB_OUTPUT - - name: Get Version Main - if : ${{ inputs.version == '' && inputs.release_type == 'base' }} + # Produces the versions we will use to tag the docker images with. + + if [[ "${{ inputs.release_type }}" == "base" && "${{ inputs.base_version }}" == '' ]]; then + echo "Must specify a base version for base release type." 
+ exit 1 + fi + + if [[ "${{ inputs.release_type }}" == "nightly-base" && "${{ inputs.base_version }}" == '' ]]; then + echo "Must specify a base version for nightly-base release type." + exit 1 + fi + + if [[ "${{ inputs.release_type }}" == "main" && "${{ inputs.main_version }}" == '' ]]; then + echo "Must specify a main version for main release type." + exit 1 + fi + + if [[ "${{ inputs.release_type }}" == "main-ep" && "${{ inputs.main_version }}" == '' ]]; then + echo "Must specify a main version for main-ep release type." + exit 1 + fi + + if [[ "${{ inputs.release_type }}" == "nightly-main" && "${{ inputs.main_version }}" == '' ]]; then + echo "Must specify a main version for nightly-main release type." + exit 1 + fi + + if [[ "${{ inputs.release_type }}" == "base" || "${{ inputs.release_type }}" == "nightly-base" ]]; then + version=${{ inputs.base_version }} + echo "base version=${{ inputs.base_version }}" + echo version=$version + echo version=$version >> $GITHUB_OUTPUT + elif [[ "${{ inputs.release_type }}" == "main" || "${{ inputs.release_type }}" == "main-ep" || "${{ inputs.release_type }}" == "nightly-main" ]]; then + version=${{ inputs.main_version }} + echo version=$version + echo version=$version >> $GITHUB_OUTPUT + else + echo "No version or ref specified. Exiting the workflow." 
+ exit 1 + fi + - name: Get Version Base + if: ${{ inputs.base_version == '' && (inputs.release_type == 'base' || inputs.release_type == 'nightly-base') }} id: get-version-base run: | - version=$(cd src/backend/base && poetry version --short) + version=$(uv tree | grep 'langflow-base' | awk '{print $3}' | sed 's/^v//') + if [ -z "$version" ]; then + echo "Failed to extract version from uv tree output" + exit 1 + fi + echo version=$version echo version=$version >> $GITHUB_OUTPUT - - name: Get Version Base - if : ${{ inputs.version == '' && inputs.release_type == 'main' }} + - name: Get Version Main + if: ${{ inputs.main_version == '' && (inputs.release_type == 'main' || inputs.release_type == 'main-ep' || inputs.release_type == 'nightly-main') }} id: get-version-main run: | - version=$(poetry version --short) + version=$(uv tree | grep 'langflow' | grep -v 'langflow-base' | awk '{print $2}' | sed 's/^v//') + echo version=$version echo version=$version >> $GITHUB_OUTPUT setup: runs-on: ubuntu-latest needs: get-version outputs: - tags: ${{ steps.set-vars.outputs.tags }} + docker_tags: ${{ steps.set-vars.outputs.docker_tags }} + ghcr_tags: ${{ steps.set-vars.outputs.ghcr_tags }} file: ${{ steps.set-vars.outputs.file }} steps: - - uses: actions/checkout@v4 - name: Set Dockerfile and Tags id: set-vars run: | - if [[ "${{ inputs.release_type }}" == "base" ]]; then - echo "tags=langflowai/langflow:base-${{ needs.get-version.outputs.version }},langflowai/langflow:base-latest" >> $GITHUB_OUTPUT + nightly_suffix='' + if [[ "${{ inputs.release_type }}" == "nightly-base" || "${{ inputs.release_type }}" == "nightly-main" ]]; then + nightly_suffix="-nightly" + fi + + if [[ "${{ inputs.release_type }}" == "base" || "${{ inputs.release_type }}" == "nightly-base" ]]; then + # LANGFLOW-BASE RELEASE + echo "docker_tags=langflowai/langflow${nightly_suffix}:base-${{ needs.get-version.outputs.version }},langflowai/langflow${nightly_suffix}:base-latest" >> $GITHUB_OUTPUT + echo 
"ghcr_tags=ghcr.io/langflow-ai/langflow${nightly_suffix}:base-${{ needs.get-version.outputs.version }},ghcr.io/langflow-ai/langflow${nightly_suffix}:base-latest" >> $GITHUB_OUTPUT echo "file=./docker/build_and_push_base.Dockerfile" >> $GITHUB_OUTPUT else if [[ "${{ inputs.pre_release }}" == "true" ]]; then - echo "tags=langflowai/langflow:${{ needs.get-version.outputs.version }}" >> $GITHUB_OUTPUT + # LANGFLOW-MAIN PRE-RELEASE + echo "docker_tags=langflowai/langflow${nightly_suffix}:${{ needs.get-version.outputs.version }}" >> $GITHUB_OUTPUT + echo "ghcr_tags=ghcr.io/langflow-ai/langflow${nightly_suffix}:${{ needs.get-version.outputs.version }}" >> $GITHUB_OUTPUT + echo "file=./docker/build_and_push.Dockerfile" >> $GITHUB_OUTPUT + elif [[ "${{ inputs.release_type }}" == "main-ep" ]]; then + # LANGFLOW-MAIN (ENTRYPOINT) RELEASE + echo "docker_tags=langflowai/langflow-ep${nightly_suffix}:${{ needs.get-version.outputs.version }},langflowai/langflow-ep${nightly_suffix}:latest" >> $GITHUB_OUTPUT + echo "ghcr_tags=ghcr.io/langflow-ai/langflow-ep${nightly_suffix}:${{ needs.get-version.outputs.version }},ghcr.io/langflow-ai/langflow-ep${nightly_suffix}:latest" >> $GITHUB_OUTPUT + echo "file=./docker/build_and_push_ep.Dockerfile" >> $GITHUB_OUTPUT + elif [[ "${{ inputs.release_type }}" == "main" || "${{ inputs.release_type }}" == "nightly-main" ]]; then + # LANGFLOW-MAIN RELEASE + echo "docker_tags=langflowai/langflow${nightly_suffix}:${{ needs.get-version.outputs.version }},langflowai/langflow${nightly_suffix}:latest" >> $GITHUB_OUTPUT + echo "ghcr_tags=ghcr.io/langflow-ai/langflow${nightly_suffix}:${{ needs.get-version.outputs.version }},ghcr.io/langflow-ai/langflow${nightly_suffix}:latest" >> $GITHUB_OUTPUT + echo "file=./docker/build_and_push.Dockerfile" >> $GITHUB_OUTPUT else - echo "tags=langflowai/langflow:${{ needs.get-version.outputs.version }},langflowai/langflow:latest" >> $GITHUB_OUTPUT + echo "Invalid release type. Exiting the workflow." 
+ exit 1 fi - echo "file=./docker/build_and_push.Dockerfile" >> $GITHUB_OUTPUT fi build: runs-on: ubuntu-latest - needs: setup + needs: [get-version, setup] steps: - - uses: actions/checkout@v4 + - name: Check out the code at a specific ref + uses: actions/checkout@v4 + with: + ref: ${{ inputs.ref || inputs.main_version || github.ref }} + persist-credentials: true + - name: "Setup Environment" + uses: ./.github/actions/setup-uv + - name: Install the project + run: | + if [[ "${{ inputs.release_type }}" == "base" || "${{ inputs.release_type }}" == "nightly-base" ]]; then + uv sync --directory src/backend/base --no-dev --no-sources + else + uv sync --no-dev --no-sources + fi + - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 + - name: Login to Docker Hub uses: docker/login-action@v3 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Build and Push Docker Image + - name: Build and Push to Docker Hub uses: docker/build-push-action@v6 with: context: . push: true file: ${{ needs.setup.outputs.file }} - tags: ${{ needs.setup.outputs.tags }} + tags: ${{ needs.setup.outputs.docker_tags }} + # provenance: false will result in a single manifest for all platforms which makes the image pullable from arm64 machines via the emulation (e.g. Apple Silicon machines) + provenance: false + + - name: Login to Github Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.TEMP_GHCR_TOKEN}} + - name: Build and push to Github Container Registry + uses: docker/build-push-action@v6 + with: + context: . + push: true + file: ${{ needs.setup.outputs.file }} + tags: ${{ needs.setup.outputs.ghcr_tags }} # provenance: false will result in a single manifest for all platforms which makes the image pullable from arm64 machines via the emulation (e.g. 
Apple Silicon machines) provenance: false build_components: if: ${{ inputs.release_type == 'main' }} runs-on: ubuntu-latest + permissions: + packages: write needs: [build, get-version] strategy: matrix: - component: [backend, frontend] + component: [docker-backend, docker-frontend, ghcr-backend, ghcr-frontend] include: - - component: backend + - component: docker-backend dockerfile: ./docker/build_and_push_backend.Dockerfile - tags: ${{ inputs.pre_release == 'true' && format('langflowai/langflow-backend:{0}', needs.get-version.outputs.version) || format('langflowai/langflow-backend:{0},langflowai/langflow-backend:latest', needs.get-version.outputs.version) }} - - component: frontend + tags: langflowai/langflow-backend:${{ needs.get-version.outputs.version }},langflowai/langflow-backend:latest + langflow_image: langflowai/langflow:${{ needs.get-version.outputs.version }} + - component: docker-frontend dockerfile: ./docker/frontend/build_and_push_frontend.Dockerfile - tags: ${{ inputs.pre_release == 'true' && format('langflowai/langflow-frontend:{0}', needs.get-version.outputs.version) || format('langflowai/langflow-frontend:{0},langflowai/langflow-frontend:latest', needs.get-version.outputs.version) }} + tags: langflowai/langflow-frontend:${{ needs.get-version.outputs.version }},langflowai/langflow-frontend:latest + langflow_image: langflowai/langflow:${{ needs.get-version.outputs.version }} + - component: ghcr-backend + dockerfile: ./docker/build_and_push_backend.Dockerfile + tags: ghcr.io/langflow-ai/langflow-backend:${{ needs.get-version.outputs.version }},ghcr.io/langflow-ai/langflow-backend:latest + langflow_image: ghcr.io/langflow-ai/langflow:${{ needs.get-version.outputs.version }} + - component: ghcr-frontend + dockerfile: ./docker/frontend/build_and_push_frontend.Dockerfile + tags: ghcr.io/langflow-ai/langflow-frontend:${{ needs.get-version.outputs.version }},ghcr.io/langflow-ai/langflow-frontend:latest + langflow_image: ghcr.io/langflow-ai/langflow:${{ 
needs.get-version.outputs.version }} steps: - - uses: actions/checkout@v4 + - name: Check out the code at a specific ref + uses: actions/checkout@v4 + with: + ref: ${{ inputs.ref || inputs.main_version || github.ref }} + - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 + - name: Login to Docker Hub + if: ${{ matrix.component == 'docker-backend' }} || ${{ matrix.component == 'docker-frontend' }} uses: docker/login-action@v3 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Wait for Docker Hub to propagate (for backend) + + - name: Login to Github Container Registry + if: ${{ matrix.component == 'ghcr-backend' }} || ${{ matrix.component == 'ghcr-frontend' }} + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.TEMP_GHCR_TOKEN}} + + - name: Wait for propagation (for backend) run: sleep 120 + - name: Build and push ${{ matrix.component }} uses: docker/build-push-action@v6 with: context: . push: true build-args: | - LANGFLOW_IMAGE=langflowai/langflow:${{ needs.get-version.outputs.version }} + LANGFLOW_IMAGE=${{ matrix.langflow_image }} file: ${{ matrix.dockerfile }} tags: ${{ matrix.tags }} # provenance: false will result in a single manifest for all platforms which makes the image pullable from arm64 machines via the emulation (e.g. 
Apple Silicon machines) @@ -150,24 +297,22 @@ jobs: name: Restart HuggingFace Spaces if: ${{ inputs.release_type == 'main' }} runs-on: ubuntu-latest - needs: build + needs: [build, get-version] strategy: matrix: python-version: - "3.12" steps: - - uses: actions/checkout@v4 - - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }} - uses: "./.github/actions/poetry_caching" + - name: Check out the code at a specific ref + uses: actions/checkout@v4 with: - python-version: ${{ matrix.python-version }} - poetry-version: ${{ env.POETRY_VERSION }} - cache-key: ${{ runner.os }}-poetry-${{ env.POETRY_VERSION }}-${{ hashFiles('**/poetry.lock') }} - - name: Install Python dependencies - run: | - poetry env use ${{ matrix.python-version }} - poetry install + ref: ${{ inputs.ref || inputs.main_version || github.ref }} + - name: "Setup Environment" + uses: ./.github/actions/setup-uv - name: Restart HuggingFace Spaces Build run: | - poetry run python ./scripts/factory_restart_space.py --space "Langflow/Langflow" --token ${{ secrets.HUGGINGFACE_API_TOKEN }} + uv run ./scripts/factory_restart_space.py --space "Langflow/Langflow" --token ${{ secrets.HUGGINGFACE_API_TOKEN }} + + + diff --git a/.github/workflows/docker_test.yml b/.github/workflows/docker_test.yml index 07b826c588ef..ea4824ef3fb9 100644 --- a/.github/workflows/docker_test.yml +++ b/.github/workflows/docker_test.yml @@ -35,7 +35,7 @@ jobs: - name: Test image run: | expected_version=$(cat pyproject.toml | grep version | head -n 1 | cut -d '"' -f 2) - version=$(docker run --rm --entrypoint bash langflowai/langflow:latest-dev -c 'python -c "from langflow.version import __version__ as langflow_version; print(langflow_version)"') + version=$(docker run --rm --entrypoint bash langflowai/langflow:latest-dev -c "python -c 'from langflow.utils.version import get_version_info; print(get_version_info()[\"version\"])'") if [ "$expected_version" != "$version" ]; then echo "Expected version: 
$expected_version" echo "Actual version: $version" @@ -51,7 +51,7 @@ jobs: - name: Test backend image run: | expected_version=$(cat pyproject.toml | grep version | head -n 1 | cut -d '"' -f 2) - version=$(docker run --rm --entrypoint bash langflowai/langflow-backend:latest-dev -c 'python -c "from langflow.version import __version__ as langflow_version; print(langflow_version)"') + version=$(docker run --rm --entrypoint bash langflowai/langflow-backend:latest-dev -c "python -c 'from langflow.utils.version import get_version_info; print(get_version_info()[\"version\"])'") if [ "$expected_version" != "$version" ]; then echo "Expected version: $expected_version" echo "Actual version: $version" diff --git a/.github/workflows/fetch_docs_notion.yml b/.github/workflows/fetch_docs_notion.yml index b11f566bfe2f..fcc572cb5038 100644 --- a/.github/workflows/fetch_docs_notion.yml +++ b/.github/workflows/fetch_docs_notion.yml @@ -50,7 +50,7 @@ jobs: - name: Create Pull Request id: create_pr - uses: peter-evans/create-pull-request@v6 + uses: peter-evans/create-pull-request@v7 with: token: ${{ secrets.GITHUB_TOKEN }} commit-message: Update docs from Notion diff --git a/.github/workflows/integration_tests.yml b/.github/workflows/integration_tests.yml new file mode 100644 index 000000000000..df5480ac3606 --- /dev/null +++ b/.github/workflows/integration_tests.yml @@ -0,0 +1,50 @@ +name: Integration Tests + +on: + workflow_dispatch: + inputs: + ref: + description: "(Optional) ref to checkout" + required: false + type: string + workflow_call: + inputs: + python-versions: + description: "(Optional) Python versions to test" + required: true + type: string + default: "['3.10', '3.11', '3.12']" + ref: + description: "(Optional) ref to checkout" + required: false + type: string + +env: + POETRY_VERSION: "1.8.2" + +jobs: + integration-tests: + name: Run Integration Tests + runs-on: ubuntu-latest + strategy: + max-parallel: 1 # Currently, we can only run at a time for 
collection-per-db-constraints + matrix: + python-version: + - "3.12" + - "3.11" + - "3.10" + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + ASTRA_DB_API_ENDPOINT: ${{ secrets.ASTRA_DB_API_ENDPOINT }} + ASTRA_DB_APPLICATION_TOKEN: ${{ secrets.ASTRA_DB_APPLICATION_TOKEN }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ inputs.ref || github.ref }} + - name: "Setup Environment" + uses: ./.github/actions/setup-uv + - name: Run integration tests with api keys + timeout-minutes: 20 + run: | + make integration_tests_api_keys diff --git a/.github/workflows/lint-py.yml b/.github/workflows/lint-py.yml index e63a0e82960e..937de410dd83 100644 --- a/.github/workflows/lint-py.yml +++ b/.github/workflows/lint-py.yml @@ -23,21 +23,19 @@ jobs: - "3.11" - "3.10" steps: - - uses: actions/checkout@v4 + - name: Check out the code at a specific ref + uses: actions/checkout@v4 with: ref: ${{ inputs.branch || github.ref }} - - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }} - uses: "./.github/actions/poetry_caching" - with: - python-version: ${{ matrix.python-version }} - poetry-version: ${{ env.POETRY_VERSION }} - cache-key: ${{ runner.os }}-poetry-${{ env.POETRY_VERSION }}-${{ hashFiles('**/poetry.lock') }} - - name: Install Python dependencies - run: | - poetry env use ${{ matrix.python-version }} - poetry install + persist-credentials: true + - name: "Setup Environment" + uses: ./.github/actions/setup-uv + - name: Install the project + run: uv sync --dev - name: Run Mypy run: | - make lint + uv run mypy --namespace-packages -p "langflow" env: GITHUB_TOKEN: ${{ secrets.github_token }} + - name: Minimize uv cache + run: uv cache prune --ci diff --git a/.github/workflows/nightly_build.yml b/.github/workflows/nightly_build.yml index 9d8179635443..9890ee3e43e4 100644 --- a/.github/workflows/nightly_build.yml +++ b/.github/workflows/nightly_build.yml @@ -3,15 +3,199 @@ name: Nightly Build on: workflow_dispatch: 
schedule: - - cron: "0 0 * * *" # Run every day at midnight (UTC) + # Run job at 6:30 UTC, 10.30pm PST, or 11.30pm PDT + - cron: "30 6 * * *" env: - POETRY_VERSION: "1.8.2" + POETRY_VERSION: "1.8.3" + PYTHON_VERSION: "3.12" jobs: - hello-world: + create-nightly-tag: + if: github.repository == 'langflow-ai/langflow' runs-on: ubuntu-latest + defaults: + run: + shell: bash -ex -o pipefail {0} + permissions: + # Required to create tag + contents: write + outputs: + main_tag: ${{ steps.generate_main_tag.outputs.main_tag }} + base_tag: ${{ steps.set_base_tag.outputs.base_tag }} steps: - - name: Run hello world + - name: Checkout code + uses: actions/checkout@v4 + with: + persist-credentials: true + - name: "Setup Environment" + uses: ./.github/actions/setup-uv + - name: Install the project + run: uv sync --dev + + - name: Generate main nightly tag + id: generate_main_tag + run: | + # NOTE: This outputs the tag with the `v` prefix. + MAIN_TAG="$(uv run ./scripts/ci/pypi_nightly_tag.py main)" + echo "main_tag=$MAIN_TAG" >> $GITHUB_OUTPUT + echo "main_tag=$MAIN_TAG" + + - name: Check if main tag already exists + id: check_main_tag + run: | + git fetch --tags + if git rev-parse -q --verify "refs/tags/${{ steps.generate_main_tag.outputs.main_tag }}" >/dev/null; then + echo "main_tag_exists=true" >> $GITHUB_OUTPUT + else + echo "main_tag_exists=false" >> $GITHUB_OUTPUT + fi + + - name: Generate base nightly tag + id: generate_base_tag + if: ${{ steps.check_main_tag.outputs.main_tag_exists == 'false' }} + run: | + # NOTE: This outputs the tag with the `v` prefix. + BASE_TAG="$(uv run ./scripts/ci/pypi_nightly_tag.py base)" + echo "base_tag=$BASE_TAG" >> $GITHUB_OUTPUT + echo "base_tag=$BASE_TAG" + + - name: Commit tag + id: commit_tag + if: ${{ steps.check_main_tag.outputs.main_tag_exists == 'false' }} + run: | + # If the main tag does not exist in GH, we create the base tag from the existing codebase. 
+ + git config --global user.email "bot-nightly-builds@langflow.org" + git config --global user.name "Langflow Bot" + + MAIN_TAG="${{ steps.generate_main_tag.outputs.main_tag }}" + BASE_TAG="${{ steps.generate_base_tag.outputs.base_tag }}" + echo "Updating base project version to $BASE_TAG and updating main project version to $MAIN_TAG" + uv run ./scripts/ci/update_pyproject_combined.py main $MAIN_TAG $BASE_TAG + + uv lock + cd src/backend/base && uv lock && cd ../../.. + + git add pyproject.toml src/backend/base/pyproject.toml uv.lock src/backend/base/uv.lock + git commit -m "Update version and project name" + + echo "Tagging main with $MAIN_TAG" + if ! git tag -a $MAIN_TAG -m "Langflow nightly $MAIN_TAG"; then + echo "Tag creation failed. Exiting the workflow." + exit 1 + fi + + echo "Pushing main tag $MAIN_TAG" + if ! git push origin $MAIN_TAG; then + echo "Tag push failed. Check if the tag already exists. Exiting the workflow." + exit 1 + fi + # TODO: notify on failure + + - name: Checkout main nightly tag + uses: actions/checkout@v4 + if: ${{ steps.check_main_tag.outputs.main_tag_exists == 'true' }} + with: + ref: ${{ steps.generate_main_tag.outputs.main_tag }} + + - name: Retrieve Base Tag + id: retrieve_base_tag + if: ${{ steps.check_main_tag.outputs.main_tag_exists == 'true' }} + working-directory: src/backend/base + run: | + # If the main tag already exists, we need to retrieve the base version from the main tag codebase. + version=$(uv tree | grep 'langflow-base' | awk '{print $3}') + echo "base_tag=$version" >> $GITHUB_OUTPUT + echo "base_tag=$version" + + - name: Set Base Tag + id: set_base_tag run: | - echo "Hello, world!" 
+ if [ "${{ steps.retrieve_base_tag.conclusion }}" != "skipped" ] && [ "${{ steps.retrieve_base_tag.outputs.base_tag }}" ]; then + BASE_TAG="${{ steps.retrieve_base_tag.outputs.base_tag }}" + echo "base_tag=$BASE_TAG" >> $GITHUB_OUTPUT + echo "base_tag=$BASE_TAG" + elif [ "${{ steps.commit_tag.conclusion }}" != "skipped" ] && [ "${{ steps.generate_base_tag.outputs.base_tag }}" ]; then + BASE_TAG="${{ steps.generate_base_tag.outputs.base_tag }}" + echo "base_tag=$BASE_TAG" >> $GITHUB_OUTPUT + echo "base_tag=$BASE_TAG" + else + echo "No base tag found. Exiting the workflow." + exit 1 + fi + + frontend-tests: + if: github.repository == 'langflow-ai/langflow' + name: Run Frontend Tests + needs: create-nightly-tag + uses: ./.github/workflows/typescript_test.yml + with: + tests_folder: "tests" + secrets: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + STORE_API_KEY: ${{ secrets.STORE_API_KEY }} + + backend-unit-tests: + if: github.repository == 'langflow-ai/langflow' + name: Run Backend Unit Tests + needs: create-nightly-tag + uses: ./.github/workflows/python_test.yml + with: + python-versions: '["3.10", "3.11", "3.12"]' + + # Not making nightly builds dependent on integration test success + # due to inherent flakiness of 3rd party integrations + # Revisit when https://github.com/langflow-ai/langflow/pull/3607 is merged. 
+ # backend-integration-tests: + # name: Run Backend Integration Tests + # needs: create-nightly-tag + # uses: ./.github/workflows/integration_tests.yml + # with: + # python-versions: '["3.10", "3.11", "3.12"]' + # ref: ${{ needs.create-nightly-tag.outputs.tag }} + + release-nightly-build: + if: github.repository == 'langflow-ai/langflow' + name: Run Nightly Langflow Build + needs: [frontend-tests, backend-unit-tests, create-nightly-tag] + uses: ./.github/workflows/release_nightly.yml + with: + build_docker_base: true + build_docker_main: true + nightly_tag_main: ${{ needs.create-nightly-tag.outputs.main_tag }} + nightly_tag_base: ${{ needs.create-nightly-tag.outputs.base_tag }} + secrets: inherit + + # slack-notification: + # name: Send Slack Notification + # needs: run-nightly-build + # runs-on: ubuntu-latest + # steps: + # - name: Send success notification to Slack + # if: success() + # uses: slackapi/slack-github-action@v1.26.0 + # with: + # payload: | + # { + # "channel": "#langflow-nightly-builds", + # "username": "GitHub Actions", + # "text": "Nightly Build Successful :white_check_mark:", + # "icon_emoji": ":rocket:" + # } + # env: + # SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + + # - name: Send failure notification to Slack + # if: failure() + # uses: slackapi/slack-github-action@v1.26.0 + # with: + # payload: | + # { + # "channel": "#langflow-nightly-builds", + # "username": "GitHub Actions", + # "text": "Nightly Build Failed :x:", + # "icon_emoji": ":warning:" + # } + # env: + # SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} diff --git a/.github/workflows/py_autofix.yml b/.github/workflows/py_autofix.yml index eabed5afbf4d..db901491bed7 100644 --- a/.github/workflows/py_autofix.yml +++ b/.github/workflows/py_autofix.yml @@ -2,9 +2,7 @@ name: autofix.ci on: pull_request: paths: - - "poetry.lock" - - "pyproject.toml" - - "src/backend/**" + - "**/*.py" env: POETRY_VERSION: "1.8.2" @@ -14,31 +12,10 @@ jobs: runs-on: ubuntu-latest steps: - uses: 
actions/checkout@v4 - - uses: install-pinned/ruff@48a5818c5f7ce30e2822b67fb9c26d3e25d31fab - - run: ruff check --fix-only . - - run: ruff format . + - name: "Setup Environment" + uses: ./.github/actions/setup-uv + - run: uv run ruff check --fix-only . + - run: uv run ruff format . - uses: autofix-ci/action@dd55f44df8f7cdb7a6bf74c78677eb8acd40cd0a - lock: - name: Check Poetry lock - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Set up Python 3.12 - uses: "./.github/actions/poetry_caching" - with: - python-version: "3.12" - poetry-version: ${{ env.POETRY_VERSION }} - cache-key: ${{ runner.os }}-poetry-${{ env.POETRY_VERSION }}-${{ hashFiles('**/poetry.lock') }} - - name: Check poetry.lock - id: check - run: | - poetry check --lock - continue-on-error: true - - - name: Run lock - if : steps.check.outcome == 'failure' - run: | - make lock - - uses: autofix-ci/action@dd55f44df8f7cdb7a6bf74c78677eb8acd40cd0a - - name: Diff poetry.lock - uses: nborrmann/diff-poetry-lock@main + - name: Minimize uv cache + run: uv cache prune --ci diff --git a/.github/workflows/python_test.yml b/.github/workflows/python_test.yml index 1c3039a54af9..e7ee2ec20a01 100644 --- a/.github/workflows/python_test.yml +++ b/.github/workflows/python_test.yml @@ -8,12 +8,17 @@ on: required: true type: string default: "['3.10', '3.11', '3.12']" - workflow_dispatch: - inputs: - branch: - description: "(Optional) Branch to checkout" + ref: + description: "(Optional) ref to checkout" required: false type: string + nightly: + description: "Whether run is from the nightly build" + required: false + type: boolean + default: false + workflow_dispatch: + inputs: python-versions: description: "(Optional) Python versions to test" required: true @@ -22,6 +27,7 @@ on: env: POETRY_VERSION: "1.8.2" NODE_VERSION: "21" + PYTEST_RUN_PATH: "src/backend/tests" jobs: build: @@ -32,34 +38,75 @@ jobs: python-version: ${{ fromJson(inputs.python-versions || '["3.10", "3.11", "3.12"]' ) }} splitCount: 
[5] group: [1, 2, 3, 4, 5] - env: - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} steps: - uses: actions/checkout@v4 with: - ref: ${{ inputs.branch || github.ref }} + ref: ${{ inputs.ref || github.ref }} - name: Setup Node.js uses: actions/setup-node@v4 id: setup-node with: node-version: ${{ env.NODE_VERSION }} - - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }} - uses: "./.github/actions/poetry_caching" + - name: Install uv + uses: astral-sh/setup-uv@v3 with: - python-version: ${{ matrix.python-version }} - poetry-version: ${{ env.POETRY_VERSION }} - cache-key: ${{ runner.os }}-poetry-${{ env.POETRY_VERSION }}-${{ hashFiles('**/poetry.lock') }} - - name: Install Python dependencies - run: | - poetry env use ${{ matrix.python-version }} - poetry install + enable-cache: true + cache-dependency-glob: "uv.lock" + - name: "Set up Python" + uses: actions/setup-python@v5 + with: + python-version-file: "pyproject.toml" + - name: Restore uv cache + uses: actions/cache@v4 + with: + path: /tmp/.uv-cache + key: uv-${{ runner.os }}-${{ hashFiles('uv.lock') }} + restore-keys: | + uv-${{ runner.os }}-${{ hashFiles('uv.lock') }} + uv-${{ runner.os }} + - name: Install the project + run: uv sync --dev - name: Run unit tests uses: nick-fields/retry@v3 with: timeout_minutes: 12 max_attempts: 2 - command: make unit_tests async=false args="--splits ${{ matrix.splitCount }} --group ${{ matrix.group }}" - + command: make unit_tests async=false args="-x --splits ${{ matrix.splitCount }} --group ${{ matrix.group }}" + - name: Minimize uv cache + run: uv cache prune --ci + integration-tests: + name: Integration Tests - Python ${{ matrix.python-version }} + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ${{ fromJson(inputs.python-versions || '["3.10", "3.11", "3.12"]' ) }} + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ inputs.ref || github.ref }} + - name: Install uv + uses: astral-sh/setup-uv@v3 + with: + enable-cache: true + 
cache-dependency-glob: "uv.lock" + - name: "Set up Python" + uses: actions/setup-python@v5 + with: + python-version-file: "pyproject.toml" + - name: Restore uv cache + uses: actions/cache@v4 + with: + path: /tmp/.uv-cache + key: uv-${{ runner.os }}-${{ hashFiles('uv.lock') }} + restore-keys: | + uv-${{ runner.os }}-${{ hashFiles('uv.lock') }} + uv-${{ runner.os }} + - name: Install the project + run: uv sync --dev + - name: Run integration tests + run: make integration_tests_no_api_keys + - name: Minimize uv cache + run: uv cache prune --ci test-cli: name: Test CLI - Python ${{ matrix.python-version }} runs-on: ubuntu-latest @@ -67,18 +114,25 @@ jobs: matrix: python-version: ${{ fromJson(inputs.python-versions || '["3.10", "3.11", "3.12"]') }} steps: - - uses: actions/checkout@v4 - - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }} - uses: "./.github/actions/poetry_caching" + - name: Check out the code at a specific ref + uses: actions/checkout@v4 with: - python-version: ${{ matrix.python-version }} - poetry-version: ${{ env.POETRY_VERSION }} - cache-key: ${{ runner.os }}-poetry-${{ env.POETRY_VERSION }}-${{ hashFiles('**/poetry.lock') }} + ref: ${{ inputs.ref || github.ref }} + - name: "Setup Environment" + uses: ./.github/actions/setup-uv + - name: Check Version id: check-version + # We need to print $3 because langflow-base is a dependency of langflow + # For langflow we'd use print $2 run: | - version=$(cd src/backend/base && poetry version --short) - last_released_version=$(curl -s "https://pypi.org/pypi/langflow-base/json" | jq -r '.releases | keys | .[]' | sort -V | tail -n 1) + version=$(uv tree | grep 'langflow-base' | awk '{print $3}' | sed 's/^v//') + url="https://pypi.org/pypi/langflow-base/json" + if [ ${{ inputs.nightly }} == true ]; then + url="https://pypi.org/pypi/langflow-base-nightly/json" + fi + + last_released_version=$(curl -s $url | jq -r '.releases | keys | .[]' | sort -V | tail -n 1) + if [ "$version" != 
"$last_released_version" ]; then echo "Version $version has not been released yet. Skipping the rest of the job." echo skipped=true >> $GITHUB_OUTPUT @@ -90,26 +144,30 @@ jobs: - name: Build wheel if: steps.check-version.outputs.skipped == 'false' run: | - poetry env use ${{ matrix.python-version }} make build main=true - - name: Install wheel + - name: Install wheel and Test CLI if: steps.check-version.outputs.skipped == 'false' run: | - python -m pip install dist/*.whl + uv venv new-venv + source new-venv/bin/activate + uv pip install dist/*.whl - name: Test CLI if: steps.check-version.outputs.skipped == 'false' run: | + source new-venv/bin/activate python -m langflow run --host 127.0.0.1 --port 7860 --backend-only & SERVER_PID=$! # Wait for the server to start timeout 120 bash -c 'until curl -f http://127.0.0.1:7860/api/v1/auto_login; do sleep 5; done' || (echo "Server did not start in time" && kill $SERVER_PID && exit 1) # Terminate the server kill $SERVER_PID || (echo "Failed to terminate the server" && exit 1) - sleep 10 # give the server some time to terminate + sleep 20 # give the server some time to terminate # Check if the server is still running if kill -0 $SERVER_PID 2>/dev/null; then echo "Failed to terminate the server" - exit 1 + exit 0 else echo "Server terminated successfully" fi + - name: Minimize uv cache + run: uv cache prune --ci diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 90dd618f50a9..217cbaa98046 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -24,16 +24,23 @@ on: required: true type: boolean default: false + build_docker_ep: + description: "Build Docker Image for Langflow with Entrypoint" + required: false + type: boolean + default: false pre_release: description: "Pre-release" required: false type: boolean default: false + create_release: + description: "Whether to create a gh release" + required: false + type: boolean + default: true -env: - POETRY_VERSION: "1.8.2" - 
jobs: ci: if: ${{ github.event.inputs.release_package_base == 'true' || github.event.inputs.release_package_main == 'true' }} @@ -52,22 +59,16 @@ jobs: version: ${{ steps.check-version.outputs.version }} skipped: ${{ steps.check-version.outputs.skipped }} steps: - - uses: actions/checkout@v4 - - name: Install poetry - run: pipx install poetry==${{ env.POETRY_VERSION }} - - name: Set up Python 3.12 - uses: actions/setup-python@v5 - with: - python-version: "3.12" - cache: "poetry" - - name: Set up Nodejs 20 - uses: actions/setup-node@v4 - with: - node-version: "20" + - name: Checkout code + uses: actions/checkout@v4 + - name: Setup Environment + uses: ./.github/actions/setup-uv + - name: Install the project + run: uv sync --dev - name: Check Version id: check-version run: | - version=$(cd src/backend/base && poetry version --short) + version=$(uv tree | grep 'langflow-base' | awk '{print $3}' | sed 's/^v//') last_released_version=$(curl -s "https://pypi.org/pypi/langflow-base/json" | jq -r '.releases | keys | .[]' | sort -V | tail -n 1) if [ "$version" = "$last_released_version" ]; then echo "Version $version is already released. Skipping release." @@ -79,30 +80,34 @@ jobs: fi - name: Build project for distribution if: steps.check-version.outputs.skipped == 'false' - run: make build base=true + run: make build base=true args="--wheel" - name: Test CLI if: steps.check-version.outputs.skipped == 'false' run: | - python -m pip install src/backend/base/dist/*.whl - python -m langflow run --host 127.0.0.1 --port 7860 & + # TODO: Unsure why the whl is not built in src/backend/base/dist + mkdir src/backend/base/dist + mv dist/*.whl src/backend/base/dist + uv pip install src/backend/base/dist/*.whl + uv run python -m langflow run --host 127.0.0.1 --port 7860 --backend-only & SERVER_PID=$! 
# Wait for the server to start timeout 120 bash -c 'until curl -f http://127.0.0.1:7860/api/v1/auto_login; do sleep 2; done' || (echo "Server did not start in time" && kill $SERVER_PID && exit 1) # Terminate the server kill $SERVER_PID || (echo "Failed to terminate the server" && exit 1) - sleep 10 # give the server some time to terminate + sleep 20 # give the server some time to terminate # Check if the server is still running if kill -0 $SERVER_PID 2>/dev/null; then echo "Failed to terminate the server" - exit 1 + exit 0 else echo "Server terminated successfully" fi - name: Publish to PyPI if: steps.check-version.outputs.skipped == 'false' env: - POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }} - run: make publish base=true + UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }} + run: | + make publish base=true - name: Upload Artifact if: steps.check-version.outputs.skipped == 'false' uses: actions/upload-artifact@v4 @@ -118,24 +123,19 @@ jobs: outputs: version: ${{ steps.check-version.outputs.version }} steps: - - uses: actions/checkout@v4 - - name: Install poetry - run: pipx install poetry==${{ env.POETRY_VERSION }} - - name: Set up Python 3.12 - uses: actions/setup-python@v5 - with: - python-version: "3.12" - cache: "poetry" - - name: Set up Nodejs 20 - uses: actions/setup-node@v4 - with: - node-version: "20" + - name: Checkout code + uses: actions/checkout@v4 + - name: Setup Environment + uses: ./.github/actions/setup-uv + - name: Install the project + run: uv sync --dev + # If pre-release is true, we need to check if ["a", "b", "rc", "dev", "post"] is in the version string # if the version string is incorrect, we need to exit the workflow - name: Check if pre-release if: inputs.pre_release == 'true' run: | - version=$(poetry version --short) + version=$(uv tree | grep 'langflow' | grep -v 'langflow-base' | awk '{print $2}' | sed 's/^v//') if [[ "${version}" =~ ^([0-9]+\.)?([0-9]+\.)?[0-9]+((a|b|rc|dev|post)([0-9]+))$ ]]; then echo "Pre-release version 
detected. Continuing with the release." else @@ -145,7 +145,7 @@ jobs: - name: Check Version id: check-version run: | - version=$(poetry version --short) + version=$(uv tree | grep 'langflow' | grep -v 'langflow-base' | awk '{print $2}' | sed 's/^v//') last_released_version=$(curl -s "https://pypi.org/pypi/langflow/json" | jq -r '.releases | keys | .[]' | sort -V | tail -n 1) if [ "$version" = "$last_released_version" ]; then echo "Version $version is already released. Skipping release." @@ -155,31 +155,32 @@ jobs: fi - name: Wait for PyPI Propagation if: needs.release-base.outputs.skipped == 'false' - run: sleep 300 # wait for 5 minutes to ensure PyPI propagation + run: sleep 300 # wait for 5 minutes to ensure PyPI propagation - name: Build project for distribution - run: make build main=true + run: make build main=true args="--no-sources --wheel" - name: Test CLI run: | - python -m pip install dist/*.whl - python -m langflow run --host 127.0.0.1 --port 7860 --backend-only & + uv pip install dist/*.whl + uv run python -m langflow run --host 127.0.0.1 --port 7860 --backend-only & SERVER_PID=$! 
# Wait for the server to start timeout 120 bash -c 'until curl -f http://127.0.0.1:7860/health_check; do sleep 2; done' || (echo "Server did not start in time" && kill $SERVER_PID && exit 1) # Terminate the server kill $SERVER_PID || (echo "Failed to terminate the server" && exit 1) - sleep 10 # give the server some time to terminate + sleep 20 # give the server some time to terminate # Check if the server is still running if kill -0 $SERVER_PID 2>/dev/null; then echo "Failed to terminate the server" - exit 1 + exit 0 else echo "Server terminated successfully" fi - name: Publish to PyPI env: - POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }} - run: make publish main=true + UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }} + run: | + make publish main=true - name: Upload Artifact uses: actions/upload-artifact@v4 with: @@ -188,36 +189,38 @@ jobs: call_docker_build_base: name: Call Docker Build Workflow for Langflow Base - if : inputs.build_docker_base == true + if: inputs.build_docker_base == true + needs: [release-base, release-main] uses: ./.github/workflows/docker-build.yml - strategy: - matrix: - release_type: - - base with: - # version should be needs.release-base.outputs.version if release_type is base - # version should be needs.release-main.outputs.version if release_type is main - version: '' - release_type: ${{ matrix.release_type }} + base_version: ${{ needs.release-base.outputs.version }} + main_version: ${{ needs.release-main.outputs.version }} + release_type: base pre_release: ${{ inputs.pre_release }} secrets: inherit call_docker_build_main: name: Call Docker Build Workflow for Langflow - if : inputs.build_docker_main == true + if: inputs.build_docker_main == true + needs: [release-main] uses: ./.github/workflows/docker-build.yml - strategy: - matrix: - release_type: - - main with: - # version should be needs.release-base.outputs.version if release_type is base - # version should be needs.release-main.outputs.version if release_type is main - 
version: '' - release_type: ${{ matrix.release_type }} + main_version: ${{ needs.release-main.outputs.version }} + release_type: main pre_release: ${{ inputs.pre_release }} secrets: inherit + call_docker_build_main_ep: + name: Call Docker Build Workflow for Langflow with Entrypoint + if: inputs.build_docker_ep == true + needs: [release-main] + uses: ./.github/workflows/docker-build.yml + with: + main_version: ${{ needs.release-main.outputs.version }} + release_type: main-ep + pre_release: False + secrets: inherit + create_release: name: Create Release runs-on: ubuntu-latest @@ -236,4 +239,4 @@ jobs: generateReleaseNotes: true prerelease: ${{ inputs.pre_release }} tag: v${{ needs.release-main.outputs.version }} - commit: ${{ github.ref }} \ No newline at end of file + commit: ${{ github.ref }} diff --git a/.github/workflows/release_nightly.yml b/.github/workflows/release_nightly.yml new file mode 100644 index 000000000000..88c1e0420cd3 --- /dev/null +++ b/.github/workflows/release_nightly.yml @@ -0,0 +1,233 @@ +name: Langflow Nightly Build +run-name: Langflow Nightly Release by @${{ github.actor }} + +on: + workflow_dispatch: + inputs: + build_docker_base: + description: "Build Docker Image for Langflow Nightly Base" + required: true + type: boolean + default: false + build_docker_main: + description: "Build Docker Image for Langflow Nightly" + required: true + type: boolean + default: false + build_docker_ep: + description: "Build Docker Image for Langflow Nightly with Entrypoint" + required: false + type: boolean + default: false + nightly_tag_main: + description: "Tag for the nightly main build" + required: true + type: string + nightly_tag_base: + description: "Tag for the nightly base build" + required: true + type: string + workflow_call: + inputs: + build_docker_base: + description: "Build Docker Image for Langflow Nightly Base" + required: true + type: boolean + default: false + build_docker_main: + description: "Build Docker Image for Langflow Nightly" + 
required: true + type: boolean + default: false + build_docker_ep: + description: "Build Docker Image for Langflow Nightly with Entrypoint" + required: false + type: boolean + default: false + nightly_tag_main: + description: "Tag for the nightly main build" + required: true + type: string + nightly_tag_base: + description: "Tag for the nightly base build" + required: true + type: string + +env: + POETRY_VERSION: "1.8.3" + PYTHON_VERSION: "3.12" + +jobs: + release-nightly-base: + name: Release Langflow Nightly Base + runs-on: ubuntu-latest + defaults: + run: + shell: bash + outputs: + version: ${{ steps.verify.outputs.version }} + steps: + - name: Check out the code at a specific ref + uses: actions/checkout@v4 + with: + ref: ${{ inputs.nightly_tag_main }} + persist-credentials: true + - name: "Setup Environment" + uses: ./.github/actions/setup-uv + - name: Install the project + run: uv sync --dev + + - name: Verify Nightly Name and Version + id: verify + run: | + name=$(uv tree | grep 'langflow-base' | awk '{print $2}') + version=$(uv tree | grep 'langflow-base' | awk '{print $3}') + if [ "$name" != "langflow-base-nightly" ]; then + echo "Name $name does not match langflow-base-nightly. Exiting the workflow." + exit 1 + fi + if [ "$version" != "${{ inputs.nightly_tag_base }}" ]; then + echo "Version $version does not match nightly tag ${{ inputs.nightly_tag_base }}. Exiting the workflow." + exit 1 + fi + # Strip the leading `v` from the version + version=$(echo $version | sed 's/^v//') + echo "version=$version" >> $GITHUB_OUTPUT + + - name: Build project for distribution + run: make build base=true args="--wheel" + + - name: Test CLI + run: | + # TODO: Unsure why the whl is not built in src/backend/base/dist + mkdir src/backend/base/dist + mv dist/*.whl src/backend/base/dist/ + uv pip install src/backend/base/dist/*.whl + uv run python -m langflow run --host 127.0.0.1 --port 7860 --backend-only & + SERVER_PID=$! 
+ # Wait for the server to start + timeout 120 bash -c 'until curl -f http://127.0.0.1:7860/api/v1/auto_login; do sleep 2; done' || (echo "Server did not start in time" && kill $SERVER_PID && exit 1) + # Terminate the server + kill $SERVER_PID || (echo "Failed to terminate the server" && exit 1) + sleep 20 # give the server some time to terminate + # Check if the server is still running + if kill -0 $SERVER_PID 2>/dev/null; then + echo "Failed to terminate the server" + exit 0 + else + echo "Server terminated successfully" + fi + + - name: Publish to PyPI + env: + POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }} + UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }} + run: | + make publish base=true + + - name: Upload Artifact + uses: actions/upload-artifact@v4 + with: + name: dist-base + path: src/backend/base/dist + + release-nightly-main: + name: Release Langflow Nightly Main + needs: [release-nightly-base] + runs-on: ubuntu-latest + outputs: + version: ${{ steps.verify.outputs.version }} + defaults: + run: + shell: bash + steps: + - name: Check out the code at a specific ref + uses: actions/checkout@v4 + with: + ref: ${{ inputs.nightly_tag_main}} + persist-credentials: true + - name: "Setup Environment" + uses: ./.github/actions/setup-uv + - name: Install the project + run: uv sync --dev + + - name: Verify Nightly Name and Version + id: verify + run: | + name=$(uv tree | grep 'langflow' | grep -v 'langflow-base' | awk '{print $1}') + version=$(uv tree | grep 'langflow' | grep -v 'langflow-base' | awk '{print $2}') + if [ "$name" != "langflow-nightly" ]; then + echo "Name $name does not match langflow-nightly. Exiting the workflow." + exit 1 + fi + if [ "$version" != "${{ inputs.nightly_tag_main }}" ]; then + echo "Version $version does not match nightly tag ${{ inputs.nightly_tag_main }}. Exiting the workflow." 
+ exit 1 + fi + # Strip the leading `v` from the version + version=$(echo $version | sed 's/^v//') + echo "version=$version" >> $GITHUB_OUTPUT + - name: Wait for PyPI Propagation + run: sleep 300 # wait for 5 minutes to ensure PyPI propagation of base + + - name: Build project for distribution + run: make build main=true args="--no-sources --wheel" + - name: Test CLI + run: | + uv pip install dist/*.whl + uv run python -m langflow run --host 127.0.0.1 --port 7860 --backend-only & + SERVER_PID=$! + # Wait for the server to start + timeout 120 bash -c 'until curl -f http://127.0.0.1:7860/health_check; do sleep 2; done' || (echo "Server did not start in time" && kill $SERVER_PID && exit 1) + # Terminate the server + kill $SERVER_PID || (echo "Failed to terminate the server" && exit 1) + sleep 20 # give the server some time to terminate + # Check if the server is still running + if kill -0 $SERVER_PID 2>/dev/null; then + echo "Failed to terminate the server" + exit 0 + else + echo "Server terminated successfully" + fi + - name: Publish to PyPI + env: + POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }} + UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }} + run: | + make publish main=true + - name: Upload Artifact + uses: actions/upload-artifact@v4 + with: + name: dist-main + path: dist + + call_docker_build_base: + name: Call Docker Build Workflow for Langflow Base + if: always() && ${{ inputs.build_docker_base == 'true' }} + needs: [release-nightly-base, release-nightly-main] + uses: ./.github/workflows/docker-build.yml + with: + release_type: nightly-base + base_version: ${{ inputs.nightly_tag_base }} + main_version: ${{ inputs.nightly_tag_main }} + secrets: inherit + + call_docker_build_main: + name: Call Docker Build Workflow for Langflow + if: always() && ${{ inputs.build_docker_main == 'true' }} + needs: [release-nightly-main] + uses: ./.github/workflows/docker-build.yml + with: + release_type: nightly-main + main_version: ${{ inputs.nightly_tag_main }} + 
secrets: inherit + + call_docker_build_main_ep: + name: Call Docker Build Workflow for Langflow with Entrypoint + if: always() && ${{ inputs.build_docker_ep == 'true' }} + needs: [release-nightly-main] + uses: ./.github/workflows/docker-build.yml + with: + release_type: main-ep + main_version: ${{ inputs.nightly_tag_main }} + secrets: inherit diff --git a/.github/workflows/scheduled_integration_test.yml b/.github/workflows/scheduled_integration_test.yml deleted file mode 100644 index 56afc035f2ea..000000000000 --- a/.github/workflows/scheduled_integration_test.yml +++ /dev/null @@ -1,49 +0,0 @@ -name: Integration tests - -on: - workflow_dispatch: - inputs: - branch: - description: "(Optional) Branch to checkout" - required: false - type: string - schedule: - - cron: "0 0 */2 * *" # Run every 2 days - -env: - POETRY_VERSION: "1.8.2" - -jobs: - test-integration: - name: Run Integration Tests - runs-on: ubuntu-latest - strategy: - max-parallel: 1 # Currently, we can only run at a time for collection-per-db-constraints - matrix: - python-version: - - "3.12" - - "3.11" - - "3.10" - env: - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - ASTRA_DB_API_ENDPOINT: ${{ secrets.ASTRA_DB_API_ENDPOINT }} - ASTRA_DB_APPLICATION_TOKEN: ${{ secrets.ASTRA_DB_APPLICATION_TOKEN }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ inputs.branch || github.ref }} - - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }} - uses: "./.github/actions/poetry_caching" - with: - python-version: ${{ matrix.python-version }} - poetry-version: ${{ env.POETRY_VERSION }} - cache-key: ${{ runner.os }}-poetry-${{ env.POETRY_VERSION }}-${{ hashFiles('**/poetry.lock') }} - - name: Install Python dependencies - run: | - poetry env use ${{ matrix.python-version }} - poetry install - - name: Run integration tests - timeout-minutes: 12 - run: | - make integration_tests diff --git a/.github/workflows/style-check-py.yml 
b/.github/workflows/style-check-py.yml index dfab075ce67e..25abc5e4319d 100644 --- a/.github/workflows/style-check-py.yml +++ b/.github/workflows/style-check-py.yml @@ -3,11 +3,12 @@ name: Ruff Style Check on: pull_request: types: [opened, synchronize, reopened, auto_merge_enabled] + paths: + - "**/*.py" + -env: - POETRY_VERSION: "1.8.2" jobs: lint: @@ -18,18 +19,13 @@ jobs: python-version: - "3.12" steps: - - uses: actions/checkout@v4 - - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }} - uses: "./.github/actions/poetry_caching" - with: - python-version: ${{ matrix.python-version }} - poetry-version: ${{ env.POETRY_VERSION }} - cache-key: ${{ runner.os }}-poetry-${{ env.POETRY_VERSION }}-${{ hashFiles('**/poetry.lock') }} - - name: Install Python dependencies - run: | - poetry env use ${{ matrix.python-version }} - poetry install + - name: Check out the code at a specific ref + uses: actions/checkout@v4 + - name: "Setup Environment" + uses: ./.github/actions/setup-uv - name: Register problem matcher run: echo "::add-matcher::.github/workflows/matchers/ruff.json" - name: Run Ruff Check - run: poetry run ruff check --output-format=github . + run: uv run --only-dev ruff check --output-format=github . 
+ - name: Minimize uv cache + run: uv cache prune --ci diff --git a/.github/workflows/typescript_test.yml b/.github/workflows/typescript_test.yml index 4bcd9834dcfc..2ef7e6579f29 100644 --- a/.github/workflows/typescript_test.yml +++ b/.github/workflows/typescript_test.yml @@ -13,12 +13,12 @@ on: required: false type: string default: "tests" - workflow_dispatch: - inputs: - branch: - description: "(Optional) Branch to checkout" + ref: + description: "(Optional) ref to checkout" required: false type: string + workflow_dispatch: + inputs: tests_folder: description: "(Optional) Tests to run" required: false @@ -26,7 +26,6 @@ on: default: "tests" env: - POETRY_VERSION: "1.8.3" NODE_VERSION: "21" PYTHON_VERSION: "3.12" # Define the directory where Playwright browsers will be installed. @@ -42,10 +41,11 @@ jobs: matrix: shardIndex: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] shardTotal: [10] + env: OPENAI_API_KEY: ${{ inputs.openai_api_key || secrets.OPENAI_API_KEY }} STORE_API_KEY: ${{ inputs.store_api_key || secrets.STORE_API_KEY }} - BRAVE_SEARCH_API_KEY: "${{ secrets.BRAVE_SEARCH_API_KEY }}" + SEARCH_API_KEY: "${{ secrets.SEARCH_API_KEY }}" ASTRA_DB_APPLICATION_TOKEN: "${{ secrets.ASTRA_DB_APPLICATION_TOKEN }}" ASTRA_DB_API_ENDPOINT: "${{ secrets.ASTRA_DB_API_ENDPOINT }}" outputs: @@ -54,9 +54,9 @@ jobs: - name: Checkout code uses: actions/checkout@v4 with: - # If branch is passed as input, checkout that branch - # else checkout the default branch - ref: ${{ github.event.inputs.branch || github.ref }} + # If ref is passed as input, checkout that ref + # else checkout the default ref + ref: ${{ inputs.ref || github.ref }} - name: Setup Node.js uses: actions/setup-node@v4 @@ -103,17 +103,10 @@ jobs: npx playwright install-deps if: steps.playwright-cache.outputs.cache-hit != 'true' - - name: Set up Python ${{ env.PYTHON_VERSION }} + Poetry ${{ env.POETRY_VERSION }} - uses: "./.github/actions/poetry_caching" - with: - python-version: ${{ env.PYTHON_VERSION }} - poetry-version: ${{ 
env.POETRY_VERSION }} - cache-key: tests - - - name: Install Python dependencies - run: | - poetry env use ${{ env.PYTHON_VERSION }} - poetry install + - name: "Setup Environment" + uses: ./.github/actions/setup-uv + - name: Install the project + run: uv sync --dev - name: create .env run: | @@ -127,6 +120,7 @@ jobs: max_attempts: 2 command: | cd src/frontend + npx playwright test ${{ inputs.tests_folder }} --shard ${{ matrix.shardIndex }}/${{ matrix.shardTotal }} --list npx playwright test ${{ inputs.tests_folder }} --trace on --shard ${{ matrix.shardIndex }}/${{ matrix.shardTotal }} --workers 2 - name: Upload blob report to GitHub Actions Artifacts @@ -137,6 +131,9 @@ jobs: path: src/frontend/blob-report retention-days: 1 + - name: Minimize uv cache + run: uv cache prune --ci + merge-reports: # We need to repeat the condition at every step # https://github.com/actions/runner/issues/662 diff --git a/.gitignore b/.gitignore index f3fb37a39f83..9be9589dce31 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,7 @@ yarn-error.log* lerna-debug.log* qdrant_storage +.dspy_cache # Mac .DS_Store @@ -273,3 +274,6 @@ src/frontend/temp *-shm *-wal .history + +.dspy_cache/ +*.db diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 16599a199c7b..42b3ae39bba5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,4 +1,3 @@ -fail_fast: true repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.1.0 @@ -12,3 +11,15 @@ repos: args: - --fix=lf - id: trailing-whitespace + - repo: local + hooks: + - id: ruff-check + name: ruff check + language: system + entry: bash -c "uv run ruff check" + types: [file, python] + - id: ruff-format + name: ruff format + language: system + entry: bash -c "uv run ruff format" + types: [file, python] diff --git a/.vscode/launch.json b/.vscode/launch.json index 1f66413d183f..1bc163fb5815 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -6,6 +6,8 @@ "type": "debugpy", "request": "launch", 
"module": "uvicorn", + "preLaunchTask": "Install Backend", + "args": [ "--factory", "langflow.main:create_app", diff --git a/.vscode/tasks.json b/.vscode/tasks.json index 25b480b27a28..5142610af1f1 100644 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -24,7 +24,7 @@ { "label": "Test", "type": "shell", - "command": "make tests" + "command": "make unit_tests" }, // make lint { @@ -43,6 +43,12 @@ "label": "Install", "type": "shell", "command": "make install_backend && make install_frontend" + }, + // install backend + { + "label": "Install Backend", + "type": "shell", + "command": "make install_backend" } ] } diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7247d2b7a6ba..960f6522afcc 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -6,7 +6,6 @@ to contributions, whether it be in the form of a new feature, improved infra, or To contribute to this project, please follow the [fork and pull request](https://docs.github.com/en/get-started/quickstart/contributing-to-projects) workflow. - ## Reporting bugs or suggesting improvements Our [GitHub issues](https://github.com/langflow-ai/langflow/issues) page is kept up to date @@ -32,10 +31,9 @@ so that more people can benefit from it. [collapses the content](https://developer.mozilla.org/en/docs/Web/HTML/Element/details) so it only becomes visible on click, making the issue easier to read and follow. - ## Contributing code and documentation -You can develop Langflow locally via Poetry + NodeJS or docker-compose. +You can develop Langflow locally via uv + NodeJS. ### Clone the Langflow Repository @@ -50,23 +48,24 @@ git remote add fork https://github.com//langflow.git We also provide a .vscode/launch.json file for debugging the backend in VSCode, which is a lot faster than using docker compose. ### Prepare the environment + Setting up hooks: ```bash make init ``` -This will install the pre-commit hooks, which will run `make format` on every commit. 
+This will set up the development environment by installing backend and frontend dependencies, building the frontend static files, and initializing the project. It runs `make install_backend`, `make install_frontend`, `make build_frontend`, and finally `uv run langflow run` to start the application. -It is advised to run `make lint` before pushing to the repository. +It is advised to run `make lint`, `make format`, and `make unit_tests` before pushing to the repository. -### Run locally (Poetry and Node.js) +### Run locally (uv and Node.js) Langflow can run locally by cloning the repository and installing the dependencies. We recommend using a virtual environment to isolate the dependencies from your system. Before you start, make sure you have the following installed: -- Poetry (>=1.4) +- uv (>=0.4) - Node.js Then, in the root folder, install the dependencies and start the development server for the backend: @@ -81,17 +80,6 @@ And the frontend: make frontend ``` - -### Run locally (docker compose) - -The following snippet will run the backend and frontend in separate containers. The frontend will be available at `localhost:3000` and the backend at `localhost:7860`. - -```bash -docker compose up --build -# or -make dev build=1 -``` - ### Run documentation The documentation is built using [Docusaurus](https://docusaurus.io/). To run the documentation locally, run the following commands: @@ -104,8 +92,8 @@ npm run start The documentation will be available at `localhost:3000` and all the files are located in the `docs/docs` folder. - ## Opening a pull request + Once you wrote and manually tested your change, you can start sending the patch to the main repository. - Open a new GitHub pull request with the patch against the `main` branch. 
diff --git a/Makefile b/Makefile index 71c7baa56905..b928ccc0a44c 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ DOCKERFILE=docker/build_and_push.Dockerfile DOCKERFILE_BACKEND=docker/build_and_push_backend.Dockerfile DOCKERFILE_FRONTEND=docker/frontend/build_and_push_frontend.Dockerfile DOCKER_COMPOSE=docker_example/docker-compose.yml -PYTHON_REQUIRED=$(shell grep '^python[[:space:]]*=' pyproject.toml | sed -n 's/.*"\([^"]*\)".*/\1/p') +PYTHON_REQUIRED=$(shell grep '^requires-python[[:space:]]*=' pyproject.toml | sed -n 's/.*"\([^"]*\)".*/\1/p') RED=\033[0;31m NC=\033[0m # No Color GREEN=\033[0;32m @@ -19,6 +19,8 @@ open_browser ?= true path = src/backend/base/langflow/frontend workers ?= 1 async ?= true +lf ?= false +ff ?= true all: help ###################### @@ -35,20 +37,10 @@ patch: ## bump the version in langflow and langflow-base # check for required tools check_tools: - @command -v poetry >/dev/null 2>&1 || { echo >&2 "$(RED)Poetry is not installed. Aborting.$(NC)"; exit 1; } + @command -v uv >/dev/null 2>&1 || { echo >&2 "$(RED)uv is not installed. Aborting.$(NC)"; exit 1; } @command -v npm >/dev/null 2>&1 || { echo >&2 "$(RED)NPM is not installed. Aborting.$(NC)"; exit 1; } - @command -v docker >/dev/null 2>&1 || { echo >&2 "$(RED)Docker is not installed. Aborting.$(NC)"; exit 1; } - @command -v pipx >/dev/null 2>&1 || { echo >&2 "$(RED)pipx is not installed. Aborting.$(NC)"; exit 1; } - @$(MAKE) check_env @echo "$(GREEN)All required tools are installed.$(NC)" -# check if Python version is compatible -check_env: ## check if Python version is compatible - @chmod +x scripts/setup/check_env.sh - @PYTHON_INSTALLED=$$(scripts/setup/check_env.sh python --version 2>&1 | awk '{print $$2}'); \ - if ! 
scripts/setup/check_env.sh python -c "import sys; from packaging.specifiers import SpecifierSet; from packaging.version import Version; sys.exit(not SpecifierSet('$(PYTHON_REQUIRED)').contains(Version('$$PYTHON_INSTALLED')))" 2>/dev/null; then \ - echo "$(RED)Error: Python version $$PYTHON_INSTALLED is not compatible with the required version $(PYTHON_REQUIRED). Aborting.$(NC)"; exit 1; \ - fi help: ## show this help message @echo '----' @@ -61,25 +53,30 @@ help: ## show this help message # INSTALL PROJECT ###################### +reinstall_backend: ## forces reinstall all dependencies (no caching) + @echo 'Installing backend dependencies' + @uv sync -n --reinstall --frozen + install_backend: ## install the backend dependencies @echo 'Installing backend dependencies' - @poetry install + @uv sync --frozen install_frontend: ## install the frontend dependencies @echo 'Installing frontend dependencies' - cd src/frontend && npm install + @cd src/frontend && npm install > /dev/null 2>&1 build_frontend: ## build the frontend static files - cd src/frontend && CI='' npm run build - rm -rf src/backend/base/langflow/frontend - cp -r src/frontend/build src/backend/base/langflow/frontend + @echo 'Building frontend static files' + @cd src/frontend && CI='' npm run build > /dev/null 2>&1 + @rm -rf src/backend/base/langflow/frontend + @cp -r src/frontend/build src/backend/base/langflow/frontend init: check_tools clean_python_cache clean_npm_cache ## initialize the project - make install_backend - make install_frontend - make build_frontend + @make install_backend + @make install_frontend + @make build_frontend @echo "$(GREEN)All requirements are installed.$(NC)" - python -m langflow run + @uv run langflow run ###################### # CLEAN PROJECT @@ -91,6 +88,7 @@ clean_python_cache: find . -type f -name '*.py[cod]' -exec rm -f {} + find . -type f -name '*~' -exec rm -f {} + find . -type f -name '.*~' -exec rm -f {} + + find . 
-type d -empty -delete @echo "$(GREEN)Python cache cleaned.$(NC)" clean_npm_cache: @@ -102,21 +100,21 @@ clean_npm_cache: clean_all: clean_python_cache clean_npm_cache # clean all caches and temporary directories @echo "$(GREEN)All caches and temporary directories cleaned.$(NC)" -setup_poetry: ## install poetry using pipx - pipx install poetry +setup_uv: ## install uv using pipx + pipx install uv add: @echo 'Adding dependencies' ifdef devel - cd src/backend/base && poetry add --group dev $(devel) + @cd src/backend/base && uv add --group dev $(devel) endif ifdef main - poetry add $(main) + @uv add $(main) endif ifdef base - cd src/backend/base && poetry add $(base) + @cd src/backend/base && uv add $(base) endif @@ -126,29 +124,39 @@ endif ###################### coverage: ## run the tests and generate a coverage report - @poetry run coverage run - @poetry run coverage erase + @uv run coverage run + @uv run coverage erase unit_tests: ## run unit tests -ifeq ($(async), true) - poetry run pytest src/backend/tests \ - --ignore=src/backend/tests/integration \ - --instafail -n auto -ra -m "not api_key_required" \ - --durations-path src/backend/tests/.test_durations \ - --splitting-algorithm least_duration \ + @uv sync --extra dev --frozen + @EXTRA_ARGS="" + @if [ "$(async)" = "true" ]; then \ + EXTRA_ARGS="$$EXTRA_ARGS --instafail -n auto"; \ + fi; \ + if [ "$(lf)" = "true" ]; then \ + EXTRA_ARGS="$$EXTRA_ARGS --lf"; \ + fi; \ + if [ "$(ff)" = "true" ]; then \ + EXTRA_ARGS="$$EXTRA_ARGS --ff"; \ + fi; \ + uv run pytest src/backend/tests --ignore=src/backend/tests/integration $$EXTRA_ARGS --instafail -ra -m 'not api_key_required' --durations-path src/backend/tests/.test_durations --splitting-algorithm least_duration $(args) + +unit_tests_looponfail: + @make unit_tests args="-f" + +integration_tests: + uv run pytest src/backend/tests/integration \ + --instafail -ra \ $(args) -else - poetry run pytest src/backend/tests \ - --ignore=src/backend/tests/integration \ + 
+integration_tests_no_api_keys: + uv run pytest src/backend/tests/integration \ --instafail -ra -m "not api_key_required" \ - --durations-path src/backend/tests/.test_durations \ - --splitting-algorithm least_duration \ $(args) -endif -integration_tests: ## run integration tests - poetry run pytest src/backend/tests/integration \ - --instafail -ra \ +integration_tests_api_keys: + uv run pytest src/backend/tests/integration \ + --instafail -ra -m "api_key_required" \ $(args) tests: ## run unit, integration, coverage tests @@ -172,42 +180,42 @@ fix_codespell: ## run codespell to fix spelling errors poetry run codespell --toml pyproject.toml --write format: ## run code formatters - poetry run ruff check . --fix - poetry run ruff format . - cd src/frontend && npm run format + @uv run ruff check . --fix + @uv run ruff format . --config pyproject.toml + @cd src/frontend && npm run format -lint: ## run linters - poetry run mypy --namespace-packages -p "langflow" +unsafe_fix: + @uv run ruff check . 
--fix --unsafe-fixes + +lint: install_backend ## run linters + @uv run mypy --namespace-packages -p "langflow" install_frontendci: - cd src/frontend && npm ci + @cd src/frontend && npm ci > /dev/null 2>&1 install_frontendc: - cd src/frontend && rm -rf node_modules package-lock.json && npm install + @cd src/frontend && rm -rf node_modules package-lock.json && npm install > /dev/null 2>&1 run_frontend: ## run the frontend @-kill -9 `lsof -t -i:3000` - cd src/frontend && npm start + @cd src/frontend && npm start tests_frontend: ## run frontend tests ifeq ($(UI), true) - cd src/frontend && npx playwright test --ui --project=chromium + @cd src/frontend && npx playwright test --ui --project=chromium else - cd src/frontend && npx playwright test --project=chromium + @cd src/frontend && npx playwright test --project=chromium endif -run_cli: +run_cli: install_frontend install_backend build_frontend ## run the CLI @echo 'Running the CLI' - @make install_frontend > /dev/null - @echo 'Install backend dependencies' - @make install_backend > /dev/null - @echo 'Building the frontend' - @make build_frontend > /dev/null -ifdef env - @make start env=$(env) host=$(host) port=$(port) log_level=$(log_level) -else - @make start host=$(host) port=$(port) log_level=$(log_level) -endif + @uv run langflow run \ + --frontend-path $(path) \ + --log-level $(log_level) \ + --host $(host) \ + --port $(port) \ + $(if $(env),--env-file $(env),) \ + $(if $(filter false,$(open_browser)),--no-open-browser) run_cli_debug: @echo 'Running the CLI in debug mode' @@ -222,128 +230,89 @@ else @make start host=$(host) port=$(port) log_level=debug endif -start: - @echo 'Running the CLI' - -ifeq ($(open_browser),false) - @make install_backend && poetry run langflow run \ - --path $(path) \ - --log-level $(log_level) \ - --host $(host) \ - --port $(port) \ - --env-file $(env) \ - --no-open-browser -else - @make install_backend && poetry run langflow run \ - --path $(path) \ - --log-level $(log_level) \ - --host 
$(host) \ - --port $(port) \ - --env-file $(env) -endif setup_devcontainer: ## set up the development container make install_backend make install_frontend make build_frontend - poetry run langflow --path src/frontend/build + uv run langflow --frontend-path src/frontend/build setup_env: ## set up the environment @sh ./scripts/setup/setup_env.sh -frontend: ## run the frontend in development mode - make install_frontend +frontend: install_frontend ## run the frontend in development mode make run_frontend -frontendc: - make install_frontendc +frontendc: install_frontendc make run_frontend - -backend: ## run the backend in development mode - @echo 'Setting up the environment' - @make setup_env - make install_backend - @-kill -9 $$(lsof -t -i:7860) +backend: setup_env install_backend ## run the backend in development mode + @-kill -9 $$(lsof -t -i:7860) || true ifdef login @echo "Running backend autologin is $(login)"; - LANGFLOW_AUTO_LOGIN=$(login) poetry run uvicorn \ + LANGFLOW_AUTO_LOGIN=$(login) uv run uvicorn \ --factory langflow.main:create_app \ --host 0.0.0.0 \ --port $(port) \ - --reload \ + $(if $(filter-out 1,$(workers)),, --reload) \ --env-file $(env) \ --loop asyncio \ - --workers $(workers) + $(if $(workers),--workers $(workers),) else @echo "Running backend respecting the $(env) file"; - poetry run uvicorn \ + uv run uvicorn \ --factory langflow.main:create_app \ --host 0.0.0.0 \ --port $(port) \ - --reload \ + $(if $(filter-out 1,$(workers)),, --reload) \ --env-file $(env) \ --loop asyncio \ - --workers $(workers) + $(if $(workers),--workers $(workers),) endif -build_and_run: ## build the project and run it - @echo 'Removing dist folder' - @make setup_env +build_and_run: setup_env ## build the project and run it rm -rf dist rm -rf src/backend/base/dist make build - poetry run pip install dist/*.tar.gz - poetry run langflow run + uv run pip install dist/*.tar.gz + uv run langflow run build_and_install: ## build the project and install it @echo 'Removing 
dist folder' rm -rf dist rm -rf src/backend/base/dist - make build && poetry run pip install dist/*.whl && pip install src/backend/base/dist/*.whl --force-reinstall + make build && uv run pip install dist/*.whl && pip install src/backend/base/dist/*.whl --force-reinstall -build: ## build the frontend static files and package the project - @echo 'Building the project' - @make setup_env +build: setup_env ## build the frontend static files and package the project ifdef base make install_frontendci make build_frontend - make build_langflow_base + make build_langflow_base args="$(args)" endif ifdef main make install_frontendci make build_frontend - make build_langflow_base - make build_langflow + make build_langflow_base args="$(args)" + make build_langflow args="$(args)" endif build_langflow_base: - cd src/backend/base && poetry build + cd src/backend/base && uv build $(args) rm -rf src/backend/base/langflow/frontend build_langflow_backup: - poetry lock && poetry build + uv lock && uv build build_langflow: - cd ./scripts && poetry run python update_dependencies.py - poetry lock --no-update - poetry build + uv lock --no-upgrade + uv build $(args) ifdef restore mv pyproject.toml.bak pyproject.toml - mv poetry.lock.bak poetry.lock + mv uv.lock.bak uv.lock endif -dev: ## run the project in development mode with docker compose - make install_frontend -ifeq ($(build),1) - @echo 'Running docker compose up with build' - docker compose $(if $(debug),-f docker-compose.debug.yml) up --build -else - @echo 'Running docker compose up without build' - docker compose $(if $(debug),-f docker-compose.debug.yml) up -endif docker_build: dockerfile_build clear_dockerimage ## build DockerFile @@ -391,26 +360,34 @@ dcdev_up: docker compose -f docker/dev.docker-compose.yml up --remove-orphans lock_base: - cd src/backend/base && poetry lock + cd src/backend/base && uv lock lock_langflow: - poetry lock + uv lock lock: ## lock dependencies @echo 'Locking dependencies' - cd src/backend/base && 
poetry lock --no-update - poetry lock --no-update + cd src/backend/base && uv lock + uv lock update: ## update dependencies @echo 'Updating dependencies' - cd src/backend/base && poetry update - poetry update + cd src/backend/base && uv sync --upgrade + uv sync --upgrade publish_base: - cd src/backend/base && poetry publish --skip-existing + cd src/backend/base && uv publish publish_langflow: - poetry publish + uv publish + +publish_base_testpypi: + # TODO: update this to use the test-pypi repository + cd src/backend/base && uv publish -r test-pypi + +publish_langflow_testpypi: + # TODO: update this to use the test-pypi repository + uv publish -r test-pypi publish: ## build the frontend static files and package the project and publish it to PyPI @echo 'Publishing the project' @@ -421,3 +398,49 @@ endif ifdef main make publish_langflow endif + +publish_testpypi: ## build the frontend static files and package the project and publish it to PyPI + @echo 'Publishing the project' + +ifdef base + #TODO: replace with uvx twine upload dist/* + poetry config repositories.test-pypi https://test.pypi.org/legacy/ + make publish_base_testpypi +endif + +ifdef main + #TODO: replace with uvx twine upload dist/* + poetry config repositories.test-pypi https://test.pypi.org/legacy/ + make publish_langflow_testpypi +endif + + +# example make alembic-revision message="Add user table" +alembic-revision: ## generate a new migration + @echo 'Generating a new Alembic revision' + cd src/backend/base/langflow/ && uv run alembic revision --autogenerate -m "$(message)" + + +alembic-upgrade: ## upgrade database to the latest version + @echo 'Upgrading database to the latest version' + cd src/backend/base/langflow/ && uv run alembic upgrade head + +alembic-downgrade: ## downgrade database by one version + @echo 'Downgrading database by one version' + cd src/backend/base/langflow/ && uv run alembic downgrade -1 + +alembic-current: ## show current revision + @echo 'Showing current Alembic revision' 
+ cd src/backend/base/langflow/ && uv run alembic current + +alembic-history: ## show migration history + @echo 'Showing Alembic migration history' + cd src/backend/base/langflow/ && uv run alembic history --verbose + +alembic-check: ## check migration status + @echo 'Running alembic check' + cd src/backend/base/langflow/ && uv run alembic check + +alembic-stamp: ## stamp the database with a specific revision + @echo 'Stamping the database with revision $(revision)' + cd src/backend/base/langflow/ && uv run alembic stamp $(revision) diff --git a/README.ES.md b/README.ES.md index acf8c6342bd1..3c2fd27a6f58 100644 --- a/README.ES.md +++ b/README.ES.md @@ -132,7 +132,7 @@ Cada opción se detalla a continuación: - `--help`: Muestra todas las opciones disponibles. - `--host`: Establece el host al que vincular el servidor. Se puede configurar usando la variable de entorno `LANGFLOW_HOST`. El valor predeterminado es `127.0.0.1`. - `--workers`: Establece el número de procesos. Se puede configurar usando la variable de entorno `LANGFLOW_WORKERS`. El valor predeterminado es `1`. -- `--timeout`: Establece el tiempo de espera del worker en segundos. El valor predeterminado es `60`. +- `--worker-timeout`: Establece el tiempo de espera del worker en segundos. El valor predeterminado es `60`. - `--port`: Establece el puerto en el que escuchar. Se puede configurar usando la variable de entorno `LANGFLOW_PORT`. El valor predeterminado es `7860`. - `--env-file`: Especifica la ruta al archivo .env que contiene variables de entorno. El valor predeterminado es `.env`. - `--log-level`: Establece el nivel de registro. Se puede configurar usando la variable de entorno `LANGFLOW_LOG_LEVEL`. El valor predeterminado es `critical`. diff --git a/README.KR.md b/README.KR.md index 52fea323de11..1744a5a95f70 100644 --- a/README.KR.md +++ b/README.KR.md @@ -33,7 +33,7 @@
README in English README in Portuguese - README in Spanish + README in Spanish README in Simplified Chinese README in Japanese README in KOREAN @@ -154,7 +154,7 @@ langflow run [OPTIONS] - `--help`: 사용 가능한 모든 옵션을 표시합니다. - `--host`: 서버를 바인딩할 호스트를 정의합니다. `LANGFLOW_HOST` 환경 변수를 사용하여 설정할 수 있습니다. 기본 값은 `127.0.0.1`입니다. - `--workers`: 작업자 프로세스 수를 설정합니다. `LANGFLOW_WORKERS` 환경 변수를 사용하여 설정할 수 있습니다. 기본 값은 `1`입니다. -- `--timeout`: 작업자 시간 제한을 초 단위로 설정합니다. 기본 값은 `60`입니다. +- `--worker-timeout`: 작업자 시간 제한을 초 단위로 설정합니다. 기본 값은 `60`입니다. - `--port`: 수신할 포트를 설정합니다. `LANGFLOW_PORT` 환경 변수를 사용하여 설정할 수 있습니다. 기본 값은 `7860`입니다. - `--env-file`: 환경 변수가 포함된 .env 파일의 경로를 지정합니다. 기본 값은 `.env`입니다. - `--log-level`: 로깅 수준을 정의합니다. `LANGFLOW_LOG_LEVEL` 환경 변수를 사용하여 설정할 수 있습니다. 기본 값은 `critical`입니다. diff --git a/README.PT.md b/README.PT.md index 10a33e21aac5..df605107424b 100644 --- a/README.PT.md +++ b/README.PT.md @@ -134,7 +134,7 @@ Cada opção é detalhada abaixo: - `--help`: Exibe todas as opções disponíveis. - `--host`: Define o host para vincular o servidor. Pode ser configurado usando a variável de ambiente `LANGFLOW_HOST`. O padrão é `127.0.0.1`. - `--workers`: Define o número de processos. Pode ser configurado usando a variável de ambiente `LANGFLOW_WORKERS`. O padrão é `1`. -- `--timeout`: Define o tempo limite do worker em segundos. O padrão é `60`. +- `--worker-timeout`: Define o tempo limite do worker em segundos. O padrão é `60`. - `--port`: Define a porta para escutar. Pode ser configurado usando a variável de ambiente `LANGFLOW_PORT`. O padrão é `7860`. - `--env-file`: Especifica o caminho para o arquivo .env contendo variáveis de ambiente. O padrão é `.env`. - `--log-level`: Define o nível de log. Pode ser configurado usando a variável de ambiente `LANGFLOW_LOG_LEVEL`. O padrão é `critical`. diff --git a/README.ja.md b/README.ja.md index 579a826a48b5..5205ff395913 100644 --- a/README.ja.md +++ b/README.ja.md @@ -33,7 +33,7 @@
README in English README in Portuguese - README in Spanish + README in Spanish README in Simplified Chinese README in Japanese README in KOREAN @@ -152,7 +152,7 @@ langflow run [OPTIONS] - `--help`: 利用可能なすべてのオプションを表示します。 - `--host`: サーバーをバインドするホストを定義します。`LANGFLOW_HOST`環境変数を使用して設定できます。デフォルトは`127.0.0.1`です。 - `--workers`: ワーカープロセスの数を設定します。`LANGFLOW_WORKERS`環境変数を使用して設定できます。デフォルトは`1`です。 -- `--timeout`: ワーカーのタイムアウトを秒単位で設定します。デフォルトは`60`です。 +- `--worker-timeout`: ワーカーのタイムアウトを秒単位で設定します。デフォルトは`60`です。 - `--port`: リッスンするポートを設定します。`LANGFLOW_PORT`環境変数を使用して設定できます。デフォルトは`7860`です。 - `--env-file`: 環境変数を含む.env ファイルのパスを指定します。デフォルトは`.env`です。 - `--log-level`: ログレベルを定義します。`LANGFLOW_LOG_LEVEL`環境変数を使用して設定できます。デフォルトは`critical`です。 diff --git a/README.md b/README.md index 6a6175a87b38..f7898fcf9e5d 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -# [![Langflow](./docs/static/img/hero.png)](https://www.langflow.org) +![Langflow](./docs/static/img/hero.png)

Langflow is a low-code app builder for RAG and multi-agent AI applications. It’s Python-based and agnostic to any model, API, or database. @@ -10,13 +10,13 @@ Docs - Free Cloud Service - Self Managed - +

README in English README in Portuguese - README in Spanish + README in Spanish README in Simplified Chinese README in Japanese README in KOREAN @@ -35,8 +35,7 @@ 9. **Customize workflows** or create flows entirely just using Python. 10. **Ecosystem integrations** as reusable components for any model, API or database. -![Integrations](https://github.com/user-attachments/assets/e9c96dc4-47bf-48ab-ad58-e01e038f25e8) - +![Integrations](./docs/static/img/integrations.png) ## 📦 Quickstart diff --git a/README.zh_CN.md b/README.zh_CN.md index 613c84daeb65..d860319473c7 100644 --- a/README.zh_CN.md +++ b/README.zh_CN.md @@ -28,7 +28,7 @@
README in English README in Portuguese - README in Spanish + README in Spanish README in Simplified Chinese README in Japanese README in KOREAN @@ -134,7 +134,7 @@ langflow run [OPTIONS] - `--help`: 显示所有可用参数。 - `--host`: 定义绑定服务器的主机 host 参数,可以使用 LANGFLOW_HOST 环境变量设置,默认值为 127.0.0.1。 - `--workers`: 设置工作进程的数量,可以使用 LANGFLOW_WORKERS 环境变量设置,默认值为 1。 -- `--timeout`: 设置工作进程的超时时间(秒),默认值为 60。 +- `--worker-timeout`: 设置工作进程的超时时间(秒),默认值为 60。 - `--port`: 设置服务监听的端口,可以使用 LANGFLOW_PORT 环境变量设置,默认值为 7860。 - `--config`: 定义配置文件的路径,默认值为 config.yaml。 - `--env-file`: 指定包含环境变量的 .env 文件路径,默认值为 .env。 diff --git a/docker/build_and_push.Dockerfile b/docker/build_and_push.Dockerfile index aeb948b5f5c1..5259ce501414 100644 --- a/docker/build_and_push.Dockerfile +++ b/docker/build_and_push.Dockerfile @@ -1,7 +1,6 @@ # syntax=docker/dockerfile:1 # Keep this syntax directive! It's used to enable Docker BuildKit - ################################ # BUILDER-BASE # Used to build deps + create our virtual environment @@ -9,76 +8,67 @@ # 1. use python:3.12.3-slim as the base image until https://github.com/pydantic/pydantic-core/issues/1292 gets resolved # 2. 
do not add --platform=$BUILDPLATFORM because the pydantic binaries must be resolved for the final architecture -FROM python:3.12.3-slim as builder-base - -ENV PYTHONDONTWRITEBYTECODE=1 \ - \ - # pip - PIP_DISABLE_PIP_VERSION_CHECK=on \ - PIP_DEFAULT_TIMEOUT=100 \ - \ - # poetry - # https://python-poetry.org/docs/configuration/#using-environment-variables - POETRY_VERSION=1.8.2 \ - # make poetry install to this location - POETRY_HOME="/opt/poetry" \ - # make poetry create the virtual environment in the project's root - # it gets named `.venv` - POETRY_VIRTUALENVS_IN_PROJECT=true \ - # do not ask any interactive question - POETRY_NO_INTERACTION=1 \ - \ - # paths - # this is where our requirements + virtual environment will live - PYSETUP_PATH="/opt/pysetup" \ - VENV_PATH="/opt/pysetup/.venv" +# Use a Python image with uv pre-installed +FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim AS builder + +# Install the project into `/app` +WORKDIR /app + +# Enable bytecode compilation +ENV UV_COMPILE_BYTECODE=1 + +# Copy from the cache instead of linking since it's a mounted volume +ENV UV_LINK_MODE=copy RUN apt-get update \ && apt-get install --no-install-recommends -y \ - # deps for installing poetry - curl \ # deps for building python deps - build-essential npm \ + build-essential \ + # npm + npm \ # gcc gcc \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* -RUN --mount=type=cache,target=/root/.cache \ - curl -sSL https://install.python-poetry.org | python3 - +RUN --mount=type=cache,target=/root/.cache/uv \ + --mount=type=bind,source=uv.lock,target=uv.lock \ + --mount=type=bind,source=README.md,target=README.md \ + --mount=type=bind,source=pyproject.toml,target=pyproject.toml \ + --mount=type=bind,source=src/backend/base/README.md,target=src/backend/base/README.md \ + --mount=type=bind,source=src/backend/base/uv.lock,target=src/backend/base/uv.lock \ + --mount=type=bind,source=src/backend/base/pyproject.toml,target=src/backend/base/pyproject.toml \ + uv sync --frozen 
--no-install-project --no-editable + +ADD ./src /app/src + +COPY src/frontend /tmp/src/frontend +WORKDIR /tmp/src/frontend +RUN --mount=type=cache,target=/root/.npm \ + npm ci \ + && npm run build \ + && cp -r build /app/src/backend/langflow/frontend \ + && rm -rf /tmp/src/frontend WORKDIR /app -COPY pyproject.toml poetry.lock README.md ./ -COPY src/ ./src -COPY scripts/ ./scripts -RUN python -m pip install requests --user && cd ./scripts && python update_dependencies.py - -# 1. Install the dependencies using the current poetry.lock file to create reproducible builds -# 2. Do not install dev dependencies -# 3. Install all the extras to ensure all optionals are installed as well -# 4. --sync to ensure nothing else is in the environment -# 5. Build the wheel and install "langflow" package (mainly for version) - -# Note: moving to build and installing the wheel will make the docker images not reproducible. -RUN $POETRY_HOME/bin/poetry lock --no-update \ - # install current lock file with fixed dependencies versions \ - # do not install dev dependencies \ - && $POETRY_HOME/bin/poetry install --without dev --sync -E deploy -E couchbase -E cassio \ - && $POETRY_HOME/bin/poetry build -f wheel \ - && $POETRY_HOME/bin/poetry run pip install dist/*.whl +ADD ./pyproject.toml /app/pyproject.toml +ADD ./uv.lock /app/uv.lock +ADD ./README.md /app/README.md + +RUN --mount=type=cache,target=/root/.cache/uv \ + uv sync --frozen --no-editable ################################ # RUNTIME # Setup user, utilities and copy the virtual environment only ################################ -# 1. 
use python:3.12.3-slim as the base image until https://github.com/pydantic/pydantic-core/issues/1292 gets resolved -FROM python:3.12.3-slim as runtime +FROM python:3.12.3-slim AS runtime -RUN apt-get -y update \ - && apt-get install --no-install-recommends -y \ - curl \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* +RUN useradd user -u 1000 -g 0 --no-create-home --home-dir /app/data +COPY --from=builder --chown=1000 /app/.venv /app/.venv + +# Place executables in the environment at the front of the path +ENV PATH="/app/.venv/bin:$PATH" LABEL org.opencontainers.image.title=langflow LABEL org.opencontainers.image.authors=['Langflow'] @@ -86,14 +76,10 @@ LABEL org.opencontainers.image.licenses=MIT LABEL org.opencontainers.image.url=https://github.com/langflow-ai/langflow LABEL org.opencontainers.image.source=https://github.com/langflow-ai/langflow -RUN useradd user -u 1000 -g 0 --no-create-home --home-dir /app/data -COPY --from=builder-base --chown=1000 /app/.venv /app/.venv -ENV PATH="/app/.venv/bin:${PATH}" - USER user WORKDIR /app ENV LANGFLOW_HOST=0.0.0.0 ENV LANGFLOW_PORT=7860 -CMD ["python", "-m", "langflow", "run"] +CMD ["langflow", "run"] \ No newline at end of file diff --git a/docker/build_and_push_base.Dockerfile b/docker/build_and_push_base.Dockerfile index a996ec4a63af..c20f8b6bd90b 100644 --- a/docker/build_and_push_base.Dockerfile +++ b/docker/build_and_push_base.Dockerfile @@ -1,58 +1,28 @@ - - # syntax=docker/dockerfile:1 # Keep this syntax directive! 
It's used to enable Docker BuildKit -# Based on https://github.com/python-poetry/poetry/discussions/1879?sort=top#discussioncomment-216865 -# but I try to keep it updated (see history) ################################ -# PYTHON-BASE -# Sets up all our shared environment variables +# BUILDER-BASE +# Used to build deps + create our virtual environment ################################ -# use python:3.12.3-slim as the base image until https://github.com/pydantic/pydantic-core/issues/1292 gets resolved -FROM python:3.12.3-slim as python-base - -# python -ENV PYTHONUNBUFFERED=1 \ - # prevents python creating .pyc files - PYTHONDONTWRITEBYTECODE=1 \ - \ - # pip - PIP_DISABLE_PIP_VERSION_CHECK=on \ - PIP_DEFAULT_TIMEOUT=100 \ - \ - # poetry - # https://python-poetry.org/docs/configuration/#using-environment-variables - POETRY_VERSION=1.8.2 \ - # make poetry install to this location - POETRY_HOME="/opt/poetry" \ - # make poetry create the virtual environment in the project's root - # it gets named `.venv` - POETRY_VIRTUALENVS_IN_PROJECT=true \ - # do not ask any interactive question - POETRY_NO_INTERACTION=1 \ - \ - # paths - # this is where our requirements + virtual environment will live - PYSETUP_PATH="/opt/pysetup" \ - VENV_PATH="/opt/pysetup/.venv" - - -# prepend poetry and venv to path -ENV PATH="$POETRY_HOME/bin:$VENV_PATH/bin:$PATH" +# 1. use python:3.12.3-slim as the base image until https://github.com/pydantic/pydantic-core/issues/1292 gets resolved +# 2. 
do not add --platform=$BUILDPLATFORM because the pydantic binaries must be resolved for the final architecture +# Use a Python image with uv pre-installed +FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim AS builder +# Install the project into `/app` +WORKDIR /app + +# Enable bytecode compilation +ENV UV_COMPILE_BYTECODE=1 + +# Copy from the cache instead of linking since it's a mounted volume +ENV UV_LINK_MODE=copy -################################ -# BUILDER-BASE -# Used to build deps + create our virtual environment -################################ -FROM python-base as builder-base RUN apt-get update \ && apt-get install --no-install-recommends -y \ - # deps for installing poetry - curl \ # deps for building python deps build-essential \ # npm @@ -62,40 +32,60 @@ RUN apt-get update \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* -RUN --mount=type=cache,target=/root/.cache \ - curl -sSL https://install.python-poetry.org | python3 - +# Install the project's dependencies using the lockfile and settings +# We need to mount the root uv.lock and pyproject.toml to build the base with uv because we're still using uv workspaces +RUN --mount=type=cache,target=/root/.cache/uv \ + --mount=type=bind,source=src/backend/base/README.md,target=src/backend/base/README.md \ + --mount=type=bind,source=src/backend/base/uv.lock,target=src/backend/base/uv.lock \ + --mount=type=bind,source=src/backend/base/pyproject.toml,target=src/backend/base/pyproject.toml \ + --mount=type=bind,source=uv.lock,target=uv.lock \ + --mount=type=bind,source=README.md,target=README.md \ + --mount=type=bind,source=pyproject.toml,target=pyproject.toml \ + cd src/backend/base && uv sync --frozen --no-install-project --no-dev --no-editable + +ADD ./src /app/src + +COPY src/frontend /tmp/src/frontend +WORKDIR /tmp/src/frontend +RUN npm install \ + && npm run build \ + && cp -r build /app/src/backend/base/langflow/frontend \ + && rm -rf /tmp/src/frontend + +ADD ./src/backend/base /app/src/backend/base 
+WORKDIR /app/src/backend/base +# again we need these because of workspaces +ADD ./pyproject.toml /app/pyproject.toml +ADD ./uv.lock /app/uv.lock +ADD ./src/backend/base/pyproject.toml /app/src/backend/base/pyproject.toml +ADD ./src/backend/base/uv.lock /app/src/backend/base/uv.lock +ADD ./src/backend/base/README.md /app/src/backend/base/README.md +RUN --mount=type=cache,target=/root/.cache/uv \ + uv sync --frozen --no-dev --no-editable -# Now we need to copy the entire project into the image -COPY pyproject.toml poetry.lock ./ -COPY src/frontend/package.json /tmp/package.json -RUN cd /tmp && npm install -WORKDIR /app -COPY src/frontend ./src/frontend -RUN rm -rf src/frontend/node_modules -RUN cp -a /tmp/node_modules /app/src/frontend -COPY scripts ./scripts -COPY Makefile ./ -COPY README.md ./ -RUN cd src/frontend && npm run build -COPY src/backend ./src/backend -RUN cp -r src/frontend/build src/backend/base/langflow/frontend -RUN rm -rf src/backend/base/dist -RUN useradd -m -u 1000 user && \ - mkdir -p /app/langflow && \ - chown -R user:user /app && \ - chmod -R u+w /app/langflow - -# Update PATH with home/user/.local/bin -ENV PATH="/home/user/.local/bin:${PATH}" -RUN cd src/backend/base && $POETRY_HOME/bin/poetry build - -# Copy virtual environment and built .tar.gz from builder base +################################ +# RUNTIME +# Setup user, utilities and copy the virtual environment only +################################ +FROM python:3.12.3-slim AS runtime + +RUN useradd user -u 1000 -g 0 --no-create-home --home-dir /app/data +# and we use the venv at the root because workspaces +COPY --from=builder --chown=1000 /app/.venv /app/.venv + +# Place executables in the environment at the front of the path +ENV PATH="/app/.venv/bin:$PATH" + +LABEL org.opencontainers.image.title=langflow +LABEL org.opencontainers.image.authors=['Langflow'] +LABEL org.opencontainers.image.licenses=MIT +LABEL org.opencontainers.image.url=https://github.com/langflow-ai/langflow +LABEL 
org.opencontainers.image.source=https://github.com/langflow-ai/langflow USER user -# Install the package from the .tar.gz -RUN python -m pip install /app/src/backend/base/dist/*.tar.gz --user +WORKDIR /app ENV LANGFLOW_HOST=0.0.0.0 ENV LANGFLOW_PORT=7860 -CMD ["python", "-m", "langflow", "run"] +CMD ["langflow-base", "run"] diff --git a/docker/build_and_push_ep.Dockerfile b/docker/build_and_push_ep.Dockerfile new file mode 100644 index 000000000000..4c5cc2bfaa5a --- /dev/null +++ b/docker/build_and_push_ep.Dockerfile @@ -0,0 +1,90 @@ +# syntax=docker/dockerfile:1 +# Keep this syntax directive! It's used to enable Docker BuildKit + +################################ +# BUILDER-BASE +# Used to build deps + create our virtual environment +################################ + +# 1. use python:3.12.3-slim as the base image until https://github.com/pydantic/pydantic-core/issues/1292 gets resolved +# 2. do not add --platform=$BUILDPLATFORM because the pydantic binaries must be resolved for the final architecture +# Use a Python image with uv pre-installed +FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim AS builder + +# Install the project into `/app` +WORKDIR /app + +# Enable bytecode compilation +ENV UV_COMPILE_BYTECODE=1 + +# Copy from the cache instead of linking since it's a mounted volume +ENV UV_LINK_MODE=copy + +RUN apt-get update \ + && apt-get install --no-install-recommends -y \ + # deps for building python deps + build-essential \ + # npm + npm \ + # gcc + gcc \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +RUN --mount=type=cache,target=/root/.cache/uv \ + --mount=type=bind,source=uv.lock,target=uv.lock \ + --mount=type=bind,source=README.md,target=README.md \ + --mount=type=bind,source=pyproject.toml,target=pyproject.toml \ + --mount=type=bind,source=src/backend/base/README.md,target=src/backend/base/README.md \ + --mount=type=bind,source=src/backend/base/uv.lock,target=src/backend/base/uv.lock \ + 
--mount=type=bind,source=src/backend/base/pyproject.toml,target=src/backend/base/pyproject.toml \ + uv sync --frozen --no-install-project --no-editable + +ADD ./src /app/src + +COPY src/frontend /tmp/src/frontend +WORKDIR /tmp/src/frontend +RUN --mount=type=cache,target=/root/.npm \ + npm ci \ + && npm run build \ + && cp -r build /app/src/backend/langflow/frontend \ + && rm -rf /tmp/src/frontend + +WORKDIR /app +ADD ./pyproject.toml /app/pyproject.toml +ADD ./uv.lock /app/uv.lock +ADD ./README.md /app/README.md + +RUN --mount=type=cache,target=/root/.cache/uv \ + uv sync --frozen --no-editable + +################################ +# RUNTIME +# Setup user, utilities and copy the virtual environment only +################################ +FROM python:3.12.3-slim AS runtime + +RUN useradd user -u 1000 -g 0 --no-create-home --home-dir /app/data && \ + mkdir /data && chown -R 1000:0 /data + +COPY --from=builder --chown=1000 /app/.venv /app/.venv + +# curl is required for langflow health checks +RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/* + +# Place executables in the environment at the front of the path +ENV PATH="/app/.venv/bin:$PATH" + +LABEL org.opencontainers.image.title=langflow +LABEL org.opencontainers.image.authors=['Langflow'] +LABEL org.opencontainers.image.licenses=MIT +LABEL org.opencontainers.image.url=https://github.com/langflow-ai/langflow +LABEL org.opencontainers.image.source=https://github.com/langflow-ai/langflow + +WORKDIR /app + +ENV LANGFLOW_HOST=0.0.0.0 +ENV LANGFLOW_PORT=7860 + +USER 1000 +ENTRYPOINT ["python", "-m", "langflow", "run", "--host", "0.0.0.0", "--backend-only"] diff --git a/docker/dev.Dockerfile b/docker/dev.Dockerfile index fe60259b9cad..ad1497ef384f 100644 --- a/docker/dev.Dockerfile +++ b/docker/dev.Dockerfile @@ -1,20 +1,26 @@ -FROM python:3.12-bookworm +FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim ENV TZ=UTC WORKDIR /app -RUN apt update -y -RUN apt install \ +RUN apt-get update && apt-get 
install -y \ build-essential \ curl \ npm \ - -y + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* COPY . /app -RUN pip install poetry -RUN poetry config virtualenvs.create false -RUN poetry install --no-interaction --no-ansi +# Install dependencies using uv +RUN --mount=type=cache,target=/root/.cache/uv \ + --mount=type=bind,source=uv.lock,target=uv.lock \ + --mount=type=bind,source=README.md,target=README.md \ + --mount=type=bind,source=pyproject.toml,target=pyproject.toml \ + --mount=type=bind,source=src/backend/base/README.md,target=src/backend/base/README.md \ + --mount=type=bind,source=src/backend/base/uv.lock,target=src/backend/base/uv.lock \ + --mount=type=bind,source=src/backend/base/pyproject.toml,target=src/backend/base/pyproject.toml \ + uv sync --frozen --no-install-project --no-dev EXPOSE 7860 EXPOSE 3000 diff --git a/docker/dev.docker-compose.yml b/docker/dev.docker-compose.yml index 31b6505c9e4a..5cad3f5d2a9d 100644 --- a/docker/dev.docker-compose.yml +++ b/docker/dev.docker-compose.yml @@ -23,7 +23,7 @@ services: volumes: - ../:/app depends_on: - - postgres # Dependência no seu banco de dados existente + - postgres networks: - dev-langflow diff --git a/docker_example/docker-compose.yml b/docker_example/docker-compose.yml index 7e7e3406486d..4dda51442253 100644 --- a/docker_example/docker-compose.yml +++ b/docker_example/docker-compose.yml @@ -2,7 +2,8 @@ version: "3.8" services: langflow: - image: langflowai/langflow:latest + image: langflowai/langflow:latest # or another version tag on https://hub.docker.com/r/langflowai/langflow + pull_policy: always # set to 'always' when using 'latest' image ports: - "7860:7860" depends_on: diff --git a/docs/css/custom.css b/docs/css/custom.css index 68d901e0761c..5ab062952b2e 100644 --- a/docs/css/custom.css +++ b/docs/css/custom.css @@ -5,10 +5,11 @@ */ :root { --ifm-background-color: var(--token-primary-bg-c); + --ifm-color-primary: hsla(330, 81%, 60%, 1); --ifm-navbar-link-hover-color: initial; 
--ifm-navbar-padding-vertical: 0; --ifm-navbar-item-padding-vertical: 0; - --ifm-font-family-base: -apple-system, BlinkMacSystemFont, Inter, Helvetica, + --ifm-font-family-base: Inter, -apple-system, BlinkMacSystemFont, Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI emoji"; --ifm-font-family-monospace: "SFMono-Regular", "Roboto Mono", Consolas, "Liberation Mono", Menlo, Courier, monospace; @@ -118,17 +119,15 @@ body { width: 24px; height: 24px; display: flex; - background: url("/logos/gitLight.svg") - no-repeat; + background: url("/logos/gitLight.svg") no-repeat; } -[data-theme='dark'] .header-github-link:before { +[data-theme="dark"] .header-github-link:before { content: ""; width: 24px; height: 24px; display: flex; - background: url("/logos/gitDark.svg") - no-repeat; + background: url("/logos/gitDark.svg") no-repeat; } /* Twitter */ @@ -145,7 +144,7 @@ body { background-size: contain; } -[data-theme='dark'] .header-twitter-link::before { +[data-theme="dark"] .header-twitter-link::before { content: ""; width: 24px; height: 24px; @@ -164,7 +163,7 @@ body { opacity: 0.6; } -[data-theme='dark'] .header-discord-link::before { +[data-theme="dark"] .header-discord-link::before { content: ""; width: 24px; height: 24px; @@ -241,6 +240,8 @@ body { min-height: 70px; } -.theme-doc-sidebar-item-category.theme-doc-sidebar-item-category-level-2.menu__list-item:not(:first-child) { - margin-top: 0.25rem!important; -} \ No newline at end of file +.theme-doc-sidebar-item-category.theme-doc-sidebar-item-category-level-2.menu__list-item:not( + :first-child + ) { + margin-top: 0.25rem !important; +} diff --git a/docs/docs/Agents/_category_.json b/docs/docs/Agents/_category_.json new file mode 100644 index 000000000000..279ca75eea5e --- /dev/null +++ b/docs/docs/Agents/_category_.json @@ -0,0 +1 @@ +{"position":5, "label":"Agents"} \ No newline at end of file diff --git a/docs/docs/Agents/agent-tool-calling-agent-component.md 
b/docs/docs/Agents/agent-tool-calling-agent-component.md new file mode 100644 index 000000000000..d0c3bdbd971a --- /dev/null +++ b/docs/docs/Agents/agent-tool-calling-agent-component.md @@ -0,0 +1,188 @@ +--- +title: Create a problem-solving agent +sidebar_position: 2 +slug: /agents-tool-calling-agent-component +--- + +Developing **agents** in LangChain is complex. + +The `AgentComponent` is a component for easily creating an AI agent capable of analyzing tasks using tools you provide. + +The component contains all of the elements you'll need for creating an agent. Instead of managing LLMs and providers, pick your model and enter your API key. Instead of connecting a **Prompt** component, enter instructions in the component's **Agent Instruction** fields. + +Prompt component + +Learn how to build a flow starting with the **Tool calling agent** component, and see how it can help you solve problems. + +## Prerequisites + +- [An OpenAI API key](https://platform.openai.com/) +- [A Search API key](https://www.searchapi.io/) + +## Create a problem-solving agent with AgentComponent + +Create a problem-solving agent in Langflow, starting with the **Tool calling agent**. + +1. Click **New Flow**, and then click **Blank Flow**. +2. Click and drag an **Agent** component to your workspace. +The default settings are acceptable for now, so this guide assumes you're using **OpenAI** for the LLM. +3. Add your **OpenAI API Key** to the **Agent** component. +4. Add **Chat input** and **Chat output** components to your flow, and connect them to the tool calling agent. + +Chat with agent component + +This basic flow enables you to chat with the agent with the **Playground** after you've connected some **Tools**. + +5. Connect the **Search API** tool component to your agent. +6. Add your **Search API key** to the component. +Your agent can now query the Search API for information. +7. Connect a **Calculator** tool for solving basic math problems. +8. 
Connect an **API Request** component to the agent. +This component is not in the **Tools** category, but the agent can still use it as a tool by enabling **Tool Mode**. +**Tool Mode** makes a component into a tool by adding a **Toolset** port that can be connected to an agent's **Tools** port. +To enable **Tool Mode** on the component, click **Tool Mode**. +The component's fields change dynamically based on the mode it's in. + +Chat with agent component + +## Solve problems with the agent + +Your agent now has tools for performing a web search, doing basic math, and performing API requests. You can solve many problems with just these capabilities. + +* Your tabletop game group cancelled, and you're stuck at home. +Point **API Request** to an online rules document, tell your agent `You are a fun game organizer who uses the tools at your disposal`, and play a game. +* You need to learn a new software language quickly. +Point **API Request** to some docs, tell your agent `You are a knowledgeable software developer who uses the tools at your disposal`, and start learning. + +See what problems you can solve with this flow. As your problem becomes more specialized, add a tool. For example, the [simple agent starter project](/starter-projects-simple-agent) adds a Python REPL component to solve math problems that are too challenging for the calculator. + +## Use an agent as a tool + +The agent component itself also supports **Tool Mode** for creating multi-agent flows. + +Add an agent to your problem-solving flow that uses a different OpenAI model for more specialized problem solving. + +1. Click and drag an **Agent** component to your workspace. +2. Add your **Open AI API Key** to the **Agent** component. +3. In the **Model Name** field, select `gpt-4o`. +4. Click **Tool Mode** to use this new agent as a tool. +5. Connect the new agent's **Toolset** port to the previously created agent's **Tools** port. +6. Connect **Search API** and **API Request** to the new agent. 
+The new agent will use `gpt-4o` for the larger tasks of scraping and searching information that requires large context windows. +The problem-solving agent will now use this agent as a tool, with its unique LLM and toolset. + +Chat with agent component + +## Add custom components as tools {#components-as-tools} + +An agent can use custom components as tools. + +1. To add a custom component to the problem-solving agent flow, click **New Custom Component**. + +2. Add custom Python code to the custom component. +Here's an example text analyzer for sentiment analysis. + +```python +from langflow.custom import Component +from langflow.io import MessageTextInput, Output +from langflow.schema import Data +import re + +class TextAnalyzerComponent(Component): + display_name = "Text Analyzer" + description = "Analyzes and transforms input text." + documentation: str = "http://docs.langflow.org/components/custom" + icon = "chart-bar" + name = "TextAnalyzerComponent" + + inputs = [ + MessageTextInput( + name="input_text", + display_name="Input Text", + info="Enter text to analyze", + value="Hello, World!", + tool_mode=True, + ), + ] + + outputs = [ + Output(display_name="Analysis Result", name="output", method="analyze_text"), + ] + + def analyze_text(self) -> Data: + text = self.input_text + + # Perform text analysis + word_count = len(text.split()) + char_count = len(text) + sentence_count = len(re.findall(r'\w+[.!?]', text)) + + # Transform text + reversed_text = text[::-1] + uppercase_text = text.upper() + + analysis_result = { + "original_text": text, + "word_count": word_count, + "character_count": char_count, + "sentence_count": sentence_count, + "reversed_text": reversed_text, + "uppercase_text": uppercase_text + } + + data = Data(value=analysis_result) + self.status = data + return data +``` + +3. To enable the custom component as a tool, click **Tool Mode**. +4. Connect the tool output to the agent's tools input. +5. 
Ask the agent, `What tools are you using to answer my questions?` +Your response will be similar to the following, and will include your custom component. +```plain +I have access to several tools that assist me in answering your questions, including: +Search API: This allows me to search for recent information or results on the web. +HTTP Requests: I can make HTTP requests to various URLs to retrieve data or interact with APIs. +Calculator: I can evaluate basic arithmetic expressions. +Text Analyzer: I can analyze and transform input text. +Current Date and Time: I can retrieve the current date and time in various time zones. +``` + +## Make any component a tool + +These components support **Tool Mode**: + +* **URL** +* **API request** +* **Calculator** +* **Current date** + +If the component you want to use as a tool doesn't have a **Tool Mode** button, add `tool_mode=True` to the component's code under `MessageTextInput`. + +For example, in the [components as tools](#components-as-tools) example above, `tool_mode=True,` is added so the custom component can be used as a tool. + +**Tool Mode** supports the `MessageTextInput` type. + +```python +inputs = [ + MessageTextInput( + name="input_text", + display_name="Input Text", + info="Enter text to analyze", + value="Hello, World!", + tool_mode=True, + ), +] +``` + +## Add flows as tools + +An agent can use flows that are saved in your workspace as tools with the [Flow as Tool](/components-logic#flow-as-tool) component. + +1. To add a **Flow as Tool** component, click and drag a **Flow as Tool** component to your workspace. +2. Select the flow you want the agent to use as a tool. +3. Connect the tool output to the agent's tools input. +4. Ask the agent, `What tools are you using to answer my questions?` +Your **Flow as Tool** flow should be visible in the response. 
+ + diff --git a/docs/docs/Agents/agents-overview.md b/docs/docs/Agents/agents-overview.md new file mode 100644 index 000000000000..d8901eb76ba0 --- /dev/null +++ b/docs/docs/Agents/agents-overview.md @@ -0,0 +1,15 @@ +--- +title: Agents overview +sidebar_position: 1 +slug: /agents-overview +--- + +**Agents** are AI systems that use LLMs as a brain to analyze problems and select external tools. + +Instead of developers having to create logical statements to direct every possible path of a program, an agent can operate with autonomy. An agent can leverage external tools and APIs to gather information and take action, demonstrate chain-of-thought reasoning, and generate tailored text for specific purposes. + +To simplify the development of agents, Langflow created a custom [Tool calling agent](/components-agents#agent-component) component that streamlines configuration and lets developers focus on solving problems with agents. + +Prompt component + +To get started, see [Create a problem-solving agent](/agents-tool-calling-agent-component). 
\ No newline at end of file diff --git a/docs/docs/Components/1028644105.png b/docs/docs/Components/1028644105.png deleted file mode 100644 index 8ae6499639b6..000000000000 Binary files a/docs/docs/Components/1028644105.png and /dev/null differ diff --git a/docs/docs/Components/238089171.png b/docs/docs/Components/238089171.png deleted file mode 100644 index cd1ab72ae95e..000000000000 Binary files a/docs/docs/Components/238089171.png and /dev/null differ diff --git a/docs/docs/Components/241280398.png b/docs/docs/Components/241280398.png deleted file mode 100644 index 27e83c969822..000000000000 Binary files a/docs/docs/Components/241280398.png and /dev/null differ diff --git a/docs/docs/Components/263391508.png b/docs/docs/Components/263391508.png deleted file mode 100644 index 1c15466c2fa3..000000000000 Binary files a/docs/docs/Components/263391508.png and /dev/null differ diff --git a/docs/docs/Components/565424296.png b/docs/docs/Components/565424296.png deleted file mode 100644 index b3a3cb8229ad..000000000000 Binary files a/docs/docs/Components/565424296.png and /dev/null differ diff --git a/docs/docs/Components/938852908.png b/docs/docs/Components/938852908.png deleted file mode 100644 index 6e7bbde2f801..000000000000 Binary files a/docs/docs/Components/938852908.png and /dev/null differ diff --git a/docs/docs/Components/987204819.png b/docs/docs/Components/987204819.png deleted file mode 100644 index 14418f74cafe..000000000000 Binary files a/docs/docs/Components/987204819.png and /dev/null differ diff --git a/docs/docs/Components/_category_.json b/docs/docs/Components/_category_.json index fdcf118239bf..62531e49ed92 100644 --- a/docs/docs/Components/_category_.json +++ b/docs/docs/Components/_category_.json @@ -1 +1 @@ -{"position":5, "label":"Components"} \ No newline at end of file +{"position":6, "label":"Components"} \ No newline at end of file diff --git a/docs/docs/Components/components-agents.md b/docs/docs/Components/components-agents.md new file 
mode 100644 index 000000000000..53b73768fb9f --- /dev/null +++ b/docs/docs/Components/components-agents.md @@ -0,0 +1,309 @@ +--- +title: Agents +sidebar_position: 12 +slug: /components-agents +--- + +# Agent components in Langflow + +Agent components are used to define the behavior and capabilities of AI agents in your flow. Agents can interact with APIs, databases, and other services and use LLMs as a reasoning engine to decide which course to take in your flow. + +## Agent component {#agent-component} + +This component creates an agent that can use tools to answer questions and perform tasks based on given instructions. + +For more information on this component, see the [tool calling agent documentation](/agents-tool-calling-agent-component). + +### Inputs + +| Name | Type | Description | +|----------------------|----------|-------------------------------------------------------------------------------------------------| +| agent_llm | Dropdown | The provider of the language model that the agent will use to generate responses. | +| system_prompt | String | Initial instructions and context provided to guide the agent's behavior. | +| tools | List | List of tools available for the agent to use. | +| input_value | String | The input task or question for the agent to process. | +| add_current_date_tool| Boolean | If true, adds a tool to the agent that returns the current date. | + +### Outputs + +| Name | Type | Description | +|----------|---------|-------------------------------------------------| +| response | Message | The agent's response to the given input task. | + +## CSV Agent + +This component creates a CSV agent from a CSV file and LLM. 
+ +### Inputs + +| Name | Type | Description | +|------|------|-------------| +| llm | LanguageModel | Language model to use for the agent | +| path | File | Path to the CSV file | +| agent_type | String | Type of agent to create (zero-shot-react-description, openai-functions, or openai-tools) | + +### Outputs + +| Name | Type | Description | +|------|------|-------------| +| agent | AgentExecutor | CSV agent instance | + +## CrewAI Agent + +This component represents an Agent of CrewAI, allowing for the creation of specialized AI agents with defined roles, goals, and capabilities within a crew. + +For more information, see the [CrewAI documentation](https://docs.crewai.com/core-concepts/Agents/). + +### Inputs + +| Name | Display Name | Info | +|------|--------------|------| +| role | Role | The role of the agent | +| goal | Goal | The objective of the agent | +| backstory | Backstory | The backstory of the agent | +| tools | Tools | Tools at agent's disposal | +| llm | Language Model | Language model that will run the agent | +| memory | Memory | Whether the agent should have memory or not | +| verbose | Verbose | Enables verbose output | +| allow_delegation | Allow Delegation | Whether the agent is allowed to delegate tasks to other agents | +| allow_code_execution | Allow Code Execution | Whether the agent is allowed to execute code | +| kwargs | kwargs | Additional keyword arguments for the agent | + +### Outputs + +| Name | Display Name | Info | +|------|--------------|------| +| output | Agent | The constructed CrewAI Agent object | + +## Hierarchical Crew + +This component represents a group of agents, managing how they should collaborate and the tasks they should perform in a hierarchical structure. This component allows for the creation of a crew with a manager overseeing the task execution. + +For more information, see the [CrewAI documentation](https://docs.crewai.com/how-to/Hierarchical/). 
+ +### Inputs + +| Name | Display Name | Info | +|------|--------------|------| +| agents | Agents | List of Agent objects representing the crew members | +| tasks | Tasks | List of HierarchicalTask objects representing the tasks to be executed | +| manager_llm | Manager LLM | Language model for the manager agent (optional) | +| manager_agent | Manager Agent | Specific agent to act as the manager (optional) | +| verbose | Verbose | Enables verbose output for detailed logging | +| memory | Memory | Specifies the memory configuration for the crew | +| use_cache | Use Cache | Enables caching of results | +| max_rpm | Max RPM | Sets the maximum requests per minute | +| share_crew | Share Crew | Determines if the crew information is shared among agents | +| function_calling_llm | Function Calling LLM | Specifies the language model for function calling | + +### Outputs + +| Name | Display Name | Info | +|------|--------------|------| +| crew | Crew | The constructed Crew object with hierarchical task execution | + +## JSON Agent + +This component creates a JSON agent from a JSON or YAML file and an LLM. + +### Inputs + +| Name | Type | Description | +|------|------|-------------| +| llm | LanguageModel | Language model to use for the agent | +| path | File | Path to the JSON or YAML file | + +### Outputs + +| Name | Type | Description | +|------|------|-------------| +| agent | AgentExecutor | JSON agent instance | + +## OpenAI Tools Agent + +This component creates an OpenAI Tools Agent using LangChain. + +For more information, see the [LangChain documentation](https://python.langchain.com/v0.1/docs/modules/agents/agent_types/openai_tools/). 
+ +### Inputs + +| Name | Type | Description | +|------|------|-------------| +| llm | LanguageModel | Language model to use for the agent (must be tool-enabled) | +| system_prompt | String | System prompt for the agent | +| user_prompt | String | User prompt template (must contain 'input' key) | +| chat_history | List[Data] | Optional chat history for the agent | +| tools | List[Tool] | List of tools available to the agent | + +### Outputs + +| Name | Type | Description | +|------|------|-------------| +| agent | AgentExecutor | OpenAI Tools Agent instance | + +## OpenAPI Agent + +This component creates an OpenAPI Agent to interact with APIs defined by OpenAPI specifications. + +For more information, see the LangChain documentation on OpenAPI Agents. + +### Inputs + +| Name | Type | Description | +|------|------|-------------| +| llm | LanguageModel | Language model to use for the agent | +| path | File | Path to the OpenAPI specification file (JSON or YAML) | +| allow_dangerous_requests | Boolean | Whether to allow potentially dangerous API requests | + +### Outputs + +| Name | Type | Description | +|------|------|-------------| +| agent | AgentExecutor | OpenAPI Agent instance | + +## SQL Agent + +This component creates a SQL Agent to interact with SQL databases. + +### Inputs + +| Name | Type | Description | +|------|------|-------------| +| llm | LanguageModel | Language model to use for the agent | +| database_uri | String | URI of the SQL database to connect to | +| extra_tools | List[Tool] | Additional tools to provide to the agent (optional) | + +### Outputs + +| Name | Type | Description | +|------|------|-------------| +| agent | AgentExecutor | SQL Agent instance | + +## Sequential Crew + +This component represents a group of agents with tasks that are executed sequentially. This component allows for the creation of a crew that performs tasks in a specific order. 
+
+For more information, see the [CrewAI documentation](https://docs.crewai.com/how-to/Sequential/).
+
+### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| tasks | Tasks | List of SequentialTask objects representing the tasks to be executed |
+| verbose | Verbose | Enables verbose output for detailed logging |
+| memory | Memory | Specifies the memory configuration for the crew |
+| use_cache | Use Cache | Enables caching of results |
+| max_rpm | Max RPM | Sets the maximum requests per minute |
+| share_crew | Share Crew | Determines if the crew information is shared among agents |
+| function_calling_llm | Function Calling LLM | Specifies the language model for function calling |
+
+### Outputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| crew | Crew | The constructed Crew object with sequential task execution |
+
+## Sequential Task Agent
+
+This component creates a CrewAI Task and its associated Agent, allowing for the definition of sequential tasks with specific agent roles and capabilities.
+
+For more information, see the [CrewAI documentation](https://docs.crewai.com/how-to/Sequential/).
+ +### Inputs + +| Name | Display Name | Info | +|------|--------------|------| +| role | Role | The role of the agent | +| goal | Goal | The objective of the agent | +| backstory | Backstory | The backstory of the agent | +| tools | Tools | Tools at agent's disposal | +| llm | Language Model | Language model that will run the agent | +| memory | Memory | Whether the agent should have memory or not | +| verbose | Verbose | Enables verbose output | +| allow_delegation | Allow Delegation | Whether the agent is allowed to delegate tasks to other agents | +| allow_code_execution | Allow Code Execution | Whether the agent is allowed to execute code | +| agent_kwargs | Agent kwargs | Additional kwargs for the agent | +| task_description | Task Description | Descriptive text detailing task's purpose and execution | +| expected_output | Expected Task Output | Clear definition of expected task outcome | +| async_execution | Async Execution | Boolean flag indicating asynchronous task execution | +| previous_task | Previous Task | The previous task in the sequence (for chaining) | + +### Outputs + +| Name | Display Name | Info | +|------|--------------|------| +| task_output | Sequential Task | List of SequentialTask objects representing the created task(s) | + +## Tool Calling Agent + +This component creates a Tool Calling Agent using LangChain. + +### Inputs + +| Name | Type | Description | +|------|------|-------------| +| llm | LanguageModel | Language model to use for the agent | +| system_prompt | String | System prompt for the agent | +| user_prompt | String | User prompt template (must contain 'input' key) | +| chat_history | List[Data] | Optional chat history for the agent | +| tools | List[Tool] | List of tools available to the agent | + +### Outputs + +| Name | Type | Description | +|------|------|-------------| +| agent | AgentExecutor | Tool Calling Agent instance | + +## Vector Store Agent + +This component creates a Vector Store Agent using LangChain. 
+
+### Inputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| llm | LanguageModel | Language model to use for the agent |
+| vectorstore | VectorStoreInfo | Vector store information for the agent to use |
+
+### Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| agent | AgentExecutor | Vector Store Agent instance |
+
+## Vector Store Router Agent
+
+This component creates a Vector Store Router Agent using LangChain.
+
+### Inputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| llm | LanguageModel | Language model to use for the agent |
+| vectorstores | List[VectorStoreInfo] | List of vector store information for the agent to route between |
+
+### Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| agent | AgentExecutor | Vector Store Router Agent instance |
+
+## XML Agent
+
+This component creates an XML Agent using LangChain.
+
+The agent uses XML formatting for tool instructions to the Language Model.
+
+### Inputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| llm | LanguageModel | Language model to use for the agent |
+| user_prompt | String | Custom prompt template for the agent (includes XML formatting instructions) |
+| tools | List[Tool] | List of tools available to the agent |
+
+### Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| agent | AgentExecutor | XML Agent instance |
\ No newline at end of file
diff --git a/docs/docs/Components/components-custom-components.md b/docs/docs/Components/components-custom-components.md
index 56a2de5ff775..8b5ef11a5761 100644
--- a/docs/docs/Components/components-custom-components.md
+++ b/docs/docs/Components/components-custom-components.md
@@ -4,105 +4,171 @@ sidebar_position: 8
slug: /components-custom-components
---

+# Custom Components
+Custom components are created within Langflow and extend the platform's functionality with custom, reusable Python code.
-Langflow components can be created from within the platform, allowing users to extend the platform's functionality using Python code. They encapsulate are designed to be independent units, reusable across different workflows.
+Since Langflow operates with Python behind the scenes, you can implement any Python function within a Custom Component. This means you can leverage the power of libraries such as Pandas, Scikit-learn, Numpy, and thousands of other packages to create components that handle data processing in unlimited ways. You can use any type as long as the type is properly annotated in the output methods (e.g., `-> list[int]`).
+Custom Components are reusable and configurable, enhancing the capabilities of Langflow and making it a powerful tool for developing complex processing between user and AI messages.

-These components can be easily connected within a language model pipeline, adding freedom and flexibility to what can be included in between user and AI messages.
-
-
-![](./238089171.png)
-
-
-Since Langflow operates with Python behind the scenes, you can implement any Python function within a Custom Component. This means you can leverage the power of libraries such as Pandas, Scikit-learn, Numpy, and thousands of packages to create components that handle data processing in unlimited ways.

+## How to Create Custom Components
+Creating custom components in Langflow involves creating a Python class that defines the component's functionality, inputs, and outputs.
+The default code provides a working structure for your custom component.
+```python
+# from langflow.field_typing import Data
+from langflow.custom import Component
+from langflow.io import MessageTextInput, Output
+from langflow.schema import Data

-Custom Components are not just about extending functionality; they also streamline the development process.
By creating reusable and configurable components, you can enhance the capabilities of Langflow, making it a powerful tool for developing complex workflows. +class CustomComponent(Component): + display_name = "Custom Component" + description = "Use as a template to create your own component." + documentation: str = "http://docs.langflow.org/components/custom" + icon = "custom_components" + name = "CustomComponent" -### Key Characteristics: {#d3a151089a9e4584bd420461cd1432c6} + inputs = [ + MessageTextInput(name="input_value", display_name="Input Value", value="Hello, World!"), + ] -1. **Modular and Reusable**: Designed as independent units, components encapsulate specific functionality, making them reusable across different projects and workflows. -2. **Integration with Python Libraries**: You can import libraries like Pandas, Scikit-learn, Numpy, etc., to build components that handle data processing, machine learning, numerical computations, and more. -3. **Flexible Inputs and Outputs**: While Langflow offers native input and output types, you can use any type as long as they are properly annotated in the output methods (e.g., `> list[int]`). -4. **Python-Powered**: Since Langflow operates with Python behind the scenes, any Python function can be implemented within a custom component. -5. **Enhanced Workflow**: Custom components serve as reusable building blocks, enabling you to create pre-processing visual blocks with ease and integrate them into your language model pipeline. + outputs = [ + Output(display_name="Output", name="output", method="build_output"), + ] -### Why Use Custom Components? {#827a2b5acec94426a4a2106a8332622d} + def build_output(self) -> Data: + data = Data(value=self.input_value) + self.status = data + return data -- **Customization**: Tailor the functionality to your specific needs by writing Python code that suits your workflow. 
-- **Flexibility**: Add any Python-based logic or processing step between user/AI messages, enhancing the flexibility of Langflow. -- **Efficiency**: Streamline your development process by creating reusable, configurable components that can be easily deployed. +``` -### How to Write Them {#2088ade519514bb3923cdf7f2ac2089a} +You can create your class in your favorite text editor outside of Langflow and paste it in later, or just follow along in the code pane. +1. In Langflow, from under **Helpers**, drag a **Custom Component** into the workspace. +2. Open the component's code pane. +3. Import dependencies. +Your custom component inherits from the langflow `Component` class so you need to include it. +```python +from langflow.custom import Component +from langflow.io import MessageTextInput, Output +from langflow.schema import Data +``` +4. **Define the Class**: Start by defining a Python class that inherits from `Component`. This class will encapsulate the functionality of your custom component. ---- +```python +class CustomComponent(Component): + display_name = "Custom Component" + description = "Use as a template to create your own component." + documentation: str = "http://docs.langflow.org/components/custom" + icon = "custom_components" + name = "CustomComponent" +``` +5. **Specify Inputs and Outputs**: Use Langflow's input and output classes to define the inputs and outputs of your component. They should be declared as class attributes. +```python + inputs = [ + MessageTextInput(name="input_value", display_name="Input Value", value="Hello, World!"), + ] + outputs = [ + Output(display_name="Output", name="output", method="build_output"), + ] +``` +6. **Implement Output Methods**: Implement methods for each output, which contains the logic of your component. These methods can access input values using `self.` , return processed values and define what to be displayed in the component with the `self.status` attribute. 
+```python + def build_output(self) -> Data: + data = Data(value=self.input_value) + self.status = data + return data +``` +7. **Use Proper Annotations**: Ensure that output methods are properly annotated with their types. Langflow uses these annotations to validate and handle data correctly. For example, this method is annotated to output `Data`. +```python + def build_output(self) -> Data: +``` +8. Click **Check & Save** to confirm your component works. +You now have an operational custom component. -Writing custom components in Langflow involves creating a Python class that defines the component's functionality, inputs, and outputs. The process involves a few key steps: -1. **Define the Class**: Start by defining a Python class that inherits from `Component`. This class will encapsulate the functionality of your custom component. -2. **Specify Inputs and Outputs**: Use Langflow's input and output classes to define the inputs and outputs of your component. They should be declared as class attributes. -3. **Implement Output Methods**: Implement methods for each output, which contains the logic of your component. These methods can access input values using `self.` , return processed values and define what to be displayed in the component with the `self.status` attribute. -4. **Use Proper Annotations**: Ensure that output methods are properly annotated with their types. Langflow uses these annotations to validate and handle data correctly. +## Add inputs and modify output methods -Here's a basic structure of a custom component: +This code defines a custom component that accepts 5 inputs and outputs a Message. 
+Copy and paste it into the Custom Component code pane and click **Check & Save.** ```python from langflow.custom import Component -from langflow.inputs import StrInput, IntInput -from langflow.template import Output +from langflow.inputs import StrInput, MultilineInput, SecretStrInput, IntInput, DropdownInput +from langflow.template import Output, Input +from langflow.schema.message import Message class MyCustomComponent(Component): - icon = "coffee" # check lucide.dev/icons or pass an emoji + display_name = "My Custom Component" + description = "An example of a custom component with various input types." inputs = [ StrInput( - name="input_text", - display_name="Input Text", - info="Text to be processed.", + name="username", + display_name="Username", + info="Enter your username." + ), + SecretStrInput( + name="password", + display_name="Password", + info="Enter your password." + ), + MessageTextInput( + name="special_message", + display_name="special_message", + info="Enter a special message.", ), IntInput( - name="input_number", - display_name="Input Number", - info="Number to be processed.", + name="age", + display_name="Age", + info="Enter your age." ), + DropdownInput( + name="gender", + display_name="Gender", + options=["Male", "Female", "Other"], + info="Select your gender." + ) ] outputs = [ - Output(display_name="Processed Text", name="processed_text", method="process_text"), + Output(display_name="Result", name="result", method="process_inputs"), ] - def process_text(self) -> str: - input_text = self.input_text - input_number = self.input_number - # Implement your logic here - processed_text = f"{input_text} processed with number {input_number}" - self.status = processed_text - return processed_text - - + def process_inputs(self) -> Message: + """ + Process the user inputs and return a Message object. + + Returns: + Message: A Message object containing the processed information. 
+ """ + try: + processed_text = f"User {self.username} (Age: {self.age}, Gender: {self.gender}) " \ + f"sent the following special message: {self.special_message}" + return Message(text=processed_text) + except AttributeError as e: + return Message(text=f"Error processing inputs: {str(e)}") ``` +Since the component outputs a `Message`, you can wire it into a chat and pass messages to yourself. -Paste that code into the Custom Component code snippet and click **Check & Save.** - - -![](./1028644105.png) - - -You should see something like the component below. Double click the name or description areas to edit them. +Your Custom Component accepts the Chat Input message through `MessageTextInput`, fills in the variables with the `process_inputs` method, and finally passes the message `User Username (Age: 49, Gender: Male) sent the following special message: Hello!` to Chat Output. +By defining inputs this way, Langflow can automatically handle the validation and display of these fields in the user interface, making it easier to create robust and user-friendly custom components. -![](./241280398.png) +All of the types detailed above derive from a general class that can also be accessed through the generic `Input` class. +:::tip +Use `MessageInput` to get the entire Message object instead of just the text. +::: ## Input Types {#3815589831f24ab792328ed233c8b00d} - --- @@ -224,62 +290,6 @@ Represents a file input field. - **Attributes:** `file_types` to specify the types of files that can be uploaded. - **Input Types:** `["File"]` -Here is an example of how these inputs can be defined in a custom component: - - -```python -from langflow.custom import Component -from langflow.inputs import StrInput, MultilineInput, SecretStrInput, IntInput, DropdownInput -from langflow.template import Output, Input - -class MyCustomComponent(Component): - display_name = "My Custom Component" - description = "An example of a custom component with various input types." 
- - inputs = [ - StrInput( - name="username", - display_name="Username", - info="Enter your username." - ), - SecretStrInput( - name="password", - display_name="Password", - info="Enter your password." - ), - MultilineInput( - name="description", - display_name="Description", - info="Enter a detailed description.", - ), - IntInput( - name="age", - display_name="Age", - info="Enter your age." - ), - DropdownInput( - name="gender", - display_name="Gender", - options=["Male", "Female", "Other"], - info="Select your gender." - ) - ] - - outputs = [ - Output(display_name="Result", name="result", method="process_inputs"), - ] - - def process_inputs(self): - # Your processing logic here - return "Processed" -``` - - -By defining inputs this way, Langflow can automatically handle the validation and display of these fields in the user interface, making it easier to create robust and user-friendly custom components. - - -All of the types detailed above derive from a general class that can also be accessed through the generic `Input` class. - ### Generic Input {#278e2027493e45b68746af0a5b6c06f6} @@ -316,74 +326,104 @@ The `Input` class is highly customizable, allowing you to specify a wide range - `load_from_db`: Boolean indicating if the field should load from the database. Default is `False`. - `title_case`: Boolean indicating if the display name should be in title case. Default is `True`. -Below is an example of how to define inputs for a component using the `Input` class: +## Create a Custom Component with Generic Input + +Here is an example of how to define inputs for a component using the `Input` class. 
+Copy and paste it into the Custom Component code pane and click **Check & Save.** ```python from langflow.template import Input, Output from langflow.custom import Component from langflow.field_typing import Text +from langflow.schema.message import Message +from typing import Dict, Any -class ExampleComponent(Component): - display_name = "Example Component" - description = "An example component demonstrating input fields." +class TextAnalyzerComponent(Component): + display_name = "Text Analyzer" + description = "Analyzes input text and provides basic statistics." inputs = [ Input( name="input_text", display_name="Input Text", - field_type="str", + field_type="Message", required=True, - placeholder="Enter some text", + placeholder="Enter text to analyze", multiline=True, - info="This is a required text input.", + info="The text you want to analyze.", input_types=["Text"] ), Input( - name="max_length", - display_name="Max Length", - field_type="int", + name="include_word_count", + display_name="Include Word Count", + field_type="bool", required=False, - placeholder="Maximum length", - info="Enter the maximum length of the text.", - range_spec={"min": 0, "max": 1000}, + info="Whether to include word count in the analysis.", ), Input( - name="options", - display_name="Options", - field_type="str", - is_list=True, - options=["Option 1", "Option 2", "Option 3"], - info="Select one or more options." 
+ name="perform_sentiment_analysis", + display_name="Perform Sentiment Analysis", + field_type="bool", + required=False, + info="Whether to perform basic sentiment analysis.", ), ] outputs = [ - Output(display_name="Result", name="result", method="process_input"), + Output(display_name="Analysis Results", name="results", method="analyze_text"), ] - def process_input(self) -> Text: - # Process the inputs and generate output - return Text(value=f"Processed: {self.input_text}, Max Length: {self.max_length}, Options: {self.options}") + def analyze_text(self) -> Message: + # Extract text from the Message object + if isinstance(self.input_text, Message): + text = self.input_text.text + else: + text = str(self.input_text) -# Define how to use the inputs and outputs -component = ExampleComponent() + results = { + "character_count": len(text), + "sentence_count": text.count('.') + text.count('!') + text.count('?') + } + + if self.include_word_count: + results["word_count"] = len(text.split()) + + if self.perform_sentiment_analysis: + # Basic sentiment analysis + text_lower = text.lower() + if "happy" in text_lower or "good" in text_lower: + sentiment = "positive" + elif "sad" in text_lower or "bad" in text_lower: + sentiment = "negative" + else: + sentiment = "neutral" + results["sentiment"] = sentiment + # Convert the results dictionary to a formatted string + formatted_results = "\n".join([f"{key}: {value}" for key, value in results.items()]) + + # Return a Message object + return Message(text=formatted_results) + +# Define how to use the inputs and outputs +component = TextAnalyzerComponent() ``` +In this custom component: -In this example: +- The `input_text` input is a required multi-line text field that accepts a Message object or a string. It's used to provide the text for analysis. -- The `input_text` input is a required multi-line text field. -- The `max_length` input is an optional integer field with a range specification. 
-- The `options` input is a list of strings with predefined options. +- The `include_word_count` input is an optional boolean field. When set to True, it adds a word count to the analysis results. -These attributes allow for a high degree of customization, making it easy to create input fields that suit the needs of your specific component. +- The `perform_sentiment_analysis` input is an optional boolean field. When set to True, it triggers a basic sentiment analysis of the input text. +The component performs basic text analysis, including character count and sentence count (based on punctuation marks). If word count is enabled, it splits the text and counts the words. If sentiment analysis is enabled, it performs a simple keyword-based sentiment classification (positive, negative, or neutral). -### Multiple Outputs {#6f225be8a142450aa19ee8e46a3b3c8c} +Since the component inputs and outputs a `Message`, you can wire the component into a chat and see how the basic custom component logic interacts with your input. +## Create a Custom Component with Multiple Outputs {#6f225be8a142450aa19ee8e46a3b3c8c} --- @@ -393,7 +433,7 @@ In Langflow, custom components can have multiple outputs. Each output can be ass 1. **Definition of Outputs**: Each output is defined in the `outputs` list of the component. Each output is associated with a display name, an internal name, and a method that gets called to generate the output. 2. **Output Methods**: The methods associated with outputs are responsible for generating the data for that particular output. These methods are called when the component is executed, and each method can independently produce its result. -Below is an example of a component with two outputs: +This example component has two outputs: - `process_data`: Processes the input text (e.g., converts it to uppercase) and returns it. - `get_processing_function`: Returns the `process_data` method itself to be reused in composition. 
@@ -434,18 +474,12 @@ class DualOutputComponent(Component): return self.process_data ``` - This example shows how to define multiple outputs in a custom component. The first output returns the processed data, while the second output returns the processing function itself. - The `processing_function` output can be used in scenarios where the function itself is needed for further processing or dynamic flow control. Notice how both outputs are properly annotated with their respective types, ensuring clarity and type safety. -## Special Operations {#b1ef2d18e2694b93927ae9403d24b96b} - - ---- - +## Special Operations Advanced methods and attributes offer additional control and functionality. Understanding how to leverage these can enhance your custom components' capabilities. @@ -454,3 +488,7 @@ Advanced methods and attributes offer additional control and functionality. Unde - `self.status`: Use this to update the component's status or intermediate results. It helps track the component's internal state or store temporary data. - `self.graph.flow_id`: Retrieve the flow ID, useful for maintaining context or debugging. - `self.stop("output_name")`: Use this method within an output function to prevent data from being sent through other components. This method stops next component execution and is particularly useful for specific operations where a component should stop from running based on specific conditions. + +## Contribute Custom Components to Langflow + +See [How to Contribute](/contributing-how-to-contribute#submitting-components) to contribute your custom component to Langflow. \ No newline at end of file diff --git a/docs/docs/Components/components-data.md b/docs/docs/Components/components-data.md index 65d95326fec9..87ea75ac7314 100644 --- a/docs/docs/Components/components-data.md +++ b/docs/docs/Components/components-data.md @@ -4,95 +4,157 @@ sidebar_position: 3 slug: /components-data --- +## API Request +This component sends HTTP requests to the specified URLs. 
-:::info +Use this component to interact with external APIs or services and retrieve data. Ensure that the URLs are valid and that you configure the method, headers, body, and timeout correctly. -This page may contain outdated information. It will be updated as soon as possible. +### Parameters -::: +#### Inputs +| Name | Display Name | Info | +| ------- | ------------ | -------------------------------------------------------------------------- | +| URLs | URLs | The URLs to target | +| curl | curl | Paste a curl command to fill in the dictionary fields for headers and body | +| Method | HTTP Method | The HTTP method to use, such as GET or POST | +| Headers | Headers | The headers to include with the request | +| Body | Request Body | The data to send with the request (for methods like POST, PATCH, PUT) | +| Timeout | Timeout | The maximum time to wait for a response | +## Directory +This component recursively loads files from a directory, with options for file types, depth, and concurrency. -## API Request {#23da589293f74016a1f70d6d7c0fdc55} +### Parameters +| Input | Type | Description | +| ------------------ | ---------------- | -------------------------------------------------- | +| path | MessageTextInput | Path to the directory to load files from | +| types | MessageTextInput | File types to load (leave empty to load all types) | +| depth | IntInput | Depth to search for files | +| max_concurrency | IntInput | Maximum concurrency for loading files | +| load_hidden | BoolInput | If true, hidden files will be loaded | +| recursive | BoolInput | If true, the search will be recursive | +| silent_errors | BoolInput | If true, errors will not raise an exception | +| use_multithreading | BoolInput | If true, multithreading will be used | ---- +| Output | Type | Description | +| ------ | ---------- | ----------------------------------- | +| data | List[Data] | Loaded file data from the directory | +## File -This component sends HTTP requests to the specified URLs. 
- +The FileComponent is a class that loads and parses text files of various supported formats, converting the content into a Data object. It supports multiple file types and provides an option for silent error handling. -Use this component to interact with external APIs or services and retrieve data. Ensure that the URLs are valid and that you configure the method, headers, body, and timeout correctly. +### Parameters +#### Inputs -**Parameters:** +| Name | Display Name | Info | +| ------------- | ------------- | -------------------------------------------- | +| path | Path | File path to load. | +| silent_errors | Silent Errors | If true, errors will not raise an exception. | -- **URLs:** The URLs to target. -- **Method:** The HTTP method, such as GET or POST. -- **Headers:** The headers to include with the request. -- **Body:** The data to send with the request (for methods like POST, PATCH, PUT). -- **Timeout:** The maximum time to wait for a response. +#### Outputs -## Directory {#4fe56acaaac847029ace173dc793f8f4} +| Name | Display Name | Info | +| ---- | ------------ | -------------------------------------------- | +| data | Data | Parsed content of the file as a Data object. | +## URL ---- +The URLComponent is a class that fetches content from one or more URLs, processes the content, and returns it as a list of Data objects. It ensures that the provided URLs are valid and uses WebBaseLoader to fetch the content. +### Parameters -This component recursively retrieves files from a specified directory. +#### Inputs +| Name | Display Name | Info | +| ---- | ------------ | ---------------------- | +| urls | URLs | Enter one or more URLs | -Use this component to retrieve various file types, such as text or JSON files, from a directory. Make sure to provide the correct path and configure the other parameters as needed. 
+#### Outputs +| Name | Display Name | Info | +| ---- | ------------ | ------------------------------------------------------------ | +| data | Data | List of Data objects containing fetched content and metadata | -**Parameters:** +## Gmail Loader -- **Path:** The directory path. -- **Types:** The types of files to retrieve. Leave this blank to retrieve all file types. -- **Depth:** The level of directory depth to search. -- **Max Concurrency:** The maximum number of simultaneous file loading operations. -- **Load Hidden:** Set to true to include hidden files. -- **Recursive:** Set to true to enable recursive search. -- **Silent Errors:** Set to true to suppress exceptions on errors. -- **Use Multithreading:** Set to true to use multithreading in file loading. +This component loads emails from Gmail using provided credentials and filters. -## File {#d5d4bb78ce0a473d8a3b6a296d3e8383} +For more on creating a service account JSON, see [Service Account JSON](https://developers.google.com/identity/protocols/oauth2/service-account). +### Parameters ---- +| Input | Type | Description | +| ----------- | ---------------- | ------------------------------------------------------------------------------------ | +| json_string | SecretStrInput | JSON string containing OAuth 2.0 access token information for service account access | +| label_ids | MessageTextInput | Comma-separated list of label IDs to filter emails | +| max_results | MessageTextInput | Maximum number of emails to load | +| Output | Type | Description | +| ------ | ---- | ----------------- | +| data | Data | Loaded email data | -This component loads a file. +## Google Drive Loader +This component loads documents from Google Drive using provided credentials and a single document ID. -Use this component to load files, such as text or JSON files. Ensure you specify the correct path and configure other parameters as necessary. 
+For more on creating a service account JSON, see [Service Account JSON](https://developers.google.com/identity/protocols/oauth2/service-account). +### Parameters -**Parameters:** +| Input | Type | Description | +| ----------- | ---------------- | ------------------------------------------------------------------------------------ | +| json_string | SecretStrInput | JSON string containing OAuth 2.0 access token information for service account access | +| document_id | MessageTextInput | Single Google Drive document ID | -- **Path:** The file path. -- **Silent Errors:** Set to true to prevent exceptions on errors. +| Output | Type | Description | +| ------ | ---- | -------------------- | +| docs | Data | Loaded document data | -## URL {#1cc513827a0942d6885b3a9168eabc97} +## Google Drive Search +This component searches Google Drive files using provided credentials and query parameters. ---- +For more on creating a service account JSON, see [Service Account JSON](https://developers.google.com/identity/protocols/oauth2/service-account). +### Parameters -This component retrieves content from specified URLs. 
+| Input | Type | Description | +| -------------- | ---------------- | ------------------------------------------------------------------------------------ | +| token_string | SecretStrInput | JSON string containing OAuth 2.0 access token information for service account access | +| query_item | DropdownInput | The field to query | +| valid_operator | DropdownInput | Operator to use in the query | +| search_term | MessageTextInput | The value to search for in the specified query item | +| query_string | MessageTextInput | The query string used for searching (can be edited manually) | +| Output | Type | Description | +| ---------- | --------- | ----------------------------------------------- | +| doc_urls | List[str] | URLs of the found documents | +| doc_ids | List[str] | IDs of the found documents | +| doc_titles | List[str] | Titles of the found documents | +| Data | Data | Document titles and URLs in a structured format | -Ensure the URLs are valid and adjust other parameters as needed. **Parameters:** +## Webhook -- **URLs:** The URLs to retrieve content from. +This component defines a webhook input for the flow. The flow can be triggered by an external HTTP POST request (webhook) sending a JSON payload. -## Create Data {#aac4cad0cd38426191c2e7516285877b} +If the input is not valid JSON, the component will wrap it in a "payload" field. The component's status will reflect any errors or the processed data. +### Parameters ---- +#### Inputs +| Name | Type | Description | +| ---- | ------ | ---------------------------------------------- | +| data | String | JSON payload for testing the webhook component | -This component allows you to create a `Data` from a number of inputs. You can add as many key-value pairs as you want (as long as it is less than 15). Once you've picked that number you'll need to write the name of the Key and can pass `Text` values from other components to it. 
+#### Outputs +| Name | Type | Description | +| ----------- | ---- | ------------------------------------- | +| output_data | Data | Processed data from the webhook input | diff --git a/docs/docs/Components/components-embedding-models.md b/docs/docs/Components/components-embedding-models.md index 74e9e446c1a3..b44394ced6bd 100644 --- a/docs/docs/Components/components-embedding-models.md +++ b/docs/docs/Components/components-embedding-models.md @@ -4,170 +4,345 @@ sidebar_position: 6 slug: /components-embedding-models --- +# Embedding Models +Embeddings models are used to convert text into numerical vectors. These vectors can be used for various tasks such as similarity search, clustering, and classification. -:::info +## AI/ML -This page may contain outdated information. It will be updated as soon as possible. +This component generates embeddings using the [AI/ML API](https://docs.aimlapi.com/api-overview/embeddings). -::: +### Parameters + +#### Inputs + +| Name | Type | Description | +|------|------|-------------| +| model_name | String | The name of the AI/ML embedding model to use | +| aiml_api_key | SecretString | API key for authenticating with the AI/ML service | + +#### Outputs + +| Name | Type | Description | +|------|------|-------------| +| embeddings | Embeddings | An instance of AIMLEmbeddingsImpl for generating embeddings | + +## Amazon Bedrock Embeddings + +This component is used to load embedding models from [Amazon Bedrock](https://aws.amazon.com/bedrock/). + +### Parameters + +#### Inputs + +| Name | Type | Description | +|------|------|-------------| +| credentials_profile_name | String | Name of the AWS credentials profile in ~/.aws/credentials or ~/.aws/config, which has access keys or role information | +| model_id | String | ID of the model to call, e.g., `amazon.titan-embed-text-v1`. 
This is equivalent to the `modelId` property in the `list-foundation-models` API | +| endpoint_url | String | URL to set a specific service endpoint other than the default AWS endpoint | +| region_name | String | AWS region to use, e.g., `us-west-2`. Falls back to `AWS_DEFAULT_REGION` environment variable or region specified in ~/.aws/config if not provided | + +#### Outputs + +| Name | Type | Description | +|------|------|-------------| +| embeddings | Embeddings | An instance for generating embeddings using Amazon Bedrock | + +## Astra DB vectorize + +Connect this component to the **Embeddings** port of the [Astra DB vector store component](components-vector-stores#astra-db-serverless) to generate embeddings. + +This component requires that your Astra DB database has a collection that uses a vectorize embedding provider integration. +For more information and instructions, see [Embedding Generation](https://docs.datastax.com/en/astra-db-serverless/databases/embedding-generation.html). + +### Parameters + +#### Inputs + +| Name | Display Name | Info | +|------|--------------|------| +| provider | Embedding Provider | The embedding provider to use | +| model_name | Model Name | The embedding model to use | +| authentication | Authentication | The name of the API key in Astra that stores your [vectorize embedding provider credentials](https://docs.datastax.com/en/astra-db-serverless/databases/embedding-generation.html#embedding-provider-authentication). (Not required if using an [Astra-hosted embedding provider](https://docs.datastax.com/en/astra-db-serverless/databases/embedding-generation.html#supported-embedding-providers).) | +| provider_api_key | Provider API Key | As an alternative to `authentication`, directly provide your embedding provider credentials. 
| +| model_parameters | Model Parameters | Additional model parameters | + +#### Outputs + +| Name | Type | Description | +|------|------|-------------| +| embeddings | Embeddings | An instance for generating embeddings using Astra vectorize | + +## Azure OpenAI Embeddings + +This component generates embeddings using Azure OpenAI models. + +### Parameters +#### Inputs +| Name | Type | Description | +|------|------|-------------| +| Model | String | Name of the model to use (default: `text-embedding-3-small`) | +| Azure Endpoint | String | Your Azure endpoint, including the resource. Example: `https://example-resource.azure.openai.com/` | +| Deployment Name | String | The name of the deployment | +| API Version | String | The API version to use, options include various dates | +| API Key | String | The API key to access the Azure OpenAI service | +#### Outputs -## Amazon Bedrock Embeddings {#4ddcfde8c1664e358d3f16d718e944d8} | Name | Type | Description | +|------|------|-------------| +| embeddings | Embeddings | An instance for generating embeddings using Azure OpenAI | +## Cohere Embeddings + +This component is used to load embedding models from [Cohere](https://cohere.com/). + +### Parameters + +#### Inputs + +| Name | Type | Description | +|------|------|-------------| +| cohere_api_key | String | API key required to authenticate with the Cohere service | +| model | String | Language model used for embedding text documents and performing queries (default: `embed-english-v2.0`) | +| truncate | Boolean | Whether to truncate the input text to fit within the model's constraints (default: `False`) | + +#### Outputs + +| Name | Type | Description | +|------|------|-------------| +| embeddings | Embeddings | An instance for generating embeddings using Cohere | + +## Embedding similarity + +This component computes selected forms of similarity between two embedding vectors.
+ +### Parameters + +#### Inputs + +| Name | Display Name | Info | +|------|--------------|------| +| embedding_vectors | Embedding Vectors | A list containing exactly two data objects with embedding vectors to compare. | +| similarity_metric | Similarity Metric | Select the similarity metric to use. Options: "Cosine Similarity", "Euclidean Distance", "Manhattan Distance". | + +#### Outputs + +| Name | Display Name | Info | +|------|--------------|------| +| similarity_data | Similarity Data | Data object containing the computed similarity score and additional information. | + +## Google generative AI embeddings + +This component connects to Google's generative AI embedding service using the GoogleGenerativeAIEmbeddings class from the `langchain-google-genai` package. + +### Parameters + +#### Inputs + +| Name | Display Name | Info | +|------|--------------|------| +| api_key | API Key | Secret API key for accessing Google's generative AI service (required) | +| model_name | Model Name | Name of the embedding model to use (default: "models/text-embedding-004") | + +#### Outputs + +| Name | Display Name | Info | +|------|--------------|------| +| embeddings | Embeddings | Built GoogleGenerativeAIEmbeddings object | + +## Hugging Face Embeddings + +:::note +This component is deprecated as of Langflow version 1.0.18. +Instead, use the [Hugging Face API Embeddings component](#hugging-face-embeddings-inference-api). +::: -Used to load embedding models from [Amazon Bedrock](https://aws.amazon.com/bedrock/). +This component loads embedding models from HuggingFace. +Use this component to generate embeddings using locally downloaded Hugging Face models. Ensure you have sufficient computational resources to run the models. 
-| **Parameter** | **Type** | **Description** | **Default** | -| -------------------------- | -------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- | -| `credentials_profile_name` | `str` | Name of the AWS credentials profile in ~/.aws/credentials or ~/.aws/config, which has access keys or role information. | | -| `model_id` | `str` | ID of the model to call, e.g., `amazon.titan-embed-text-v1`. This is equivalent to the `modelId` property in the `list-foundation-models` API. | | -| `endpoint_url` | `str` | URL to set a specific service endpoint other than the default AWS endpoint. | | -| `region_name` | `str` | AWS region to use, e.g., `us-west-2`. Falls back to `AWS_DEFAULT_REGION` environment variable or region specified in ~/.aws/config if not provided. | | +### Parameters +#### Inputs -## Astra vectorize {#c1e6d1373824424ea130e052ba0f46af} +| Name | Display Name | Info | +|------|--------------|------| +| Cache Folder | Cache Folder | Folder path to cache HuggingFace models | +| Encode Kwargs | Encoding Arguments | Additional arguments for the encoding process | +| Model Kwargs | Model Arguments | Additional arguments for the model | +| Model Name | Model Name | Name of the HuggingFace model to use | +| Multi Process | Multi-Process | Whether to use multiple processes | +## Hugging Face embeddings Inference API -Used to generate server-side embeddings using [DataStax Astra](https://docs.datastax.com/en/astra-db-serverless/databases/embedding-generation.html). +This component generates embeddings using Hugging Face Inference API models. +Use this component to create embeddings with Hugging Face's hosted models. Ensure you have a valid Hugging Face API key. 
-| **Parameter** | **Type** | **Description** | **Default** | -| ------------------ | -------- | --------------------------------------------------------------------------------------------------------------------- | ----------- | -| `provider` | `str` | The embedding provider to use. | | -| `model_name` | `str` | The embedding model to use. | | -| `authentication` | `dict` | Authentication parameters. Use the Astra Portal to add the embedding provider integration to your Astra organization. | | -| `provider_api_key` | `str` | An alternative to the Astra Authentication that let you use directly the API key of the provider. | | -| `model_parameters` | `dict` | Additional model parameters. | | +### Parameters +#### Inputs -## Cohere Embeddings {#0c5b7b8790da448fabd4c5ddba1fcbde} +| Name | Display Name | Info | +|------|--------------|------| +| API Key | API Key | API key for accessing the Hugging Face Inference API | +| API URL | API URL | URL of the Hugging Face Inference API | +| Model Name | Model Name | Name of the model to use for embeddings | +| Cache Folder | Cache Folder | Folder path to cache Hugging Face models | +| Encode Kwargs | Encoding Arguments | Additional arguments for the encoding process | +| Model Kwargs | Model Arguments | Additional arguments for the model | +| Multi Process | Multi-Process | Whether to use multiple processes | +## MistralAI -Used to load embedding models from [Cohere](https://cohere.com/). +This component generates embeddings using MistralAI models. +### Parameters -| **Parameter** | **Type** | **Description** | **Default** | -| ---------------- | -------- | ------------------------------------------------------------------------- | -------------------- | -| `cohere_api_key` | `str` | API key required to authenticate with the Cohere service. | | -| `model` | `str` | Language model used for embedding text documents and performing queries. 
| `embed-english-v2.0` | -| `truncate` | `bool` | Whether to truncate the input text to fit within the model's constraints. | `False` | +#### Inputs +| Name | Type | Description | +|------|------|-------------| +| model | String | The MistralAI model to use (default: "mistral-embed") | +| mistral_api_key | SecretString | API key for authenticating with MistralAI | +| max_concurrent_requests | Integer | Maximum number of concurrent API requests (default: 64) | +| max_retries | Integer | Maximum number of retry attempts for failed requests (default: 5) | +| timeout | Integer | Request timeout in seconds (default: 120) | +| endpoint | String | Custom API endpoint URL (default: "https://api.mistral.ai/v1/") | -## Azure OpenAI Embeddings {#8ffb790d5a6c484dab3fe6c777638a44} +#### Outputs +| Name | Type | Description | +|------|------|-------------| +| embeddings | Embeddings | MistralAIEmbeddings instance for generating embeddings | -Generate embeddings using Azure OpenAI models. +## NVIDIA +This component generates embeddings using NVIDIA models. -| **Parameter** | **Type** | **Description** | **Default** | -| ----------------- | -------- | -------------------------------------------------------------------------------------------------- | ----------- | -| `Azure Endpoint` | `str` | Your Azure endpoint, including the resource. Example: `https://example-resource.azure.openai.com/` | | -| `Deployment Name` | `str` | The name of the deployment. | | -| `API Version` | `str` | The API version to use, options include various dates. | | -| `API Key` | `str` | The API key to access the Azure OpenAI service. 
| | +### Parameters +#### Inputs -## Hugging Face API Embeddings {#8536e4ee907b48688e603ae9bf7822cb} +| Name | Type | Description | +|------|------|-------------| +| model | String | The NVIDIA model to use for embeddings (e.g., nvidia/nv-embed-v1) | +| base_url | String | Base URL for the NVIDIA API (default: https://integrate.api.nvidia.com/v1) | +| nvidia_api_key | SecretString | API key for authenticating with NVIDIA's service | +| temperature | Float | Model temperature for embedding generation (default: 0.1) | +#### Outputs -Generate embeddings using Hugging Face Inference API models. +| Name | Type | Description | +|------|------|-------------| +| embeddings | Embeddings | NVIDIAEmbeddings instance for generating embeddings | +## Ollama Embeddings -| **Parameter** | **Type** | **Description** | **Default** | -| --------------- | -------- | ----------------------------------------------------- | ------------------------ | -| `API Key` | `str` | API key for accessing the Hugging Face Inference API. | | -| `API URL` | `str` | URL of the Hugging Face Inference API. | `http://localhost:8080` | -| `Model Name` | `str` | Name of the model to use for embeddings. | `BAAI/bge-large-en-v1.5` | -| `Cache Folder` | `str` | Folder path to cache Hugging Face models. | | -| `Encode Kwargs` | `dict` | Additional arguments for the encoding process. | | -| `Model Kwargs` | `dict` | Additional arguments for the model. | | -| `Multi Process` | `bool` | Whether to use multiple processes. | `False` | +This component generates embeddings using Ollama models. +### Parameters -## Hugging Face Embeddings {#b2b74732874743d3be6fdf8aae049e74} +#### Inputs +| Name | Type | Description | +|------|------|-------------| +| Ollama Model | String | Name of the Ollama model to use (default: `llama2`) | +| Ollama Base URL | String | Base URL of the Ollama API (default: `http://localhost:11434`) | +| Model Temperature | Float | Temperature parameter for the model. 
Adjusts the randomness in the generated embeddings | -Used to load embedding models from [HuggingFace](https://huggingface.co/). +#### Outputs +| Name | Type | Description | +|------|------|-------------| +| embeddings | Embeddings | An instance for generating embeddings using Ollama | -| **Parameter** | **Type** | **Description** | **Default** | -| --------------- | -------- | ---------------------------------------------- | ----------------------------------------- | -| `Cache Folder` | `str` | Folder path to cache HuggingFace models. | | -| `Encode Kwargs` | `dict` | Additional arguments for the encoding process. | | -| `Model Kwargs` | `dict` | Additional arguments for the model. | | -| `Model Name` | `str` | Name of the HuggingFace model to use. | `sentence-transformers/all-mpnet-base-v2` | -| `Multi Process` | `bool` | Whether to use multiple processes. | `False` | +## OpenAI Embeddings +This component is used to load embedding models from [OpenAI](https://openai.com/). -## OpenAI Embeddings {#af7630df05a245d1a632e1bf6db2a4c5} +### Parameters +#### Inputs -Used to load embedding models from [OpenAI](https://openai.com/). 
+| Name | Type | Description | +|------|------|-------------| +| OpenAI API Key | String | The API key to use for accessing the OpenAI API | +| Default Headers | Dict | Default headers for the HTTP requests | +| Default Query | NestedDict | Default query parameters for the HTTP requests | +| Allowed Special | List | Special tokens allowed for processing (default: `[]`) | +| Disallowed Special | List | Special tokens disallowed for processing (default: `["all"]`) | +| Chunk Size | Integer | Chunk size for processing (default: `1000`) | +| Client | Any | HTTP client for making requests | +| Deployment | String | Deployment name for the model (default: `text-embedding-3-small`) | +| Embedding Context Length | Integer | Length of embedding context (default: `8191`) | +| Max Retries | Integer | Maximum number of retries for failed requests (default: `6`) | +| Model | String | Name of the model to use (default: `text-embedding-3-small`) | +| Model Kwargs | NestedDict | Additional keyword arguments for the model | +| OpenAI API Base | String | Base URL of the OpenAI API | +| OpenAI API Type | String | Type of the OpenAI API | +| OpenAI API Version | String | Version of the OpenAI API | +| OpenAI Organization | String | Organization associated with the API key | +| OpenAI Proxy | String | Proxy server for the requests | +| Request Timeout | Float | Timeout for the HTTP requests | +| Show Progress Bar | Boolean | Whether to show a progress bar for processing (default: `False`) | +| Skip Empty | Boolean | Whether to skip empty inputs (default: `False`) | +| TikToken Enable | Boolean | Whether to enable TikToken (default: `True`) | +| TikToken Model Name | String | Name of the TikToken model | +#### Outputs -| **Parameter** | **Type** | **Description** | **Default** | -| -------------------------- | ---------------- | ------------------------------------------------ | ------------------------ | -| `OpenAI API Key` | `str` | The API key to use for accessing the OpenAI API. 
| | -| `Default Headers` | `Dict[str, str]` | Default headers for the HTTP requests. | | -| `Default Query` | `NestedDict` | Default query parameters for the HTTP requests. | | -| `Allowed Special` | `List[str]` | Special tokens allowed for processing. | `[]` | -| `Disallowed Special` | `List[str]` | Special tokens disallowed for processing. | `["all"]` | -| `Chunk Size` | `int` | Chunk size for processing. | `1000` | -| `Client` | `Any` | HTTP client for making requests. | | -| `Deployment` | `str` | Deployment name for the model. | `text-embedding-3-small` | -| `Embedding Context Length` | `int` | Length of embedding context. | `8191` | -| `Max Retries` | `int` | Maximum number of retries for failed requests. | `6` | -| `Model` | `str` | Name of the model to use. | `text-embedding-3-small` | -| `Model Kwargs` | `NestedDict` | Additional keyword arguments for the model. | | -| `OpenAI API Base` | `str` | Base URL of the OpenAI API. | | -| `OpenAI API Type` | `str` | Type of the OpenAI API. | | -| `OpenAI API Version` | `str` | Version of the OpenAI API. | | -| `OpenAI Organization` | `str` | Organization associated with the API key. | | -| `OpenAI Proxy` | `str` | Proxy server for the requests. | | -| `Request Timeout` | `float` | Timeout for the HTTP requests. | | -| `Show Progress Bar` | `bool` | Whether to show a progress bar for processing. | `False` | -| `Skip Empty` | `bool` | Whether to skip empty inputs. | `False` | -| `TikToken Enable` | `bool` | Whether to enable TikToken. | `True` | -| `TikToken Model Name` | `str` | Name of the TikToken model. | | +| Name | Type | Description | +|------|------|-------------| +| embeddings | Embeddings | An instance for generating embeddings using OpenAI | +## Text embedder -## Ollama Embeddings {#a26d2cb92e6d44669c2cfff71a5e9431} +This component generates embeddings for a given message using a specified embedding model. +### Parameters -Generate embeddings using Ollama models. 
+#### Inputs +| Name | Display Name | Info | +|------|--------------|------| +| embedding_model | Embedding Model | The embedding model to use for generating embeddings. | +| message | Message | The message for which to generate embeddings. | -| **Parameter** | **Type** | **Description** | **Default** | -| ------------------- | -------- | ---------------------------------------------------------------------------------------- | ------------------------ | -| `Ollama Model` | `str` | Name of the Ollama model to use. | `llama2` | -| `Ollama Base URL` | `str` | Base URL of the Ollama API. | `http://localhost:11434` | -| `Model Temperature` | `float` | Temperature parameter for the model. Adjusts the randomness in the generated embeddings. | | +#### Outputs +| Name | Display Name | Info | +|------|--------------|------| +| embeddings | Embedding Data | Data object containing the original text and its embedding vector. | -## VertexAI Embeddings {#707b38c23cb9413fbbaab1ae7b872311} +## VertexAI Embeddings +This component is a wrapper around [Google Vertex AI](https://cloud.google.com/vertex-ai) [Embeddings API](https://cloud.google.com/vertex-ai/docs/generative-ai/embeddings/get-text-embeddings). -Wrapper around [Google Vertex AI](https://cloud.google.com/vertex-ai) [Embeddings API](https://cloud.google.com/vertex-ai/docs/generative-ai/embeddings/get-text-embeddings). +### Parameters +#### Inputs -| **Parameter** | **Type** | **Description** | **Default** | -| --------------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------ | ------------- | -| `credentials` | `Credentials` | The default custom credentials to use. | | -| `location` | `str` | The default location to use when making API calls. | `us-central1` | -| `max_output_tokens` | `int` | Token limit determines the maximum amount of text output from one prompt. 
| `128` | -| `model_name` | `str` | The name of the Vertex AI large language model. | `text-bison` | -| `project` | `str` | The default GCP project to use when making Vertex API calls. | | -| `request_parallelism` | `int` | The amount of parallelism allowed for requests issued to VertexAI models. | `5` | -| `temperature` | `float` | Tunes the degree of randomness in text generations. Should be a non-negative value. | `0` | -| `top_k` | `int` | How the model selects tokens for output, the next token is selected from the top `k` tokens. | `40` | -| `top_p` | `float` | Tokens are selected from the most probable to least until the sum of their probabilities exceeds the top `p` value. | `0.95` | -| `tuned_model_name` | `str` | The name of a tuned model. If provided, `model_name` is ignored. | | -| `verbose` | `bool` | This parameter controls the level of detail in the output. When set to `True`, it prints internal states of the chain to help debug. | `False` | +| Name | Type | Description | +|------|------|-------------| +| credentials | Credentials | The default custom credentials to use | +| location | String | The default location to use when making API calls (default: `us-central1`) | +| max_output_tokens | Integer | Token limit determines the maximum amount of text output from one prompt (default: `128`) | +| model_name | String | The name of the Vertex AI large language model (default: `text-bison`) | +| project | String | The default GCP project to use when making Vertex API calls | +| request_parallelism | Integer | The amount of parallelism allowed for requests issued to VertexAI models (default: `5`) | +| temperature | Float | Tunes the degree of randomness in text generations. 
Should be a non-negative value (default: `0`) | +| top_k | Integer | How the model selects tokens for output, the next token is selected from the top `k` tokens (default: `40`) | +| top_p | Float | Tokens are selected from the most probable to least until the sum of their probabilities exceeds the top `p` value (default: `0.95`) | +| tuned_model_name | String | The name of a tuned model. If provided, `model_name` is ignored | +| verbose | Boolean | This parameter controls the level of detail in the output. When set to `True`, it prints internal states of the chain to help debug (default: `False`) | +#### Outputs -[Previous Vector Stores](/components-vector-stores) +| Name | Type | Description | +|------|------|-------------| +| embeddings | Embeddings | An instance for generating embeddings using VertexAI | diff --git a/docs/docs/Components/components-helpers.md b/docs/docs/Components/components-helpers.md index ef90af07e4ca..77234609a2d5 100644 --- a/docs/docs/Components/components-helpers.md +++ b/docs/docs/Components/components-helpers.md @@ -4,167 +4,244 @@ sidebar_position: 4 slug: /components-helpers --- +# Helpers +Helper components provide utility functions to help manage data, tasks, and other components in your flow. -:::info +## Chat Memory -This page may contain outdated information. It will be updated as soon as possible. +This component retrieves and manages chat messages from Langflow tables or an external memory. -::: +### Parameters +#### Inputs +| Name | Display Name | Info | +|------|--------------|------| +| memory | External Memory | Retrieve messages from an external memory. If empty, it will use the Langflow tables. | +| sender | Sender Type | Filter by sender type. | +| sender_name | Sender Name | Filter by sender name. | +| n_messages | Number of Messages | Number of messages to retrieve. | +| session_id | Session ID | The session ID of the chat. If empty, the current session ID parameter will be used. 
| +| order | Order | Order of the messages. | +| template | Template | The template to use for formatting the data. It can contain the keys `{text}`, `{sender}` or any other key in the message data. | +#### Outputs -## Chat memory {#304dc4a3bea74efb9068093ff18a56ad} +| Name | Display Name | Info | +|------|--------------|------| +| messages | Messages (Data) | Retrieved messages as Data objects. | +| messages_text | Messages (Text) | Retrieved messages formatted as text. | +| lc_memory | Memory | Built LangChain memory object. | +## Combine Text -This component retrieves stored chat messages based on a specific session ID. - - -### Parameters {#e0af57d97f844ce99789958161d19767} - -- **Sender type:** Choose the sender type from options like "Machine", "User", or "Both". -- **Sender name:** (Optional) The name of the sender. -- **Number of messages:** Number of messages to retrieve. -- **Session ID:** The session ID of the chat history. -- **Order:** Choose the message order, either "Ascending" or "Descending". -- **Data template:** (Optional) Template to convert a record to text. If left empty, the system dynamically sets it to the record's text key. - ---- - - -### Combine text {#13443183e6054d0694d65f8df08833d5} +This component concatenates two text sources into a single text chunk using a specified delimiter. +### Parameters -This component concatenates two text sources into a single text chunk using a specified delimiter. +#### Inputs +| Name | Display Name | Info | +|------|--------------|------| +| first_text | First Text | The first text input to concatenate. | +| second_text | Second Text | The second text input to concatenate. | +| delimiter | Delimiter | A string used to separate the two text inputs. Defaults to a space. | -### Parameters {#246676d119604fc5bf1be85fe93044aa} +## Create List -- **First text:** The first text input to concatenate. -- **Second text:** The second text input to concatenate. 
-- **Delimiter:** A string used to separate the two text inputs. Defaults to a space. +This component dynamically creates a record with a specified number of fields. ---- +### Parameters +#### Inputs -### Create record {#506f43345854473b8199631bf68a3b4a} +| Name | Display Name | Info | +|------|--------------|------| +| n_fields | Number of Fields | Number of fields to be added to the record. | +| text_key | Text Key | Key used as text. | +## Custom Component -This component dynamically creates a record with a specified number of fields. +Use this component as a template to create your custom component. +For more, see [Custom Components](components-custom-components). -### Parameters {#08735e90bd10406695771bad8a95976a} +## Filter Data -- **Number of fields:** Number of fields to be added to the record. -- **Text key:** Key used as text. +This component filters a Data object based on a list of keys. ---- +### Parameters +#### Inputs -### Custom component {#cda421d4bccb4e7db2e48615884ed753} +| Name | Display Name | Info | +|------|--------------|------| +| data | Data | Data object to filter. | +| filter_criteria | Filter Criteria | List of keys to filter by. | +#### Outputs -Use this component as a template to create your custom component. +| Name | Display Name | Info | +|------|--------------|------| +| filtered_data | Filtered Data | A new Data object containing only the key-value pairs that match the filter criteria. | +## Hierarchical Task -### Parameters {#04f9eb5e6da4431593a5bee8831f2327} +This component creates and manages hierarchical tasks for CrewAI agents in a Playground environment. -- **Parameter:** Describe the purpose of this parameter. +For more information, see the [CrewAI documentation](https://docs.crewai.com/how-to/Hierarchical/). -INFO +### Parameters +#### Inputs -Customize the `build_config` and `build` methods according to your requirements. 
+| Name | Display Name | Info | +|------|--------------|------| +| task_description | Description | Descriptive text detailing task's purpose and execution. | +| expected_output | Expected Output | Clear definition of expected task outcome. | +| tools | Tools | List of tools/resources limited for task execution. Uses the Agent tools by default. | +#### Outputs -Learn more about creating custom components at [Custom Component](http://docs.langflow.org/components/custom). +| Name | Display Name | Info | +|------|--------------|------| +| task_output | Task | The built hierarchical task. | +## ID Generator ---- +This component generates a unique ID. +### Parameters -### Documents to Data {#53a6a99a54f0435e9209169cf7730c55} +#### Outputs +| Name | Display Name | Info | +|------|--------------|------| +| value | Value | Unique ID generated. | -Convert LangChain documents into Data. +## Parse JSON +This component converts and extracts JSON fields using JQ queries. -### Parameters {#0eb5fce528774c2db4a3677973e75cf8} +### Parameters -- **Documents:** Documents to be converted into Data. +#### Inputs ---- +| Name | Display Name | Info | +|------|--------------|------| +| input_value | Input | Data object to filter. Can be a Message or Data object. | +| query | JQ Query | JQ Query to filter the data. The input is always a JSON list. | +#### Outputs -### ID generator {#4a8fbfb95ebe44ee8718725546db5393} +| Name | Display Name | Info | +|------|--------------|------| +| filtered_data | Filtered Data | Filtered data as a list of Data objects. | +## Merge Data -Generates a unique ID. +This component combines multiple data sources into a single unified Data object. +The component iterates through the input list of Data objects, merging them into a single Data object. If the input list is empty, it returns an empty Data object. If there's only one input Data object, it returns that object unchanged. The merging process uses the addition operator to combine Data objects. 
-### Parameters {#4629dd15594c47399c97d9511060e114} +### Parameters -- **Value:** Unique ID generated. +#### Inputs ---- +| Name | Display Name | Info | +|------|--------------|------| +| data | Data | A list of Data objects to be merged | +#### Outputs -### Message history {#6a1a60688641490197c6443df573960e} +| Name | Display Name | Info | +|------|--------------|------| +| merged_data | Merged Data | A single Data object containing the combined information from all input Data objects | -Retrieves stored chat messages based on a specific session ID. +## Parse Data +The ParseData component converts Data objects into plain text using a specified template. +This component transforms structured data into human-readable text formats, allowing for customizable output through the use of templates. -### Parameters {#31c7fc2a3e8c4f7c89f923e700f4ea34} +### Parameters -- **Sender type:** Options for the sender type. -- **Sender name:** Sender name. -- **Number of messages:** Number of messages to retrieve. -- **Session ID:** Session ID of the chat history. -- **Order:** Order of the messages. +#### Inputs ---- +| Name | Display Name | Info | +|------|--------------|------| +| data | Data | The data to convert to text | +| template | Template | The template to use for formatting the data. It can contain the keys `{text}`, `{data}` or any other key in the Data | +| sep | Separator | The separator to use between multiple data items | +#### Outputs -### Data to text {#f60ab5bbc0db4b27b427897eba97fe29} +| Name | Display Name | Info | +|------|--------------|------| +| text | Text | The resulting formatted text string as a Message object | +## Sequential Task -Convert Data into plain text following a specified template. +This component creates and manages sequential tasks for CrewAI agents. It builds a SequentialTask object with the provided description, expected output, and agent, allowing for the specification of tools and asynchronous execution.
+For more information, see the [CrewAI documentation](https://docs.crewai.com/how-to/Sequential/). -### Parameters {#01b91376569149a49cfcfd9321323688} +### Parameters -- **Data:** The Data to convert to text. -- **Template:** The template used for formatting the Data. It can contain keys like `{text}`, `{data}`, or any other key in the record. +#### Inputs ---- +| Name | Display Name | Info | +|------|--------------|------| +| task_description | Description | Descriptive text detailing task's purpose and execution. | +| expected_output | Expected Output | Clear definition of expected task outcome. | +| tools | Tools | List of tools/resources limited for task execution. Uses the Agent tools by default. | +| agent | Agent | CrewAI Agent that will perform the task. | +| task | Task | CrewAI Task that will perform the task. | +| async_execution | Async Execution | Boolean flag indicating asynchronous task execution. | +#### Outputs -### Split text {#210be0ae518d411695d6caafdd7700eb} +| Name | Display Name | Info | +|------|--------------|------| +| task_output | Task | The built sequential task or list of tasks. | +## Split Text -Split text into chunks of a specified length. +This component splits text into chunks of a specified length. +### Parameters -### Parameters {#04197fcd05e64e10b189de1171a32682} +#### Inputs -- **Texts:** Texts to split. -- **Separators:** Characters to split on. Defaults to a space. -- **Max chunk size:** The maximum length (in characters) of each chunk. -- **Chunk overlap:** The amount of character overlap between chunks. -- **Recursive:** Whether to split recursively. +| Name | Display Name | Info | +|------|--------------|------| +| texts | Texts | Texts to split. | +| separators | Separators | Characters to split on. Defaults to a space. | +| max_chunk_size | Max Chunk Size | The maximum length (in characters) of each chunk. | +| chunk_overlap | Chunk Overlap | The amount of character overlap between chunks. 
| +| recursive | Recursive | Whether to split recursively. | ---- +## Store Message +This component stores chat messages or text into Langflow tables or an external memory. -### Update record {#d3b6116dfd8d4af080ad01bc8fd2b0b3} +It provides flexibility in managing message storage and retrieval within a chat system. +### Parameters -Update a record with text-based key/value pairs, similar to updating a Python dictionary. +#### Inputs +| Name | Display Name | Info | +|------|--------------|------| +| message | Message | The chat message to be stored. (Required) | +| memory | External Memory | The external memory to store the message. If empty, it will use the Langflow tables. | +| sender | Sender | The sender of the message. Can be Machine or User. If empty, the current sender parameter will be used. | +| sender_name | Sender Name | The name of the sender. Can be AI or User. If empty, the current sender parameter will be used. | +| session_id | Session ID | The session ID of the chat. If empty, the current session ID parameter will be used. | -### Parameters {#c830224edc1d486aaaa5e2889f4f6689} +#### Outputs -- **Data:** The record to update. -- **New data:** The new data to update the record with. +| Name | Display Name | Info | +|------|--------------|------| +| stored_messages | Stored Messages | The list of stored messages after the current message has been added. | diff --git a/docs/docs/Components/components-io.md b/docs/docs/Components/components-io.md index 1a9862c1689c..2b8f22c8d507 100644 --- a/docs/docs/Components/components-io.md +++ b/docs/docs/Components/components-io.md @@ -4,126 +4,86 @@ sidebar_position: 1 slug: /components-io --- +# Inputs & Outputs +This category of components defines where data enters and exits your flow. They dynamically alter the Playground and can be renamed to facilitate building and maintaining your flows. -Inputs and Outputs are a category of components that are used to define where data comes in and out of your flow. 
They also dynamically change the Playground and can be renamed to facilitate building and maintaining your flows. +## Inputs +Inputs are components used to define where data enters your flow. They can receive data from various sources, such as users, databases, or any other source that can be converted to Text or Data. -## Inputs {#6b1421ec66994d5ebe9fcce000829328} - - ---- - - -Inputs are components used to define where data enters your flow. They can receive data from the user, a database, or any other source that can be converted to Text or Data. +### Chat Input +This component collects user input from the chat. The difference between Chat Input and other Input components is the output format, the number of configurable fields, and the way they are displayed in the Playground. +Chat Input components can output Text or Data. When you want to pass the sender name or sender to the next component, use the Data output. To pass only the message, use the Text output. Passing only the message is useful when saving the message to a database or a memory system like Zep. -Chat Input components can output `Text` or `Data`. When you want to pass the sender name or sender to the next component, use the `Data` output. To pass only the message, use the `Text` output, useful when saving the message to a database or memory system like Zep. - - -You can find out more about Chat Input and other Inputs [here](/components-io). - - -### Chat Input {#2a5f02262f364f8fb75bcfa246e7bb26} - - -This component collects user input from the chat. - - -**Parameters** +#### Parameters -- **Sender Type:** Specifies the sender type. Defaults to `User`. Options are `Machine` and `User`. -- **Sender Name:** Specifies the name of the sender. Defaults to `User`. -- **Message:** Specifies the message text. It is a multiline text input. -- **Session ID:** Specifies the session ID of the chat history. If provided, the message will be saved in the Message History. 
+| Name | Display Name | Info | +|--------------|--------------|---------------------------------------------------------------------| +| Sender Type | Sender Type | Specifies the sender type (User or Machine). Defaults to User | +| Sender Name | Sender Name | Specifies the name of the sender. Defaults to User | +| Message | Message | Specifies the message text. Multiline text input | +| Session ID | Session ID | Specifies the session ID of the chat history | :::note - -If `As Data` is `true` and the `Message` is a `Data`, the data of the `Data` will be updated with the `Sender`, `Sender Name`, and `Session ID`. - +If "As Data" is true and the "Message" is a Data, the data will be updated with the Sender, Sender Name, and Session ID. ::: +### Text Input +This component adds an Input field on the Playground, allowing parameter definition while running and testing your flow. +The Data Template field specifies how a Data should be converted into Text. This is particularly useful when you want to extract specific information from a Data and pass it as text to the next component in the sequence. -One significant capability of the Chat Input component is its ability to transform the Playground into a chat window. This feature is particularly valuable for scenarios requiring user input to initiate or influence the flow. - - -### Text Input {#260aef3726834896b496b56cdefb6d4a} - - -The **Text Input** component adds an **Input** field on the Playground. This enables you to define parameters while running and testing your flow. - - -**Parameters** - -- **Value:** Specifies the text input value. This is where the user inputs text data that will be passed to the next component in the sequence. If no value is provided, it defaults to an empty string. -- **Data Template:** Specifies how a `Data` should be converted into `Text`. 
+For example, if you have a Data with the following structure: -The **Data Template** field is used to specify how a `Data` should be converted into `Text`. This is particularly useful when you want to extract specific information from a `Data` and pass it as text to the next component in the sequence. +```json +{ "name": "John Doe", "age": 30, "email": "johndoe@email.com"} +``` +A template with `Name: {name}, Age: {age}` will convert the Data into a text string of `Name: John Doe, Age: 30`. -For example, if you have a `Data` with the following structure: +If you pass more than one Data, the text will be concatenated with a new line separator. +#### Parameters -`{ "name": "John Doe", "age": 30, "email": "johndoe@email.com"}` +| Name | Display Name | Info | +|---------------|---------------|--------------------------------------------------------------------| +| Value | Value | Specifies the text input value. Defaults to an empty string | +| Data Template | Data Template | Specifies how a Data should be converted into Text | +## Outputs -A template with `Name: {name}, Age: {age}` will convert the `Data` into a text string of `Name: John Doe, Age: 30`. - - -If you pass more than one `Data`, the text will be concatenated with a new line separator. - - -## Outputs {#f62c5ad37a6f45a39b463c9b35ce7842} - - ---- - - -Outputs are components that are used to define where data comes out of your flow. They can be used to send data to the user, to the Playground, or to define how the data will be displayed in the Playground. - - -The Chat Output works similarly to the Chat Input but does not have a field that allows for written input. It is used as an Output definition and can be used to send data to the user. - - -You can find out more about it and the other Outputs [here](/components-io). - - -### Chat Output {#1edd49b72781432ea29d70acbda4e7e7} +Outputs define where data exits your flow.
They can send data to the user, the Playground, or define how data will be displayed in the Playground. +### Chat Output This component sends a message to the chat. +#### Parameters -**Parameters** - -- **Sender Type:** Specifies the sender type. Default is `"Machine"`. Options are `"Machine"` and `"User"`. -- **Sender Name:** Specifies the sender's name. Default is `"AI"`. -- **Session ID:** Specifies the session ID of the chat history. If provided, messages are saved in the Message History. -- **Message:** Specifies the text of the message. +| Name | Display Name | Info | +|--------------|--------------|---------------------------------------------------------------------| +| Sender Type | Sender Type | Specifies the sender type (Machine or User). Defaults to Machine | +| Sender Name | Sender Name | Specifies the sender's name. Defaults to AI | +| Session ID | Session ID | Specifies the session ID of the chat history | +| Message | Message | Specifies the text of the message | :::note - -If `As Data` is `true` and the `Message` is a `Data`, the data in the `Data` is updated with the `Sender`, `Sender Name`, and `Session ID`. - +If "As Data" is true and the "Message" is a Data, the data will be updated with the Sender, Sender Name, and Session ID. ::: +### Text Output +This component displays text data to the user without sending it to the chat. Defaults to an empty string. +#### Parameters -### Text Output {#b607000bc0c5402db0433c1a7d734d01} - - -This component displays text data to the user. It is useful when you want to show text without sending it to the chat. - - -**Parameters** - -- **Value:** Specifies the text data to be displayed. Defaults to an empty string. - -The `TextOutput` component provides a simple way to display text data. It allows textual data to be visible in the chat window during your interaction flow. 
+| Name | Display Name | Info | +|-------|--------------|----------------------------------------------------------| +| Value | Value | Specifies the text data to be displayed | diff --git a/docs/docs/Components/components-loaders.md b/docs/docs/Components/components-loaders.md new file mode 100644 index 000000000000..d0d9cd910672 --- /dev/null +++ b/docs/docs/Components/components-loaders.md @@ -0,0 +1,74 @@ +--- +title: Loaders +sidebar_position: 10 +slug: /components-loaders +--- + +# Loaders + +Loaders are components used to load documents from various sources, such as databases, websites, and local files. They can be used to fetch data from external sources and convert it into a format that can be processed by other components. + +## Confluence + +The Confluence component integrates with the Confluence wiki collaboration platform to load and process documents. It utilizes the ConfluenceLoader from LangChain to fetch content from a specified Confluence space. + +### Parameters + +#### Inputs: + +| Name | Display Name | Info | +| --- | --- | --- | +| url | Site URL | The base URL of the Confluence Space (e.g., https://company.atlassian.net/wiki) | +| username | Username | Atlassian User E-mail (e.g., email@example.com) | +| api_key | API Key | Atlassian API Key (Create at: https://id.atlassian.com/manage-profile/security/api-tokens) | +| space_key | Space Key | The key of the Confluence space to access | +| cloud | Use Cloud? | Whether to use Confluence Cloud (default: true) | +| content_format | Content Format | Specify content format (default: STORAGE) | +| max_pages | Max Pages | Maximum number of pages to retrieve (default: 1000) | + +#### Outputs: + +| Name | Display Name | Info | +| --- | --- | --- | +| data | Data | List of Data objects containing the loaded Confluence documents | + +## GitLoader + +The GitLoader component uses the GitLoader from LangChain to fetch and load documents from a specified Git repository. 
+ +### Parameters + +#### Inputs: + +| Name | Display Name | Info | +| --- | --- | --- | +| repo_path | Repository Path | The local path to the Git repository | +| clone_url | Clone URL | The URL to clone the Git repository from (optional) | +| branch | Branch | The branch to load files from (default: 'main') | +| file_filter | File Filter | Patterns to filter files (e.g., '.py' to include only .py files, '!.py' to exclude .py files) | +| content_filter | Content Filter | A regex pattern to filter files based on their content | + +#### Outputs: + +| Name | Display Name | Info | +| --- | --- | --- | +| data | Data | List of Data objects containing the loaded Git repository documents | + +## Unstructured + +This component uses the [Unstructured](https://unstructured.io/) library to load and parse PDF, DOCX, and TXT files into structured data. This component works with both the open-source library and the Unstructured API. + +### Parameters + +#### Inputs: + +| Name | Display Name | Info | +| --- | --- | --- | +| file | File | The path to the file to be parsed (supported types: pdf, docx, txt) | +| api_key | API Key | Unstructured API Key (optional, if not provided, open-source library will be used) | + +#### Outputs: + +| Name | Display Name | Info | +| --- | --- | --- | +| data | Data | List of Data objects containing the parsed content from the input file | \ No newline at end of file diff --git a/docs/docs/Components/components-logic.md b/docs/docs/Components/components-logic.md new file mode 100644 index 000000000000..3ab9002ad6f1 --- /dev/null +++ b/docs/docs/Components/components-logic.md @@ -0,0 +1,152 @@ +--- +title: Logic +sidebar_position: 13 +slug: /components-logic +--- +# Logic components in Langflow + +Logic components provide functionalities for routing, conditional processing, and flow management. + +## Conditional router + +This component routes an input message to a corresponding output based on text comparison. 
+ +The ConditionalRouterComponent routes messages based on text comparison. It evaluates a condition by comparing two text inputs using a specified operator and routes the message accordingly. + +### Inputs + +| Name | Type | Description | +|----------------|----------|-------------------------------------------------------------------| +| input_text | String | The primary text input for the operation. | +| match_text | String | The text input to compare against. | +| operator | Dropdown | The operator to apply for comparing the texts. | +| case_sensitive | Boolean | If true, the comparison will be case sensitive. | +| message | Message | The message to pass through either route. | +| max_iterations | Integer | The maximum number of iterations for the conditional router. | +| default_route | Dropdown | The default route to take when max iterations are reached. | + +### Outputs + +| Name | Type | Description | +|--------------|---------|--------------------------------------------| +| true_result | Message | The output when the condition is true. | +| false_result | Message | The output when the condition is false. | + +## Data conditional router + +This component routes `Data` objects based on a condition applied to a specified key, including boolean validation. + +This component is particularly useful in workflows that require conditional routing of complex data structures, enabling dynamic decision-making based on data content. + +### Inputs + +| Name | Type | Description | +|---------------|----------|-----------------------------------------------------------------------------------| +| data_input | Data | The data object or list of data objects to process. | +| key_name | String | The name of the key in the data object to check. | +| operator | Dropdown | The operator to apply for comparing the values. | +| compare_value | String | The value to compare against (not used for boolean validator). 
| + +### Outputs + +| Name | Type | Description | +|--------------|-------------|------------------------------------------------------| +| true_output | Data/List | Output when the condition is met. | +| false_output | Data/List | Output when the condition is not met. | + + +## Flow as Tool {#flow-as-tool} + +This component constructs a tool from a function that runs a loaded flow. + +### Inputs + +| Name | Type | Description | +|------------------|----------|------------------------------------------------------------| +| flow_name | Dropdown | The name of the flow to run. | +| tool_name | String | The name of the tool. | +| tool_description | String | The description of the tool. | +| return_direct | Boolean | If true, returns the result directly from the tool. | + +### Outputs + +| Name | Type | Description | +|----------------|------|----------------------------------------| +| api_build_tool | Tool | The constructed tool from the flow. | + +## Listen + +This component listens for a notification and retrieves its associated state. + +### Inputs + +| Name | Type | Description | +|------|--------|------------------------------------------------| +| name | String | The name of the notification to listen for. | + +### Outputs + +| Name | Type | Description | +|--------|------|--------------------------------------------| +| output | Data | The state associated with the notification. | + +## Notify + +This component generates a notification for the Listen component to use. + +### Inputs + +| Name | Type | Description | +|--------|---------|-------------------------------------------------------------------| +| name | String | The name of the notification. | +| data | Data | The data to store in the notification. | +| append | Boolean | If true, the record will be appended to the existing notification.| + +### Outputs + +| Name | Type | Description | +|--------|------|-----------------------------------------| +| output | Data | The data stored in the notification. 
| + +## Run flow + +This component allows you to run a specified flow with given inputs and tweaks. + +The RunFlowComponent executes a specified flow within a larger workflow. It provides the ability to run a flow with custom inputs and apply tweaks to modify its behavior. + +### Inputs + +| Name | Type | Description | +|-------------|--------------|-------------------------------------------------------| +| input_value | String | The input value for the flow to process. | +| flow_name | Dropdown | The name of the flow to run. | +| tweaks | Nested Dict | Tweaks to apply to the flow. | + +### Outputs + +| Name | Type | Description | +|-------------|-------------|------------------------------------------------| +| run_outputs | List[Data] | The results generated from running the flow. | + +## Sub Flow + +This `SubFlowComponent` generates a component from a flow with all of its inputs and outputs. + +This component can integrate entire flows as components within a larger workflow. It dynamically generates inputs based on the selected flow and executes the flow with provided parameters. + +### Inputs + +| Name | Type | Description | +|-----------|----------|------------------------------------| +| flow_name | Dropdown | The name of the flow to run. | + +### Outputs + +| Name | Type | Description | +|--------------|-------------|---------------------------------------| +| flow_outputs | List[Data] | The outputs generated from the flow. | + + + + + diff --git a/docs/docs/Components/components-memories.md b/docs/docs/Components/components-memories.md new file mode 100644 index 000000000000..5dfd45d0a436 --- /dev/null +++ b/docs/docs/Components/components-memories.md @@ -0,0 +1,70 @@ +# Memories + +Chat memory components store and retrieve chat messages by `session_id`. + +## AstraDBChatMemory Component + +This component creates an `AstraDBChatMessageHistory` instance, which allows for storing and retrieving chat messages using Astra DB, a cloud-native database service. 
+ +### Parameters + +#### Inputs + +| Name | Type | Description | +|------------------|---------------|-----------------------------------------------------------------------| +| collection_name | String | Name of the Astra DB collection for storing messages. Required. | +| token | SecretString | Authentication token for Astra DB access. Required. | +| api_endpoint | SecretString | API endpoint URL for the Astra DB service. Required. | +| namespace | String | Optional namespace within Astra DB for the collection. | +| session_id | MessageText | Chat session ID. Uses current session ID if not provided. | + +#### Outputs + +| Name | Type | Description | +|-----------------|-------------------------|-----------------------------------------------------------| +| message_history | BaseChatMessageHistory | An instance of AstraDBChatMessageHistory for the session. | + +## CassandraChatMemory Component + +This component creates a `CassandraChatMessageHistory` instance, enabling storage and retrieval of chat messages using Apache Cassandra or DataStax Astra DB. + +### Parameters + +#### Inputs + +| Name | Type | Description | +|----------------|---------------|-------------------------------------------------------------------------------| +| database_ref | MessageText | Contact points for Cassandra or Astra DB database ID. Required. | +| username | MessageText | Username for Cassandra (leave empty for Astra DB). | +| token | SecretString | Password for Cassandra or token for Astra DB. Required. | +| keyspace | MessageText | Keyspace in Cassandra or namespace in Astra DB. Required. | +| table_name | MessageText | Name of the table or collection for storing messages. Required. | +| session_id | MessageText | Unique identifier for the chat session. Optional. | +| cluster_kwargs | Dictionary | Additional keyword arguments for Cassandra cluster configuration. Optional. 
| + +#### Outputs + +| Name | Type | Description | +|-----------------|-------------------------|--------------------------------------------------------------| +| message_history | BaseChatMessageHistory | An instance of CassandraChatMessageHistory for the session. | + +## ZepChatMemory Component + +This component creates a `ZepChatMessageHistory` instance, enabling storage and retrieval of chat messages using Zep, a memory server for Large Language Models (LLMs). + +### Parameters + +#### Inputs + +| Name | Type | Description | +|---------------|---------------|-----------------------------------------------------------| +| url | MessageText | URL of the Zep instance. Required. | +| api_key | SecretString | API Key for authentication with the Zep instance. | +| api_base_path | Dropdown | API version to use. Options: "api/v1" or "api/v2". | +| session_id | MessageText | Unique identifier for the chat session. Optional. | + +#### Outputs + +| Name | Type | Description | +|-----------------|-------------------------|-------------------------------------------------------| +| message_history | BaseChatMessageHistory | An instance of ZepChatMessageHistory for the session. | \ No newline at end of file diff --git a/docs/docs/Components/components-models.md b/docs/docs/Components/components-models.md index e30383dd5ac8..bcc85cb83ed9 100644 --- a/docs/docs/Components/components-models.md +++ b/docs/docs/Components/components-models.md @@ -4,315 +4,359 @@ sidebar_position: 5 slug: /components-models --- +# Models +Model components are used to generate text using language models. These components can be used to generate text for various tasks such as chatbots, content generation, and more. -:::info +## AI/ML API -This page may contain outdated information. It will be updated as soon as possible. +This component creates a ChatOpenAI model instance using the AIML API. -::: +For more information, see [AIML documentation](https://docs.aimlapi.com/). 
+### Parameters +#### Inputs +| Name | Type | Description | +|--------------|-------------|---------------------------------------------------------------------------------------------| +| max_tokens | Integer | The maximum number of tokens to generate. Set to 0 for unlimited tokens. Range: 0-128000. | +| model_kwargs | Dictionary | Additional keyword arguments for the model. | +| model_name | String | The name of the AIML model to use. Options are predefined in AIML_CHAT_MODELS. | +| aiml_api_base| String | The base URL of the AIML API. Defaults to https://api.aimlapi.com. | +| api_key | SecretString| The AIML API Key to use for the model. | +| temperature | Float | Controls randomness in the output. Default: 0.1. | +| seed | Integer | Controls reproducibility of the job. | -## Amazon Bedrock {#3b8ceacef3424234814f95895a25bf43} +#### Outputs +| Name | Type | Description | +|-------|---------------|------------------------------------------------------------------| +| model | LanguageModel | An instance of ChatOpenAI configured with the specified parameters. | -This component facilitates the generation of text using the LLM (Large Language Model) model from Amazon Bedrock. +## Amazon Bedrock +This component generates text using Amazon Bedrock LLMs. -**Params** +For more information, see [Amazon Bedrock documentation](https://docs.aws.amazon.com/bedrock). -- **Input Value:** Specifies the input text for text generation. -- **System Message (Optional):** A system message to pass to the model. -- **Model ID (Optional):** Specifies the model ID to be used for text generation. Defaults to `"anthropic.claude-instant-v1"`. 
Available options include: - - `"ai21.j2-grande-instruct"` - - `"ai21.j2-jumbo-instruct"` - - `"ai21.j2-mid"` - - `"ai21.j2-mid-v1"` - - `"ai21.j2-ultra"` - - `"ai21.j2-ultra-v1"` - - `"anthropic.claude-instant-v1"` - - `"anthropic.claude-v1"` - - `"anthropic.claude-v2"` - - `"cohere.command-text-v14"` -- **Credentials Profile Name (Optional):** Specifies the name of the credentials profile. -- **Region Name (Optional):** Specifies the region name. -- **Model Kwargs (Optional):** Additional keyword arguments for the model. -- **Endpoint URL (Optional):** Specifies the endpoint URL. -- **Streaming (Optional):** Specifies whether to stream the response from the model. Defaults to `False`. -- **Cache (Optional):** Specifies whether to cache the response. -- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to `False`. +### Parameters -NOTE +#### Inputs +| Name | Type | Description | +|------------------------|--------------|-------------------------------------------------------------------------------------| +| model_id | String | The ID of the Amazon Bedrock model to use. Options include various models. | +| aws_access_key | SecretString | AWS Access Key for authentication. | +| aws_secret_key | SecretString | AWS Secret Key for authentication. | +| credentials_profile_name | String | Name of the AWS credentials profile to use (advanced). | +| region_name | String | AWS region name. Default: "us-east-1". | +| model_kwargs | Dictionary | Additional keyword arguments for the model (advanced). | +| endpoint_url | String | Custom endpoint URL for the Bedrock service (advanced). | -Ensure that necessary credentials are provided to connect to the Amazon Bedrock API. If connection fails, a ValueError will be raised. 
+#### Outputs +| Name | Type | Description | +|-------|---------------|-------------------------------------------------------------------| +| model | LanguageModel | An instance of ChatBedrock configured with the specified parameters. | ---- +## Anthropic +This component allows the generation of text using Anthropic Chat and Language models. -## Anthropic {#a6ae46f98c4c4d389d44b8408bf151a1} +For more information, see the [Anthropic documentation](https://docs.anthropic.com/en/docs/welcome). +### Parameters -This component allows the generation of text using Anthropic Chat&Completion large language models. +#### Inputs +| Name | Type | Description | +|---------------------|-------------|----------------------------------------------------------------------------------------| +| max_tokens | Integer | The maximum number of tokens to generate. Set to 0 for unlimited tokens. Default: 4096.| +| model | String | The name of the Anthropic model to use. Options include various Claude 3 models. | +| anthropic_api_key | SecretString| Your Anthropic API key for authentication. | +| temperature | Float | Controls randomness in the output. Default: 0.1. | +| anthropic_api_url | String | Endpoint of the Anthropic API. Defaults to 'https://api.anthropic.com' if not specified (advanced). | +| prefill | String | Prefill text to guide the model's response (advanced). | -**Params** +#### Outputs -- **Model Name:** Specifies the name of the Anthropic model to be used for text generation. Available options include (and not limited to): - - `"claude-2.1"` - - `"claude-2.0"` - - `"claude-instant-1.2"` - - `"claude-instant-1"` -- **Anthropic API Key:** Your Anthropic API key. -- **Max Tokens (Optional):** Specifies the maximum number of tokens to generate. Defaults to `256`. -- **Temperature (Optional):** Specifies the sampling temperature. Defaults to `0.7`. -- **API Endpoint (Optional):** Specifies the endpoint of the Anthropic API. 
Defaults to `"https://api.anthropic.com"`if not specified. -- **Input Value:** Specifies the input text for text generation. -- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to `False`. -- **System Message (Optional):** A system message to pass to the model. +| Name | Type | Description | +|-------|---------------|------------------------------------------------------------------| +| model | LanguageModel | An instance of ChatAnthropic configured with the specified parameters. | -For detailed documentation and integration guides, please refer to the [Anthropic Component Documentation](https://python.langchain.com/docs/integrations/chat/anthropic). +## Azure OpenAI +This component generates text using Azure OpenAI LLM. ---- +For more information, see the [Azure OpenAI documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/). +### Parameters -## Azure OpenAI {#7e3bff29ce714479b07feeb4445680cd} +#### Inputs +| Name | Display Name | Info | +|---------------------|---------------------|---------------------------------------------------------------------------------| +| Model Name | Model Name | Specifies the name of the Azure OpenAI model to be used for text generation. | +| Azure Endpoint | Azure Endpoint | Your Azure endpoint, including the resource. | +| Deployment Name | Deployment Name | Specifies the name of the deployment. | +| API Version | API Version | Specifies the version of the Azure OpenAI API to be used. | +| API Key | API Key | Your Azure OpenAI API key. | +| Temperature | Temperature | Specifies the sampling temperature. Defaults to `0.7`. | +| Max Tokens | Max Tokens | Specifies the maximum number of tokens to generate. Defaults to `1000`. | +| Input Value | Input Value | Specifies the input text for text generation. | +| Stream | Stream | Specifies whether to stream the response from the model. Defaults to `False`. 
| -This component allows the generation of text using the LLM (Large Language Model) model from Azure OpenAI. +## Cohere +This component generates text using Cohere's language models. -**Params** +For more information, see the [Cohere documentation](https://cohere.ai/). -- **Model Name:** Specifies the name of the Azure OpenAI model to be used for text generation. Available options include: - - `"gpt-35-turbo"` - - `"gpt-35-turbo-16k"` - - `"gpt-35-turbo-instruct"` - - `"gpt-4"` - - `"gpt-4-32k"` - - `"gpt-4-vision"` - - `"gpt-4o"` -- **Azure Endpoint:** Your Azure endpoint, including the resource. Example: `https://example-resource.azure.openai.com/`. -- **Deployment Name:** Specifies the name of the deployment. -- **API Version:** Specifies the version of the Azure OpenAI API to be used. Available options include: - - `"2023-03-15-preview"` - - `"2023-05-15"` - - `"2023-06-01-preview"` - - `"2023-07-01-preview"` - - `"2023-08-01-preview"` - - `"2023-09-01-preview"` - - `"2023-12-01-preview"` -- **API Key:** Your Azure OpenAI API key. -- **Temperature (Optional):** Specifies the sampling temperature. Defaults to `0.7`. -- **Max Tokens (Optional):** Specifies the maximum number of tokens to generate. Defaults to `1000`. -- **Input Value:** Specifies the input text for text generation. -- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to `False`. -- **System Message (Optional):** A system message to pass to the model. +### Parameters -For detailed documentation and integration guides, please refer to the [Azure OpenAI Component Documentation](https://python.langchain.com/docs/integrations/llms/azure_openai). +#### Inputs +| Name | Display Name | Info | +|---------------------|--------------------|----------------------------------------------------------| +| Cohere API Key | Cohere API Key | Your Cohere API key. | +| Max Tokens | Max Tokens | Specifies the maximum number of tokens to generate. Defaults to `256`. 
| +| Temperature | Temperature | Specifies the sampling temperature. Defaults to `0.75`. | +| Input Value | Input Value | Specifies the input text for text generation. | ---- +## Google Generative AI +This component generates text using Google's Generative AI models. -## Cohere {#706396a33bf94894966c95571252d78b} +For more information, see the [Google Generative AI documentation](https://cloud.google.com/ai-platform/training/docs/algorithms/gpt-3). +### Parameters -This component enables text generation using Cohere large language models. +#### Inputs +| Name | Display Name | Info | +|---------------------|--------------------|-----------------------------------------------------------------------| +| Google API Key | Google API Key | Your Google API key to use for the Google Generative AI. | +| Model | Model | The name of the model to use, such as `"gemini-pro"`. | +| Max Output Tokens | Max Output Tokens | The maximum number of tokens to generate. | +| Temperature | Temperature | Run inference with this temperature. | +| Top K | Top K | Consider the set of top K most probable tokens. | +| Top P | Top P | The maximum cumulative probability of tokens to consider when sampling. | +| N | N | Number of chat completions to generate for each prompt. | -**Params** +## Groq -- **Cohere API Key:** Your Cohere API key. -- **Max Tokens (Optional):** Specifies the maximum number of tokens to generate. Defaults to `256`. -- **Temperature (Optional):** Specifies the sampling temperature. Defaults to `0.75`. -- **Input Value:** Specifies the input text for text generation. -- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to `False`. -- **System Message (Optional):** A system message to pass to the model. +This component generates text using Groq's language models. ---- +For more information, see the [Groq documentation](https://groq.com/). 
+### Parameters -## Google Generative AI {#074d9623463449f99d41b44699800e8a} +#### Inputs +| Name | Type | Description | +|----------------|---------------|-----------------------------------------------------------------| +| groq_api_key | SecretString | API key for the Groq API. | +| groq_api_base | String | Base URL path for API requests. Default: "https://api.groq.com" (advanced). | +| max_tokens | Integer | The maximum number of tokens to generate (advanced). | +| temperature | Float | Controls randomness in the output. Range: [0.0, 1.0]. Default: 0.1. | +| n | Integer | Number of chat completions to generate for each prompt (advanced). | +| model_name | String | The name of the Groq model to use. Options are dynamically fetched from the Groq API. | -This component enables text generation using Google Generative AI. +#### Outputs +| Name | Type | Description | +|-------|---------------|------------------------------------------------------------------| +| model | LanguageModel | An instance of ChatGroq configured with the specified parameters. | -**Params** +## Hugging Face API -- **Google API Key:** Your Google API key to use for the Google Generative AI. -- **Model:** The name of the model to use. Supported examples are `"gemini-pro"` and `"gemini-pro-vision"`. -- **Max Output Tokens (Optional):** The maximum number of tokens to generate. -- **Temperature:** Run inference with this temperature. Must be in the closed interval [0.0, 1.0]. -- **Top K (Optional):** Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive. -- **Top P (Optional):** The maximum cumulative probability of tokens to consider when sampling. -- **N (Optional):** Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated. -- **Input Value:** The input to the model. -- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to `False`. 
-- **System Message (Optional):** A system message to pass to the model. +This component generates text using Hugging Face's language models. ---- +For more information, see the [Hugging Face documentation](https://huggingface.co/). +### Parameters -## Hugging Face API {#c1267b9a6b36487cb2ee127ce9b64dbb} +#### Inputs +| Name | Display Name | Info | +|---------------------|-------------------|-------------------------------------------| +| Endpoint URL | Endpoint URL | The URL of the Hugging Face Inference API endpoint. | +| Task | Task | Specifies the task for text generation. | +| API Token | API Token | The API token required for authentication.| +| Model Kwargs | Model Kwargs | Additional keyword arguments for the model.| +| Input Value | Input Value | The input text for text generation. | -This component facilitates text generation using LLM models from the Hugging Face Inference API. +## Maritalk +This component generates text using Maritalk LLMs. -**Params** +For more information, see [Maritalk documentation](https://www.maritalk.com/). -- **Endpoint URL:** The URL of the Hugging Face Inference API endpoint. Should be provided along with necessary authentication credentials. -- **Task:** Specifies the task for text generation. Options include `"text2text-generation"`, `"text-generation"`, and `"summarization"`. -- **API Token:** The API token required for authentication with the Hugging Face Hub. -- **Model Keyword Arguments (Optional):** Additional keyword arguments for the model. Should be provided as a Python dictionary. -- **Input Value:** The input text for text generation. -- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to `False`. -- **System Message (Optional):** A system message to pass to the model. 
+### Parameters ---- +#### Inputs +| Name | Type | Description | +|----------------|---------------|-----------------------------------------------------------------| +| max_tokens | Integer | The maximum number of tokens to generate. Set to 0 for unlimited tokens. Default: 512. | +| model_name | String | The name of the Maritalk model to use. Options: "sabia-2-small", "sabia-2-medium". Default: "sabia-2-small". | +| api_key | SecretString | The Maritalk API Key to use for authentication. | +| temperature | Float | Controls randomness in the output. Range: [0.0, 1.0]. Default: 0.5. | +| endpoint_url | String | The Maritalk API endpoint. Default: https://api.maritalk.com. | -## LiteLLM Model {#9fb59dad3b294a05966320d39f483a50} +#### Outputs +| Name | Type | Description | +|-------|---------------|------------------------------------------------------------------| +| model | LanguageModel | An instance of ChatMaritalk configured with the specified parameters. | -Generates text using the `LiteLLM` collection of large language models. +## Mistral +This component generates text using MistralAI LLMs. -**Parameters** +For more information, see [Mistral AI documentation](https://docs.mistral.ai/). -- **Model name:** The name of the model to use. For example, `gpt-3.5-turbo`. (Type: str) -- **API key:** The API key to use for accessing the provider's API. (Type: str, Optional) -- **Provider:** The provider of the API key. (Type: str, Choices: "OpenAI", "Azure", "Anthropic", "Replicate", "Cohere", "OpenRouter") -- **Temperature:** Controls the randomness of the text generation. (Type: float, Default: 0.7) -- **Model kwargs:** Additional keyword arguments for the model. (Type: Dict, Optional) -- **Top p:** Filter responses to keep the cumulative probability within the top p tokens. (Type: float, Optional) -- **Top k:** Filter responses to only include the top k tokens. (Type: int, Optional) -- **N:** Number of chat completions to generate for each prompt. 
(Type: int, Default: 1) -- **Max tokens:** The maximum number of tokens to generate for each chat completion. (Type: int, Default: 256) -- **Max retries:** Maximum number of retries for failed requests. (Type: int, Default: 6) -- **Verbose:** Whether to print verbose output. (Type: bool, Default: False) -- **Input:** The input prompt for text generation. (Type: str) -- **Stream:** Whether to stream the output. (Type: bool, Default: False) -- **System message:** System message to pass to the model. (Type: str, Optional) +### Parameters ---- +#### Inputs +| Name | Type | Description | +|---------------------|--------------|-----------------------------------------------------------------------------------------------| +| max_tokens | Integer | The maximum number of tokens to generate. Set to 0 for unlimited tokens (advanced). | +| model_name | String | The name of the Mistral AI model to use. Options include "open-mixtral-8x7b", "open-mixtral-8x22b", "mistral-small-latest", "mistral-medium-latest", "mistral-large-latest", and "codestral-latest". Default: "codestral-latest". | +| mistral_api_base | String | The base URL of the Mistral API. Defaults to https://api.mistral.ai/v1 (advanced). | +| api_key | SecretString | The Mistral API Key to use for authentication. | +| temperature | Float | Controls randomness in the output. Default: 0.5. | +| max_retries | Integer | Maximum number of retries for API calls. Default: 5 (advanced). | +| timeout | Integer | Timeout for API calls in seconds. Default: 60 (advanced). | +| max_concurrent_requests | Integer | Maximum number of concurrent API requests. Default: 3 (advanced). | +| top_p | Float | Nucleus sampling parameter. Default: 1 (advanced). | +| random_seed | Integer | Seed for random number generation. Default: 1 (advanced). | +| safe_mode | Boolean | Enables safe mode for content generation (advanced). 
| +#### Outputs +| Name | Type | Description | +|--------|---------------|-----------------------------------------------------| +| model | LanguageModel | An instance of ChatMistralAI configured with the specified parameters. | -## Ollama {#14e8e411d28d4711add53bfc3e52c6cd} - - -Generate text using Ollama Local LLMs. - - -**Parameters** - -- **Base URL:** Endpoint of the Ollama API. Defaults to '[http://localhost:11434](http://localhost:11434/)' if not specified. -- **Model Name:** The model name to use. Refer to [Ollama Library](https://ollama.ai/library) for more models. -- **Temperature:** Controls the creativity of model responses. (Default: 0.8) -- **Cache:** Enable or disable caching. (Default: False) -- **Format:** Specify the format of the output (e.g., json). (Advanced) -- **Metadata:** Metadata to add to the run trace. (Advanced) -- **Mirostat:** Enable/disable Mirostat sampling for controlling perplexity. (Default: Disabled) -- **Mirostat Eta:** Learning rate for Mirostat algorithm. (Default: None) (Advanced) -- **Mirostat Tau:** Controls the balance between coherence and diversity of the output. (Default: None) (Advanced) -- **Context Window Size:** Size of the context window for generating tokens. (Default: None) (Advanced) -- **Number of GPUs:** Number of GPUs to use for computation. (Default: None) (Advanced) -- **Number of Threads:** Number of threads to use during computation. (Default: None) (Advanced) -- **Repeat Last N:** How far back the model looks to prevent repetition. (Default: None) (Advanced) -- **Repeat Penalty:** Penalty for repetitions in generated text. (Default: None) (Advanced) -- **TFS Z:** Tail free sampling value. (Default: None) (Advanced) -- **Timeout:** Timeout for the request stream. (Default: None) (Advanced) -- **Top K:** Limits token selection to top K. (Default: None) (Advanced) -- **Top P:** Works together with top-k. (Default: None) (Advanced) -- **Verbose:** Whether to print out response text. 
-- **Tags:** Tags to add to the run trace. (Advanced) -- **Stop Tokens:** List of tokens to signal the model to stop generating text. (Advanced) -- **System:** System to use for generating text. (Advanced) -- **Template:** Template to use for generating text. (Advanced) -- **Input:** The input text. -- **Stream:** Whether to stream the response. -- **System Message:** System message to pass to the model. (Advanced) +## NVIDIA ---- +This component generates text using NVIDIA LLMs. +For more information, see [NVIDIA AI Foundation Models documentation](https://developer.nvidia.com/ai-foundation-models). -## OpenAI {#fe6cd793446748eda6eaad72e30f70b3} +### Parameters +#### Inputs +| Name | Type | Description | +|---------------------|--------------|-----------------------------------------------------------------------------------------------| +| max_tokens | Integer | The maximum number of tokens to generate. Set to 0 for unlimited tokens (advanced). | +| model_name | String | The name of the NVIDIA model to use. Default: "mistralai/mixtral-8x7b-instruct-v0.1". | +| base_url | String | The base URL of the NVIDIA API. Default: "https://integrate.api.nvidia.com/v1". | +| nvidia_api_key | SecretString | The NVIDIA API Key for authentication. | +| temperature | Float | Controls randomness in the output. Default: 0.1. | +| seed | Integer | The seed controls the reproducibility of the job (advanced). Default: 1. | -This component facilitates text generation using OpenAI's models. +#### Outputs +| Name | Type | Description | +|--------|---------------|-----------------------------------------------------| +| model | LanguageModel | An instance of ChatNVIDIA configured with the specified parameters. | +## Ollama -**Params** +This component generates text using Ollama's language models. -- **Input Value:** The input text for text generation. -- **Max Tokens (Optional):** The maximum number of tokens to generate. Defaults to `256`. 
-- **Model Kwargs (Optional):** Additional keyword arguments for the model. Should be provided as a nested dictionary. -- **Model Name (Optional):** The name of the model to use. Defaults to `gpt-4-1106-preview`. Supported options include: `gpt-4-turbo-preview`, `gpt-4-0125-preview`, `gpt-4-1106-preview`, `gpt-4-vision-preview`, `gpt-3.5-turbo-0125`, `gpt-3.5-turbo-1106`. -- **OpenAI API Base (Optional):** The base URL of the OpenAI API. Defaults to `https://api.openai.com/v1`. -- **OpenAI API Key (Optional):** The API key for accessing the OpenAI API. -- **Temperature:** Controls the creativity of model responses. Defaults to `0.7`. -- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to `False`. -- **System Message (Optional):** System message to pass to the model. +For more information, see [Ollama documentation](https://ollama.com/). ---- +### Parameters +#### Inputs +| Name | Display Name | Info | +|---------------------|---------------|---------------------------------------------| +| Base URL | Base URL | Endpoint of the Ollama API. | +| Model Name | Model Name | The model name to use. | +| Temperature | Temperature | Controls the creativity of model responses. | -## Qianfan {#6e4a6b2370ee4b9f8beb899e7cf9c8f6} +## OpenAI +This component generates text using OpenAI's language models. -This component facilitates the generation of text using Baidu Qianfan chat models. +For more information, see [OpenAI documentation](https://beta.openai.com/docs/). +### Parameters -**Params** +#### Inputs -- **Model Name:** Specifies the name of the Qianfan chat model to be used for text generation. 
Available options include: - - `"ERNIE-Bot"` - - `"ERNIE-Bot-turbo"` - - `"BLOOMZ-7B"` - - `"Llama-2-7b-chat"` - - `"Llama-2-13b-chat"` - - `"Llama-2-70b-chat"` - - `"Qianfan-BLOOMZ-7B-compressed"` - - `"Qianfan-Chinese-Llama-2-7B"` - - `"ChatGLM2-6B-32K"` - - `"AquilaChat-7B"` -- **Qianfan Ak:** Your Baidu Qianfan access key, obtainable from [here](https://cloud.baidu.com/product/wenxinworkshop). -- **Qianfan Sk:** Your Baidu Qianfan secret key, obtainable from [here](https://cloud.baidu.com/product/wenxinworkshop). -- **Top p (Optional):** Model parameter. Specifies the top-p value. Only supported in ERNIE-Bot and ERNIE-Bot-turbo models. Defaults to `0.8`. -- **Temperature (Optional):** Model parameter. Specifies the sampling temperature. Only supported in ERNIE-Bot and ERNIE-Bot-turbo models. Defaults to `0.95`. -- **Penalty Score (Optional):** Model parameter. Specifies the penalty score. Only supported in ERNIE-Bot and ERNIE-Bot-turbo models. Defaults to `1.0`. -- **Endpoint (Optional):** Endpoint of the Qianfan LLM, required if custom model is used. -- **Input Value:** Specifies the input text for text generation. -- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to `False`. -- **System Message (Optional):** A system message to pass to the model. +| Name | Type | Description | +|---------------------|---------------|------------------------------------------------------------------| +| api_key | SecretString | Your OpenAI API Key. | +| model | String | The name of the OpenAI model to use. Options include "gpt-3.5-turbo" and "gpt-4". | +| max_tokens | Integer | The maximum number of tokens to generate. Set to 0 for unlimited tokens. | +| temperature | Float | Controls randomness in the output. Range: [0.0, 1.0]. Default: 0.7. | +| top_p | Float | Controls the nucleus sampling. Range: [0.0, 1.0]. Default: 1.0. | +| frequency_penalty | Float | Controls the frequency penalty. Range: [0.0, 2.0]. Default: 0.0. 
| +| presence_penalty | Float | Controls the presence penalty. Range: [0.0, 2.0]. Default: 0.0. | ---- +#### Outputs + +| Name | Type | Description | +|-------|---------------|------------------------------------------------------------------| +| model | LanguageModel | An instance of OpenAI model configured with the specified parameters. | + +## Qianfan + +This component generates text using Qianfan's language models. + +For more information, see [Qianfan documentation](https://github.com/baidubce/bce-qianfan-sdk). + +## Perplexity + +This component generates text using Perplexity's language models. + +For more information, see [Perplexity documentation](https://perplexity.ai/). + +### Parameters + +#### Inputs +| Name | Type | Description | +|---------------------|--------------|-----------------------------------------------------------------------------------------------| +| model_name | String | The name of the Perplexity model to use. Options include various Llama 3.1 models. | +| max_output_tokens | Integer | The maximum number of tokens to generate. | +| api_key | SecretString | The Perplexity API Key for authentication. | +| temperature | Float | Controls randomness in the output. Default: 0.75. | +| top_p | Float | The maximum cumulative probability of tokens to consider when sampling (advanced). | +| n | Integer | Number of chat completions to generate for each prompt (advanced). | +| top_k | Integer | Number of top tokens to consider for top-k sampling. Must be positive (advanced). | + +#### Outputs +| Name | Type | Description | +|--------|---------------|-----------------------------------------------------| +| model | LanguageModel | An instance of ChatPerplexity configured with the specified parameters. | +## VertexAI -## Vertex AI {#86b7d539e17c436fb758c47ec3ffb084} +This component generates text using Vertex AI LLMs. +For more information, see [Google Vertex AI documentation](https://cloud.google.com/vertex-ai). 
-The `ChatVertexAI` is a component for generating text using Vertex AI Chat large language models API. +### Parameters +#### Inputs +| Name | Type | Description | +|---------------------|--------------|-----------------------------------------------------------------------------------------------| +| credentials | File | JSON credentials file. Leave empty to fallback to environment variables. File type: JSON. | +| model_name | String | The name of the Vertex AI model to use. Default: "gemini-1.5-pro". | +| project | String | The project ID (advanced). | +| location | String | The location for the Vertex AI API. Default: "us-central1" (advanced). | +| max_output_tokens | Integer | The maximum number of tokens to generate (advanced). | +| max_retries | Integer | Maximum number of retries for API calls. Default: 1 (advanced). | +| temperature | Float | Controls randomness in the output. Default: 0.0. | +| top_k | Integer | The number of highest probability vocabulary tokens to keep for top-k-filtering (advanced). | +| top_p | Float | The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling. Default: 0.95 (advanced). | +| verbose | Boolean | Whether to print verbose output. Default: False (advanced). | -**Params** +#### Outputs +| Name | Type | Description | +|--------|---------------|-----------------------------------------------------| +| model | LanguageModel | An instance of ChatVertexAI configured with the specified parameters. | -- **Credentials:** The JSON file containing the credentials for accessing the Vertex AI Chat API. -- **Project:** The name of the project associated with the Vertex AI Chat API. -- **Examples (Optional):** List of examples to provide context for text generation. -- **Location:** The location of the Vertex AI Chat API service. Defaults to `us-central1`. -- **Max Output Tokens:** The maximum number of tokens to generate. Defaults to `128`. -- **Model Name:** The name of the model to use. 
Defaults to `chat-bison`. -- **Temperature:** Controls the creativity of model responses. Defaults to `0.0`. -- **Input Value:** The input text for text generation. -- **Top K:** Limits token selection to top K. Defaults to `40`. -- **Top P:** Works together with top-k. Defaults to `0.95`. -- **Verbose:** Whether to print out response text. Defaults to `False`. -- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to `False`. -- **System Message (Optional):** System message to pass to the model. diff --git a/docs/docs/Components/components-overview.md b/docs/docs/Components/components-overview.md new file mode 100644 index 000000000000..53e89bdce330 --- /dev/null +++ b/docs/docs/Components/components-overview.md @@ -0,0 +1,71 @@ +--- +title: How to build flows with components +sidebar_position: 0 +slug: /components-overview +--- + +A component is a single building block within a flow. It consists of inputs, outputs, and parameters that define their functionality. These elements provide a convenient and straightforward way to compose LLM-based applications. Learn more about components and how they work below. + + +During the flow creation process, you will notice handles (colored circles) attached to one or both sides of a component. These handles use distinct colors to indicate the types of inputs and outputs that can be interconnected. Hover over a handle to see connection details. + + +Prompt component + + +On the top right corner of the component, you'll find a play button to run a component. Once it runs, a status icon appears and you can hover over that to visualize success or error messages. Start interacting with your AI by clicking the **Playground** at the bottom right of the workspace. + + +## Component menu {#7e3f2f8ff5074b2fb3eee97c9cfaabe7} + + +Each component is unique, but they all have a menu bar at the top that looks something like this.
+ + +Prompt component + + +It consists of options such as: + +- **Code** — Modify the component's Python code and save it. +- **Controls** — Adjust all parameters of a component. +- **Freeze Path** — After a component runs, lock its previous output state to prevent it from re-running. + +Click **All** (the "..." button) to see all options. + + +## Output preview {#ed7b3c34e0774b8a916b0e68821c9a7a} + + +Langflow includes an output visualizer for components that opens a pop-up screen. This allows you to easily inspect and monitor transmissions between components, providing instant feedback on your workflows. + + +## Advanced settings {#b6430d4903df44f0ba4618a558c83d7b} + + +Langflow components can be edited by clicking the **Advanced Settings** button. + + +Hide parameters with the **Show** button to reduce complexity and keep the workspace clean and intuitive for experimentation. + + +You can also double-click a component's name and description to modify those. Component descriptions accept markdown syntax. + + +## Group components {#c3f5ed818e3b40ceb6534dc358e1a5f2} + + +Multiple components can be grouped into a single component for reuse. This is useful when combining large flows into single components (like RAG with a vector database, for example) and saving space. + +1. Hold **Shift** and drag to select components. +2. Select **Group**. +3. The components merge into a single component. +4. Double-click the name and description to change them. +5. Save your grouped component in the sidebar for later use! + +## Component version {#887fd587589448dc8c27336d1c235b9b} + +A component's state is stored in a database, while sidebar components are like starter templates. As soon as you drag a component from the sidebar to the workspace, the two components are no longer in parity. + + +The component will keep the version number it was initialized to the workspace with.
Click the **Update Component** icon (exclamation mark) to bring the component up to the `latest` version. This will change the code of the component in place so you can validate that the component was updated by checking its Python code before and after updating it. diff --git a/docs/docs/Components/components-prompts.md b/docs/docs/Components/components-prompts.md index 9a3baa8c7af0..55ff7d747362 100644 --- a/docs/docs/Components/components-prompts.md +++ b/docs/docs/Components/components-prompts.md @@ -4,36 +4,43 @@ sidebar_position: 2 slug: /components-prompts --- +# Prompts +A prompt serves as the input to a language model, comprising multiple components that can be parameterized using prompt templates. -:::info +Prompt templates provide a systematic approach for generating prompts, allowing for reproducible customization through defined input variables. -This page may contain outdated information. It will be updated as soon as possible. +### Parameters -::: +#### Inputs +| Name | Display Name | Info | +|----------|--------------|-------------------------------------------------------------------| +| template | Template | Create a prompt template with dynamic variables. | +#### Outputs +| Name | Display Name | Info | +|--------|----------------|--------------------------------------------------------| +| prompt | Prompt Message | The built prompt message returned by the `build_prompt` method. | -A prompt is the input provided to a language model, consisting of multiple components and can be parameterized using prompt templates. A prompt template offers a reproducible method for generating prompts, enabling easy customization through input variables. +## Langchain Hub Prompt Template +This component fetches prompts from the [Langchain Hub](https://docs.smith.langchain.com/old/category/prompt-hub). -### Prompt {#c852d1761e6c46b19ce72e5f7c70958c} +When a prompt is loaded, the component generates input fields for custom variables. 
For example, the default prompt "efriis/my-first-prompt" generates fields for `profession` and `question`. +### Parameters -This component creates a prompt template with dynamic variables. This is useful for structuring prompts and passing dynamic data to a language model. +#### Inputs +| Name | Display Name | Info | +|--------------------|---------------------------|------------------------------------------| +| langchain_api_key | Your LangChain API Key | The LangChain API Key to use. | +| langchain_hub_prompt| LangChain Hub Prompt | The LangChain Hub prompt to use. | -**Parameters** +#### Outputs -- **Template:** The template for the prompt. This field allows you to create other fields dynamically by using curly brackets `{}`. For example, if you have a template like `Hello {name}, how are you?`, a new field called `name` will be created. Prompt variables can be created with any name inside curly brackets, e.g. `{variable_name}`. - -### PromptTemplate {#6e32412f062b42efbdf56857eafb3651} - - -The `PromptTemplate` component enables users to create prompts and define variables that control how the model is instructed. Users can input a set of variables which the template uses to generate the prompt when a conversation starts. - - -After defining a variable in the prompt template, it acts as its own component input. - -- **template:** The template used to format an individual request. +| Name | Display Name | Info | +|--------|--------------|-------------------------------------------------------------------| +| prompt | Build Prompt | The built prompt message returned by the `build_prompt` method. 
| diff --git a/docs/docs/Components/components-rag.md b/docs/docs/Components/components-rag.md index df4b5691feb3..9af9468fbfa4 100644 --- a/docs/docs/Components/components-rag.md +++ b/docs/docs/Components/components-rag.md @@ -6,23 +6,31 @@ slug: /components-rag RAG (Retrieval-Augmented Generation) components process a user query by retrieving relevant documents and generating a concise summary that addresses the user's question. -### Vectara - -`Vectara` performs RAG using a Vectara corpus, including document retrieval, reranking results, and summary generation. - -**Parameters:** - -- **Vectara Customer ID:** Customer ID. -- **Vectara Corpus ID:** Corpus ID. -- **Vectara API Key:** API key. -- **Search Query:** User query. -- **Lexical Interpolation:** How much to weigh lexical vs. embedding scores. -- **Metadata Filters:** Filters to narrow down the search documents and parts. -- **Reranker Type:** How to rerank the retrieved results. -- **Number of Results to Rerank:** Maximum reranked results. -- **Diversity Bias:** How much to diversify retrieved results (only for MMR reranker). -- **Max Results to Summarize:** Maximum search results to provide to summarizer. -- **Response Language:** The language code (use ISO 639-1 or 639-3 codes) of the summary. -- **Prompt Name:** The summarizer prompt. - -For more information, consult the [Vectara documentation](https://docs.vectara.com/docs) +## Vectara RAG + +This component leverages Vectara's Retrieval Augmented Generation (RAG) capabilities to search and summarize documents based on the provided input. For more information, see the [Vectara documentation](https://docs.vectara.com/docs/). 
+ +### Parameters + +#### Inputs + +| Name | Type | Description | +|-----------------------|--------------|------------------------------------------------------------| +| vectara_customer_id | String | Vectara customer ID | +| vectara_corpus_id | String | Vectara corpus ID | +| vectara_api_key | SecretString | Vectara API key | +| search_query | String | The query to receive an answer on | +| lexical_interpolation | Float | Hybrid search factor (0.005 to 0.1) | +| filter | String | Metadata filters to narrow the search | +| reranker | String | Reranker type (mmr, rerank_multilingual_v1, none) | +| reranker_k | Integer | Number of results to rerank (1 to 100) | +| diversity_bias | Float | Diversity bias for MMR reranker (0 to 1) | +| max_results | Integer | Maximum number of search results to summarize (1 to 100) | +| response_lang | String | Language code for the response (e.g., "eng", "auto") | +| prompt | String | Prompt name for summarization | + +#### Outputs + +| Name | Type | Description | +|--------|---------|-----------------------| +| answer | Message | Generated RAG response| \ No newline at end of file diff --git a/docs/docs/Components/components-tools.md b/docs/docs/Components/components-tools.md new file mode 100644 index 000000000000..a838a9b3ec89 --- /dev/null +++ b/docs/docs/Components/components-tools.md @@ -0,0 +1,344 @@ +# Tools + +Tool components are used to interact with external services, APIs, and tools. They can be used to search the web, query databases, and perform other tasks. + +## Bing Search API + +This component allows you to call the Bing Search API. 
+ +### Parameters + +#### Inputs + +| Name | Type | Description | +|------------------------|--------------|---------------------------------------| +| bing_subscription_key | SecretString | Bing API subscription key | +| input_value | String | Search query input | +| bing_search_url | String | Custom Bing Search URL (optional) | +| k | Integer | Number of search results to return | + +#### Outputs + +| Name | Type | Description | +|---------|-----------|--------------------------------------| +| results | List[Data]| List of search results | +| tool | Tool | Bing Search tool for use in LangChain| + +## Calculator Tool + +This component creates a tool for performing basic arithmetic operations on a given expression. + +### Parameters + +#### Inputs + +| Name | Type | Description | +|------------|--------|--------------------------------------------------------------------| +| expression | String | The arithmetic expression to evaluate (e.g., `4*4*(33/22)+12-20`). | + +#### Outputs + +| Name | Type | Description | +|--------|------|-------------------------------------------------| +| result | Tool | Calculator tool for use in LangChain | + +This component allows you to evaluate basic arithmetic expressions. It supports addition, subtraction, multiplication, division, and exponentiation. The tool uses a secure evaluation method that prevents the execution of arbitrary Python code. + +## Glean Search API + +This component allows you to call the Glean Search API. 
+ +### Parameters + +#### Inputs + +| Name | Type | Description | +|------------------------|--------------|---------------------------------------| +| glean_api_url | String | URL of the Glean API | +| glean_access_token | SecretString | Access token for Glean API authentication | +| query | String | Search query input | +| page_size | Integer | Number of results per page (default: 10) | +| request_options | Dict | Additional options for the API request (optional) | + +#### Outputs + +| Name | Type | Description | +|---------|-----------|--------------------------------------| +| results | List[Data]| List of search results | +| tool | Tool | Glean Search tool for use in LangChain| + +## Google Search API + +This component allows you to call the Google Search API. + +### Parameters + +#### Inputs + +| Name | Type | Description | +|------------------------|--------------|---------------------------------------| +| google_api_key | SecretString | Google API key for authentication | +| google_cse_id | SecretString | Google Custom Search Engine ID | +| input_value | String | Search query input | +| k | Integer | Number of search results to return | + +#### Outputs + +| Name | Type | Description | +|---------|-----------|--------------------------------------| +| results | List[Data]| List of search results | +| tool | Tool | Google Search tool for use in LangChain| + +## Google Serper API + +This component allows you to call the Serper.dev Google Search API. 
+ +### Parameters + +#### Inputs + +| Name | Type | Description | +|------------------------|--------------|---------------------------------------| +| serper_api_key | SecretString | API key for Serper.dev authentication | +| input_value | String | Search query input | +| k | Integer | Number of search results to return | + +#### Outputs + +| Name | Type | Description | +|---------|-----------|--------------------------------------| +| results | List[Data]| List of search results | +| tool | Tool | Google Serper search tool for use in LangChain| + +## Python Code Structured Tool + +This component creates a structured tool from Python code using a dataclass. + +The component dynamically updates its configuration based on the provided Python code, allowing for custom function arguments and descriptions. + +### Parameters + +#### Inputs + +| Name | Type | Description | +|------------------------|--------------|---------------------------------------| +| tool_code | String | Python code for the tool's dataclass | +| tool_name | String | Name of the tool | +| tool_description | String | Description of the tool | +| return_direct | Boolean | Whether to return the function output directly | +| tool_function | String | Selected function for the tool | +| global_variables | Dict | Global variables or data for the tool | + +#### Outputs + +| Name | Type | Description | +|-------------|-------|-----------------------------------------| +| result_tool | Tool | Structured tool created from the Python code | + +## Python REPL Tool + +This component creates a Python REPL (Read-Eval-Print Loop) tool for executing Python code.
+ +### Parameters + +#### Inputs + +| Name | Type | Description | +|-----------------|--------------|--------------------------------------------------------| +| name | String | The name of the tool (default: "python_repl") | +| description | String | A description of the tool's functionality | +| global_imports | List[String] | List of modules to import globally (default: ["math"]) | + +#### Outputs + +| Name | Type | Description | +|------|------|--------------------------------------------| +| tool | Tool | Python REPL tool for use in LangChain | + +## Retriever Tool + +This component creates a tool for interacting with a retriever in LangChain. + +### Parameters + +#### Inputs + +| Name | Type | Description | +|-------------|---------------|---------------------------------------------| +| retriever | BaseRetriever | The retriever to interact with | +| name | String | The name of the tool | +| description | String | A description of the tool's functionality | + +#### Outputs + +| Name | Type | Description | +|------|------|--------------------------------------------| +| tool | Tool | Retriever tool for use in LangChain | + +## SearXNG Search Tool + +This component creates a tool for searching using SearXNG, a metasearch engine. + +### Parameters + +#### Inputs + +| Name | Type | Description | +|-------------|--------------|---------------------------------------| +| url | String | The URL of the SearXNG instance | +| max_results | Integer | Maximum number of results to return | +| categories | List[String] | Categories to search in | +| language | String | Language for the search results | + +#### Outputs + +| Name | Type | Description | +|-------------|------|--------------------------------------------| +| result_tool | Tool | SearXNG search tool for use in LangChain | + +## Search API + +This component calls the `searchapi.io` API. It can be used to search the web for information. 
+ +For more information, see the [SearchAPI documentation](https://www.searchapi.io/docs/google). + +### Parameters + +#### Inputs + +| Name | Display Name | Info | +|----------------|---------------------|-----------------------------------------------------| +| engine | Engine | The search engine to use (default: "google") | +| api_key | SearchAPI API Key | The API key for authenticating with SearchAPI | +| input_value | Input | The search query or input for the API call | +| search_params | Search parameters | Additional parameters for customizing the search | + +#### Outputs + +| Name | Display Name | Info | +|------|-----------------|------------------------------------------------------| +| data | Search Results | List of Data objects containing search results | +| tool | Search API Tool | A Tool object for use in LangChain workflows | + +## Serp Search API + +This component creates a tool for searching using the Serp API. + +### Parameters + +#### Inputs + +| Name | Type | Description | +|------------------|--------------|---------------------------------------------| +| serpapi_api_key | SecretString | API key for Serp API authentication | +| input_value | String | Search query input | +| search_params | Dict | Additional search parameters (optional) | + +#### Outputs + +| Name | Type | Description | +|---------|-----------|---------------------------------------------| +| results | List[Data]| List of search results | +| tool | Tool | Serp API search tool for use in LangChain | + +## Wikipedia API + +This component creates a tool for searching and retrieving information from Wikipedia. 
+ +### Parameters + +#### Inputs + +| Name | Type | Description | +|-------------------------|---------|-----------------------------------------------------------| +| input_value | String | Search query input | +| lang | String | Language code for Wikipedia (default: "en") | +| k | Integer | Number of results to return | +| load_all_available_meta | Boolean | Whether to load all available metadata (advanced) | +| doc_content_chars_max | Integer | Maximum number of characters for document content (advanced)| + +#### Outputs + +| Name | Type | Description | +|---------|-----------|---------------------------------------| +| results | List[Data]| List of Wikipedia search results | +| tool | Tool | Wikipedia search tool for use in LangChain | + +## Wolfram Alpha API + +This component creates a tool for querying the Wolfram Alpha API. + +### Parameters + +#### Inputs + +| Name | Type | Description | +|-------------|--------------|--------------------------------| +| input_value | String | Query input for Wolfram Alpha | +| app_id | SecretString | Wolfram Alpha API App ID | + +#### Outputs + +| Name | Type | Description | +|---------|-----------|------------------------------------------------| +| results | List[Data]| List containing the Wolfram Alpha API response | +| tool | Tool | Wolfram Alpha API tool for use in LangChain | + +## Yahoo Finance News Tool + +This component creates a tool for retrieving news from Yahoo Finance. + +### Parameters + +This component does not have any input parameters. + +#### Outputs + +| Name | Type | Description | +|------|------|----------------------------------------------| +| tool | Tool | Yahoo Finance News tool for use in LangChain | + + +## Astra DB Tool + +The `Astra DB Tool` allows agents to connect to and query data from Astra DB Collections. 
+ +### Parameters + +#### Inputs + +| Name | Type | Description | +|-------------------|--------|----------------------------------------------------------------------------------------------------------------------------------| +| Tool Name | String | The name used to reference the tool in the agent's prompt. | +| Tool Description | String | A brief description of the tool. This helps the model decide when to use it. | +| Collection Name | String | The name of the Astra DB collection to query. | +| Token | SecretString | The authentication token for accessing Astra DB. | +| API Endpoint | String | The Astra DB API endpoint. | +| Projection Fields | String | The attributes to return, separated by commas. Default: "*". | +| Tool Parameters | Dict | Parameters the model needs to fill to execute the tool. For required parameters, use an exclamation mark (e.g., "!customer_id"). | +| Static Filters | Dict | Attribute-value pairs used to filter query results. | +| Limit | String | The number of documents to return. | + + + +## Astra DB CQL Tool + +The `Astra DB CQL Tool` allows agents to query data from CQL Tables in Astra DB. + +### Parameters + +#### Inputs + +| Name | Type | Description | +|-------------------|--------|----------------------------------------------------------------------------------------------------------------------------------------------------| +| Tool Name | String | The name used to reference the tool in the agent's prompt. | +| Tool Description | String | A brief description of the tool to guide the model in using it. | +| Keyspace | String | The name of the keyspace. | +| Table Name | String | The name of the Astra DB CQL table to query. | +| Token | SecretString | The authentication token for Astra DB. | +| API Endpoint | String | The Astra DB API endpoint. | +| Projection Fields | String | The attributes to return, separated by commas. Default: "*". | +| Partition Keys | Dict | Required parameters that the model must fill to query the tool. 
| +| Clustering Keys | Dict | Optional parameters the model can fill to refine the query. Required parameters should be marked with an exclamation mark (e.g., "!customer_id"). | +| Static Filters | Dict | Attribute-value pairs used to filter query results. | +| Limit | String | The number of records to return. | diff --git a/docs/docs/Components/components-vector-stores.md b/docs/docs/Components/components-vector-stores.md index 02a219f42448..da8c181d1ee7 100644 --- a/docs/docs/Components/components-vector-stores.md +++ b/docs/docs/Components/components-vector-stores.md @@ -3,586 +3,619 @@ title: Vector Stores sidebar_position: 7 slug: /components-vector-stores --- +# Vector Stores + +Vector databases are used to store and search for vectors. They can be used to store embeddings, search for similar vectors, and perform other vector operations. + +## Astra DB Vector Store + +This component implements a Vector Store using Astra DB with search capabilities. + +For more information, see the [DataStax documentation](https://docs.datastax.com/en/astra-db-serverless/databases/create-database.html). 
+ +### Parameters + +#### Inputs + +| Name | Display Name | Info | +|------|--------------|------| +| collection_name | Collection Name | The name of the collection within Astra DB where the vectors will be stored (required) | +| token | Astra DB Application Token | Authentication token for accessing Astra DB (required) | +| api_endpoint | API Endpoint | API endpoint URL for the Astra DB service (required) | +| search_input | Search Input | Query string for similarity search | +| ingest_data | Ingest Data | Data to be ingested into the vector store | +| namespace | Namespace | Optional namespace within Astra DB to use for the collection | +| embedding_choice | Embedding Model or Astra Vectorize | Determines whether to use an Embedding Model or Astra Vectorize for the collection | +| embedding | Embedding Model | Allows an embedding model configuration (when using Embedding Model) | +| provider | Vectorize Provider | Provider for Astra Vectorize (when using Astra Vectorize) | +| metric | Metric | Optional distance metric for vector comparisons | +| batch_size | Batch Size | Optional number of data to process in a single batch | +| setup_mode | Setup Mode | Configuration mode for setting up the vector store (options: "Sync", "Async", "Off", default: "Sync") | +| pre_delete_collection | Pre Delete Collection | Boolean flag to determine whether to delete the collection before creating a new one | +| number_of_results | Number of Results | Number of results to return in similarity search (default: 4) | +| search_type | Search Type | Search type to use (options: "Similarity", "Similarity with score threshold", "MMR (Max Marginal Relevance)") | +| search_score_threshold | Search Score Threshold | Minimum similarity score threshold for search results | +| search_filter | Search Metadata Filter | Optional dictionary of filters to apply to the search query | + +#### Outputs + +| Name | Display Name | Info | +|------|--------------|------| +| vector_store | Vector Store | 
Built Astra DB vector store | +| search_results | Search Results | Results of the similarity search as a list of Data objects | + +## Cassandra + +This component creates a Cassandra Vector Store with search capabilities. +For more information, see the [Cassandra documentation](https://cassandra.apache.org/doc/latest/cassandra/vector-search/overview.html). + +### Parameters + +#### Inputs + +| Name | Type | Description | +|------|------|-------------| +| database_ref | String | Contact points for the database or AstraDB database ID | +| username | String | Username for the database (leave empty for AstraDB) | +| token | SecretString | User password for the database or AstraDB token | +| keyspace | String | Table Keyspace or AstraDB namespace | +| table_name | String | Name of the table or AstraDB collection | +| ttl_seconds | Integer | Time-to-live for added texts | +| batch_size | Integer | Number of data to process in a single batch | +| setup_mode | String | Configuration mode for setting up the Cassandra table | +| cluster_kwargs | Dict | Additional keyword arguments for the Cassandra cluster | +| search_query | String | Query for similarity search | +| ingest_data | Data | Data to be ingested into the vector store | +| embedding | Embeddings | Embedding function to use | +| number_of_results | Integer | Number of results to return in search | +| search_type | String | Type of search to perform | +| search_score_threshold | Float | Minimum similarity score for search results | +| search_filter | Dict | Metadata filters for search query | +| body_search | String | Document textual search terms | +| enable_body_search | Boolean | Flag to enable body search | + +#### Outputs + +| Name | Type | Description | +|------|------|-------------| +| vector_store | Cassandra | Cassandra vector store instance | +| search_results | List[Data] | Results of similarity search | + +## Cassandra Graph Vector Store + +This component implements a Cassandra Graph Vector Store with 
search capabilities. + +### Parameters + +#### Inputs + +| Name | Display Name | Info | +|------|--------------|------| +| database_ref | Contact Points / Astra Database ID | Contact points for the database or AstraDB database ID (required) | +| username | Username | Username for the database (leave empty for AstraDB) | +| token | Password / AstraDB Token | User password for the database or AstraDB token (required) | +| keyspace | Keyspace | Table Keyspace or AstraDB namespace (required) | +| table_name | Table Name | The name of the table or AstraDB collection where vectors will be stored (required) | +| setup_mode | Setup Mode | Configuration mode for setting up the Cassandra table (options: "Sync", "Off", default: "Sync") | +| cluster_kwargs | Cluster arguments | Optional dictionary of additional keyword arguments for the Cassandra cluster | +| search_query | Search Query | Query string for similarity search | +| ingest_data | Ingest Data | Data to be ingested into the vector store (list of Data objects) | +| embedding | Embedding | Embedding model to use | +| number_of_results | Number of Results | Number of results to return in similarity search (default: 4) | +| search_type | Search Type | Search type to use (options: "Traversal", "MMR traversal", "Similarity", "Similarity with score threshold", "MMR (Max Marginal Relevance)", default: "Traversal") | +| depth | Depth of traversal | The maximum depth of edges to traverse (for "Traversal" or "MMR traversal" search types, default: 1) | +| search_score_threshold | Search Score Threshold | Minimum similarity score threshold for search results (for "Similarity with score threshold" search type) | +| search_filter | Search Metadata Filter | Optional dictionary of filters to apply to the search query | + +#### Outputs + +| Name | Display Name | Info | +|------|--------------|------| +| vector_store | Vector Store | Built Cassandra Graph vector store | +| search_results | Search Results | Results of the similarity 
search as a list of Data objects | + +## Chroma DB + +This component creates a Chroma Vector Store with search capabilities. +For more information, see the [Chroma documentation](https://docs.trychroma.com/). + +### Parameters + +#### Inputs + +| Name | Type | Description | +|------------------------------|---------------|--------------------------------------------------| +| collection_name | String | The name of the Chroma collection. Default: "langflow". | +| persist_directory | String | The directory to persist the Chroma database. | +| search_query | String | The query to search for in the vector store. | +| ingest_data | Data | The data to ingest into the vector store (list of Data objects). | +| embedding | Embeddings | The embedding function to use for the vector store. | +| chroma_server_cors_allow_origins | String | CORS allow origins for the Chroma server. | +| chroma_server_host | String | Host for the Chroma server. | +| chroma_server_http_port | Integer | HTTP port for the Chroma server. | +| chroma_server_grpc_port | Integer | gRPC port for the Chroma server. | +| chroma_server_ssl_enabled | Boolean | Enable SSL for the Chroma server. | +| allow_duplicates | Boolean | Allow duplicate documents in the vector store. | +| search_type | String | Type of search to perform: "Similarity" or "MMR". | +| number_of_results | Integer | Number of results to return from the search. Default: 10. | +| limit | Integer | Limit the number of records to compare when Allow Duplicates is False. | + +#### Outputs + +| Name | Type | Description | +|----------------|---------------|--------------------------------| +| vector_store | Chroma | Chroma vector store instance | +| search_results | List[Data] | Results of similarity search | + +## Clickhouse + +This component implements a Clickhouse Vector Store with search capabilities. +For more information, see the [Clickhouse documentation](https://clickhouse.com/docs/en/intro).
+ +### Parameters + +#### Inputs + +| Name | Display Name | Info | +|------|--------------|------| +| host | hostname | Clickhouse server hostname (required, default: "localhost") | +| port | port | Clickhouse server port (required, default: 8123) | +| database | database | Clickhouse database name (required) | +| table | Table name | Clickhouse table name (required) | +| username | Username | The ClickHouse username for authentication (required) | +| password | Password | The password for the ClickHouse username (required) | +| index_type | index_type | Type of the index (options: "annoy", "vector_similarity", default: "annoy") | +| metric | metric | Metric to compute distance (options: "angular", "euclidean", "manhattan", "hamming", "dot", default: "angular") | +| secure | Use https/TLS | Overrides inferred values from the interface or port arguments (default: false) | +| index_param | Param of the index | Index parameters (default: "'L2Distance',100") | +| index_query_params | index query params | Additional index query parameters | +| search_query | Search Query | Query string for similarity search | +| ingest_data | Ingest Data | Data to be ingested into the vector store | +| embedding | Embedding | Embedding model to use | +| number_of_results | Number of Results | Number of results to return in similarity search (default: 4) | +| score_threshold | Score threshold | Threshold for similarity scores | + +#### Outputs + +| Name | Display Name | Info | +|------|--------------|------| +| vector_store | Vector Store | Built Clickhouse vector store | +| search_results | Search Results | Results of the similarity search as a list of Data objects | + +## Couchbase + +This component creates a Couchbase Vector Store with search capabilities. +For more information, see the [Couchbase documentation](https://docs.couchbase.com/home/index.html).
+ +### Parameters + +#### Inputs + +| Name | Type | Description | +|-------------------------|---------------|--------------------------------------------------| +| couchbase_connection_string | SecretString | Couchbase Cluster connection string (required). | +| couchbase_username | String | Couchbase username (required). | +| couchbase_password | SecretString | Couchbase password (required). | +| bucket_name | String | Name of the Couchbase bucket (required). | +| scope_name | String | Name of the Couchbase scope (required). | +| collection_name | String | Name of the Couchbase collection (required). | +| index_name | String | Name of the Couchbase index (required). | +| search_query | String | The query to search for in the vector store. | +| ingest_data | Data | The data to ingest into the vector store (list of Data objects). | +| embedding | Embeddings | The embedding function to use for the vector store. | +| number_of_results | Integer | Number of results to return from the search. Default: 4 (advanced). | + +#### Outputs + +| Name | Type | Description | +|----------------|------------------------|--------------------------------| +| vector_store | CouchbaseVectorStore | A Couchbase vector store instance configured with the specified parameters. | + +## FAISS + +This component creates a FAISS Vector Store with search capabilities. +For more information, see the [FAISS documentation](https://faiss.ai/index.html). + +### Parameters + +#### Inputs + +| Name | Type | Description | +|---------------------------|---------------|--------------------------------------------------| +| index_name | String | The name of the FAISS index. Default: "langflow_index". | +| persist_directory | String | Path to save the FAISS index. It will be relative to where Langflow is running. | +| search_query | String | The query to search for in the vector store. | +| ingest_data | Data | The data to ingest into the vector store (list of Data objects or documents). 
| +| allow_dangerous_deserialization | Boolean | Set to True to allow loading pickle files from untrusted sources. Default: True (advanced). | +| embedding | Embeddings | The embedding function to use for the vector store. | +| number_of_results | Integer | Number of results to return from the search. Default: 4 (advanced). | + +#### Outputs + +| Name | Type | Description | +|----------------|------------------------|--------------------------------| +| vector_store | FAISS | A FAISS vector store instance configured with the specified parameters. | + +## Hyper-Converged Database (HCD) Vector Store + +This component implements a Vector Store using HCD. + +### Parameters + +#### Inputs + +| Name | Display Name | Info | +|------|--------------|------| +| collection_name | Collection Name | The name of the collection within HCD where the vectors will be stored (required) | +| username | HCD Username | Authentication username for accessing HCD (default: "hcd-superuser", required) | +| password | HCD Password | Authentication password for accessing HCD (required) | +| api_endpoint | HCD API Endpoint | API endpoint URL for the HCD service (required) | +| search_input | Search Input | Query string for similarity search | +| ingest_data | Ingest Data | Data to be ingested into the vector store | +| namespace | Namespace | Optional namespace within HCD to use for the collection (default: "default_namespace") | +| ca_certificate | CA Certificate | Optional CA certificate for TLS connections to HCD | +| metric | Metric | Optional distance metric for vector comparisons (options: "cosine", "dot_product", "euclidean") | +| batch_size | Batch Size | Optional number of data to process in a single batch | +| bulk_insert_batch_concurrency | Bulk Insert Batch Concurrency | Optional concurrency level for bulk insert operations | +| bulk_insert_overwrite_concurrency | Bulk Insert Overwrite Concurrency | Optional concurrency level for bulk insert operations that overwrite existing data | 
+| bulk_delete_concurrency | Bulk Delete Concurrency | Optional concurrency level for bulk delete operations | +| setup_mode | Setup Mode | Configuration mode for setting up the vector store (options: "Sync", "Async", "Off", default: "Sync") | +| pre_delete_collection | Pre Delete Collection | Boolean flag to determine whether to delete the collection before creating a new one | +| metadata_indexing_include | Metadata Indexing Include | Optional list of metadata fields to include in the indexing | +| embedding | Embedding or Astra Vectorize | Allows either an embedding model or an Astra Vectorize configuration | +| metadata_indexing_exclude | Metadata Indexing Exclude | Optional list of metadata fields to exclude from the indexing | +| collection_indexing_policy | Collection Indexing Policy | Optional dictionary defining the indexing policy for the collection | +| number_of_results | Number of Results | Number of results to return in similarity search (default: 4) | +| search_type | Search Type | Search type to use (options: "Similarity", "Similarity with score threshold", "MMR (Max Marginal Relevance)", default: "Similarity") | +| search_score_threshold | Search Score Threshold | Minimum similarity score threshold for search results (default: 0) | +| search_filter | Search Metadata Filter | Optional dictionary of filters to apply to the search query | + +#### Outputs +| Name | Display Name | Info | +|------|--------------|------| +| vector_store | Vector Store | Built HCD vector store instance | +| search_results | Search Results | Results of similarity search as a list of Data objects | +## Milvus + +This component creates a Milvus Vector Store with search capabilities. +For more information, see the [Milvus documentation](https://milvus.io/docs). 
+ +### Parameters + +#### Inputs + +| Name | Type | Description | +|-------------------------|---------------|--------------------------------------------------| +| collection_name | String | Name of the Milvus collection | +| collection_description | String | Description of the Milvus collection | +| uri | String | Connection URI for Milvus | +| password | SecretString | Password for Milvus | +| username | SecretString | Username for Milvus | +| batch_size | Integer | Number of data to process in a single batch | +| search_query | String | Query for similarity search | +| ingest_data | Data | Data to be ingested into the vector store | +| embedding | Embeddings | Embedding function to use | +| number_of_results | Integer | Number of results to return in search | +| search_type | String | Type of search to perform | +| search_score_threshold | Float | Minimum similarity score for search results | +| search_filter | Dict | Metadata filters for search query | +| setup_mode | String | Configuration mode for setting up the vector store | +| vector_dimensions | Integer | Number of dimensions of the vectors | +| pre_delete_collection | Boolean | Whether to delete the collection before creating a new one | -:::info +#### Outputs -This page may contain outdated information. It will be updated as soon as possible. +| Name | Type | Description | +|----------------|------------------------|--------------------------------| +| vector_store | Milvus | A Milvus vector store instance configured with the specified parameters. | -::: +## MongoDB Atlas + +This component creates a MongoDB Atlas Vector Store with search capabilities. +For more information, see the [MongoDB Atlas documentation](https://www.mongodb.com/docs/atlas/atlas-vector-search/tutorials/vector-search-quick-start/). 
+ +### Parameters + +#### Inputs + +| Name | Type | Description | +| ------------------------ | ------------ | ----------------------------------------- | +| mongodb_atlas_cluster_uri | SecretString | MongoDB Atlas Cluster URI | +| db_name | String | Database name | +| collection_name | String | Collection name | +| index_name | String | Index name | +| search_query | String | Query for similarity search | +| ingest_data | Data | Data to be ingested into the vector store | +| embedding | Embeddings | Embedding function to use | +| number_of_results | Integer | Number of results to return in search | + +#### Outputs + +| Name | Type | Description | +| ------------- | ---------------------- | ----------------------------------------- | +| vector_store | MongoDBAtlasVectorSearch| MongoDB Atlas vector store instance | +| search_results| List[Data] | Results of similarity search | + + +## PGVector + +This component creates a PGVector Vector Store with search capabilities. +For more information, see the [PGVector documentation](https://github.com/pgvector/pgvector). + +### Parameters + +#### Inputs + +| Name | Type | Description | +| --------------- | ------------ | ----------------------------------------- | +| pg_server_url | SecretString | PostgreSQL server connection string | +| collection_name | String | Table name for the vector store | +| search_query | String | Query for similarity search | +| ingest_data | Data | Data to be ingested into the vector store | +| embedding | Embeddings | Embedding function to use | +| number_of_results | Integer | Number of results to return in search | + +#### Outputs +| Name | Type | Description | +| ------------- | ----------- | ----------------------------------------- | +| vector_store | PGVector | PGVector vector store instance | +| search_results| List[Data] | Results of similarity search | +## Pinecone -### Astra DB {#453bcf5664154e37a920f1b602bd39da} +This component creates a Pinecone Vector Store with search capabilities. 
+For more information, see the [Pinecone documentation](https://docs.pinecone.io/home). +### Parameters -The `Astra DB` initializes a vector store using Astra DB from Data. It creates Astra DB-based vector indexes to efficiently store and retrieve documents. +#### Inputs +| Name | Type | Description | +| ----------------- | ------------ | ----------------------------------------- | +| index_name | String | Name of the Pinecone index | +| namespace | String | Namespace for the index | +| distance_strategy | String | Strategy for calculating distance between vectors | +| pinecone_api_key | SecretString | API key for Pinecone | +| text_key | String | Key in the record to use as text | +| search_query | String | Query for similarity search | +| ingest_data | Data | Data to be ingested into the vector store | +| embedding | Embeddings | Embedding function to use | +| number_of_results | Integer | Number of results to return in search | -**Parameters:** +#### Outputs -- **Input:** Documents or Data for input. -- **Embedding or Astra vectorize:** External or server-side model Astra DB uses. -- **Collection Name:** Name of the Astra DB collection. -- **Token:** Authentication token for Astra DB. -- **API Endpoint:** API endpoint for Astra DB. -- **Namespace:** Astra DB namespace. -- **Metric:** Metric used by Astra DB. -- **Batch Size:** Batch size for operations. -- **Bulk Insert Batch Concurrency:** Concurrency level for bulk inserts. -- **Bulk Insert Overwrite Concurrency:** Concurrency level for overwriting during bulk inserts. -- **Bulk Delete Concurrency:** Concurrency level for bulk deletions. -- **Setup Mode:** Setup mode for the vector store. -- **Pre Delete Collection:** Option to delete the collection before setup. -- **Metadata Indexing Include:** Fields to include in metadata indexing. -- **Metadata Indexing Exclude:** Fields to exclude from metadata indexing. -- **Collection Indexing Policy:** Indexing policy for the collection. 
+| Name | Type | Description | +| ------------- | ---------- | ----------------------------------------- | +| vector_store | Pinecone | Pinecone vector store instance | +| search_results| List[Data] | Results of similarity search | -NOTE +## Qdrant -Ensure you configure the necessary Astra DB token and API endpoint before starting. +This component creates a Qdrant Vector Store with search capabilities. +For more information, see the [Qdrant documentation](https://qdrant.tech/documentation/). +### Parameters ---- - - -### Astra DB Search {#26f25d1933a9459bad2d6725f87beb11} - - -`Astra DBSearch` searches an existing Astra DB vector store for documents similar to the input. It uses the `Astra DB`component's functionality for efficient retrieval. - - -**Parameters:** - -- **Search Type:** Type of search, such as Similarity or MMR. -- **Input Value:** Value to search for. -- **Embedding or Astra vectorize:** External or server-side model Astra DB uses. -- **Collection Name:** Name of the Astra DB collection. -- **Token:** Authentication token for Astra DB. -- **API Endpoint:** API endpoint for Astra DB. -- **Namespace:** Astra DB namespace. -- **Metric:** Metric used by Astra DB. -- **Batch Size:** Batch size for operations. -- **Bulk Insert Batch Concurrency:** Concurrency level for bulk inserts. -- **Bulk Insert Overwrite Concurrency:** Concurrency level for overwriting during bulk inserts. -- **Bulk Delete Concurrency:** Concurrency level for bulk deletions. -- **Setup Mode:** Setup mode for the vector store. -- **Pre Delete Collection:** Option to delete the collection before setup. -- **Metadata Indexing Include:** Fields to include in metadata indexing. -- **Metadata Indexing Exclude:** Fields to exclude from metadata indexing. -- **Collection Indexing Policy:** Indexing policy for the collection. 
- ---- - - -### Chroma {#74730795605143cba53e1f4c4f2ef5d6} - - -`Chroma` sets up a vector store using Chroma for efficient vector storage and retrieval within language processing workflows. - - -**Parameters:** - -- **Collection Name:** Name of the collection. -- **Persist Directory:** Directory to persist the Vector Store. -- **Server CORS Allow Origins (Optional):** CORS allow origins for the Chroma server. -- **Server Host (Optional):** Host for the Chroma server. -- **Server Port (Optional):** Port for the Chroma server. -- **Server gRPC Port (Optional):** gRPC port for the Chroma server. -- **Server SSL Enabled (Optional):** SSL configuration for the Chroma server. -- **Input:** Input data for creating the Vector Store. -- **Embedding:** Embeddings used for the Vector Store. - -For detailed documentation and integration guides, please refer to the [Chroma Component Documentation](https://python.langchain.com/docs/integrations/vectorstores/chroma). - - ---- - - -### Chroma Search {#5718072a155441f3a443b944ad4d638f} - - -`ChromaSearch` searches a Chroma collection for documents similar to the input text. It leverages Chroma to ensure efficient document retrieval. - - -**Parameters:** - -- **Input:** Input text for search. -- **Search Type:** Type of search, such as Similarity or MMR. -- **Collection Name:** Name of the Chroma collection. -- **Index Directory:** Directory where the Chroma index is stored. -- **Embedding:** Embedding model used for vectorization. -- **Server CORS Allow Origins (Optional):** CORS allow origins for the Chroma server. -- **Server Host (Optional):** Host for the Chroma server. -- **Server Port (Optional):** Port for the Chroma server. -- **Server gRPC Port (Optional):** gRPC port for the Chroma server. -- **Server SSL Enabled (Optional):** SSL configuration for the Chroma server. 
- ---- - - -### Couchbase {#6900a79347164f35af27ae27f0d64a6d} - - -`Couchbase` builds a Couchbase vector store from Data, streamlining the storage and retrieval of documents. - - -**Parameters:** - -- **Embedding:** Model used by Couchbase. -- **Input:** Documents or Data. -- **Couchbase Cluster Connection String:** Cluster Connection string. -- **Couchbase Cluster Username:** Cluster Username. -- **Couchbase Cluster Password:** Cluster Password. -- **Bucket Name:** Bucket identifier in Couchbase. -- **Scope Name:** Scope identifier in Couchbase. -- **Collection Name:** Collection identifier in Couchbase. -- **Index Name:** Index identifier. - -For detailed documentation and integration guides, please refer to the [Couchbase Component Documentation](https://python.langchain.com/docs/integrations/vectorstores/couchbase). - - ---- - - -### Couchbase Search {#c77bb09425a3426f9677d38d8237d9ba} - - -`CouchbaseSearch` leverages the Couchbase component to search for documents based on similarity metric. - - -**Parameters:** - -- **Input:** Search query. -- **Embedding:** Model used in the Vector Store. -- **Couchbase Cluster Connection String:** Cluster Connection string. -- **Couchbase Cluster Username:** Cluster Username. -- **Couchbase Cluster Password:** Cluster Password. -- **Bucket Name:** Bucket identifier. -- **Scope Name:** Scope identifier. -- **Collection Name:** Collection identifier in Couchbase. -- **Index Name:** Index identifier. - ---- - - -### FAISS {#5b3f4e6592a847b69e07df2f674a03f0} - - -The `FAISS` component manages document ingestion into a FAISS Vector Store, optimizing document indexing and retrieval. - - -**Parameters:** - -- **Embedding:** Model used for vectorizing inputs. -- **Input:** Documents to ingest. -- **Folder Path:** Save path for the FAISS index, relative to Langflow. - -For more details, see the [FAISS Component Documentation](https://faiss.ai/index.html). 
- - ---- - - -### FAISS Search {#81ff12d7205940a3b14e3ddf304630f8} - - -`FAISSSearch` searches a FAISS Vector Store for documents similar to a given input, using similarity metrics for efficient retrieval. - - -**Parameters:** - -- **Embedding:** Model used in the FAISS Vector Store. -- **Folder Path:** Path to load the FAISS index from, relative to Langflow. -- **Input:** Search query. -- **Index Name:** Index identifier. - ---- - - -### MongoDB Atlas {#eba8892f7a204b97ad1c353e82948149} - - -`MongoDBAtlas` builds a MongoDB Atlas-based vector store from Data, streamlining the storage and retrieval of documents. - - -**Parameters:** - -- **Embedding:** Model used by MongoDB Atlas. -- **Input:** Documents or Data. -- **Collection Name:** Collection identifier in MongoDB Atlas. -- **Database Name:** Database identifier. -- **Index Name:** Index identifier. -- **MongoDB Atlas Cluster URI:** Cluster URI. -- **Search Kwargs:** Additional search parameters. - -NOTE - - -Ensure pymongo is installed for using MongoDB Atlas Vector Store. - - ---- - - -### MongoDB Atlas Search {#686ba0e30a54438cbc7153b81ee4b1df} - - -`MongoDBAtlasSearch` leverages the MongoDBAtlas component to search for documents based on similarity metrics. - - -**Parameters:** - -- **Search Type:** Type of search, such as "Similarity" or "MMR". -- **Input:** Search query. -- **Embedding:** Model used in the Vector Store. -- **Collection Name:** Collection identifier. -- **Database Name:** Database identifier. -- **Index Name:** Index identifier. -- **MongoDB Atlas Cluster URI:** Cluster URI. -- **Search Kwargs:** Additional search parameters. - ---- - - -### PGVector {#7ceebdd84ab14f8e8589c13c58370e5b} - - -`PGVector` integrates a Vector Store within a PostgreSQL database, allowing efficient storage and retrieval of vectors. - - -**Parameters:** - -- **Input:** Value for the Vector Store. -- **Embedding:** Model used. -- **PostgreSQL Server Connection String:** Server URL. 
-- **Table:** Table name in the PostgreSQL database. - -For more details, see the [PGVector Component Documentation](https://python.langchain.com/docs/integrations/vectorstores/pgvector). - - -NOTE - - -Ensure the PostgreSQL server is accessible and configured correctly. +#### Inputs +| Name | Type | Description | +| -------------------- | ------------ | ----------------------------------------- | +| collection_name | String | Name of the Qdrant collection | +| host | String | Qdrant server host | +| port | Integer | Qdrant server port | +| grpc_port | Integer | Qdrant gRPC port | +| api_key | SecretString | API key for Qdrant | +| prefix | String | Prefix for Qdrant | +| timeout | Integer | Timeout for Qdrant operations | +| path | String | Path for Qdrant | +| url | String | URL for Qdrant | +| distance_func | String | Distance function for vector similarity | +| content_payload_key | String | Key for content payload | +| metadata_payload_key | String | Key for metadata payload | +| search_query | String | Query for similarity search | +| ingest_data | Data | Data to be ingested into the vector store | +| embedding | Embeddings | Embedding function to use | +| number_of_results | Integer | Number of results to return in search | ---- - - -### PGVector Search {#196bf22ea2844bdbba971b5082750943} - - -`PGVectorSearch` extends `PGVector` to search for documents based on similarity metrics. - - -**Parameters:** - -- **Input:** Search query. -- **Embedding:** Model used. -- **PostgreSQL Server Connection String:** Server URL. -- **Table:** Table name. -- **Search Type:** Type of search, such as "Similarity" or "MMR". - ---- - - -### Pinecone {#67abbe3e27c34fb4bcb35926ce831727} - - -`Pinecone` constructs a Pinecone wrapper from Data, setting up Pinecone-based vector indexes for document storage and retrieval. - - -**Parameters:** - -- **Input:** Documents or Data. -- **Embedding:** Model used. -- **Index Name:** Index identifier. -- **Namespace:** Namespace used. 
-- **Pinecone API Key:** API key. -- **Pinecone Environment:** Environment settings. -- **Search Kwargs:** Additional search parameters. -- **Pool Threads:** Number of threads. - -:::info - -Ensure the Pinecone API key and environment are correctly configured. - -::: - - - - ---- - - -### Pinecone Search {#977944558cad4cf2ba332ea4f06bf485} - - -`PineconeSearch` searches a Pinecone Vector Store for documents similar to the input, using advanced similarity metrics. - - -**Parameters:** - -- **Search Type:** Type of search, such as "Similarity" or "MMR". -- **Input Value:** Search query. -- **Embedding:** Model used. -- **Index Name:** Index identifier. -- **Namespace:** Namespace used. -- **Pinecone API Key:** API key. -- **Pinecone Environment:** Environment settings. -- **Search Kwargs:** Additional search parameters. -- **Pool Threads:** Number of threads. - ---- - - -### Qdrant {#88df77f3044e4ac6980950835a919fb0} - - -`Qdrant` allows efficient similarity searches and retrieval operations, using a list of texts to construct a Qdrant wrapper. - - -**Parameters:** - -- **Input:** Documents or Data. -- **Embedding:** Model used. -- **API Key:** Qdrant API key. -- **Collection Name:** Collection identifier. -- **Advanced Settings:** Includes content payload key, distance function, gRPC port, host, HTTPS, location, metadata payload key, path, port, prefer gRPC, prefix, search kwargs, timeout, URL. - ---- - - -### Qdrant Search {#5ba5f8dca0f249d7ad00778f49901e6c} - - -`QdrantSearch` extends `Qdrant` to search for documents similar to the input based on advanced similarity metrics. - - -**Parameters:** - -- **Search Type:** Type of search, such as "Similarity" or "MMR". -- **Input Value:** Search query. -- **Embedding:** Model used. -- **API Key:** Qdrant API key. -- **Collection Name:** Collection identifier. 
-- **Advanced Settings:** Includes content payload key, distance function, gRPC port, host, HTTPS, location, metadata payload key, path, port, prefer gRPC, prefix, search kwargs, timeout, URL. - ---- +#### Outputs +| Name | Type | Description | +| ------------- | -------- | ----------------------------------------- | +| vector_store | Qdrant | Qdrant vector store instance | +| search_results| List[Data] | Results of similarity search | -### Redis {#a0fb8a9d244a40eb8439d0f8c22a2562} +## Redis -`Redis` manages a Vector Store in a Redis database, supporting efficient vector storage and retrieval. +This component creates a Redis Vector Store with search capabilities. +For more information, see the [Redis documentation](https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/vectors/). +### Parameters -**Parameters:** +#### Inputs -- **Index Name:** Default index name. -- **Input:** Data for building the Redis Vector Store. -- **Embedding:** Model used. -- **Schema:** Optional schema file (.yaml) for document structure. -- **Redis Server Connection String:** Server URL. -- **Redis Index:** Optional index name. +| Name | Type | Description | +| ----------------- | ------------ | ----------------------------------------- | +| redis_server_url | SecretString | Redis server connection string | +| redis_index_name | String | Name of the Redis index | +| code | String | Custom code for Redis (advanced) | +| schema | String | Schema for Redis index | +| search_query | String | Query for similarity search | +| ingest_data | Data | Data to be ingested into the vector store | +| number_of_results | Integer | Number of results to return in search | +| embedding | Embeddings | Embedding function to use | -For detailed documentation, refer to the [Redis Documentation](https://python.langchain.com/docs/integrations/vectorstores/redis). 
+#### Outputs +| Name | Type | Description | +| ------------- | -------- | ----------------------------------------- | +| vector_store | Redis | Redis vector store instance | +| search_results| List[Data]| Results of similarity search | -:::info -Ensure the Redis server URL and index name are configured correctly. Provide a schema if no documents are available. +## Supabase -::: +This component creates a connection to a Supabase Vector Store with search capabilities. +For more information, see the [Supabase documentation](https://supabase.com/docs/guides/ai). +### Parameters +#### Inputs +| Name | Type | Description | +| ------------------- | ------------ | ----------------------------------------- | +| supabase_url | String | URL of the Supabase instance | +| supabase_service_key| SecretString | Service key for Supabase authentication | +| table_name | String | Name of the table in Supabase | +| query_name | String | Name of the query to use | +| search_query | String | Query for similarity search | +| ingest_data | Data | Data to be ingested into the vector store | +| embedding | Embeddings | Embedding function to use | +| number_of_results | Integer | Number of results to return in search | ---- - - -### Redis Search {#80aea4da515f490e979c8576099ee880} - - -`RedisSearch` searches a Redis Vector Store for documents similar to the input. - - -**Parameters:** - -- **Search Type:** Type of search, such as "Similarity" or "MMR". -- **Input Value:** Search query. -- **Index Name:** Default index name. -- **Embedding:** Model used. -- **Schema:** Optional schema file (.yaml) for document structure. -- **Redis Server Connection String:** Server URL. -- **Redis Index:** Optional index name. - ---- - - -### Supabase {#e86fb3cc507e4b5494f0a421f94e853b} - - -`Supabase` initializes a Supabase Vector Store from texts and embeddings, setting up an environment for efficient document retrieval. - - -**Parameters:** - -- **Input:** Documents or data. -- **Embedding:** Model used. 
-- **Query Name:** Optional query name. -- **Search Kwargs:** Advanced search parameters. -- **Supabase Service Key:** Service key. -- **Supabase URL:** Instance URL. -- **Table Name:** Optional table name. - -:::info - -Ensure the Supabase service key, URL, and table name are properly configured. - -::: - - - - ---- - +#### Outputs -### Supabase Search {#fd02d550b9b2457f91f2f4073656cb09} +| Name | Type | Description | +| ------------- | ------------------ | ----------------------------------------- | +| vector_store | SupabaseVectorStore | Supabase vector store instance | +| search_results| List[Data] | Results of similarity search | -`SupabaseSearch` searches a Supabase Vector Store for documents similar to the input. +## Upstash +This component creates an Upstash Vector Store with search capabilities. +For more information, see the [Upstash documentation](https://upstash.com/docs/introduction). -**Parameters:** +### Parameters -- **Search Type:** Type of search, such as "Similarity" or "MMR". -- **Input Value:** Search query. -- **Embedding:** Model used. -- **Query Name:** Optional query name. -- **Search Kwargs:** Advanced search parameters. -- **Supabase Service Key:** Service key. -- **Supabase URL:** Instance URL. -- **Table Name:** Optional table name. - ---- - - -### Upstash Vector - - -`UpstashVector` searches a Upstash Vector Store for documents similar to the input. It has it's own embedding -model which can be used to search documents without needing an external embedding model. - - -**Parameters:** - -- **Index URL:** The URL of the Upstash index. -- **Index Token:** The token for the Upstash index. -- **Text Key:** The key in the record to use as text. -- **Namespace:** The namespace name. A new namespace is created if not found. Leave empty for default namespace. -- **Search Query:** The search query. -- **Metadata Filter:** The metadata filter. Filters documents by metadata. 
Look at the [docs](https://upstash.com/docs/vector/features/filtering) for more information. -- **Embedding:** The embedding model used. To use Upstash's embeddings, don't provide an embedding. -- **Number of Results:** The number of results to return. - ---- - - -### Vectara {#b4e05230b62a47c792a89c5511af97ac} +#### Inputs +| Name | Type | Description | +| --------------- | ------------ | ----------------------------------------- | +| index_url | String | The URL of the Upstash index | +| index_token | SecretString | The token for the Upstash index | +| text_key | String | The key in the record to use as text | +| namespace | String | Namespace for the index | +| search_query | String | Query for similarity search | +| metadata_filter | String | Filters documents by metadata | +| ingest_data | Data | Data to be ingested into the vector store | +| embedding | Embeddings | Embedding function to use (optional) | +| number_of_results | Integer | Number of results to return in search | -`Vectara` sets up a Vectara Vector Store from files or upserted data, optimizing document retrieval. +#### Outputs +| Name | Type | Description | +| ------------- | ---------------- | ----------------------------------------- | +| vector_store | UpstashVectorStore| Upstash vector store instance | +| search_results| List[Data] | Results of similarity search | -**Parameters:** -- **Vectara Customer ID:** Customer ID. -- **Vectara Corpus ID:** Corpus ID. -- **Vectara API Key:** API key. -- **Files Url:** Optional URLs for file initialization. -- **Input:** Optional data for corpus upsert. +## Vectara -For more information, consult the [Vectara Component Documentation](https://python.langchain.com/docs/integrations/vectorstores/vectara). +This component creates a Vectara Vector Store with search capabilities. +For more information, see the [Vectara documentation](https://docs.vectara.com/docs/). 
+### Parameters -:::info +#### Inputs -If inputs or files_url are provided, they will be processed accordingly. +| Name | Type | Description | +| ---------------- | ------------ | ----------------------------------------- | +| vectara_customer_id | String | Vectara customer ID | +| vectara_corpus_id | String | Vectara corpus ID | +| vectara_api_key | SecretString | Vectara API key | +| embedding | Embeddings | Embedding function to use (optional) | +| ingest_data | List[Document/Data] | Data to be ingested into the vector store | +| search_query | String | Query for similarity search | +| number_of_results | Integer | Number of results to return in search | -::: +#### Outputs +| Name | Type | Description | +| ------------- | ----------------- | ----------------------------------------- | +| vector_store | VectaraVectorStore | Vectara vector store instance | +| search_results| List[Data] | Results of similarity search | +## Vectara Search +This component searches a Vectara Vector Store for documents based on the provided input. +For more information, see the [Vectara documentation](https://docs.vectara.com/docs/). ---- - - -### Vectara Search {#31a47221c23f4fbba4a7465cf1d89eb0} - +### Parameters -`VectaraSearch` searches a Vectara Vector Store for documents based on the provided input. +#### Inputs +| Name | Type | Description | +|---------------------|--------------|-------------------------------------------| +| search_type | String | Type of search, such as "Similarity" or "MMR" | +| input_value | String | Search query | +| vectara_customer_id | String | Vectara customer ID | +| vectara_corpus_id | String | Vectara corpus ID | +| vectara_api_key | SecretString | Vectara API key | +| files_url | List[String] | Optional URLs for file initialization | -**Parameters:** - -- **Search Type:** Type of search, such as "Similarity" or "MMR". -- **Input Value:** Search query. -- **Vectara Customer ID:** Customer ID. -- **Vectara Corpus ID:** Corpus ID. 
-- **Vectara API Key:** API key. -- **Files Url:** Optional URLs for file initialization. - ---- +#### Outputs +| Name | Type | Description | +|----------------|------------|----------------------------| +| search_results | List[Data] | Results of similarity search | -### Weaviate {#57c7969574b1418dbb079ac5fc8cd857} +## Weaviate +This component facilitates a Weaviate Vector Store setup, optimizing text and document indexing and retrieval. +For more information, see the [Weaviate Documentation](https://weaviate.io/developers/weaviate). -`Weaviate` facilitates a Weaviate Vector Store setup, optimizing text and document indexing and retrieval. +### Parameters +#### Inputs -**Parameters:** +| Name | Type | Description | +|---------------|--------------|-------------------------------------------| +| weaviate_url | String | Default instance URL | +| search_by_text| Boolean | Indicates whether to search by text | +| api_key | SecretString | Optional API key for authentication | +| index_name | String | Optional index name | +| text_key | String | Default text extraction key | +| input | Document | Document or record | +| embedding | Embeddings | Model used | +| attributes | List[String] | Optional additional attributes | -- **Weaviate URL:** Default instance URL. -- **Search By Text:** Indicates whether to search by text. -- **API Key:** Optional API key for authentication. -- **Index Name:** Optional index name. -- **Text Key:** Default text extraction key. -- **Input:** Document or record. -- **Embedding:** Model used. -- **Attributes:** Optional additional attributes. +#### Outputs -For more details, see the [Weaviate Component Documentation](https://python.langchain.com/docs/integrations/vectorstores/weaviate). +| Name | Type | Description | +|--------------|------------------|-------------------------------| +| vector_store | WeaviateVectorStore | Weaviate vector store instance | +**Note:** Ensure Weaviate instance is running and accessible. 
Verify API key, index name, text key, and attributes are set correctly. -NOTE - - -Ensure Weaviate instance is running and accessible. Verify API key, index name, text key, and attributes are set correctly. - - ---- - +## Weaviate Search -### Weaviate Search {#6d4e616dfd6143b28dc055bc1c40ecae} +This component searches a Weaviate Vector Store for documents similar to the input. +For more information, see the [Weaviate Documentation](https://weaviate.io/developers/weaviate). +### Parameters -`WeaviateSearch` searches a Weaviate Vector Store for documents similar to the input. +#### Inputs +| Name | Type | Description | +|---------------|--------------|-------------------------------------------| +| search_type | String | Type of search, such as "Similarity" or "MMR" | +| input_value | String | Search query | +| weaviate_url | String | Default instance URL | +| search_by_text| Boolean | Indicates whether to search by text | +| api_key | SecretString | Optional API key for authentication | +| index_name | String | Optional index name | +| text_key | String | Default text extraction key | +| embedding | Embeddings | Model used | +| attributes | List[String] | Optional additional attributes | -**Parameters:** +#### Outputs -- **Search Type:** Type of search, such as "Similarity" or "MMR". -- **Input Value:** Search query. -- **Weaviate URL:** Default instance URL. -- **Search By Text:** Indicates whether to search by text. -- **API Key:** Optional API key for authentication. -- **Index Name:** Optional index name. -- **Text Key:** Default text extraction key. -- **Embedding:** Model used. -- **Attributes:** Optional additional attributes. 
+| Name | Type | Description | +|----------------|------------|----------------------------| +| search_results | List[Data] | Results of similarity search | diff --git a/docs/docs/Components/components.md b/docs/docs/Components/components.md deleted file mode 100644 index 0598aebc9a0c..000000000000 --- a/docs/docs/Components/components.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: Intro to Components -sidebar_position: 0 -slug: /components ---- - - - -## Component {#0323a728d8314767adb907b998036bb4} - - ---- - - -A component is a single building block within a flow. It consists of inputs, outputs, and parameters that define their functionality. These elements provide a convenient and straightforward way to compose LLM-based applications. Learn more about components and how they work below. - - -During the flow creation process, you will notice handles (colored circles) attached to one or both sides of a component. These handles use distinct colors to indicate the types of inputs and outputs that can be interconnected. Hover over a handle to see connection details. - - -![](./565424296.png) - - -On the top right corner of the component, you'll find the a play button to run a component. Once it runs, a status icon appears and you can hover over that to visualize success or error messages. Start interacting with your AI by clicking the **Playground** at the bottom right of the workspace. - - -### Component Menu {#7e3f2f8ff5074b2fb3eee97c9cfaabe7} - - -Each component is unique, but they all have a menu bar at the top that looks something like this. - - -![](./938852908.png) - - -It consists of options such as: - -- **Code** — displays the component's Python code. You can modify the code and save it. -- **Advanced** — See and adjust all parameters of a component. -- **Freeze** — After a component runs, lock its previous output state to prevent it from re-running. - -Click **All** (the "..." button) to see all options. 
- - -### Output Preview {#ed7b3c34e0774b8a916b0e68821c9a7a} - - -Langflow includes an output visualizer for components that opens a pop-up screen. This allows you to easily inspect and monitor transmissions between components, providing instant feedback on your workflows. - - -![](./987204819.png) - - -### Advanced Settings {#b6430d4903df44f0ba4618a558c83d7b} - - -Langflow components can be edited by clicking the **Advanced Settings** button. - - -Hide parameters with the **Show** button to reduce complexity and keep the workspace clean and intuitive for experimentation. - - -You can also double-click a component's name and description to modify those. Component descriptions accept markdown syntax. - - -### Group Components {#c3f5ed818e3b40ceb6534dc358e1a5f2} - - -Multiple components can be grouped into a single component for reuse. This is useful when combining large flows into single components (like RAG with a vector database, for example) and saving space. - -1. Hold **Shift** and drag to select components. -2. Select **Group**. -3. The components merge into a single component. -4. Double-click the name and description to change them. -5. Save your grouped component to in the sidebar for later use! - -[group video here] - - -### Component Version {#887fd587589448dc8c27336d1c235b9b} - - -A component's state is stored in a database, while sidebar components are like starter templates. As soon as you drag a component from the sidebar to the workspace, the two components are no longer in parity. - - -The component will keep the version number it was initialized to the workspace with. Click the **Update Component** icon (exclamation mark) to bring the component up to the `latest` version. This will change the code of the component in place so you can validate that the component was updated by checking its Python code before and after updating it. 
- - -![](./263391508.png) - diff --git a/docs/docs/Configuration/1009571828.png b/docs/docs/Configuration/1009571828.png deleted file mode 100644 index 3ffdd179367f..000000000000 Binary files a/docs/docs/Configuration/1009571828.png and /dev/null differ diff --git a/docs/docs/Configuration/1125619904.png b/docs/docs/Configuration/1125619904.png deleted file mode 100644 index 48349ff899b5..000000000000 Binary files a/docs/docs/Configuration/1125619904.png and /dev/null differ diff --git a/docs/docs/Configuration/1289235516.png b/docs/docs/Configuration/1289235516.png deleted file mode 100644 index 201b22a8728d..000000000000 Binary files a/docs/docs/Configuration/1289235516.png and /dev/null differ diff --git a/docs/docs/Configuration/1813063533.png b/docs/docs/Configuration/1813063533.png deleted file mode 100644 index 77077d463cf5..000000000000 Binary files a/docs/docs/Configuration/1813063533.png and /dev/null differ diff --git a/docs/docs/Configuration/1926471667.png b/docs/docs/Configuration/1926471667.png deleted file mode 100644 index 220e604fd8fd..000000000000 Binary files a/docs/docs/Configuration/1926471667.png and /dev/null differ diff --git a/docs/docs/Configuration/383358552.png b/docs/docs/Configuration/383358552.png deleted file mode 100644 index aa23164deaba..000000000000 Binary files a/docs/docs/Configuration/383358552.png and /dev/null differ diff --git a/docs/docs/Configuration/563306242.png b/docs/docs/Configuration/563306242.png deleted file mode 100644 index c8b887ca1bd0..000000000000 Binary files a/docs/docs/Configuration/563306242.png and /dev/null differ diff --git a/docs/docs/Configuration/596474918.png b/docs/docs/Configuration/596474918.png deleted file mode 100644 index eb1c8de37171..000000000000 Binary files a/docs/docs/Configuration/596474918.png and /dev/null differ diff --git a/docs/docs/Configuration/711485342.gif b/docs/docs/Configuration/711485342.gif deleted file mode 100644 index 5ba0f95b27c6..000000000000 Binary files 
a/docs/docs/Configuration/711485342.gif and /dev/null differ diff --git a/docs/docs/Configuration/945175915.png b/docs/docs/Configuration/945175915.png deleted file mode 100644 index a3b0883be1ab..000000000000 Binary files a/docs/docs/Configuration/945175915.png and /dev/null differ diff --git a/docs/docs/Configuration/My-Collection.md b/docs/docs/Configuration/My-Collection.md deleted file mode 100644 index ba631ae8014f..000000000000 --- a/docs/docs/Configuration/My-Collection.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: My Collection -sidebar_position: 3 -slug: /365085a8-a90a-43f9-a779-f8769ec7eca1 ---- - - - -:::info - -This page may contain outdated information. It will be updated as soon as possible. - -::: - - - - -My Collection is a space in Langflow where users can manage, organize, and access their flows and components. Flows and components are displayed as individual cards that provide relevant information. - - -![](./1289235516.png) - -- **Folders**: Users can organize their projects into folders. Default folders include "My Projects" and the ability to create new folders. Hover over a folder to access options to download or delete it. -- **Search Bar** Enables users to quickly search through their flows and components. -- **Select All**: This feature allows users to select all projects displayed on the page for batch actions like moving, deleting, or exporting. - -Click on a flow card to open it in Langflow Workspace or use the **Playground Button** for direct access to execute and interact with the flow’s chatbot interface. - - -## Folders {#776a3866273f4efbbbb2febdfc1baa12} - - ---- - - -Folders can help you keep your projects organized in Langflow. They help you manage and categorize your work efficiently, making it easier to find and access the resources you need. - - -![](./1926471667.png) - - -Multiple projects can be stored in **folders**. - - -Folders allow you to categorize flows and components into manageable groups. 
This makes it easier to find and access specific projects quickly. - - -**My Projects** is a default folder where all new projects and components are initially stored unless specified otherwise. Users can create custom folders to better organize their work according to specific needs. - - -Hovering over a folder in Langflow provides options to either remove or download the entire folder, allowing you to keep an offline copy or migrate projects between environments - - -Create new folders with the **New folder** button. One folder can store multiple projects (as the default My Projects folder does). - - -You can download folders of projects as a single JSON file, and upload files and flows to your folder. - - -Click the **Trash** icon to delete a folder. - - - -### How to Create Folders {#5ba5abe995c843e4a429e41413f9d539} - -1. **Navigate to the Home Screen:** - - Go to your Langflow Home Page (outside of projects). -2. **Create a New Folder:** - - Click on the "New Folder" button - - ![](./1125619904.png) - - - Double-click the new folder created to rename your folder appropriately to reflect its contents. - - ![](./945175915.png) - -3. **Move Files:** - - Drag and drop files into the corresponding folders and subfolders to keep everything organized. - - ![](./711485342.gif) - - -### Best Practices for Organizing Folders {#66f23f8e129a48598a7bb4565a508360} - -- **Categorize by Project:** Create a main folder for each project, then add projects for different aspects such as research, drafts, and final documents. -- **Use Descriptive Names:** Use clear and descriptive names for your folders to easily identify their contents at a glance. 
- -### Example Structure {#ebe6acad99c24d6f9aaabf18e4a17ff4} - - -Here's an example of how you might organize folders and subfolders for a Langflow project: - - -```text -Langflow -├── Research -│ ├── Articles Project -│ ├── Data Project -│ └── Notes Project -└── Documents - ├── RAG Project - └── Advanced RAG Project -``` - diff --git a/docs/docs/Configuration/configuration-api-keys.md b/docs/docs/Configuration/configuration-api-keys.md index e7f79b84aef7..68ce60752158 100644 --- a/docs/docs/Configuration/configuration-api-keys.md +++ b/docs/docs/Configuration/configuration-api-keys.md @@ -1,51 +1,29 @@ --- -title: API Keys +title: API keys sidebar_position: 1 slug: /configuration-api-keys --- - +Langflow provides an API key functionality that allows users to access their individual components and flows without traditional login authentication. The API key is a user-specific token that can be included in the request header, query parameter, or as a command line argument to authenticate API calls. This documentation outlines how to generate, use, and manage API keys in Langflow. :::info -This page may contain outdated information. It will be updated as soon as possible. +The default user and password are set using the LANGFLOW_SUPERUSER and LANGFLOW_SUPERUSER_PASSWORD environment variables. The default values are `langflow` and `langflow`, respectively. ::: - - - -Langflow provides an API key functionality that allows users to access their individual components and flows without traditional login authentication. The API key is a user-specific token that can be included in the request header or query parameter to authenticate API calls. This documentation outlines how to generate, use, and manage API keys in Langflow. - - -:::info - -The default user and password are set using the LANGFLOW_SUPERUSER and LANGFLOW_SUPERUSER_PASSWORD environment variables. The default values are langflow and langflow, respectively. 
- -::: - - - - -## Generate an API key {#c29986a69cad4cdbbe7537e383ea7207} - +## Generate an API key Generate a user-specific token to use with Langflow. +### Generate an API key with the Langflow UI -### Generate an API key with the Langflow UI {#3d90098ddd7c44b6836c0273acf57123} - -1. Click on the "API Key" icon. - - ![](./596474918.png) - -2. Click on "Create new secret key". -3. Give it an optional name. -4. Click on "Create secret key". -5. Copy the API key and store it in a secure location. - -### Generate an API key with the Langflow CLI {#2368f62fc4b8477e8080c9c2d3659d76} +1. Click your user icon and select **Settings**. +2. Click **Langflow API**, and then click **Add New**. +3. Name your key, and then click **Create Secret Key**. +4. Copy the API key and store it in a secure location. +### Generate an API key with the Langflow CLI ```shell langflow api-key @@ -64,148 +42,152 @@ python -m langflow api-key ``` - -## Use the Langflow API key {#ae787e4b0d3846aa9094fac75e0ac04f} - +## Authenticate requests with the Langflow API key Include your API key in API requests to authenticate requests to Langflow. +### Include the API key in the HTTP header -### Use the `x-api-key` header {#70965b3ad24d467ca4f90e7c13a1f394} - - -Include the `x-api-key` in the HTTP header when making API requests: - +To use the API key when making API requests with cURL, include the API key in the HTTP header. 
```shell
-curl -X POST \\
-  \\
-  -H 'Content-Type: application/json'\\
-  -H 'x-api-key: '\\
+curl -X POST \
+  "http://127.0.0.1:7860/api/v1/run/*`YOUR_FLOW_ID`*?stream=false" \
+  -H 'Content-Type: application/json' \
+  -H 'x-api-key: *`YOUR_API_KEY`*' \
   -d '{"inputs": {"text":""}, "tweaks": {}}'
-
 ```
 
+To instead pass the API key as a query parameter, do the following:
 
-With Python using `requests`:
+```shell
+curl -X POST \
+  "http://127.0.0.1:7860/api/v1/run/*`YOUR_FLOW_ID`*?x-api-key=*`YOUR_API_KEY`*&stream=false" \
+  -H 'Content-Type: application/json' \
+  -d '{"inputs": {"text":""}, "tweaks": {}}'
+```
 
+To use the API key when making API requests with the Python `requests` library, include the API key as a variable string.
 
 ```python
+import argparse
+import json
+from argparse import RawTextHelpFormatter
 import requests
 from typing import Optional
+import warnings
+try:
+    from langflow.load import upload_file
+except ImportError:
+    warnings.warn("Langflow provides a function to help you upload files to the flow. Please install langflow to use it.")
+    upload_file = None
+
+BASE_API_URL = "http://127.0.0.1:7860"
+FLOW_ID = "*`YOUR_FLOW_ID`*"
+ENDPOINT = "" # You can set a specific endpoint name in the flow settings
 
-BASE_API_URL = ""
-FLOW_ID = "4441b773-0724-434e-9cee-19d995d8f2df"
 # You can tweak the flow by adding a tweaks dictionary
 # e.g {"OpenAI-XXXXX": {"model_name": "gpt-4"}}
-TWEAKS = {}
-
-def run_flow(inputs: dict,
-             flow_id: str,
-             tweaks: Optional[dict] = None,
-             apiKey: Optional[str] = None) -> dict:
+TWEAKS = {
+  "ChatInput-8a86T": {},
+  "Prompt-pKfl9": {},
+  "ChatOutput-WcGpD": {},
+  "OpenAIModel-5UyvQ": {}
+}
+
+def run_flow(message: str,
+  endpoint: str,
+  output_type: str = "chat",
+  input_type: str = "chat",
+  tweaks: Optional[dict] = None,
+  api_key: Optional[str] = None) -> dict:
     """
     Run a flow with a given message and optional tweaks.
:param message: The message to send to the flow - :param flow_id: The ID of the flow to run + :param endpoint: The ID or the endpoint name of the flow :param tweaks: Optional tweaks to customize the flow :return: The JSON response from the flow """ - api_url = f"{BASE_API_URL}/{flow_id}" - - payload = {"inputs": inputs} - headers = {} - + api_url = f"{BASE_API_URL}/api/v1/run/{endpoint}" + + payload = { + "input_value": message, + "output_type": output_type, + "input_type": input_type, + } + headers = None if tweaks: payload["tweaks"] = tweaks - if apiKey: - headers = {"x-api-key": apiKey} - + if api_key: + headers = {"x-api-key": api_key} response = requests.post(api_url, json=payload, headers=headers) return response.json() -# Setup any tweaks you want to apply to the flow -inputs = {"text":""} -api_key = "" -print(run_flow(inputs, flow_id=FLOW_ID, tweaks=TWEAKS, apiKey=api_key)) +def main(): + parser = argparse.ArgumentParser(description="""Run a flow with a given message and optional tweaks. 
+Run it like: python .py "your message here" --endpoint "your_endpoint" --tweaks '{"key": "value"}'""", + formatter_class=RawTextHelpFormatter) + parser.add_argument("message", type=str, help="The message to send to the flow") + parser.add_argument("--endpoint", type=str, default=ENDPOINT or FLOW_ID, help="The ID or the endpoint name of the flow") + parser.add_argument("--tweaks", type=str, help="JSON string representing the tweaks to customize the flow", default=json.dumps(TWEAKS)) + parser.add_argument("--api_key", type=str, help="API key for authentication", default=None) + parser.add_argument("--output_type", type=str, default="chat", help="The output type") + parser.add_argument("--input_type", type=str, default="chat", help="The input type") + parser.add_argument("--upload_file", type=str, help="Path to the file to upload", default=None) + parser.add_argument("--components", type=str, help="Components to upload the file to", default=None) + + args = parser.parse_args() + try: + tweaks = json.loads(args.tweaks) + except json.JSONDecodeError: + raise ValueError("Invalid tweaks JSON string") + + if args.upload_file: + if not upload_file: + raise ImportError("Langflow is not installed. 
Please install it to use the upload_file function.") + elif not args.components: + raise ValueError("You need to provide the components to upload the file to.") + tweaks = upload_file(file_path=args.upload_file, host=BASE_API_URL, flow_id=args.endpoint, components=[args.components], tweaks=tweaks) + + response = run_flow( + message=args.message, + endpoint=args.endpoint, + output_type=args.output_type, + input_type=args.input_type, + tweaks=tweaks, + api_key=args.api_key + ) + + print(json.dumps(response, indent=2)) + +if __name__ == "__main__": + main() ``` - -### Use the query parameter {#febb797f3bb5403b9f070afc0fa4f453} - - -Include the API key as a query parameter in the URL: - +To pass the API key to your script with a command line argument, do the following: ```shell -curl -X POST \\ - ?x-api-key= \\ - -H 'Content-Type: application/json'\\ - -d '{"inputs": {"text":""}, "tweaks": {}}' - -``` - - -With Python using `requests`: - - -```python -import requests - -BASE_API_URL = "" -FLOW_ID = "4441b773-0724-434e-9cee-19d995d8f2df" -# You can tweak the flow by adding a tweaks dictionary -# e.g {"OpenAI-XXXXX": {"model_name": "gpt-4"}} -TWEAKS = {} - -def run_flow(inputs: dict, - flow_id: str, - tweaks: Optional[dict] = None, - apiKey: Optional[str] = None) -> dict: - """ - Run a flow with a given message and optional tweaks. 
-
-    :param message: The message to send to the flow
-    :param flow_id: The ID of the flow to run
-    :param tweaks: Optional tweaks to customize the flow
-    :return: The JSON response from the flow
-    """
-    api_url = f"{BASE_API_URL}/{flow_id}"
-
-    payload = {"inputs": inputs}
-    headers = {}
-
-    if tweaks:
-        payload["tweaks"] = tweaks
-    if apiKey:
-        api_url += f"?x-api-key={apiKey}"
-
-    response = requests.post(api_url, json=payload, headers=headers)
-    return response.json()
-
-# Setup any tweaks you want to apply to the flow
-inputs = {"text":""}
-api_key = ""
-print(run_flow(inputs, flow_id=FLOW_ID, tweaks=TWEAKS, apiKey=api_key))
-
+python your_script.py "*`YOUR_INPUT_MESSAGE`*" --api_key "*`YOUR_API_KEY`*"
 ```
 
-
-## Security Considerations {#1273eb69a61344d19827b30dba46dfd5}
+## Security considerations
 
 - **Visibility**: For security reasons, the API key cannot be retrieved again through the UI.
 - **Scope**: The key allows access only to the flows and components of the specific user to whom it was issued.
 
-## Custom API endpoint {#da933a86690a4fdeac24024472caf8a9}
-
+## Custom API endpoint
 
-Under **Project Settings** > **Endpoint Name**, you can pick a custom name for the endpoint used to call your flow from the API.
+To choose a custom name for your API endpoint, select **Project Settings** > **Endpoint Name** and name your endpoint.
 
+## Revoke an API key
 
-## Revoke an API Key {#f0ea41ea167845cea91bb5e8f90d9df0}
+To revoke an API key, delete it from the list of keys in the **Settings** menu.
 
+1. Click your user icon and select **Settings**.
+2. Click **Langflow API**.
+3. Select the keys you want to delete and click the trash can icon.
 
-To revoke an API key, delete it from the UI. This action immediately invalidates the key and prevents it from being used again.
+This action immediately invalidates the key and prevents it from being used again.
diff --git a/docs/docs/Configuration/configuration-authentication.md b/docs/docs/Configuration/configuration-authentication.md index 77e6a5c18f10..622ace740713 100644 --- a/docs/docs/Configuration/configuration-authentication.md +++ b/docs/docs/Configuration/configuration-authentication.md @@ -4,149 +4,117 @@ sidebar_position: 0 slug: /configuration-authentication --- +The login functionality in Langflow serves to authenticate users and protect sensitive routes in the application. +Starting from version 0.5, Langflow introduces an enhanced login mechanism that is governed by a few environment variables. This allows new secure features. -:::info +## Create a superuser and new users in Langflow -This page may contain outdated information. It will be updated as soon as possible. +Learn how to create a new superuser, log in to Langflow, and add new users. -::: - - - - -## Sign Up and Sign In {#f480dac5d2094d75a433de0b8e195641} - - ---- +1. Create a `.env` file and open it in your preferred editor. +2. Add the following environment variables to your file. -The login functionality in Langflow serves to authenticate users and protect sensitive routes in the application. Starting from version 0.5, Langflow introduces an enhanced login mechanism that is governed by a few environment variables. This allows new secure features. - - -## Environment Variables {#3ed7cae6f5324ba0ac14783cf2a6cc07} - - -The following environment variables are crucial in configuring the login settings: - -- _`LANGFLOW_AUTO_LOGIN`_: Determines whether Langflow should automatically log users in. Default is `True`. -- _`LANGFLOW_SUPERUSER`_: The username of the superuser. -- _`LANGFLOW_SUPERUSER_PASSWORD`_: The password for the superuser. -- _`LANGFLOW_SECRET_KEY`_: A key used for encrypting the superuser's password. -- _`LANGFLOW_NEW_USER_IS_ACTIVE`_: Determines whether new users are automatically activated. Default is `False`. 
+```bash
+LANGFLOW_AUTO_LOGIN=False
+LANGFLOW_SUPERUSER=admin
+LANGFLOW_SUPERUSER_PASSWORD=securepassword
+LANGFLOW_SECRET_KEY=randomly_generated_secure_key
+LANGFLOW_NEW_USER_IS_ACTIVE=False
+```
 
-All of these variables can be passed to the CLI command _`langflow run`_ through the _`--env-file`_ option. For example:
+For more information, see [Authentication configuration values](#values).
 
+:::tip
+The Langflow project includes a [`.env.example`](https://github.com/langflow-ai/langflow/blob/main/.env.example) file to help you get started.
+You can copy the contents of this file into your own `.env` file and replace the example values with your own preferred settings.
+:::
 
-```shell
-langflow run --env-file .env
+3. Save your `.env` file.
+4. Run Langflow with the configured environment variables.
 
+```bash
+python -m langflow run --env-file .env
 ```
 
+5. Sign in with your username `admin` and password `securepassword`.
+6. To open the **Admin Page**, click your user profile image, and then select **Admin Page**.
+You can also go to `http://127.0.0.1:7861/admin`.
+7. To add a new user, click **New User**, and then add the **Username** and **Password**.
+8. To activate the new user, select **Active**.
+The user can only sign in if you select them as **Active**.
+9. To give the user `superuser` privileges, click **Superuser**.
+10. Click **Save**.
+11. To confirm your new user has been created, sign out of Langflow, and then sign back in using your new **Username** and **Password**.
 
-:::caution
+## Manage Superuser with the Langflow CLI
 
-	It is critical not to expose these environment variables in your code repository. Always set them securely in your deployment environment, for example, using Docker secrets, Kubernetes ConfigMaps/Secrets, or dedicated secure environment configuration systems like AWS Secrets Manager.
+Langflow provides a command-line utility for interactively creating superusers:
 
+1. Enter the CLI command:
 
-:::
+```bash
+langflow superuser
+```
+2.
Langflow prompts you for a **Username** and **Password**: +``` +langflow superuser +Username: new_superuser_1 +Password: +Default folder created successfully. +Superuser created successfully. +``` +3. To confirm your new superuser was created successfully, go to the **Admin Page** at `http://127.0.0.1:7861/admin`. +## Authentication configuration values {#values} -### _`LANGFLOW_AUTO_LOGIN`_ {#8b10059e0fbc44f3bc8ce63fe7692e7e} +The following table lists the available authentication configuration variables, their descriptions, and default values: +| Variable | Description | Default | +|----------|-------------|---------| +| `LANGFLOW_AUTO_LOGIN` | Enables automatic login | `True` | +| `LANGFLOW_SUPERUSER` | Superuser username | - | +| `LANGFLOW_SUPERUSER_PASSWORD` | Superuser password | - | +| `LANGFLOW_SECRET_KEY` | Key for encrypting superuser password | - | +| `LANGFLOW_NEW_USER_IS_ACTIVE` | Automatically activates new users | `False` | -By default, this variable is set to `True`. When enabled (`True`), Langflow operates as it did in versions prior to 0.5—automatic login without requiring explicit user authentication. +### LANGFLOW_AUTO_LOGIN +By default, this variable is set to `True`. When enabled, Langflow operates as it did in versions prior to 0.5, including automatic login without requiring explicit user authentication. To disable automatic login and enforce user authentication: - ```shell export LANGFLOW_AUTO_LOGIN=False ``` +### LANGFLOW_SUPERUSER and LANGFLOW_SUPERUSER_PASSWORD -### _`LANGFLOW_SUPERUSER`_ and _`LANGFLOW_SUPERUSER_PASSWORD`_ {#a61a651a0fc7443a82cec93c07a14503} - - -These environment variables are only relevant when `LANGFLOW_AUTO_LOGIN` is set to `False`. They specify the username and password for the superuser, which is essential for administrative tasks. - - +These environment variables are only relevant when LANGFLOW_AUTO_LOGIN is set to False. 
They specify the username and password for the superuser, which is essential for administrative tasks. To create a superuser manually: - -```shell +```bash export LANGFLOW_SUPERUSER=admin export LANGFLOW_SUPERUSER_PASSWORD=securepassword ``` - -You can also use the CLI command `langflow superuser` to set up a superuser interactively. - - -### _`LANGFLOW_SECRET_KEY`_ {#977aea34e6174c58bd76107990d62a1f} - +### LANGFLOW_SECRET_KEY This environment variable holds a secret key used for encrypting the superuser's password. Make sure to set this to a secure, randomly generated string. - -```shell +```bash export LANGFLOW_SECRET_KEY=randomly_generated_secure_key - -``` - - -### _`LANGFLOW_NEW_USER_IS_ACTIVE`_ {#c8f5df9283be4e20be51e14518f5272e} - - -By default, this variable is set to `False`. When enabled (`True`), new users are automatically activated and can log in without requiring explicit activation by the superuser. - - -## Manage superusers with the CLI {#3b0c36a5cc0f4acc95c884d3de858d46} - - -Langflow provides a command-line utility for managing superusers: - - -```shell -langflow superuser ``` +### LANGFLOW_NEW_USER_IS_ACTIVE -This command prompts you to enter the username and password for the superuser, unless they are already set using environment variables. - - -## Sign in {#736ebb8c854b4c268d5e748c119a08ea} - - -With _`LANGFLOW_AUTO_LOGIN`_ set to _`False`_, Langflow requires users to sign up before they can log in. The sign-up page is the default landing page when a user visits Langflow for the first time. - - -![](./1009571828.png) - - -## Profile settings {#dd5926e12471448d99bd6849d2149dc8} - - -Once signed in, you can change your profile settings by clicking on the profile icon in the top right corner of the Langflow dashboard. This opens a dropdown menu with the following options: - -- **Admin Page**: Opens the admin page, which is only accessible to the superuser. -- **Profile Settings**: Opens the profile settings page. 
-- **Sign Out**: Logs the user out. - - ![](./563306242.png) - - -Select **Profile Settings** to change your password and your profile picture. - +By default, this variable is set to `False`. When enabled, new users are automatically activated and can log in without requiring explicit activation by the superuser. -![](./1813063533.png) -Select **Admin Page** to manage users and groups as the superuser. -![](./383358552.png) diff --git a/docs/docs/Configuration/configuration-auto-saving.md b/docs/docs/Configuration/configuration-auto-saving.md index de0f9457d532..b7283641a307 100644 --- a/docs/docs/Configuration/configuration-auto-saving.md +++ b/docs/docs/Configuration/configuration-auto-saving.md @@ -4,48 +4,45 @@ sidebar_position: 6 slug: /configuration-auto-save --- -Langflow currently supports both manual and auto-saving functionality. +Langflow supports both manual and auto-saving functionality. ## Auto-saving {#auto-saving} -When Langflow is in Auto-saving mode, all changes are saved automatically. Auto-save progress is indicated in the left side of the top bar: when a flow is being saved, a Loading icon indicates that the flow is being saved in the database. +When Langflow is in auto-saving mode, all changes are saved automatically. Auto-save progress is indicated in the left side of the top bar. -![](./saving-header.png) +* When a flow is being saved, a loading icon indicates that the flow is being saved in the database. -If you try to exit the flow page before auto-save completes, a dialog will appear to confirm you want to exit before the flow has saved. +* If you try to exit the flow page before auto-save completes, you are prompted to confirm you want to exit before the flow has saved. -![](./flow-saving.png) +* When the flow has successfully saved, click **Exit**. -When the flow has successfully saved, you can click the 'Exit' button. 
+## Disable auto-saving {#environment} -![](./flow-saved.png) +To disable auto-saving, -## Manual Saving {#manual-saving} +1. Set an environment variable in your `.env` file. -When auto-saving is disabled, you need to manually save your flow when making changes. You can still run the flow without saving, for testing purposes. - -If you make any changes and Langflow detects that the saved flow is different from the current flow, a Save button will be enabled and the last time saved will appear at the left side of the topbar. To save the flow, click the Save button, or hit Ctrl+S or Command+S. - -![](./manual-save-button.png) - -If you try to exit after making changes and not saving, a confirmation dialog will appear. You can choose to exit anyway or save and exit. - -![](./flow-unsaved.png) +```env +LANGFLOW_AUTO_SAVING=false +``` -## Disable Auto Saving {#environment} +2. Start Langflow with the values from your `.env` file. -In Langflow, all changes made in the flows are saved automatically. However, you may prefer to disable this functionality, if you want a quick way to prototype and test changes before they are actually saved to the database. +```shell +python -m langflow run --env-file .env +``` -If you wish to disable this functionality, you can run Langflow with an environment variable to tell Langflow to use manual saving. +Alternatively, disable auto-saving by passing the `--no-auto-saving` flag at startup. ```shell -langflow --no-auto-saving -# or python -m langflow --no-auto-saving ``` -If you installed the local version of Langflow, you can set an environment variable and load it automatically by inserting this line into the `.env` file inside the directory. +## Save a flow manually {#manual-saving} + +When auto-saving is disabled, you will need to manually save your flow when making changes. + +To manually save your flow, click the **Save** button or enter Ctrl+S or Command+S. 
+ +If you try to exit after making changes and not saving, a confirmation dialog appears. -```env -LANGFLOW_AUTO_SAVING=false -``` diff --git a/docs/docs/Configuration/configuration-backend-only.md b/docs/docs/Configuration/configuration-backend-only.md index 7e6d906ae265..49aa54e9f93a 100644 --- a/docs/docs/Configuration/configuration-backend-only.md +++ b/docs/docs/Configuration/configuration-backend-only.md @@ -1,154 +1,123 @@ --- -title: Backend-Only +title: Run Langflow in backend-only mode sidebar_position: 4 slug: /configuration-backend-only --- +Langflow can run in `--backend-only` mode to expose a Langflow app as an API endpoint, without running the frontend UI. +This is also known as "headless" mode. Running Langflow without the frontend is useful for automation, testing, and situations where you just need to serve a flow as a workload without creating a new flow in the UI. +To run Langflow in backend-only mode, pass the `--backend-only` flag at startup. -:::info - -This page may contain outdated information. It will be updated as soon as possible. - -::: - - - - -You can run Langflow in `--backend-only` mode to expose your Langflow app as an API, without running the frontend UI. - +```python +python3 -m langflow run --backend-only +``` -Start langflow in backend-only mode with `python3 -m langflow run --backend-only`. +The terminal prints `Welcome to ⛓ Langflow`, and Langflow will now serve requests to its API without the frontend running. +## Set up a basic prompting flow in backend-only mode -The terminal prints `Welcome to ⛓ Langflow`, and a blank window opens at `http://127.0.0.1:7864/all`. -Langflow will now serve requests to its API without the frontend running. +This example shows you how to set up a [Basic Prompting flow](/starter-projects-basic-prompting) as an endpoint in backend-only mode. +However, you can use these same instructions as guidelines for using any type of flow in backend-only mode. 
+### Prerequisites -## Prerequisites {#81dfa9407ed648889081b9d08b0e5cfe} +- [Langflow is installed](/getting-started-installation) +- [You have an OpenAI API key](https://platform.openai.com/) +- [You have a Langflow Basic Prompting flow](/starter-projects-basic-prompting) -- [Langflow installed](/getting-started-installation) -- [OpenAI API key](https://platform.openai.com/) -- [A Langflow flow created](/starter-projects-basic-prompting) +### Get your flow's ID -## Download your flow's curl call {#d2cf1b694e4741eca07fd9806516007b} +This guide assumes you have created a [Basic Prompting flow](/starter-projects-basic-prompting) or have another working flow available. -1. Click API. -2. Click **curl** > **Copy code** and save the code to your local machine. +1. In the Langflow UI, click **API**. +2. Click **curl** > **Copy code** to copy the curl command. +This command will POST input to your flow's endpoint. It will look something like this: ```text -curl -X POST \\ - "" \\ - -H 'Content-Type: application/json'\\ +curl -X POST \ + "http://127.0.0.1:7861/api/v1/run/fff8dcaa-f0f6-4136-9df0-b7cb38de42e0?stream=false" \ + -H 'Content-Type: application/json'\ -d '{"input_value": "message", "output_type": "chat", "input_type": "chat", "tweaks": { - "Prompt-kvo86": {}, - "OpenAIModel-MilkD": {}, - "ChatOutput-ktwdw": {}, - "ChatInput-xXC4F": {} + "ChatInput-8a86T": {}, + "Prompt-pKfl9": {}, + "ChatOutput-WcGpD": {}, + "OpenAIModel-5UyvQ": {} }}' - ``` +The flow ID in this example is `fff8dcaa-f0f6-4136-9df0-b7cb38de42e0`, a UUID generated by Langflow and used in the endpoint URL. +See [API](/configuration-api-keys) to change the endpoint. -Note the flow ID of `ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef`. You can find this ID in the UI as well to ensure you're querying the right flow. +3. To stop Langflow, press **Ctrl+C**. +### Start Langflow in backend-only mode -## Start Langflow in backend-only mode {#f0ba018daf3041c39c0d226dadf78d35} +1. Start Langflow in backend-only mode. 
-1. Stop Langflow with Ctrl+C. -2. Start langflow in backend-only mode with `python3 -m langflow run --backend-only`. -The terminal prints `Welcome to ⛓ Langflow`, and a blank window opens at `http://127.0.0.1:7864/all`. -Langflow will now serve requests to its API. -3. Run the curl code you copied from the UI. +```python +python3 -m langflow run --backend-only +``` + +The terminal prints `Welcome to ⛓ Langflow`. +Langflow is now serving requests to its API. + +2. Run the curl code you copied from the UI. You should get a result like this: ```shell {"session_id":"ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef:bf81d898868ac87e1b4edbd96c131c5dee801ea2971122cc91352d144a45b880","outputs":[{"inputs":{"input_value":"hi, are you there?"},"outputs":[{"results":{"result":"Arrr, ahoy matey! Aye, I be here. What be ye needin', me hearty?"},"artifacts":{"message":"Arrr, ahoy matey! Aye, I be here. What be ye needin', me hearty?","sender":"Machine","sender_name":"AI"},"messages":[{"message":"Arrr, ahoy matey! Aye, I be here. What be ye needin', me hearty?","sender":"Machine","sender_name":"AI","component_id":"ChatOutput-ktwdw"}],"component_display_name":"Chat Output","component_id":"ChatOutput-ktwdw","used_frozen_result":false}]}]}% - ``` +This confirms Langflow is receiving your POST request, running the flow, and returning the result without running the frontend. -Again, note that the flow ID matches. -Langflow is receiving your POST request, running the flow, and returning the result, all without running the frontend. Cool! +You can interact with this endpoint using the other options in the **API** menu, including the Python and Javascript APIs. +### Query the Langflow endpoint with a Python script -## Download your flow's Python API call {#5923ff9dc40843c7a22a72fa6c66540c} +Using the same flow ID, run a Python sample script to send a query and get a prettified JSON response back. - -Instead of using curl, you can download your flow as a Python API call instead. - -1. Click API. -2. 
Click **Python API** > **Copy code** and save the code to your local machine. -The code will look something like this: +1. Create a Python file and name it `langflow_api_demo.py`. ```python import requests -from typing import Optional - -BASE_API_URL = "" -FLOW_ID = "ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef" -# You can tweak the flow by adding a tweaks dictionary -# e.g {"OpenAI-XXXXX": {"model_name": "gpt-4"}} - -def run_flow(message: str, - flow_id: str, - output_type: str = "chat", - input_type: str = "chat", - tweaks: Optional[dict] = None, - api_key: Optional[str] = None) -> dict: - """Run a flow with a given message and optional tweaks. - - :param message: The message to send to the flow - :param flow_id: The ID of the flow to run - :param tweaks: Optional tweaks to customize the flow - :return: The JSON response from the flow - """ - api_url = f"{BASE_API_URL}/{flow_id}" - payload = { - "input_value": message, - "output_type": output_type, - "input_type": input_type, - } - headers = None - if tweaks: - payload["tweaks"] = tweaks - if api_key: - headers = {"x-api-key": api_key} - response = requests.post(api_url, json=payload, headers=headers) - return response.json() - - # Setup any tweaks you want to apply to the flow - - message = "message" - - print(run_flow(message=message, flow_id=FLOW_ID)) +import json -``` +def query_langflow(message): + url = "http://127.0.0.1:7861/api/v1/run/fff8dcaa-f0f6-4136-9df0-b7cb38de42e0" + headers = {"Content-Type": "application/json"} + data = {"input_value": message} + response = requests.post(url, headers=headers, json=data) + return response.json() -3. Run your Python app: +user_input = input("Enter your message: ") +result = query_langflow(user_input) - -```shell -python3 app.py +print(json.dumps(result, indent=2)) ``` +2. Run the script. +```python +python langflow_api_demo.py +``` -The result is similar to the curl call: +3. Enter your message when prompted. 
+You will get a prettified JSON response back containing a response to your message. +### Configure host and ports in backend-only mode -```json -{'session_id': 'ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef:bf81d898868ac87e1b4edbd96c131c5dee801ea2971122cc91352d144a45b880', 'outputs': [{'inputs': {'input_value': 'message'}, 'outputs': [{'results': {'result': "Arrr matey! What be yer message for this ol' pirate? Speak up or walk the plank!"}, 'artifacts': {'message': "Arrr matey! What be yer message for this ol' pirate? Speak up or walk the plank!", 'sender': 'Machine', 'sender_name': 'AI'}, 'messages': [{'message': "Arrr matey! What be yer message for this ol' pirate? Speak up or walk the plank!", 'sender': 'Machine', 'sender_name': 'AI', 'component_id': 'ChatOutput-ktwdw'}], 'component_display_name': 'Chat Output', 'component_id': 'ChatOutput-ktwdw', 'used_frozen_result': False}]}]} +To change the host and port, pass the values as additional flags. +```bash +python -m langflow run --host 127.0.0.1 --port 7860 --backend-only ``` -Your Python app POSTs to your Langflow server, and the server runs the flow and returns the result. -See [API](https://www.notion.so/administration/api) for more ways to interact with your headless Langflow server. diff --git a/docs/docs/Configuration/configuration-cli.md b/docs/docs/Configuration/configuration-cli.md index 770ff17fb228..205604103d04 100644 --- a/docs/docs/Configuration/configuration-cli.md +++ b/docs/docs/Configuration/configuration-cli.md @@ -1,168 +1,170 @@ --- -title: Command Line Interface (CLI) +title: Langflow CLI sidebar_position: 2 slug: /configuration-cli --- -:::info +# Langflow CLI -This page may contain outdated information. It will be updated as soon as possible. +The Langflow command line interface (Langflow CLI) is the main interface for managing and running the Langflow server. 
-::: - -Langflow's Command Line Interface (CLI) is a powerful tool that allows you to interact with the Langflow server from the command line. The CLI provides a wide range of commands to help you shape Langflow to your needs. +## CLI commands -The available commands are below. Navigate to their individual sections of this page to see the parameters. +The following sections describe the available CLI commands and their options, as well as their corresponding [environment variables](./environment-variables.md). -- [langflow](/configuration-cli) -- [langflow api-key](/configuration-cli) -- [langflow copy-db](/configuration-cli) -- [langflow migration](/configuration-cli) -- [langflow run](/configuration-cli) -- [langflow superuser](/configuration-cli) - -## Overview {#c50e5530289349cf8ed7bee22ba2211a} +### langflow Running the CLI without any arguments displays a list of available options and commands. -```shell -langflow -# or -langflow --help +```bash +langflow [OPTIONS] # or -python -m langflow - +python -m langflow [OPTIONS] ``` -| Command | Description | -| ----------- | ---------------------------------------------------------------------- | -| `api-key` | Creates an API key for the default superuser if AUTO_LOGIN is enabled. | -| `copy-db` | Copy the database files to the current directory (`which langflow`). | -| `migration` | Run or test migrations. | -| `run` | Run the Langflow. | -| `superuser` | Create a superuser. | - -### Options {#8a3b5b7ed55b4774ad6d533bb337ef47} +#### Options -| Option | Description | -| ---------------------- | -------------------------------------------------------------------------------- | -| `--install-completion` | Install completion for the current shell. | -| `--show-completion` | Show completion for the current shell, to copy it or customize the installation. | -| `--help` | Show this message and exit. 
| +| Option | Default | Values | Description | +|--------|------|-----------|-------------| +| `--install-completion` | *Not applicable* | *Not applicable* | Install auto-completion for the current shell. | +| `--show-completion` | *Not applicable* | *Not applicable* | Show the location of the auto-completion config file (if installed). | +| `--help` | *Not applicable* | *Not applicable* | Display information about the command usage and its options and arguments. | -## langflow api-key {#dbfc8c4c83474b83a38bdc7471bccf41} +### langflow api-key -Run the `api-key` command to create an API key for the default superuser if `LANGFLOW_AUTO_LOGIN` is set to `True`. +Create an API key for the default superuser if the [`LANGFLOW_AUTO_LOGIN` environment variable] is set to `true`. -```shell -langflow api-key +```bash +langflow api-key [OPTIONS] # or -python -m langflow api-key -╭─────────────────────────────────────────────────────────────────────╮ -│ API Key Created Successfully: │ -│ │ -│ sk-O0elzoWID1izAH8RUKrnnvyyMwIzHi2Wk-uXWoNJ2Ro │ -│ │ -│ This is the only time the API key will be displayed. │ -│ Make sure to store it in a secure location. │ -│ │ -│ The API key has been copied to your clipboard. Cmd + V to paste it. │ -╰────────────────────────────── - +python -m langflow api-key [OPTIONS] ``` -### Options {#ec2ef993dc984811b25838c8d8230b31} +#### Options -| Option | Type | Description | -| ----------- | ---- | ------------------------------------------------------------- | -| --log-level | TEXT | Logging level. [env var: LANGFLOW_LOG_LEVEL] [default: error] | -| --help | | Show this message and exit. | +| Option | Default | Values | Description | +|--------|---------|--------|-------------| +| `--log-level` | `critical` | `debug`
`info`
`warning`
`error`
`critical` | Set the logging level. | +| `--help` | *Not applicable* | *Not applicable* | Display information about the command usage and its options and arguments. | -## langflow copy-db {#729a13f4847545e5973d8f9c20f8833d} +### langflow copy-db -Run the `copy-db` command to copy the cached `langflow.db` and `langflow-pre.db` database files to the current directory. +Copy the database files to the current directory. +Copy the Langflow database files, `langflow.db` and `langflow-pre.db` (if they exist), from the cache directory to the current directory. -If the files exist in the cache directory, they will be copied to the same directory as `__main__.py`, which can be found with `which langflow`. +:::note +The current directory is the directory containing `__main__.py`. +You can find this directory by running `which langflow`. +::: + +```bash +langflow copy-db +# or +python -m langflow copy-db +``` -### Options {#7b7e6bd02b3243218e1d666711854673} +#### Options -None. +| Option | Default | Values | Description | +|--------|---------|--------|-------------| +| `--help` | *Not applicable* | *Not applicable* | Display information about the command usage and its options and arguments. | -## langflow migration {#7027c1925a444119a7a8ea2bff4bd16d} +### langflow migration -Run or test migrations with the Alembic database tool. +Run or test database migrations. -```shell -langflow migration +```bash +langflow migration [OPTIONS] # or -python -m langflow migration - +python -m langflow migration [OPTIONS] ``` -### Options {#0b38fbe97bb34edeb7740a7db58433e9} +#### Options -| Option | Description | -| ------------------- | -------------------------------------------------------------------------------------------------------------------------- | -| `--test, --no-test` | Run migrations in test mode. [default: test] | -| `--fix, --no-fix` | Fix migrations. This is a destructive operation, and should only be used if you know what you are doing. 
[default: no-fix] | -| `--help` | Show this message and exit. | +| Option | Default | Values | Description | +|--------|---------|--------|-------------| +| `--test` | `true` | [Boolean](#boolean) | Run migrations in test mode. Use `--no-test` to disable test mode. | +| `--fix` | `false` (`--no-fix`) | [Boolean](#boolean) | Fix migrations. This is a destructive operation, and all affected data will be deleted. Only use this option if you know what you are doing. | +| `--help` | *Not applicable* | *Not applicable* | Display information about the command usage and its options and arguments. | -## langflow run {#fe050aa659cb4d33a560b859d54c94ea} -Run Langflow. +### langflow run -```shell -langflow run -# or -python -m langflow run +Start the Langflow server. +```bash +langflow run [OPTIONS] +# or +python -m langflow run [OPTIONS] ``` -### Options {#4e811481ec9142f1b60309bb1ce5a2ce} - -| Option | Description | -| ---------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `--help` | Displays all available options. | -| `--host` | Defines the host to bind the server to. Can be set using the `LANGFLOW_HOST` environment variable. The default is `127.0.0.1`. | -| `--workers` | Sets the number of worker processes. Can be set using the `LANGFLOW_WORKERS` environment variable. The default is `1`. | -| `--timeout` | Sets the worker timeout in seconds. The default is `60`. | -| `--port` | Sets the port to listen on. Can be set using the `LANGFLOW_PORT` environment variable. The default is `7860`. | -| `--env-file` | Specifies the path to the .env file containing environment variables. The default is `.env`. | -| `--log-level` | Defines the logging level. Can be set using the `LANGFLOW_LOG_LEVEL` environment variable. The default is `critical`. 
| -| `--components-path` | Specifies the path to the directory containing custom components. Can be set using the `LANGFLOW_COMPONENTS_PATH` environment variable. The default is `langflow/components`. | -| `--log-file` | Specifies the path to the log file. Can be set using the `LANGFLOW_LOG_FILE` environment variable. The default is `logs/langflow.log`. | -| `--cache` | Select the type of cache to use. Options are `InMemoryCache` and `SQLiteCache`. Can be set using the `LANGFLOW_LANGCHAIN_CACHE` environment variable. The default is `SQLiteCache`. | -| `--dev`/`--no-dev` | Toggles the development mode. The default is `no-dev`. | -| `--path` | Specifies the path to the frontend directory containing build files. This option is for development purposes only. Can be set using the `LANGFLOW_FRONTEND_PATH` environment variable. | -| `--open-browser`/`--no-open-browser` | Toggles the option to open the browser after starting the server. Can be set using the `LANGFLOW_OPEN_BROWSER` environment variable. The default is `open-browser`. | -| `--remove-api-keys`/`--no-remove-api-keys` | Toggles the option to remove API keys from the projects saved in the database. Can be set using the `LANGFLOW_REMOVE_API_KEYS` environment variable. The default is `no-remove-api-keys`. | -| `--install-completion [bash\|zsh\|fish\|powershell\|pwsh]` | Installs completion for the specified shell. | -| `--show-completion [bash\|zsh\|fish\|powershell\|pwsh]` | Shows completion for the specified shell, allowing you to copy it or customize the installation. | -| `--backend-only` | This parameter, with a default value of `False`, allows running only the backend server without the frontend. It can also be set using the `LANGFLOW_BACKEND_ONLY` environment variable. | -| `--store` | This parameter, with a default value of `True`, enables the store features, use `--no-store` to deactivate it. It can be configured using the `LANGFLOW_STORE` environment variable. 
| -| `--auto-saving` | This parameter, with a default value of `True`, enables the auto-saving functionality, use `--no-auto-saving` to deactivate it. It can be configured using the `LANGFLOW_AUTO_SAVING` environment variable. | - -### CLI environment variables {#5868aaccfcc74e26968538ef4d07e756} - -You can configure many of the CLI options using environment variables. These can be exported in your operating system or added to a `.env` file and loaded using the `--env-file` option. - -A sample `.env` file named `.env.example` is included with the project. Copy this file to a new file named `.env` and replace the example values with your actual settings. If you're setting values in both your OS and the `.env` file, the `.env` settings will take precedence. - -## langflow superuser {#5944233ce0c942878e928e1f2945d717} - -Create a superuser for Langflow. - -```shell -langflow superuser +#### Options + +| Option | Default | Values | Description | +|--------|---------|--------|-------------| +| `--host` | `127.0.0.1` | String | The host on which the Langflow server will run.
See [`LANGFLOW_HOST` variable](./environment-variables.md#LANGFLOW_HOST). | +| `--workers` | `1` | Integer | Number of worker processes.
See [`LANGFLOW_WORKERS` variable](./environment-variables.md#LANGFLOW_WORKERS). | +| `--worker-timeout` | `300` | Integer | Worker timeout in seconds.
See [`LANGFLOW_WORKER_TIMEOUT` variable](./environment-variables.md#LANGFLOW_WORKER_TIMEOUT). | +| `--port` | `7860` | Integer | The port on which the Langflow server will run. The server automatically selects a free port if the specified port is in use.
See [`LANGFLOW_PORT` variable](./environment-variables.md#LANGFLOW_PORT). | +| `--components-path` | `langflow/components` | String | Path to the directory containing custom components.
See [`LANGFLOW_COMPONENTS_PATH` variable](./environment-variables.md#LANGFLOW_COMPONENTS_PATH). | +| `--env-file` | Not set | String | Path to the `.env` file containing environment variables.
See [Import environment variables from a .env file](./environment-variables.md#configure-variables-env-file). | +| `--log-level` | `critical` | `debug`
`info`
`warning`
`error`
`critical` | Set the logging level.
See [`LANGFLOW_LOG_LEVEL` variable](./environment-variables.md#LANGFLOW_LOG_LEVEL). | +| `--log-file` | `logs/langflow.log` | String | Set the path to the log file for Langflow.
See [`LANGFLOW_LOG_FILE` variable](./environment-variables.md#LANGFLOW_LOG_FILE). | +| `--cache` | `InMemoryCache` | `InMemoryCache`
`SQLiteCache` | Type of cache to use.
See [`LANGFLOW_LANGCHAIN_CACHE` variable](./environment-variables.md#LANGFLOW_LANGCHAIN_CACHE). | +| `--dev` | `false` (`--no-dev`) | [Boolean](#boolean) | Run Langflow in development mode (may contain bugs).
See [`LANGFLOW_DEV` variable](./environment-variables.md#LANGFLOW_DEV). | +| `--frontend-path` | `./frontend` | String | Path to the frontend directory containing build files. This is for development purposes only.
See [`LANGFLOW_FRONTEND_PATH` variable](./environment-variables.md#LANGFLOW_FRONTEND_PATH). | +| `--open-browser` | `true` | [Boolean](#boolean) | Open the system web browser on startup. Use `--no-open-browser` to disable opening the system web browser on startup.
See [`LANGFLOW_OPEN_BROWSER` variable](./environment-variables.md#LANGFLOW_OPEN_BROWSER). | +| `--remove-api-keys` | `false` (`--no-remove-api-keys`) | [Boolean](#boolean) | Remove API keys from the projects saved in the database.
See [`LANGFLOW_REMOVE_API_KEYS` variable](./environment-variables.md#LANGFLOW_REMOVE_API_KEYS). | +| `--backend-only` | `false` (`--no-backend-only`) | [Boolean](#boolean) | Only run Langflow's backend server (no frontend).
See [`LANGFLOW_BACKEND_ONLY` variable](./environment-variables.md#LANGFLOW_BACKEND_ONLY). | +| `--store` | `true` | [Boolean](#boolean) | Enable the Langflow Store features. Use `--no-store` to disable the Langflow Store features.
See [`LANGFLOW_STORE` variable](./environment-variables.md#LANGFLOW_STORE). | +| `--auto-saving` | `true` | [Boolean](#boolean) | Enable flow auto-saving. Use `--no-auto-saving` to disable flow auto-saving.
See [`LANGFLOW_AUTO_SAVING` variable](./environment-variables.md#LANGFLOW_AUTO_SAVING). | +| `--auto-saving-interval` | `1000` | Integer | Set the interval for flow auto-saving in milliseconds.
See [`LANGFLOW_AUTO_SAVING_INTERVAL` variable](./environment-variables.md#LANGFLOW_AUTO_SAVING_INTERVAL). | +| `--health-check-max-retries` | `5` | Integer | Set the maximum number of retries for the health check. Use `--no-health-check-max-retries` to disable the maximum number of retries for the health check.
See [`LANGFLOW_HEALTH_CHECK_MAX_RETRIES` variable](./environment-variables.md#LANGFLOW_HEALTH_CHECK_MAX_RETRIES). | +| `--max-file-size-upload` | `100` | Integer | Set the maximum file size for the upload in megabytes.
See [`LANGFLOW_MAX_FILE_SIZE_UPLOAD` variable](./environment-variables.md#LANGFLOW_MAX_FILE_SIZE_UPLOAD). | +| `--help` | *Not applicable* | *Not applicable* | Display information about the command usage and its options and arguments. | + +### langflow superuser + +Create a superuser account. + +```bash +langflow superuser [OPTIONS] # or -python -m langflow superuser +python -m langflow superuser [OPTIONS] ``` -### Options {#f333c5635ead4c3d95985467bb08cc8f} +#### Options + +| Option | Default | Values | Description | +|--------|---------|--------|-------------| +| `--username` | Required | String | Specify the name for the superuser.
See [`LANGFLOW_SUPERUSER` variable](./environment-variables.md#LANGFLOW_SUPERUSER). | +| `--password` | Required | String | Specify the password for the superuser.
See [`LANGFLOW_SUPERUSER_PASSWORD` variable](./environment-variables.md#LANGFLOW_SUPERUSER_PASSWORD). | +| `--log-level` | `critical` | `debug`
`info`
`warning`
`error`
`critical` | Set the logging level. | + +## Precedence + +Langflow CLI options override the values of corresponding [environment variables](./environment-variables.md). + +For example, if you have `LANGFLOW_PORT=7860` defined as an environment variable, but you run the CLI with `--port 7880`, then Langflow will set the port to **`7880`** (the value passed with the CLI). + +## Assign values + +There are two ways you can assign a value to a CLI option. +You can write the option flag and its value with a single space between them: `--option value`. +Or, you can write them using an equals sign (`=`) between the option flag and the value: `--option=value`. + +Values that contain spaces must be surrounded by quotation marks: `--option 'Value with Spaces'` or `--option='Value with Spaces'`. + +### Boolean values {#boolean} + +Boolean options turn a behavior on or off, and therefore accept no arguments. +To activate a boolean option, type it on the command line. +For example: + +```bash +langflow run --remove-api-keys +``` -| Option | Type | Description | -| ------------- | ---- | ------------------------------------------------------------- | -| `--username` | TEXT | Username for the superuser. [default: None] [required] | -| `--password` | TEXT | Password for the superuser. [default: None] [required] | -| `--log-level` | TEXT | Logging level. [env var: LANGFLOW_LOG_LEVEL] [default: error] | -| `--help` | | Show this message and exit. | +All boolean options have a corresponding option that negates it. +For example, the negating option for `--remove-api-keys` is `--no-remove-api-keys`. +These options let you negate boolean options that you may have set using [environment variables](./environment-variables.md). 
diff --git a/docs/docs/Configuration/configuration-custom-database.md b/docs/docs/Configuration/configuration-custom-database.md new file mode 100644 index 000000000000..a81a7cf116e8 --- /dev/null +++ b/docs/docs/Configuration/configuration-custom-database.md @@ -0,0 +1,76 @@ +--- +title: Configure an external PostgreSQL database +sidebar_position: 8 +--- +Langflow's default database is [SQLite](https://www.sqlite.org/docs.html), but you can configure Langflow to use PostgreSQL instead. + +This guide will walk you through the process of setting up an external database for Langflow by replacing the default SQLite connection string `sqlite:///./langflow.db` with PostgreSQL. + +## Prerequisite + +* A [PostgreSQL](https://www.pgadmin.org/download/) database + +## Connect Langflow to PostgreSQL + +To connect Langflow to PostgreSQL, follow these steps. + +1. Find your PostgreSQL database's connection string. +It looks like `postgresql://user:password@host:port/dbname`. +For example, if you started PostgreSQL with this Docker command: + +``` +docker run --name some-postgres -e POSTGRES_PASSWORD=mysecretpassword -d -p 5432:5432 postgres +``` + +Your connection string would be `postgresql://postgres:mysecretpassword@localhost:5432/postgres`, because the `postgres` image's default superuser is named `postgres` (the `--name` value is the container name, not the database user). + +2. Create a `.env` file for configuring Langflow. +``` +touch .env +``` + +3. To set the database URL environment variable, add it to your `.env` file: +```plaintext +LANGFLOW_DATABASE_URL="postgresql://user:password@localhost:5432/dbname" +``` + +:::tip +The Langflow project includes a [`.env.example`](https://github.com/langflow-ai/langflow/blob/main/.env.example) file to help you get started. +You can copy the contents of this file into your own `.env` file and replace the example values with your own preferred settings. +Replace the value for `LANGFLOW_DATABASE_URL` with your PostgreSQL connection string. +::: + +4. Run Langflow with the `.env` file: +```bash +langflow run --env-file .env +``` + +5. 
In Langflow, create traffic by running a flow. +6. Inspect your PostgreSQL deployment's tables and activity. +You will see new tables and traffic created. + +## Example Langflow and PostgreSQL docker-compose.yml + +The Langflow project includes a [`docker-compose.yml`](https://github.com/langflow-ai/langflow/blob/main/docker_example/docker-compose.yml) file for quick deployment with PostgreSQL. + +This configuration launches Langflow and PostgreSQL containers, with Langflow pre-configured to use the PostgreSQL database. Customize the database credentials as needed. + +To start the services, navigate to the `/docker_example` directory, and then run `docker-compose up`. + +```yaml +services: + langflow: + image: langflow-ai/langflow:latest + environment: + - LANGFLOW_DATABASE_URL=postgresql://user:password@postgres:5432/langflow + depends_on: + - postgres + + postgres: + image: postgres:15 + environment: + - POSTGRES_USER=user + - POSTGRES_PASSWORD=password + - POSTGRES_DB=langflow +``` + diff --git a/docs/docs/Configuration/configuration-global-variables.md b/docs/docs/Configuration/configuration-global-variables.md new file mode 100644 index 000000000000..ea63dea6f14c --- /dev/null +++ b/docs/docs/Configuration/configuration-global-variables.md @@ -0,0 +1,207 @@ +--- +title: Global variables +sidebar_position: 5 +slug: /configuration-global-variables +--- + +import ReactPlayer from "react-player"; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +Global variables let you store and reuse generic input values and credentials across your projects. +You can use a global variable in any text input field that displays the 🌐 icon. + +Langflow stores global variables in its internal database, and encrypts the values using a secret key. + +## Create a global variable {#3543d5ef00eb453aa459b97ba85501e5} + +1. In the Langflow UI, click your profile icon, and then select **Settings**. + +2. Click **Global Variables**. + +3. Click **Add New**. + +4. 
In the **Create Variable** dialog, enter a name for your variable in the **Variable Name** field. + +5. Optional: Select a **Type** for your global variable. The available types are **Generic** (default) and **Credential**. + + No matter which **Type** you select, Langflow still encrypts the **Value** of the global variable. + +6. Enter the **Value** for your global variable. + +7. Optional: Use the **Apply To Fields** menu to select one or more fields that you want Langflow to automatically apply your global variable to. +For example, if you select **OpenAI API Key**, Langflow will automatically apply the variable to any **OpenAI API Key** field. + +8. Click **Save Variable**. + +You can now select your global variable from any text input field that displays the 🌐 icon. + +:::info +Because values are encrypted, you can't view the actual values of your global variables. +In **Settings > Global Variables**, the **Value** column shows the encrypted hash for **Generic** type variables, and shows nothing for **Credential** type variables. +::: + +## Edit a global variable + +1. In the Langflow UI, click your profile icon, and then select **Settings**. + +2. Click **Global Variables**. + +3. Click on the global variable you want to edit. + +4. In the **Update Variable** dialog, you can edit the following fields: **Variable Name**, **Value**, and **Apply To Fields**. + +5. Click **Update Variable**. + +## Delete a global variable + +:::warning +Deleting a global variable permanently deletes any references to it from your existing projects. +::: + +1. In the Langflow UI, click your profile icon, and then select **Settings**. + +2. Click **Global Variables**. + +3. Click the checkbox next to the global variable that you want to delete. + +4. Click the Trash icon. + +The global variable, and any existing references to it, are deleted. 
+ +## Add global variables from the environment {#76844a93dbbc4d1ba551ea1a4a89ccdd} + +You can use the `LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT` environment variable to source global variables from your runtime environment. + + + + + +If you installed Langflow locally, you must define the `LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT` environment variable in a `.env` file. + +1. Create a `.env` file and open it in your preferred editor. + +2. Add the `LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT` environment variable as follows: + + ```plaintext title=".env" + LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=VARIABLE1,VARIABLE2 + ``` + + Replace `VARIABLE1,VARIABLE2` with a comma-separated list (no spaces) of variables that you want Langflow to source from the environment. + For example, `my_key,some_string`. + +3. Save and close the file. + +4. Start Langflow with the `.env` file: + + ```bash + VARIABLE1="VALUE1" VARIABLE2="VALUE2" python -m langflow run --env-file .env + ``` + + :::note + In this example, the environment variables (`VARIABLE1="VALUE1"` and `VARIABLE2="VALUE2"`) are prefixed to the startup command. + This is a rudimentary method for exposing environment variables to Python on the command line, and is meant for illustrative purposes. + Make sure to expose your environment variables to Langflow in a manner that best suits your own environment. + ::: + +5. Confirm that Langflow successfully sourced the global variables from the environment. + + 1. In the Langflow UI, click your profile icon, and then select **Settings**. + + 2. Click **Global Variables**. + + The environment variables appear in the list of **Global Variables**. + + + + + +If you're using Docker, you can pass `LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT` directly from the command line or from a `.env` file. 
+ +To pass `LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT` directly from the command line: + +```bash +docker run -it --rm \ + -p 7860:7860 \ + -e LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT="VARIABLE1,VARIABLE2" \ + -e VARIABLE1="VALUE1" \ + -e VARIABLE2="VALUE2" \ + langflowai/langflow:latest +``` + +To pass `LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT` from a `.env` file: + +```bash +docker run -it --rm \ + -p 7860:7860 \ + --env-file .env \ + -e VARIABLE1="VALUE1" \ + -e VARIABLE2="VALUE2" \ + langflowai/langflow:latest +``` + + + + + +:::info +When adding global variables from the environment, the following limitations apply: + +- You can only source the **Name** and **Value** from the environment. + To add additional parameters, such as the **Apply To Fields** parameter, you must edit the global variables in the Langflow UI. + +- Global variables that you add from the the environment always have the **Credential** type. +::: + +:::tip +If you want to explicitly prevent Langflow from sourcing global variables from the environment, set `LANGFLOW_STORE_ENVIRONMENT_VARIABLES` to `false` in your `.env` file: + +```plaintext title=".env" +LANGFLOW_STORE_ENVIRONMENT_VARIABLES=false +``` + +::: + + +## Precautions + +Even though Langflow stores global variables in its internal database, and encrypts the values using a secret key, you should consider taking extra precautions to ensure the database and secret key are protected. + +### Use a custom secret key + +By default, Langflow generates a random secret key. +However, you should provide your own secret key, as it's more secure to use a key that is already known to you. + +Use the `LANGFLOW_SECRET_KEY` environment variable to provide a custom value for the secret key when you start Langflow. + +### Protect the secret key + +Make sure to store the secret key in a secure location. + +By default, Langflow stores the secret key in its configuration directory. 
+The location of the configuration directory depends on your operating system: + +- macOS: `~/Library/Caches/langflow/secret_key` +- Linux: `~/.cache/langflow/secret_key` +- Windows: `%USERPROFILE%\AppData\Local\langflow\secret_key` + +To change the location of the the configuration directory, and thus the location of the secret key, set the `LANGFLOW_CONFIG_DIR` environment variable to your preferred storage directory. + +### Protect the database + +Make sure to store Langflow's internal database file in a secure location, and take regular backups to prevent accidental data loss. + +By default, Langflow stores the database file in its installation directory. +The location of the file depends on your operating system and installation method: + +- macOS: `PYTHON_LOCATION/site-packages/langflow/langflow.db` +- Linux: `PYTHON_LOCATION/site-packages/langflow/langflow.db` +- Windows: `PYTHON_LOCATION\Lib\site-packages\langflow\langflow.db` + +To change the location of the database file, follow these steps: + +1. Set the `LANGFLOW_SAVE_DB_IN_CONFIG_DIR` environment variable to `true`. +2. Set the `LANGFLOW_CONFIG_DIR` environment variable to your preferred storage directory. + + + \ No newline at end of file diff --git a/docs/docs/Configuration/environment-variables.md b/docs/docs/Configuration/environment-variables.md new file mode 100644 index 000000000000..fa1d907b6a64 --- /dev/null +++ b/docs/docs/Configuration/environment-variables.md @@ -0,0 +1,144 @@ +--- +title: Environment variables +sidebar_position: 7 +slug: /environment-variables +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +Langflow lets you configure a number of settings using environment variables. + +## Configure environment variables + +Langflow recognizes [supported environment variables](#supported-variables) from the following sources: + +- Environment variables that you've set in your terminal. 
+- Environment variables that you've imported from a `.env` file using the `--env-file` option in the Langflow CLI. + +You can choose to use one source exclusively, or use both sources together. +If you choose to use both sources together, be aware that environment variables imported from a `.env` file take [precedence](#precedence) over those set in your terminal. + +### Set environment variables in your terminal {#configure-variables-terminal} + +Run the following commands to set environment variables for your current terminal session: + + + + +```bash +export VARIABLE_NAME='VALUE' +``` + + + +``` +set VARIABLE_NAME='VALUE' +``` + + + +```bash +docker run -it --rm \ + -p 7860:7860 \ + -e VARIABLE_NAME='VALUE' \ + langflowai/langflow:latest +``` + + + + +When you start Langflow, it looks for environment variables that you've set in your terminal. +If it detects a supported environment variable, then it automatically adopts the specified value, subject to [precedence rules](#precedence). + +### Import environment variables from a .env file {#configure-variables-env-file} + +1. Create a `.env` file and open it in your preferred editor. + +2. Add your environment variables to the file: + + ```plaintext title=".env" + VARIABLE_NAME='VALUE' + VARIABLE_NAME='VALUE' + ``` + + :::tip + The Langflow project includes a [`.env.example`](https://github.com/langflow-ai/langflow/blob/main/.env.example) file to help you get started. + You can copy the contents of this file into your own `.env` file and replace the example values with your own preferred settings. + ::: + +3. Save and close the file. + +4. 
Start Langflow using the `--env-file` option to define the path to your `.env` file:
+
+
+
+    ```bash
+    python -m langflow run --env-file .env
+    ```
+
+
+
+    ```bash
+    docker run -it --rm \
+        -p 7860:7860 \
+        --env-file .env \
+        langflowai/langflow:latest
+    ```
+
+
+
+On startup, Langflow imports the environment variables from your `.env` file, as well as any that you [set in your terminal](#configure-variables-terminal), and adopts their specified values.
+
+## Precedence {#precedence}
+
+Environment variables [defined in the .env file](#configure-variables-env-file) take precedence over those [set in your terminal](#configure-variables-terminal).
+That means, if you happen to set the same environment variable in both your terminal and your `.env` file, Langflow adopts the value from the `.env` file.
+
+:::info[CLI precedence]
+[Langflow CLI options](./configuration-cli.md) override the value of corresponding environment variables defined in the `.env` file as well as any environment variables set in your terminal.
+:::
+
+## Supported environment variables {#supported-variables}
+
+The following table lists the environment variables supported by Langflow.
+
+| Variable | Format / Values | Default | Description |
+|----------|---------------|---------|-------------|
+| `DO_NOT_TRACK` | Boolean | `false` | If enabled, Langflow will not track telemetry. |
+| `LANGFLOW_AUTO_LOGIN` | Boolean | `true` | Enable automatic login for Langflow. Set to `false` to disable automatic login and require the login form to log into the Langflow UI. Setting to `false` requires [`LANGFLOW_SUPERUSER`](#LANGFLOW_SUPERUSER) and [`LANGFLOW_SUPERUSER_PASSWORD`](#LANGFLOW_SUPERUSER_PASSWORD) to be set. |
+| `LANGFLOW_AUTO_SAVING` | Boolean | `true` | Enable flow auto-saving.
See [`--auto-saving` option](./configuration-cli.md#run-auto-saving). | +| `LANGFLOW_AUTO_SAVING_INTERVAL` | Integer | `1000` | Set the interval for flow auto-saving in milliseconds.
See [`--auto-saving-interval` option](./configuration-cli.md#run-auto-saving-interval). | +| `LANGFLOW_BACKEND_ONLY` | Boolean | `false` | Only run Langflow's backend server (no frontend).
See [`--backend-only` option](./configuration-cli.md#run-backend-only). | +| `LANGFLOW_CACHE_TYPE` | `async`
`redis`
`memory`
`disk`
`critical` | `async` | Set the cache type for Langflow.
If you set the type to `redis`, then you must also set the following environment variables: [`LANGFLOW_REDIS_HOST`](#LANGFLOW_REDIS_HOST), [`LANGFLOW_REDIS_PORT`](#LANGFLOW_REDIS_PORT), [`LANGFLOW_REDIS_DB`](#LANGFLOW_REDIS_DB), and [`LANGFLOW_REDIS_CACHE_EXPIRE`](#LANGFLOW_REDIS_CACHE_EXPIRE). | +| `LANGFLOW_COMPONENTS_PATH` | String | `langflow/components` | Path to the directory containing custom components.
See [`--components-path` option](./configuration-cli.md#run-components-path). | +| `LANGFLOW_CONFIG_DIR` | String | | Set the Langflow configuration directory where files, logs, and the Langflow database are stored. | +| `LANGFLOW_DATABASE_URL` | String | | Set the database URL for Langflow. If you don't provide one, Langflow uses an SQLite database. | +| `LANGFLOW_DEV` | Boolean | `false` | Run Langflow in development mode (may contain bugs).
See [`--dev` option](./configuration-cli.md#run-dev). | +| `LANGFLOW_FALLBACK_TO_ENV_VAR` | Boolean | `true` | If enabled, [global variables](../Configuration/configuration-global-variables.md) set in the Langflow UI fall back to an environment variable with the same name when Langflow fails to retrieve the variable value. | +| `LANGFLOW_FRONTEND_PATH` | String | `./frontend` | Path to the frontend directory containing build files. This is for development purposes only.
See [`--frontend-path` option](./configuration-cli.md#run-frontend-path). | +| `LANGFLOW_HEALTH_CHECK_MAX_RETRIES` | Integer | `5` | Set the maximum number of retries for the health check.
See [`--health-check-max-retries` option](./configuration-cli.md#run-health-check-max-retries). | +| `LANGFLOW_HOST` | String | `127.0.0.1` | The host on which the Langflow server will run.
See [`--host` option](./configuration-cli.md#run-host). | +| `LANGFLOW_LANGCHAIN_CACHE` | `InMemoryCache`
`SQLiteCache` | `InMemoryCache` | Type of cache to use.
See [`--cache` option](./configuration-cli.md#run-cache). | +| `LANGFLOW_LOG_FILE` | String | `logs/langflow.log` | Set the path to the log file for Langflow.
See [`--log-file` option](./configuration-cli.md#run-log-file). | +| `LANGFLOW_LOG_LEVEL` | `debug`
`info`
`warning`
`error`
`critical` | `critical` | Set the logging level.
See [`--log-level` option](./configuration-cli.md#run-log-level). | +| `LANGFLOW_MAX_FILE_SIZE_UPLOAD` | Integer | `100` | Set the maximum file size for the upload in megabytes.
See [`--max-file-size-upload` option](./configuration-cli.md#run-max-file-size-upload). | +| `LANGFLOW_OPEN_BROWSER` | Boolean | `true` | Open the system web browser on startup.
See [`--open-browser` option](./configuration-cli.md#run-open-browser). | +| `LANGFLOW_PORT` | Integer | `7860` | The port on which the Langflow server will run. The server automatically selects a free port if the specified port is in use.
See [`--port` option](./configuration-cli.md#run-port). | +| `LANGFLOW_PROMETHEUS_ENABLED` | Boolean | `false` | Expose Prometheus metrics. | +| `LANGFLOW_PROMETHEUS_PORT` | Integer | `9090` | Set the port on which Langflow exposes Prometheus metrics. | +| `LANGFLOW_REDIS_CACHE_EXPIRE` | Integer | `3600` | See [`LANGFLOW_CACHE_TYPE`](#LANGFLOW_CACHE_TYPE). | +| `LANGFLOW_REDIS_DB` | Integer | `0` | See [`LANGFLOW_CACHE_TYPE`](#LANGFLOW_CACHE_TYPE). | +| `LANGFLOW_REDIS_HOST` | String | `localhost` | See [`LANGFLOW_CACHE_TYPE`](#LANGFLOW_CACHE_TYPE). | +| `LANGFLOW_REDIS_PORT` | String | `6379` | See [`LANGFLOW_CACHE_TYPE`](#LANGFLOW_CACHE_TYPE). | +| `LANGFLOW_REMOVE_API_KEYS` | Boolean | `false` | Remove API keys from the projects saved in the database.
See [`--remove-api-keys` option](./configuration-cli.md#run-remove-api-keys). | +| `LANGFLOW_SAVE_DB_IN_CONFIG_DIR` | Boolean | `false` | Save the Langflow database in [`LANGFLOW_CONFIG_DIR`](#LANGFLOW_CONFIG_DIR) instead of in the Langflow package directory. Note, when this variable is set to default (`false`), the database isn't shared between different virtual environments and the database is deleted when you uninstall Langflow. | +| `LANGFLOW_STORE` | Boolean | `true` | Enable the Langflow Store.
See [`--store` option](./configuration-cli.md#run-store). | +| `LANGFLOW_STORE_ENVIRONMENT_VARIABLES` | Boolean | `true` | Store environment variables as [global variables](../Configuration/configuration-global-variables.md) in the database. | +| `LANGFLOW_SUPERUSER` | String | Not set | Set the name for the superuser. Required if [`LANGFLOW_AUTO_LOGIN`](#LANGFLOW_AUTO_LOGIN) is set to `false`.
See [`superuser --username` option](./configuration-cli.md#superuser-username). | +| `LANGFLOW_SUPERUSER_PASSWORD` | String | Not set | Set the password for the superuser. Required if [`LANGFLOW_AUTO_LOGIN`](#LANGFLOW_AUTO_LOGIN) is set to `false`.
See [`superuser --password` option](./configuration-cli.md#superuser-password).| +| `LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT` | String | Not set | Comma-separated list of environment variables to get from the environment and store as [global variables](../Configuration/configuration-global-variables.md). | +| `LANGFLOW_WORKER_TIMEOUT` | Integer | `300` | Worker timeout in seconds.
See [`--worker-timeout` option](./configuration-cli.md#run-worker-timeout). | +| `LANGFLOW_WORKERS` | Integer | `1` | Number of worker processes.
See [`--workers` option](./configuration-cli.md#run-workers). | diff --git a/docs/docs/Configuration/flow-saved.png b/docs/docs/Configuration/flow-saved.png deleted file mode 100644 index c0ded77f201f..000000000000 Binary files a/docs/docs/Configuration/flow-saved.png and /dev/null differ diff --git a/docs/docs/Configuration/flow-saving.png b/docs/docs/Configuration/flow-saving.png deleted file mode 100644 index 65d34017f195..000000000000 Binary files a/docs/docs/Configuration/flow-saving.png and /dev/null differ diff --git a/docs/docs/Configuration/flow-unsaved.png b/docs/docs/Configuration/flow-unsaved.png deleted file mode 100644 index afab2a404bb7..000000000000 Binary files a/docs/docs/Configuration/flow-unsaved.png and /dev/null differ diff --git a/docs/docs/Configuration/manual-save-button.png b/docs/docs/Configuration/manual-save-button.png deleted file mode 100644 index 735725e5b3d7..000000000000 Binary files a/docs/docs/Configuration/manual-save-button.png and /dev/null differ diff --git a/docs/docs/Configuration/saving-header.png b/docs/docs/Configuration/saving-header.png deleted file mode 100644 index 607e02b65ce0..000000000000 Binary files a/docs/docs/Configuration/saving-header.png and /dev/null differ diff --git a/docs/docs/Contributing/683296796.png b/docs/docs/Contributing/683296796.png deleted file mode 100644 index 66965f8ed5c6..000000000000 Binary files a/docs/docs/Contributing/683296796.png and /dev/null differ diff --git a/docs/docs/Contributing/contributing-community.md b/docs/docs/Contributing/contributing-community.md index 7b8a0116e1b1..f37ae5d7e525 100644 --- a/docs/docs/Contributing/contributing-community.md +++ b/docs/docs/Contributing/contributing-community.md @@ -1,52 +1,26 @@ --- -title: Community -sidebar_position: 3 +title: Join the Langflow community +sidebar_position: 5 slug: /contributing-community --- -## 🤖 Join **Langflow** Discord server {#80011e0bda004e83a8012c7ec6eab29a} +## Join the Langflow Discord server +Join the 
[Langflow Discord Server](https://discord.gg/EqksyE2EX9) to ask questions and showcase your projects. ---- - - -Join us to ask questions and showcase your projects. - - -Let's bring together the building blocks of AI integration! - - -Langflow [Discord](https://discord.gg/EqksyE2EX9) server. - - -## 🐦 Stay tuned for **Langflow** on Twitter {#6a17ba5905ad4f7aa5347af7854779f6} - - ---- +## Follow Langflow on X +Follow [@langflow_ai](https://twitter.com/langflow_ai) on X to get the latest news about Langflow. -Follow [@langflow_ai](https://twitter.com/langflow_ai) on **Twitter** to get the latest news about **Langflow**. +## Star Langflow on GitHub +You can [star Langflow in GitHub](https://github.com/langflow-ai/langflow). -## ⭐️ Star **Langflow** on GitHub {#c903a569934643799bf52b7d1b3514e1} - - ---- - - -You can "star" **Langflow** in [GitHub](https://github.com/langflow-ai/langflow). - - -By adding a star, other users will be able to find it more easily and see that it has been already useful for others. - - -## 👀 Watch the GitHub repository for releases {#d0a089ed717742308bd17430e5ae6309} - - ---- +By adding a star, other users will be able to find Langflow more easily, and see that it has been already useful for others. +## Watch the GitHub repository for releases -You can "watch" **Langflow** in [GitHub](https://github.com/langflow-ai/langflow). If you select "Watching" instead of "Releases only" you will receive notifications when someone creates a new issue or question. You can also specify that you only want to be notified about new issues, discussions, PRs, etc. so you can try and help them solve those questions. +You can [watch Langflow in GitHub](https://github.com/langflow-ai/langflow). If you select **Watching** instead of **Releases only** you will receive notifications when someone creates a new issue or question. You can also specify that you want to be notified only about new issues, discussions, and PRs so you can try to help solve those issues. 
diff --git a/docs/docs/Contributing/contributing-components.md b/docs/docs/Contributing/contributing-components.md new file mode 100644 index 000000000000..0cac371e49b6 --- /dev/null +++ b/docs/docs/Contributing/contributing-components.md @@ -0,0 +1,24 @@ +--- +title: Contribute components +sidebar_position: 4 +slug: /contributing-components +--- + + +New components are added as objects of the [CustomComponent](https://github.com/langflow-ai/langflow/blob/dev/src/backend/base/langflow/custom/custom_component/custom_component.py) class. + +Any dependencies are added to the [pyproject.toml](https://github.com/langflow-ai/langflow/blob/main/pyproject.toml#L148) file. + +### Contribute an example component to Langflow + +Anyone can contribute an example component. For example, if you created a new document loader called **MyCustomDocumentLoader**, you can follow these steps to contribute it to Langflow. + +1. Write your loader as an object of the [CustomComponent](https://github.com/langflow-ai/langflow/blob/dev/src/backend/base/langflow/custom/custom_component/custom_component.py) class. You'll create a new class, `MyCustomDocumentLoader`, that will inherit from `CustomComponent` and override the base class's methods. +2. Define optional attributes like `display_name`, `description`, and `documentation` to provide information about your custom component. +3. Implement the `build_config` method to define the configuration options for your custom component. +4. Implement the `build` method to define the logic for taking input parameters specified in the `build_config` method and returning the desired output. +5. Add the code to the [/components/documentloaders](https://github.com/langflow-ai/langflow/tree/dev/src/backend/base/langflow/components) folder. +6. 
Add the dependency to [/documentloaders/__init__.py](https://github.com/langflow-ai/langflow/blob/dev/src/backend/base/langflow/components/documentloaders/__init__.py) as `from .MyCustomDocumentLoader import MyCustomDocumentLoader`. +7. Add any new dependencies to the [pyproject.toml](https://github.com/langflow-ai/langflow/blob/main/pyproject.toml#L148) file. +8. Submit documentation for your component. For this example, you'd submit documentation to the [loaders page](https://github.com/langflow-ai/langflow/blob/main/docs/docs/Components/components-loaders.md). +9. Submit your changes as a pull request. The Langflow team will have a look, suggest changes, and add your component to Langflow. \ No newline at end of file diff --git a/docs/docs/Contributing/contributing-github-discussion-board.md b/docs/docs/Contributing/contributing-github-discussion-board.md new file mode 100644 index 000000000000..e17d5366d768 --- /dev/null +++ b/docs/docs/Contributing/contributing-github-discussion-board.md @@ -0,0 +1,13 @@ +--- +title: Ask for help on the Discussions board +sidebar_position: 3 +slug: /contributing-github-discussions +--- + +If you're looking for help with your code, consider posting a question on the Langflow [GitHub Discussions board](https://github.com/langflow-ai/langflow/discussions). The Langflow team cannot provide individual support via email. The team also believes that help is much more valuable if it's shared publicly, so that more people can benefit from it. + +Since the Discussions board is public, please follow this guidance when posting your code questions. + +* When describing your issue, try to provide as many details as possible. What exactly goes wrong? _How_ is it failing? Is there an error? "XY doesn't work" usually isn't that helpful for tracking down problems. Always remember to include the code you ran and if possible, extract only the relevant parts and don't just dump your entire script. 
This will make it easier for us to reproduce the error. + +* When you include long code, logs, or tracebacks, wrap them in `
` and `
` tags. This [collapses the content](https://developer.mozilla.org/en/docs/Web/HTML/Element/details) so the contents only becomes visible on click, making the issue easier to read and follow. \ No newline at end of file diff --git a/docs/docs/Contributing/contributing-github-issues.md b/docs/docs/Contributing/contributing-github-issues.md index 41e3e63172d8..11105a7e35a0 100644 --- a/docs/docs/Contributing/contributing-github-issues.md +++ b/docs/docs/Contributing/contributing-github-issues.md @@ -1,24 +1,7 @@ --- -title: GitHub Issues +title: Request an enhancement or report a bug sidebar_position: 2 slug: /contributing-github-issues --- - - -Our [issues](https://github.com/langflow-ai/langflow/issues) page is kept up to date with bugs, improvements, and feature requests. There is a taxonomy of labels to help with sorting and discovery of issues of interest. - - -If you're looking for help with your code, consider posting a question on the [GitHub Discussions board](https://github.com/langflow-ai/langflow/discussions). Please understand that we won't be able to provide individual support via email. We also believe that help is much more valuable if it's **shared publicly**, so that more people can benefit from it. - -- **Describing your issue:** Try to provide as many details as possible. What exactly goes wrong? _How_ is it failing? Is there an error? "XY doesn't work" usually isn't that helpful for tracking down problems. Always remember to include the code you ran and if possible, extract only the relevant parts and don't just dump your entire script. This will make it easier for us to reproduce the error. -- **Sharing long blocks of code or logs:** If you need to include long code, logs or tracebacks, you can wrap them in `
` and `
`. This [collapses the content](https://developer.mozilla.org/en/docs/Web/HTML/Element/details) so it only becomes visible on click, making the issue easier to read and follow. - -## Issue labels {#e19eae656c914ce7aedc4f55565cc0bc} - - ---- - - -[See this page](https://github.com/langflow-ai/langflow/labels) for an overview of the system we use to tag our issues and pull requests. - +The [Issues page in the Langflow repo](https://github.com/langflow-ai/langflow/issues) is kept up to date with bugs, improvements, and feature requests. Labels are used to help with sorting and discovery of issues of interest. For an overview of the system Langflow uses to tag issues and pull requests, see the Langflow repo's [labels page](https://github.com/langflow-ai/langflow/labels). \ No newline at end of file diff --git a/docs/docs/Contributing/contributing-how-to-contribute.md b/docs/docs/Contributing/contributing-how-to-contribute.md index 4b93e62cb14e..8a87bcd5d694 100644 --- a/docs/docs/Contributing/contributing-how-to-contribute.md +++ b/docs/docs/Contributing/contributing-how-to-contribute.md @@ -1,160 +1,116 @@ --- -title: How to contribute? +title: Contribute to Langflow sidebar_position: 1 slug: /contributing-how-to-contribute --- +This guide is intended to help you start contributing to Langflow. +As an open-source project in a rapidly developing field, Langflow welcomes contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation. +To contribute code or documentation to this project, follow the [fork and pull request](https://docs.github.com/en/get-started/quickstart/contributing-to-projects) workflow. -:::info +## Contribute code -This page may contain outdated information. It will be updated as soon as possible. +Develop Langflow locally with [uv](https://docs.astral.sh/uv/getting-started/installation/) and [Node.js](https://nodejs.org/en/download/package-manager). 
-::: +### Prerequisites +* [uv(>=0.4)](https://docs.astral.sh/uv/getting-started/installation/) +* [Node.js](https://nodejs.org/en/download/package-manager) +### Clone the Langflow Repository +1. Navigate to the [Langflow GitHub repository](https://github.com/langflow-ai/langflow), and then click **Fork**. -👋 Hello there! +2. Add the new remote to your local repository on your local machine: -We welcome contributions from developers of all levels to our open-source project on [GitHub](https://github.com/langflow-ai/langflow). If you'd like to contribute, please check our contributing guidelines and help make Langflow more accessible. +```bash +git remote add fork https://github.com//langflow.git +``` +### Prepare the development environment +1. Create development hooks. -As an open-source project in a rapidly developing field, we are extremely open to contributions, whether in the form of a new feature, improved infra, or better documentation. +```bash +make init +``` +This command sets up the development environment by installing backend and frontend dependencies, building the frontend static files, and initializing the project. It runs `make install_backend`, `make install_frontend`, `make build_frontend`, and finally `uv run langflow run` to start the application. +2. Run `make lint`, `make format`, and `make unit_tests` before pushing to the repository. -To contribute to this project, please follow a ["fork and pull request"](https://docs.github.com/en/get-started/quickstart/contributing-to-projects) workflow. Please do not try to push directly to this repo unless you are a maintainer. +### Debug +The repo includes a `.vscode/launch.json` file for debugging the backend in VSCode, which is faster than debugging with Docker Compose. To debug Langflow with the `launch.json` file in VSCode: -## Local Development {#0388cc3c758d434d994022863a6bafa9} +1. Open Langflow in VSCode. +2. 
Press **Ctrl+Shift+D** for Windows **or Cmd+Shift+D** for Mac to open the Run and Debug view. +3. From the **Run and Debug** dropdown, choose a debugging configuration. +4. Click the green **Play** button or press F5 to start debugging. +Use `launch.json` to quickly debug different parts of your application, like the backend, frontend, or CLI, directly from VSCode. ---- - - -You can develop Langflow using docker compose, or locally. - - -We provide a `.vscode/launch.json` file for debugging the backend in VSCode, which is a lot faster than using docker compose. - - -Setting up hooks: - - -`make init` - - -This will install the pre-commit hooks, which will run `make format` on every commit. - - -It is advised to run `make lint` before pushing to the repository. - - -## Run Locally {#5225c2ef0cd6403c9f6c6bbd888115e0} - - ---- - - -Langflow can run locally by cloning the repository and installing the dependencies. We recommend using a virtual environment to isolate the dependencies from your system. - - -Before you start, make sure you have the following installed: - -- Poetry (>=1.4) -- Node.js - -Then, in the root folder, install the dependencies and start the development server for the backend: - - -`make backend` - - -And the frontend: - - -`make frontend` +### Run Langflow locally +After setting up the environment with `make init`, you can run Langflow's backend and frontend separately for development. +Langflow recommends using a virtual environment like [venv](https://docs.python.org/3/library/venv.html) or [conda](https://anaconda.org/anaconda/conda) to isolate dependencies. -## Docker Compose {#b07f359414ff4220ac615afc364ee46e} +Before you begin, ensure you have [uv](https://docs.astral.sh/uv/getting-started/installation/) and [Node.js](https://nodejs.org/en/download/package-manager) installed. +1. 
In the repository root, install the dependencies and start the development server for the backend: ---- - - -The following snippet will run the backend and frontend in separate containers. The frontend will be available at `localhost:3000` and the backend at `localhost:7860`. - - -`docker compose up --build# ormake dev build=1` - - -## Documentation {#5f34bcaeccdc4489b0c5ee2c4a21354e} - - ---- - - -The documentation is built using [Docusaurus](https://docusaurus.io/). To run the documentation locally, run the following commands: - - -`cd docsnpm installnpm run start` - +```bash +make backend +``` -The documentation will be available at `localhost:3000` and all the files are located in the `docs/docs` folder. Once you are done with your changes, you can create a Pull Request to the `main` branch. +2. Install dependencies and start the frontend: +```bash +make frontend +``` -## Submitting Components {#9676353bc4504551a4014dd572ac8be8} - - ---- - - -New components are added as objects of the [CustomComponent](https://github.com/langflow-ai/langflow/blob/dev/src/backend/base/langflow/interface/custom/custom_component/custom_component.py) class and any dependencies are added to the [pyproject.toml](https://github.com/langflow-ai/langflow/blob/dev/pyproject.toml#L27) file. - - -### Add an example component {#8caae106c853465d83183e7f5272e4d8} - - -You have a new document loader called **MyCustomDocumentLoader** and it would look awesome in Langflow. - -1. Write your loader as an object of the [CustomComponent](https://github.com/langflow-ai/langflow/blob/dev/src/backend/base/langflow/interface/custom/custom_component/custom_component.py) class. You'll create a new class, `MyCustomDocumentLoader`, that will inherit from `CustomComponent` and override the base class's methods. -2. Define optional attributes like `display_name`, `description`, and `documentation` to provide information about your custom component. -3. 
Implement the `build_config` method to define the configuration options for your custom component. -4. Implement the `build` method to define the logic for taking input parameters specified in the `build_config` method and returning the desired output. -5. Add the code to the [/components/documentloaders](https://github.com/langflow-ai/langflow/tree/dev/src/backend/base/langflow/components) folder. -6. Add the dependency to [/documentloaders/__init__.py](https://github.com/langflow-ai/langflow/blob/dev/src/backend/base/langflow/components/documentloaders/__init__.py) as `from .MyCustomDocumentLoader import MyCustomDocumentLoader`. -7. Add any new dependencies to the outer [pyproject.toml](https://github.com/langflow-ai/langflow/blob/dev/pyproject.toml#L27) file. -8. Submit documentation for your component. For this example, you'd submit documentation to the [loaders page](https://github.com/langflow-ai/langflow/blob/dev/docs/docs/components/loaders). -9. Submit your changes as a pull request. The Langflow team will have a look, suggest changes, and add your component to Langflow. - -## User Sharing {#34ac32e11f344eab892b94531a21d2c9} - - ---- - +This approach allows you to work on the backend and frontend independently, with hot-reloading for faster development. -You might want to share and test your custom component with others, but don't need it merged into the main source code. +## Contribute documentation +The documentation is built using [Docusaurus](https://docusaurus.io/) and written in [Markdown](https://docusaurus.io/docs/markdown-features). -If so, you can share your component on the Langflow store. +### Prerequisites +* [Node.js](https://nodejs.org/en/download/package-manager) -1. [Register at the Langflow store](https://www.langflow.store/login/). +### Clone the Langflow repository +1. Navigate to the [Langflow GitHub repository](https://github.com/langflow-ai/langflow), and then click **Fork**. -2. Undergo pre-validation before receiving an API key. 
+2. Add the new remote to your local repository on your local machine: +```bash +git remote add fork https://github.com//langflow.git +``` -3. To deploy your amazing component directly to the Langflow store, without it being merged into the main source code, navigate to your flow, and then click **Share**. The share window appears: +3. To run the documentation locally, run the following commands: +```bash +cd docs +npm install +npm run start +``` -![](./683296796.png) +The documentation will be available at `localhost:3000` and all the files are located in the `docs/docs` folder. +## Open a pull request -4. Choose whether you want to flow to be public or private. You can also **Export** your flow as a JSON file from this window. When you're ready to share the flow, click **Share Flow**. You should see a **Flow shared successfully** popup. +Once you have written and manually tested your changes with `make lint` and `make unit_tests`, open a pull request to send your changes upstream to the main Langflow repository. +1. Open a new GitHub pull request with your patch against the `main` branch. +2. Ensure the PR title follows semantic commit conventions. For example, features are `feat: add new feature` and fixes are `fix: correct issue with X`. +3. A Langflow maintainer will review your pull request. Thanks for your contribution! -5. To confirm, navigate to the **Langflow Store** and filter results by **Created By Me**. You should see your new flow on the **Langflow Store**. +Some additional guidance on pull request titles: +* Ensure the pull request description clearly describes the problem and solution. If the PR fixes an issue, include a link to the fixed issue in the PR description with `Fixes #1234`. +* Pull request titles appear in Langflow's release notes, so they should explain what the PR does as explicitly as possible. +* Pull requests should strive to fix one thing **only**, and should contain a good description of what is being fixed. 
+For more information, see the [Python Developer's Guide](https://devguide.python.org/getting-started/pull-request-lifecycle/index.html#making-good-commits). \ No newline at end of file diff --git a/docs/docs/Contributing/contributing-telemetry.md b/docs/docs/Contributing/contributing-telemetry.md index 21c77bc8e2d8..c374565caace 100644 --- a/docs/docs/Contributing/contributing-telemetry.md +++ b/docs/docs/Contributing/contributing-telemetry.md @@ -4,33 +4,15 @@ sidebar_position: 0 slug: /contributing-telemetry --- - - -:::info - -This page may contain outdated information. It will be updated as soon as possible. - -::: - - - - -Our system uses anonymous telemetry to collect essential usage statistics to enhance functionality and user experience. This data helps us identify commonly used features and areas needing improvement, ensuring our development efforts align with what you need. - - -:::note +Langflow uses anonymous telemetry to collect essential usage statistics to enhance functionality and the user experience. This data helps us identify popular features and areas that need improvement, and ensures development efforts align with what you need. We respect your privacy and are committed to protecting your data. We do not collect any personal information or sensitive data. All telemetry data is anonymized and used solely for improving Langflow. -You can opt-out of telemetry by setting the `LANGFLOW_DO_NOT_TRACK` or `DO_NOT_TRACK` environment variable to `true` before running Langflow. This will disable telemetry data collection. - -::: - +## Opt out of telemetry +To opt out of telemetry, set the `LANGFLOW_DO_NOT_TRACK` or `DO_NOT_TRACK` environment variable to `true` before running Langflow. This disables telemetry data collection. 
- -## Data Collected Includes {#1734ed50fb4a4a45aaa84185b44527ca} - +## Data that Langflow collects ### Run {#2d427dca4f0148ae867997f6789e8bfb} @@ -66,5 +48,3 @@ You can opt-out of telemetry by setting the `LANGFLOW_DO_NOT_TRACK` or `DO_NO - **Success**: Whether the component operated successfully, which helps in quality control. - **ErrorMessage**: Details of any errors encountered, crucial for debugging and improvement. -This telemetry data is crucial for enhancing Langflow and ensuring that our development efforts align with your needs. Your feedback and suggestions are invaluable in shaping the future of Langflow, and we appreciate your support in making Langflow better for everyone. - diff --git a/docs/docs/Deployment/1098199232.png b/docs/docs/Deployment/1098199232.png deleted file mode 100644 index 884c6bf74e11..000000000000 Binary files a/docs/docs/Deployment/1098199232.png and /dev/null differ diff --git a/docs/docs/Deployment/_category_.json b/docs/docs/Deployment/_category_.json index 6d03dfaa4f29..41f76d47b9b7 100644 --- a/docs/docs/Deployment/_category_.json +++ b/docs/docs/Deployment/_category_.json @@ -1 +1 @@ -{"position":6, "label":"Deployment"} \ No newline at end of file +{"position":7, "label":"Deployment"} \ No newline at end of file diff --git a/docs/docs/Deployment/deployment-gcp.md b/docs/docs/Deployment/deployment-gcp.md index 70644a429902..bcae2a36c792 100644 --- a/docs/docs/Deployment/deployment-gcp.md +++ b/docs/docs/Deployment/deployment-gcp.md @@ -4,43 +4,37 @@ sidebar_position: 3 slug: /deployment-gcp --- -:::info +# Deploy on Google Cloud Platform -This page may contain outdated information. It will be updated as soon as possible. +To deploy Langflow on Google Cloud Platform using Cloud Shell, use the below script. +The script will guide you through setting up a Debian-based VM with the Langflow package, Nginx, and the necessary configurations to run the Langflow dev environment in GCP. 
-::: +## Prerequisites -## Deploy on Google Cloud Platform {#4ee01cda736c4f7396936409f23cdb52} +* A GCP account with the necessary permissions to create resources +* A project on GCP where you want to deploy Langflow ---- +## Deploy Langflow in GCP -### Run Langflow from a New Google Cloud Project {#ce729796d7404ccdb627bee47d6a4399} +1. Click below to launch Cloud Shell. -This guide will help you set up a Langflow development VM in a Google Cloud Platform project using Google Cloud Shell. +[![GCP Deploy](/logos/cloud_deploy.svg) Deploy to Google Cloud](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/langflow-ai/langflow&working_dir=scripts/gcp&shellonly=true&tutorial=walkthroughtutorial.md) -:::info - -When Cloud Shell opens, be sure to select Trust repo. Some gcloud commands might not run in an ephemeral Cloud Shell environment. - -::: +2. Click **Trust repo**. Some gcloud commands might not run in an ephemeral Cloud Shell environment. +3. Click **Start** and follow the tutorial to deploy Langflow. -### Standard VM {#245b47b450dd4159a5c56a5124bab84f} +## Spot/Preemptible Instance -[![GCP Deploy](https://camo.githubusercontent.com/c1a4a499c1d93d7038fd7af8c4f3fce222050f3bbdf4275dafbfde2491e4b8c4/68747470733a2f2f677374617469632e636f6d2f636c6f75647373682f696d616765732f6f70656e2d62746e2e737667)](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/langflow-ai/langflow&working_dir=scripts/gcp&shellonly=true&tutorial=walkthroughtutorial.md) +When running a [spot (preemptible) instance](https://cloud.google.com/compute/docs/instances/preemptible), the code and VM will behave the same way as in a regular instance, executing the startup script to configure the environment, install necessary dependencies, and run the Langflow application. However, **due to the nature of spot instances, the VM may be terminated at any time if Google Cloud needs to reclaim the resources**. 
This makes spot instances suitable for fault-tolerant, stateless, or interruptible workloads that can handle unexpected terminations and restarts. -This script sets up a Debian-based VM with the Langflow package, Nginx, and the necessary configurations to run the Langflow Dev environment. +## Pricing (approximate) ---- - -## Spot/Preemptible Instance {#de9b8f7c71284cbb98e8137a3c44553d} - -When running as a [spot (preemptible) instance](https://cloud.google.com/compute/docs/instances/preemptible), the code and VM will behave the same way as in a regular instance, executing the startup script to configure the environment, install necessary dependencies, and run the Langflow application. However, **due to the nature of spot instances, the VM may be terminated at any time if Google Cloud needs to reclaim the resources**. This makes spot instances suitable for fault-tolerant, stateless, or interruptible workloads that can handle unexpected terminations and restarts. +:::info ---- +For more information, see the [GCP Pricing Calculator](https://cloud.google.com/products/calculator?hl=en). 
-## Pricing (approximate) {#2289f4ba9f544e6e9d4b915ef5aacd24} +::: -> For a more accurate breakdown of costs, please use the GCP Pricing Calculator | Component | Regular Cost (Hourly) | Regular Cost (Monthly) | Spot/Preemptible Cost (Hourly) | Spot/Preemptible Cost (Monthly) | Notes | | ------------------ | --------------------- | ---------------------- | ------------------------------ | ------------------------------- | -------------------------------------------------------------------------- | diff --git a/docs/docs/Deployment/deployment-hugging-face-spaces.md b/docs/docs/Deployment/deployment-hugging-face-spaces.md index 42484dec7b2c..c9bb552d6d07 100644 --- a/docs/docs/Deployment/deployment-hugging-face-spaces.md +++ b/docs/docs/Deployment/deployment-hugging-face-spaces.md @@ -1,5 +1,5 @@ --- -title: 🤗 Spaces +title: HuggingFace Spaces sidebar_position: 0 slug: /deployment-hugging-face-spaces --- @@ -24,8 +24,7 @@ HuggingFace provides a great alternative for running Langflow in their Spaces en - **Visibility Settings**: Choose between Public or Private visibility for your Space. - After setting these parameters, click on **Duplicate Space** to initiate the setup. -![](./257215214.png) - +![](/img/hugging-face-deployment.png) 3. **Complete Installation**: The duplication and setup process begins immediately after you click **Duplicate Space**. Once completed, you will be automatically redirected to the main page of your new Space. diff --git a/docs/docs/Deployment/deployment-railway.md b/docs/docs/Deployment/deployment-railway.md index cf148b3166d5..69cf2b30d5a1 100644 --- a/docs/docs/Deployment/deployment-railway.md +++ b/docs/docs/Deployment/deployment-railway.md @@ -14,10 +14,8 @@ Deploying Langflow to Railway involves a few simple steps: 1. **Click the Button Below**: Start by clicking the deployment button provided below. This will redirect you to the Railway platform. 
- [![Deploy on Railway](./railway-deploy.svg)](https://railway.app/template/JMXEWp?referralCode=MnPSdg) + [![Deploy on Railway](/logos/railway-deploy.svg)](https://railway.app/template/JMXEWp?referralCode=MnPSdg) 2. **Deploy**: Proceed to deploy your Langflow instance. Click Deploy Now to deploy the instance. Railway will handle the rest, including setting up the infrastructure, deploying the Langflow instance, and starting the application. - ![](./1098199232.png) - By following these steps, your Langflow instance will be successfully deployed on Railway. diff --git a/docs/docs/Deployment/deployment-render.md b/docs/docs/Deployment/deployment-render.md index dc9e141f5879..0e057efe55a2 100644 --- a/docs/docs/Deployment/deployment-render.md +++ b/docs/docs/Deployment/deployment-render.md @@ -14,15 +14,11 @@ Deploying Langflow to Render is a straightforward process that can be completed 1. **Click the Button Below**: Start by clicking the deployment button provided below. This will redirect you to the Render platform. - [![Deploy to Render](./render-deploy.svg)](https://render.com/deploy?repo=https%3A%2F%2Fgithub.com%2Flangflow-ai%2Flangflow%2Ftree%2Fdev) + [![Deploy to Render](/logos/render-deploy.svg)](https://render.com/deploy?repo=https%3A%2F%2Fgithub.com%2Flangflow-ai%2Flangflow%2Ftree%2Fdev) 2. **Select the Blueprint Configuration**: Once on the Render platform, you will be prompted to provide a blueprint name and to select the branch for your `render.yaml` file in Langflow. This configuration file includes all the necessary settings and resources to deploy Langflow in Render. The default is `main`. 3. The `render.yaml` file specifies a `standard` Render instance, because Langflow requires at least 2 GB of RAM to run. This may require a credit card to sign up. Review the pricing details on the Render platform to understand any costs involved before proceeding. 
If you need to change your plan later, from the Render dashboard, go to **Settings** > **Instance Type**. - ![](./render-deploy-plan.png) - 4. Click **Deploy Blueprint** to deploy Langflow. Render will handle the rest, including setting up the database, deploying the Langflow instance, and starting the application. - ![](./render-deployed-overview.png) - By following these steps, your Langflow instance will be successfully deployed on Render. diff --git a/docs/docs/Deployment/render-deploy-plan.png b/docs/docs/Deployment/render-deploy-plan.png deleted file mode 100644 index 6a41696d7405..000000000000 Binary files a/docs/docs/Deployment/render-deploy-plan.png and /dev/null differ diff --git a/docs/docs/Deployment/render-deployed-overview.png b/docs/docs/Deployment/render-deployed-overview.png deleted file mode 100644 index 85ffcb73e393..000000000000 Binary files a/docs/docs/Deployment/render-deployed-overview.png and /dev/null differ diff --git a/docs/docs/Getting-Started/1160086633.gif b/docs/docs/Getting-Started/1160086633.gif deleted file mode 100644 index 023a294e0275..000000000000 Binary files a/docs/docs/Getting-Started/1160086633.gif and /dev/null differ diff --git a/docs/docs/Getting-Started/1390293355.png b/docs/docs/Getting-Started/1390293355.png deleted file mode 100644 index 8885b24f0f99..000000000000 Binary files a/docs/docs/Getting-Started/1390293355.png and /dev/null differ diff --git a/docs/docs/Getting-Started/1835734464.png b/docs/docs/Getting-Started/1835734464.png deleted file mode 100644 index 1b5cc15ab750..000000000000 Binary files a/docs/docs/Getting-Started/1835734464.png and /dev/null differ diff --git a/docs/docs/Getting-Started/221680153.png b/docs/docs/Getting-Started/221680153.png deleted file mode 100644 index b4903e7da049..000000000000 Binary files a/docs/docs/Getting-Started/221680153.png and /dev/null differ diff --git a/docs/docs/Getting-Started/487525520.png b/docs/docs/Getting-Started/487525520.png deleted file mode 100644 index 
b56696f44e5a..000000000000 Binary files a/docs/docs/Getting-Started/487525520.png and /dev/null differ diff --git a/docs/docs/Getting-Started/602374500.png b/docs/docs/Getting-Started/602374500.png deleted file mode 100644 index 59b4faaa8ce2..000000000000 Binary files a/docs/docs/Getting-Started/602374500.png and /dev/null differ diff --git a/docs/docs/Getting-Started/690736575.png b/docs/docs/Getting-Started/690736575.png deleted file mode 100644 index 280969e76865..000000000000 Binary files a/docs/docs/Getting-Started/690736575.png and /dev/null differ diff --git a/docs/docs/Getting-Started/getting-started-common-installation-issues.md b/docs/docs/Getting-Started/getting-started-common-installation-issues.md deleted file mode 100644 index 4be683cce422..000000000000 --- a/docs/docs/Getting-Started/getting-started-common-installation-issues.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: ❗️ Common Installation Issues -sidebar_position: 3 -slug: /getting-started-common-installation-issues ---- - - - -This is a list of possible issues that you may encounter when installing Langflow and how to solve them. - - ---- - - -```bash -> No module named 'langflow.__main__' -``` - -1. Run `python -m langflow run` instead of `langflow run`. -2. If that doesn't work, reinstall Langflow with `_python -m pip install langflow --pre -U`. -3. If that doesn't work, reinstall Langflow and its dependencies with `python -m pip install langflow --pre -U --force-reinstall`. - -When you try to run Langflow using the command `langflow run`, you may encounter the following error: - - -```bash -> langflow runTraceback (most recent call last): File ".../langflow", line 5, in from langflow.__main__ import mainModuleNotFoundError: No module named 'langflow.__main__' -``` - - -There are two possible reasons for this error: - -1. You've installed Langflow using `pip install langflow` but you already had a previous version of Langflow installed in your system. 
In this case, you might be running the wrong executable. To solve this issue, run the correct executable by running `python -m langflow run` instead of `langflow run`. If that doesn't work, try uninstalling and reinstalling Langflow with `python -m pip install langflow --pre -U`. -2. Some version conflicts might have occurred during the installation process. Run `python -m pip install langflow --pre -U --force-reinstall` to reinstall Langflow and its dependencies. - -```bash -> Something went wrong running migrations. Please, run 'langflow migration --fix' -``` - - -Clear the cache by deleting the contents of the cache folder. - - -This folder can be found at: - -- **Linux or WSL2 on Windows**: `home//.cache/langflow/` -- **MacOS**: `/Users//Library/Caches/langflow/` - -This error can occur during Langflow upgrades when the new version can't override `langflow-pre.db` in `.cache/langflow/`. Clearing the cache removes this file but will also erase your settings. - - -If you wish to retain your files, back them up before clearing the folder. - diff --git a/docs/docs/Getting-Started/getting-started-installation.md b/docs/docs/Getting-Started/getting-started-installation.md index 9a6872da3eb0..b60412ea3d59 100644 --- a/docs/docs/Getting-Started/getting-started-installation.md +++ b/docs/docs/Getting-Started/getting-started-installation.md @@ -1,97 +1,115 @@ --- -title: 📦 Installation +title: Install Langflow sidebar_position: 1 slug: /getting-started-installation --- +You can deploy Langflow either locally or as a hosted service with [**Datastax Langflow**](#datastax-langflow). +## Install Langflow locally -## **DataStax Langflow** {#a3b4bd8dec5a49ebbfca4828492133e9} +Install Langflow locally with [pip](https://pypi.org/project/pip/) or [pipx](https://pipx.pypa.io/stable/installation/). +### Prerequisites ---- - - -The easiest way to get started with Langflow is through the DataStax Cloud Service! 
+* [Python 3.10 to 3.12](https://www.python.org/downloads/release/python-3100/) installed +* [pip](https://pypi.org/project/pip/) or [pipx](https://pipx.pypa.io/stable/installation/) installed +* Before installing Langflow, we recommend creating a virtual environment to isolate your Python dependencies with [venv](https://docs.python.org/3/library/venv.html) or [conda](https://anaconda.org/anaconda/conda) +### Install Langflow with pip or pipx +Install Langflow with pip: -**DataStax Langflow** is a hosted version of Langflow integrated with [Astra DB](https://www.datastax.com/products/datastax-astra). Be up and running in minutes with no installation or setup required. [Sign up for free](https://astra.datastax.com/signup?type=langflow). - - -![](./602374500.png) - - -## **Install Langflow Locally** {#ef364ee864c545649d248113ad7d3038} - +```bash +python -m pip install langflow +``` ---- +Install Langflow with pipx using the Python 3.10 executable: +```bash +pipx install langflow --python python3.10 +``` -:::caution +## Run Langflow -Langflow **requires** Python version 3.10 or greater and [pip](https://pypi.org/project/pip/) or [pipx](https://pipx.pypa.io/stable/installation/) to be installed on your system. +1. To run Langflow, enter the following command. -::: +```bash +python -m langflow run +``` +2. Confirm that a local Langflow instance starts by visiting `http://127.0.0.1:7860` in a Chromium-based browser. +Now that Langflow is running, follow the [Quickstart](/getting-started-quickstart) to create your first flow. +## Manage Langflow versions -Install Langflow with pip: - +To upgrade Langflow to the latest version, use the pip upgrade command. ```bash python -m pip install langflow -U ``` -
-Install Langflow with pipx: - +To install a specific version of the Langflow package, add the required version to the command. 
```bash -pipx install langflow --python python3.10 --fetch-missing-python +python -m pip install langflow==1.1 ``` +To reinstall Langflow and all of its dependencies, add the `--force-reinstall` flag to the command. -Pipx can fetch the missing Python version for you with `--fetch-missing-python`, but you can also install the Python version manually. Use `--force-reinstall` to ensure you have the latest version of Langflow and its dependencies. - - -## Having a problem? {#86a16dad1d6e481cafb90efea2b9ff93} - - ---- +```bash +python -m pip install langflow --force-reinstall +``` +## DataStax Langflow {#datastax-langflow} -If you encounter a problem, see [Common Installation Issues](/getting-started-common-installation-issues). +**DataStax Langflow** is a hosted version of Langflow integrated with [Astra DB](https://www.datastax.com/products/datastax-astra). Be up and running in minutes with no installation or setup required. [Sign up for free](https://astra.datastax.com/signup?type=langflow). +## Common installation issues -To get help in the Langflow CLI: +This is a list of possible issues that you may encounter when installing and running Langflow. +### No `langflow.__main__` module +When you try to run Langflow with the command `langflow run`, you encounter the following error: ```bash -python -m langflow --help +> No module named 'langflow.__main__' ``` +1. Run `python -m langflow run` instead of `langflow run`. +2. If that doesn't work, reinstall the latest Langflow version with `python -m pip install langflow -U`. +3. If that doesn't work, reinstall Langflow and its dependencies with `python -m pip install langflow --pre -U --force-reinstall`. 
+### Langflow runTraceback -When you try to run Langflow using the command `langflow run`, you encounter the following error: ---- +```bash +> langflow runTraceback (most recent call last): File ".../langflow", line 5, in from langflow.__main__ import mainModuleNotFoundError: No module named 'langflow.__main__' +``` +There are two possible reasons for this error: -1. To run Langflow, enter the following command. +1. You've installed Langflow using `pip install langflow` but you already had a previous version of Langflow installed in your system. In this case, you might be running the wrong executable. To solve this issue, run the correct executable by running `python -m langflow run` instead of `langflow run`. If that doesn't work, try uninstalling and reinstalling Langflow with `python -m pip install langflow --pre -U`. +2. Some version conflicts might have occurred during the installation process. Run `python -m pip install langflow --pre -U --force-reinstall` to reinstall Langflow and its dependencies. +### Something went wrong running migrations ```bash -python -m langflow run +> Something went wrong running migrations. Please, run 'langflow migration --fix' ``` +Clear the cache by deleting the contents of the cache folder. -2. Confirm that a local Langflow instance starts by visiting `http://127.0.0.1:7860` in a Chromium-based browser. +This folder can be found at: + +- **Linux or WSL2 on Windows**: `home/<username>/.cache/langflow/` +- **MacOS**: `/Users/<username>/Library/Caches/langflow/` + +This error can occur during Langflow upgrades when the new version can't override `langflow-pre.db` in `.cache/langflow/`. Clearing the cache removes this file but also erases your settings. +If you wish to retain your files, back them up before clearing the folder. -![](./221680153.png) -3. Continue on to the [Quickstart](/getting-started-quickstart). 
diff --git a/docs/docs/Getting-Started/getting-started-quickstart.md b/docs/docs/Getting-Started/getting-started-quickstart.md index 73bc7476350b..232de7d24834 100644 --- a/docs/docs/Getting-Started/getting-started-quickstart.md +++ b/docs/docs/Getting-Started/getting-started-quickstart.md @@ -1,5 +1,5 @@ --- -title: ⚡️ Quickstart +title: Quickstart sidebar_position: 2 slug: /getting-started-quickstart --- @@ -9,25 +9,14 @@ slug: /getting-started-quickstart ## Prerequisites {#b5f154a3a1d242c7bdf57acf0a552732} ---- - -- [Python >=3.10](https://www.python.org/downloads/release/python-3100/) and [pip](https://pypi.org/project/pip/) or [pipx](https://pipx.pypa.io/stable/installation/) -- [OpenAI API key](https://platform.openai.com/) -- [Langflow installed and running](/getting-started-installation) - -## Basic Prompting {#67e7cd59d0fa43e3926bdc75134f7472} - - ---- - - -### Create the basic prompting project {#c029603ddb1f42f5b28df92e58426ab3} +* [Python 3.10 to 3.12](https://www.python.org/downloads/release/python-3100/) installed +* [pip](https://pypi.org/project/pip/) or [pipx](https://pipx.pypa.io/stable/installation/) installed +* Before installing Langflow, we recommend creating a virtual environment to isolate your Python dependencies with [venv](https://docs.python.org/3/library/venv.html) or [conda](https://anaconda.org/anaconda/conda) +## Create the basic prompting flow -1. From the Langflow dashboard, click **New Project**. - -![](./1835734464.png) +1. From the Langflow dashboard, click **New Flow**. 2. Select **Basic Prompting**. @@ -36,37 +25,41 @@ slug: /getting-started-quickstart 3. The **Basic Prompting** flow is created. -![](./487525520.png) +![](/img/starter-flow-basic-prompting.png) -This flow allows you to chat with the **OpenAI** component through the **Prompt** component. +This flow allows you to chat with the **OpenAI** component through the **Prompt** component. +4. 
To examine the flow's **Prompt** component, click on the **Template** field of the **Prompt** component. -Examine the **Prompt** component. The **Template** field instructs the LLM to `Answer the user as if you were a pirate.` This should be interesting... +```plain +Answer the user as if you were a pirate. +User: {user_input} -![](./690736575.png) +Answer: +``` +The **Template** instructs the LLM to accept `{user_input}` and `Answer the user as if you were a pirate.`. -4. To create an environment variable for the **OpenAI** component, in the **OpenAI API Key** field, click the **Globe** button, and then click **Add New Variable**. +5. To create an environment variable for the **OpenAI** component, in the **OpenAI API Key** field, click the **Globe** button, and then click **Add New Variable**. 1. In the **Variable Name** field, enter `openai_api_key`. 2. In the **Value** field, paste your OpenAI API Key (`sk-...`). 3. Click **Save Variable**. -![](./1390293355.png) - -### Run {#ef0e8283bfb646f99bbb825462d8cbab} +## Run the Basic Prompting flow {#ef0e8283bfb646f99bbb825462d8cbab} -1. Click the **Playground** button on the control panel (bottom right side of the workspace). This is where you can interact with your AI. +1. To open the **Playground** pane, click **Playground**. +This is where you can interact with your AI. 2. Type a message and press Enter. The bot should respond in a markedly piratical manner! -### Modify the prompt for a different result {#dcea9df0cd51434db76717c78b1e9a94} +## Modify the prompt for a different result {#dcea9df0cd51434db76717c78b1e9a94} 1. To modify your prompt results, in the **Prompt** template, click the **Template** field. The **Edit Prompt** window opens. 2. Change `Answer the user as if you were a pirate` to a different character, perhaps `Answer the user as if you were Hermione Granger.` -3. Run the workflow again. The response will be markedly different. +3. Run the workflow again. The response will be very different. 
## Next steps {#63b6db6cb571489c86b3ae89051f1a4f} @@ -79,6 +72,6 @@ Well done! You've built your first prompt in Langflow. 🎉 By dragging Langflow components to your workspace, you can create all sorts of interesting behaviors. Here are a couple of examples: -- [Memory Chatbot](https://docs.langflow.org/starter-projects/memory-chatbot) -- [Blog Writer](https://docs.langflow.org/starter-projects/blog-writer) -- [Document QA](https://docs.langflow.org/starter-projects/document-qa) +- [Memory Chatbot](/starter-projects-memory-chatbot) +- [Blog Writer](/starter-projects-blog-writer) +- [Document QA](/starter-projects-document-qa) diff --git a/docs/docs/Getting-Started/welcome-to-langflow.md b/docs/docs/Getting-Started/welcome-to-langflow.md new file mode 100644 index 000000000000..ce4ca71388d7 --- /dev/null +++ b/docs/docs/Getting-Started/welcome-to-langflow.md @@ -0,0 +1,40 @@ +--- +title: Welcome to Langflow +sidebar_position: 0 +slug: / +--- + +Langflow is a new, visual framework for building multi-agent and RAG applications. It is open-source, Python-powered, fully customizable, and LLM and vector store agnostic. + +Its intuitive interface allows for easy manipulation of AI building blocks, enabling developers to quickly prototype and turn their ideas into powerful, real-world solutions. + +Langflow empowers developers to rapidly prototype and build AI applications with its user-friendly interface and powerful features. Whether you're a seasoned AI developer or just starting out, Langflow provides the tools you need to bring your AI ideas to life. + +## Visual flow builder + +Langflow is an intuitive visual flow builder. This drag-and-drop interface allows developers to create complex AI workflows without writing extensive code. You can easily connect different components, such as prompts, language models, and data sources, to build sophisticated AI applications. 
+ +![Langflow in action](/img/playground-response.png) + +## Use cases + +Langflow can be used for a wide range of AI applications, including: + +* [Craft intelligent chatbots](/starter-projects-memory-chatbot) +* [Build document analysis systems](/starter-projects-document-qa) +* [Generate compelling content](/starter-projects-blog-writer) +* [Orchestrate multi-agent applications](/starter-projects-simple-agent) + +## Community and support + +Join Langflow's vibrant community of developers and AI enthusiasts. See the following resources to join discussions, share your projects, and get support: + +* [Contribute to Langflow](contributing-how-to-contribute) +* [Langflow Discord Server](https://discord.gg/EqksyE2EX9) +* [@langflow_ai](https://twitter.com/langflow_ai)  + +## Get started with Langflow + +- [Install Langflow](/getting-started-installation) +- [Quickstart](/getting-started-quickstart) + diff --git "a/docs/docs/Getting-Started/\360\237\221\213\302\240Welcome-to-Langflow.md" "b/docs/docs/Getting-Started/\360\237\221\213\302\240Welcome-to-Langflow.md" deleted file mode 100644 index b914952527c9..000000000000 --- "a/docs/docs/Getting-Started/\360\237\221\213\302\240Welcome-to-Langflow.md" +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: 👋 Welcome to Langflow -sidebar_position: 0 -slug: / ---- - - - -## Introduction {#e12578e9f465459592d89dbe47a54460} - - ---- - - -Langflow is a new, visual framework for building multi-agent and RAG applications. It is open-source, Python-powered, fully customizable, LLM and vector store agnostic. - - -Its intuitive interface allows for easy manipulation of AI building blocks, enabling developers to quickly prototype and turn their ideas into powerful, real-world solutions. - - -![](./1160086633.gif) - - -## 🚀 First steps {#c1ccb5e315b040edaa8d9a67f4960d81} - - ---- - -- [Install Langflow](/getting-started-installation) - Install and start a local Langflow server. 
-- [Quickstart](/getting-started-quickstart) - Create a flow and run it. -- [Langflow Workspace](/workspace) - Learn more about the Langflow Workspace. - -Learn more about the exciting changes in Langflow 1.0 in [A new chapter for Langflow](/whats-new-a-new-chapter-langflow). - diff --git a/docs/docs/Guides/1313358839.png b/docs/docs/Guides/1313358839.png deleted file mode 100644 index d353d23fa88f..000000000000 Binary files a/docs/docs/Guides/1313358839.png and /dev/null differ diff --git a/docs/docs/Guides/1988919422.png b/docs/docs/Guides/1988919422.png deleted file mode 100644 index fc738ffb8051..000000000000 Binary files a/docs/docs/Guides/1988919422.png and /dev/null differ diff --git a/docs/docs/Guides/207457678.png b/docs/docs/Guides/207457678.png deleted file mode 100644 index 94f3eb5448d6..000000000000 Binary files a/docs/docs/Guides/207457678.png and /dev/null differ diff --git a/docs/docs/Guides/403427222.png b/docs/docs/Guides/403427222.png deleted file mode 100644 index 0615c051eb89..000000000000 Binary files a/docs/docs/Guides/403427222.png and /dev/null differ diff --git a/docs/docs/Guides/948333764.png b/docs/docs/Guides/948333764.png deleted file mode 100644 index 7bdfa07050e4..000000000000 Binary files a/docs/docs/Guides/948333764.png and /dev/null differ diff --git a/docs/docs/Guides/guides-chat-memory.md b/docs/docs/Guides/guides-chat-memory.md index 248c1a0eea14..100d19a19987 100644 --- a/docs/docs/Guides/guides-chat-memory.md +++ b/docs/docs/Guides/guides-chat-memory.md @@ -6,7 +6,7 @@ slug: /guides-chat-memory -Langflow allows every chat message to be stored, and a single flow can have multiple memory sessions. This enables you to create separate _memories_ for agents to store and recall information as needed. +Langflow allows every chat message to be stored, and a single flow can have multiple memory sessions. This enables you to create separate _memories_ for agents to store and recall information as needed. 
In any project, as long as there are [**Chat**](/components-io) being used, memories are always being stored by default. These are messages from a user to the AI or vice-versa. @@ -15,22 +15,21 @@ In any project, as long as there are [**Chat**](/components-io) being used, memo To see and access this history of messages, Langflow features a component called **Chat Memory**. It retrieves previous messages and outputs them in structured format or parsed. -![](./403427222.png) - - -To learn the basics about memory in Langflow, check out the [Memory Chatbot ](/starter-projects-memory-chatbot)starter example. +To learn the basics about memory in Langflow, check out the [Memory Chatbot](/starter-projects-memory-chatbot) starter example. Memories can be visualized and managed directly from the **Playground**. You can edit and remove previous messages to inspect and validate the AI’s response behavior. You can remove or edit previous messages to get your models acting just right. -![](./1988919422.png) +![](/img/playground.png) Modifying these memories will influence the behavior of the chatbot responses, as long as an agent uses them. Here you have the ability to remove or edit previous messages, allowing them to manipulate and explore how these changes affect model responses. +To modify chat memories, from the playground, click the **Options** menu of any session, and then select **Message Logs**. + -![](./948333764.png) +![](/img/logs.png) ## Session ID {#4ee86e27d1004e8288a72c633c323703} @@ -42,20 +41,13 @@ Modifying these memories will influence the behavior of the chatbot responses, a Chat conversations store messages categorized by a `Session ID`. A a single flow can host multiple session IDs, and different flows can also share the same one. -The **Chat Memory** component also retrieves message histories by `Session ID` which users can change in the advanced settings. 
- - -![](./207457678.png) +The **Chat Memory** component also retrieves message histories by `Session ID`, which users can change in the component's **Controls** pane. +![](/img/chat-input-controls-pane.png) - - - -By default, if the `Session ID` value is empty, it is set to match the the same value as the `Flow ID`. - +By default, if the `Session ID` value is empty, it is set to the same value as `Flow ID`. You can also display all messages stored across every flow and session by going to **Settings** > **Messages**. - -![](./1313358839.png) +![](/img/settings-messages.png) diff --git a/docs/docs/Guides/guides-data-message.md b/docs/docs/Guides/guides-data-message.md index e65fcbe1fd4f..a8e48ec11c4b 100644 --- a/docs/docs/Guides/guides-data-message.md +++ b/docs/docs/Guides/guides-data-message.md @@ -60,10 +60,10 @@ print(data.title) # Outputs: "Hello, World!" because "title" key is in the data The `Data` object is also convenient for visualization of outputs, since the output preview has visual elements to inspect data as a table and its cells as pop ups for basic types. The idea is to create a unified way to work and visualize complex information in Langflow. -To receive `Data` objects in a component input, you can use the `DataInput` input type. +To receive `Data` objects in a component input, you can use the `DataInput` input type. -## The Message Object {#f4f17cad02a545068f407d515cbc2902} +## Message object attributes {#f4f17cad02a545068f407d515cbc2902} --- @@ -71,17 +71,27 @@ To receive `Data` objects in a component input, you can use the `DataInput` inpu The `Message` object extends the functionality of `Data` and includes additional attributes and methods for chat interactions. -- **Main Attributes:** - - `text_key`: Key to retrieve the primary text data. - - `text`: The main text content of the message. - - `sender`: Identifier for the sender (e.g., "User" or "AI"). - - `sender_name`: Name of the sender. 
- - `files`: List of files associated with the message. - - `session_id`: Identifier for the chat session. - - `timestamp`: Timestamp when the message was created. - - `flow_id`: Identifier for the flow. - -The `Message` object can be used to send, store and manipulate chat messages within Langflow. You can create a `Message` object by directly assigning key-value pairs to it. For example: + +- **Core message data:** + - `text`: The main text content of the message + - `sender`: Identifier for the sender (e.g., "User" or "AI") + - `sender_name`: Name of the sender + - `session_id`: Identifier for the chat session + - `timestamp`: Timestamp when the message was created (UTC) + - `flow_id`: Identifier for the flow + - `id`: Unique identifier for the message + +- **Content and files:** + - `files`: List of files or images associated with the message + - `content_blocks`: List of structured content blocks + - `properties`: Additional properties including visual styling and source information + +- **Message state:** + - `error`: Boolean indicating if there was an error + - `edit`: Boolean indicating if the message was edited + - `category`: Message category ("message", "error", "warning", "info") + +The `Message` object can be used to send, store, and manipulate chat messages within Langflow. You can create a `Message` object by directly assigning key-value pairs to it. 
For example: ```python diff --git a/docs/docs/Integrations/AssemblyAI_Flow.json b/docs/docs/Integrations/AssemblyAI_Flow.json new file mode 100644 index 000000000000..195bb1906abf --- /dev/null +++ b/docs/docs/Integrations/AssemblyAI_Flow.json @@ -0,0 +1,1431 @@ +{ + "name": "AssemblyAI Transcription and Speech AI Flow", + "icon": null, + "is_component": false, + "endpoint_name": null, + "data": { + "nodes": [ + { + "id": "Prompt-IO8Cq", + "type": "genericNode", + "position": { + "x": -1376.3296370680628, + "y": 928.8860970980681 + }, + "data": { + "type": "Prompt", + "node": { + "template": { + "_type": "Component", + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n ]\n\n outputs = [\n Output(display_name=\"Prompt Message\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(\n self,\n ) -> Message:\n prompt = Message.from_template_and_variables(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n 
frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n def post_code_processing(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"\n This function is called after the code validation is done.\n \"\"\"\n frontend_node = super().post_code_processing(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "template": { + "trace_as_input": true, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "template", + "value": "Provide a brief summary of the transcript.", + "display_name": "Template", + "advanced": false, + "dynamic": false, + "info": "", + "title_case": false, + "type": "prompt", + "_input_type": "PromptInput" + } + }, + "description": "Create a prompt template with dynamic variables.", + "icon": "prompts", + "is_input": null, + "is_output": null, + "is_composition": null, + "base_classes": [ + "Message" + ], + "name": "", + "display_name": "Prompt", + "documentation": "", + "custom_fields": { + "template": [] + }, + "output_types": [], + "full_path": null, + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + 
"Message" + ], + "selected": "Message", + "name": "prompt", + "hidden": null, + "display_name": "Prompt Message", + "method": "build_prompt", + "value": "__UNDEFINED__", + "cache": true + } + ], + "field_order": [ + "template" + ], + "beta": false, + "error": null, + "edited": false, + "lf_version": "1.0.18" + }, + "id": "Prompt-IO8Cq" + }, + "selected": false, + "width": 384, + "height": 324, + "positionAbsolute": { + "x": -1376.3296370680628, + "y": 928.8860970980681 + }, + "dragging": false + }, + { + "id": "AssemblyAITranscriptionJobCreator-Idt7P", + "type": "genericNode", + "position": { + "x": -1957.7132501771657, + "y": 470.79685053457587 + }, + "data": { + "type": "AssemblyAITranscriptionJobCreator", + "node": { + "template": { + "_type": "Component", + "audio_file": { + "trace_as_metadata": true, + "file_path": "fa69381c-d1c4-4535-bc23-bc2fb4956e1e/2024-09-26_16-47-01_sports_injuries.mp3", + "fileTypes": [ + "3ga", + "8svx", + "aac", + "ac3", + "aif", + "aiff", + "alac", + "amr", + "ape", + "au", + "dss", + "flac", + "flv", + "m4a", + "m4b", + "m4p", + "m4r", + "mp3", + "mpga", + "ogg", + "oga", + "mogg", + "opus", + "qcp", + "tta", + "voc", + "wav", + "wma", + "wv", + "webm", + "mts", + "m2ts", + "ts", + "mov", + "mp2", + "mp4", + "m4p", + "m4v", + "mxf" + ], + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "audio_file", + "value": "sports_injuries.mp3", + "display_name": "Audio File", + "advanced": false, + "dynamic": false, + "info": "The audio file to transcribe", + "title_case": false, + "type": "file", + "_input_type": "FileInput", + "load_from_db": false + }, + "api_key": { + "load_from_db": false, + "required": false, + "placeholder": "", + "show": true, + "name": "api_key", + "value": null, + "display_name": "Assembly API Key", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Your AssemblyAI API key. 
You can get one from https://www.assemblyai.com/", + "title_case": false, + "password": true, + "type": "str", + "_input_type": "SecretStrInput" + }, + "audio_file_url": { + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "audio_file_url", + "value": "", + "display_name": "Audio File URL", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The URL of the audio file to transcribe (Can be used instead of a File)", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "import os\n\nimport assemblyai as aai\nfrom loguru import logger\n\nfrom langflow.custom import Component\nfrom langflow.io import BoolInput, DropdownInput, FileInput, MessageTextInput, Output, SecretStrInput\nfrom langflow.schema import Data\n\n\nclass AssemblyAITranscriptionJobCreator(Component):\n display_name = \"AssemblyAI Start Transcript\"\n description = \"Create a transcription job for an audio file using AssemblyAI with advanced options\"\n documentation = \"https://www.assemblyai.com/docs\"\n icon = \"AssemblyAI\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Assembly API Key\",\n info=\"Your AssemblyAI API key. 
You can get one from https://www.assemblyai.com/\",\n ),\n FileInput(\n name=\"audio_file\",\n display_name=\"Audio File\",\n file_types=[\n \"3ga\",\n \"8svx\",\n \"aac\",\n \"ac3\",\n \"aif\",\n \"aiff\",\n \"alac\",\n \"amr\",\n \"ape\",\n \"au\",\n \"dss\",\n \"flac\",\n \"flv\",\n \"m4a\",\n \"m4b\",\n \"m4p\",\n \"m4r\",\n \"mp3\",\n \"mpga\",\n \"ogg\",\n \"oga\",\n \"mogg\",\n \"opus\",\n \"qcp\",\n \"tta\",\n \"voc\",\n \"wav\",\n \"wma\",\n \"wv\",\n \"webm\",\n \"mts\",\n \"m2ts\",\n \"ts\",\n \"mov\",\n \"mp2\",\n \"mp4\",\n \"m4p\",\n \"m4v\",\n \"mxf\",\n ],\n info=\"The audio file to transcribe\",\n ),\n MessageTextInput(\n name=\"audio_file_url\",\n display_name=\"Audio File URL\",\n info=\"The URL of the audio file to transcribe (Can be used instead of a File)\",\n advanced=True,\n ),\n DropdownInput(\n name=\"speech_model\",\n display_name=\"Speech Model\",\n options=[\n \"best\",\n \"nano\",\n ],\n value=\"best\",\n info=\"The speech model to use for the transcription\",\n advanced=True,\n ),\n BoolInput(\n name=\"language_detection\",\n display_name=\"Automatic Language Detection\",\n info=\"Enable automatic language detection\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"language_code\",\n display_name=\"Language\",\n info=\"\"\"\n The language of the audio file. Can be set manually if automatic language detection is disabled. 
\n See https://www.assemblyai.com/docs/getting-started/supported-languages for a list of supported language codes.\n \"\"\",\n advanced=True,\n ),\n BoolInput(\n name=\"speaker_labels\",\n display_name=\"Enable Speaker Labels\",\n info=\"Enable speaker diarization\",\n ),\n MessageTextInput(\n name=\"speakers_expected\",\n display_name=\"Expected Number of Speakers\",\n info=\"Set the expected number of speakers (optional, enter a number)\",\n advanced=True,\n ),\n BoolInput(\n name=\"punctuate\",\n display_name=\"Punctuate\",\n info=\"Enable automatic punctuation\",\n advanced=True,\n value=True,\n ),\n BoolInput(\n name=\"format_text\",\n display_name=\"Format Text\",\n info=\"Enable text formatting\",\n advanced=True,\n value=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Transcript ID\", name=\"transcript_id\", method=\"create_transcription_job\"),\n ]\n\n def create_transcription_job(self) -> Data:\n aai.settings.api_key = self.api_key\n\n # Convert speakers_expected to int if it's not empty\n speakers_expected = None\n if self.speakers_expected and self.speakers_expected.strip():\n try:\n speakers_expected = int(self.speakers_expected)\n except ValueError:\n self.status = \"Error: Expected Number of Speakers must be a valid integer\"\n return Data(data={\"error\": \"Error: Expected Number of Speakers must be a valid integer\"})\n\n language_code = self.language_code if self.language_code else None\n\n config = aai.TranscriptionConfig(\n speech_model=self.speech_model,\n language_detection=self.language_detection,\n language_code=language_code,\n speaker_labels=self.speaker_labels,\n speakers_expected=speakers_expected,\n punctuate=self.punctuate,\n format_text=self.format_text,\n )\n\n audio = None\n if self.audio_file:\n if self.audio_file_url:\n logger.warning(\"Both an audio file an audio URL were specified. 
The audio URL was ignored.\")\n\n # Check if the file exists\n if not os.path.exists(self.audio_file):\n self.status = \"Error: Audio file not found\"\n return Data(data={\"error\": \"Error: Audio file not found\"})\n audio = self.audio_file\n elif self.audio_file_url:\n audio = self.audio_file_url\n else:\n self.status = \"Error: Either an audio file or an audio URL must be specified\"\n return Data(data={\"error\": \"Error: Either an audio file or an audio URL must be specified\"})\n\n try:\n transcript = aai.Transcriber().submit(audio, config=config)\n\n if transcript.error:\n self.status = transcript.error\n return Data(data={\"error\": transcript.error})\n else:\n result = Data(data={\"transcript_id\": transcript.id})\n self.status = result\n return result\n except Exception as e:\n self.status = f\"An error occurred: {str(e)}\"\n return Data(data={\"error\": f\"An error occurred: {str(e)}\"})\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "format_text": { + "trace_as_metadata": true, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "format_text", + "value": true, + "display_name": "Format Text", + "advanced": true, + "dynamic": false, + "info": "Enable text formatting", + "title_case": false, + "type": "bool", + "_input_type": "BoolInput" + }, + "language_code": { + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "language_code", + "value": "", + "display_name": "Language", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "\n The language of the audio file. Can be set manually if automatic language detection is disabled. 
\n See https://www.assemblyai.com/docs/getting-started/supported-languages for a list of supported language codes.\n ", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "language_detection": { + "trace_as_metadata": true, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "language_detection", + "value": false, + "display_name": "Automatic Language Detection", + "advanced": true, + "dynamic": false, + "info": "Enable automatic language detection", + "title_case": false, + "type": "bool", + "_input_type": "BoolInput" + }, + "punctuate": { + "trace_as_metadata": true, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "punctuate", + "value": true, + "display_name": "Punctuate", + "advanced": true, + "dynamic": false, + "info": "Enable automatic punctuation", + "title_case": false, + "type": "bool", + "_input_type": "BoolInput" + }, + "speaker_labels": { + "trace_as_metadata": true, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "speaker_labels", + "value": true, + "display_name": "Enable Speaker Labels", + "advanced": false, + "dynamic": false, + "info": "Enable speaker diarization", + "title_case": false, + "type": "bool", + "_input_type": "BoolInput", + "load_from_db": false + }, + "speakers_expected": { + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "speakers_expected", + "value": "", + "display_name": "Expected Number of Speakers", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Set the expected number of speakers (optional, enter a number)", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "speech_model": { + "trace_as_metadata": true, + "options": [ + "best", + "nano" + ], + "combobox": false, + "required": false, + "placeholder": "", + 
"show": true, + "name": "speech_model", + "value": "best", + "display_name": "Speech Model", + "advanced": true, + "dynamic": false, + "info": "The speech model to use for the transcription", + "title_case": false, + "type": "str", + "_input_type": "DropdownInput" + } + }, + "description": "Create a transcription job for an audio file using AssemblyAI with advanced options", + "icon": "AssemblyAI", + "base_classes": [ + "Data" + ], + "display_name": "AssemblyAI Start Transcript", + "documentation": "https://www.assemblyai.com/docs", + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": true, + "outputs": [ + { + "types": [ + "Data" + ], + "selected": "Data", + "name": "transcript_id", + "display_name": "Transcript ID", + "method": "create_transcription_job", + "value": "__UNDEFINED__", + "cache": true + } + ], + "field_order": [ + "api_key", + "audio_file", + "audio_file_url", + "speech_model", + "language_detection", + "language_code", + "speaker_labels", + "speakers_expected", + "punctuate", + "format_text" + ], + "beta": false, + "edited": false, + "lf_version": "1.0.18" + }, + "id": "AssemblyAITranscriptionJobCreator-Idt7P", + "description": "Create a transcription job for an audio file using AssemblyAI with advanced options", + "display_name": "AssemblyAI Start Transcript" + }, + "selected": false, + "width": 384, + "height": 482, + "positionAbsolute": { + "x": -1957.7132501771657, + "y": 470.79685053457587 + }, + "dragging": false + }, + { + "id": "AssemblyAITranscriptionJobPoller-F46nf", + "type": "genericNode", + "position": { + "x": -1408.0967182254753, + "y": 461.5039554434261 + }, + "data": { + "type": "AssemblyAITranscriptionJobPoller", + "node": { + "template": { + "_type": "Component", + "transcript_id": { + "trace_as_metadata": true, + "list": false, + "trace_as_input": true, + "required": false, + "placeholder": "", + "show": true, + "name": "transcript_id", + "value": "", + "display_name": 
"Transcript ID", + "advanced": false, + "input_types": [ + "Data" + ], + "dynamic": false, + "info": "The ID of the transcription job to poll", + "title_case": false, + "type": "other", + "_input_type": "DataInput" + }, + "api_key": { + "load_from_db": false, + "required": false, + "placeholder": "", + "show": true, + "name": "api_key", + "value": null, + "display_name": "Assembly API Key", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Your AssemblyAI API key. You can get one from https://www.assemblyai.com/", + "title_case": false, + "password": true, + "type": "str", + "_input_type": "SecretStrInput" + }, + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "import assemblyai as aai\n\nfrom langflow.custom import Component\nfrom langflow.io import DataInput, FloatInput, Output, SecretStrInput\nfrom langflow.schema import Data\n\n\nclass AssemblyAITranscriptionJobPoller(Component):\n display_name = \"AssemblyAI Poll Transcript\"\n description = \"Poll for the status of a transcription job using AssemblyAI\"\n documentation = \"https://www.assemblyai.com/docs\"\n icon = \"AssemblyAI\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Assembly API Key\",\n info=\"Your AssemblyAI API key. 
You can get one from https://www.assemblyai.com/\",\n ),\n DataInput(\n name=\"transcript_id\",\n display_name=\"Transcript ID\",\n info=\"The ID of the transcription job to poll\",\n ),\n FloatInput(\n name=\"polling_interval\",\n display_name=\"Polling Interval\",\n value=3.0,\n info=\"The polling interval in seconds\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Transcription Result\", name=\"transcription_result\", method=\"poll_transcription_job\"),\n ]\n\n def poll_transcription_job(self) -> Data:\n \"\"\"Polls the transcription status until completion and returns the Data.\"\"\"\n aai.settings.api_key = self.api_key\n aai.settings.polling_interval = self.polling_interval\n\n # check if it's an error message from the previous step\n if self.transcript_id.data.get(\"error\"):\n self.status = self.transcript_id.data[\"error\"]\n return self.transcript_id\n\n try:\n transcript = aai.Transcript.get_by_id(self.transcript_id.data[\"transcript_id\"])\n except Exception as e:\n error = f\"Getting transcription failed: {str(e)}\"\n self.status = error\n return Data(data={\"error\": error})\n\n if transcript.status == aai.TranscriptStatus.completed:\n json_response = transcript.json_response\n text = json_response.pop(\"text\", None)\n utterances = json_response.pop(\"utterances\", None)\n transcript_id = json_response.pop(\"id\", None)\n sorted_data = { \"text\": text, \"utterances\": utterances, \"id\": transcript_id}\n sorted_data.update(json_response)\n data = Data(data=sorted_data)\n self.status = data\n return data\n else:\n self.status = transcript.error\n return Data(data={\"error\": transcript.error})\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "polling_interval": { + "trace_as_metadata": true, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "polling_interval", 
+ "value": 3, + "display_name": "Polling Interval", + "advanced": true, + "dynamic": false, + "info": "The polling interval in seconds", + "title_case": false, + "type": "float", + "_input_type": "FloatInput" + } + }, + "description": "Poll for the status of a transcription job using AssemblyAI", + "icon": "AssemblyAI", + "base_classes": [ + "Data" + ], + "display_name": "AssemblyAI Poll Transcript", + "documentation": "https://www.assemblyai.com/docs", + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Data" + ], + "selected": "Data", + "name": "transcription_result", + "display_name": "Transcription Result", + "method": "poll_transcription_job", + "value": "__UNDEFINED__", + "cache": true + } + ], + "field_order": [ + "api_key", + "transcript_id", + "polling_interval" + ], + "beta": false, + "edited": false, + "lf_version": "1.0.18" + }, + "id": "AssemblyAITranscriptionJobPoller-F46nf", + "description": "Poll for the status of a transcription job using AssemblyAI", + "display_name": "AssemblyAI Poll Transcript" + }, + "selected": false, + "width": 384, + "height": 368, + "positionAbsolute": { + "x": -1408.0967182254753, + "y": 461.5039554434261 + }, + "dragging": false + }, + { + "id": "AssemblyAIGetSubtitles-3sjU6", + "type": "genericNode", + "position": { + "x": -867.5862690424032, + "y": 368.91683022842676 + }, + "data": { + "type": "AssemblyAIGetSubtitles", + "node": { + "template": { + "_type": "Component", + "transcription_result": { + "trace_as_metadata": true, + "list": false, + "trace_as_input": true, + "required": false, + "placeholder": "", + "show": true, + "name": "transcription_result", + "value": "", + "display_name": "Transcription Result", + "advanced": false, + "input_types": [ + "Data" + ], + "dynamic": false, + "info": "The transcription result from AssemblyAI", + "title_case": false, + "type": "other", + "_input_type": "DataInput" + }, + "api_key": { 
+ "load_from_db": false, + "required": false, + "placeholder": "", + "show": true, + "name": "api_key", + "value": null, + "display_name": "Assembly API Key", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Your AssemblyAI API key. You can get one from https://www.assemblyai.com/", + "title_case": false, + "password": true, + "type": "str", + "_input_type": "SecretStrInput" + }, + "chars_per_caption": { + "trace_as_metadata": true, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "chars_per_caption", + "value": 0, + "display_name": "Characters per Caption", + "advanced": true, + "dynamic": false, + "info": "The maximum number of characters per caption (0 for no limit)", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "import assemblyai as aai\n\nfrom langflow.custom import Component\nfrom langflow.io import DataInput, DropdownInput, IntInput, Output, SecretStrInput\nfrom langflow.schema import Data\n\n\nclass AssemblyAIGetSubtitles(Component):\n display_name = \"AssemblyAI Get Subtitles\"\n description = \"Export your transcript in SRT or VTT format for subtitles and closed captions\"\n documentation = \"https://www.assemblyai.com/docs\"\n icon = \"AssemblyAI\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Assembly API Key\",\n info=\"Your AssemblyAI API key. 
You can get one from https://www.assemblyai.com/\",\n ),\n DataInput(\n name=\"transcription_result\",\n display_name=\"Transcription Result\",\n info=\"The transcription result from AssemblyAI\",\n ),\n DropdownInput(\n name=\"subtitle_format\",\n display_name=\"Subtitle Format\",\n options=[\"srt\", \"vtt\"],\n value=\"srt\",\n info=\"The format of the captions (SRT or VTT)\",\n ),\n IntInput(\n name=\"chars_per_caption\",\n display_name=\"Characters per Caption\",\n info=\"The maximum number of characters per caption (0 for no limit)\",\n value=0,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Subtitles\", name=\"subtitles\", method=\"get_subtitles\"),\n ]\n\n def get_subtitles(self) -> Data:\n aai.settings.api_key = self.api_key\n\n # check if it's an error message from the previous step\n if self.transcription_result.data.get(\"error\"):\n self.status = self.transcription_result.data[\"error\"]\n return self.transcription_result\n\n try:\n transcript_id = self.transcription_result.data[\"id\"]\n transcript = aai.Transcript.get_by_id(transcript_id)\n except Exception as e:\n error = f\"Getting transcription failed: {str(e)}\"\n self.status = error\n return Data(data={\"error\": error})\n\n if transcript.status == aai.TranscriptStatus.completed:\n subtitles = None\n chars_per_caption = self.chars_per_caption if self.chars_per_caption > 0 else None\n if self.subtitle_format == \"srt\":\n subtitles = transcript.export_subtitles_srt(chars_per_caption)\n else:\n subtitles = transcript.export_subtitles_vtt(chars_per_caption)\n\n result = Data(\n subtitles=subtitles,\n format=self.subtitle_format,\n transcript_id=transcript_id,\n chars_per_caption=chars_per_caption,\n )\n\n self.status = result\n return result\n else:\n self.status = transcript.error\n return Data(data={\"error\": transcript.error})\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": 
false, + "title_case": false + }, + "subtitle_format": { + "trace_as_metadata": true, + "options": [ + "srt", + "vtt" + ], + "combobox": false, + "required": false, + "placeholder": "", + "show": true, + "name": "subtitle_format", + "value": "srt", + "display_name": "Subtitle Format", + "advanced": false, + "dynamic": false, + "info": "The format of the captions (SRT or VTT)", + "title_case": false, + "type": "str", + "_input_type": "DropdownInput" + } + }, + "description": "Export your transcript in SRT or VTT format for subtitles and closed captions", + "icon": "AssemblyAI", + "base_classes": [ + "Data" + ], + "display_name": "AssemblyAI Get Subtitles", + "documentation": "https://www.assemblyai.com/docs", + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Data" + ], + "selected": "Data", + "name": "subtitles", + "display_name": "Subtitles", + "method": "get_subtitles", + "value": "__UNDEFINED__", + "cache": true + } + ], + "field_order": [ + "api_key", + "transcription_result", + "subtitle_format", + "chars_per_caption" + ], + "beta": false, + "edited": false, + "lf_version": "1.0.18" + }, + "id": "AssemblyAIGetSubtitles-3sjU6", + "description": "Export your transcript in SRT or VTT format for subtitles and closed captions", + "display_name": "AssemblyAI Get Subtitles" + }, + "selected": false, + "width": 384, + "height": 454, + "positionAbsolute": { + "x": -867.5862690424032, + "y": 368.91683022842676 + }, + "dragging": false + }, + { + "id": "AssemblyAIListTranscripts-3prc4", + "type": "genericNode", + "position": { + "x": -380.99808133361984, + "y": 401.2674645310267 + }, + "data": { + "type": "AssemblyAIListTranscripts", + "node": { + "template": { + "_type": "Component", + "api_key": { + "load_from_db": false, + "required": false, + "placeholder": "", + "show": true, + "name": "api_key", + "value": null, + "display_name": "Assembly API Key", + "advanced": false, + 
"input_types": [ + "Message" + ], + "dynamic": false, + "info": "Your AssemblyAI API key. You can get one from https://www.assemblyai.com/", + "title_case": false, + "password": true, + "type": "str", + "_input_type": "SecretStrInput" + }, + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "import assemblyai as aai\n\nfrom langflow.custom import Component\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, Output, SecretStrInput\nfrom langflow.schema import Data\n\n\nclass AssemblyAIListTranscripts(Component):\n display_name = \"AssemblyAI List Transcripts\"\n description = \"Retrieve a list of transcripts from AssemblyAI with filtering options\"\n documentation = \"https://www.assemblyai.com/docs\"\n icon = \"AssemblyAI\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Assembly API Key\",\n info=\"Your AssemblyAI API key. You can get one from https://www.assemblyai.com/\",\n ),\n IntInput(\n name=\"limit\",\n display_name=\"Limit\",\n info=\"Maximum number of transcripts to retrieve (default: 20, use 0 for all)\",\n value=20,\n ),\n DropdownInput(\n name=\"status_filter\",\n display_name=\"Status Filter\",\n options=[\"all\", \"queued\", \"processing\", \"completed\", \"error\"],\n value=\"all\",\n info=\"Filter by transcript status\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"created_on\",\n display_name=\"Created On\",\n info=\"Only get transcripts created on this date (YYYY-MM-DD)\",\n advanced=True,\n ),\n BoolInput(\n name=\"throttled_only\",\n display_name=\"Throttled Only\",\n info=\"Only get throttled transcripts, overrides the status filter\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Transcript List\", name=\"transcript_list\", method=\"list_transcripts\"),\n ]\n\n def list_transcripts(self) -> list[Data]:\n aai.settings.api_key = self.api_key\n\n params = aai.ListTranscriptParameters()\n 
if self.limit:\n params.limit = self.limit\n if self.status_filter != \"all\":\n params.status = self.status_filter\n if self.created_on and self.created_on.text:\n params.created_on = self.created_on.text\n if self.throttled_only:\n params.throttled_only = True\n\n try:\n transcriber = aai.Transcriber()\n\n def convert_page_to_data_list(page):\n return [Data(**t.dict()) for t in page.transcripts]\n\n if self.limit == 0:\n # paginate over all pages\n params.limit = 100\n page = transcriber.list_transcripts(params)\n transcripts = convert_page_to_data_list(page)\n\n while page.page_details.before_id_of_prev_url is not None:\n params.before_id = page.page_details.before_id_of_prev_url\n page = transcriber.list_transcripts(params)\n transcripts.extend(convert_page_to_data_list(page))\n else:\n # just one page\n page = transcriber.list_transcripts(params)\n transcripts = convert_page_to_data_list(page)\n\n self.status = transcripts\n return transcripts\n except Exception as e:\n error_data = Data(data={\"error\": f\"An error occurred: {str(e)}\"})\n self.status = [error_data]\n return [error_data]\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "created_on": { + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "created_on", + "value": "", + "display_name": "Created On", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Only get transcripts created on this date (YYYY-MM-DD)", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "limit": { + "trace_as_metadata": true, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "limit", + "value": 20, + "display_name": "Limit", + "advanced": false, + "dynamic": false, + "info": 
"Maximum number of transcripts to retrieve (default: 20, use 0 for all)", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "status_filter": { + "trace_as_metadata": true, + "options": [ + "all", + "queued", + "processing", + "completed", + "error" + ], + "combobox": false, + "required": false, + "placeholder": "", + "show": true, + "name": "status_filter", + "value": "all", + "display_name": "Status Filter", + "advanced": true, + "dynamic": false, + "info": "Filter by transcript status", + "title_case": false, + "type": "str", + "_input_type": "DropdownInput" + }, + "throttled_only": { + "trace_as_metadata": true, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "throttled_only", + "value": false, + "display_name": "Throttled Only", + "advanced": true, + "dynamic": false, + "info": "Only get throttled transcripts, overrides the status filter", + "title_case": false, + "type": "bool", + "_input_type": "BoolInput" + } + }, + "description": "Retrieve a list of transcripts from AssemblyAI with filtering options", + "icon": "AssemblyAI", + "base_classes": [ + "Data" + ], + "display_name": "AssemblyAI List Transcripts", + "documentation": "https://www.assemblyai.com/docs", + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Data" + ], + "selected": "Data", + "name": "transcript_list", + "display_name": "Transcript List", + "method": "list_transcripts", + "value": "__UNDEFINED__", + "cache": true + } + ], + "field_order": [ + "api_key", + "limit", + "status_filter", + "created_on", + "throttled_only" + ], + "beta": false, + "edited": false, + "lf_version": "1.0.18" + }, + "id": "AssemblyAIListTranscripts-3prc4", + "description": "Retrieve a list of transcripts from AssemblyAI with filtering options", + "display_name": "AssemblyAI List Transcripts" + }, + "selected": false, + "width": 384, + "height": 410, + 
"positionAbsolute": { + "x": -380.99808133361984, + "y": 401.2674645310267 + }, + "dragging": false + }, + { + "id": "AssemblyAILeMUR-jzwHZ", + "type": "genericNode", + "position": { + "x": -875.6482330011189, + "y": 887.1705799007382 + }, + "data": { + "type": "AssemblyAILeMUR", + "node": { + "template": { + "_type": "Component", + "transcription_result": { + "trace_as_metadata": true, + "list": false, + "trace_as_input": true, + "required": false, + "placeholder": "", + "show": true, + "name": "transcription_result", + "value": "", + "display_name": "Transcription Result", + "advanced": false, + "input_types": [ + "Data" + ], + "dynamic": false, + "info": "The transcription result from AssemblyAI", + "title_case": false, + "type": "other", + "_input_type": "DataInput" + }, + "api_key": { + "load_from_db": false, + "required": false, + "placeholder": "", + "show": true, + "name": "api_key", + "value": null, + "display_name": "Assembly API Key", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Your AssemblyAI API key. You can get one from https://www.assemblyai.com/", + "title_case": false, + "password": true, + "type": "str", + "_input_type": "SecretStrInput" + }, + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "import assemblyai as aai\n\nfrom langflow.custom import Component\nfrom langflow.io import DataInput, DropdownInput, FloatInput, IntInput, MultilineInput, Output, SecretStrInput\nfrom langflow.schema import Data\n\n\nclass AssemblyAILeMUR(Component):\n display_name = \"AssemblyAI LeMUR\"\n description = \"Apply Large Language Models to spoken data using the AssemblyAI LeMUR framework\"\n documentation = \"https://www.assemblyai.com/docs/lemur\"\n icon = \"AssemblyAI\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Assembly API Key\",\n info=\"Your AssemblyAI API key. 
You can get one from https://www.assemblyai.com/\",\n advanced=False,\n ),\n DataInput(\n name=\"transcription_result\",\n display_name=\"Transcription Result\",\n info=\"The transcription result from AssemblyAI\",\n ),\n MultilineInput(\n name=\"prompt\",\n display_name=\"Input Prompt\",\n info=\"The text to prompt the model\",\n ),\n DropdownInput(\n name=\"final_model\",\n display_name=\"Final Model\",\n options=[\"claude3_5_sonnet\", \"claude3_opus\", \"claude3_haiku\", \"claude3_sonnet\"],\n value=\"claude3_5_sonnet\",\n info=\"The model that is used for the final prompt after compression is performed\",\n advanced=True,\n ),\n FloatInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n advanced=True,\n value=0.0,\n info=\"The temperature to use for the model\",\n ),\n IntInput(\n name=\"max_output_size\",\n display_name=\" Max Output Size\",\n advanced=True,\n value=2000,\n info=\"Max output size in tokens, up to 4000\",\n ),\n DropdownInput(\n name=\"endpoint\",\n display_name=\"Endpoint\",\n options=[\"task\", \"summary\", \"question-answer\"],\n value=\"task\",\n info=\"The LeMUR endpoint to use. For 'summary' and 'question-answer', no prompt input is needed. See https://www.assemblyai.com/docs/api-reference/lemur/ for more info.\",\n advanced=True,\n ),\n MultilineInput(\n name=\"questions\",\n display_name=\"Questions\",\n info=\"Comma-separated list of your questions. Only used if Endpoint is 'question-answer'\",\n advanced=True,\n ),\n MultilineInput(\n name=\"transcript_ids\",\n display_name=\"Transcript IDs\",\n info=\"Comma-separated list of transcript IDs. LeMUR can perform actions over multiple transcripts. 
If provided, the Transcription Result is ignored.\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"LeMUR Response\", name=\"lemur_response\", method=\"run_lemur\"),\n ]\n\n def run_lemur(self) -> Data:\n \"\"\"Use the LeMUR task endpoint to input the LLM prompt.\"\"\"\n aai.settings.api_key = self.api_key\n\n if not self.transcription_result and not self.transcript_ids:\n error = \"Either a Transcription Result or Transcript IDs must be provided\"\n self.status = error\n return Data(data={\"error\": error})\n elif self.transcription_result and self.transcription_result.data.get(\"error\"):\n # error message from the previous step\n self.status = self.transcription_result.data[\"error\"]\n return self.transcription_result\n elif self.endpoint == \"task\" and not self.prompt:\n self.status = \"No prompt specified for the task endpoint\"\n return Data(data={\"error\": \"No prompt specified\"})\n elif self.endpoint == \"question-answer\" and not self.questions:\n error = \"No Questions were provided for the question-answer endpoint\"\n self.status = error\n return Data(data={\"error\": error})\n\n # Check for valid transcripts\n transcript_ids = None\n if self.transcription_result and \"id\" in self.transcription_result.data:\n transcript_ids = [self.transcription_result.data[\"id\"]]\n elif self.transcript_ids:\n transcript_ids = self.transcript_ids.split(\",\")\n transcript_ids = [t.strip() for t in transcript_ids]\n \n if not transcript_ids:\n error = \"Either a valid Transcription Result or valid Transcript IDs must be provided\"\n self.status = error\n return Data(data={\"error\": error})\n\n # Get TranscriptGroup and check if there is any error\n transcript_group = aai.TranscriptGroup(transcript_ids=transcript_ids)\n transcript_group, failures = transcript_group.wait_for_completion(return_failures=True)\n if failures:\n error = f\"Getting transcriptions failed: {failures[0]}\"\n self.status = error\n return Data(data={\"error\": error})\n \n 
for t in transcript_group.transcripts:\n if t.status == aai.TranscriptStatus.error:\n self.status = t.error\n return Data(data={\"error\": t.error})\n\n # Perform LeMUR action\n try:\n response = self.perform_lemur_action(transcript_group, self.endpoint)\n result = Data(data=response)\n self.status = result\n return result\n except Exception as e:\n error = f\"An Error happened: {str(e)}\"\n self.status = error\n return Data(data={\"error\": error})\n\n def perform_lemur_action(self, transcript_group: aai.TranscriptGroup, endpoint: str) -> dict:\n print(\"Endpoint:\", endpoint, type(endpoint))\n if endpoint == \"task\":\n result = transcript_group.lemur.task(\n prompt=self.prompt,\n final_model=self.get_final_model(self.final_model),\n temperature=self.temperature,\n max_output_size=self.max_output_size,\n )\n elif endpoint == \"summary\":\n result = transcript_group.lemur.summarize(\n final_model=self.get_final_model(self.final_model),\n temperature=self.temperature,\n max_output_size=self.max_output_size,\n )\n elif endpoint == \"question-answer\":\n questions = self.questions.split(\",\")\n questions = [aai.LemurQuestion(question=q) for q in questions]\n result = transcript_group.lemur.question(\n questions=questions,\n final_model=self.get_final_model(self.final_model),\n temperature=self.temperature,\n max_output_size=self.max_output_size,\n )\n else:\n raise ValueError(f\"Endpoint not supported: {endpoint}\")\n\n return result.dict()\n \n def get_final_model(self, model_name: str) -> aai.LemurModel:\n if model_name == \"claude3_5_sonnet\":\n return aai.LemurModel.claude3_5_sonnet\n elif model_name == \"claude3_opus\":\n return aai.LemurModel.claude3_opus\n elif model_name == \"claude3_haiku\":\n return aai.LemurModel.claude3_haiku\n elif model_name == \"claude3_sonnet\":\n return aai.LemurModel.claude3_sonnet\n else:\n raise ValueError(f\"Model name not supported: {model_name}\")\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", 
+ "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "endpoint": { + "trace_as_metadata": true, + "options": [ + "task", + "summary", + "question-answer" + ], + "combobox": false, + "required": false, + "placeholder": "", + "show": true, + "name": "endpoint", + "value": "task", + "display_name": "Endpoint", + "advanced": true, + "dynamic": false, + "info": "The LeMUR endpoint to use. For 'summary' and 'question-answer', no prompt input is needed. See https://www.assemblyai.com/docs/api-reference/lemur/ for more info.", + "title_case": false, + "type": "str", + "_input_type": "DropdownInput" + }, + "final_model": { + "trace_as_metadata": true, + "options": [ + "claude3_5_sonnet", + "claude3_opus", + "claude3_haiku", + "claude3_sonnet" + ], + "combobox": false, + "required": false, + "placeholder": "", + "show": true, + "name": "final_model", + "value": "claude3_5_sonnet", + "display_name": "Final Model", + "advanced": true, + "dynamic": false, + "info": "The model that is used for the final prompt after compression is performed", + "title_case": false, + "type": "str", + "_input_type": "DropdownInput" + }, + "max_output_size": { + "trace_as_metadata": true, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "max_output_size", + "value": 2000, + "display_name": " Max Output Size", + "advanced": true, + "dynamic": false, + "info": "Max output size in tokens, up to 4000", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "prompt": { + "trace_as_input": true, + "multiline": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "prompt", + "value": "", + "display_name": "Input Prompt", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The text to prompt the model", + "title_case": false, + "type": "str", + "_input_type": 
"MultilineInput" + }, + "questions": { + "trace_as_input": true, + "multiline": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "questions", + "value": "", + "display_name": "Questions", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Comma-separated list of your questions. Only used if Endpoint is 'question-answer'", + "title_case": false, + "type": "str", + "_input_type": "MultilineInput" + }, + "temperature": { + "trace_as_metadata": true, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "temperature", + "value": 0, + "display_name": "Temperature", + "advanced": true, + "dynamic": false, + "info": "The temperature to use for the model", + "title_case": false, + "type": "float", + "_input_type": "FloatInput" + }, + "transcript_ids": { + "trace_as_input": true, + "multiline": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "transcript_ids", + "value": "", + "display_name": "Transcript IDs", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Comma-separated list of transcript IDs. LeMUR can perform actions over multiple transcripts. 
If provided, the Transcription Result is ignored.", + "title_case": false, + "type": "str", + "_input_type": "MultilineInput" + } + }, + "description": "Apply Large Language Models to spoken data using the AssemblyAI LeMUR framework", + "icon": "AssemblyAI", + "base_classes": [ + "Data" + ], + "display_name": "AssemblyAI LeMUR", + "documentation": "https://www.assemblyai.com/docs/lemur", + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Data" + ], + "selected": "Data", + "name": "lemur_response", + "display_name": "LeMUR Response", + "method": "run_lemur", + "value": "__UNDEFINED__", + "cache": true + } + ], + "field_order": [ + "api_key", + "transcription_result", + "prompt", + "final_model", + "temperature", + "max_output_size", + "endpoint", + "questions", + "transcript_ids" + ], + "beta": false, + "edited": false, + "lf_version": "1.0.18" + }, + "id": "AssemblyAILeMUR-jzwHZ", + "description": "Apply Large Language Models to spoken data using the AssemblyAI LeMUR framework", + "display_name": "AssemblyAI LeMUR" + }, + "selected": false, + "width": 384, + "height": 454, + "positionAbsolute": { + "x": -875.6482330011189, + "y": 887.1705799007382 + }, + "dragging": false + }, + { + "id": "ParseData-th7JM", + "type": "genericNode", + "position": { + "x": -862.5843195492909, + "y": -56.71774780191424 + }, + "data": { + "type": "ParseData", + "node": { + "template": { + "_type": "Component", + "data": { + "trace_as_metadata": true, + "list": false, + "trace_as_input": true, + "required": false, + "placeholder": "", + "show": true, + "name": "data", + "value": "", + "display_name": "Data", + "advanced": false, + "input_types": [ + "Data" + ], + "dynamic": false, + "info": "The data to convert to text.", + "title_case": false, + "type": "other", + "_input_type": "DataInput" + }, + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": 
true, + "multiline": true, + "value": "from langflow.custom import Component\nfrom langflow.helpers.data import data_to_text\nfrom langflow.io import DataInput, MultilineInput, Output, StrInput\nfrom langflow.schema.message import Message\n\n\nclass ParseDataComponent(Component):\n display_name = \"Parse Data\"\n description = \"Convert Data into plain text following a specified template.\"\n icon = \"braces\"\n name = \"ParseData\"\n\n inputs = [\n DataInput(name=\"data\", display_name=\"Data\", info=\"The data to convert to text.\"),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. It can contain the keys {text}, {data} or any other key in the Data.\",\n value=\"{text}\",\n ),\n StrInput(name=\"sep\", display_name=\"Separator\", advanced=True, value=\"\\n\"),\n ]\n\n outputs = [\n Output(display_name=\"Text\", name=\"text\", method=\"parse_data\"),\n ]\n\n def parse_data(self) -> Message:\n data = self.data if isinstance(self.data, list) else [self.data]\n template = self.template\n\n result_string = data_to_text(template, data, sep=self.sep)\n self.status = result_string\n return Message(text=result_string)\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "sep": { + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "sep", + "value": "\n", + "display_name": "Separator", + "advanced": true, + "dynamic": false, + "info": "", + "title_case": false, + "type": "str", + "_input_type": "StrInput" + }, + "template": { + "trace_as_input": true, + "multiline": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "template", + "value": "{text}", + "display_name": "Template", + "advanced": 
false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The template to use for formatting the data. It can contain the keys {text}, {data} or any other key in the Data.", + "title_case": false, + "type": "str", + "_input_type": "MultilineInput" + } + }, + "description": "Convert Data into plain text following a specified template.", + "icon": "braces", + "base_classes": [ + "Message" + ], + "display_name": "Parse Data", + "documentation": "", + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Message" + ], + "selected": "Message", + "name": "text", + "display_name": "Text", + "method": "parse_data", + "value": "__UNDEFINED__", + "cache": true + } + ], + "field_order": [ + "data", + "template", + "sep" + ], + "beta": false, + "edited": false, + "lf_version": "1.0.18" + }, + "id": "ParseData-th7JM" + }, + "selected": false, + "width": 384, + "height": 368, + "positionAbsolute": { + "x": -862.5843195492909, + "y": -56.71774780191424 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "AssemblyAITranscriptionJobCreator-Idt7P", + "sourceHandle": "{œdataTypeœ:œAssemblyAITranscriptionJobCreatorœ,œidœ:œAssemblyAITranscriptionJobCreator-Idt7Pœ,œnameœ:œtranscript_idœ,œoutput_typesœ:[œDataœ]}", + "target": "AssemblyAITranscriptionJobPoller-F46nf", + "targetHandle": "{œfieldNameœ:œtranscript_idœ,œidœ:œAssemblyAITranscriptionJobPoller-F46nfœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "data": { + "targetHandle": { + "fieldName": "transcript_id", + "id": "AssemblyAITranscriptionJobPoller-F46nf", + "inputTypes": [ + "Data" + ], + "type": "other" + }, + "sourceHandle": { + "dataType": "AssemblyAITranscriptionJobCreator", + "id": "AssemblyAITranscriptionJobCreator-Idt7P", + "name": "transcript_id", + "output_types": [ + "Data" + ] + } + }, + "id": 
"reactflow__edge-AssemblyAITranscriptionJobCreator-Idt7P{œdataTypeœ:œAssemblyAITranscriptionJobCreatorœ,œidœ:œAssemblyAITranscriptionJobCreator-Idt7Pœ,œnameœ:œtranscript_idœ,œoutput_typesœ:[œDataœ]}-AssemblyAITranscriptionJobPoller-F46nf{œfieldNameœ:œtranscript_idœ,œidœ:œAssemblyAITranscriptionJobPoller-F46nfœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "animated": false, + "className": "", + "selected": false + }, + { + "source": "AssemblyAITranscriptionJobPoller-F46nf", + "sourceHandle": "{œdataTypeœ:œAssemblyAITranscriptionJobPollerœ,œidœ:œAssemblyAITranscriptionJobPoller-F46nfœ,œnameœ:œtranscription_resultœ,œoutput_typesœ:[œDataœ]}", + "target": "AssemblyAIGetSubtitles-3sjU6", + "targetHandle": "{œfieldNameœ:œtranscription_resultœ,œidœ:œAssemblyAIGetSubtitles-3sjU6œ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "data": { + "targetHandle": { + "fieldName": "transcription_result", + "id": "AssemblyAIGetSubtitles-3sjU6", + "inputTypes": [ + "Data" + ], + "type": "other" + }, + "sourceHandle": { + "dataType": "AssemblyAITranscriptionJobPoller", + "id": "AssemblyAITranscriptionJobPoller-F46nf", + "name": "transcription_result", + "output_types": [ + "Data" + ] + } + }, + "id": "reactflow__edge-AssemblyAITranscriptionJobPoller-F46nf{œdataTypeœ:œAssemblyAITranscriptionJobPollerœ,œidœ:œAssemblyAITranscriptionJobPoller-F46nfœ,œnameœ:œtranscription_resultœ,œoutput_typesœ:[œDataœ]}-AssemblyAIGetSubtitles-3sjU6{œfieldNameœ:œtranscription_resultœ,œidœ:œAssemblyAIGetSubtitles-3sjU6œ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "animated": false, + "className": "" + }, + { + "source": "AssemblyAITranscriptionJobPoller-F46nf", + "sourceHandle": "{œdataTypeœ:œAssemblyAITranscriptionJobPollerœ,œidœ:œAssemblyAITranscriptionJobPoller-F46nfœ,œnameœ:œtranscription_resultœ,œoutput_typesœ:[œDataœ]}", + "target": "ParseData-th7JM", + "targetHandle": "{œfieldNameœ:œdataœ,œidœ:œParseData-th7JMœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "data": { + "targetHandle": { + "fieldName": "data", + "id": 
"ParseData-th7JM", + "inputTypes": [ + "Data" + ], + "type": "other" + }, + "sourceHandle": { + "dataType": "AssemblyAITranscriptionJobPoller", + "id": "AssemblyAITranscriptionJobPoller-F46nf", + "name": "transcription_result", + "output_types": [ + "Data" + ] + } + }, + "id": "reactflow__edge-AssemblyAITranscriptionJobPoller-F46nf{œdataTypeœ:œAssemblyAITranscriptionJobPollerœ,œidœ:œAssemblyAITranscriptionJobPoller-F46nfœ,œnameœ:œtranscription_resultœ,œoutput_typesœ:[œDataœ]}-ParseData-th7JM{œfieldNameœ:œdataœ,œidœ:œParseData-th7JMœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "animated": false, + "className": "" + }, + { + "source": "Prompt-IO8Cq", + "sourceHandle": "{œdataTypeœ:œPromptœ,œidœ:œPrompt-IO8Cqœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}", + "target": "AssemblyAILeMUR-jzwHZ", + "targetHandle": "{œfieldNameœ:œpromptœ,œidœ:œAssemblyAILeMUR-jzwHZœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "data": { + "targetHandle": { + "fieldName": "prompt", + "id": "AssemblyAILeMUR-jzwHZ", + "inputTypes": [ + "Message" + ], + "type": "str" + }, + "sourceHandle": { + "dataType": "Prompt", + "id": "Prompt-IO8Cq", + "name": "prompt", + "output_types": [ + "Message" + ] + } + }, + "id": "reactflow__edge-Prompt-IO8Cq{œdataTypeœ:œPromptœ,œidœ:œPrompt-IO8Cqœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}-AssemblyAILeMUR-jzwHZ{œfieldNameœ:œpromptœ,œidœ:œAssemblyAILeMUR-jzwHZœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "animated": false, + "className": "" + }, + { + "source": "AssemblyAITranscriptionJobPoller-F46nf", + "sourceHandle": "{œdataTypeœ:œAssemblyAITranscriptionJobPollerœ,œidœ:œAssemblyAITranscriptionJobPoller-F46nfœ,œnameœ:œtranscription_resultœ,œoutput_typesœ:[œDataœ]}", + "target": "AssemblyAILeMUR-jzwHZ", + "targetHandle": "{œfieldNameœ:œtranscription_resultœ,œidœ:œAssemblyAILeMUR-jzwHZœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "data": { + "targetHandle": { + "fieldName": "transcription_result", + "id": "AssemblyAILeMUR-jzwHZ", + "inputTypes": [ + "Data" + ], + 
"type": "other" + }, + "sourceHandle": { + "dataType": "AssemblyAITranscriptionJobPoller", + "id": "AssemblyAITranscriptionJobPoller-F46nf", + "name": "transcription_result", + "output_types": [ + "Data" + ] + } + }, + "id": "reactflow__edge-AssemblyAITranscriptionJobPoller-F46nf{œdataTypeœ:œAssemblyAITranscriptionJobPollerœ,œidœ:œAssemblyAITranscriptionJobPoller-F46nfœ,œnameœ:œtranscription_resultœ,œoutput_typesœ:[œDataœ]}-AssemblyAILeMUR-jzwHZ{œfieldNameœ:œtranscription_resultœ,œidœ:œAssemblyAILeMUR-jzwHZœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "animated": false, + "className": "" + } + ], + "viewport": { + "x": 733.3920447354355, + "y": -42.8262727047815, + "zoom": 0.2612816498236053 + } + }, + "user_id": "9c01eee4-17dd-460e-8c52-bba36d635a9d", + "folder_id": "54fc9211-d42d-4c3f-a932-ee4987f61988", + "description": "Transcribe and analyze audio with AssemblyAI", + "icon_bg_color": null, + "updated_at": "2024-09-26T14:55:47+00:00", + "webhook": false, + "id": "fa69381c-d1c4-4535-bc23-bc2fb4956e1e" +} \ No newline at end of file diff --git a/docs/docs/Integrations/Google/_category_.json b/docs/docs/Integrations/Google/_category_.json new file mode 100644 index 000000000000..8fd84656cebb --- /dev/null +++ b/docs/docs/Integrations/Google/_category_.json @@ -0,0 +1 @@ +{ "position": 2, "label": "Google" } diff --git a/docs/docs/Integrations/Google/integrations-setup-google-oauth-langflow.md b/docs/docs/Integrations/Google/integrations-setup-google-oauth-langflow.md new file mode 100644 index 000000000000..0bfcd3a63209 --- /dev/null +++ b/docs/docs/Integrations/Google/integrations-setup-google-oauth-langflow.md @@ -0,0 +1,69 @@ +--- +title: Integrate Google OAuth with Langflow +slug: /integrations-setup-google-oauth-langflow +sidebar_position: 3 +description: "A comprehensive guide on creating a Google OAuth app, obtaining tokens, and integrating them with Langflow's Google components." 
+--- + +import TOCInline from '@theme/TOCInline'; + +Langflow integrates with [Google OAuth](https://developers.google.com/identity/protocols/oauth2) for authenticating the [Gmail loader](/components-data#gmail-loader), [Google Drive loader](/components-data#google-drive-loader), and [Google Drive Search](/components-data#google-drive-search) components. + +Learn how to create an OAuth app in Google Cloud, obtain the necessary credentials and access tokens, and add them to Langflow’s Google components. + +## Create an OAuth Application in Google Cloud {#5b8981b15d86192d17b0e5725c1f95e7} + +1. Navigate to the [Google Cloud Console](https://console.cloud.google.com/). + +2. Click **Select a project**, and then click **New Project** to create a new project. + +![OAuth Client ID and Secret](/img/google/create-a-google-cloud-project.gif) + +3. To enable APIs for the project, select **APIs & Services**, and then click **Library**. Enable the APIs you need for your project. For example, if your flow uses the Google Drive component, enable the Google Drive API. +4. To navigate to the OAuth consent screen, click **APIs & Services**, and then click **OAuth consent screen**. +5. Populate your OAuth consent screen with the application name, user support email, required [scopes](https://developers.google.com/identity/protocols/oauth2/scopes), and authorized domains. +6. To create an **OAuth Client ID**, navigate to **Clients**, and then click **Create Client**. +7. Choose **Desktop app** as the application type, and then name your client ID. +8. Click **Create**. A Client ID and Client Secret are created. Download the credentials as a JSON file to your local machine and save it securely. + +![OAuth Client ID and Secret](/img/google/create-oauth-client-id.gif) + +--- + +## Retrieve Access and Refresh Tokens + +With your OAuth application configured and your credentials JSON file created, follow these steps to authenticate the Langflow application. + +1. 
Create a new project in Langflow. +2. Add a **Google OAuth Token** component to your flow. +3. In the **Credentials File** field of the Google OAuth Token component, enter the path to your **Credentials File**, the JSON file containing the Client ID credentials you downloaded from Google in the previous steps. +4. To authenticate your application, in the **Google OAuth Token** component, click **Play**. +A new tab opens in the browser to authenticate your application using your Google Cloud account. You must authenticate the application with the same Google account that created the OAuth credentials. + +:::info +If a new tab does not open automatically, check the Langflow **Logs** for the Google authentication URL. Open this URL in your browser to complete the authentication. +::: + +5. After successful authentication, your Langflow application can now request and refresh tokens. These tokens enable Langflow to interact with Google services on your behalf and execute the requests you’ve specified. + +## Create a flow with Google Drive loader + +For a pre-built JSON file of a flow that uses the Google Drive loader component, download the Google Drive Document Translation Example Flow JSON to your local machine. + +In this example, the **Google Drive loader** component loads a text file hosted on Google Drive, translates the text to Spanish, and returns it to a chat output. + +1. Download the Google Drive Document Translation Example Flow JSON to your local machine. +2. To import the downloaded JSON to Langflow, click **Options**, and then select **Import**. +3. In the **Credentials File** field of the Google OAuth Token component, enter the path to your **Credentials File**, the JSON file containing the Client ID credentials you downloaded from Google in the previous steps. +4. 
In the Google Drive loader component, in the `JSON String of the Service Account Token` field, enter the JSON string containing the token returned in the output of the Google OAuth Token component. + +The example flow includes a **Parse data** component to convert the `data` output of the Google OAuth Token component to the `text` input of the JSON Cleaner component. + +5. To allow the Langflow component to access the file in Google Drive, copy the Google Drive File ID from the document's URL. +:::info +The file ID is located between `/d/` and `/edit` in a Google Drive document's URL. +For example, in the URL `https://drive.google.com/file/d/1a2b3c4D5E6F7gHI8J9klmnopQ/edit`, the File ID is `1a2b3c4D5E6F7gHI8J9klmnopQ`. +::: +6. In the Google Drive loader component, in the **Document ID** field, paste the document URL. +7. Click the **Chat output** component, and then click **Play**. +The chat output should display a translated document. diff --git a/docs/docs/Integrations/Notion/Conversational_Notion_Agent.json b/docs/docs/Integrations/Notion/Conversational_Notion_Agent.json new file mode 100644 index 000000000000..2b8b4063d855 --- /dev/null +++ b/docs/docs/Integrations/Notion/Conversational_Notion_Agent.json @@ -0,0 +1 @@ +{"id":"e070f0be-edc4-4512-bb0f-e53307062a26","data":{"nodes":[{"id":"AddContentToPage-ZezUn","type":"genericNode","position":{"x":1416.217259177943,"y":1709.6205867919527},"data":{"type":"AddContentToPage","node":{"template":{"_type":"Component","block_id":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"block_id","value":"","display_name":"Page/Block ID","advanced":true,"dynamic":false,"info":"The ID of the page/block to add the content.","title_case":false,"type":"str","_input_type":"StrInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import json\nfrom typing import Dict, Any, Union\nfrom markdown import 
markdown\nfrom bs4 import BeautifulSoup\nimport requests\nfrom langflow.io import Output\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput, MultilineInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\nfrom pydantic import BaseModel, Field\n\n\nclass AddContentToPage(LCToolComponent):\n display_name: str = \"Add Content to Page \"\n description: str = \"Convert markdown text to Notion blocks and append them to a Notion page.\"\n documentation: str = \"https://developers.notion.com/reference/patch-block-children\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n MultilineInput(\n name=\"markdown_text\",\n display_name=\"Markdown Text\",\n info=\"The markdown text to convert to Notion blocks.\",\n ),\n StrInput(\n name=\"block_id\",\n display_name=\"Page/Block ID\",\n info=\"The ID of the page/block to add the content.\",\n ),\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class AddContentToPageSchema(BaseModel):\n markdown_text: str = Field(..., description=\"The markdown text to convert to Notion blocks.\")\n block_id: str = Field(..., description=\"The ID of the page/block to add the content.\")\n\n def run_model(self) -> Data:\n result = self._add_content_to_page(self.markdown_text, self.block_id)\n return Data(data=result, text=json.dumps(result))\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"add_content_to_notion_page\",\n description=\"Convert markdown text to Notion blocks and append them to a Notion page.\",\n func=self._add_content_to_page,\n args_schema=self.AddContentToPageSchema,\n )\n\n def 
_add_content_to_page(self, markdown_text: str, block_id: str) -> Union[Dict[str, Any], str]:\n try:\n html_text = markdown(markdown_text)\n soup = BeautifulSoup(html_text, \"html.parser\")\n blocks = self.process_node(soup)\n\n url = f\"https://api.notion.com/v1/blocks/{block_id}/children\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Content-Type\": \"application/json\",\n \"Notion-Version\": \"2022-06-28\",\n }\n\n data = {\n \"children\": blocks,\n }\n\n response = requests.patch(url, headers=headers, json=data)\n response.raise_for_status()\n\n return response.json()\n except requests.exceptions.RequestException as e:\n error_message = f\"Error: Failed to add content to Notion page. {str(e)}\"\n if hasattr(e, \"response\") and e.response is not None:\n error_message += f\" Status code: {e.response.status_code}, Response: {e.response.text}\"\n return error_message\n except Exception as e:\n return f\"Error: An unexpected error occurred while adding content to Notion page. 
{str(e)}\"\n\n def process_node(self, node):\n blocks = []\n if isinstance(node, str):\n text = node.strip()\n if text:\n if text.startswith(\"#\"):\n heading_level = text.count(\"#\", 0, 6)\n heading_text = text[heading_level:].strip()\n if heading_level == 1:\n blocks.append(self.create_block(\"heading_1\", heading_text))\n elif heading_level == 2:\n blocks.append(self.create_block(\"heading_2\", heading_text))\n elif heading_level == 3:\n blocks.append(self.create_block(\"heading_3\", heading_text))\n else:\n blocks.append(self.create_block(\"paragraph\", text))\n elif node.name == \"h1\":\n blocks.append(self.create_block(\"heading_1\", node.get_text(strip=True)))\n elif node.name == \"h2\":\n blocks.append(self.create_block(\"heading_2\", node.get_text(strip=True)))\n elif node.name == \"h3\":\n blocks.append(self.create_block(\"heading_3\", node.get_text(strip=True)))\n elif node.name == \"p\":\n code_node = node.find(\"code\")\n if code_node:\n code_text = code_node.get_text()\n language, code = self.extract_language_and_code(code_text)\n blocks.append(self.create_block(\"code\", code, language=language))\n elif self.is_table(str(node)):\n blocks.extend(self.process_table(node))\n else:\n blocks.append(self.create_block(\"paragraph\", node.get_text(strip=True)))\n elif node.name == \"ul\":\n blocks.extend(self.process_list(node, \"bulleted_list_item\"))\n elif node.name == \"ol\":\n blocks.extend(self.process_list(node, \"numbered_list_item\"))\n elif node.name == \"blockquote\":\n blocks.append(self.create_block(\"quote\", node.get_text(strip=True)))\n elif node.name == \"hr\":\n blocks.append(self.create_block(\"divider\", \"\"))\n elif node.name == \"img\":\n blocks.append(self.create_block(\"image\", \"\", image_url=node.get(\"src\")))\n elif node.name == \"a\":\n blocks.append(self.create_block(\"bookmark\", node.get_text(strip=True), link_url=node.get(\"href\")))\n elif node.name == \"table\":\n blocks.extend(self.process_table(node))\n\n for child in 
node.children:\n if isinstance(child, str):\n continue\n blocks.extend(self.process_node(child))\n\n return blocks\n\n def extract_language_and_code(self, code_text):\n lines = code_text.split(\"\\n\")\n language = lines[0].strip()\n code = \"\\n\".join(lines[1:]).strip()\n return language, code\n\n def is_code_block(self, text):\n return text.startswith(\"```\")\n\n def extract_code_block(self, text):\n lines = text.split(\"\\n\")\n language = lines[0].strip(\"`\").strip()\n code = \"\\n\".join(lines[1:]).strip(\"`\").strip()\n return language, code\n\n def is_table(self, text):\n rows = text.split(\"\\n\")\n if len(rows) < 2:\n return False\n\n has_separator = False\n for i, row in enumerate(rows):\n if \"|\" in row:\n cells = [cell.strip() for cell in row.split(\"|\")]\n cells = [cell for cell in cells if cell] # Remove empty cells\n if i == 1 and all(set(cell) <= set(\"-|\") for cell in cells):\n has_separator = True\n elif not cells:\n return False\n\n return has_separator and len(rows) >= 3\n\n def process_list(self, node, list_type):\n blocks = []\n for item in node.find_all(\"li\"):\n item_text = item.get_text(strip=True)\n checked = item_text.startswith(\"[x]\")\n is_checklist = item_text.startswith(\"[ ]\") or checked\n\n if is_checklist:\n item_text = item_text.replace(\"[x]\", \"\").replace(\"[ ]\", \"\").strip()\n blocks.append(self.create_block(\"to_do\", item_text, checked=checked))\n else:\n blocks.append(self.create_block(list_type, item_text))\n return blocks\n\n def process_table(self, node):\n blocks = []\n header_row = node.find(\"thead\").find(\"tr\") if node.find(\"thead\") else None\n body_rows = node.find(\"tbody\").find_all(\"tr\") if node.find(\"tbody\") else []\n\n if header_row or body_rows:\n table_width = max(\n len(header_row.find_all([\"th\", \"td\"])) if header_row else 0,\n max(len(row.find_all([\"th\", \"td\"])) for row in body_rows),\n )\n\n table_block = self.create_block(\"table\", \"\", table_width=table_width, 
has_column_header=bool(header_row))\n blocks.append(table_block)\n\n if header_row:\n header_cells = [cell.get_text(strip=True) for cell in header_row.find_all([\"th\", \"td\"])]\n header_row_block = self.create_block(\"table_row\", header_cells)\n blocks.append(header_row_block)\n\n for row in body_rows:\n cells = [cell.get_text(strip=True) for cell in row.find_all([\"th\", \"td\"])]\n row_block = self.create_block(\"table_row\", cells)\n blocks.append(row_block)\n\n return blocks\n\n def create_block(self, block_type: str, content: str, **kwargs) -> Dict[str, Any]:\n block: dict[str, Any] = {\n \"object\": \"block\",\n \"type\": block_type,\n block_type: {},\n }\n\n if block_type in [\n \"paragraph\",\n \"heading_1\",\n \"heading_2\",\n \"heading_3\",\n \"bulleted_list_item\",\n \"numbered_list_item\",\n \"quote\",\n ]:\n block[block_type][\"rich_text\"] = [\n {\n \"type\": \"text\",\n \"text\": {\n \"content\": content,\n },\n }\n ]\n elif block_type == \"to_do\":\n block[block_type][\"rich_text\"] = [\n {\n \"type\": \"text\",\n \"text\": {\n \"content\": content,\n },\n }\n ]\n block[block_type][\"checked\"] = kwargs.get(\"checked\", False)\n elif block_type == \"code\":\n block[block_type][\"rich_text\"] = [\n {\n \"type\": \"text\",\n \"text\": {\n \"content\": content,\n },\n }\n ]\n block[block_type][\"language\"] = kwargs.get(\"language\", \"plain text\")\n elif block_type == \"image\":\n block[block_type] = {\"type\": \"external\", \"external\": {\"url\": kwargs.get(\"image_url\", \"\")}}\n elif block_type == \"divider\":\n pass\n elif block_type == \"bookmark\":\n block[block_type][\"url\"] = kwargs.get(\"link_url\", \"\")\n elif block_type == \"table\":\n block[block_type][\"table_width\"] = kwargs.get(\"table_width\", 0)\n block[block_type][\"has_column_header\"] = kwargs.get(\"has_column_header\", False)\n block[block_type][\"has_row_header\"] = kwargs.get(\"has_row_header\", False)\n elif block_type == \"table_row\":\n block[block_type][\"cells\"] = 
[[{\"type\": \"text\", \"text\": {\"content\": cell}} for cell in content]]\n\n return block\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"markdown_text":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"markdown_text","value":"","display_name":"Markdown Text","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The markdown text to convert to Notion blocks.","title_case":false,"type":"str","_input_type":"MultilineInput"},"notion_secret":{"load_from_db":true,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"}},"description":"Convert markdown text to Notion blocks and append them to a Notion page.","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"Add Content to Page ","documentation":"https://developers.notion.com/reference/patch-block-children","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true}],"field_order":["markdown_text","block_id","notion_secret"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"AddContentToPage-ZezUn","description":"Convert markdown text to Notion blocks and append them to a Notion page.","display_name":"Add Content to Page 
"},"selected":false,"width":384,"height":330,"dragging":false,"positionAbsolute":{"x":1416.217259177943,"y":1709.6205867919527}},{"id":"NotionPageCreator-6SCB5","type":"genericNode","position":{"x":1413.9782390799146,"y":2051.645785494985},"data":{"type":"NotionPageCreator","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import json\nfrom typing import Dict, Any, Union\nimport requests\nfrom pydantic import BaseModel, Field\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput, MultilineInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\nfrom langflow.io import Output\n\nclass NotionPageCreator(LCToolComponent):\n display_name: str = \"Create Page \"\n description: str = \"A component for creating Notion pages.\"\n documentation: str = \"https://docs.langflow.org/integrations/notion/page-create\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n StrInput(\n name=\"database_id\",\n display_name=\"Database ID\",\n info=\"The ID of the Notion database.\",\n ),\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n MultilineInput(\n name=\"properties_json\",\n display_name=\"Properties (JSON)\",\n info=\"The properties of the new page as a JSON string.\",\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionPageCreatorSchema(BaseModel):\n database_id: str = Field(..., description=\"The ID of the Notion database.\")\n properties_json: str = Field(..., description=\"The properties of the new page as a JSON string.\")\n\n def run_model(self) -> Data:\n result = 
self._create_notion_page(self.database_id, self.properties_json)\n if isinstance(result, str):\n # An error occurred, return it as text\n return Data(text=result)\n else:\n # Success, return the created page data\n output = \"Created page properties:\\n\"\n for prop_name, prop_value in result.get(\"properties\", {}).items():\n output += f\"{prop_name}: {prop_value}\\n\"\n return Data(text=output, data=result)\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"create_notion_page\",\n description=\"Create a new page in a Notion database. IMPORTANT: Use the tool to check the Database properties for more details before using this tool.\",\n func=self._create_notion_page,\n args_schema=self.NotionPageCreatorSchema,\n )\n\n def _create_notion_page(self, database_id: str, properties_json: str) -> Union[Dict[str, Any], str]:\n if not database_id or not properties_json:\n return \"Invalid input. Please provide 'database_id' and 'properties_json'.\"\n\n try:\n properties = json.loads(properties_json)\n except json.JSONDecodeError as e:\n return f\"Invalid properties format. Please provide a valid JSON string. Error: {str(e)}\"\n\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Content-Type\": \"application/json\",\n \"Notion-Version\": \"2022-06-28\",\n }\n\n data = {\n \"parent\": {\"database_id\": database_id},\n \"properties\": properties,\n }\n\n try:\n response = requests.post(\"https://api.notion.com/v1/pages\", headers=headers, json=data)\n response.raise_for_status()\n result = response.json()\n return result\n except requests.exceptions.RequestException as e:\n error_message = f\"Failed to create Notion page. 
Error: {str(e)}\"\n if hasattr(e, \"response\") and e.response is not None:\n error_message += f\" Status code: {e.response.status_code}, Response: {e.response.text}\"\n return error_message\n\n def __call__(self, *args, **kwargs):\n return self._create_notion_page(*args, **kwargs)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"database_id":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"database_id","value":"","display_name":"Database ID","advanced":true,"dynamic":false,"info":"The ID of the Notion database.","title_case":false,"type":"str","_input_type":"StrInput"},"notion_secret":{"load_from_db":true,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"},"properties_json":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"properties_json","value":"","display_name":"Properties (JSON)","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The properties of the new page as a JSON string.","title_case":false,"type":"str","_input_type":"MultilineInput"}},"description":"A component for creating Notion pages.","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"Create Page 
","documentation":"https://docs.langflow.org/integrations/notion/page-create","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true}],"field_order":["database_id","notion_secret","properties_json"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionPageCreator-6SCB5","description":"A component for creating Notion pages.","display_name":"Create Page "},"selected":false,"width":384,"height":302,"dragging":false,"positionAbsolute":{"x":1413.9782390799146,"y":2051.645785494985}},{"id":"NotionDatabaseProperties-aeWil","type":"genericNode","position":{"x":1004.5753613670959,"y":1713.914531491452},"data":{"type":"NotionDatabaseProperties","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import requests\nfrom typing import Dict, Union\nfrom pydantic import BaseModel, Field\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\nfrom langflow.io import Output\n\nclass NotionDatabaseProperties(LCToolComponent):\n display_name: str = \"List Database Properties \"\n description: str = \"Retrieve properties of a Notion database.\"\n documentation: str = \"https://docs.langflow.org/integrations/notion/list-database-properties\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n StrInput(\n name=\"database_id\",\n display_name=\"Database ID\",\n info=\"The ID of the Notion database.\",\n ),\n SecretStrInput(\n name=\"notion_secret\",\n 
display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionDatabasePropertiesSchema(BaseModel):\n database_id: str = Field(..., description=\"The ID of the Notion database.\")\n\n def run_model(self) -> Data:\n result = self._fetch_database_properties(self.database_id)\n if isinstance(result, str):\n # An error occurred, return it as text\n return Data(text=result)\n else:\n # Success, return the properties\n return Data(text=str(result), data=result)\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"notion_database_properties\",\n description=\"Retrieve properties of a Notion database. Input should include the database ID.\",\n func=self._fetch_database_properties,\n args_schema=self.NotionDatabasePropertiesSchema,\n )\n\n def _fetch_database_properties(self, database_id: str) -> Union[Dict, str]:\n url = f\"https://api.notion.com/v1/databases/{database_id}\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Notion-Version\": \"2022-06-28\", # Use the latest supported version\n }\n try:\n response = requests.get(url, headers=headers)\n response.raise_for_status()\n data = response.json()\n properties = data.get(\"properties\", {})\n return properties\n except requests.exceptions.RequestException as e:\n return f\"Error fetching Notion database properties: {str(e)}\"\n except ValueError as e:\n return f\"Error parsing Notion API response: {str(e)}\"\n except Exception as e:\n return f\"An unexpected error occurred: 
{str(e)}\"\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"database_id":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"database_id","value":"","display_name":"Database ID","advanced":true,"dynamic":false,"info":"The ID of the Notion database.","title_case":false,"type":"str","_input_type":"StrInput"},"notion_secret":{"load_from_db":true,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"}},"description":"Retrieve properties of a Notion database.","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"List Database Properties ","documentation":"https://docs.langflow.org/integrations/notion/list-database-properties","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true}],"field_order":["database_id","notion_secret"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionDatabaseProperties-aeWil","description":"Retrieve properties of a Notion database.","display_name":"List Database Properties 
"},"selected":false,"width":384,"height":302,"dragging":false,"positionAbsolute":{"x":1004.5753613670959,"y":1713.914531491452}},{"id":"NotionListPages-znA3w","type":"genericNode","position":{"x":1006.1848442547046,"y":2022.7880909242833},"data":{"type":"NotionListPages","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import requests\nimport json\nfrom typing import Dict, Any, List, Optional\nfrom pydantic import BaseModel, Field\nfrom langflow.io import Output\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput, MultilineInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\n\n\nclass NotionListPages(LCToolComponent):\n display_name: str = \"List Pages \"\n description: str = (\n \"Query a Notion database with filtering and sorting. \"\n \"The input should be a JSON string containing the 'filter' and 'sorts' objects. \"\n \"Example input:\\n\"\n '{\"filter\": {\"property\": \"Status\", \"select\": {\"equals\": \"Done\"}}, \"sorts\": [{\"timestamp\": \"created_time\", \"direction\": \"descending\"}]}'\n )\n documentation: str = \"https://docs.langflow.org/integrations/notion/list-pages\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n StrInput(\n name=\"database_id\",\n display_name=\"Database ID\",\n info=\"The ID of the Notion database to query.\",\n ),\n MultilineInput(\n name=\"query_json\",\n display_name=\"Database query (JSON)\",\n info=\"A JSON string containing the filters and sorts that will be used for querying the database. 
Leave empty for no filters or sorts.\",\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n class NotionListPagesSchema(BaseModel):\n database_id: str = Field(..., description=\"The ID of the Notion database to query.\")\n query_json: Optional[str] = Field(\n default=\"\",\n description=\"A JSON string containing the filters and sorts for querying the database. Leave empty for no filters or sorts.\",\n )\n\n def run_model(self) -> List[Data]:\n result = self._query_notion_database(self.database_id, self.query_json)\n\n if isinstance(result, str):\n # An error occurred, return it as a single record\n return [Data(text=result)]\n\n records = []\n combined_text = f\"Pages found: {len(result)}\\n\\n\"\n\n for page in result:\n page_data = {\n \"id\": page[\"id\"],\n \"url\": page[\"url\"],\n \"created_time\": page[\"created_time\"],\n \"last_edited_time\": page[\"last_edited_time\"],\n \"properties\": page[\"properties\"],\n }\n\n text = (\n f\"id: {page['id']}\\n\"\n f\"url: {page['url']}\\n\"\n f\"created_time: {page['created_time']}\\n\"\n f\"last_edited_time: {page['last_edited_time']}\\n\"\n f\"properties: {json.dumps(page['properties'], indent=2)}\\n\\n\"\n )\n\n combined_text += text\n records.append(Data(text=text, **page_data))\n\n self.status = records\n return records\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"notion_list_pages\",\n description=self.description,\n func=self._query_notion_database,\n args_schema=self.NotionListPagesSchema,\n )\n\n def _query_notion_database(self, database_id: str, query_json: Optional[str] = None) -> List[Dict[str, Any]] | str:\n url = f\"https://api.notion.com/v1/databases/{database_id}/query\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Content-Type\": \"application/json\",\n \"Notion-Version\": \"2022-06-28\",\n 
}\n\n query_payload = {}\n if query_json and query_json.strip():\n try:\n query_payload = json.loads(query_json)\n except json.JSONDecodeError as e:\n return f\"Invalid JSON format for query: {str(e)}\"\n\n try:\n response = requests.post(url, headers=headers, json=query_payload)\n response.raise_for_status()\n results = response.json()\n return results[\"results\"]\n except requests.exceptions.RequestException as e:\n return f\"Error querying Notion database: {str(e)}\"\n except KeyError:\n return \"Unexpected response format from Notion API\"\n except Exception as e:\n return f\"An unexpected error occurred: {str(e)}\"\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"database_id":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"database_id","value":"","display_name":"Database ID","advanced":true,"dynamic":false,"info":"The ID of the Notion database to query.","title_case":false,"type":"str","_input_type":"StrInput"},"notion_secret":{"load_from_db":true,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"},"query_json":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"query_json","value":"","display_name":"Database query (JSON)","advanced":true,"input_types":["Message"],"dynamic":false,"info":"A JSON string containing the filters and sorts that will be used for querying the database. Leave empty for no filters or sorts.","title_case":false,"type":"str","_input_type":"MultilineInput"}},"description":"Query a Notion database with filtering and sorting. 
The input should be a JSON string containing the 'filter' and 'sorts' objects. Example input:\n{\"filter\": {\"property\": \"Status\", \"select\": {\"equals\": \"Done\"}}, \"sorts\": [{\"timestamp\": \"created_time\", \"direction\": \"descending\"}]}","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"List Pages ","documentation":"https://docs.langflow.org/integrations/notion/list-pages","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true}],"field_order":["notion_secret","database_id","query_json"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionListPages-znA3w","description":"Query a Notion database with filtering and sorting. The input should be a JSON string containing the 'filter' and 'sorts' objects. 
Example input:\n{\"filter\": {\"property\": \"Status\", \"select\": {\"equals\": \"Done\"}}, \"sorts\": [{\"timestamp\": \"created_time\", \"direction\": \"descending\"}]}","display_name":"List Pages "},"selected":false,"width":384,"height":470,"dragging":false,"positionAbsolute":{"x":1006.1848442547046,"y":2022.7880909242833}},{"id":"NotionUserList-C3eGn","type":"genericNode","position":{"x":2260.15497405973,"y":1717.4551881467207},"data":{"type":"NotionUserList","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import requests\nfrom typing import List, Dict\nfrom pydantic import BaseModel\nfrom langflow.io import Output\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\n\n\nclass NotionUserList(LCToolComponent):\n display_name = \"List Users \"\n description = \"Retrieve users from Notion.\"\n documentation = \"https://docs.langflow.org/integrations/notion/list-users\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionUserListSchema(BaseModel):\n pass\n\n def run_model(self) -> List[Data]:\n users = self._list_users()\n records = []\n combined_text = \"\"\n\n for user in users:\n output = \"User:\\n\"\n for key, value in user.items():\n output += f\"{key.replace('_', ' ').title()}: {value}\\n\"\n output += \"________________________\\n\"\n\n combined_text += output\n records.append(Data(text=output, data=user))\n\n self.status = records\n return 
records\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"notion_list_users\",\n description=\"Retrieve users from Notion.\",\n func=self._list_users,\n args_schema=self.NotionUserListSchema,\n )\n\n def _list_users(self) -> List[Dict]:\n url = \"https://api.notion.com/v1/users\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Notion-Version\": \"2022-06-28\",\n }\n\n response = requests.get(url, headers=headers)\n response.raise_for_status()\n\n data = response.json()\n results = data[\"results\"]\n\n users = []\n for user in results:\n user_data = {\n \"id\": user[\"id\"],\n \"type\": user[\"type\"],\n \"name\": user.get(\"name\", \"\"),\n \"avatar_url\": user.get(\"avatar_url\", \"\"),\n }\n users.append(user_data)\n\n return users\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"notion_secret":{"load_from_db":true,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"}},"description":"Retrieve users from Notion.","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"List Users 
","documentation":"https://docs.langflow.org/integrations/notion/list-users","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true}],"field_order":["notion_secret"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionUserList-C3eGn","description":"Retrieve users from Notion.","display_name":"List Users "},"selected":true,"width":384,"height":302,"dragging":false,"positionAbsolute":{"x":2260.15497405973,"y":1717.4551881467207}},{"id":"NotionPageContent-SlL21","type":"genericNode","position":{"x":1826.4242329724448,"y":1715.6365113286927},"data":{"type":"NotionPageContent","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import requests\nfrom pydantic import BaseModel, Field\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\nfrom langflow.io import Output\n\nclass NotionPageContent(LCToolComponent):\n display_name = \"Page Content Viewer \"\n description = \"Retrieve the content of a Notion page as plain text.\"\n documentation = \"https://docs.langflow.org/integrations/notion/page-content-viewer\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n StrInput(\n name=\"page_id\",\n display_name=\"Page ID\",\n info=\"The ID of the Notion page to retrieve.\",\n ),\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n ]\n outputs = [\n 
Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionPageContentSchema(BaseModel):\n page_id: str = Field(..., description=\"The ID of the Notion page to retrieve.\")\n\n def run_model(self) -> Data:\n result = self._retrieve_page_content(self.page_id)\n if isinstance(result, str) and result.startswith(\"Error:\"):\n # An error occurred, return it as text\n return Data(text=result)\n else:\n # Success, return the content\n return Data(text=result, data={\"content\": result})\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"notion_page_content\",\n description=\"Retrieve the content of a Notion page as plain text.\",\n func=self._retrieve_page_content,\n args_schema=self.NotionPageContentSchema,\n )\n\n def _retrieve_page_content(self, page_id: str) -> str:\n blocks_url = f\"https://api.notion.com/v1/blocks/{page_id}/children?page_size=100\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Notion-Version\": \"2022-06-28\",\n }\n try:\n blocks_response = requests.get(blocks_url, headers=headers)\n blocks_response.raise_for_status()\n blocks_data = blocks_response.json()\n return self.parse_blocks(blocks_data.get(\"results\", []))\n except requests.exceptions.RequestException as e:\n error_message = f\"Error: Failed to retrieve Notion page content. {str(e)}\"\n if hasattr(e, \"response\") and e.response is not None:\n error_message += f\" Status code: {e.response.status_code}, Response: {e.response.text}\"\n return error_message\n except Exception as e:\n return f\"Error: An unexpected error occurred while retrieving Notion page content. 
{str(e)}\"\n\n def parse_blocks(self, blocks: list) -> str:\n content = \"\"\n for block in blocks:\n block_type = block.get(\"type\")\n if block_type in [\"paragraph\", \"heading_1\", \"heading_2\", \"heading_3\", \"quote\"]:\n content += self.parse_rich_text(block[block_type].get(\"rich_text\", [])) + \"\\n\\n\"\n elif block_type in [\"bulleted_list_item\", \"numbered_list_item\"]:\n content += self.parse_rich_text(block[block_type].get(\"rich_text\", [])) + \"\\n\"\n elif block_type == \"to_do\":\n content += self.parse_rich_text(block[\"to_do\"].get(\"rich_text\", [])) + \"\\n\"\n elif block_type == \"code\":\n content += self.parse_rich_text(block[\"code\"].get(\"rich_text\", [])) + \"\\n\\n\"\n elif block_type == \"image\":\n content += f\"[Image: {block['image'].get('external', {}).get('url', 'No URL')}]\\n\\n\"\n elif block_type == \"divider\":\n content += \"---\\n\\n\"\n return content.strip()\n\n def parse_rich_text(self, rich_text: list) -> str:\n return \"\".join(segment.get(\"plain_text\", \"\") for segment in rich_text)\n\n def __call__(self, *args, **kwargs):\n return self._retrieve_page_content(*args, **kwargs)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"notion_secret":{"load_from_db":true,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"},"page_id":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"page_id","value":"","display_name":"Page ID","advanced":true,"dynamic":false,"info":"The ID of the Notion page to retrieve.","title_case":false,"type":"str","_input_type":"StrInput"}},"description":"Retrieve the content of a Notion page as plain 
text.","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"Page Content Viewer ","documentation":"https://docs.langflow.org/integrations/notion/page-content-viewer","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true}],"field_order":["page_id","notion_secret"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionPageContent-SlL21","description":"Retrieve the content of a Notion page as plain text.","display_name":"Page Content Viewer "},"selected":false,"width":384,"height":330,"dragging":false,"positionAbsolute":{"x":1826.4242329724448,"y":1715.6365113286927}},{"id":"NotionSearch-VS2mI","type":"genericNode","position":{"x":2258.1166047519732,"y":2034.3959294952945},"data":{"type":"NotionSearch","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import requests\nfrom typing import Dict, Any, List\nfrom pydantic import BaseModel, Field\nfrom langflow.io import Output\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput, DropdownInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\n\n\nclass NotionSearch(LCToolComponent):\n display_name: str = \"Search \"\n description: str = \"Searches all pages and databases that have been shared with an integration. 
The search field can be an empty value to show all values from that search\"\n documentation: str = \"https://docs.langflow.org/integrations/notion/search\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n StrInput(\n name=\"query\",\n display_name=\"Search Query\",\n info=\"The text that the API compares page and database titles against.\",\n ),\n DropdownInput(\n name=\"filter_value\",\n display_name=\"Filter Type\",\n info=\"Limits the results to either only pages or only databases.\",\n options=[\"page\", \"database\"],\n value=\"page\",\n ),\n DropdownInput(\n name=\"sort_direction\",\n display_name=\"Sort Direction\",\n info=\"The direction to sort the results.\",\n options=[\"ascending\", \"descending\"],\n value=\"descending\",\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionSearchSchema(BaseModel):\n query: str = Field(..., description=\"The search query text.\")\n filter_value: str = Field(default=\"page\", description=\"Filter type: 'page' or 'database'.\")\n sort_direction: str = Field(default=\"descending\", description=\"Sort direction: 'ascending' or 'descending'.\")\n\n def run_model(self) -> List[Data]:\n results = self._search_notion(self.query, self.filter_value, self.sort_direction)\n records = []\n combined_text = f\"Results found: {len(results)}\\n\\n\"\n\n for result in results:\n result_data = {\n \"id\": result[\"id\"],\n \"type\": result[\"object\"],\n \"last_edited_time\": result[\"last_edited_time\"],\n }\n\n if result[\"object\"] == \"page\":\n result_data[\"title_or_url\"] = result[\"url\"]\n text = f\"id: {result['id']}\\ntitle_or_url: {result['url']}\\n\"\n elif result[\"object\"] == \"database\":\n if \"title\" in result and 
isinstance(result[\"title\"], list) and len(result[\"title\"]) > 0:\n result_data[\"title_or_url\"] = result[\"title\"][0][\"plain_text\"]\n text = f\"id: {result['id']}\\ntitle_or_url: {result['title'][0]['plain_text']}\\n\"\n else:\n result_data[\"title_or_url\"] = \"N/A\"\n text = f\"id: {result['id']}\\ntitle_or_url: N/A\\n\"\n\n text += f\"type: {result['object']}\\nlast_edited_time: {result['last_edited_time']}\\n\\n\"\n combined_text += text\n records.append(Data(text=text, data=result_data))\n\n self.status = records\n return records\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"notion_search\",\n description=\"Search Notion pages and databases. Input should include the search query and optionally filter type and sort direction.\",\n func=self._search_notion,\n args_schema=self.NotionSearchSchema,\n )\n\n def _search_notion(\n self, query: str, filter_value: str = \"page\", sort_direction: str = \"descending\"\n ) -> List[Dict[str, Any]]:\n url = \"https://api.notion.com/v1/search\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Content-Type\": \"application/json\",\n \"Notion-Version\": \"2022-06-28\",\n }\n\n data = {\n \"query\": query,\n \"filter\": {\"value\": filter_value, \"property\": \"object\"},\n \"sort\": {\"direction\": sort_direction, \"timestamp\": \"last_edited_time\"},\n }\n\n response = requests.post(url, headers=headers, json=data)\n response.raise_for_status()\n\n results = response.json()\n return results[\"results\"]\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"filter_value":{"trace_as_metadata":true,"options":["page","database"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"filter_value","value":"page","display_name":"Filter Type","advanced":true,"dynamic":false,"info":"Limits the results to either only pages or only 
databases.","title_case":false,"type":"str","_input_type":"DropdownInput"},"notion_secret":{"load_from_db":true,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"},"query":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"query","value":"","display_name":"Search Query","advanced":true,"dynamic":false,"info":"The text that the API compares page and database titles against.","title_case":false,"type":"str","_input_type":"StrInput"},"sort_direction":{"trace_as_metadata":true,"options":["ascending","descending"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"sort_direction","value":"descending","display_name":"Sort Direction","advanced":true,"dynamic":false,"info":"The direction to sort the results.","title_case":false,"type":"str","_input_type":"DropdownInput"}},"description":"Searches all pages and databases that have been shared with an integration. 
The search field can be an empty value to show all values from that search","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"Search ","documentation":"https://docs.langflow.org/integrations/notion/search","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true}],"field_order":["notion_secret","query","filter_value","sort_direction"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionSearch-VS2mI","description":"Searches all pages and databases that have been shared with an integration.","display_name":"Search "},"selected":false,"width":384,"height":386,"dragging":false,"positionAbsolute":{"x":2258.1166047519732,"y":2034.3959294952945}},{"id":"NotionPageUpdate-6FyYd","type":"genericNode","position":{"x":1827.0574354713603,"y":2055.9948126656136},"data":{"type":"NotionPageUpdate","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import json\nimport requests\nfrom typing import Dict, Any, Union\nfrom pydantic import BaseModel, Field\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput, MultilineInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\nfrom loguru import logger\nfrom langflow.io import Output\n\nclass NotionPageUpdate(LCToolComponent):\n display_name: str = \"Update Page Property \"\n description: str = \"Update the properties of a Notion page.\"\n documentation: str = \"https://docs.langflow.org/integrations/notion/page-update\"\n icon = 
\"NotionDirectoryLoader\"\n\n inputs = [\n StrInput(\n name=\"page_id\",\n display_name=\"Page ID\",\n info=\"The ID of the Notion page to update.\",\n ),\n MultilineInput(\n name=\"properties\",\n display_name=\"Properties\",\n info=\"The properties to update on the page (as a JSON string or a dictionary).\",\n ),\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionPageUpdateSchema(BaseModel):\n page_id: str = Field(..., description=\"The ID of the Notion page to update.\")\n properties: Union[str, Dict[str, Any]] = Field(\n ..., description=\"The properties to update on the page (as a JSON string or a dictionary).\"\n )\n\n def run_model(self) -> Data:\n result = self._update_notion_page(self.page_id, self.properties)\n if isinstance(result, str):\n # An error occurred, return it as text\n return Data(text=result)\n else:\n # Success, return the updated page data\n output = \"Updated page properties:\\n\"\n for prop_name, prop_value in result.get(\"properties\", {}).items():\n output += f\"{prop_name}: {prop_value}\\n\"\n return Data(text=output, data=result)\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"update_notion_page\",\n description=\"Update the properties of a Notion page. 
IMPORTANT: Use the tool to check the Database properties for more details before using this tool.\",\n func=self._update_notion_page,\n args_schema=self.NotionPageUpdateSchema,\n )\n\n def _update_notion_page(self, page_id: str, properties: Union[str, Dict[str, Any]]) -> Union[Dict[str, Any], str]:\n url = f\"https://api.notion.com/v1/pages/{page_id}\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Content-Type\": \"application/json\",\n \"Notion-Version\": \"2022-06-28\", # Use the latest supported version\n }\n\n # Parse properties if it's a string\n if isinstance(properties, str):\n try:\n parsed_properties = json.loads(properties)\n except json.JSONDecodeError as e:\n error_message = f\"Invalid JSON format for properties: {str(e)}\"\n logger.error(error_message)\n return error_message\n\n else:\n parsed_properties = properties\n\n data = {\"properties\": parsed_properties}\n\n try:\n logger.info(f\"Sending request to Notion API: URL: {url}, Data: {json.dumps(data)}\")\n response = requests.patch(url, headers=headers, json=data)\n response.raise_for_status()\n updated_page = response.json()\n\n logger.info(f\"Successfully updated Notion page. 
Response: {json.dumps(updated_page)}\")\n return updated_page\n except requests.exceptions.HTTPError as e:\n error_message = f\"HTTP Error occurred: {str(e)}\"\n if e.response is not None:\n error_message += f\"\\nStatus code: {e.response.status_code}\"\n error_message += f\"\\nResponse body: {e.response.text}\"\n logger.error(error_message)\n return error_message\n except requests.exceptions.RequestException as e:\n error_message = f\"An error occurred while making the request: {str(e)}\"\n logger.error(error_message)\n return error_message\n except Exception as e:\n error_message = f\"An unexpected error occurred: {str(e)}\"\n logger.error(error_message)\n return error_message\n\n def __call__(self, *args, **kwargs):\n return self._update_notion_page(*args, **kwargs)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"notion_secret":{"load_from_db":true,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"},"page_id":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"page_id","value":"","display_name":"Page ID","advanced":true,"dynamic":false,"info":"The ID of the Notion page to update.","title_case":false,"type":"str","_input_type":"StrInput"},"properties":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"properties","value":"","display_name":"Properties","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The properties to update on the page (as a JSON string or a dictionary).","title_case":false,"type":"str","_input_type":"MultilineInput"}},"description":"Update 
the properties of a Notion page.","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"Update Page Property ","documentation":"https://docs.langflow.org/integrations/notion/page-update","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true}],"field_order":["page_id","properties","notion_secret"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionPageUpdate-6FyYd","description":"Update the properties of a Notion page.","display_name":"Update Page Property "},"selected":false,"width":384,"height":302,"dragging":false,"positionAbsolute":{"x":1827.0574354713603,"y":2055.9948126656136}},{"id":"ToolCallingAgent-50Gcd","type":"genericNode","position":{"x":2186.0530739759893,"y":612.1744804997304},"data":{"type":"ToolCallingAgent","node":{"template":{"_type":"Component","chat_history":{"trace_as_metadata":true,"list":true,"trace_as_input":true,"required":false,"placeholder":"","show":true,"name":"chat_history","value":"","display_name":"Chat History","advanced":false,"input_types":["Data"],"dynamic":false,"info":"","title_case":false,"type":"other","_input_type":"DataInput"},"llm":{"trace_as_metadata":true,"list":false,"required":true,"placeholder":"","show":true,"name":"llm","value":"","display_name":"Language 
Model","advanced":false,"input_types":["LanguageModel"],"dynamic":false,"info":"","title_case":false,"type":"other","_input_type":"HandleInput"},"tools":{"trace_as_metadata":true,"list":true,"required":false,"placeholder":"","show":true,"name":"tools","value":"","display_name":"Tools","advanced":false,"input_types":["Tool","BaseTool"],"dynamic":false,"info":"","title_case":false,"type":"other","_input_type":"HandleInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from typing import Optional, List\n\nfrom langchain.agents import create_tool_calling_agent\nfrom langchain_core.prompts import ChatPromptTemplate, PromptTemplate, HumanMessagePromptTemplate\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.inputs import MultilineInput\nfrom langflow.inputs.inputs import HandleInput, DataInput\nfrom langflow.schema import Data\n\n\nclass ToolCallingAgentComponent(LCToolsAgentComponent):\n display_name: str = \"Tool Calling Agent\"\n description: str = \"Agent that uses tools\"\n icon = \"LangChain\"\n beta = True\n name = \"ToolCallingAgent\"\n\n inputs = LCToolsAgentComponent._base_inputs + [\n HandleInput(name=\"llm\", display_name=\"Language Model\", input_types=[\"LanguageModel\"], required=True),\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"System Prompt\",\n info=\"System prompt for the agent.\",\n value=\"You are a helpful assistant\",\n ),\n MultilineInput(\n name=\"user_prompt\", display_name=\"Prompt\", info=\"This prompt must contain 'input' key.\", value=\"{input}\"\n ),\n DataInput(name=\"chat_history\", display_name=\"Chat History\", is_list=True, advanced=True),\n ]\n\n def get_chat_history_data(self) -> Optional[List[Data]]:\n return self.chat_history\n\n def create_agent_runnable(self):\n if \"input\" not in self.user_prompt:\n raise ValueError(\"Prompt must contain 'input' key.\")\n messages = [\n (\"system\", self.system_prompt),\n (\"placeholder\", 
\"{chat_history}\"),\n HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=[\"input\"], template=self.user_prompt)),\n (\"placeholder\", \"{agent_scratchpad}\"),\n ]\n prompt = ChatPromptTemplate.from_messages(messages)\n return create_tool_calling_agent(self.llm, self.tools, prompt)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"handle_parsing_errors":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"handle_parsing_errors","value":true,"display_name":"Handle Parse Errors","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"bool","_input_type":"BoolInput"},"input_value":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"input_value","value":"","display_name":"Input","advanced":false,"input_types":["Message"],"dynamic":false,"info":"","title_case":false,"type":"str","_input_type":"MessageTextInput"},"max_iterations":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"max_iterations","value":15,"display_name":"Max Iterations","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"int","_input_type":"IntInput"},"system_prompt":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"system_prompt","value":"","display_name":"System Prompt","advanced":false,"input_types":["Message"],"dynamic":false,"info":"System prompt for the 
agent.","title_case":false,"type":"str","_input_type":"MultilineInput"},"user_prompt":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"user_prompt","value":"{input}","display_name":"Prompt","advanced":true,"input_types":["Message"],"dynamic":false,"info":"This prompt must contain 'input' key.","title_case":false,"type":"str","_input_type":"MultilineInput"},"verbose":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"verbose","value":true,"display_name":"Verbose","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"bool","_input_type":"BoolInput"}},"description":"Agent that uses tools","icon":"LangChain","base_classes":["AgentExecutor","Message"],"display_name":"Tool Calling Agent","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["AgentExecutor"],"selected":"AgentExecutor","name":"agent","display_name":"Agent","method":"build_agent","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Message"],"selected":"Message","name":"response","display_name":"Response","method":"message_response","value":"__UNDEFINED__","cache":true}],"field_order":["input_value","handle_parsing_errors","verbose","max_iterations","tools","llm","system_prompt","user_prompt","chat_history"],"beta":true,"edited":false,"lf_version":"1.0.17"},"id":"ToolCallingAgent-50Gcd"},"selected":false,"width":384,"height":532,"dragging":false,"positionAbsolute":{"x":2186.0530739759893,"y":612.1744804997304}},{"id":"ChatOutput-TSCup","type":"genericNode","position":{"x":2649.190603849412,"y":841.0466487848925},"data":{"type":"ChatOutput","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import 
BoolInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.memory import store_message\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI, MESSAGE_SENDER_USER\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n name = \"ChatOutput\"\n\n inputs = [\n MessageTextInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Message to be passed as output.\",\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n ]\n outputs = [\n Output(display_name=\"Message\", name=\"message\", method=\"message_response\"),\n ]\n\n def message_response(self) -> Message:\n message = Message(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n )\n if (\n self.session_id\n and isinstance(message, Message)\n and isinstance(message.text, str)\n and self.should_store_message\n ):\n store_message(\n message,\n flow_id=self.graph.flow_id,\n )\n self.message.value = message\n\n self.status = message\n return message\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"data_template":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"data_template","value":"{text}","display_name":"Data Template","advanced":true,"input_types":["Message"],"dynamic":false,"info":"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"input_value":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"input_value","value":"","display_name":"Text","advanced":false,"input_types":["Message"],"dynamic":false,"info":"Message to be passed as output.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"sender":{"trace_as_metadata":true,"options":["Machine","User"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"sender","value":"Machine","display_name":"Sender Type","advanced":true,"dynamic":false,"info":"Type of sender.","title_case":false,"type":"str","_input_type":"DropdownInput"},"sender_name":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"sender_name","value":"AI","display_name":"Sender Name","advanced":true,"input_types":["Message"],"dynamic":false,"info":"Name of the sender.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"session_id":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"session_id","value":"","display_name":"Session ID","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The session ID of the chat. 
If empty, the current session ID parameter will be used.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"should_store_message":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"should_store_message","value":true,"display_name":"Store Messages","advanced":true,"dynamic":false,"info":"Store the message in the history.","title_case":false,"type":"bool","_input_type":"BoolInput"}},"description":"Display a chat message in the Playground.","icon":"ChatOutput","base_classes":["Message"],"display_name":"Chat Output","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"message","display_name":"Message","method":"message_response","value":"__UNDEFINED__","cache":true}],"field_order":["input_value","should_store_message","sender","sender_name","session_id","data_template"],"beta":false,"edited":false,"lf_version":"1.0.17"},"id":"ChatOutput-TSCup"},"selected":false,"width":384,"height":302,"positionAbsolute":{"x":2649.190603849412,"y":841.0466487848925},"dragging":false},{"id":"ChatInput-bcq6D","type":"genericNode","position":{"x":557.6262725075026,"y":724.8518930903978},"data":{"type":"ChatInput","node":{"template":{"_type":"Component","files":{"trace_as_metadata":true,"file_path":"","fileTypes":["txt","md","mdx","csv","json","yaml","yml","xml","html","htm","pdf","docx","py","sh","sql","js","ts","tsx","jpg","jpeg","png","bmp","image"],"list":true,"required":false,"placeholder":"","show":true,"name":"files","value":"","display_name":"Files","advanced":true,"dynamic":false,"info":"Files to be sent with the message.","title_case":false,"type":"file","_input_type":"FileInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom 
langflow.inputs import BoolInput\nfrom langflow.io import DropdownInput, FileInput, MessageTextInput, MultilineInput, Output\nfrom langflow.memory import store_message\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_USER, MESSAGE_SENDER_NAME_USER\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"ChatInput\"\n name = \"ChatInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. 
If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Message\", name=\"message\", method=\"message_response\"),\n ]\n\n def message_response(self) -> Message:\n message = Message(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n )\n\n if (\n self.session_id\n and isinstance(message, Message)\n and isinstance(message.text, str)\n and self.should_store_message\n ):\n store_message(\n message,\n flow_id=self.graph.flow_id,\n )\n self.message.value = message\n\n self.status = message\n return message\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"input_value":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"input_value","value":"list users","display_name":"Text","advanced":false,"input_types":["Message"],"dynamic":false,"info":"Message to be passed as input.","title_case":false,"type":"str","_input_type":"MultilineInput"},"sender":{"trace_as_metadata":true,"options":["Machine","User"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"sender","value":"User","display_name":"Sender Type","advanced":true,"dynamic":false,"info":"Type of sender.","title_case":false,"type":"str","_input_type":"DropdownInput"},"sender_name":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"sender_name","value":"User","display_name":"Sender Name","advanced":true,"input_types":["Message"],"dynamic":false,"info":"Name of the 
sender.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"session_id":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"session_id","value":"","display_name":"Session ID","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The session ID of the chat. If empty, the current session ID parameter will be used.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"should_store_message":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"should_store_message","value":true,"display_name":"Store Messages","advanced":true,"dynamic":false,"info":"Store the message in the history.","title_case":false,"type":"bool","_input_type":"BoolInput"}},"description":"Get chat inputs from the Playground.","icon":"ChatInput","base_classes":["Message"],"display_name":"Chat Input","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"message","display_name":"Message","method":"message_response","value":"__UNDEFINED__","cache":true}],"field_order":["input_value","should_store_message","sender","sender_name","session_id","files"],"beta":false,"edited":false,"lf_version":"1.0.17"},"id":"ChatInput-bcq6D"},"selected":false,"width":384,"height":302,"positionAbsolute":{"x":557.6262725075026,"y":724.8518930903978},"dragging":false},{"id":"ToolkitComponent-2lNG0","type":"genericNode","position":{"x":1731.8884789245508,"y":1378.7846304343796},"data":{"type":"ToolkitComponent","node":{"template":{"_type":"Component","tools":{"trace_as_metadata":true,"list":true,"required":false,"placeholder":"","show":true,"name":"tools","value":"","display_name":"Tools","advanced":false,"input_types":["Tool"],"dynamic":false,"info":"List of tools to 
combine.","title_case":false,"type":"other","_input_type":"HandleInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from typing import List\r\nfrom langflow.custom import Component\r\nfrom langflow.inputs import HandleInput, MessageTextInput\r\nfrom langflow.template import Output\r\nfrom langflow.field_typing import Tool, Embeddings\r\nfrom langchain.tools.base import BaseTool, StructuredTool\r\nfrom langflow.schema import Data\r\n\r\nclass ToolkitComponent(Component):\r\n display_name = \"Toolkit\"\r\n description = \"Combines multiple tools into a single list of tools.\"\r\n icon = \"pocket-knife\"\r\n\r\n inputs = [\r\n HandleInput(\r\n name=\"tools\",\r\n display_name=\"Tools\",\r\n input_types=[\"Tool\"],\r\n info=\"List of tools to combine.\",\r\n is_list=True,\r\n ),\r\n ]\r\n\r\n outputs = [\r\n Output(display_name=\"Tools\", name=\"generated_tools\", method=\"generate_toolkit\"),\r\n Output(display_name=\"Tool Data\", name=\"tool_data\", method=\"generate_tool_data\"),\r\n ]\r\n\r\n def generate_toolkit(self) -> List[BaseTool]:\r\n combined_tools = []\r\n name_count = {}\r\n for index, tool in enumerate(self.tools):\r\n self.log(f\"Processing tool {index}: {type(tool)}\")\r\n if isinstance(tool, (BaseTool, StructuredTool)):\r\n processed_tool = tool\r\n elif hasattr(tool, 'build_tool'):\r\n processed_tool = tool.build_tool()\r\n else:\r\n self.log(f\"Unsupported tool type: {type(tool)}. 
Attempting to process anyway.\")\r\n processed_tool = tool\r\n\r\n original_name = getattr(processed_tool, 'name', f\"UnnamedTool_{index}\")\r\n self.log(f\"Original tool name: {original_name}\")\r\n\r\n if original_name not in name_count:\r\n name_count[original_name] = 0\r\n final_name = original_name\r\n else:\r\n name_count[original_name] += 1\r\n final_name = f\"{original_name}_{name_count[original_name]}\"\r\n\r\n if hasattr(processed_tool, 'name'):\r\n processed_tool.name = final_name\r\n\r\n self.log(f\"Final tool name: {final_name}\")\r\n\r\n if isinstance(processed_tool, StructuredTool) and hasattr(processed_tool, 'args_schema'):\r\n processed_tool.args_schema.name = f\"{final_name}_Schema\"\r\n\r\n combined_tools.append(processed_tool)\r\n\r\n debug_info = \"\\n\".join([f\"Tool {i}: {getattr(tool, 'name', f'UnnamedTool_{i}')} (Original: {getattr(tool, '_original_name', 'N/A')}) - Type: {type(tool)}\" for i, tool in enumerate(combined_tools)])\r\n self.log(\"Final toolkit composition:\")\r\n self.log(debug_info)\r\n\r\n\r\n self.status = combined_tools\r\n return combined_tools\r\n\r\n def generate_tool_data(self) -> List[Data]:\r\n tool_data = []\r\n for tool in self.generate_toolkit():\r\n tool_data.append(Data(\r\n data={\r\n \"name\": getattr(tool, 'name', 'Unnamed Tool'),\r\n \"description\": getattr(tool, 'description', 'No description available')\r\n }\r\n ))\r\n return tool_data","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false}},"description":"Combines multiple tools into a single list of 
tools.","icon":"pocket-knife","base_classes":["BaseTool","Data"],"display_name":"Toolkit","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["BaseTool"],"selected":"BaseTool","name":"generated_tools","display_name":"Tools","method":"generate_toolkit","value":"__UNDEFINED__","cache":true},{"types":["Data"],"selected":"Data","name":"tool_data","display_name":"Tool Data","method":"generate_tool_data","value":"__UNDEFINED__","cache":true,"hidden":true}],"field_order":["tools"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"ToolkitComponent-2lNG0"},"selected":false,"width":384,"height":292,"dragging":false,"positionAbsolute":{"x":1731.8884789245508,"y":1378.7846304343796}},{"id":"OpenAIModel-BJWIg","type":"genericNode","position":{"x":1718.9773974162958,"y":603.4642741725065},"data":{"type":"OpenAIModel","node":{"template":{"_type":"Component","api_key":{"load_from_db":true,"required":false,"placeholder":"","show":true,"name":"api_key","value":"","display_name":"OpenAI API Key","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The OpenAI API Key to use for the OpenAI model.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import operator\nfrom functools import reduce\n\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI 
LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = LCModelComponent._base_inputs + [\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. 
You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\") # type: ignore\n else:\n output = output.bind(response_format={\"type\": \"json_object\"}) # type: ignore\n\n return output # type: ignore\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return\n if isinstance(e, 
BadRequestError):\n message = e.body.get(\"message\") # type: ignore\n if message:\n return message\n return\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"input_value":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"input_value","value":"","display_name":"Input","advanced":true,"input_types":["Message"],"dynamic":false,"info":"","title_case":false,"type":"str","_input_type":"MessageInput"},"json_mode":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"json_mode","value":false,"display_name":"JSON Mode","advanced":true,"dynamic":false,"info":"If True, it will output JSON regardless of passing a schema.","title_case":false,"type":"bool","_input_type":"BoolInput"},"max_tokens":{"trace_as_metadata":true,"range_spec":{"step_type":"float","min":0,"max":128000,"step":0.1},"list":false,"required":false,"placeholder":"","show":true,"name":"max_tokens","value":"","display_name":"Max Tokens","advanced":true,"dynamic":false,"info":"The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.","title_case":false,"type":"int","_input_type":"IntInput"},"model_kwargs":{"trace_as_input":true,"list":false,"required":false,"placeholder":"","show":true,"name":"model_kwargs","value":{},"display_name":"Model Kwargs","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"dict","_input_type":"DictInput"},"model_name":{"trace_as_metadata":true,"options":["gpt-4o-mini","gpt-4o","gpt-4-turbo","gpt-4-turbo-preview","gpt-4","gpt-3.5-turbo","gpt-3.5-turbo-0125"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"model_name","value":"gpt-4o","display_name":"Model Name","advanced":false,"dynamic":false,"info":"","title_case":false,"type":"str","_input_type":"DropdownInput"},"openai_api_base":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"openai_api_base","value":"","display_name":"OpenAI API Base","advanced":true,"dynamic":false,"info":"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.","title_case":false,"type":"str","_input_type":"StrInput"},"output_schema":{"trace_as_input":true,"list":true,"required":false,"placeholder":"","show":true,"name":"output_schema","value":{},"display_name":"Schema","advanced":true,"dynamic":false,"info":"The schema for the Output of the model. You must pass the word JSON in the prompt. 
If left blank, JSON mode will be disabled.","title_case":false,"type":"dict","_input_type":"DictInput"},"seed":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"seed","value":1,"display_name":"Seed","advanced":true,"dynamic":false,"info":"The seed controls the reproducibility of the job.","title_case":false,"type":"int","_input_type":"IntInput"},"stream":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"stream","value":false,"display_name":"Stream","advanced":true,"dynamic":false,"info":"Stream the response from the model. Streaming works only in Chat.","title_case":false,"type":"bool","_input_type":"BoolInput"},"system_message":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"system_message","value":"","display_name":"System Message","advanced":true,"input_types":["Message"],"dynamic":false,"info":"System message to pass to the model.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"temperature":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"temperature","value":"0.2","display_name":"Temperature","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"float","_input_type":"FloatInput"}},"description":"Generates text using OpenAI LLMs.","icon":"OpenAI","base_classes":["LanguageModel","Message"],"display_name":"OpenAI","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"text_output","display_name":"Text","method":"text_response","value":"__UNDEFINED__","cache":true},{"types":["LanguageModel"],"selected":"LanguageModel","name":"model_output","display_name":"Language 
Model","method":"build_model","value":"__UNDEFINED__","cache":true}],"field_order":["input_value","system_message","stream","max_tokens","model_kwargs","json_mode","output_schema","model_name","openai_api_base","api_key","temperature","seed"],"beta":false,"edited":false,"lf_version":"1.0.17"},"id":"OpenAIModel-BJWIg"},"selected":false,"width":384,"height":433,"positionAbsolute":{"x":1718.9773974162958,"y":603.4642741725065},"dragging":false},{"id":"Memory-CTQWu","type":"genericNode","position":{"x":1240.7186213296432,"y":1059.5754404393747},"data":{"type":"Memory","node":{"template":{"_type":"Component","memory":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"memory","value":"","display_name":"External Memory","advanced":true,"input_types":["BaseChatMessageHistory"],"dynamic":false,"info":"Retrieve messages from an external memory. If empty, it will use the Langflow tables.","title_case":false,"type":"other","_input_type":"HandleInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from langchain.memory import ConversationBufferMemory\n\nfrom langflow.custom import Component\nfrom langflow.field_typing import BaseChatMemory\nfrom langflow.helpers.data import data_to_text\nfrom langflow.inputs import HandleInput\nfrom langflow.io import DropdownInput, IntInput, MessageTextInput, MultilineInput, Output\nfrom langflow.memory import LCBuiltinChatMemory, get_messages\nfrom langflow.schema import Data\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_USER\n\n\nclass MemoryComponent(Component):\n display_name = \"Chat Memory\"\n description = \"Retrieves stored chat messages from Langflow tables or an external memory.\"\n icon = \"message-square-more\"\n name = \"Memory\"\n\n inputs = [\n HandleInput(\n name=\"memory\",\n display_name=\"External Memory\",\n input_types=[\"BaseChatMessageHistory\"],\n 
info=\"Retrieve messages from an external memory. If empty, it will use the Langflow tables.\",\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER, \"Machine and User\"],\n value=\"Machine and User\",\n info=\"Filter by sender type.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Filter by sender name.\",\n advanced=True,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Messages\",\n value=100,\n info=\"Number of messages to retrieve.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"order\",\n display_name=\"Order\",\n options=[\"Ascending\", \"Descending\"],\n value=\"Ascending\",\n info=\"Order of the messages.\",\n advanced=True,\n ),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. 
It can contain the keys {text}, {sender} or any other key in the message data.\",\n value=\"{sender_name}: {text}\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Messages (Data)\", name=\"messages\", method=\"retrieve_messages\"),\n Output(display_name=\"Messages (Text)\", name=\"messages_text\", method=\"retrieve_messages_as_text\"),\n Output(display_name=\"Memory\", name=\"lc_memory\", method=\"build_lc_memory\"),\n ]\n\n def retrieve_messages(self) -> Data:\n sender = self.sender\n sender_name = self.sender_name\n session_id = self.session_id\n n_messages = self.n_messages\n order = \"DESC\" if self.order == \"Descending\" else \"ASC\"\n\n if sender == \"Machine and User\":\n sender = None\n\n if self.memory:\n # override session_id\n self.memory.session_id = session_id\n\n stored = self.memory.messages\n # langchain memories are supposed to return messages in ascending order\n if order == \"DESC\":\n stored = stored[::-1]\n if n_messages:\n stored = stored[:n_messages]\n stored = [Message.from_lc_message(m) for m in stored]\n if sender:\n expected_type = MESSAGE_SENDER_AI if sender == MESSAGE_SENDER_AI else MESSAGE_SENDER_USER\n stored = [m for m in stored if m.type == expected_type]\n else:\n stored = get_messages(\n sender=sender,\n sender_name=sender_name,\n session_id=session_id,\n limit=n_messages,\n order=order,\n )\n self.status = stored\n return stored\n\n def retrieve_messages_as_text(self) -> Message:\n stored_text = data_to_text(self.template, self.retrieve_messages())\n self.status = stored_text\n return Message(text=stored_text)\n\n def build_lc_memory(self) -> BaseChatMemory:\n if self.memory:\n chat_memory = self.memory\n else:\n chat_memory = LCBuiltinChatMemory(flow_id=self.flow_id, session_id=self.session_id)\n return 
ConversationBufferMemory(chat_memory=chat_memory)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"n_messages":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"n_messages","value":100,"display_name":"Number of Messages","advanced":true,"dynamic":false,"info":"Number of messages to retrieve.","title_case":false,"type":"int","_input_type":"IntInput"},"order":{"trace_as_metadata":true,"options":["Ascending","Descending"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"order","value":"Ascending","display_name":"Order","advanced":true,"dynamic":false,"info":"Order of the messages.","title_case":false,"type":"str","_input_type":"DropdownInput"},"sender":{"trace_as_metadata":true,"options":["Machine","User","Machine and User"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"sender","value":"Machine and User","display_name":"Sender Type","advanced":true,"dynamic":false,"info":"Filter by sender type.","title_case":false,"type":"str","_input_type":"DropdownInput"},"sender_name":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"sender_name","value":"","display_name":"Sender Name","advanced":true,"input_types":["Message"],"dynamic":false,"info":"Filter by sender name.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"session_id":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"session_id","value":"","display_name":"Session ID","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The session ID of the chat. 
If empty, the current session ID parameter will be used.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"template":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"template","value":"{sender_name}: {text}","display_name":"Template","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The template to use for formatting the data. It can contain the keys {text}, {sender} or any other key in the message data.","title_case":false,"type":"str","_input_type":"MultilineInput"}},"description":"Retrieves stored chat messages from Langflow tables or an external memory.","icon":"message-square-more","base_classes":["BaseChatMemory","Data","Message"],"display_name":"Chat Memory","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"messages","display_name":"Messages (Data)","method":"retrieve_messages","value":"__UNDEFINED__","cache":true},{"types":["Message"],"selected":"Message","name":"messages_text","display_name":"Messages 
(Text)","method":"retrieve_messages_as_text","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["BaseChatMemory"],"selected":"BaseChatMemory","name":"lc_memory","display_name":"Memory","method":"build_lc_memory","value":"__UNDEFINED__","cache":true,"hidden":true}],"field_order":["memory","sender","sender_name","n_messages","session_id","order","template"],"beta":false,"edited":false,"lf_version":"1.0.17"},"id":"Memory-CTQWu"},"selected":false,"width":384,"height":244,"dragging":false,"positionAbsolute":{"x":1240.7186213296432,"y":1059.5754404393747}},{"id":"Prompt-0dWZu","type":"genericNode","position":{"x":1227.4862876736101,"y":616.3826667128244},"data":{"type":"Prompt","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n ]\n\n outputs = [\n Output(display_name=\"Prompt Message\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(\n self,\n ) -> Message:\n prompt = Message.from_template_and_variables(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n 
template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n def post_code_processing(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"\n This function is called after the code validation is done.\n \"\"\"\n frontend_node = super().post_code_processing(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"template":{"trace_as_input":true,"list":false,"required":false,"placeholder":"","show":true,"name":"template","value":"\nYou are a Notion Agent, an AI assistant designed to help users interact with their Notion workspace. Your role is to understand user requests, utilize the appropriate Notion tools to fulfill these requests, and communicate clearly with the user throughout the process.\n\nGeneral Guidelines:\n\n1. Carefully analyze each user request to determine which tool(s) you need to use.\n\n2. Before using any tool, ensure you have all the necessary information. If you need more details, ask the user clear and concise questions.\n\n3. When using a tool, provide a brief explanation to the user about what you're doing and why.\n\n4. 
After using a tool, interpret the results for the user in a clear, concise manner.\n\n5. If a task requires multiple steps, outline your plan to the user before proceeding.\n\n6. If you encounter an error or limitation, explain it to the user and suggest possible solutions or alternative approaches.\n\n7. Always maintain a helpful and professional tone in your interactions.\n\n8. Be proactive in offering suggestions or alternatives if the user's initial request can't be fulfilled exactly as stated.\n\n9. When providing information or results, focus on relevance and clarity. Summarize when necessary, but provide details when they're important.\n\n10. If a user's request is unclear or could be interpreted in multiple ways, ask for clarification before proceeding.\n\n11. After completing a task, summarize what was accomplished and suggest any relevant next steps or additional actions the user might want to take.\n\n12. If a user asks about capabilities you don't have or tools you can't access, clearly explain your limitations and suggest alternative ways to assist if possible.\n\nRemember, your primary goal is to assist the user effectively with their Notion-related tasks using the provided tools. Always strive for clarity, accuracy, and helpfulness in your interactions. 
Adapt your communication style to the user's level of technical understanding and familiarity with Notion.\n\nNow, you're ready to assist the user\n\nToday is: {CURRENT_DATE}\n","display_name":"Template","advanced":false,"dynamic":false,"info":"","title_case":false,"type":"prompt","_input_type":"PromptInput"},"CURRENT_DATE":{"field_type":"str","required":false,"placeholder":"","list":false,"show":true,"multiline":true,"value":"","fileTypes":[],"file_path":"","password":false,"name":"CURRENT_DATE","display_name":"CURRENT_DATE","advanced":false,"input_types":["Message","Text"],"dynamic":false,"info":"","load_from_db":false,"title_case":false,"type":"str"}},"description":"Create a prompt template with dynamic variables.","icon":"prompts","is_input":null,"is_output":null,"is_composition":null,"base_classes":["Message"],"name":"","display_name":"Prompt","documentation":"","custom_fields":{"template":["CURRENT_DATE"]},"output_types":[],"full_path":null,"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"prompt","hidden":null,"display_name":"Prompt Message","method":"build_prompt","value":"__UNDEFINED__","cache":true}],"field_order":["template"],"beta":false,"error":null,"edited":false,"lf_version":"1.0.17"},"id":"Prompt-0dWZu"},"selected":false,"width":384,"height":416,"positionAbsolute":{"x":1227.4862876736101,"y":616.3826667128244},"dragging":false},{"id":"CurrentDateComponent-NSNQ8","type":"genericNode","position":{"x":1092.5108512311297,"y":868.3249850335523},"data":{"type":"CurrentDateComponent","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from datetime import datetime\r\nfrom zoneinfo import ZoneInfo\r\nfrom typing import List\r\n\r\nfrom langflow.custom import Component\r\nfrom langflow.io import DropdownInput, Output\r\nfrom langflow.schema.message import Message\r\n\r\nclass 
CurrentDateComponent(Component):\r\n display_name = \"Current Date 🕰️\"\r\n description = \"Returns the current date and time in the selected timezone.\"\r\n icon = \"clock\"\r\n\r\n inputs = [\r\n DropdownInput(\r\n name=\"timezone\",\r\n display_name=\"Timezone\",\r\n options=[\r\n \"UTC\",\r\n \"US/Eastern\",\r\n \"US/Central\",\r\n \"US/Mountain\",\r\n \"US/Pacific\",\r\n \"Europe/London\",\r\n \"Europe/Paris\",\r\n \"Asia/Tokyo\",\r\n \"Australia/Sydney\",\r\n \"America/Sao_Paulo\",\r\n \"America/Cuiaba\",\r\n ],\r\n value=\"UTC\",\r\n info=\"Select the timezone for the current date and time.\",\r\n ),\r\n ]\r\n\r\n outputs = [\r\n Output(display_name=\"Current Date\", name=\"current_date\", method=\"get_current_date\"),\r\n ]\r\n\r\n def get_current_date(self) -> Message:\r\n try:\r\n tz = ZoneInfo(self.timezone)\r\n current_date = datetime.now(tz).strftime(\"%Y-%m-%d %H:%M:%S %Z\")\r\n result = f\"Current date and time in {self.timezone}: {current_date}\"\r\n self.status = result\r\n return Message(text=result)\r\n except Exception as e:\r\n error_message = f\"Error: {str(e)}\"\r\n self.status = error_message\r\n return Message(text=error_message)","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"timezone":{"trace_as_metadata":true,"options":["UTC","US/Eastern","US/Central","US/Mountain","US/Pacific","Europe/London","Europe/Paris","Asia/Tokyo","Australia/Sydney","America/Sao_Paulo","America/Cuiaba"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"timezone","value":"UTC","display_name":"Timezone","advanced":false,"dynamic":false,"info":"Select the timezone for the current date and time.","title_case":false,"type":"str","_input_type":"DropdownInput"}},"description":"Returns the current date and time in the selected timezone.","icon":"clock","base_classes":["Message"],"display_name":"Current 
Date","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"current_date","display_name":"Current Date","method":"get_current_date","value":"__UNDEFINED__","cache":true}],"field_order":["timezone"],"beta":false,"edited":true,"official":false,"lf_version":"1.0.17"},"id":"CurrentDateComponent-NSNQ8","showNode":false},"selected":false,"width":96,"height":96,"dragging":false,"positionAbsolute":{"x":1092.5108512311297,"y":868.3249850335523}}],"edges":[{"source":"ChatInput-bcq6D","target":"ToolCallingAgent-50Gcd","sourceHandle":"{œdataTypeœ:œChatInputœ,œidœ:œChatInput-bcq6Dœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}","targetHandle":"{œfieldNameœ:œinput_valueœ,œidœ:œToolCallingAgent-50Gcdœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}","id":"reactflow__edge-ChatInput-bcq6D{œdataTypeœ:œChatInputœ,œidœ:œChatInput-bcq6Dœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-ToolCallingAgent-50Gcd{œfieldNameœ:œinput_valueœ,œidœ:œToolCallingAgent-50Gcdœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}","data":{"targetHandle":{"fieldName":"input_value","id":"ToolCallingAgent-50Gcd","inputTypes":["Message"],"type":"str"},"sourceHandle":{"dataType":"ChatInput","id":"ChatInput-bcq6D","name":"message","output_types":["Message"]}},"selected":false,"className":""},{"source":"ToolCallingAgent-50Gcd","target":"ChatOutput-TSCup","sourceHandle":"{œdataTypeœ:œToolCallingAgentœ,œidœ:œToolCallingAgent-50Gcdœ,œnameœ:œresponseœ,œoutput_typesœ:[œMessageœ]}","targetHandle":"{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-TSCupœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}","id":"reactflow__edge-ToolCallingAgent-50Gcd{œdataTypeœ:œToolCallingAgentœ,œidœ:œToolCallingAgent-50Gcdœ,œnameœ:œresponseœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-TSCup{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-TSCupœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}","data":{"targetHandle":{"fieldName":"input_value","id":"ChatOutput-TSCup","inputTypes":["
Message"],"type":"str"},"sourceHandle":{"dataType":"ToolCallingAgent","id":"ToolCallingAgent-50Gcd","name":"response","output_types":["Message"]}},"selected":false,"className":""},{"source":"ToolkitComponent-2lNG0","target":"ToolCallingAgent-50Gcd","sourceHandle":"{œdataTypeœ:œToolkitComponentœ,œidœ:œToolkitComponent-2lNG0œ,œnameœ:œgenerated_toolsœ,œoutput_typesœ:[œBaseToolœ]}","targetHandle":"{œfieldNameœ:œtoolsœ,œidœ:œToolCallingAgent-50Gcdœ,œinputTypesœ:[œToolœ,œBaseToolœ],œtypeœ:œotherœ}","id":"reactflow__edge-ToolkitComponent-2lNG0{œdataTypeœ:œToolkitComponentœ,œidœ:œToolkitComponent-2lNG0œ,œnameœ:œgenerated_toolsœ,œoutput_typesœ:[œBaseToolœ]}-ToolCallingAgent-50Gcd{œfieldNameœ:œtoolsœ,œidœ:œToolCallingAgent-50Gcdœ,œinputTypesœ:[œToolœ,œBaseToolœ],œtypeœ:œotherœ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolCallingAgent-50Gcd","inputTypes":["Tool","BaseTool"],"type":"other"},"sourceHandle":{"dataType":"ToolkitComponent","id":"ToolkitComponent-2lNG0","name":"generated_tools","output_types":["BaseTool"]}},"selected":false,"className":""},{"source":"NotionPageUpdate-6FyYd","sourceHandle":"{œdataTypeœ:œNotionPageUpdateœ,œidœ:œNotionPageUpdate-6FyYdœ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}","target":"ToolkitComponent-2lNG0","targetHandle":"{œfieldNameœ:œtoolsœ,œidœ:œToolkitComponent-2lNG0œ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolkitComponent-2lNG0","inputTypes":["Tool"],"type":"other"},"sourceHandle":{"dataType":"NotionPageUpdate","id":"NotionPageUpdate-6FyYd","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-NotionPageUpdate-6FyYd{œdataTypeœ:œNotionPageUpdateœ,œidœ:œNotionPageUpdate-6FyYdœ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}-ToolkitComponent-2lNG0{œfieldNameœ:œtoolsœ,œidœ:œToolkitComponent-2lNG0œ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}","className":"","selected":false},{"source":"NotionPageCreator-6SCB5","sourceHandle":"{œdataTypeœ:œNotionPageCrea
torœ,œidœ:œNotionPageCreator-6SCB5œ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}","target":"ToolkitComponent-2lNG0","targetHandle":"{œfieldNameœ:œtoolsœ,œidœ:œToolkitComponent-2lNG0œ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolkitComponent-2lNG0","inputTypes":["Tool"],"type":"other"},"sourceHandle":{"dataType":"NotionPageCreator","id":"NotionPageCreator-6SCB5","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-NotionPageCreator-6SCB5{œdataTypeœ:œNotionPageCreatorœ,œidœ:œNotionPageCreator-6SCB5œ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}-ToolkitComponent-2lNG0{œfieldNameœ:œtoolsœ,œidœ:œToolkitComponent-2lNG0œ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}","className":"","selected":false},{"source":"AddContentToPage-ZezUn","sourceHandle":"{œdataTypeœ:œAddContentToPageœ,œidœ:œAddContentToPage-ZezUnœ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}","target":"ToolkitComponent-2lNG0","targetHandle":"{œfieldNameœ:œtoolsœ,œidœ:œToolkitComponent-2lNG0œ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolkitComponent-2lNG0","inputTypes":["Tool"],"type":"other"},"sourceHandle":{"dataType":"AddContentToPage","id":"AddContentToPage-ZezUn","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-AddContentToPage-ZezUn{œdataTypeœ:œAddContentToPageœ,œidœ:œAddContentToPage-ZezUnœ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}-ToolkitComponent-2lNG0{œfieldNameœ:œtoolsœ,œidœ:œToolkitComponent-2lNG0œ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}","className":"","selected":false},{"source":"NotionDatabaseProperties-aeWil","sourceHandle":"{œdataTypeœ:œNotionDatabasePropertiesœ,œidœ:œNotionDatabaseProperties-aeWilœ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}","target":"ToolkitComponent-2lNG0","targetHandle":"{œfieldNameœ:œtoolsœ,œidœ:œToolkitComponent-2lNG0œ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}","data":{"targetHandle":{"fieldNa
me":"tools","id":"ToolkitComponent-2lNG0","inputTypes":["Tool"],"type":"other"},"sourceHandle":{"dataType":"NotionDatabaseProperties","id":"NotionDatabaseProperties-aeWil","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-NotionDatabaseProperties-aeWil{œdataTypeœ:œNotionDatabasePropertiesœ,œidœ:œNotionDatabaseProperties-aeWilœ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}-ToolkitComponent-2lNG0{œfieldNameœ:œtoolsœ,œidœ:œToolkitComponent-2lNG0œ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}","className":"","selected":false},{"source":"NotionListPages-znA3w","sourceHandle":"{œdataTypeœ:œNotionListPagesœ,œidœ:œNotionListPages-znA3wœ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}","target":"ToolkitComponent-2lNG0","targetHandle":"{œfieldNameœ:œtoolsœ,œidœ:œToolkitComponent-2lNG0œ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolkitComponent-2lNG0","inputTypes":["Tool"],"type":"other"},"sourceHandle":{"dataType":"NotionListPages","id":"NotionListPages-znA3w","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-NotionListPages-znA3w{œdataTypeœ:œNotionListPagesœ,œidœ:œNotionListPages-znA3wœ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}-ToolkitComponent-2lNG0{œfieldNameœ:œtoolsœ,œidœ:œToolkitComponent-2lNG0œ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}","className":"","selected":false},{"source":"NotionPageContent-SlL21","sourceHandle":"{œdataTypeœ:œNotionPageContentœ,œidœ:œNotionPageContent-SlL21œ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}","target":"ToolkitComponent-2lNG0","targetHandle":"{œfieldNameœ:œtoolsœ,œidœ:œToolkitComponent-2lNG0œ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolkitComponent-2lNG0","inputTypes":["Tool"],"type":"other"},"sourceHandle":{"dataType":"NotionPageContent","id":"NotionPageContent-SlL21","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-NotionPageContent-SlL2
1{œdataTypeœ:œNotionPageContentœ,œidœ:œNotionPageContent-SlL21œ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}-ToolkitComponent-2lNG0{œfieldNameœ:œtoolsœ,œidœ:œToolkitComponent-2lNG0œ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}","className":"","selected":false},{"source":"NotionUserList-C3eGn","sourceHandle":"{œdataTypeœ:œNotionUserListœ,œidœ:œNotionUserList-C3eGnœ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}","target":"ToolkitComponent-2lNG0","targetHandle":"{œfieldNameœ:œtoolsœ,œidœ:œToolkitComponent-2lNG0œ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolkitComponent-2lNG0","inputTypes":["Tool"],"type":"other"},"sourceHandle":{"dataType":"NotionUserList","id":"NotionUserList-C3eGn","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-NotionUserList-C3eGn{œdataTypeœ:œNotionUserListœ,œidœ:œNotionUserList-C3eGnœ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}-ToolkitComponent-2lNG0{œfieldNameœ:œtoolsœ,œidœ:œToolkitComponent-2lNG0œ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}","className":"","selected":false},{"source":"NotionSearch-VS2mI","sourceHandle":"{œdataTypeœ:œNotionSearchœ,œidœ:œNotionSearch-VS2mIœ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}","target":"ToolkitComponent-2lNG0","targetHandle":"{œfieldNameœ:œtoolsœ,œidœ:œToolkitComponent-2lNG0œ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolkitComponent-2lNG0","inputTypes":["Tool"],"type":"other"},"sourceHandle":{"dataType":"NotionSearch","id":"NotionSearch-VS2mI","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-NotionSearch-VS2mI{œdataTypeœ:œNotionSearchœ,œidœ:œNotionSearch-VS2mIœ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}-ToolkitComponent-2lNG0{œfieldNameœ:œtoolsœ,œidœ:œToolkitComponent-2lNG0œ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}","className":"","selected":false},{"source":"OpenAIModel-BJWIg","sourceHandle":"{œdataTypeœ:œOpenAIModelœ,œidœ:
œOpenAIModel-BJWIgœ,œnameœ:œmodel_outputœ,œoutput_typesœ:[œLanguageModelœ]}","target":"ToolCallingAgent-50Gcd","targetHandle":"{œfieldNameœ:œllmœ,œidœ:œToolCallingAgent-50Gcdœ,œinputTypesœ:[œLanguageModelœ],œtypeœ:œotherœ}","data":{"targetHandle":{"fieldName":"llm","id":"ToolCallingAgent-50Gcd","inputTypes":["LanguageModel"],"type":"other"},"sourceHandle":{"dataType":"OpenAIModel","id":"OpenAIModel-BJWIg","name":"model_output","output_types":["LanguageModel"]}},"id":"reactflow__edge-OpenAIModel-BJWIg{œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-BJWIgœ,œnameœ:œmodel_outputœ,œoutput_typesœ:[œLanguageModelœ]}-ToolCallingAgent-50Gcd{œfieldNameœ:œllmœ,œidœ:œToolCallingAgent-50Gcdœ,œinputTypesœ:[œLanguageModelœ],œtypeœ:œotherœ}","className":"","selected":false},{"source":"Memory-CTQWu","sourceHandle":"{œdataTypeœ:œMemoryœ,œidœ:œMemory-CTQWuœ,œnameœ:œmessagesœ,œoutput_typesœ:[œDataœ]}","target":"ToolCallingAgent-50Gcd","targetHandle":"{œfieldNameœ:œchat_historyœ,œidœ:œToolCallingAgent-50Gcdœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}","data":{"targetHandle":{"fieldName":"chat_history","id":"ToolCallingAgent-50Gcd","inputTypes":["Data"],"type":"other"},"sourceHandle":{"dataType":"Memory","id":"Memory-CTQWu","name":"messages","output_types":["Data"]}},"id":"reactflow__edge-Memory-CTQWu{œdataTypeœ:œMemoryœ,œidœ:œMemory-CTQWuœ,œnameœ:œmessagesœ,œoutput_typesœ:[œDataœ]}-ToolCallingAgent-50Gcd{œfieldNameœ:œchat_historyœ,œidœ:œToolCallingAgent-50Gcdœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}","className":"","selected":false},{"source":"Prompt-0dWZu","sourceHandle":"{œdataTypeœ:œPromptœ,œidœ:œPrompt-0dWZuœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}","target":"ToolCallingAgent-50Gcd","targetHandle":"{œfieldNameœ:œsystem_promptœ,œidœ:œToolCallingAgent-50Gcdœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}","data":{"targetHandle":{"fieldName":"system_prompt","id":"ToolCallingAgent-50Gcd","inputTypes":["Message"],"type":"str"},"sourceHandle":{"dataType":"Prompt","id":"Prompt-0dWZu","name":"prompt","outp
ut_types":["Message"]}},"id":"reactflow__edge-Prompt-0dWZu{œdataTypeœ:œPromptœ,œidœ:œPrompt-0dWZuœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}-ToolCallingAgent-50Gcd{œfieldNameœ:œsystem_promptœ,œidœ:œToolCallingAgent-50Gcdœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}","className":"","selected":false},{"source":"CurrentDateComponent-NSNQ8","sourceHandle":"{œdataTypeœ:œCurrentDateComponentœ,œidœ:œCurrentDateComponent-NSNQ8œ,œnameœ:œcurrent_dateœ,œoutput_typesœ:[œMessageœ]}","target":"Prompt-0dWZu","targetHandle":"{œfieldNameœ:œCURRENT_DATEœ,œidœ:œPrompt-0dWZuœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}","data":{"targetHandle":{"fieldName":"CURRENT_DATE","id":"Prompt-0dWZu","inputTypes":["Message","Text"],"type":"str"},"sourceHandle":{"dataType":"CurrentDateComponent","id":"CurrentDateComponent-NSNQ8","name":"current_date","output_types":["Message"]}},"id":"reactflow__edge-CurrentDateComponent-NSNQ8{œdataTypeœ:œCurrentDateComponentœ,œidœ:œCurrentDateComponent-NSNQ8œ,œnameœ:œcurrent_dateœ,œoutput_typesœ:[œMessageœ]}-Prompt-0dWZu{œfieldNameœ:œCURRENT_DATEœ,œidœ:œPrompt-0dWZuœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}","className":"","selected":false}],"viewport":{"x":97.72528949998423,"y":-211.85229348429561,"zoom":0.41621432461249197}},"description":"This flow creates an AI assistant that interacts with your Notion workspace. It understands natural language requests, performs actions in Notion (like creating pages or searching for information), and provides helpful responses. 
To use it, simply start a conversation by asking the agent to perform a Notion-related task, and it will guide you through the process, making it easy to manage your Notion workspace through chat.","name":"Conversational Notion Agent","last_tested_version":"1.0.17","endpoint_name":null,"is_component":false} \ No newline at end of file diff --git a/docs/docs/Integrations/Notion/Meeting_Notes_Agent.json b/docs/docs/Integrations/Notion/Meeting_Notes_Agent.json new file mode 100644 index 000000000000..56d8e74e7a30 --- /dev/null +++ b/docs/docs/Integrations/Notion/Meeting_Notes_Agent.json @@ -0,0 +1 @@ +{"id":"b6de0fdb-31a2-40bf-b921-719bc0890a0e","data":{"nodes":[{"id":"TextInput-iJPEJ","type":"genericNode","position":{"x":94.43614181571661,"y":387.24602783243165},"data":{"type":"TextInput","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from langflow.base.io.text import TextComponent\nfrom langflow.io import MultilineInput, Output\nfrom langflow.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n display_name = \"Text Input\"\n description = \"Get text inputs from the Playground.\"\n icon = \"type\"\n name = \"TextInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Text to be passed as input.\",\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text\", method=\"text_response\"),\n ]\n\n def text_response(self) -> Message:\n message = Message(\n text=self.input_value,\n )\n return message\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"input_value":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"input_value","value":"Good morning. Thanks for joining this project review meeting. 
We've got quite a few tasks to discuss, especially some Notion-related ones. Shall we get started?\n\nMorning, Felipe. Absolutely, let's dive in. I see we have several projects and tasks on our plate.\n\nGreat. Let's begin with the AI Content Gen project. I'm currently working on \"Montar base agente seletor de cortes.\" It's in progress, and I'm aiming to complete it by June 14th. Have you had a chance to look at this task, Cezar?\n\nI haven't been directly involved with that one. Can you give me an overview of what it entails?\n\nOf course. Essentially, we're building a base agent that can intelligently select and edit content. It's part of our larger AI-driven content generation initiative. The challenge is creating an algorithm that can understand context and make smart editing decisions.\n\nInteresting. How's the progress so far?\n\nIt's coming along. I've set up the basic framework, but fine-tuning the selection criteria is proving to be more complex than initially anticipated. I might need an extra day or two beyond the June 14th deadline.\n\nUnderstood, Felipe. Keep me posted if you need any resources or if the deadline needs to be adjusted. By the way, I've been meaning to ask - have you had a chance to look into that new NLP library I mentioned last week? I think it could be useful for this project.\n\nActually, Cezar, I haven't gotten to that yet. Should we add it as a new task? Maybe \"Evaluate NLP library for content selection\"?\n\nGood idea. Let's add that to our task list with a due date of next Friday. Now, moving on to the next task in this project - \"Create Notion Task Automation.\" It's assigned to you and set for June 19th, but you haven't started it yet, right? This is where I'd like to focus our discussion today.\n\nThat's correct. So, the goal is to streamline our workflow by automating certain tasks within Notion. 
I'm thinking we could create scripts or use Notion's API to automatically create, assign, and update tasks based on certain triggers or schedules.\n\nThat sounds like it could save us a lot of time. What specific automations are you considering?\n\nI'm glad you asked, Cezar. I'm thinking of a few key areas:\n1. Automatic task creation based on project milestones\n2. Assigning tasks to team members based on their expertise and current workload\n3. Updating task statuses based on linked database entries\n4. Generating weekly progress reports\n5. Setting up reminders for overdue tasks\n\nThose all sound valuable. Have you looked into the technical requirements for implementing these?\n\nI've done some initial research. Notion's API seems robust enough to handle these automations. We'll likely need to use a combination of Notion's API and a server to run our scripts. I'm thinking of using Node.js for this.\n\nGood thinking. Do you foresee any challenges?\n\nThe main challenge will be ensuring our automations are flexible enough to handle different project structures and team dynamics. We'll need to build in some configurability.\n\nAgreed. Let's make sure we involve the team in defining these automations. Their input will be crucial for making this truly useful. Oh, and speaking of team input, I think we should add a task for \"Conduct team survey on Notion pain points.\" This could help us prioritize which automations to tackle first.\n\nThat's an excellent idea, Cezar. I'll create that task and aim to complete the survey by next Wednesday. Now, I see we have another Notion-related task: \"Subir Notion Agent no Langflow Prod.\" Can you remind me what this entails?\n\nYes, this task is about deploying our Notion integration agent to the Langflow production environment. It's not started yet, but it's a crucial step in making our Notion automations available to the whole team.\n\nI see. 
What's the timeline for this?\n\nWe haven't set a specific deadline yet, but I think we should aim to complete this shortly after the automation task. Let's tentatively say by the end of June?\n\nSounds reasonable. Make sure to coordinate with the DevOps team for a smooth deployment. And while we're on the topic of deployment, we should probably add a task for \"Create documentation for Notion Agent usage.\" We want to make sure the team knows how to use these new tools once they're available.\n\nYou're right, Felipe. I'll add that to our task list. Now, switching gears a bit, let's talk about the Internal Projects. I see you're working on \"Crypto Links\" - it's in progress.\n\nAh yes, our blockchain initiative. It's moving forward. I'm researching various blockchain platforms and their potential applications for our projects. I'm particularly interested in smart contract capabilities.\n\nInteresting. Keep me updated on any promising findings. By the way, have you considered reaching out to any blockchain experts for consultation? It might be worth adding a task for \"Schedule blockchain expert consultation.\"\n\nThat's a great suggestion, Cezar. I'll add it to my to-do list. Now, for the Internal Tasks, I see you're assigned to \"Revisar modos do Charlinho, preparar para open source.\" What's the status on that?\n\nI haven't started yet, but it's on my radar. The deadline is June 7th, so I'll be diving into it this week. Essentially, we need to review and refine Charlinho's modes before we open-source the project.\n\nSounds good. Let me know if you need any assistance with that. Oh, and don't forget we need to add a task for \"Prepare Charlinho documentation for open source.\" We want to make sure our project is well-documented when we release it.\n\nYou're right, Felipe. I'll make sure to include that in our task list. Now, I see you have several tasks assigned to you in the Internal Tasks section. Can you give me a quick rundown?\n\nOf course. 
I'm working on finding a freelancer to create flows in ComfyUI - that's in progress and due May 28th. I'm also handling the conception of the Agent UI, due May 30th. Both are moving along well.\n\nThere's also a task to \"Check, install and test Gladia to use a bot in Google Meet.\" That's in progress, and I'm collaborating with C on it.\n\nThat's quite a workload. How are you managing all these tasks?\n\nIt's challenging, but I'm prioritizing based on deadlines and dependencies. The Notion automation project is a high priority because it'll help us manage tasks more efficiently in the long run.\n\nGood strategy, Felipe. Is there anything you need from me or the team to help move these tasks forward?\n\nActually, yes. For the \"pegar os arquivos necessários para tentarmos montar um stinger com ffmpeg\" task, I could use some input on which files are critical for this. It's a low-priority task due June 2nd, but any insights would be helpful.\n\nI'll review our asset library and send you a list of potential files by tomorrow. Oh, and let's add a task for \"Create ffmpeg stinger tutorial\" once we figure out the process. It could be useful for the team in the future.\n\nGreat idea, Cezar. I'll add that to our backlog. Anything else we should discuss?\n\nI think we've covered the major points. Oh, one last thing - for the \"Create Notion Task Automation\" project, I was thinking of setting up a series of short daily meetings next week to keep everyone aligned. What do you think?\n\nThat's a good idea. Maybe 15-minute stand-ups? We can use those to address any roadblocks quickly. And let's add a task for \"Set up Notion Automation progress tracking board\" to help visualize our progress during these stand-ups.\n\nPerfect. I'll send out calendar invites this afternoon and create that tracking board task. Any final thoughts or concerns, Cezar?\n\nNot from my side. 
I think we have a clear path forward, especially with the Notion-related tasks and the new items we've added to our list.\n\nAgreed. Let's plan to reconvene next week to check on progress, particularly for the Notion automation project and these new tasks we've discussed. Thanks for the comprehensive update, Felipe.\n\nThank you, Cezar. I'll send out a summary of our discussion and action items shortly, including all the new tasks we've identified during this meeting.\n","display_name":"Text","advanced":false,"input_types":["Message"],"dynamic":false,"info":"Text to be passed as input.","title_case":false,"type":"str","_input_type":"MultilineInput"}},"description":"Get text inputs from the Playground.","icon":"type","base_classes":["Message"],"display_name":"Meeting Transcript","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"text","display_name":"Text","method":"text_response","value":"__UNDEFINED__","cache":true}],"field_order":["input_value"],"beta":false,"edited":false,"lf_version":"1.0.17"},"id":"TextInput-iJPEJ"},"selected":false,"width":384,"height":302,"dragging":false,"positionAbsolute":{"x":94.43614181571661,"y":387.24602783243165}},{"id":"NotionUserList-TvIKS","type":"genericNode","position":{"x":80.49204196902156,"y":741.0568511678105},"data":{"type":"NotionUserList","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import requests\nfrom typing import List, Dict\nfrom pydantic import BaseModel\nfrom langflow.io import Output\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\n\n\nclass NotionUserList(LCToolComponent):\n display_name = \"List Users \"\n description 
= \"Retrieve users from Notion.\"\n documentation = \"https://docs.langflow.org/integrations/notion/list-users\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionUserListSchema(BaseModel):\n pass\n\n def run_model(self) -> List[Data]:\n users = self._list_users()\n records = []\n combined_text = \"\"\n\n for user in users:\n output = \"User:\\n\"\n for key, value in user.items():\n output += f\"{key.replace('_', ' ').title()}: {value}\\n\"\n output += \"________________________\\n\"\n\n combined_text += output\n records.append(Data(text=output, data=user))\n\n self.status = records\n return records\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"notion_list_users\",\n description=\"Retrieve users from Notion.\",\n func=self._list_users,\n args_schema=self.NotionUserListSchema,\n )\n\n def _list_users(self) -> List[Dict]:\n url = \"https://api.notion.com/v1/users\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Notion-Version\": \"2022-06-28\",\n }\n\n response = requests.get(url, headers=headers)\n response.raise_for_status()\n\n data = response.json()\n results = data[\"results\"]\n\n users = []\n for user in results:\n user_data = {\n \"id\": user[\"id\"],\n \"type\": user[\"type\"],\n \"name\": user.get(\"name\", \"\"),\n \"avatar_url\": user.get(\"avatar_url\", \"\"),\n }\n users.append(user_data)\n\n return 
users\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"notion_secret":{"load_from_db":false,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"}},"description":"Retrieve users from Notion.","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"List Users ","documentation":"https://docs.langflow.org/integrations/notion/list-users","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true,"hidden":true}],"field_order":["notion_secret"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionUserList-TvIKS","description":"Retrieve users from Notion.","display_name":"List Users "},"selected":false,"width":384,"height":302,"positionAbsolute":{"x":80.49204196902156,"y":741.0568511678105},"dragging":false},{"id":"NotionSearch-M66HF","type":"genericNode","position":{"x":1095.6934863134345,"y":407.8718765800806},"data":{"type":"NotionSearch","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import requests\nfrom typing import Dict, Any, List\nfrom pydantic import BaseModel, Field\nfrom langflow.io import Output\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput, DropdownInput\nfrom langflow.schema import Data\nfrom 
langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\n\n\nclass NotionSearch(LCToolComponent):\n display_name: str = \"Search \"\n description: str = \"Searches all pages and databases that have been shared with an integration. The search field can be an empty value to show all values from that search\"\n documentation: str = \"https://docs.langflow.org/integrations/notion/search\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n StrInput(\n name=\"query\",\n display_name=\"Search Query\",\n info=\"The text that the API compares page and database titles against.\",\n ),\n DropdownInput(\n name=\"filter_value\",\n display_name=\"Filter Type\",\n info=\"Limits the results to either only pages or only databases.\",\n options=[\"page\", \"database\"],\n value=\"page\",\n ),\n DropdownInput(\n name=\"sort_direction\",\n display_name=\"Sort Direction\",\n info=\"The direction to sort the results.\",\n options=[\"ascending\", \"descending\"],\n value=\"descending\",\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionSearchSchema(BaseModel):\n query: str = Field(..., description=\"The search query text.\")\n filter_value: str = Field(default=\"page\", description=\"Filter type: 'page' or 'database'.\")\n sort_direction: str = Field(default=\"descending\", description=\"Sort direction: 'ascending' or 'descending'.\")\n\n def run_model(self) -> List[Data]:\n results = self._search_notion(self.query, self.filter_value, self.sort_direction)\n records = []\n combined_text = f\"Results found: {len(results)}\\n\\n\"\n\n for result in results:\n result_data = {\n \"id\": result[\"id\"],\n \"type\": result[\"object\"],\n \"last_edited_time\": 
result[\"last_edited_time\"],\n }\n\n if result[\"object\"] == \"page\":\n result_data[\"title_or_url\"] = result[\"url\"]\n text = f\"id: {result['id']}\\ntitle_or_url: {result['url']}\\n\"\n elif result[\"object\"] == \"database\":\n if \"title\" in result and isinstance(result[\"title\"], list) and len(result[\"title\"]) > 0:\n result_data[\"title_or_url\"] = result[\"title\"][0][\"plain_text\"]\n text = f\"id: {result['id']}\\ntitle_or_url: {result['title'][0]['plain_text']}\\n\"\n else:\n result_data[\"title_or_url\"] = \"N/A\"\n text = f\"id: {result['id']}\\ntitle_or_url: N/A\\n\"\n\n text += f\"type: {result['object']}\\nlast_edited_time: {result['last_edited_time']}\\n\\n\"\n combined_text += text\n records.append(Data(text=text, data=result_data))\n\n self.status = records\n return records\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"notion_search\",\n description=\"Search Notion pages and databases. Input should include the search query and optionally filter type and sort direction.\",\n func=self._search_notion,\n args_schema=self.NotionSearchSchema,\n )\n\n def _search_notion(\n self, query: str, filter_value: str = \"page\", sort_direction: str = \"descending\"\n ) -> List[Dict[str, Any]]:\n url = \"https://api.notion.com/v1/search\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Content-Type\": \"application/json\",\n \"Notion-Version\": \"2022-06-28\",\n }\n\n data = {\n \"query\": query,\n \"filter\": {\"value\": filter_value, \"property\": \"object\"},\n \"sort\": {\"direction\": sort_direction, \"timestamp\": \"last_edited_time\"},\n }\n\n response = requests.post(url, headers=headers, json=data)\n response.raise_for_status()\n\n results = response.json()\n return 
results[\"results\"]\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"filter_value":{"trace_as_metadata":true,"options":["page","database"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"filter_value","value":"database","display_name":"Filter Type","advanced":true,"dynamic":false,"info":"Limits the results to either only pages or only databases.","title_case":false,"type":"str","_input_type":"DropdownInput"},"notion_secret":{"load_from_db":false,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"},"query":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"query","value":"","display_name":"Search Query","advanced":true,"dynamic":false,"info":"The text that the API compares page and database titles against.","title_case":false,"type":"str","_input_type":"StrInput"},"sort_direction":{"trace_as_metadata":true,"options":["ascending","descending"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"sort_direction","value":"descending","display_name":"Sort Direction","advanced":true,"dynamic":false,"info":"The direction to sort the results.","title_case":false,"type":"str","_input_type":"DropdownInput"}},"description":"Searches all pages and databases that have been shared with an integration. 
The search field can be an empty value to show all values from that search","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"Search ","documentation":"https://docs.langflow.org/integrations/notion/search","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true,"hidden":false}],"field_order":["notion_secret","query","filter_value","sort_direction"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionSearch-M66HF","description":"Searches all pages and databases that have been shared with an integration.","display_name":"Search "},"selected":false,"width":384,"height":386,"positionAbsolute":{"x":1095.6934863134345,"y":407.8718765800806},"dragging":false},{"id":"Prompt-19rub","type":"genericNode","position":{"x":688.7954025956392,"y":456.4686463487848},"data":{"type":"Prompt","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n ]\n\n outputs = [\n Output(display_name=\"Prompt Message\", name=\"prompt\", 
method=\"build_prompt\"),\n ]\n\n async def build_prompt(\n self,\n ) -> Message:\n prompt = Message.from_template_and_variables(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n def post_code_processing(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"\n This function is called after the code validation is done.\n \"\"\"\n frontend_node = super().post_code_processing(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"template":{"trace_as_input":true,"list":false,"required":false,"placeholder":"","show":true,"name":"template","value":"\nYou are an AI assistant specialized in analyzing meeting transcripts and identifying tasks. 
Your goal is to extract relevant tasks from the given transcript, search for related existing tasks in Notion, and provide a comprehensive list of tasks with their current status and any needed updates.\n\nYou have access to the following input:\n\n\n{TRANSCRIPT}\n\n\n\n{USERS}\n\n\nFollow these steps to complete your task:\n\n1. Carefully read through the transcript and identify any mentioned tasks, action items, or follow-ups.\n\n2. For each identified task:\n a. Use the notion_search tool to find if there's an existing related task in Notion.\n b. If a related task is found, note its ID and current status.\n c. If no related task is found, mark it as a new task.\n\n3. For each task (existing or new), determine:\n a. The task name or description\n b. The assigned person (if mentioned)\n c. The current status (for existing tasks) or suggested status (for new tasks)\n d. Any updates or changes mentioned in the transcript\n\n4. Compile your findings into a list of tasks using the following format:\n\n\n\n[Notion page ID if existing, or \"NEW\" if new task]\n[Task name or description]\n[Assigned person, if mentioned]\n[Current status for existing tasks, or suggested status for new tasks]\n[Any updates or changes mentioned in the transcript]\n\n\n\nRemember to focus on tasks that are directly related to the meeting discussion. 
Do not include general conversation topics or unrelated mentions as tasks.\n\nProvide your final output in the format specified above, with each task enclosed in its own tags within the overall structure.\n\nToday is: {CURRENT_DATE}\n\n\n\n","display_name":"Template","advanced":false,"dynamic":false,"info":"","title_case":false,"type":"prompt","_input_type":"PromptInput"},"TRANSCRIPT":{"field_type":"str","required":false,"placeholder":"","list":false,"show":true,"multiline":true,"value":"","fileTypes":[],"file_path":"","password":false,"name":"TRANSCRIPT","display_name":"TRANSCRIPT","advanced":false,"input_types":["Message","Text"],"dynamic":false,"info":"","load_from_db":false,"title_case":false,"type":"str"},"USERS":{"field_type":"str","required":false,"placeholder":"","list":false,"show":true,"multiline":true,"value":"","fileTypes":[],"file_path":"","password":false,"name":"USERS","display_name":"USERS","advanced":false,"input_types":["Message","Text"],"dynamic":false,"info":"","load_from_db":false,"title_case":false,"type":"str"},"CURRENT_DATE":{"field_type":"str","required":false,"placeholder":"","list":false,"show":true,"multiline":true,"value":"","fileTypes":[],"file_path":"","password":false,"name":"CURRENT_DATE","display_name":"CURRENT_DATE","advanced":false,"input_types":["Message","Text"],"dynamic":false,"info":"","load_from_db":false,"title_case":false,"type":"str"}},"description":"Create a prompt template with dynamic variables.","icon":"prompts","is_input":null,"is_output":null,"is_composition":null,"base_classes":["Message"],"name":"","display_name":"Prompt","documentation":"","custom_fields":{"template":["TRANSCRIPT","USERS","CURRENT_DATE"]},"output_types":[],"full_path":null,"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"prompt","hidden":null,"display_name":"Prompt 
Message","method":"build_prompt","value":"__UNDEFINED__","cache":true}],"field_order":["template"],"beta":false,"error":null,"edited":false,"lf_version":"1.0.17"},"id":"Prompt-19rub"},"selected":false,"width":384,"height":588,"positionAbsolute":{"x":688.7954025956392,"y":456.4686463487848},"dragging":false},{"id":"ParseData-aNk1v","type":"genericNode","position":{"x":540.4151030255898,"y":834.2819856588019},"data":{"type":"ParseData","node":{"template":{"_type":"Component","data":{"trace_as_metadata":true,"list":false,"trace_as_input":true,"required":false,"placeholder":"","show":true,"name":"data","value":"","display_name":"Data","advanced":false,"input_types":["Data"],"dynamic":false,"info":"The data to convert to text.","title_case":false,"type":"other","_input_type":"DataInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from langflow.custom import Component\nfrom langflow.helpers.data import data_to_text\nfrom langflow.io import DataInput, MultilineInput, Output, StrInput\nfrom langflow.schema.message import Message\n\n\nclass ParseDataComponent(Component):\n display_name = \"Parse Data\"\n description = \"Convert Data into plain text following a specified template.\"\n icon = \"braces\"\n name = \"ParseData\"\n\n inputs = [\n DataInput(name=\"data\", display_name=\"Data\", info=\"The data to convert to text.\"),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. 
It can contain the keys {text}, {data} or any other key in the Data.\",\n value=\"{text}\",\n ),\n StrInput(name=\"sep\", display_name=\"Separator\", advanced=True, value=\"\\n\"),\n ]\n\n outputs = [\n Output(display_name=\"Text\", name=\"text\", method=\"parse_data\"),\n ]\n\n def parse_data(self) -> Message:\n data = self.data if isinstance(self.data, list) else [self.data]\n template = self.template\n\n result_string = data_to_text(template, data, sep=self.sep)\n self.status = result_string\n return Message(text=result_string)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"sep":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"sep","value":"\n","display_name":"Separator","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"str","_input_type":"StrInput"},"template":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"template","value":"{text}","display_name":"Template","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The template to use for formatting the data. 
It can contain the keys {text}, {data} or any other key in the Data.","title_case":false,"type":"str","_input_type":"MultilineInput"}},"description":"Convert Data into plain text following a specified template.","icon":"braces","base_classes":["Message"],"display_name":"Parse Data","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"text","display_name":"Text","method":"parse_data","value":"__UNDEFINED__","cache":true}],"field_order":["data","template","sep"],"beta":false,"edited":false,"lf_version":"1.0.17"},"id":"ParseData-aNk1v","showNode":false},"selected":false,"width":96,"height":96,"dragging":false,"positionAbsolute":{"x":540.4151030255898,"y":834.2819856588019}},{"id":"ToolCallingAgent-rVWeq","type":"genericNode","position":{"x":1566.291217492157,"y":583.6687094567968},"data":{"type":"ToolCallingAgent","node":{"template":{"_type":"Component","chat_history":{"trace_as_metadata":true,"list":true,"trace_as_input":true,"required":false,"placeholder":"","show":true,"name":"chat_history","value":"","display_name":"Chat History","advanced":true,"input_types":["Data"],"dynamic":false,"info":"","title_case":false,"type":"other","_input_type":"DataInput"},"llm":{"trace_as_metadata":true,"list":false,"required":true,"placeholder":"","show":true,"name":"llm","value":"","display_name":"Language Model","advanced":false,"input_types":["LanguageModel"],"dynamic":false,"info":"","title_case":false,"type":"other","_input_type":"HandleInput"},"tools":{"trace_as_metadata":true,"list":true,"required":false,"placeholder":"","show":true,"name":"tools","value":"","display_name":"Tools","advanced":false,"input_types":["Tool","BaseTool"],"dynamic":false,"info":"","title_case":false,"type":"other","_input_type":"HandleInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from typing import Optional, 
List\n\nfrom langchain.agents import create_tool_calling_agent\nfrom langchain_core.prompts import ChatPromptTemplate, PromptTemplate, HumanMessagePromptTemplate\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.inputs import MultilineInput\nfrom langflow.inputs.inputs import HandleInput, DataInput\nfrom langflow.schema import Data\n\n\nclass ToolCallingAgentComponent(LCToolsAgentComponent):\n display_name: str = \"Tool Calling Agent\"\n description: str = \"Agent that uses tools\"\n icon = \"LangChain\"\n beta = True\n name = \"ToolCallingAgent\"\n\n inputs = LCToolsAgentComponent._base_inputs + [\n HandleInput(name=\"llm\", display_name=\"Language Model\", input_types=[\"LanguageModel\"], required=True),\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"System Prompt\",\n info=\"System prompt for the agent.\",\n value=\"You are a helpful assistant\",\n ),\n MultilineInput(\n name=\"user_prompt\", display_name=\"Prompt\", info=\"This prompt must contain 'input' key.\", value=\"{input}\"\n ),\n DataInput(name=\"chat_history\", display_name=\"Chat History\", is_list=True, advanced=True),\n ]\n\n def get_chat_history_data(self) -> Optional[List[Data]]:\n return self.chat_history\n\n def create_agent_runnable(self):\n if \"input\" not in self.user_prompt:\n raise ValueError(\"Prompt must contain 'input' key.\")\n messages = [\n (\"system\", self.system_prompt),\n (\"placeholder\", \"{chat_history}\"),\n HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=[\"input\"], template=self.user_prompt)),\n (\"placeholder\", \"{agent_scratchpad}\"),\n ]\n prompt = ChatPromptTemplate.from_messages(messages)\n return create_tool_calling_agent(self.llm, self.tools, 
prompt)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"handle_parsing_errors":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"handle_parsing_errors","value":true,"display_name":"Handle Parse Errors","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"bool","_input_type":"BoolInput"},"input_value":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"input_value","value":"Analyze this meeting","display_name":"Input","advanced":true,"input_types":["Message"],"dynamic":false,"info":"","title_case":false,"type":"str","_input_type":"MessageTextInput"},"max_iterations":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"max_iterations","value":15,"display_name":"Max Iterations","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"int","_input_type":"IntInput"},"system_prompt":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"system_prompt","value":"","display_name":"System Prompt","advanced":false,"input_types":["Message"],"dynamic":false,"info":"System prompt for the agent.","title_case":false,"type":"str","_input_type":"MultilineInput"},"user_prompt":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"user_prompt","value":"{input}","display_name":"Prompt","advanced":true,"input_types":["Message"],"dynamic":false,"info":"This prompt must contain 'input' 
key.","title_case":false,"type":"str","_input_type":"MultilineInput"},"verbose":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"verbose","value":true,"display_name":"Verbose","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"bool","_input_type":"BoolInput"}},"description":"Agent that uses tools","icon":"LangChain","base_classes":["AgentExecutor","Message"],"display_name":"Tool Calling Agent","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["AgentExecutor"],"selected":"AgentExecutor","name":"agent","display_name":"Agent","method":"build_agent","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Message"],"selected":"Message","name":"response","display_name":"Response","method":"message_response","value":"__UNDEFINED__","cache":true}],"field_order":["input_value","handle_parsing_errors","verbose","max_iterations","tools","llm","system_prompt","user_prompt","chat_history"],"beta":true,"edited":false,"lf_version":"1.0.17"},"id":"ToolCallingAgent-rVWeq"},"selected":false,"width":384,"height":398,"positionAbsolute":{"x":1566.291217492157,"y":583.6687094567968},"dragging":false},{"id":"OpenAIModel-Ht8xI","type":"genericNode","position":{"x":1097.0545781920632,"y":805.60631548423},"data":{"type":"OpenAIModel","node":{"template":{"_type":"Component","api_key":{"load_from_db":false,"required":false,"placeholder":"","show":true,"name":"api_key","value":"","display_name":"OpenAI API Key","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The OpenAI API Key to use for the OpenAI model.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import operator\nfrom functools import reduce\n\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langchain_openai import 
ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = LCModelComponent._base_inputs + [\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. 
You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\") # type: ignore\n else:\n output = output.bind(response_format={\"type\": \"json_object\"}) # type: ignore\n\n return output # type: ignore\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return\n if isinstance(e, 
BadRequestError):\n message = e.body.get(\"message\") # type: ignore\n if message:\n return message\n return\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"input_value":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"input_value","value":"","display_name":"Input","advanced":true,"input_types":["Message"],"dynamic":false,"info":"","title_case":false,"type":"str","_input_type":"MessageInput"},"json_mode":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"json_mode","value":false,"display_name":"JSON Mode","advanced":true,"dynamic":false,"info":"If True, it will output JSON regardless of passing a schema.","title_case":false,"type":"bool","_input_type":"BoolInput"},"max_tokens":{"trace_as_metadata":true,"range_spec":{"step_type":"float","min":0,"max":128000,"step":0.1},"list":false,"required":false,"placeholder":"","show":true,"name":"max_tokens","value":"","display_name":"Max Tokens","advanced":true,"dynamic":false,"info":"The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.","title_case":false,"type":"int","_input_type":"IntInput"},"model_kwargs":{"trace_as_input":true,"list":false,"required":false,"placeholder":"","show":true,"name":"model_kwargs","value":{},"display_name":"Model Kwargs","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"dict","_input_type":"DictInput"},"model_name":{"trace_as_metadata":true,"options":["gpt-4o-mini","gpt-4o","gpt-4-turbo","gpt-4-turbo-preview","gpt-4","gpt-3.5-turbo","gpt-3.5-turbo-0125"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"model_name","value":"gpt-4o","display_name":"Model Name","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"str","_input_type":"DropdownInput"},"openai_api_base":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"openai_api_base","value":"","display_name":"OpenAI API Base","advanced":true,"dynamic":false,"info":"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.","title_case":false,"type":"str","_input_type":"StrInput"},"output_schema":{"trace_as_input":true,"list":true,"required":false,"placeholder":"","show":true,"name":"output_schema","value":{},"display_name":"Schema","advanced":true,"dynamic":false,"info":"The schema for the Output of the model. You must pass the word JSON in the prompt. 
If left blank, JSON mode will be disabled.","title_case":false,"type":"dict","_input_type":"DictInput"},"seed":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"seed","value":1,"display_name":"Seed","advanced":true,"dynamic":false,"info":"The seed controls the reproducibility of the job.","title_case":false,"type":"int","_input_type":"IntInput"},"stream":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"stream","value":false,"display_name":"Stream","advanced":true,"dynamic":false,"info":"Stream the response from the model. Streaming works only in Chat.","title_case":false,"type":"bool","_input_type":"BoolInput"},"system_message":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"system_message","value":"","display_name":"System Message","advanced":true,"input_types":["Message"],"dynamic":false,"info":"System message to pass to the model.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"temperature":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"temperature","value":0.1,"display_name":"Temperature","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"float","_input_type":"FloatInput"}},"description":"Generates text using OpenAI LLMs.","icon":"OpenAI","base_classes":["LanguageModel","Message"],"display_name":"OpenAI","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"text_output","display_name":"Text","method":"text_response","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["LanguageModel"],"selected":"LanguageModel","name":"model_output","display_name":"Language 
Model","method":"build_model","value":"__UNDEFINED__","cache":true}],"field_order":["input_value","system_message","stream","max_tokens","model_kwargs","json_mode","output_schema","model_name","openai_api_base","api_key","temperature","seed"],"beta":false,"edited":false,"lf_version":"1.0.17"},"id":"OpenAIModel-Ht8xI"},"selected":false,"width":384,"height":302,"dragging":false,"positionAbsolute":{"x":1097.0545781920632,"y":805.60631548423}},{"id":"Prompt-Lbxk6","type":"genericNode","position":{"x":3042.6844997246735,"y":416.83992118486856},"data":{"type":"Prompt","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n ]\n\n outputs = [\n Output(display_name=\"Prompt Message\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(\n self,\n ) -> Message:\n prompt = Message.from_template_and_variables(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return 
frontend_node\n\n def post_code_processing(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"\n This function is called after the code validation is done.\n \"\"\"\n frontend_node = super().post_code_processing(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"template":{"trace_as_input":true,"list":false,"required":false,"placeholder":"","show":true,"name":"template","value":"\nYou are an AI assistant responsible for updating tasks in Notion based on the information provided from a meeting analysis. Your goal is to create new tasks and update existing ones using the Notion API tools available to you, and then provide a summary in a simple markdown format suitable for a chat interface.\n\nYou have access to the following inputs:\n\n\n{TASK_LIST}\n\n\n\n{DATABASES}\n\n\n\n{USERS}\n\n\nFollow these steps to update the tasks in Notion and generate a markdown summary:\n\n1. Identify the Task database ID from the provided list.\n\n2. Before processing any tasks, retrieve the database properties for the Task database:\n a. 
Use the notion_database_properties and carefully review the properties, their types, and any options for select or multi-select properties.\n b. Pay attention to the properties format for further usage.\n\n3. For each task in the task list:\n a. If the task ID is \"NEW\", create a new task using the create_notion_page tool.\n b. If the task has an existing ID, update the task using the update_notion_page tool.\n c. Remember to use the properties from the DB retrieved from the notion_database_properties tool\n\n4. When creating a new task:\n a. Use the create_notion_page tool.\n b. Include the task name, assignee (if available), status, and any other relevant properties based on the database structure.\n c. Ensure that the property names and types match exactly with what you retrieved from the notion_database_properties call.\n\n5. When updating an existing task:\n a. Use the update_notion_page tool.\n b. Update the status, assignee, or any other relevant properties mentioned in the field.\n c. Ensure that the property names and types match exactly with what you retrieved from the notion_database_properties call.\n\n6. After each function call, wait for the before proceeding to the next task.\n\n7. If you encounter any errors during the process, note them and continue with the next task.\n\n8. Provide a summary of your actions for each task in a simple markdown format. Use the following structure:\n # Task Update Summary\n\n ## Created Tasks\n - **[Task Name]**: Assigned to [Assignee], Status: [Status]\n - Details: [Brief description of the new task]\n\n ## Updated Tasks\n - **[Task Name]** (ID: [Notion Page ID])\n - Changes: [Brief description of changes]\n - Status: [Success/Error]\n\n ## Errors\n - **[Task Name or ID]**: [Description of the error encountered]\n\n\nRemember to use the exact property names, types, and options as specified in the Notion database properties you retrieved at the beginning. 
This is crucial for ensuring that all updates and creations are done correctly.\n\nIf you encounter any errors or uncertainties, include them in the Errors section of the markdown summary. With enough detail to the user understand the issues.\n\nProvide your final output as a complete markdown document containing all the tasks you've processed, whether they were created, updated, or encountered errors. Use only basic markdown formatting (headers, bold, lists) to ensure compatibility with chat interfaces. Do not include any XML tags or complex formatting in your final output.\n\nToday is: {CURRENT_DATE}\n\n","display_name":"Template","advanced":false,"dynamic":false,"info":"","title_case":false,"type":"prompt","_input_type":"PromptInput"},"TASK_LIST":{"field_type":"str","required":false,"placeholder":"","list":false,"show":true,"multiline":true,"value":"","fileTypes":[],"file_path":"","password":false,"name":"TASK_LIST","display_name":"TASK_LIST","advanced":false,"input_types":["Message","Text"],"dynamic":false,"info":"","load_from_db":false,"title_case":false,"type":"str"},"DATABASES":{"field_type":"str","required":false,"placeholder":"","list":false,"show":true,"multiline":true,"value":"","fileTypes":[],"file_path":"","password":false,"name":"DATABASES","display_name":"DATABASES","advanced":false,"input_types":["Message","Text"],"dynamic":false,"info":"","load_from_db":false,"title_case":false,"type":"str"},"USERS":{"field_type":"str","required":false,"placeholder":"","list":false,"show":true,"multiline":true,"value":"","fileTypes":[],"file_path":"","password":false,"name":"USERS","display_name":"USERS","advanced":false,"input_types":["Message","Text"],"dynamic":false,"info":"","load_from_db":false,"title_case":false,"type":"str"},"CURRENT_DATE":{"field_type":"str","required":false,"placeholder":"","list":false,"show":true,"multiline":true,"value":"","fileTypes":[],"file_path":"","password":false,"name":"CURRENT_DATE","display_name":"CURRENT_DATE","advanced":false,
"input_types":["Message","Text"],"dynamic":false,"info":"","load_from_db":false,"title_case":false,"type":"str"}},"description":"Create a prompt template with dynamic variables.","icon":"prompts","is_input":null,"is_output":null,"is_composition":null,"base_classes":["Message"],"name":"","display_name":"Prompt","documentation":"","custom_fields":{"template":["TASK_LIST","DATABASES","USERS","CURRENT_DATE"]},"output_types":[],"full_path":null,"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"prompt","hidden":null,"display_name":"Prompt Message","method":"build_prompt","value":"__UNDEFINED__","cache":true}],"field_order":["template"],"beta":false,"error":null,"edited":false},"id":"Prompt-Lbxk6"},"selected":false,"width":384,"height":674,"positionAbsolute":{"x":3042.6844997246735,"y":416.83992118486856},"dragging":false},{"id":"ToolCallingAgent-GurdE","type":"genericNode","position":{"x":3974.1377259893243,"y":867.4647271037014},"data":{"type":"ToolCallingAgent","node":{"template":{"_type":"Component","chat_history":{"trace_as_metadata":true,"list":true,"trace_as_input":true,"required":false,"placeholder":"","show":true,"name":"chat_history","value":"","display_name":"Chat History","advanced":true,"input_types":["Data"],"dynamic":false,"info":"","title_case":false,"type":"other","_input_type":"DataInput"},"llm":{"trace_as_metadata":true,"list":false,"required":true,"placeholder":"","show":true,"name":"llm","value":"","display_name":"Language 
Model","advanced":false,"input_types":["LanguageModel"],"dynamic":false,"info":"","title_case":false,"type":"other","_input_type":"HandleInput"},"tools":{"trace_as_metadata":true,"list":true,"required":false,"placeholder":"","show":true,"name":"tools","value":"","display_name":"Tools","advanced":false,"input_types":["Tool","BaseTool"],"dynamic":false,"info":"","title_case":false,"type":"other","_input_type":"HandleInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from typing import Optional, List\n\nfrom langchain.agents import create_tool_calling_agent\nfrom langchain_core.prompts import ChatPromptTemplate, PromptTemplate, HumanMessagePromptTemplate\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.inputs import MultilineInput\nfrom langflow.inputs.inputs import HandleInput, DataInput\nfrom langflow.schema import Data\n\n\nclass ToolCallingAgentComponent(LCToolsAgentComponent):\n display_name: str = \"Tool Calling Agent\"\n description: str = \"Agent that uses tools\"\n icon = \"LangChain\"\n beta = True\n name = \"ToolCallingAgent\"\n\n inputs = LCToolsAgentComponent._base_inputs + [\n HandleInput(name=\"llm\", display_name=\"Language Model\", input_types=[\"LanguageModel\"], required=True),\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"System Prompt\",\n info=\"System prompt for the agent.\",\n value=\"You are a helpful assistant\",\n ),\n MultilineInput(\n name=\"user_prompt\", display_name=\"Prompt\", info=\"This prompt must contain 'input' key.\", value=\"{input}\"\n ),\n DataInput(name=\"chat_history\", display_name=\"Chat History\", is_list=True, advanced=True),\n ]\n\n def get_chat_history_data(self) -> Optional[List[Data]]:\n return self.chat_history\n\n def create_agent_runnable(self):\n if \"input\" not in self.user_prompt:\n raise ValueError(\"Prompt must contain 'input' key.\")\n messages = [\n (\"system\", self.system_prompt),\n (\"placeholder\", 
\"{chat_history}\"),\n HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=[\"input\"], template=self.user_prompt)),\n (\"placeholder\", \"{agent_scratchpad}\"),\n ]\n prompt = ChatPromptTemplate.from_messages(messages)\n return create_tool_calling_agent(self.llm, self.tools, prompt)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"handle_parsing_errors":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"handle_parsing_errors","value":true,"display_name":"Handle Parse Errors","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"bool","_input_type":"BoolInput"},"input_value":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"input_value","value":"Do your task.","display_name":"Input","advanced":true,"input_types":["Message"],"dynamic":false,"info":"","title_case":false,"type":"str","_input_type":"MessageTextInput"},"max_iterations":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"max_iterations","value":15,"display_name":"Max Iterations","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"int","_input_type":"IntInput"},"system_prompt":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"system_prompt","value":"","display_name":"System Prompt","advanced":false,"input_types":["Message"],"dynamic":false,"info":"System prompt for the 
agent.","title_case":false,"type":"str","_input_type":"MultilineInput"},"user_prompt":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"user_prompt","value":"{input}","display_name":"Prompt","advanced":true,"input_types":["Message"],"dynamic":false,"info":"This prompt must contain 'input' key.","title_case":false,"type":"str","_input_type":"MultilineInput"},"verbose":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"verbose","value":true,"display_name":"Verbose","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"bool","_input_type":"BoolInput"}},"description":"Agent that uses tools","icon":"LangChain","base_classes":["AgentExecutor","Message"],"display_name":"Tool Calling Agent","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["AgentExecutor"],"selected":"AgentExecutor","name":"agent","display_name":"Agent","method":"build_agent","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Message"],"selected":"Message","name":"response","display_name":"Response","method":"message_response","value":"__UNDEFINED__","cache":true}],"field_order":["input_value","handle_parsing_errors","verbose","max_iterations","tools","llm","system_prompt","user_prompt","chat_history"],"beta":true,"edited":false,"lf_version":"1.0.17"},"id":"ToolCallingAgent-GurdE"},"selected":false,"width":384,"height":398,"positionAbsolute":{"x":3974.1377259893243,"y":867.4647271037014},"dragging":false},{"id":"OpenAIModel-OTfnt","type":"genericNode","position":{"x":3513.5648778762093,"y":710.2099422974287},"data":{"type":"OpenAIModel","node":{"template":{"_type":"Component","api_key":{"load_from_db":false,"required":false,"placeholder":"","show":true,"name":"api_key","value":"","display_name":"OpenAI API 
Key","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The OpenAI API Key to use for the OpenAI model.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import operator\nfrom functools import reduce\n\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = LCModelComponent._base_inputs + [\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. 
If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\") # type: ignore\n else:\n output = output.bind(response_format={\"type\": \"json_object\"}) # type: ignore\n\n 
return output # type: ignore\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\") # type: ignore\n if message:\n return message\n return\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"input_value":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"input_value","value":"","display_name":"Input","advanced":true,"input_types":["Message"],"dynamic":false,"info":"","title_case":false,"type":"str","_input_type":"MessageInput"},"json_mode":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"json_mode","value":false,"display_name":"JSON Mode","advanced":true,"dynamic":false,"info":"If True, it will output JSON regardless of passing a schema.","title_case":false,"type":"bool","_input_type":"BoolInput"},"max_tokens":{"trace_as_metadata":true,"range_spec":{"step_type":"float","min":0,"max":128000,"step":0.1},"list":false,"required":false,"placeholder":"","show":true,"name":"max_tokens","value":"","display_name":"Max Tokens","advanced":true,"dynamic":false,"info":"The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.","title_case":false,"type":"int","_input_type":"IntInput"},"model_kwargs":{"trace_as_input":true,"list":false,"required":false,"placeholder":"","show":true,"name":"model_kwargs","value":{},"display_name":"Model Kwargs","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"dict","_input_type":"DictInput"},"model_name":{"trace_as_metadata":true,"options":["gpt-4o-mini","gpt-4o","gpt-4-turbo","gpt-4-turbo-preview","gpt-4","gpt-3.5-turbo","gpt-3.5-turbo-0125"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"model_name","value":"gpt-4o","display_name":"Model Name","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"str","_input_type":"DropdownInput"},"openai_api_base":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"openai_api_base","value":"","display_name":"OpenAI API Base","advanced":true,"dynamic":false,"info":"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.","title_case":false,"type":"str","_input_type":"StrInput"},"output_schema":{"trace_as_input":true,"list":true,"required":false,"placeholder":"","show":true,"name":"output_schema","value":{},"display_name":"Schema","advanced":true,"dynamic":false,"info":"The schema for the Output of the model. You must pass the word JSON in the prompt. 
If left blank, JSON mode will be disabled.","title_case":false,"type":"dict","_input_type":"DictInput"},"seed":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"seed","value":1,"display_name":"Seed","advanced":true,"dynamic":false,"info":"The seed controls the reproducibility of the job.","title_case":false,"type":"int","_input_type":"IntInput"},"stream":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"stream","value":false,"display_name":"Stream","advanced":true,"dynamic":false,"info":"Stream the response from the model. Streaming works only in Chat.","title_case":false,"type":"bool","_input_type":"BoolInput"},"system_message":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"system_message","value":"","display_name":"System Message","advanced":true,"input_types":["Message"],"dynamic":false,"info":"System message to pass to the model.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"temperature":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"temperature","value":0.1,"display_name":"Temperature","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"float","_input_type":"FloatInput"}},"description":"Generates text using OpenAI LLMs.","icon":"OpenAI","base_classes":["LanguageModel","Message"],"display_name":"OpenAI","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"text_output","display_name":"Text","method":"text_response","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["LanguageModel"],"selected":"LanguageModel","name":"model_output","display_name":"Language 
Model","method":"build_model","value":"__UNDEFINED__","cache":true}],"field_order":["input_value","system_message","stream","max_tokens","model_kwargs","json_mode","output_schema","model_name","openai_api_base","api_key","temperature","seed"],"beta":false,"edited":false,"lf_version":"1.0.17"},"id":"OpenAIModel-OTfnt"},"selected":false,"width":384,"height":302,"positionAbsolute":{"x":3513.5648778762093,"y":710.2099422974287},"dragging":false},{"id":"AddContentToPage-vrAvx","type":"genericNode","position":{"x":2649.2991466550634,"y":1050.6250104897197},"data":{"type":"AddContentToPage","node":{"template":{"_type":"Component","block_id":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"block_id","value":"","display_name":"Page/Block ID","advanced":true,"dynamic":false,"info":"The ID of the page/block to add the content.","title_case":false,"type":"str","_input_type":"StrInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import json\nfrom typing import Dict, Any, Union\nfrom markdown import markdown\nfrom bs4 import BeautifulSoup\nimport requests\nfrom langflow.io import Output\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput, MultilineInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\nfrom pydantic import BaseModel, Field\n\n\nclass AddContentToPage(LCToolComponent):\n display_name: str = \"Add Content to Page \"\n description: str = \"Convert markdown text to Notion blocks and append them to a Notion page.\"\n documentation: str = \"https://developers.notion.com/reference/patch-block-children\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n MultilineInput(\n name=\"markdown_text\",\n display_name=\"Markdown Text\",\n info=\"The markdown text to convert to Notion blocks.\",\n ),\n StrInput(\n 
name=\"block_id\",\n display_name=\"Page/Block ID\",\n info=\"The ID of the page/block to add the content.\",\n ),\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class AddContentToPageSchema(BaseModel):\n markdown_text: str = Field(..., description=\"The markdown text to convert to Notion blocks.\")\n block_id: str = Field(..., description=\"The ID of the page/block to add the content.\")\n\n def run_model(self) -> Data:\n result = self._add_content_to_page(self.markdown_text, self.block_id)\n return Data(data=result, text=json.dumps(result))\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"add_content_to_notion_page\",\n description=\"Convert markdown text to Notion blocks and append them to a Notion page.\",\n func=self._add_content_to_page,\n args_schema=self.AddContentToPageSchema,\n )\n\n def _add_content_to_page(self, markdown_text: str, block_id: str) -> Union[Dict[str, Any], str]:\n try:\n html_text = markdown(markdown_text)\n soup = BeautifulSoup(html_text, \"html.parser\")\n blocks = self.process_node(soup)\n\n url = f\"https://api.notion.com/v1/blocks/{block_id}/children\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Content-Type\": \"application/json\",\n \"Notion-Version\": \"2022-06-28\",\n }\n\n data = {\n \"children\": blocks,\n }\n\n response = requests.patch(url, headers=headers, json=data)\n response.raise_for_status()\n\n return response.json()\n except requests.exceptions.RequestException as e:\n error_message = f\"Error: Failed to add content to Notion page. 
{str(e)}\"\n if hasattr(e, \"response\") and e.response is not None:\n error_message += f\" Status code: {e.response.status_code}, Response: {e.response.text}\"\n return error_message\n except Exception as e:\n return f\"Error: An unexpected error occurred while adding content to Notion page. {str(e)}\"\n\n def process_node(self, node):\n blocks = []\n if isinstance(node, str):\n text = node.strip()\n if text:\n if text.startswith(\"#\"):\n heading_level = text.count(\"#\", 0, 6)\n heading_text = text[heading_level:].strip()\n if heading_level == 1:\n blocks.append(self.create_block(\"heading_1\", heading_text))\n elif heading_level == 2:\n blocks.append(self.create_block(\"heading_2\", heading_text))\n elif heading_level == 3:\n blocks.append(self.create_block(\"heading_3\", heading_text))\n else:\n blocks.append(self.create_block(\"paragraph\", text))\n elif node.name == \"h1\":\n blocks.append(self.create_block(\"heading_1\", node.get_text(strip=True)))\n elif node.name == \"h2\":\n blocks.append(self.create_block(\"heading_2\", node.get_text(strip=True)))\n elif node.name == \"h3\":\n blocks.append(self.create_block(\"heading_3\", node.get_text(strip=True)))\n elif node.name == \"p\":\n code_node = node.find(\"code\")\n if code_node:\n code_text = code_node.get_text()\n language, code = self.extract_language_and_code(code_text)\n blocks.append(self.create_block(\"code\", code, language=language))\n elif self.is_table(str(node)):\n blocks.extend(self.process_table(node))\n else:\n blocks.append(self.create_block(\"paragraph\", node.get_text(strip=True)))\n elif node.name == \"ul\":\n blocks.extend(self.process_list(node, \"bulleted_list_item\"))\n elif node.name == \"ol\":\n blocks.extend(self.process_list(node, \"numbered_list_item\"))\n elif node.name == \"blockquote\":\n blocks.append(self.create_block(\"quote\", node.get_text(strip=True)))\n elif node.name == \"hr\":\n blocks.append(self.create_block(\"divider\", \"\"))\n elif node.name == \"img\":\n 
blocks.append(self.create_block(\"image\", \"\", image_url=node.get(\"src\")))\n elif node.name == \"a\":\n blocks.append(self.create_block(\"bookmark\", node.get_text(strip=True), link_url=node.get(\"href\")))\n elif node.name == \"table\":\n blocks.extend(self.process_table(node))\n\n for child in node.children:\n if isinstance(child, str):\n continue\n blocks.extend(self.process_node(child))\n\n return blocks\n\n def extract_language_and_code(self, code_text):\n lines = code_text.split(\"\\n\")\n language = lines[0].strip()\n code = \"\\n\".join(lines[1:]).strip()\n return language, code\n\n def is_code_block(self, text):\n return text.startswith(\"```\")\n\n def extract_code_block(self, text):\n lines = text.split(\"\\n\")\n language = lines[0].strip(\"`\").strip()\n code = \"\\n\".join(lines[1:]).strip(\"`\").strip()\n return language, code\n\n def is_table(self, text):\n rows = text.split(\"\\n\")\n if len(rows) < 2:\n return False\n\n has_separator = False\n for i, row in enumerate(rows):\n if \"|\" in row:\n cells = [cell.strip() for cell in row.split(\"|\")]\n cells = [cell for cell in cells if cell] # Remove empty cells\n if i == 1 and all(set(cell) <= set(\"-|\") for cell in cells):\n has_separator = True\n elif not cells:\n return False\n\n return has_separator and len(rows) >= 3\n\n def process_list(self, node, list_type):\n blocks = []\n for item in node.find_all(\"li\"):\n item_text = item.get_text(strip=True)\n checked = item_text.startswith(\"[x]\")\n is_checklist = item_text.startswith(\"[ ]\") or checked\n\n if is_checklist:\n item_text = item_text.replace(\"[x]\", \"\").replace(\"[ ]\", \"\").strip()\n blocks.append(self.create_block(\"to_do\", item_text, checked=checked))\n else:\n blocks.append(self.create_block(list_type, item_text))\n return blocks\n\n def process_table(self, node):\n blocks = []\n header_row = node.find(\"thead\").find(\"tr\") if node.find(\"thead\") else None\n body_rows = node.find(\"tbody\").find_all(\"tr\") if 
node.find(\"tbody\") else []\n\n if header_row or body_rows:\n table_width = max(\n len(header_row.find_all([\"th\", \"td\"])) if header_row else 0,\n max(len(row.find_all([\"th\", \"td\"])) for row in body_rows),\n )\n\n table_block = self.create_block(\"table\", \"\", table_width=table_width, has_column_header=bool(header_row))\n blocks.append(table_block)\n\n if header_row:\n header_cells = [cell.get_text(strip=True) for cell in header_row.find_all([\"th\", \"td\"])]\n header_row_block = self.create_block(\"table_row\", header_cells)\n blocks.append(header_row_block)\n\n for row in body_rows:\n cells = [cell.get_text(strip=True) for cell in row.find_all([\"th\", \"td\"])]\n row_block = self.create_block(\"table_row\", cells)\n blocks.append(row_block)\n\n return blocks\n\n def create_block(self, block_type: str, content: str, **kwargs) -> Dict[str, Any]:\n block: dict[str, Any] = {\n \"object\": \"block\",\n \"type\": block_type,\n block_type: {},\n }\n\n if block_type in [\n \"paragraph\",\n \"heading_1\",\n \"heading_2\",\n \"heading_3\",\n \"bulleted_list_item\",\n \"numbered_list_item\",\n \"quote\",\n ]:\n block[block_type][\"rich_text\"] = [\n {\n \"type\": \"text\",\n \"text\": {\n \"content\": content,\n },\n }\n ]\n elif block_type == \"to_do\":\n block[block_type][\"rich_text\"] = [\n {\n \"type\": \"text\",\n \"text\": {\n \"content\": content,\n },\n }\n ]\n block[block_type][\"checked\"] = kwargs.get(\"checked\", False)\n elif block_type == \"code\":\n block[block_type][\"rich_text\"] = [\n {\n \"type\": \"text\",\n \"text\": {\n \"content\": content,\n },\n }\n ]\n block[block_type][\"language\"] = kwargs.get(\"language\", \"plain text\")\n elif block_type == \"image\":\n block[block_type] = {\"type\": \"external\", \"external\": {\"url\": kwargs.get(\"image_url\", \"\")}}\n elif block_type == \"divider\":\n pass\n elif block_type == \"bookmark\":\n block[block_type][\"url\"] = kwargs.get(\"link_url\", \"\")\n elif block_type == \"table\":\n 
block[block_type][\"table_width\"] = kwargs.get(\"table_width\", 0)\n block[block_type][\"has_column_header\"] = kwargs.get(\"has_column_header\", False)\n block[block_type][\"has_row_header\"] = kwargs.get(\"has_row_header\", False)\n elif block_type == \"table_row\":\n block[block_type][\"cells\"] = [[{\"type\": \"text\", \"text\": {\"content\": cell}} for cell in content]]\n\n return block\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"markdown_text":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"markdown_text","value":"","display_name":"Markdown Text","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The markdown text to convert to Notion blocks.","title_case":false,"type":"str","_input_type":"MultilineInput"},"notion_secret":{"load_from_db":false,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"}},"description":"Convert markdown text to Notion blocks and append them to a Notion page.","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"Add Content to Page 
","documentation":"https://developers.notion.com/reference/patch-block-children","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true}],"field_order":["markdown_text","block_id","notion_secret"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"AddContentToPage-vrAvx","description":"Convert markdown text to Notion blocks and append them to a Notion page.","display_name":"Add Content to Page "},"selected":false,"width":384,"height":330,"positionAbsolute":{"x":2649.2991466550634,"y":1050.6250104897197},"dragging":false},{"id":"NotionPageCreator-Exc7f","type":"genericNode","position":{"x":3050.8201437255634,"y":1391.0449862668834},"data":{"type":"NotionPageCreator","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import json\nfrom typing import Dict, Any, Union\nimport requests\nfrom pydantic import BaseModel, Field\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput, MultilineInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\nfrom langflow.io import Output\n\nclass NotionPageCreator(LCToolComponent):\n display_name: str = \"Create Page \"\n description: str = \"A component for creating Notion pages.\"\n documentation: str = \"https://docs.langflow.org/integrations/notion/page-create\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n StrInput(\n name=\"database_id\",\n display_name=\"Database ID\",\n info=\"The ID of the Notion database.\",\n ),\n SecretStrInput(\n 
name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n MultilineInput(\n name=\"properties_json\",\n display_name=\"Properties (JSON)\",\n info=\"The properties of the new page as a JSON string.\",\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionPageCreatorSchema(BaseModel):\n database_id: str = Field(..., description=\"The ID of the Notion database.\")\n properties_json: str = Field(..., description=\"The properties of the new page as a JSON string.\")\n\n def run_model(self) -> Data:\n result = self._create_notion_page(self.database_id, self.properties_json)\n if isinstance(result, str):\n # An error occurred, return it as text\n return Data(text=result)\n else:\n # Success, return the created page data\n output = \"Created page properties:\\n\"\n for prop_name, prop_value in result.get(\"properties\", {}).items():\n output += f\"{prop_name}: {prop_value}\\n\"\n return Data(text=output, data=result)\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"create_notion_page\",\n description=\"Create a new page in a Notion database. IMPORTANT: Use the tool to check the Database properties for more details before using this tool.\",\n func=self._create_notion_page,\n args_schema=self.NotionPageCreatorSchema,\n )\n\n def _create_notion_page(self, database_id: str, properties_json: str) -> Union[Dict[str, Any], str]:\n if not database_id or not properties_json:\n return \"Invalid input. Please provide 'database_id' and 'properties_json'.\"\n\n try:\n properties = json.loads(properties_json)\n except json.JSONDecodeError as e:\n return f\"Invalid properties format. Please provide a valid JSON string. 
Error: {str(e)}\"\n\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Content-Type\": \"application/json\",\n \"Notion-Version\": \"2022-06-28\",\n }\n\n data = {\n \"parent\": {\"database_id\": database_id},\n \"properties\": properties,\n }\n\n try:\n response = requests.post(\"https://api.notion.com/v1/pages\", headers=headers, json=data)\n response.raise_for_status()\n result = response.json()\n return result\n except requests.exceptions.RequestException as e:\n error_message = f\"Failed to create Notion page. Error: {str(e)}\"\n if hasattr(e, \"response\") and e.response is not None:\n error_message += f\" Status code: {e.response.status_code}, Response: {e.response.text}\"\n return error_message\n\n def __call__(self, *args, **kwargs):\n return self._create_notion_page(*args, **kwargs)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"database_id":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"database_id","value":"","display_name":"Database ID","advanced":true,"dynamic":false,"info":"The ID of the Notion database.","title_case":false,"type":"str","_input_type":"StrInput"},"notion_secret":{"load_from_db":false,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"},"properties_json":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"properties_json","value":"","display_name":"Properties (JSON)","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The properties of the new page as a JSON 
string.","title_case":false,"type":"str","_input_type":"MultilineInput"}},"description":"A component for creating Notion pages.","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"Create Page ","documentation":"https://docs.langflow.org/integrations/notion/page-create","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true}],"field_order":["database_id","notion_secret","properties_json"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionPageCreator-Exc7f","description":"A component for creating Notion pages.","display_name":"Create Page "},"selected":false,"width":384,"height":302,"positionAbsolute":{"x":3050.8201437255634,"y":1391.0449862668834},"dragging":false},{"id":"NotionDatabaseProperties-IjzLV","type":"genericNode","position":{"x":3053.0023230574693,"y":1061.535907149244},"data":{"type":"NotionDatabaseProperties","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import requests\nfrom typing import Dict, Union\nfrom pydantic import BaseModel, Field\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\nfrom langflow.io import Output\n\nclass NotionDatabaseProperties(LCToolComponent):\n display_name: str = \"List Database Properties \"\n description: str = \"Retrieve properties of a Notion database.\"\n documentation: str = \"https://docs.langflow.org/integrations/notion/list-database-properties\"\n 
icon = \"NotionDirectoryLoader\"\n\n inputs = [\n StrInput(\n name=\"database_id\",\n display_name=\"Database ID\",\n info=\"The ID of the Notion database.\",\n ),\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionDatabasePropertiesSchema(BaseModel):\n database_id: str = Field(..., description=\"The ID of the Notion database.\")\n\n def run_model(self) -> Data:\n result = self._fetch_database_properties(self.database_id)\n if isinstance(result, str):\n # An error occurred, return it as text\n return Data(text=result)\n else:\n # Success, return the properties\n return Data(text=str(result), data=result)\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"notion_database_properties\",\n description=\"Retrieve properties of a Notion database. 
Input should include the database ID.\",\n func=self._fetch_database_properties,\n args_schema=self.NotionDatabasePropertiesSchema,\n )\n\n def _fetch_database_properties(self, database_id: str) -> Union[Dict, str]:\n url = f\"https://api.notion.com/v1/databases/{database_id}\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Notion-Version\": \"2022-06-28\", # Use the latest supported version\n }\n try:\n response = requests.get(url, headers=headers)\n response.raise_for_status()\n data = response.json()\n properties = data.get(\"properties\", {})\n return properties\n except requests.exceptions.RequestException as e:\n return f\"Error fetching Notion database properties: {str(e)}\"\n except ValueError as e:\n return f\"Error parsing Notion API response: {str(e)}\"\n except Exception as e:\n return f\"An unexpected error occurred: {str(e)}\"\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"database_id":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"database_id","value":"","display_name":"Database ID","advanced":true,"dynamic":false,"info":"The ID of the Notion database.","title_case":false,"type":"str","_input_type":"StrInput"},"notion_secret":{"load_from_db":false,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"}},"description":"Retrieve properties of a Notion database.","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"List Database Properties 
","documentation":"https://docs.langflow.org/integrations/notion/list-database-properties","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true}],"field_order":["database_id","notion_secret"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionDatabaseProperties-IjzLV","description":"Retrieve properties of a Notion database.","display_name":"List Database Properties "},"selected":false,"width":384,"height":302,"positionAbsolute":{"x":3053.0023230574693,"y":1061.535907149244},"dragging":false},{"id":"NotionPageUpdate-bexvy","type":"genericNode","position":{"x":2649.2991466550625,"y":1385.262204377853},"data":{"type":"NotionPageUpdate","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import json\nimport requests\nfrom typing import Dict, Any, Union\nfrom pydantic import BaseModel, Field\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput, MultilineInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\nfrom loguru import logger\nfrom langflow.io import Output\n\nclass NotionPageUpdate(LCToolComponent):\n display_name: str = \"Update Page Property \"\n description: str = \"Update the properties of a Notion page.\"\n documentation: str = \"https://docs.langflow.org/integrations/notion/page-update\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n StrInput(\n name=\"page_id\",\n display_name=\"Page ID\",\n info=\"The ID of the Notion page to update.\",\n ),\n MultilineInput(\n 
name=\"properties\",\n display_name=\"Properties\",\n info=\"The properties to update on the page (as a JSON string or a dictionary).\",\n ),\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionPageUpdateSchema(BaseModel):\n page_id: str = Field(..., description=\"The ID of the Notion page to update.\")\n properties: Union[str, Dict[str, Any]] = Field(\n ..., description=\"The properties to update on the page (as a JSON string or a dictionary).\"\n )\n\n def run_model(self) -> Data:\n result = self._update_notion_page(self.page_id, self.properties)\n if isinstance(result, str):\n # An error occurred, return it as text\n return Data(text=result)\n else:\n # Success, return the updated page data\n output = \"Updated page properties:\\n\"\n for prop_name, prop_value in result.get(\"properties\", {}).items():\n output += f\"{prop_name}: {prop_value}\\n\"\n return Data(text=output, data=result)\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"update_notion_page\",\n description=\"Update the properties of a Notion page. 
IMPORTANT: Use the tool to check the Database properties for more details before using this tool.\",\n func=self._update_notion_page,\n args_schema=self.NotionPageUpdateSchema,\n )\n\n def _update_notion_page(self, page_id: str, properties: Union[str, Dict[str, Any]]) -> Union[Dict[str, Any], str]:\n url = f\"https://api.notion.com/v1/pages/{page_id}\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Content-Type\": \"application/json\",\n \"Notion-Version\": \"2022-06-28\", # Use the latest supported version\n }\n\n # Parse properties if it's a string\n if isinstance(properties, str):\n try:\n parsed_properties = json.loads(properties)\n except json.JSONDecodeError as e:\n error_message = f\"Invalid JSON format for properties: {str(e)}\"\n logger.error(error_message)\n return error_message\n\n else:\n parsed_properties = properties\n\n data = {\"properties\": parsed_properties}\n\n try:\n logger.info(f\"Sending request to Notion API: URL: {url}, Data: {json.dumps(data)}\")\n response = requests.patch(url, headers=headers, json=data)\n response.raise_for_status()\n updated_page = response.json()\n\n logger.info(f\"Successfully updated Notion page. 
Response: {json.dumps(updated_page)}\")\n return updated_page\n except requests.exceptions.HTTPError as e:\n error_message = f\"HTTP Error occurred: {str(e)}\"\n if e.response is not None:\n error_message += f\"\\nStatus code: {e.response.status_code}\"\n error_message += f\"\\nResponse body: {e.response.text}\"\n logger.error(error_message)\n return error_message\n except requests.exceptions.RequestException as e:\n error_message = f\"An error occurred while making the request: {str(e)}\"\n logger.error(error_message)\n return error_message\n except Exception as e:\n error_message = f\"An unexpected error occurred: {str(e)}\"\n logger.error(error_message)\n return error_message\n\n def __call__(self, *args, **kwargs):\n return self._update_notion_page(*args, **kwargs)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"notion_secret":{"load_from_db":false,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"},"page_id":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"page_id","value":"","display_name":"Page ID","advanced":true,"dynamic":false,"info":"The ID of the Notion page to update.","title_case":false,"type":"str","_input_type":"StrInput"},"properties":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"properties","value":"","display_name":"Properties","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The properties to update on the page (as a JSON string or a 
dictionary).","title_case":false,"type":"str","_input_type":"MultilineInput"}},"description":"Update the properties of a Notion page.","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"Update Page Property ","documentation":"https://docs.langflow.org/integrations/notion/page-update","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true}],"field_order":["page_id","properties","notion_secret"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionPageUpdate-bexvy","description":"Update the properties of a Notion page.","display_name":"Update Page Property "},"selected":false,"width":384,"height":302,"positionAbsolute":{"x":2649.2991466550625,"y":1385.262204377853},"dragging":false},{"id":"NotionSearch-EdSJb","type":"genericNode","position":{"x":2435.4455721283834,"y":357.45573905064634},"data":{"type":"NotionSearch","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import requests\nfrom typing import Dict, Any, List\nfrom pydantic import BaseModel, Field\nfrom langflow.io import Output\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput, DropdownInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\n\n\nclass NotionSearch(LCToolComponent):\n display_name: str = \"Search \"\n description: str = \"Searches all pages and databases that have been shared with an integration. 
The search field can be an empty value to show all values from that search\"\n documentation: str = \"https://docs.langflow.org/integrations/notion/search\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n StrInput(\n name=\"query\",\n display_name=\"Search Query\",\n info=\"The text that the API compares page and database titles against.\",\n ),\n DropdownInput(\n name=\"filter_value\",\n display_name=\"Filter Type\",\n info=\"Limits the results to either only pages or only databases.\",\n options=[\"page\", \"database\"],\n value=\"page\",\n ),\n DropdownInput(\n name=\"sort_direction\",\n display_name=\"Sort Direction\",\n info=\"The direction to sort the results.\",\n options=[\"ascending\", \"descending\"],\n value=\"descending\",\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionSearchSchema(BaseModel):\n query: str = Field(..., description=\"The search query text.\")\n filter_value: str = Field(default=\"page\", description=\"Filter type: 'page' or 'database'.\")\n sort_direction: str = Field(default=\"descending\", description=\"Sort direction: 'ascending' or 'descending'.\")\n\n def run_model(self) -> List[Data]:\n results = self._search_notion(self.query, self.filter_value, self.sort_direction)\n records = []\n combined_text = f\"Results found: {len(results)}\\n\\n\"\n\n for result in results:\n result_data = {\n \"id\": result[\"id\"],\n \"type\": result[\"object\"],\n \"last_edited_time\": result[\"last_edited_time\"],\n }\n\n if result[\"object\"] == \"page\":\n result_data[\"title_or_url\"] = result[\"url\"]\n text = f\"id: {result['id']}\\ntitle_or_url: {result['url']}\\n\"\n elif result[\"object\"] == \"database\":\n if \"title\" in result and 
isinstance(result[\"title\"], list) and len(result[\"title\"]) > 0:\n result_data[\"title_or_url\"] = result[\"title\"][0][\"plain_text\"]\n text = f\"id: {result['id']}\\ntitle_or_url: {result['title'][0]['plain_text']}\\n\"\n else:\n result_data[\"title_or_url\"] = \"N/A\"\n text = f\"id: {result['id']}\\ntitle_or_url: N/A\\n\"\n\n text += f\"type: {result['object']}\\nlast_edited_time: {result['last_edited_time']}\\n\\n\"\n combined_text += text\n records.append(Data(text=text, data=result_data))\n\n self.status = records\n return records\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"notion_search\",\n description=\"Search Notion pages and databases. Input should include the search query and optionally filter type and sort direction.\",\n func=self._search_notion,\n args_schema=self.NotionSearchSchema,\n )\n\n def _search_notion(\n self, query: str, filter_value: str = \"page\", sort_direction: str = \"descending\"\n ) -> List[Dict[str, Any]]:\n url = \"https://api.notion.com/v1/search\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Content-Type\": \"application/json\",\n \"Notion-Version\": \"2022-06-28\",\n }\n\n data = {\n \"query\": query,\n \"filter\": {\"value\": filter_value, \"property\": \"object\"},\n \"sort\": {\"direction\": sort_direction, \"timestamp\": \"last_edited_time\"},\n }\n\n response = requests.post(url, headers=headers, json=data)\n response.raise_for_status()\n\n results = response.json()\n return results[\"results\"]\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"filter_value":{"trace_as_metadata":true,"options":["page","database"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"filter_value","value":"database","display_name":"Filter Type","advanced":true,"dynamic":false,"info":"Limits the results to either only pages or only 
databases.","title_case":false,"type":"str","_input_type":"DropdownInput"},"notion_secret":{"load_from_db":false,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"},"query":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"query","value":"","display_name":"Search Query","advanced":true,"dynamic":false,"info":"The text that the API compares page and database titles against.","title_case":false,"type":"str","_input_type":"StrInput"},"sort_direction":{"trace_as_metadata":true,"options":["ascending","descending"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"sort_direction","value":"descending","display_name":"Sort Direction","advanced":true,"dynamic":false,"info":"The direction to sort the results.","title_case":false,"type":"str","_input_type":"DropdownInput"}},"description":"List All Databases","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"List Databases","documentation":"https://docs.langflow.org/integrations/notion/search","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":false},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true,"hidden":true}],"field_order":["notion_secret","query","filter_value","sort_direction"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionSearch-EdSJb","description":"Searches all pages and databases that have been shared with an integration.","display_name":"Search 
"},"selected":false,"width":384,"height":302,"positionAbsolute":{"x":2435.4455721283834,"y":357.45573905064634},"dragging":false},{"id":"ParseData-vYVwu","type":"genericNode","position":{"x":2871.5903532688335,"y":563.1965154816405},"data":{"type":"ParseData","node":{"template":{"_type":"Component","data":{"trace_as_metadata":true,"list":false,"trace_as_input":true,"required":false,"placeholder":"","show":true,"name":"data","value":"","display_name":"Data","advanced":false,"input_types":["Data"],"dynamic":false,"info":"The data to convert to text.","title_case":false,"type":"other","_input_type":"DataInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from langflow.custom import Component\nfrom langflow.helpers.data import data_to_text\nfrom langflow.io import DataInput, MultilineInput, Output, StrInput\nfrom langflow.schema.message import Message\n\n\nclass ParseDataComponent(Component):\n display_name = \"Parse Data\"\n description = \"Convert Data into plain text following a specified template.\"\n icon = \"braces\"\n name = \"ParseData\"\n\n inputs = [\n DataInput(name=\"data\", display_name=\"Data\", info=\"The data to convert to text.\"),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. 
It can contain the keys {text}, {data} or any other key in the Data.\",\n value=\"{text}\",\n ),\n StrInput(name=\"sep\", display_name=\"Separator\", advanced=True, value=\"\\n\"),\n ]\n\n outputs = [\n Output(display_name=\"Text\", name=\"text\", method=\"parse_data\"),\n ]\n\n def parse_data(self) -> Message:\n data = self.data if isinstance(self.data, list) else [self.data]\n template = self.template\n\n result_string = data_to_text(template, data, sep=self.sep)\n self.status = result_string\n return Message(text=result_string)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"sep":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"sep","value":"\n","display_name":"Separator","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"str","_input_type":"StrInput"},"template":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"template","value":"{text}","display_name":"Template","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The template to use for formatting the data. 
It can contain the keys {text}, {data} or any other key in the Data.","title_case":false,"type":"str","_input_type":"MultilineInput"}},"description":"Convert Data into plain text following a specified template.","icon":"braces","base_classes":["Message"],"display_name":"Parse Data","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"text","display_name":"Text","method":"parse_data","value":"__UNDEFINED__","cache":true}],"field_order":["data","template","sep"],"beta":false,"edited":false,"lf_version":"1.0.17"},"id":"ParseData-vYVwu","showNode":false},"selected":false,"width":96,"height":96,"positionAbsolute":{"x":2871.5903532688335,"y":563.1965154816405},"dragging":false},{"id":"ChatOutput-zBv53","type":"genericNode","position":{"x":4429.812566227955,"y":940.6072472757681},"data":{"type":"ChatOutput","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.memory import store_message\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI, MESSAGE_SENDER_USER\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n name = \"ChatOutput\"\n\n inputs = [\n MessageTextInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Message to be passed as output.\",\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, 
MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n ]\n outputs = [\n Output(display_name=\"Message\", name=\"message\", method=\"message_response\"),\n ]\n\n def message_response(self) -> Message:\n message = Message(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n )\n if (\n self.session_id\n and isinstance(message, Message)\n and isinstance(message.text, str)\n and self.should_store_message\n ):\n store_message(\n message,\n flow_id=self.graph.flow_id,\n )\n self.message.value = message\n\n self.status = message\n return message\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"data_template":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"data_template","value":"{text}","display_name":"Data Template","advanced":true,"input_types":["Message"],"dynamic":false,"info":"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"input_value":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"input_value","value":"","display_name":"Text","advanced":false,"input_types":["Message"],"dynamic":false,"info":"Message to be passed as output.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"sender":{"trace_as_metadata":true,"options":["Machine","User"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"sender","value":"Machine","display_name":"Sender Type","advanced":true,"dynamic":false,"info":"Type of sender.","title_case":false,"type":"str","_input_type":"DropdownInput"},"sender_name":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"sender_name","value":"AI","display_name":"Sender Name","advanced":true,"input_types":["Message"],"dynamic":false,"info":"Name of the sender.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"session_id":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"session_id","value":"","display_name":"Session ID","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The session ID of the chat. 
If empty, the current session ID parameter will be used.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"should_store_message":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"should_store_message","value":true,"display_name":"Store Messages","advanced":true,"dynamic":false,"info":"Store the message in the history.","title_case":false,"type":"bool","_input_type":"BoolInput"}},"description":"Display a chat message in the Playground.","icon":"ChatOutput","base_classes":["Message"],"display_name":"Chat Output","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"message","display_name":"Message","method":"message_response","value":"__UNDEFINED__","cache":true}],"field_order":["input_value","should_store_message","sender","sender_name","session_id","data_template"],"beta":false,"edited":false,"lf_version":"1.0.17"},"id":"ChatOutput-zBv53"},"selected":false,"width":384,"height":302,"positionAbsolute":{"x":4429.812566227955,"y":940.6072472757681},"dragging":false},{"id":"NotionUserList-wFEb1","type":"genericNode","position":{"x":2390.6365450681037,"y":694.4867003504073},"data":{"type":"NotionUserList","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import requests\nfrom typing import List, Dict\nfrom pydantic import BaseModel\nfrom langflow.io import Output\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\n\n\nclass NotionUserList(LCToolComponent):\n display_name = \"List Users \"\n description = \"Retrieve users from Notion.\"\n documentation = \"https://docs.langflow.org/integrations/notion/list-users\"\n icon = 
\"NotionDirectoryLoader\"\n\n inputs = [\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionUserListSchema(BaseModel):\n pass\n\n def run_model(self) -> List[Data]:\n users = self._list_users()\n records = []\n combined_text = \"\"\n\n for user in users:\n output = \"User:\\n\"\n for key, value in user.items():\n output += f\"{key.replace('_', ' ').title()}: {value}\\n\"\n output += \"________________________\\n\"\n\n combined_text += output\n records.append(Data(text=output, data=user))\n\n self.status = records\n return records\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"notion_list_users\",\n description=\"Retrieve users from Notion.\",\n func=self._list_users,\n args_schema=self.NotionUserListSchema,\n )\n\n def _list_users(self) -> List[Dict]:\n url = \"https://api.notion.com/v1/users\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Notion-Version\": \"2022-06-28\",\n }\n\n response = requests.get(url, headers=headers)\n response.raise_for_status()\n\n data = response.json()\n results = data[\"results\"]\n\n users = []\n for user in results:\n user_data = {\n \"id\": user[\"id\"],\n \"type\": user[\"type\"],\n \"name\": user.get(\"name\", \"\"),\n \"avatar_url\": user.get(\"avatar_url\", \"\"),\n }\n users.append(user_data)\n\n return users\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"notion_secret":{"load_from_db":false,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion 
integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"}},"description":"Retrieve users from Notion.","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"List Users ","documentation":"https://docs.langflow.org/integrations/notion/list-users","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true,"hidden":true}],"field_order":["notion_secret"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionUserList-wFEb1","description":"Retrieve users from Notion.","display_name":"List Users "},"selected":false,"width":384,"height":302,"positionAbsolute":{"x":2390.6365450681037,"y":694.4867003504073},"dragging":false},{"id":"ParseData-WKjW6","type":"genericNode","position":{"x":2877.571533084884,"y":856.8480898893301},"data":{"type":"ParseData","node":{"template":{"_type":"Component","data":{"trace_as_metadata":true,"list":false,"trace_as_input":true,"required":false,"placeholder":"","show":true,"name":"data","value":"","display_name":"Data","advanced":false,"input_types":["Data"],"dynamic":false,"info":"The data to convert to text.","title_case":false,"type":"other","_input_type":"DataInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from langflow.custom import Component\nfrom langflow.helpers.data import data_to_text\nfrom langflow.io import DataInput, MultilineInput, Output, StrInput\nfrom langflow.schema.message import Message\n\n\nclass ParseDataComponent(Component):\n display_name = \"Parse Data\"\n description = \"Convert Data into plain text following a specified template.\"\n icon = \"braces\"\n name = 
\"ParseData\"\n\n inputs = [\n DataInput(name=\"data\", display_name=\"Data\", info=\"The data to convert to text.\"),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. It can contain the keys {text}, {data} or any other key in the Data.\",\n value=\"{text}\",\n ),\n StrInput(name=\"sep\", display_name=\"Separator\", advanced=True, value=\"\\n\"),\n ]\n\n outputs = [\n Output(display_name=\"Text\", name=\"text\", method=\"parse_data\"),\n ]\n\n def parse_data(self) -> Message:\n data = self.data if isinstance(self.data, list) else [self.data]\n template = self.template\n\n result_string = data_to_text(template, data, sep=self.sep)\n self.status = result_string\n return Message(text=result_string)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"sep":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"sep","value":"\n","display_name":"Separator","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"str","_input_type":"StrInput"},"template":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"template","value":"{text}","display_name":"Template","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The template to use for formatting the data. 
It can contain the keys {text}, {data} or any other key in the Data.","title_case":false,"type":"str","_input_type":"MultilineInput"}},"description":"Convert Data into plain text following a specified template.","icon":"braces","base_classes":["Message"],"display_name":"Parse Data","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"text","display_name":"Text","method":"parse_data","value":"__UNDEFINED__","cache":true}],"field_order":["data","template","sep"],"beta":false,"edited":false,"lf_version":"1.0.17"},"id":"ParseData-WKjW6","showNode":false},"selected":false,"width":96,"height":96,"positionAbsolute":{"x":2877.571533084884,"y":856.8480898893301},"dragging":false},{"id":"CurrentDateComponent-WOwNq","type":"genericNode","position":{"x":536.7929500860405,"y":617.6055631700241},"data":{"type":"CurrentDateComponent","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from datetime import datetime\r\nfrom zoneinfo import ZoneInfo\r\nfrom typing import List\r\n\r\nfrom langflow.custom import Component\r\nfrom langflow.io import DropdownInput, Output\r\nfrom langflow.schema.message import Message\r\n\r\nclass CurrentDateComponent(Component):\r\n display_name = \"Current Date 🕰️\"\r\n description = \"Returns the current date and time in the selected timezone.\"\r\n icon = \"clock\"\r\n\r\n inputs = [\r\n DropdownInput(\r\n name=\"timezone\",\r\n display_name=\"Timezone\",\r\n options=[\r\n \"UTC\",\r\n \"US/Eastern\",\r\n \"US/Central\",\r\n \"US/Mountain\",\r\n \"US/Pacific\",\r\n \"Europe/London\",\r\n \"Europe/Paris\",\r\n \"Asia/Tokyo\",\r\n \"Australia/Sydney\",\r\n \"America/Sao_Paulo\",\r\n \"America/Cuiaba\",\r\n ],\r\n value=\"UTC\",\r\n info=\"Select the timezone for the current date and time.\",\r\n ),\r\n ]\r\n\r\n outputs = [\r\n 
Output(display_name=\"Current Date\", name=\"current_date\", method=\"get_current_date\"),\r\n ]\r\n\r\n def get_current_date(self) -> Message:\r\n try:\r\n tz = ZoneInfo(self.timezone)\r\n current_date = datetime.now(tz).strftime(\"%Y-%m-%d %H:%M:%S %Z\")\r\n result = f\"Current date and time in {self.timezone}: {current_date}\"\r\n self.status = result\r\n return Message(text=result)\r\n except Exception as e:\r\n error_message = f\"Error: {str(e)}\"\r\n self.status = error_message\r\n return Message(text=error_message)","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"timezone":{"trace_as_metadata":true,"options":["UTC","US/Eastern","US/Central","US/Mountain","US/Pacific","Europe/London","Europe/Paris","Asia/Tokyo","Australia/Sydney","America/Sao_Paulo","America/Cuiaba"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"timezone","value":"UTC","display_name":"Timezone","advanced":false,"dynamic":false,"info":"Select the timezone for the current date and time.","title_case":false,"type":"str","_input_type":"DropdownInput"}},"description":"Returns the current date and time in the selected timezone.","icon":"clock","base_classes":["Message"],"display_name":"Current Date","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"current_date","display_name":"Current 
Date","method":"get_current_date","value":"__UNDEFINED__","cache":true}],"field_order":["timezone"],"beta":false,"edited":true},"id":"CurrentDateComponent-WOwNq","showNode":false},"selected":false,"width":96,"height":96,"dragging":false,"positionAbsolute":{"x":536.7929500860405,"y":617.6055631700241}},{"id":"CurrentDateComponent-PZ8xJ","type":"genericNode","position":{"x":2871.6341688682833,"y":453.3374434097356},"data":{"type":"CurrentDateComponent","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from datetime import datetime\r\nfrom zoneinfo import ZoneInfo\r\nfrom typing import List\r\n\r\nfrom langflow.custom import Component\r\nfrom langflow.io import DropdownInput, Output\r\nfrom langflow.schema.message import Message\r\n\r\nclass CurrentDateComponent(Component):\r\n display_name = \"Current Date 🕰️\"\r\n description = \"Returns the current date and time in the selected timezone.\"\r\n icon = \"clock\"\r\n\r\n inputs = [\r\n DropdownInput(\r\n name=\"timezone\",\r\n display_name=\"Timezone\",\r\n options=[\r\n \"UTC\",\r\n \"US/Eastern\",\r\n \"US/Central\",\r\n \"US/Mountain\",\r\n \"US/Pacific\",\r\n \"Europe/London\",\r\n \"Europe/Paris\",\r\n \"Asia/Tokyo\",\r\n \"Australia/Sydney\",\r\n \"America/Sao_Paulo\",\r\n \"America/Cuiaba\",\r\n ],\r\n value=\"UTC\",\r\n info=\"Select the timezone for the current date and time.\",\r\n ),\r\n ]\r\n\r\n outputs = [\r\n Output(display_name=\"Current Date\", name=\"current_date\", method=\"get_current_date\"),\r\n ]\r\n\r\n def get_current_date(self) -> Message:\r\n try:\r\n tz = ZoneInfo(self.timezone)\r\n current_date = datetime.now(tz).strftime(\"%Y-%m-%d %H:%M:%S %Z\")\r\n result = f\"Current date and time in {self.timezone}: {current_date}\"\r\n self.status = result\r\n return Message(text=result)\r\n except Exception as e:\r\n error_message = f\"Error: {str(e)}\"\r\n self.status = error_message\r\n return 
Message(text=error_message)","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"timezone":{"trace_as_metadata":true,"options":["UTC","US/Eastern","US/Central","US/Mountain","US/Pacific","Europe/London","Europe/Paris","Asia/Tokyo","Australia/Sydney","America/Sao_Paulo","America/Cuiaba"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"timezone","value":"UTC","display_name":"Timezone","advanced":false,"dynamic":false,"info":"Select the timezone for the current date and time.","title_case":false,"type":"str","_input_type":"DropdownInput"}},"description":"Returns the current date and time in the selected timezone.","icon":"clock","base_classes":["Message"],"display_name":"Current Date","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"current_date","display_name":"Current 
Date","method":"get_current_date","value":"__UNDEFINED__","cache":true}],"field_order":["timezone"],"beta":false,"edited":true,"official":false},"id":"CurrentDateComponent-PZ8xJ","showNode":false},"selected":false,"width":96,"height":96,"dragging":false,"positionAbsolute":{"x":2871.6341688682833,"y":453.3374434097356}}],"edges":[{"source":"TextInput-iJPEJ","sourceHandle":"{œdataTypeœ:œTextInputœ,œidœ:œTextInput-iJPEJœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}","target":"Prompt-19rub","targetHandle":"{œfieldNameœ:œTRANSCRIPTœ,œidœ:œPrompt-19rubœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}","data":{"targetHandle":{"fieldName":"TRANSCRIPT","id":"Prompt-19rub","inputTypes":["Message","Text"],"type":"str"},"sourceHandle":{"dataType":"TextInput","id":"TextInput-iJPEJ","name":"text","output_types":["Message"]}},"id":"reactflow__edge-TextInput-iJPEJ{œdataTypeœ:œTextInputœ,œidœ:œTextInput-iJPEJœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-Prompt-19rub{œfieldNameœ:œTRANSCRIPTœ,œidœ:œPrompt-19rubœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}","selected":false,"className":""},{"source":"NotionUserList-TvIKS","sourceHandle":"{œdataTypeœ:œNotionUserListœ,œidœ:œNotionUserList-TvIKSœ,œnameœ:œexample_outputœ,œoutput_typesœ:[œDataœ]}","target":"ParseData-aNk1v","targetHandle":"{œfieldNameœ:œdataœ,œidœ:œParseData-aNk1vœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}","data":{"targetHandle":{"fieldName":"data","id":"ParseData-aNk1v","inputTypes":["Data"],"type":"other"},"sourceHandle":{"dataType":"NotionUserList","id":"NotionUserList-TvIKS","name":"example_output","output_types":["Data"]}},"id":"reactflow__edge-NotionUserList-TvIKS{œdataTypeœ:œNotionUserListœ,œidœ:œNotionUserList-TvIKSœ,œnameœ:œexample_outputœ,œoutput_typesœ:[œDataœ]}-ParseData-aNk1v{œfieldNameœ:œdataœ,œidœ:œParseData-aNk1vœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}","selected":false,"className":""},{"source":"ParseData-aNk1v","sourceHandle":"{œdataTypeœ:œParseDataœ,œidœ:œParseData-aNk1vœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}"
,"target":"Prompt-19rub","targetHandle":"{œfieldNameœ:œUSERSœ,œidœ:œPrompt-19rubœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}","data":{"targetHandle":{"fieldName":"USERS","id":"Prompt-19rub","inputTypes":["Message","Text"],"type":"str"},"sourceHandle":{"dataType":"ParseData","id":"ParseData-aNk1v","name":"text","output_types":["Message"]}},"id":"reactflow__edge-ParseData-aNk1v{œdataTypeœ:œParseDataœ,œidœ:œParseData-aNk1vœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-Prompt-19rub{œfieldNameœ:œUSERSœ,œidœ:œPrompt-19rubœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}","selected":false,"className":""},{"source":"Prompt-19rub","sourceHandle":"{œdataTypeœ:œPromptœ,œidœ:œPrompt-19rubœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}","target":"ToolCallingAgent-rVWeq","targetHandle":"{œfieldNameœ:œsystem_promptœ,œidœ:œToolCallingAgent-rVWeqœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}","data":{"targetHandle":{"fieldName":"system_prompt","id":"ToolCallingAgent-rVWeq","inputTypes":["Message"],"type":"str"},"sourceHandle":{"dataType":"Prompt","id":"Prompt-19rub","name":"prompt","output_types":["Message"]}},"id":"reactflow__edge-Prompt-19rub{œdataTypeœ:œPromptœ,œidœ:œPrompt-19rubœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}-ToolCallingAgent-rVWeq{œfieldNameœ:œsystem_promptœ,œidœ:œToolCallingAgent-rVWeqœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}","selected":false,"className":""},{"source":"NotionSearch-M66HF","sourceHandle":"{œdataTypeœ:œNotionSearchœ,œidœ:œNotionSearch-M66HFœ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}","target":"ToolCallingAgent-rVWeq","targetHandle":"{œfieldNameœ:œtoolsœ,œidœ:œToolCallingAgent-rVWeqœ,œinputTypesœ:[œToolœ,œBaseToolœ],œtypeœ:œotherœ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolCallingAgent-rVWeq","inputTypes":["Tool","BaseTool"],"type":"other"},"sourceHandle":{"dataType":"NotionSearch","id":"NotionSearch-M66HF","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-NotionSearch-M66HF{œdataTypeœ:œNotionSearchœ,œidœ:œN
otionSearch-M66HFœ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}-ToolCallingAgent-rVWeq{œfieldNameœ:œtoolsœ,œidœ:œToolCallingAgent-rVWeqœ,œinputTypesœ:[œToolœ,œBaseToolœ],œtypeœ:œotherœ}","selected":false,"className":""},{"source":"OpenAIModel-Ht8xI","sourceHandle":"{œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-Ht8xIœ,œnameœ:œmodel_outputœ,œoutput_typesœ:[œLanguageModelœ]}","target":"ToolCallingAgent-rVWeq","targetHandle":"{œfieldNameœ:œllmœ,œidœ:œToolCallingAgent-rVWeqœ,œinputTypesœ:[œLanguageModelœ],œtypeœ:œotherœ}","data":{"targetHandle":{"fieldName":"llm","id":"ToolCallingAgent-rVWeq","inputTypes":["LanguageModel"],"type":"other"},"sourceHandle":{"dataType":"OpenAIModel","id":"OpenAIModel-Ht8xI","name":"model_output","output_types":["LanguageModel"]}},"id":"reactflow__edge-OpenAIModel-Ht8xI{œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-Ht8xIœ,œnameœ:œmodel_outputœ,œoutput_typesœ:[œLanguageModelœ]}-ToolCallingAgent-rVWeq{œfieldNameœ:œllmœ,œidœ:œToolCallingAgent-rVWeqœ,œinputTypesœ:[œLanguageModelœ],œtypeœ:œotherœ}","selected":false,"className":""},{"source":"ToolCallingAgent-rVWeq","sourceHandle":"{œdataTypeœ:œToolCallingAgentœ,œidœ:œToolCallingAgent-rVWeqœ,œnameœ:œresponseœ,œoutput_typesœ:[œMessageœ]}","target":"Prompt-Lbxk6","targetHandle":"{œfieldNameœ:œTASK_LISTœ,œidœ:œPrompt-Lbxk6œ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}","data":{"targetHandle":{"fieldName":"TASK_LIST","id":"Prompt-Lbxk6","inputTypes":["Message","Text"],"type":"str"},"sourceHandle":{"dataType":"ToolCallingAgent","id":"ToolCallingAgent-rVWeq","name":"response","output_types":["Message"]}},"id":"reactflow__edge-ToolCallingAgent-rVWeq{œdataTypeœ:œToolCallingAgentœ,œidœ:œToolCallingAgent-rVWeqœ,œnameœ:œresponseœ,œoutput_typesœ:[œMessageœ]}-Prompt-Lbxk6{œfieldNameœ:œTASK_LISTœ,œidœ:œPrompt-Lbxk6œ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}","selected":false,"className":""},{"source":"OpenAIModel-OTfnt","sourceHandle":"{œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-OTfntœ,œnameœ:œmodel_out
putœ,œoutput_typesœ:[œLanguageModelœ]}","target":"ToolCallingAgent-GurdE","targetHandle":"{œfieldNameœ:œllmœ,œidœ:œToolCallingAgent-GurdEœ,œinputTypesœ:[œLanguageModelœ],œtypeœ:œotherœ}","data":{"targetHandle":{"fieldName":"llm","id":"ToolCallingAgent-GurdE","inputTypes":["LanguageModel"],"type":"other"},"sourceHandle":{"dataType":"OpenAIModel","id":"OpenAIModel-OTfnt","name":"model_output","output_types":["LanguageModel"]}},"id":"reactflow__edge-OpenAIModel-OTfnt{œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-OTfntœ,œnameœ:œmodel_outputœ,œoutput_typesœ:[œLanguageModelœ]}-ToolCallingAgent-GurdE{œfieldNameœ:œllmœ,œidœ:œToolCallingAgent-GurdEœ,œinputTypesœ:[œLanguageModelœ],œtypeœ:œotherœ}","selected":false,"className":""},{"source":"Prompt-Lbxk6","sourceHandle":"{œdataTypeœ:œPromptœ,œidœ:œPrompt-Lbxk6œ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}","target":"ToolCallingAgent-GurdE","targetHandle":"{œfieldNameœ:œsystem_promptœ,œidœ:œToolCallingAgent-GurdEœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}","data":{"targetHandle":{"fieldName":"system_prompt","id":"ToolCallingAgent-GurdE","inputTypes":["Message"],"type":"str"},"sourceHandle":{"dataType":"Prompt","id":"Prompt-Lbxk6","name":"prompt","output_types":["Message"]}},"id":"reactflow__edge-Prompt-Lbxk6{œdataTypeœ:œPromptœ,œidœ:œPrompt-Lbxk6œ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}-ToolCallingAgent-GurdE{œfieldNameœ:œsystem_promptœ,œidœ:œToolCallingAgent-GurdEœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}","selected":false,"className":""},{"source":"AddContentToPage-vrAvx","sourceHandle":"{œdataTypeœ:œAddContentToPageœ,œidœ:œAddContentToPage-vrAvxœ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}","target":"ToolCallingAgent-GurdE","targetHandle":"{œfieldNameœ:œtoolsœ,œidœ:œToolCallingAgent-GurdEœ,œinputTypesœ:[œToolœ,œBaseToolœ],œtypeœ:œotherœ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolCallingAgent-GurdE","inputTypes":["Tool","BaseTool"],"type":"other"},"sourceHandle":{"dataType":"AddContentToPage","id":"AddConten
tToPage-vrAvx","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-AddContentToPage-vrAvx{œdataTypeœ:œAddContentToPageœ,œidœ:œAddContentToPage-vrAvxœ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}-ToolCallingAgent-GurdE{œfieldNameœ:œtoolsœ,œidœ:œToolCallingAgent-GurdEœ,œinputTypesœ:[œToolœ,œBaseToolœ],œtypeœ:œotherœ}","selected":false,"className":""},{"source":"NotionPageCreator-Exc7f","sourceHandle":"{œdataTypeœ:œNotionPageCreatorœ,œidœ:œNotionPageCreator-Exc7fœ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}","target":"ToolCallingAgent-GurdE","targetHandle":"{œfieldNameœ:œtoolsœ,œidœ:œToolCallingAgent-GurdEœ,œinputTypesœ:[œToolœ,œBaseToolœ],œtypeœ:œotherœ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolCallingAgent-GurdE","inputTypes":["Tool","BaseTool"],"type":"other"},"sourceHandle":{"dataType":"NotionPageCreator","id":"NotionPageCreator-Exc7f","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-NotionPageCreator-Exc7f{œdataTypeœ:œNotionPageCreatorœ,œidœ:œNotionPageCreator-Exc7fœ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}-ToolCallingAgent-GurdE{œfieldNameœ:œtoolsœ,œidœ:œToolCallingAgent-GurdEœ,œinputTypesœ:[œToolœ,œBaseToolœ],œtypeœ:œotherœ}","selected":false,"className":""},{"source":"NotionDatabaseProperties-IjzLV","sourceHandle":"{œdataTypeœ:œNotionDatabasePropertiesœ,œidœ:œNotionDatabaseProperties-IjzLVœ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}","target":"ToolCallingAgent-GurdE","targetHandle":"{œfieldNameœ:œtoolsœ,œidœ:œToolCallingAgent-GurdEœ,œinputTypesœ:[œToolœ,œBaseToolœ],œtypeœ:œotherœ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolCallingAgent-GurdE","inputTypes":["Tool","BaseTool"],"type":"other"},"sourceHandle":{"dataType":"NotionDatabaseProperties","id":"NotionDatabaseProperties-IjzLV","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-NotionDatabaseProperties-IjzLV{œdataTypeœ:œNotionDatabasePropertiesœ,œidœ:œNotionDatab
aseProperties-IjzLVœ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}-ToolCallingAgent-GurdE{œfieldNameœ:œtoolsœ,œidœ:œToolCallingAgent-GurdEœ,œinputTypesœ:[œToolœ,œBaseToolœ],œtypeœ:œotherœ}","selected":false,"className":""},{"source":"NotionPageUpdate-bexvy","sourceHandle":"{œdataTypeœ:œNotionPageUpdateœ,œidœ:œNotionPageUpdate-bexvyœ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}","target":"ToolCallingAgent-GurdE","targetHandle":"{œfieldNameœ:œtoolsœ,œidœ:œToolCallingAgent-GurdEœ,œinputTypesœ:[œToolœ,œBaseToolœ],œtypeœ:œotherœ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolCallingAgent-GurdE","inputTypes":["Tool","BaseTool"],"type":"other"},"sourceHandle":{"dataType":"NotionPageUpdate","id":"NotionPageUpdate-bexvy","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-NotionPageUpdate-bexvy{œdataTypeœ:œNotionPageUpdateœ,œidœ:œNotionPageUpdate-bexvyœ,œnameœ:œexample_tool_outputœ,œoutput_typesœ:[œToolœ]}-ToolCallingAgent-GurdE{œfieldNameœ:œtoolsœ,œidœ:œToolCallingAgent-GurdEœ,œinputTypesœ:[œToolœ,œBaseToolœ],œtypeœ:œotherœ}","selected":false,"className":""},{"source":"NotionSearch-EdSJb","sourceHandle":"{œdataTypeœ:œNotionSearchœ,œidœ:œNotionSearch-EdSJbœ,œnameœ:œexample_outputœ,œoutput_typesœ:[œDataœ]}","target":"ParseData-vYVwu","targetHandle":"{œfieldNameœ:œdataœ,œidœ:œParseData-vYVwuœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}","data":{"targetHandle":{"fieldName":"data","id":"ParseData-vYVwu","inputTypes":["Data"],"type":"other"},"sourceHandle":{"dataType":"NotionSearch","id":"NotionSearch-EdSJb","name":"example_output","output_types":["Data"]}},"id":"reactflow__edge-NotionSearch-EdSJb{œdataTypeœ:œNotionSearchœ,œidœ:œNotionSearch-EdSJbœ,œnameœ:œexample_outputœ,œoutput_typesœ:[œDataœ]}-ParseData-vYVwu{œfieldNameœ:œdataœ,œidœ:œParseData-vYVwuœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}","selected":false,"className":""},{"source":"ParseData-vYVwu","sourceHandle":"{œdataTypeœ:œParseDataœ,œidœ:œParseData-vYVwuœ,œnameœ:œtextœ,œoutput_
typesœ:[œMessageœ]}","target":"Prompt-Lbxk6","targetHandle":"{œfieldNameœ:œDATABASESœ,œidœ:œPrompt-Lbxk6œ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}","data":{"targetHandle":{"fieldName":"DATABASES","id":"Prompt-Lbxk6","inputTypes":["Message","Text"],"type":"str"},"sourceHandle":{"dataType":"ParseData","id":"ParseData-vYVwu","name":"text","output_types":["Message"]}},"id":"reactflow__edge-ParseData-vYVwu{œdataTypeœ:œParseDataœ,œidœ:œParseData-vYVwuœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-Prompt-Lbxk6{œfieldNameœ:œDATABASESœ,œidœ:œPrompt-Lbxk6œ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}","selected":false,"className":""},{"source":"ToolCallingAgent-GurdE","sourceHandle":"{œdataTypeœ:œToolCallingAgentœ,œidœ:œToolCallingAgent-GurdEœ,œnameœ:œresponseœ,œoutput_typesœ:[œMessageœ]}","target":"ChatOutput-zBv53","targetHandle":"{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-zBv53œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}","data":{"targetHandle":{"fieldName":"input_value","id":"ChatOutput-zBv53","inputTypes":["Message"],"type":"str"},"sourceHandle":{"dataType":"ToolCallingAgent","id":"ToolCallingAgent-GurdE","name":"response","output_types":["Message"]}},"id":"reactflow__edge-ToolCallingAgent-GurdE{œdataTypeœ:œToolCallingAgentœ,œidœ:œToolCallingAgent-GurdEœ,œnameœ:œresponseœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-zBv53{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-zBv53œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}","selected":false,"className":""},{"source":"NotionUserList-wFEb1","sourceHandle":"{œdataTypeœ:œNotionUserListœ,œidœ:œNotionUserList-wFEb1œ,œnameœ:œexample_outputœ,œoutput_typesœ:[œDataœ]}","target":"ParseData-WKjW6","targetHandle":"{œfieldNameœ:œdataœ,œidœ:œParseData-WKjW6œ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}","data":{"targetHandle":{"fieldName":"data","id":"ParseData-WKjW6","inputTypes":["Data"],"type":"other"},"sourceHandle":{"dataType":"NotionUserList","id":"NotionUserList-wFEb1","name":"example_output","output_types":["Data"]}},"id":"reactflow__edge-NotionUserList-
wFEb1{œdataTypeœ:œNotionUserListœ,œidœ:œNotionUserList-wFEb1œ,œnameœ:œexample_outputœ,œoutput_typesœ:[œDataœ]}-ParseData-WKjW6{œfieldNameœ:œdataœ,œidœ:œParseData-WKjW6œ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}","className":""},{"source":"ParseData-WKjW6","sourceHandle":"{œdataTypeœ:œParseDataœ,œidœ:œParseData-WKjW6œ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}","target":"Prompt-Lbxk6","targetHandle":"{œfieldNameœ:œUSERSœ,œidœ:œPrompt-Lbxk6œ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}","data":{"targetHandle":{"fieldName":"USERS","id":"Prompt-Lbxk6","inputTypes":["Message","Text"],"type":"str"},"sourceHandle":{"dataType":"ParseData","id":"ParseData-WKjW6","name":"text","output_types":["Message"]}},"id":"reactflow__edge-ParseData-WKjW6{œdataTypeœ:œParseDataœ,œidœ:œParseData-WKjW6œ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-Prompt-Lbxk6{œfieldNameœ:œUSERSœ,œidœ:œPrompt-Lbxk6œ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}","className":""},{"source":"CurrentDateComponent-WOwNq","sourceHandle":"{œdataTypeœ:œCurrentDateComponentœ,œidœ:œCurrentDateComponent-WOwNqœ,œnameœ:œcurrent_dateœ,œoutput_typesœ:[œMessageœ]}","target":"Prompt-19rub","targetHandle":"{œfieldNameœ:œCURRENT_DATEœ,œidœ:œPrompt-19rubœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}","data":{"targetHandle":{"fieldName":"CURRENT_DATE","id":"Prompt-19rub","inputTypes":["Message","Text"],"type":"str"},"sourceHandle":{"dataType":"CurrentDateComponent","id":"CurrentDateComponent-WOwNq","name":"current_date","output_types":["Message"]}},"id":"reactflow__edge-CurrentDateComponent-WOwNq{œdataTypeœ:œCurrentDateComponentœ,œidœ:œCurrentDateComponent-WOwNqœ,œnameœ:œcurrent_dateœ,œoutput_typesœ:[œMessageœ]}-Prompt-19rub{œfieldNameœ:œCURRENT_DATEœ,œidœ:œPrompt-19rubœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}","className":""},{"source":"CurrentDateComponent-PZ8xJ","sourceHandle":"{œdataTypeœ:œCurrentDateComponentœ,œidœ:œCurrentDateComponent-PZ8xJœ,œnameœ:œcurrent_dateœ,œoutput_typesœ:[œMessageœ]}","target":"Prompt-Lbxk6","targe
tHandle":"{œfieldNameœ:œCURRENT_DATEœ,œidœ:œPrompt-Lbxk6œ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}","data":{"targetHandle":{"fieldName":"CURRENT_DATE","id":"Prompt-Lbxk6","inputTypes":["Message","Text"],"type":"str"},"sourceHandle":{"dataType":"CurrentDateComponent","id":"CurrentDateComponent-PZ8xJ","name":"current_date","output_types":["Message"]}},"id":"reactflow__edge-CurrentDateComponent-PZ8xJ{œdataTypeœ:œCurrentDateComponentœ,œidœ:œCurrentDateComponent-PZ8xJœ,œnameœ:œcurrent_dateœ,œoutput_typesœ:[œMessageœ]}-Prompt-Lbxk6{œfieldNameœ:œCURRENT_DATEœ,œidœ:œPrompt-Lbxk6œ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}","className":""}],"viewport":{"x":-65.48833753518215,"y":119.49034539812101,"zoom":0.5588906662759379}},"description":"The Notion Agent for Meeting Notes is an AI-powered tool that automatically processes meeting transcripts and updates your Notion workspace accordingly. It identifies tasks, action items, and key points from your meetings, then creates new tasks or updates existing ones in Notion without manual input.\n\nTo use it, simply add your API Keys and provide a meeting transcript. The agent will analyze it, interact with your Notion workspace to make necessary updates, and give you a summary of actions taken. 
This streamlines your workflow, ensuring important meeting outcomes are captured and organized in Notion effortlessly.","name":"Notion Agent - Meeting Notes ","last_tested_version":"1.0.17.dev8","endpoint_name":null,"is_component":false} \ No newline at end of file diff --git a/docs/docs/Integrations/Notion/_category_.json b/docs/docs/Integrations/Notion/_category_.json new file mode 100644 index 000000000000..c245462efa38 --- /dev/null +++ b/docs/docs/Integrations/Notion/_category_.json @@ -0,0 +1 @@ +{"position":5, "label":"Notion"} \ No newline at end of file diff --git a/docs/docs/Integrations/Notion/integrations-notion.md b/docs/docs/Integrations/Notion/integrations-notion.md new file mode 100644 index 000000000000..6664991a0717 --- /dev/null +++ b/docs/docs/Integrations/Notion/integrations-notion.md @@ -0,0 +1,89 @@ +--- +title: Setup +sidebar_position: 0 +slug: /integrations/notion/setup +--- + +# Set up a Notion App + +To use Notion components in Langflow, you first need to create a Notion integration and configure it with the necessary capabilities. This guide will walk you through the process of setting up a Notion integration and granting it access to your Notion databases. + +## Prerequisites + +- A Notion account with access to the workspace where you want to use the integration. +- Admin permissions in the Notion workspace to create and manage integrations. + +## Create a Notion Integration + +1. Go to the [Notion Integrations](https://www.notion.so/my-integrations) page. +2. Click on the "New integration" button. +3. Give your integration a name and select the workspace where you want to use it. +4. Click "Submit" to create the integration. + +:::info +When creating the integration, make sure to enable the necessary capabilities based on your requirements. Refer to the [Notion Integration Capabilities](https://developers.notion.com/reference/capabilities) documentation for more information on each capability. 
+::: + + +## Configure Integration Capabilities + +After creating the integration, you need to configure its capabilities to define what actions it can perform and what data it can access. + +1. In the integration settings page, go to the **Capabilities** tab. +2. Enable the required capabilities for your integration. For example: + - If your integration needs to read data from Notion, enable the "Read content" capability. + - If your integration needs to create new content in Notion, enable the "Insert content" capability. + - If your integration needs to update existing content in Notion, enable the "Update content" capability. +3. Configure the user information access level based on your integration's requirements. +4. Save the changes. + +## Obtain Integration Token + +To authenticate your integration with Notion, you need to obtain an integration token. + +1. In the integration settings page, go to the "Secrets" tab. +2. Copy the "Internal Integration Token" value. This token will be used to authenticate your integration with Notion. + +:::warning +Your integration token is a sensitive piece of information. Make sure to keep it secure and never share it publicly. Store it safely in your Langflow configuration or environment variables. +::: + +## Grant Integration Access to Notion Databases + +For your integration to interact with Notion databases, you need to grant it access to the specific databases it will be working with. + +1. Open the Notion database that you want your integration to access. +2. Click on the "Share" button in the top-right corner of the page. +3. In the "Invite" section, select your integration from the list. +4. Click "Invite" to grant the integration access to the database. + +:::info +If your database contains references to other databases, you need to grant the integration access to those referenced databases as well. Repeat steps 1-4 for each referenced database to ensure your integration has the necessary access. 
+::: + +## Build with Notion Components in Langflow + +Once you have set up your Notion integration and granted it access to the required databases, you can start using the Notion components in Langflow. + +Langflow provides the following Notion components: + +- **Search**: Searches all pages and databases that have been shared with the integration. You can filter results to either pages or databases and specify the sort direction. +- **List Users**: Retrieves a list of users from the Notion workspace. +- **List Database Properties**: Retrieves the properties of a specified Notion database. +- **Create Page**: Creates a new page in a specified Notion database with the provided properties. +- **Update Page Property**: Updates the properties of an existing Notion page. +- **Add Content to Page**: Converts markdown text to Notion blocks and appends them to a specified Notion page. +- **List Pages**: Queries a Notion database with filtering and sorting options. +- **Page Content Viewer**: Retrieves the content of a Notion page as plain text. + +Each of these components outputs both "Data" and "Tool": +- The "Data" output can be used directly in your Langflow flow for further processing or display. +- The "Tool" output can be utilized in Langflow Agents, allowing them to interact with Notion programmatically. + + +## Additional Resources + +- [Notion API Documentation](https://developers.notion.com/docs/getting-started) +- [Notion Integration Capabilities](https://developers.notion.com/reference/capabilities) + +If you encounter any issues or have questions, please reach out to our support team or consult the Langflow community forums. 
diff --git a/docs/docs/Integrations/Notion/notion-agent-conversational.md b/docs/docs/Integrations/Notion/notion-agent-conversational.md new file mode 100644 index 000000000000..150c6e53c6e5 --- /dev/null +++ b/docs/docs/Integrations/Notion/notion-agent-conversational.md @@ -0,0 +1,145 @@ +--- +title: Notion Conversational Agent +sidebar_position: 2 +slug: /integrations/notion/notion-agent-conversational +--- + +The Notion Conversational Agent is an AI-powered assistant that interacts with your Notion workspace through natural language conversations. This flow performs Notion-related tasks like creating pages, searching for information, and managing content, all through a chat interface. + +## Prerequisites + +--- + +- [Notion App](/integrations/notion/setup) +- [Notion account and API key](https://www.notion.so/my-integrations) +- [OpenAI API key](https://platform.openai.com/account/api-keys) +- [Download Flow Conversation Agent Flow](./Conversational_Notion_Agent.json)(Download link) + +![Notion Components Toolkit](./notion_conversational_agent_tools.png) + +## Flow Components + +--- + + +### Input and Output +- **Chat Input**: Accepts user queries and commands +- **Chat Output**: Displays the agent's responses + +### Language Model +- **OpenAI Model**: Processes user input and generates responses + +### Agent and Tools +- **Tool Calling Agent**: Coordinates the use of various Notion tools based on user input +- **Toolkit**: Combines multiple Notion-specific tools into a single toolkit +- **Notion Tools**: Various components for interacting with Notion, including: + - List Users + - List Database Properties + - List Pages + - Page Content Viewer + - Create Page + - Update Page Property + - Add Content to Page + - Search + +### Memory and Prompt +- **Chat Memory**: Stores conversation history +- **Prompt**: Provides system instructions and context for the agent +- **Current Date**: Supplies the current date and time for context + + +## Run the Conversational 
Notion Agent + +--- + +1. Open Langflow and create a new project. +2. Add the components listed above to your flow canvas, or Download the [Conversation Agent Flow](./Conversational_Notion_Agent.json)(Download link) and **Import** the JSON file into Langflow. +3. Connect the components as shown in the flow diagram. +4. Input the Notion and OpenAI API keys in their respective components. +5. To start the flow, click the "Play" button in Langflow. +6. In the chat interface, enter a Notion-related request or question. For example: + - "Create a new page in my task database" + - "Search for pages about project management" + - "List all users in my Notion workspace" +7. The agent will process your request, use the appropriate Notion tools, and provide a response. +8. Continue the conversation, asking follow-up questions or giving new commands as needed. + +## Example Interactions + +--- + +``` +User: List all the users in my Notion workspace. + +Agent: Certainly! I'll use the Notion User List tool to retrieve that information for you. One moment please. + +[Agent uses the Notion User List tool] + +Here's a list of users in your Notion workspace: + +1. John Doe (john@example.com) +2. Jane Smith (jane@example.com) +3. Alex Johnson (alex@example.com) + +Is there anything specific you'd like to know about these users or any other Notion-related task I can help you with? + +User: Create a new page in my Projects database with the title "Website Redesign". + +Agent: Certainly! I'd be happy to create a new page in your Projects database with the title "Website Redesign". I'll use the Notion Page Creator tool to do this. First, let me check the properties of your Projects database to ensure we include all necessary information. + +[Agent uses the Notion Database Properties tool] + +I've retrieved the properties for your Projects database. Now, I'll create the new page with the title "Website Redesign". One moment please. 
+ +[Agent uses the Notion Page Creator tool] + +Great news! I've successfully created a new page in your Projects database with the title "Website Redesign". Here are the details: + +- Title: Website Redesign +- Page ID: [page_id_here] +- URL: [page_url_here] + +Is there any additional information you'd like me to add to this new project page, such as a description, due date, or assigned team members? + +User: Yes, please add a description: "Redesign company website to improve user experience and modernize the look." + +Agent: Certainly! I'd be happy to add that description to the "Website Redesign" project page. I'll use the Add Content to Page tool to update the page with this information. + +[Agent uses the Add Content to Page tool] + +I've successfully added the description to your "Website Redesign" project page. Here's a summary of what I've done: + +1. Located the "Website Redesign" page using the page ID from our previous interaction. +2. Added the following content to the page: + + Description: Redesign company website to improve user experience and modernize the look. + +The description has been added as a new text block on the page. Is there anything else you'd like me to add or modify on this project page? + +``` + + +## Customization + +--- + +The flow can be customized to meet your team's specific needs. + +Customize this flow by: + +1. Adjusting the system prompt to change the agent's behavior or knowledge base. +2. Adding or removing Notion tools based on your specific needs. +3. Modifying the OpenAI model parameters (e.g., temperature) to adjust the agent's response style. + +## Troubleshooting + +--- + +If you encounter issues: + +1. Ensure all API keys are correctly set and have the necessary permissions. +2. Check that your Notion integration has access to the relevant pages and databases. +3. Verify that all components are properly connected in the flow. +4. Review the Langflow logs for any error messages. 
+ +For more advanced usage and integration options, refer to the [Notion API documentation](https://developers.notion.com/) and [Langflow documentation](/). \ No newline at end of file diff --git a/docs/docs/Integrations/Notion/notion-agent-meeting-notes.md b/docs/docs/Integrations/Notion/notion-agent-meeting-notes.md new file mode 100644 index 000000000000..7980bc15fd6c --- /dev/null +++ b/docs/docs/Integrations/Notion/notion-agent-meeting-notes.md @@ -0,0 +1,176 @@ +--- +title: Notion Meeting Notes Agent +sidebar_position: 1 +slug: /integrations/notion/notion-agent-meeting-notes +--- + +The Notion Agent for Meeting Notes is an AI-powered tool that automatically processes meeting transcripts and updates your Notion workspace. It identifies tasks, action items, and key points from your meetings, then creates new tasks or updates existing ones in Notion without manual input. + +## Prerequisites +--- + +- [Notion App](/integrations/notion/setup) +- [Notion API key](https://www.notion.so/my-integrations) +- [OpenAI API key](https://platform.openai.com/account/api-keys) +- [Download Flow Meeting Agent Flow](./Meeting_Notes_Agent.json)(Download link) + +:::warning + +Before using this flow, ensure you have obtained the necessary API keys from Notion and OpenAI. These keys are essential for the flow to function properly. Keep them secure and do not share them publicly. + +::: + +## Components + +--- + +![Notion Meeting Agent Part 1](./notion_meeting_agent_part_1.png) + + + +### Meeting Transcript (Text Input) + +This component allows users to input the meeting transcript directly into the flow. + +### List Users (Notion Component) + +- **Purpose**: Retrieves a list of users from the Notion workspace. +- **Input**: Notion Secret (API key) +- **Output**: List of user data + +### List Databases (Notion Component) + +- **Purpose**: Searches and lists all databases in the Notion workspace. 
+- **Input**: + - Notion Secret (API key) + - Query (optional) + - Filter Type (default: database) + - Sort Direction +- **Output**: List of database data + +### Prompt + +This component creates a dynamic prompt template using the following inputs: +- Meeting Transcript +- List of Users +- List of Databases +- Current Date + +### Meeting Summarizer (Tool Calling Agent) + +- **Purpose**: Analyzes the meeting transcript and identifies tasks and action items. +- **Inputs**: + - System Prompt (from the Prompt component) + - Language Model (OpenAI) + - Tools: + - Notion Search + - List Database Properties + - Create Page + - Update Page Property + - Add Content to Page + +![Notion Meeting Agent Part 2](./notion_meeting_agent_part_2.png) + +### Notion Agent (Tool Calling Agent) + +- **Purpose**: Executes actions in Notion based on the meeting summary. +- **Inputs**: + - System Prompt (from the second Prompt component) + - Language Model (OpenAI) + - Tools: + - List Database Properties + - Create Page + - Update Page Property + - Add Content to Page + +### Notion Components (Tools) + +#### List Database Properties + +- **Purpose**: Retrieves the properties of a specified Notion database. +- **Input**: + - Database ID + - Notion Secret (API key) + +#### Create Page + +- **Purpose**: Creates a new page in a Notion database. +- **Inputs**: + - Database ID + - Notion Secret (API key) + - Properties (JSON) + +#### Update Page Property + +- **Purpose**: Updates the properties of an existing Notion page. +- **Inputs**: + - Page ID + - Notion Secret (API key) + - Properties to update + +#### Add Content to Page + +- **Purpose**: Converts markdown text to Notion blocks and appends them to a specified Notion page. +- **Inputs**: + - Page/Block ID + - Notion Secret (API key) + - Markdown text + +### Chat Output + +Displays the final output of the Notion Agent in the Playground. + +## Flow Process + +--- + +1. The user inputs a meeting transcript. +2. 
The flow retrieves the list of Notion users and databases. +3. A prompt is generated using the transcript, user list, database list, and current date. +4. The Meeting Summarizer analyzes the transcript and identifies tasks and action items. +5. The Notion Agent uses the meeting summary to: + - Create new pages for new tasks + - Update existing pages for existing tasks + - Add content to pages with meeting notes +6. The Chat Output displays a summary of actions taken in Notion. + +## Run the Notion Meeting Notes flow + +--- + +To run the Notion Agent for Meeting Notes: + +1. Open Langflow and create a new project. +2. Add the components listed above to your flow canvas, or download the [Flow Meeting Agent Flow](./Meeting_Notes_Agent.json)(Download link) and **Import** the JSON file into Langflow. +3. Connect the components as shown in the flow diagram. +4. Input the Notion and OpenAI API keys in their respective components. +5. Paste your meeting transcript into the Meeting Transcript component. +6. Run the flow by clicking **Play** on the **Chat Output** component. +7. Review the output in the Chat Output component, which will summarize the actions taken in your Notion workspace. + +For optimal results, use detailed meeting transcripts. The quality of the output depends on the comprehensiveness of the input provided. + +## Customization + +--- + +The flow can be customized to meet your team's specific needs. + +Customize this flow by: + +1. Adjusting the system prompt to change the agent's behavior or knowledge base. +2. Adding or removing Notion tools based on your specific needs. +3. Modifying the OpenAI model parameters (e.g., temperature) to adjust the agent's response style. + +## Troubleshooting + +--- + +If you encounter issues: + +1. Ensure all API keys are correctly set and have the necessary permissions. +2. Check that your Notion integration has access to the relevant pages and databases. +3. Verify that all components are properly connected in the flow. 
+4. Review the Langflow logs for any error messages. + +For more advanced usage and integration options, refer to the [Notion API documentation](https://developers.notion.com/) and [Langflow documentation](/). \ No newline at end of file diff --git a/docs/docs/Integrations/Notion/notion_conversational_agent_tools.png b/docs/docs/Integrations/Notion/notion_conversational_agent_tools.png new file mode 100644 index 000000000000..c981e6c6645c Binary files /dev/null and b/docs/docs/Integrations/Notion/notion_conversational_agent_tools.png differ diff --git a/docs/docs/Integrations/Notion/notion_meeting_agent_part_1.png b/docs/docs/Integrations/Notion/notion_meeting_agent_part_1.png new file mode 100644 index 000000000000..db32f5b9a506 Binary files /dev/null and b/docs/docs/Integrations/Notion/notion_meeting_agent_part_1.png differ diff --git a/docs/docs/Integrations/Notion/notion_meeting_agent_part_2.png b/docs/docs/Integrations/Notion/notion_meeting_agent_part_2.png new file mode 100644 index 000000000000..387d6f8f4525 Binary files /dev/null and b/docs/docs/Integrations/Notion/notion_meeting_agent_part_2.png differ diff --git a/docs/docs/Integrations/assemblyai-components.png b/docs/docs/Integrations/assemblyai-components.png new file mode 100644 index 000000000000..788feb0aa256 Binary files /dev/null and b/docs/docs/Integrations/assemblyai-components.png differ diff --git a/docs/docs/Integrations/integrations-assemblyai.md b/docs/docs/Integrations/integrations-assemblyai.md new file mode 100644 index 000000000000..3c9ec98b4015 --- /dev/null +++ b/docs/docs/Integrations/integrations-assemblyai.md @@ -0,0 +1,165 @@ +--- +title: AssemblyAI +sidebar_position: 1 +slug: /integrations-assemblyai +--- + + + +# AssemblyAI + +The AssemblyAI components allow you to apply powerful Speech AI models to your app for tasks like: + +- Transcribing audio and video files +- Formatting transcripts +- Generating subtitles +- Applying LLMs to audio files + +More info about AssemblyAI: + +- 
[Website](https://www.assemblyai.com/) +- [AssemblyAI API Docs](https://www.assemblyai.com/docs) +- [Get a Free API key](https://www.assemblyai.com/dashboard/signup) + + +## Prerequisites + +You need an **AssemblyAI API key**. After creating a free account, you'll find the API key in your dashboard. [Get a Free API key here](https://www.assemblyai.com/dashboard/signup). + +Enter the key in the *AssemblyAI API Key* field in all components that require the key. + +(Optional): To use LeMUR, you need to upgrade your AssemblyAI account, since this is not included in the free account. + +## Components + +![AssemblyAI Components](./assemblyai-components.png) + +### AssemblyAI Start Transcript + +This component allows you to submit an audio or video file for transcription. + +**Tip**: You can freeze the path of this component to only submit the file once. + +- **Input**: + - AssemblyAI API Key: Your API key. + - Audio File: The audio or video file to transcribe. + - Speech Model (Optional): Select the class of models. Default is *Best*. See [speech models](https://www.assemblyai.com/docs/speech-to-text/speech-recognition#select-the-speech-model-with-best-and-nano) for more info. + - Automatic Language Detection (Optional): Enable automatic language detection. + - Language (Optional): The language of the audio file. Can be set manually if automatic language detection is disabled. + See [supported languages](https://www.assemblyai.com/docs/getting-started/supported-languages) for a list of supported language codes. + - Enable Speaker Labels (Optional): Detect speakers in an audio file and what each speaker said. + - Expected Number of Speakers (Optional): Set the expected number of speakers, if Speaker Labels is enabled. + - Audio File URL (Optional): The URL of the audio or video file to transcribe. Can be used instead of *Audio File*. + - Punctuate (Optional): Apply punctuation. Default is true. + - Format Text (Optional): Apply casing and text formatting. Default is true. 
+ +- **Output**: + - Transcript ID: The id of the transcript + + +### AssemblyAI Poll Transcript + +This component allows you to poll the transcript. It checks the status of the transcript every few seconds until the transcription is completed. + +- **Input**: + - AssemblyAI API Key: Your API key. + - Polling Interval (Optional): The polling interval in seconds. Default is 3. + +- **Output**: + - Transcription Result: The AssemblyAI JSON response of a completed transcript. Contains the text and other info. + + +### AssemblyAI Get Subtitles + +This component allows you to generate subtitles in SRT or VTT format. + +- **Input**: + - AssemblyAI API Key: Your API key. + - Transcription Result: The output of the *Poll Transcript* component. + - Subtitle Format: The format of the captions (SRT or VTT). + - Characters per Caption (Optional): The maximum number of characters per caption (0 for no limit). + +- **Output**: + - Subtitles: A JSON response with the `subtitles` field containing the captions in SRT or VTT format. + + +### AssemblyAI LeMUR + +This component allows you to apply Large Language Models to spoken data using the [AssemblyAI LeMUR framework](https://www.assemblyai.com/docs/lemur). + +LeMUR automatically ingests the transcript as additional context, making it easy to apply LLMs to audio data. You can use it for tasks like summarizing audio, extracting insights, or asking questions. + +- **Input**: + - AssemblyAI API Key: Your API key. + - Transcription Result: The output of the *Poll Transcript* component. + - Input Prompt: The text to prompt the model. You can type your prompt in this field or connect it to a *Prompt* component. + - Final Model: The model that is used for the final prompt after compression is performed. Default is Claude 3.5 Sonnet. + - Temperature (Optional): The temperature to use for the model. Default is 0.0. + - Max Output Size (Optional): Max output size in tokens, up to 4000. Default is 2000. 
+ - Endpoint (Optional): The LeMUR endpoint to use. Default is "task". For "summary" and "question-answer", no prompt input is needed. See [LeMUR API docs](https://www.assemblyai.com/docs/api-reference/lemur/) for more info. + - Questions (Optional): Comma-separated list of your questions. Only used if *Endpoint* is "question-answer". + - Transcript IDs (Optional): Comma-separated list of transcript IDs. LeMUR can perform actions over multiple transcripts. If provided, the *Transcription Result* is ignored. + +- **Output**: + - LeMUR Response: The generated LLM response. + +### AssemblyAI List Transcripts + +This component can be used as a standalone component to list all previously generated transcripts. + +- **Input**: + - AssemblyAI API Key: Your API key. + - Limit (Optional): Maximum number of transcripts to retrieve. Default is 20, use 0 for all. + - Filter (Optional): Filter by transcript status. + - Created On (Optional): Only get transcripts created on this date (YYYY-MM-DD). + - Throttled Only (Optional): Only get throttled transcripts, overrides the status filter + +- **Output**: + - Transcript List: A list of all transcripts with info such as the transcript ID, the status, and the data. + + +## Flow Process + +1. The user inputs an audio or video file. +2. The user can also input an LLM prompt. In this example, we want to generate a summary of the transcript. +3. The flow submits the audio file for transcription. +4. The flow checks the status of the transcript every few seconds until transcription is completed. +5. The flow parses the transcription result and outputs the transcribed text. +6. The flow also generates subtitles. +7. The flow applies the LLM prompt to generate a summary. +8. As a standalone component, all transcripts can be listed. + +## Run the Transcription and Speech AI Flow + +To run the Transcription and Speech AI Flow: + +1. Open Langflow and create a new project. +2. 
Add the components listed above to your flow canvas, or download the [AssemblyAI Transcription and Speech AI Flow](./AssemblyAI_Flow.json)(Download link) and **Import** the JSON file into Langflow. +3. Connect the components as shown in the flow diagram. **Tip**: Freeze the path of the *Start Transcript* component to only submit the file once. +4. Input the AssemblyAI API key in all components that require the key (Start Transcript, Poll Transcript, Get Subtitles, LeMUR, List Transcripts). +5. Select an audio or video file in the *Start Transcript* component. +6. Run the flow by clicking **Play** on the *Parse Data* component. Make sure that the specified template is `{text}`. +7. To generate subtitles, click **Play** on the *Get Subtitles* component. +8. To apply an LLM to your audio file, click **Play** on the *LeMUR* component. Note that you need an upgraded AssemblyAI account to use LeMUR. +9. To list all transcripts, click **Play** on the *List Transcripts* component. + + +## Customization + +The flow can be customized by: + +1. Modifying the parameters in the *Start Transcript* component. +2. Modifying the subtitle format in the *Get Subtitles* component. +3. Modifying the LLM prompt for input of the *LeMUR* component. +4. Modifying the LLM parameters (e.g., temperature) in the *LeMUR* component. + +## Troubleshooting + +If you encounter issues: + +1. Ensure the API key is correctly set in all components that require the key. +2. To use LeMUR, you need to upgrade your AssemblyAI account, since this is not included in the free account. +3. Verify that all components are properly connected in the flow. +4. Review the Langflow logs for any error messages. + +For more advanced usage, refer to the [AssemblyAI API documentation](https://www.assemblyai.com/docs/). If you need more help, you can reach out to the [AssemblyAI support](https://www.assemblyai.com/contact/support). 
diff --git a/docs/docs/Integrations/integrations-langfuse.md b/docs/docs/Integrations/integrations-langfuse.md new file mode 100644 index 000000000000..2e64536f05da --- /dev/null +++ b/docs/docs/Integrations/integrations-langfuse.md @@ -0,0 +1,78 @@ +--- +title: Langfuse +sidebar_position: 2 +slug: /integrations-langfuse +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Integrate Langfuse with Langflow + +[Langfuse](https://langfuse.com/) is an observability and analytics platform specifically designed for language models and AI applications. + +This guide walks you through how to configure Langflow to collect [tracing](https://langfuse.com/docs/tracing) data about your flow executions and automatically send the data to Langfuse. + +## Prerequisites + +- A project in Langflow with a runnable flow +- A Langfuse Cloud account in any [data region](https://langfuse.com/faq/all/cloud-data-regions) +- A Langfuse organization and project + +## Create Langfuse project credentials + +1. In Langfuse, go to your project settings, and then create a new set of API keys. + +2. Copy the following API key information: + + - Secret Key + - Public Key + - Host URL + +## Set your Langfuse credentials as environment variables + +Set your Langfuse project credentials as environment variables in the same environment where you run Langflow. + +You can use any method you prefer to set environment variables. +The following examples show how to set environment variables in a terminal session (Linux or macOS) and in a command prompt session (Windows): + + + + +``` +export LANGFUSE_SECRET_KEY=SECRET_KEY +export LANGFUSE_PUBLIC_KEY=PUBLIC_KEY +export LANGFUSE_HOST=HOST_URL +``` + + + +``` +set LANGFUSE_SECRET_KEY=SECRET_KEY +set LANGFUSE_PUBLIC_KEY=PUBLIC_KEY +set LANGFUSE_HOST=HOST_URL +``` + + + + +Replace `SECRET_KEY`, `PUBLIC_KEY`, and `HOST_URL` with the API key information you copied from Langfuse. + +## Start Langflow and run a flow + +1. 
Start Langflow in the same terminal or environment where you set the environment variables: + + ```bash + python -m langflow run + ``` + +2. In Langflow, open an existing project, and then run a flow. + +## View tracing data in Langfuse + +Langflow automatically collects and sends tracing data about the flow execution to Langfuse. +You can view the collected data in your Langfuse project dashboard. + +## Disable the Langfuse integration + +To disable the Langfuse integration, remove the environment variables you set in the previous steps and restart Langflow. diff --git a/docs/docs/Integrations/integrations-langsmith.md b/docs/docs/Integrations/integrations-langsmith.md index d26b707b0a65..957ee9d6b3c4 100644 --- a/docs/docs/Integrations/integrations-langsmith.md +++ b/docs/docs/Integrations/integrations-langsmith.md @@ -1,6 +1,6 @@ --- title: LangSmith -sidebar_position: 0 +sidebar_position: 3 slug: /integrations-langsmith --- @@ -25,11 +25,9 @@ or export the environment variables in your terminal: `export LANGCHAIN_API_KEY="your-api-key"export LANGCHAIN_PROJECT="your-project-name"` -1. Restart Langflow using `langflow run --env-file .env` -2. Run any project and check the LangSmith dashboard for monitoring and observability. +3. Restart Langflow using `langflow run --env-file .env` +4. Run a project in Langflow. +5. View the LangSmith dashboard for monitoring and observability. -![](./965098683.png) - - -![](./1641825585.png) +![](/img/langsmith-dashboard.png) diff --git a/docs/docs/Integrations/integrations-langwatch.md b/docs/docs/Integrations/integrations-langwatch.md index 53440e7165d5..626c1b8b5f43 100644 --- a/docs/docs/Integrations/integrations-langwatch.md +++ b/docs/docs/Integrations/integrations-langwatch.md @@ -1,6 +1,6 @@ --- title: LangWatch -sidebar_position: 1 +sidebar_position: 4 slug: /integrations-langwatch --- @@ -32,11 +32,9 @@ or export it in your terminal: export LANGWATCH_API_KEY="your-api-key" ``` -1. 
Restart Langflow using `langflow run --env-file .env` -2. Run any project and check the LangWatch dashboard for monitoring and observability. +3. Restart Langflow using `langflow run --env-file .env` +4. Run a project in Langflow. +5. View the LangWatch dashboard for monitoring and observability. -![](./505849097.png) - - -![](./1503538843.png) +![](/img/langwatch-dashboard.png) diff --git a/docs/docs/Settings/418277339.png b/docs/docs/Settings/418277339.png deleted file mode 100644 index 9909932f9f79..000000000000 Binary files a/docs/docs/Settings/418277339.png and /dev/null differ diff --git a/docs/docs/Settings/_category_.json b/docs/docs/Settings/_category_.json deleted file mode 100644 index 42ef607366bf..000000000000 --- a/docs/docs/Settings/_category_.json +++ /dev/null @@ -1 +0,0 @@ -{"position":7, "label":"Settings"} \ No newline at end of file diff --git a/docs/docs/Settings/settings-global-variables.md b/docs/docs/Settings/settings-global-variables.md deleted file mode 100644 index 1e7d2f52c7d5..000000000000 --- a/docs/docs/Settings/settings-global-variables.md +++ /dev/null @@ -1,163 +0,0 @@ ---- -title: Global Variables -sidebar_position: 0 -slug: /settings-global-variables ---- - -import ReactPlayer from "react-player"; - -:::info - -This page may contain outdated information. It will be updated as soon as possible. - -::: - - - - -Global Variables are a useful feature of Langflow, allowing you to define reusable variables accessed from any Text field in your project. - - -**TL;DR** - -- Global Variables are reusable variables accessible from any Text field in your project. -- To create one, click the 🌐 button in a Text field and then **+ Add New Variable**. -- Define the **Name**, **Type**, and **Value** of the variable. -- Click **Save Variable** to create it. -- All Credential Global Variables are encrypted and accessible only by you. 
-- Set _`LANGFLOW_STORE_ENVIRONMENT_VARIABLES`_ to _`true`_ in your `.env` file to add all variables in _`LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT`_ to your user's Global Variables. - -### Create and Add a Global Variable {#3543d5ef00eb453aa459b97ba85501e5} - - -To create and add a global variable, click the 🌐 button in a Text field, and then click **+ Add New Variable**. - - -Text fields are where you write text without opening a Text area, and are identified with the 🌐 icon. - - -For example, to create an environment variable for the **OpenAI** component: - -1. In the **OpenAI API Key** text field, click the 🌐 button, then **Add New Variable**. -2. Enter `openai_api_key` in the **Variable Name** field. -3. Paste your OpenAI API Key (`sk-...`) in the **Value** field. -4. Select **Credential** for the **Type**. -5. Choose **OpenAI API Key** in the **Apply to Fields** field to apply this variable to all fields named **OpenAI API Key**. -6. Click **Save Variable**. - -You now have a `openai_api_key` global environment variable for your Langflow project. -Subsequently, clicking the 🌐 button in a Text field will display the new variable in the dropdown. - - -:::tip - -You can also create global variables in Settings > Global Variables. - -::: - - - - -![](./418277339.png) - - -To view and manage your project's global environment variables, visit **Settings** > **Global Variables**. - - -### Configure Environment Variables in your .env file {#76844a93dbbc4d1ba551ea1a4a89ccdd} - - -Setting `LANGFLOW_STORE_ENVIRONMENT_VARIABLES` to `true` in your `.env` file (default) adds all variables in `LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT` to your user's Global Variables. - - -These variables are accessible like any other Global Variable. - - -:::info - -To prevent this behavior, set `LANGFLOW_STORE_ENVIRONMENT_VARIABLES` to `false` in your `.env` file. 
- -::: - - - - -You can specify variables to get from the environment by listing them in `LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT`, as a comma-separated list (e.g., _`VARIABLE1, VARIABLE2`_). - - -The default list of variables includes the ones below and more: - -- ANTHROPIC_API_KEY -- ASTRA_DB_API_ENDPOINT -- ASTRA_DB_APPLICATION_TOKEN -- AZURE_OPENAI_API_KEY -- AZURE_OPENAI_API_DEPLOYMENT_NAME -- AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME -- AZURE_OPENAI_API_INSTANCE_NAME -- AZURE_OPENAI_API_VERSION -- COHERE_API_KEY -- GOOGLE_API_KEY -- GROQ_API_KEY -- HUGGINGFACEHUB_API_TOKEN -- OPENAI_API_KEY -- PINECONE_API_KEY -- SEARCHAPI_API_KEY -- SERPAPI_API_KEY -- UPSTASH_VECTOR_REST_URL -- UPSTASH_VECTOR_REST_TOKEN -- VECTARA_CUSTOMER_ID -- VECTARA_CORPUS_ID -- VECTARA_API_KEY - - - - -### Precautions - -Global variables are stored in the database, and their values are protected by encryption using a secret -key. To preserve access to your global variables and avoid losing them, you should take a few precautions: - -1. Keep your secret key safe: Even if your database is secure, it won’t be of much use if you can't decrypt -the values. Ideally, you can set your own secret key using the `LANGFLOW_SECRET_KEY` environment variable. If -you don't provide a custom value for the secret key, one will be generated randomly and saved in the Langflow -installation directory. - -2. We use SQLite as the default database, and Langflow saves the database file in the installation directory. -To ensure the security of your data, it’s a good practice to regularly back up this file. If needed, you can -also change the database location by setting the `LANGFLOW_SAVE_DB_IN_CONFIG_DIR` environment variable to true -and configuring `LANGFLOW_CONFIG_DIR` to point to a directory of your choice. Alternatively, you can opt to use -an external database such as PostgreSQL, in which case these configurations are no longer necessary. 
- -For your convenience, if you’re running Langflow directly on your system or in a virtual environment -via a pip installation, you can set these values by providing Langflow with a .env file containing these -environment variables, using the following command: - -```bash -langflow run --env-file .env -``` - -If you’re running Langflow in a Docker container, you can set these values by providing Langflow with: - -```bash -docker run \ - --privileged \ - --user 1000:0 \ - -p 7860:7860 \ - -e LANGFLOW_SECRET_KEY= \ - -e LANGFLOW_SAVE_DB_IN_CONFIG_DIR=true \ - -e LANGFLOW_CONFIG_DIR=/app/container_path \ - -v $(PWD)/your_path:/app/container_path \ - langflowai/langflow:latest -``` - -or - -```bash -docker run \ - --privileged \ - --user 1000:0 \ - -p 7860:7860 \ - --env-file .env \ - -v $(PWD)/your_path:/app/container_path \ - langflowai/langflow:latest -``` diff --git a/docs/docs/Settings/settings-project-general-settings.md b/docs/docs/Settings/settings-project-general-settings.md deleted file mode 100644 index 089c29d60685..000000000000 --- a/docs/docs/Settings/settings-project-general-settings.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -title: Project & General Settings -sidebar_position: 1 -slug: /settings-project-general-settings ---- - - - -:::info - -This page may contain outdated information. It will be updated as soon as possible. - -::: - - - - -Change the **Project Settings** or **General Settings** for Langflow. - - -## Project Settings {#71e61e6544c94f808cd74b8cc012363d} - - ---- - - -Click **Project Name** > **Settings** to view your **Project Settings**. - -- **Name** - the name of your project. -- **Description** - the description for your project. -Visible on the Langflow Store. -- **Endpoint name** - the custom endpoint name for your project's API endpoint. -To use the default value, leave this field blank. 
- -## General Settings {#1a0c451fd5e84feeb1d18c2886d642eb} - - ---- - - -Select your **Profile Picture** > **Settings** to view your **General Settings**. - - -### Profile Picture {#8abfa80ed6c448b6977467679d43c275} - - -Select a profile picture. - - -### Store API Key {#6a12756beb0b42fd84bdf5ab5f10fffb} - - -Add your **Langflow Store** API key. To get a Store key, go to the [Langflow store](https://www.langflow.store/). - - -### Global Variables {#12aa7f28060447babc987bdf57fc065e} - - -Select **Add New** to add a key to Langflow. - - -Select the **trash icon** to delete a key. - - -For more information, see Global Variables. - - -### Langflow API {#0a08ffc3dd9042da9f9d2d49c9df0b6a} - - -Create a Langflow API key. - - -Click **Add New** > **Create Secret Key** and copy the key somewhere safe and accessible. - - -For more information, see Langflow API. - - -### Shortcuts {#6c9d705c9e7f466db496dbc6571c81d2} - - -A list of keyboard shortcuts for Langflow. - - -### Messages {#898425cf88b74c048c23e8e1e1d7c2bd} - - -Inspect, edit, and remove messages in your flow for testing and debugging purposes. - - -For more information, see the Playground. 
- diff --git a/docs/docs/Starter-Projects/1073956357.png b/docs/docs/Starter-Projects/1073956357.png deleted file mode 100644 index 835092f82826..000000000000 Binary files a/docs/docs/Starter-Projects/1073956357.png and /dev/null differ diff --git a/docs/docs/Starter-Projects/1079168789.png b/docs/docs/Starter-Projects/1079168789.png deleted file mode 100644 index 812d3f9cc6d9..000000000000 Binary files a/docs/docs/Starter-Projects/1079168789.png and /dev/null differ diff --git a/docs/docs/Starter-Projects/1140665127.png b/docs/docs/Starter-Projects/1140665127.png deleted file mode 100644 index 453c9b60f3f5..000000000000 Binary files a/docs/docs/Starter-Projects/1140665127.png and /dev/null differ diff --git a/docs/docs/Starter-Projects/1190998947.png b/docs/docs/Starter-Projects/1190998947.png deleted file mode 100644 index a9eed6305ce8..000000000000 Binary files a/docs/docs/Starter-Projects/1190998947.png and /dev/null differ diff --git a/docs/docs/Starter-Projects/1390293355.png b/docs/docs/Starter-Projects/1390293355.png deleted file mode 100644 index 8885b24f0f99..000000000000 Binary files a/docs/docs/Starter-Projects/1390293355.png and /dev/null differ diff --git a/docs/docs/Starter-Projects/1511598495.png b/docs/docs/Starter-Projects/1511598495.png deleted file mode 100644 index a6bca8f6cdee..000000000000 Binary files a/docs/docs/Starter-Projects/1511598495.png and /dev/null differ diff --git a/docs/docs/Starter-Projects/1835734464.png b/docs/docs/Starter-Projects/1835734464.png deleted file mode 100644 index 1b5cc15ab750..000000000000 Binary files a/docs/docs/Starter-Projects/1835734464.png and /dev/null differ diff --git a/docs/docs/Starter-Projects/1946624394.png b/docs/docs/Starter-Projects/1946624394.png deleted file mode 100644 index 47b8f2e9e44c..000000000000 Binary files a/docs/docs/Starter-Projects/1946624394.png and /dev/null differ diff --git a/docs/docs/Starter-Projects/25156979.png b/docs/docs/Starter-Projects/25156979.png deleted file mode 
100644 index 7064ec2855be..000000000000 Binary files a/docs/docs/Starter-Projects/25156979.png and /dev/null differ diff --git a/docs/docs/Starter-Projects/257920618.png b/docs/docs/Starter-Projects/257920618.png deleted file mode 100644 index 4f2e0edb537e..000000000000 Binary files a/docs/docs/Starter-Projects/257920618.png and /dev/null differ diff --git a/docs/docs/Starter-Projects/447530731.png b/docs/docs/Starter-Projects/447530731.png deleted file mode 100644 index e245d408b4b5..000000000000 Binary files a/docs/docs/Starter-Projects/447530731.png and /dev/null differ diff --git a/docs/docs/Starter-Projects/450254819.png b/docs/docs/Starter-Projects/450254819.png deleted file mode 100644 index e464e6927946..000000000000 Binary files a/docs/docs/Starter-Projects/450254819.png and /dev/null differ diff --git a/docs/docs/Starter-Projects/487525520.png b/docs/docs/Starter-Projects/487525520.png deleted file mode 100644 index b56696f44e5a..000000000000 Binary files a/docs/docs/Starter-Projects/487525520.png and /dev/null differ diff --git a/docs/docs/Starter-Projects/626991262.png b/docs/docs/Starter-Projects/626991262.png deleted file mode 100644 index 95948480f416..000000000000 Binary files a/docs/docs/Starter-Projects/626991262.png and /dev/null differ diff --git a/docs/docs/Starter-Projects/648489928.png b/docs/docs/Starter-Projects/648489928.png deleted file mode 100644 index da9a27d13a94..000000000000 Binary files a/docs/docs/Starter-Projects/648489928.png and /dev/null differ diff --git a/docs/docs/Starter-Projects/690736575.png b/docs/docs/Starter-Projects/690736575.png deleted file mode 100644 index 280969e76865..000000000000 Binary files a/docs/docs/Starter-Projects/690736575.png and /dev/null differ diff --git a/docs/docs/Starter-Projects/starter-projects-basic-prompting.md b/docs/docs/Starter-Projects/starter-projects-basic-prompting.md index 7f0c732b2cd8..3c1e12c48fad 100644 --- a/docs/docs/Starter-Projects/starter-projects-basic-prompting.md +++ 
b/docs/docs/Starter-Projects/starter-projects-basic-prompting.md @@ -23,19 +23,10 @@ This article demonstrates how to use Langflow's prompt tools to issue basic prom - [Langflow installed and running](/getting-started-installation) - [OpenAI API key created](https://platform.openai.com/) -## Basic Prompting {#1fae0e83ef08453fba017870d6d981bc} +## Create the basic prompting flow {#19d5305239c841548a695e2bf7839e7a} ---- - - -### Create the basic prompting project {#19d5305239c841548a695e2bf7839e7a} - - -1. From the Langflow dashboard, click **New Project**. - - -![](./1835734464.png) +1. From the Langflow dashboard, click **New Flow**. 2. Select **Basic Prompting**. @@ -44,7 +35,7 @@ This article demonstrates how to use Langflow's prompt tools to issue basic prom 3. The **Basic Prompting** flow is created. -![](./487525520.png) +![](/img/starter-flow-basic-prompting.png) This flow allows you to chat with the **OpenAI** component through the **Prompt** component. @@ -53,24 +44,19 @@ This flow allows you to chat with the **OpenAI** component through the **Prom Examine the **Prompt** component. The **Template** field instructs the LLM to `Answer the user as if you were a pirate.` This should be interesting... -![](./690736575.png) - - 4. To create an environment variable for the **OpenAI** component, in the **OpenAI API Key** field, click the **Globe** button, and then click **Add New Variable**. 1. In the **Variable Name** field, enter `openai_api_key`. 2. In the **Value** field, paste your OpenAI API Key (`sk-...`). 3. Click **Save Variable**. -![](./1390293355.png) - -### Run {#ce52f8e6b491452a9dfb069feb962eed} +## Run the basic prompting flow {#ce52f8e6b491452a9dfb069feb962eed} 1. Click the **Playground** button on the control panel (bottom right side of the workspace). This is where you can interact with your AI. 2. Type a message and press Enter. The bot should respond in a markedly piratical manner! 
-### Modify the prompt for a different result {#3ab045fcbe774c8fb3adc528f9042ba0} +## Modify the prompt for a different result {#3ab045fcbe774c8fb3adc528f9042ba0} 1. To modify your prompt results, in the **Prompt** template, click the **Template** field. The **Edit Prompt** window opens. 2. Change `Answer the user as if you were a pirate` to a different character, perhaps `Answer the user as if you were Hermione Granger.` diff --git a/docs/docs/Starter-Projects/starter-projects-blog-writer.md b/docs/docs/Starter-Projects/starter-projects-blog-writer.md index 64e3871eb475..35a906a8fc0d 100644 --- a/docs/docs/Starter-Projects/starter-projects-blog-writer.md +++ b/docs/docs/Starter-Projects/starter-projects-blog-writer.md @@ -4,9 +4,11 @@ sidebar_position: 1 slug: /starter-projects-blog-writer --- +Build a Blog Writer flow for a one-shot application using OpenAI. +This flow extends the Basic Prompting flow with the **URL** and **Parse data** components that fetch content from multiple URLs and convert the loaded data into plain text. -Build a blog writer with OpenAI that uses URLs for reference content. +OpenAI uses this loaded data to generate a blog post, as instructed by the **Text input** component. ## Prerequisites {#899268e6c12c49b59215373a38287507} @@ -17,43 +19,41 @@ Build a blog writer with OpenAI that uses URLs for reference content. - [Langflow installed and running](/getting-started-installation) - [OpenAI API key created](https://platform.openai.com/) -## Blog Writer {#ef6e7555a96d4cbab169fbd2d8578bdc} +## Create the blog writer flow {#0c1a9c65b7d640f693ec3aad963416ff} ---- - - -### Create the Blog Writer project {#0c1a9c65b7d640f693ec3aad963416ff} - -1. From the Langflow dashboard, click **New Project**. +1. From the Langflow dashboard, click **New Flow**. 2. Select **Blog Writer**. -3. A workspace for the **Blog Writer** is displayed. +3. The **Blog Writer** flow is created. 
-![](./1190998947.png) +![](/img/starter-flow-blog-writer.png) -This flow creates a one-shot article generator with **Prompt**, **OpenAI**, and **Chat Output** components, augmented with reference content and instructions from the **URL** and **Instructions** components. +This flow creates a one-shot article generator with **Prompt**, **OpenAI**, and **Chat Output** components, augmented with reference content and instructions from the **URL** and **Text Input** components. +The **URL** component extracts raw text and metadata from one or more web links. +The **Parse Data** component converts the data coming from the **URL** component into plain text to feed the prompt. -The **Template** field of the **Prompt** looks like this: +To examine the flow's prompt, click the **Template** field of the **Prompt** component. +```plain +Reference 1: -![](./257920618.png) +{references} +--- -The `{instructions}` value is received from the **Instructions** component. One or more `{references}`  are received from a list of URLs. +{instructions} -- **URL** extracts raw text and metadata from one or more web links. -- **Parse Data** converts the data coming from the **URL** component into plain text to feed a prompt. +Blog: +``` -![](./25156979.png) +The `{instructions}` value is received from the **Text input** component, and one or more `{references}` are received from a list of URLs parsed from the **URL** component. -### Run the Blog Writer {#b93be7a567f5400293693b31b8d0f81a} +### Run the blog writer flow {#b93be7a567f5400293693b31b8d0f81a} 1. Click the **Playground** button. Here you can chat with the AI that has access to the **URL** content. 2. Click the **Lighting Bolt** icon to run it. 3. To write about something different, change the values in the **URL** component and adjust the instructions on the left side bar of the **Playground**. Try again and see what the LLM constructs. 
-![](./447530731.png) - diff --git a/docs/docs/Starter-Projects/starter-projects-document-qa.md b/docs/docs/Starter-Projects/starter-projects-document-qa.md index bf48b31e4dc1..fd12f729ad26 100644 --- a/docs/docs/Starter-Projects/starter-projects-document-qa.md +++ b/docs/docs/Starter-Projects/starter-projects-document-qa.md @@ -17,36 +17,24 @@ Build a question-and-answer chatbot with a document loaded from local memory. - [Langflow installed and running](/getting-started-installation) - [OpenAI API key created](https://platform.openai.com/) -## Document QA {#acc90b19d4634c279b3e4e19e4e7ab1d} +## Create the document QA flow {#204500104f024553aab2b633bb99f603} ---- - - -### Create the Document QA project {#204500104f024553aab2b633bb99f603} - -1. From the Langflow dashboard, click **New Project**. +1. From the Langflow dashboard, click **New Flow**. 2. Select **Document QA**. -3. The **Document QA** project is created. +3. The **Document QA** flow is created. -![](./626991262.png) +![](/img/starter-flow-document-qa.png) This flow is composed of a standard chatbot with the **Chat Input**, **Prompt**, **OpenAI**, and **Chat Output** components, but it also incorporates a **File** component, which loads a file from your local machine. **Parse Data** is used to convert the data from **File** into the **Prompt** component as `{Document}`. The **Prompt** component is instructed to answer questions based on the contents of `{Document}`. This gives the **OpenAI** component context it would not otherwise have access to. -![](./1140665127.png) - - -### Run the Document QA {#f58fcc2b9e594156a829b1772b6a7191} +### Run the document QA flow {#f58fcc2b9e594156a829b1772b6a7191} 1. To select a document to load, in the **File** component, click the **Path** field. Select a local file, and then click **Open**. The file name appears in the field. - -![](./1073956357.png) - - 2. Click the **Playground** button. Here you can chat with the AI that has access to your document's content. 
diff --git a/docs/docs/Starter-Projects/starter-projects-dynamic-agent.md b/docs/docs/Starter-Projects/starter-projects-dynamic-agent.md new file mode 100644 index 000000000000..7c026226b5cd --- /dev/null +++ b/docs/docs/Starter-Projects/starter-projects-dynamic-agent.md @@ -0,0 +1,53 @@ +--- +title: Dynamic agent +sidebar_position: 7 +slug: /starter-projects-dynamic-agent +--- + +Build a **Dynamic Agent** flow for an agentic application using CrewAI. + +An **agent** uses an LLM as its "brain" to reason through tasks and select among the connected tools to complete them. + +This flow uses [CrewAI](https://docs.crewai.com/) to manage a [Hierarchical crew](https://docs.crewai.com/how-to/Hierarchical/) of **Agents** as they perform a sequence of **Tasks**. + +CrewAI agents have **Roles**, **Goals**, and **Backstories** that define their behavior and interactions with other agents. Agents in a Hierarchical Crew are managed by a single agent with a **Manager** role, which is connected to an **Open AI** LLM component to reason through the tasks and select the appropriate tools to complete them. + +This flow is "dynamic" because it uses the **Chat input** component's text to define a CrewAI agent's Role, Goal, and Backstory. The created agent then uses the connected tools to research and complete the **Task** created from the **Chat input** component. + +## Prerequisites + +To use this flow, you need an [OpenAI API key](https://platform.openai.com/) and a [Search API key](https://www.searchapi.io/). + +## Open Langflow and start a new flow + +Click **New Flow**, and then select the **Dynamic Agent** flow. + +This opens a starter flow with the necessary components to run an agentic application using CrewAI. + +The **Dynamic Agent** flow consists of these components: + +* The **Chat Input** component accepts user input to the chat. +* The **Prompt** component combines the user input with a user-defined prompt.
+* The **OpenAI** model component sends the user input and prompt to the OpenAI API and receives a response. +* The **Chat Output** component prints the flow's output to the chat. +* The **CrewAI Agent** component is an autonomous unit programmed to perform tasks, make decisions, and communicate with other agents. +* The **Crew AI Crew** component represents a collaborative group of agents working together to achieve a set of tasks. This Crew can manage work **sequentially** or **hierarchically**. +* The **Crew AI Task** component is a specific assignment to be completed by agents. +This task can be **sequential** or **hierarchical** depending on the Crew's configuration. +* The **SearchAPI** tool performs web searches using the **SearchAPI.io** API. +* The **Yahoo Finance News Tool** component creates a tool for retrieving news from Yahoo Finance. + +## Run the Dynamic Agent flow + +1. Add your credentials to the OpenAI and SearchAPI components using Langflow's Global Variables: + - Click **Settings**, then **Global Variables**. + - Click **Add New**. + - Name your variable and paste your API key in the **Value** field. + - In the **Apply To Fields** field, select the field to apply this variable to. + - Click **Save Variable**. +2. In the **Chat output** component, click ▶️ Play to start the end-to-end application flow. + A **Chat output built successfully** message and a ✅ Check on all components indicate that the flow ran successfully. +3. Click **Playground** to start a chat session. + You should receive a detailed, helpful answer to the question defined in the **Chat input** component. + +Now that your query has completed the journey from **Chat input** to **Chat output**, you have completed the **Dynamic Agent** flow. 
\ No newline at end of file diff --git a/docs/docs/Starter-Projects/starter-projects-memory-chatbot.md b/docs/docs/Starter-Projects/starter-projects-memory-chatbot.md index 03f44af555d7..ee07d08d623d 100644 --- a/docs/docs/Starter-Projects/starter-projects-memory-chatbot.md +++ b/docs/docs/Starter-Projects/starter-projects-memory-chatbot.md @@ -6,7 +6,7 @@ slug: /starter-projects-memory-chatbot -This flow extends the [Basic Prompting](http://localhost:3000/starter-projects/basic-prompting) flow to include a chat memory. This makes the AI remember previous user inputs. +This flow extends the [basic prompting](/starter-projects-basic-prompting) flow to include a chat memory. This makes the AI remember previous user inputs. ## Prerequisites {#a71d73e99b1543bbba827207503cf31f} @@ -17,19 +17,14 @@ This flow extends the [Basic Prompting](http://localhost:3000/starter-projects/ - [Langflow installed and running](/getting-started-installation) - [OpenAI API key created](https://platform.openai.com/) -## Memory Chatbot {#54073bf7eb1b4ff490c313f5612f0842} +## Create the memory chatbot flow ---- - - -### Create the memory chatbot project {#70ce99381b7043a1b417a81e9ae74c72} - -1. From the Langflow dashboard, click **New Project**. +1. From the Langflow dashboard, click **New Flow**. 2. Select **Memory Chatbot**. -3. The **Memory Chatbot** flow is created . +3. The **Memory Chatbot** flow is created. -![](./1511598495.png) +![](/img/starter-flow-memory-chatbot.png) This flow uses the same components as the Basic Prompting one, but extends it with a **Chat Memory** component. This component retrieves previous messages and sends them to the **Prompt** component to fill a part of the **Template** with context. @@ -38,13 +33,10 @@ This flow uses the same components as the Basic Prompting one, but extends it wi By clicking the template, you'll see the prompt editor like below: -![](./450254819.png) - - This gives the **OpenAI** component a memory of previous chat messages. 
- -### Run {#a110cad860584c98af1aead006035378} +## Run the memory chatbot flow 1. Open the Playground. 2. Type multiple questions. In the **Memories** tab, your queries are logged in order. Up to 100 queries are stored by default. Try telling the AI your name and asking `What is my name?` on a second message, or `What is the first subject I asked you about?` to validate that previous knowledge is taking effect. @@ -57,11 +49,7 @@ Check and adjust advanced parameters by opening the Advanced Settings of the ** - -![](./1079168789.png) - - -## Session ID {#4e68c3c0750942f98c45c1c45d7ffbbe} +## Use Session ID with the memory chatbot flow {#4e68c3c0750942f98c45c1c45d7ffbbe} --- diff --git a/docs/docs/Starter-Projects/starter-projects-simple-agent.md b/docs/docs/Starter-Projects/starter-projects-simple-agent.md new file mode 100644 index 000000000000..52879c4ad220 --- /dev/null +++ b/docs/docs/Starter-Projects/starter-projects-simple-agent.md @@ -0,0 +1,58 @@ +--- +title: Simple agent +sidebar_position: 6 +slug: /starter-projects-simple-agent +--- + +Build a **Simple Agent** flow for an agentic application using the Tool-calling agent. + +An **agent** uses an LLM as its "brain" to select among the connected tools and complete its tasks. + +In this flow, the **Tool-calling agent** reasons using an **Open AI** LLM to solve math problems. It will select the **Calculator** tool for simpler math, and the **Python REPL** tool (with the Python `math` library) for more complex problems. + +## Prerequisites + +To use this flow, you need an OpenAI API key. + +## Open Langflow and start a new flow + +Click **New Flow**, and then select the **Simple Agent** flow. + +This opens a starter flow with the necessary components to run an agentic application using the Tool-calling agent.
+ +## Simple Agent flow + +![](/img/starter-flow-simple-agent.png) + +The **Simple Agent** flow consists of these components: + +* The **Tool calling agent** component uses the connected LLM to reason through the user's input and select among the connected tools to complete its task. +* The **Python REPL tool** component executes Python code in a REPL (Read-Evaluate-Print Loop) interpreter. +* The **Calculator** component performs basic arithmetic operations. +* The **Chat Input** component accepts user input to the chat. +* The **Prompt** component combines the user input with a user-defined prompt. +* The **Chat Output** component prints the flow's output to the chat. +* The **OpenAI** model component sends the user input and prompt to the OpenAI API and receives a response. + +## Run the Simple Agent flow + +1. Add your credentials to the Open AI component. +2. In the **Chat output** component, click ▶️ Play to start the end-to-end application flow. + A **Chat output built successfully** message and a ✅ Check on all components indicate that the flow ran successfully. +3. Click **Playground** to start a chat session. +4. Enter a simple math problem, like `2 + 2`, and then make sure the bot responds with the correct answer. +5. To confirm the REPL interpreter is working, prompt the `math` library directly with `math.sqrt(4)` and see if the bot responds with `2.0`. +6. The agent will also reason through more complex word problems. For example, prompt the agent with the following math problem: + +```plain +The equation 24x2+25x−47ax−2=−8x−3−53ax−2 is true for all values of x≠2a, where a is a constant. +What is the value of a? +A) -16 +B) -3 +C) 3 +D) 16 +``` + +The agent should respond with `B`. + +Now that your query has completed the journey from **Chat input** to **Chat output**, you have completed the **Simple Agent** flow.
diff --git a/docs/docs/Starter-Projects/starter-projects-travel-planning-agent.md b/docs/docs/Starter-Projects/starter-projects-travel-planning-agent.md new file mode 100644 index 000000000000..c34704d06c29 --- /dev/null +++ b/docs/docs/Starter-Projects/starter-projects-travel-planning-agent.md @@ -0,0 +1,48 @@ +--- +title: Travel planning agent +sidebar_position: 8 +slug: /starter-projects-travel-planning-agent +--- + +Build a **Travel Planning Agent** flow for an agentic application using multiple Tool-calling agents. + +An **agent** uses an LLM as its "brain" to select among the connected tools and complete its tasks. + +In this flow, multiple **Tool-calling agents** reason using an **Open AI** LLM to plan a travel journey. Each agent is given a different responsibility defined by its **System Prompt** field. + +The **Chat input** defines where the user wants to go, and passes the result to the **City Selection** agent. The **Local Expert** agent then adds information based on the selected cities, and the **Travel Concierge** assembles a seven day travel plan in Markdown. + +All agents have access to the **Search API** and **URL Content Fetcher** components, while only the Travel Concierge can use the **Calculator** for computing the trip costs. + +## Prerequisites + +To use this flow, you need an [OpenAI API key](https://platform.openai.com/) and a [Search API key](https://www.searchapi.io/). + +## Open Langflow and start a new flow + +Click **New Flow**, and then select the **Travel Planning Agent** flow. + +This opens a starter flow with the necessary components to run an agentic application using multiple Tool-calling agents. + +## Create the travel planning agent flow + +![](/img/starter-flow-travel-agent.png) + +The **Travel Planning Agent** flow consists of these components: + +* Multiple **Tool calling agent** components that use the connected LLM to reason through the user's input and select among the connected tools to complete their tasks.
+* The **Calculator** component performs basic arithmetic operations. +* The **URL Content Fetcher** component scrapes content from a given URL. +* The **Chat Input** component accepts user input to the chat. +* The **Chat Output** component prints the flow's output to the chat. +* The **OpenAI** model component sends the user input and prompt to the OpenAI API and receives a response. + +## Run the travel planning agent flow + +1. Add your credentials to the Open AI and Search API components. +2. In the **Chat output** component, click ▶️ Play to start the end-to-end application flow. + A **Chat output built successfully** message and a ✅ Check on all components indicate that the flow ran successfully. +3. Click **Playground** to start a chat session. + You should receive a detailed, helpful answer to the journey defined in the **Chat input** component. + +Now that your query has completed the journey from **Chat input** to **Chat output**, you have completed the **Travel Planning Agent** flow. diff --git a/docs/docs/Starter-Projects/starter-projects-vector-store-rag.md b/docs/docs/Starter-Projects/starter-projects-vector-store-rag.md index a95aa46d5efb..9e70d3c6c74a 100644 --- a/docs/docs/Starter-Projects/starter-projects-vector-store-rag.md +++ b/docs/docs/Starter-Projects/starter-projects-vector-store-rag.md @@ -15,7 +15,7 @@ RAG is backed by a **vector store**, a vector database which stores embeddings This enables **vector search**, a more powerful and context-aware search. -We've chosen [Astra DB](https://astra.datastax.com/signup?utm_source=langflow-pre-release&utm_medium=referral&utm_campaign=langflow-announcement&utm_content=create-a-free-astra-db-account) as the vector database for this starter project, but you can follow along with any of Langflow's vector database options. 
+We've chosen [Astra DB](https://astra.datastax.com/signup?utm_source=langflow-pre-release&utm_medium=referral&utm_campaign=langflow-announcement&utm_content=create-a-free-astra-db-account) as the vector database for this starter flow, but you can follow along with any of Langflow's vector database options. ## Prerequisites {#6aa2c6dff6894eccadc39d4903d79e66} --- -- [Langflow installed and running](http://localhost:3000/getting-started/install-langflow) +- [Langflow installed and running](https://docs.langflow.org/getting-started-installation) - [OpenAI API key](https://platform.openai.com/) - [An Astra DB vector database created](https://docs.datastax.com/en/astra-db-serverless/get-started/quickstart.html) with: - Application Token - API Endpoint -![](./648489928.png) +## Create the vector store RAG flow -## Vector Store RAG {#1c37d15332f94cfe8f6e11dc6cc8a4ea} - - ---- - - -### Create the vector store RAG project {#e3ed64193e5e448f81279e1d54ba43cf} - -1. From the Langflow dashboard, click **New Project**. +1. From the Langflow dashboard, click **New Flow**. 2. Select **Vector Store RAG**. -3. The **Vector Store RAG** project is created. +3. The **Vector Store RAG** flow is created. -![](./1946624394.png) +![](/img/starter-flow-vector-rag.png) -The vector store RAG flow is built of two separate flows. Ingestion and query. +The vector store RAG flow is built of two separate flows for ingestion and query. The **ingestion** part (bottom of the screen) populates the vector store with data from a local file. It ingests data from a file (**File**), splits it into chunks (**Split Text**), indexes it in Astra DB (**Astra DB**), and computes embeddings for the chunks using an embedding model (**OpenAI Embeddings**). @@ -86,7 +78,7 @@ The **query** part (top of the screen) allows users to retrieve embedded vecto 5.
Repeat the above steps for the **API Endpoint** field, pasting your Astra API Endpoint instead (`https://ASTRA_DB_ID-ASTRA_DB_REGION.apps.astra.datastax.com`). 6. Add the global variable to both the **Astra DB** and **Astra DB Search** components. -### Run the Vector Store RAG {#815a6536d2d548d987f0f4e375a58b15} +## Run the Vector Store RAG flow 1. Click the **Playground** button. Here you can chat with the AI that uses context from the database you created. 2. Type a message and press Enter. (Try something like "What topics do you know about?") diff --git "a/docs/docs/What\342\200\231s-New/1143907392.png" "b/docs/docs/What\342\200\231s-New/1143907392.png" deleted file mode 100644 index 54779b8537ca..000000000000 Binary files "a/docs/docs/What\342\200\231s-New/1143907392.png" and /dev/null differ diff --git "a/docs/docs/What\342\200\231s-New/_category_.json" "b/docs/docs/What\342\200\231s-New/_category_.json" deleted file mode 100644 index d014c875afec..000000000000 --- "a/docs/docs/What\342\200\231s-New/_category_.json" +++ /dev/null @@ -1 +0,0 @@ -{"position":0, "label":"What’s New?"} \ No newline at end of file diff --git "a/docs/docs/What\342\200\231s-New/whats-new-a-new-chapter-langflow.md" "b/docs/docs/What\342\200\231s-New/whats-new-a-new-chapter-langflow.md" deleted file mode 100644 index 1c69dd8a7aae..000000000000 --- "a/docs/docs/What\342\200\231s-New/whats-new-a-new-chapter-langflow.md" +++ /dev/null @@ -1,175 +0,0 @@ ---- -title: 1.0 - A new chapter for Langflow -sidebar_position: 0 -slug: /whats-new-a-new-chapter-langflow ---- - - - -![](./1143907392.png) - - -## First things first {#5add758564574950862f689198a4e085} - - ---- - - -**Thank you all for being part of the Langflow community**. The journey so far has been amazing, and we are thrilled to have you with us. - - -We have some exciting news to share with you. Langflow is evolving, and we want to tell you all about it! - - -## What's new? 
{#ecad059e8f8249c598404c0b2bab36fa} - - ---- - - -In the past year, we learned a lot from the community and our users. We saw the potential of Langflow and the need for a visual, interactive platform for building conversational AI applications (and beyond). You thought us the importance of a platform that is easy to use, but also powerful and controllable, and that made clear to us how Langflow's transparency could be improved. - - -Below are some of the new features we included to make that happen! - - -### Same Component, Multiple Outputs {#85986f3ae303444fb69d9ea1baa2eca8} - - -Components can now have more than a single output, allowing for unique flexibility in creating complex flows. The game-changer is output routing — it allows for so many new capabilities it’s almost silly to describe! - -1. Branch to one or more subsequent components; -2. Apply logic operations like if/else and exploit decision-making; -3. Create classification models that choose between paths; -4. Enable the development of agent architectures from scratch; -5. Build an orchestrator that routes between agents. - -### Flow + Composition {#12d86f91b5af4abc9f8e885c5f828df4} - - -One key change you'll notice is that projects now require you to define **Inputs** and **Outputs**. They allow you to define the structure of your conversation and how data flows through it. This change comes with a new way of visualizing your projects. - - -Before 1.0 you would connect components to ultimately build one final component that was processed behind the scenes. Now, each step of the process is defined by you, is visible on the workspace, and can be monitored and controlled. - - -This makes it so that composition is now just **another way** of building in Langflow and **data flows through your project more transparently**. This means that the easy stuff is _really_ easy and the complex parts are still possible! 
- -- **Flow:** Data is processed by one component and then passed to the next component in line for immediate execution. -- **Composition**: Allows components to not only forward data but also share states for modular building. - -For example, a flow can sequentially process text, and after a few steps, trigger an agent. It can access functions that wait to be called or to respond. This blend of flow and composition brings an unprecedented level of flexibility and control to data workflows in LLM-based apps and agents that use multiple models and APIs working together to achieve tasks. - - -### Memory Management {#a9e352272f4a44499f52fae65b663118} - - -Langflow 1.0 natively allows every chat message to be stored, and a single flow can have multiple memory sessions. This enables you to create multiple “memories” for agents to store and recall specific information as needed. - - -You can edit and remove previous messages to inspect and validate a model’s response behavior. Control, explore, and manage conversation histories to get your models acting just right. - - -### Component Freeze 🥶 {#4912d08da5464ff2aff595d6b26fd809} - - -Component output freezing is back in Langflow, and it’s cooler than ever! - - -Once a component runs, you can now lock its previous output state to prevent it from re-running. - - -Avoid spending extra tokens and remove repetition when output should be constant — plus it's great for debugging and prototyping! - - -### Output Preview {#f56d4400b0214ef2ab1206add068dd99} - - -Each component now includes an output visualizer that opens a pop-up screen, allowing you to easily inspect and monitor transmissions between components. It provides instant feedback on your workflows, letting you see results as they are processed. 🔍 - - -### Inputs and Outputs Handling {#2112c11dc496480c9bd681f04f8533ea} - - -Inputs and outputs finally make more sense to us, and hopefully to you too. 
 - - -We’re proposing Langflow-native types to keep things consistent, but you're not limited — you can use any Python type. For instance, a Chat Input component sends out what we call a Message object (text + metadata like date, time, and sender), but maybe you want to introduce external types from your favorite Python package? Go wild. Each native type will have its own visualization modes and will evolve according to new integrations added. - - -### Custom Endpoint Name {#bfeb7018e8bd46bf9dbef43ce2d3692b} - - -Now you can pick a custom name for your endpoint used to call your flow from the API. - - -### Logs & Monitoring {#c902ee43b4b24711b526352ae2451c48} - - -A new logs page has been added! Now, both component executions and message history from the chat can be inspected in an interactive table. This will make it easier to debug, inspect, and manage messages passing through components. - - -### Folders 📁 {#d01237fd83e4467994fed6fa7a8ee4f4} - - -We introduced folders on the home page to help categorize flows and components. Create, remove and edit them to keep your work organized. - - -### Playground {#27649918dcb64a0ebae83db767bfe2eb} - - -By having a clear definition of Inputs and Outputs, we could build the experience around that, which led us to create the Playground. - - -When building a project, testing and debugging are crucial. The Playground is an interface that changes dynamically based on the Inputs and Outputs you defined in your project. - - -For example, let's say you are building a simple RAG application. Generally, you have an Input, some references that come from a Vector Store Search, a Prompt, and the answer. Now, you could plug the output of your Prompt into a Text Output, rename that to "Prompt Result," and see the output of your Prompt in the Playground. We have many planned features for the Playground, and we're excited to see how you'll explore it! - - -### Multi-Modal {#b7753bc9d72f40c49c4074a8d0e51344} - - -Langflow is now multi-modal! 
It can now handle images and more soon! - - -We’ve also improved project organization, global variables and overall settings, added Python 3.12 compatibility, keyboard shortcuts and a lot of new and fun experimental components! - - -## An Easier Start {#c6bf434c0d35482bbf2ef0bceff902f0} - - ---- - - -The experience for first-time users is something we wanted to improve. For that we created a couple of Starter Projects. It's now much easier to start a new project, and you can choose from a list of starter projects to get you going. - - -For now, we have: - -- **Basic Prompting (Hello, World)**: Learn the basics of a Prompt Component. -- **Vector Store RAG**: Ingest data into a Vector Store and then use it to run a RAG application. -- **Memory Chatbot**: Create a simple chatbot that can remember things about the user. -- **Document QA**: Build a simple flow that helps you get answers about a document. -- **Blog Writer**: Expand on the Prompt variables and be creative about what inputs you add to it. - -Please let us know what other starter projects you would like to see in the future! - - -## What's Next? {#21004578890d4397bc291b43eb140640} - - ---- - - -Langflow has gone through a big change, and we are excited to see how you use it and what you think of it. We plan to add more types of Input and Output like Image and Audio, and we also plan to add more Components to help you build more complex projects. - - -We are excited to see the community embracing Langflow as their number one AI builder and eagerly wait to see what new inspiring projects will come out of this release! A big thanks to everyone who's supporting or being part of this community in any way. 
✨ - - -Sincerely, - - -**The Langflow Team 🚀** - diff --git a/docs/docs/Workspace/1109547993.png b/docs/docs/Workspace/1109547993.png deleted file mode 100644 index 5bc8db5c9449..000000000000 Binary files a/docs/docs/Workspace/1109547993.png and /dev/null differ diff --git a/docs/docs/Workspace/1224026366.png b/docs/docs/Workspace/1224026366.png deleted file mode 100644 index ca271dd0808d..000000000000 Binary files a/docs/docs/Workspace/1224026366.png and /dev/null differ diff --git a/docs/docs/Workspace/1769489314.png b/docs/docs/Workspace/1769489314.png deleted file mode 100644 index dbc9493e0440..000000000000 Binary files a/docs/docs/Workspace/1769489314.png and /dev/null differ diff --git a/docs/docs/Workspace/1801518600.png b/docs/docs/Workspace/1801518600.png deleted file mode 100644 index 868b5d5e9b06..000000000000 Binary files a/docs/docs/Workspace/1801518600.png and /dev/null differ diff --git a/docs/docs/Workspace/1862729633.png b/docs/docs/Workspace/1862729633.png deleted file mode 100644 index f4c26e1695ff..000000000000 Binary files a/docs/docs/Workspace/1862729633.png and /dev/null differ diff --git a/docs/docs/Workspace/310382168.png b/docs/docs/Workspace/310382168.png deleted file mode 100644 index 9a29e3c7e79c..000000000000 Binary files a/docs/docs/Workspace/310382168.png and /dev/null differ diff --git a/docs/docs/Workspace/563382604.png b/docs/docs/Workspace/563382604.png deleted file mode 100644 index bddaf128ccc6..000000000000 Binary files a/docs/docs/Workspace/563382604.png and /dev/null differ diff --git a/docs/docs/Workspace/566212295.png b/docs/docs/Workspace/566212295.png deleted file mode 100644 index f5e133b7664d..000000000000 Binary files a/docs/docs/Workspace/566212295.png and /dev/null differ diff --git a/docs/docs/Workspace/816620192.png b/docs/docs/Workspace/816620192.png deleted file mode 100644 index 6b34528c827e..000000000000 Binary files a/docs/docs/Workspace/816620192.png and /dev/null differ diff --git 
a/docs/docs/Workspace/workspace-api.md b/docs/docs/Workspace/workspace-api.md index 9873143d5032..2400737aacf1 100644 --- a/docs/docs/Workspace/workspace-api.md +++ b/docs/docs/Workspace/workspace-api.md @@ -9,7 +9,7 @@ import ReactPlayer from "react-player"; The **API** section presents code templates for integrating your flow into external applications. -![](./1862729633.png) +![](/img/api-pane.png) ### cURL {#4eb287a8424349c4b0b436a6703de5f3} @@ -36,9 +36,6 @@ The **Python Code** tab displays code to interact with your flow's `.json` f The **Tweaks** tab displays the available parameters for your flow. Modifying the parameters changes the code parameters across all windows. For example, changing the **Chat Input** component's `input_value` will change that value across all API calls. - - - ## Chat Widget {#48f121a6cb3243979a341753da0c2700} @@ -57,18 +54,12 @@ You can get the HTML code embedded with the chat by clicking the Code button at Clicking the Chat Widget HTML tab, you'll get the code to be inserted. Read below to learn how to use it with HTML, React and Angular. -![](./566212295.png) - - ### Embed your flow into HTML {#6e84db2f2a0d451db6fa03c57e9bf9a4} The Chat Widget can be embedded into any HTML page, inside a `` tag, as demonstrated in the video below. 
- - - ### Embed your flow with React {#fe5d3b1c42e74e4c84ebc9d1799b7665} @@ -80,7 +71,6 @@ To embed the Chat Widget using React, insert this ` documentation.", mode: "html", image: "https://cdn-icons-png.flaticon.com/512/5968/5968350.png", language: "html", - code: codes[5], - }, - ]; - - if (includeWebhookCurl) { - tabs.splice(1, 0, { - name: "Webhook cURL", - mode: "bash", - image: "https://curl.se/logo/curl-symbol-transparent.png", - language: "sh", - code: codes[1], + code: codes.widgetCode, }); } - if (includeTweaks) { tabs.push({ name: "Tweaks", mode: "python", image: "https://cdn-icons-png.flaticon.com/512/5968/5968350.png", language: "py", - code: codes[6], + code: codes.tweaksCode, }); } diff --git a/src/frontend/src/modals/baseModal/helpers/switch-case-size.ts b/src/frontend/src/modals/baseModal/helpers/switch-case-size.ts index e2505a495ccc..23196ce8f00c 100644 --- a/src/frontend/src/modals/baseModal/helpers/switch-case-size.ts +++ b/src/frontend/src/modals/baseModal/helpers/switch-case-size.ts @@ -38,6 +38,11 @@ export const switchCaseModalSize = (size: string) => { minWidth = "min-w-[85vw]"; height = "h-[80vh]"; break; + case "templates": + minWidth = "w-[97vw] max-w-[1200px]"; + height = + "min-h-[700px] lg:min-h-0 h-[90vh] md:h-[80vh] lg:h-[50vw] lg:max-h-[620px]"; + break; case "three-cards": minWidth = "min-w-[1066px]"; height = "max-h-[94vh]"; @@ -61,6 +66,12 @@ export const switchCaseModalSize = (size: string) => { minWidth = "min-w-[80vw]"; height = ""; break; + + case "x-large": + minWidth = "min-w-[95vw]"; + height = "h-[95vh]"; + break; + default: minWidth = "min-w-[80vw]"; height = "h-[90vh]"; diff --git a/src/frontend/src/modals/baseModal/index.tsx b/src/frontend/src/modals/baseModal/index.tsx index 947b70d4d451..ffeebaca8ce7 100644 --- a/src/frontend/src/modals/baseModal/index.tsx +++ b/src/frontend/src/modals/baseModal/index.tsx @@ -22,7 +22,11 @@ import { modalHeaderType } from "../../types/components"; import { cn } from 
"../../utils/utils"; import { switchCaseModalSize } from "./helpers/switch-case-size"; -type ContentProps = { children: ReactNode; overflowHidden?: boolean }; +type ContentProps = { + children: ReactNode; + overflowHidden?: boolean; + className?: string; +}; type HeaderProps = { children: ReactNode; description: string }; type FooterProps = { children: ReactNode }; type TriggerProps = { @@ -32,12 +36,17 @@ type TriggerProps = { className?: string; }; -const Content: React.FC = ({ children, overflowHidden }) => { +const Content: React.FC = ({ + children, + overflowHidden, + className, +}) => { return (
{children} @@ -68,10 +77,10 @@ const Header: React.FC<{ }> = ({ children, description }: modalHeaderType): JSX.Element => { return ( - + {children} - + {description} @@ -89,9 +98,16 @@ const Footer: React.FC<{ onClick?: () => void; }; close?: boolean; -}> = ({ children, submit, close }) => { + centered?: boolean; +}> = ({ children, submit, close, centered }) => { return ( -
+
{submit ? (
{children ??
} @@ -125,12 +141,14 @@ const Footer: React.FC<{ ); }; interface BaseModalProps { - children: [ - React.ReactElement, - React.ReactElement, - React.ReactElement?, - React.ReactElement?, - ]; + children: + | [ + React.ReactElement, + React.ReactElement?, + React.ReactElement?, + React.ReactElement?, + ] + | React.ReactElement; open?: boolean; setOpen?: (open: boolean) => void; size?: @@ -143,13 +161,15 @@ interface BaseModalProps { | "three-cards" | "large-thin" | "large-h-full" + | "templates" | "small-h-full" | "medium-h-full" | "md-thin" | "sm-thin" | "smaller-h-full" - | "medium-log"; - + | "medium-log" + | "x-large"; + className?: string; disable?: boolean; onChangeOpenModal?: (open?: boolean) => void; type?: "modal" | "dialog"; @@ -157,6 +177,7 @@ interface BaseModalProps { onEscapeKeyDown?: (e: KeyboardEvent) => void; } function BaseModal({ + className, open, setOpen, children, @@ -189,7 +210,7 @@ function BaseModal({ const modalContent = ( <> - {headerChild} + {headerChild && headerChild} {ContentChild} {ContentFooter && ContentFooter} @@ -199,6 +220,7 @@ function BaseModal({ minWidth, height, "flex flex-col duration-300 overflow-hidden", + className, ); //UPDATE COLORS AND STYLE CLASSSES @@ -213,6 +235,7 @@ function BaseModal({ {triggerChild} event.preventDefault()} onEscapeKeyDown={onEscapeKeyDown} className={contentClasses} > diff --git a/src/frontend/src/modals/codeAreaModal/index.tsx b/src/frontend/src/modals/codeAreaModal/index.tsx index 9a6943576c10..d2371d406538 100644 --- a/src/frontend/src/modals/codeAreaModal/index.tsx +++ b/src/frontend/src/modals/codeAreaModal/index.tsx @@ -6,6 +6,8 @@ import "ace-builds/src-noconflict/theme-twilight"; // import "ace-builds/webpack-resolver"; import { usePostValidateCode } from "@/controllers/API/queries/nodes/use-post-validate-code"; import { usePostValidateComponentCode } from "@/controllers/API/queries/nodes/use-post-validate-component-code"; +import useFlowStore from "@/stores/flowStore"; +import { cloneDeep 
} from "lodash"; import { useEffect, useRef, useState } from "react"; import AceEditor from "react-ace"; import ReactAce from "react-ace/lib/ace"; @@ -40,6 +42,7 @@ export default function CodeAreaModal({ readonly = false, open: myOpen, setOpen: mySetOpen, + componentId, }: codeAreaModalPropsType): JSX.Element { const [code, setCode] = useState(value); const [open, setOpen] = @@ -52,12 +55,15 @@ export default function CodeAreaModal({ const setErrorData = useAlertStore((state) => state.setErrorData); const [openConfirmation, setOpenConfirmation] = useState(false); const codeRef = useRef(null); + const { mutate, isPending } = usePostValidateCode(); const [error, setError] = useState<{ detail: CodeErrorDataTypeAPI; } | null>(null); - const { mutate: validateCode } = usePostValidateCode(); const { mutate: validateComponentCode } = usePostValidateComponentCode(); + const currentFlow = useFlowStore((state) => state.currentFlow); + const nodes = useFlowStore((state) => state.nodes); + const setNodes = useFlowStore((state) => state.setNodes); useEffect(() => { // if nodeClass.template has more fields other than code and dynamic is true @@ -68,7 +74,7 @@ export default function CodeAreaModal({ }, []); function processNonDynamicField() { - validateCode( + mutate( { code }, { onSuccess: (apiReturn) => { @@ -120,6 +126,17 @@ export default function CodeAreaModal({ if (data && type) { setValue(code); setNodeClass(data, type); + const currentNode = nodes.find((node) => node.id === componentId); + const currentNodeIndex = nodes.findIndex( + (node) => node.id === componentId, + ); + const currentNodes = cloneDeep(nodes); + + if (currentNode) { + currentNodes[currentNodeIndex].data.node = data; + } + setNodes(currentNodes); + setError({ detail: { error: undefined, traceback: undefined } }); setOpen(false); } @@ -184,6 +201,7 @@ export default function CodeAreaModal({ }} open={open} setOpen={setOpen} + size="x-large" > {children} @@ -220,7 +238,7 @@ export default function 
CodeAreaModal({ onChange={(value) => { setCode(value); }} - className="h-full w-full rounded-lg border-[1px] border-gray-300 custom-scroll dark:border-gray-600" + className="h-full min-w-full rounded-lg border-[1px] border-gray-300 custom-scroll dark:border-gray-600" />
{ + setOpenConfirmation(false); + }} onEscapeKeyDown={(e) => { e.stopPropagation(); setOpenConfirmation(false); diff --git a/src/frontend/src/modals/confirmationModal/index.tsx b/src/frontend/src/modals/confirmationModal/index.tsx index e22f1d1625b9..76f7be2bda99 100644 --- a/src/frontend/src/modals/confirmationModal/index.tsx +++ b/src/frontend/src/modals/confirmationModal/index.tsx @@ -1,4 +1,5 @@ import GenericIconComponent from "@/components/genericIconComponent"; +import { DialogClose } from "@radix-ui/react-dialog"; import React, { useEffect, useState } from "react"; import ShadTooltip from "../../components/shadTooltipComponent"; import { Button } from "../../components/ui/button"; @@ -66,6 +67,16 @@ function ConfirmationModal({ (child) => (child as React.ReactElement).type === Content, ); + const shouldShowConfirm = confirmationText && onConfirm; + const shouldShowCancel = cancelText; + const shouldShowFooter = shouldShowConfirm || shouldShowCancel; + + const handleCancel = () => { + setFlag(true); + setModalOpen(false); + onCancel?.(); + }; + return ( {triggerChild} @@ -89,34 +100,38 @@ function ConfirmationModal({ {ContentChild} - - - {cancelText && onCancel && ( - - )} - + {shouldShowFooter ? 
( + + {shouldShowConfirm && ( + + )} + {shouldShowCancel && ( + + + + )} + + ) : ( + <> + )} ); } diff --git a/src/frontend/src/modals/dictAreaModal/index.tsx b/src/frontend/src/modals/dictAreaModal/index.tsx index cfb040c923de..41a2e2a70263 100644 --- a/src/frontend/src/modals/dictAreaModal/index.tsx +++ b/src/frontend/src/modals/dictAreaModal/index.tsx @@ -27,58 +27,68 @@ export default function DictAreaModal({ }): JSX.Element { const [open, setOpen] = useState(false); const isDark = useDarkStore((state) => state.dark); - const [myValue, setMyValue] = useState(value); + const [componentValue, setComponentValue] = useState(value); useEffect(() => { - setMyValue(value); + setComponentValue(value); }, [value, open]); + const handleSubmit = () => { + if (onChange) { + onChange(componentValue); + setOpen(false); + } + }; + + const handleJsonChange = (edit) => { + setComponentValue(edit.src); + }; + + const customizeCopy = (copy) => { + navigator.clipboard.writeText(JSON.stringify(copy)); + }; + + const renderHeader = () => ( + + + {onChange ? "Edit Dictionary" : "View Dictionary"} + + + ); + + const renderContent = () => ( + +
+ +
+
+ ); + return ( { - onChange(myValue); - setOpen(false); - } - : undefined - } + onSubmit={onChange ? handleSubmit : undefined} > - {children} - - - {onChange ? "Edit Dictionary" : "View Dictionary"} - - - -
- { - setMyValue(edit.src); - }} - src={cloneDeep(myValue)} - customizeCopy={(copy) => { - navigator.clipboard.writeText(JSON.stringify(copy)); - }} - /> -
-
+ + {children} + + {renderHeader()} + {renderContent()}
); diff --git a/src/frontend/src/modals/editNodeModal/index.tsx b/src/frontend/src/modals/editNodeModal/index.tsx index 4a6ab0675066..17dcaaf0ba63 100644 --- a/src/frontend/src/modals/editNodeModal/index.tsx +++ b/src/frontend/src/modals/editNodeModal/index.tsx @@ -31,7 +31,7 @@ const EditNodeModal = ({ }, [data.node]); return ( - + <> diff --git a/src/frontend/src/modals/exportModal/index.tsx b/src/frontend/src/modals/exportModal/index.tsx index 9689170faa69..c4cb29613e9e 100644 --- a/src/frontend/src/modals/exportModal/index.tsx +++ b/src/frontend/src/modals/exportModal/index.tsx @@ -1,3 +1,4 @@ +import { track } from "@/customization/utils/analytics"; import useFlowStore from "@/stores/flowStore"; import { ReactNode, forwardRef, useEffect, useState } from "react"; import EditFlowSettings from "../../components/editFlowSettingsComponent"; @@ -66,6 +67,7 @@ const ExportModal = forwardRef( description, ); setOpen(false); + track("Flow Exported", { flowId: currentFlow!.id }); }} > {props.children} diff --git a/src/frontend/src/modals/flowLogsModal/index.tsx b/src/frontend/src/modals/flowLogsModal/index.tsx index 9094d5ea8c2e..31debbf71fa2 100644 --- a/src/frontend/src/modals/flowLogsModal/index.tsx +++ b/src/frontend/src/modals/flowLogsModal/index.tsx @@ -31,7 +31,7 @@ export default function FlowLogsModal({ }, [data, open, isLoading]); return ( - +
diff --git a/src/frontend/src/modals/flowSettingsModal/index.tsx b/src/frontend/src/modals/flowSettingsModal/index.tsx index fb0435fc0620..15e2672d22c0 100644 --- a/src/frontend/src/modals/flowSettingsModal/index.tsx +++ b/src/frontend/src/modals/flowSettingsModal/index.tsx @@ -65,8 +65,7 @@ export default function FlowSettingsModal({ if (flows) { const tempNameList: string[] = []; flows.forEach((flow: FlowType) => { - if ((flow.is_component ?? false) === false) - tempNameList.push(flow.name); + tempNameList.push(flow.name); }); setNameList(tempNameList.filter((name) => name !== currentFlow!.name)); } diff --git a/src/frontend/src/modals/newFlowModal/components/NewFlowCardComponent/index.tsx b/src/frontend/src/modals/newFlowModal/components/NewFlowCardComponent/index.tsx index ed7885f48bc2..9a9d74d971a1 100644 --- a/src/frontend/src/modals/newFlowModal/components/NewFlowCardComponent/index.tsx +++ b/src/frontend/src/modals/newFlowModal/components/NewFlowCardComponent/index.tsx @@ -1,5 +1,7 @@ +import { useCustomNavigate } from "@/customization/hooks/use-custom-navigate"; +import { track } from "@/customization/utils/analytics"; import useAddFlow from "@/hooks/flows/use-add-flow"; -import { useNavigate, useParams } from "react-router-dom"; +import { useParams } from "react-router-dom"; import { Card, CardContent, @@ -9,16 +11,19 @@ import { export default function NewFlowCardComponent() { const addFlow = useAddFlow(); - const navigate = useNavigate(); + const navigate = useCustomNavigate(); const { folderId } = useParams(); + const handleClick = () => { + addFlow({ new_blank: true }).then((id) => { + navigate(`/flow/${id}${folderId ? `/folder/${folderId}` : ""}`); + }); + track("New Flow Created", { template: "Blank Flow" }); + }; + return ( { - addFlow().then((id) => { - navigate(`/flow/${id}${folderId ? 
`/folder/${folderId}` : ""}`); - }); - }} + onClick={handleClick} className="h-64 w-80 cursor-pointer bg-background pt-4" data-testid="blank-flow" > diff --git a/src/frontend/src/modals/newFlowModal/components/hooks/use-redirect-flow-card-click.tsx b/src/frontend/src/modals/newFlowModal/components/hooks/use-redirect-flow-card-click.tsx new file mode 100644 index 000000000000..d0ba71b4c2f9 --- /dev/null +++ b/src/frontend/src/modals/newFlowModal/components/hooks/use-redirect-flow-card-click.tsx @@ -0,0 +1,24 @@ +import { useNavigate } from "react-router-dom"; +import { track } from "../../../../customization/utils/analytics"; +import useAddFlow from "../../../../hooks/flows/use-add-flow"; +import useFlowsManagerStore from "../../../../stores/flowsManagerStore"; +import { FlowType } from "../../../../types/flow"; +import { updateIds } from "../../../../utils/reactflowUtils"; + +export function useFlowCardClick() { + const navigate = useNavigate(); + const addFlow = useAddFlow(); + + const handleFlowCardClick = async (flow: FlowType, folderIdUrl: string) => { + try { + updateIds(flow.data!); + const id = await addFlow({ flow }); + navigate(`/flow/${id}/folder/${folderIdUrl}`); + track("New Flow Created", { template: `${flow.name} Template` }); + } catch (error) { + console.error("Error handling flow card click:", error); + } + }; + + return handleFlowCardClick; +} diff --git a/src/frontend/src/modals/newFlowModal/components/undrawCards/index.tsx b/src/frontend/src/modals/newFlowModal/components/undrawCards/index.tsx index d0a5cd785458..afecfcc5deb2 100644 --- a/src/frontend/src/modals/newFlowModal/components/undrawCards/index.tsx +++ b/src/frontend/src/modals/newFlowModal/components/undrawCards/index.tsx @@ -1,5 +1,5 @@ /// -import { useNavigate, useParams } from "react-router-dom"; +import { useParams } from "react-router-dom"; import BlogPost from "../../../../assets/undraw_blog_post_re_fy5x.svg?react"; import ChatBot from 
"../../../../assets/undraw_chat_bot_re_e2gj.svg?react"; import PromptChaining from "../../../../assets/undraw_cloud_docs_re_xjht.svg?react"; @@ -10,7 +10,6 @@ import APIRequest from "../../../../assets/undraw_real_time_analytics_re_yliv.sv import BasicPrompt from "../../../../assets/undraw_short_bio_re_fmx0.svg?react"; import TransferFiles from "../../../../assets/undraw_transfer_files_re_a2a9.svg?react"; -import useAddFlow from "@/hooks/flows/use-add-flow"; import { Card, CardContent, @@ -20,17 +19,17 @@ import { import { useFolderStore } from "../../../../stores/foldersStore"; import { UndrawCardComponentProps } from "../../../../types/components"; import { updateIds } from "../../../../utils/reactflowUtils"; +import { useFlowCardClick } from "../hooks/use-redirect-flow-card-click"; export default function UndrawCardComponent({ flow, }: UndrawCardComponentProps): JSX.Element { - const addFlow = useAddFlow(); - const navigate = useNavigate(); const { folderId } = useParams(); const myCollectionId = useFolderStore((state) => state.myCollectionId); - const folderIdUrl = folderId ?? 
myCollectionId; + const handleFlowCardClick = useFlowCardClick(); + function selectImage() { switch (flow.name) { case "Blog Writer": @@ -93,7 +92,7 @@ export default function UndrawCardComponent({ preserveAspectRatio="xMidYMid meet" /> ); - case "Sequential Tasks Agent": + case "Simple Agent": return ( ); - case "Hierarchical Tasks Agent": + case "Travel Planning Agents": return ( ); - case "Complex Agent": + case "Dynamic Agent": return ( { - updateIds(flow.data!); - addFlow({ flow }).then((id) => { - navigate(`/flow/${id}/folder/${folderIdUrl}`); - }); - }} + onClick={() => handleFlowCardClick(flow, folderIdUrl!)} className="h-64 w-80 cursor-pointer bg-background pt-4" > diff --git a/src/frontend/src/modals/newFlowModal/index.tsx b/src/frontend/src/modals/newFlowModal/index.tsx deleted file mode 100644 index 444795a45f40..000000000000 --- a/src/frontend/src/modals/newFlowModal/index.tsx +++ /dev/null @@ -1,86 +0,0 @@ -import useFlowsManagerStore from "../../stores/flowsManagerStore"; -import { newFlowModalPropsType } from "../../types/components"; -import BaseModal from "../baseModal"; -import NewFlowCardComponent from "./components/NewFlowCardComponent"; -import UndrawCardComponent from "./components/undrawCards"; - -export default function NewFlowModal({ - open, - setOpen, -}: newFlowModalPropsType): JSX.Element { - const examples = useFlowsManagerStore((state) => state.examples); - - return ( - - - - Get Started - - - -
-
- - - {examples.find( - (e) => e.name == "Basic Prompting (Hello, World)", - ) && ( - e.name == "Basic Prompting (Hello, World)", - )! - } - /> - )} - {examples.find((e) => e.name == "Memory Chatbot") && ( - e.name == "Memory Chatbot")!} - /> - )} - {examples.find((e) => e.name == "Document QA") && ( - e.name == "Document QA")!} - /> - )} - {examples.find((e) => e.name == "Blog Writer") && ( - e.name == "Blog Writer")!} - /> - )} - {examples.find((e) => e.name == "Vector Store RAG") && ( - e.name == "Vector Store RAG")!} - /> - )} - {examples.find((e) => e.name == "Sequential Tasks Agent") && ( - e.name == "Sequential Tasks Agent")!} - /> - )} - {examples.find((e) => e.name == "Hierarchical Tasks Agent") && ( - e.name == "Hierarchical Tasks Agent")! - } - /> - )} - {examples.find((e) => e.name == "Complex Agent") && ( - e.name == "Complex Agent")!} - /> - )} -
-
-
-
- ); -} diff --git a/src/frontend/src/modals/promptModal/index.tsx b/src/frontend/src/modals/promptModal/index.tsx index 1736a2ef51f0..c27b530edb7e 100644 --- a/src/frontend/src/modals/promptModal/index.tsx +++ b/src/frontend/src/modals/promptModal/index.tsx @@ -1,5 +1,5 @@ import { usePostValidatePrompt } from "@/controllers/API/queries/nodes/use-post-validate-prompt"; -import { useEffect, useRef, useState } from "react"; +import React, { useEffect, useRef, useState } from "react"; import IconComponent from "../../components/genericIconComponent"; import SanitizedHTMLWrapper from "../../components/sanitizedHTMLWrapper"; import ShadTooltip from "../../components/shadTooltipComponent"; @@ -47,6 +47,10 @@ export default function PromptModal({ const divRef = useRef(null); const divRefPrompt = useRef(null); const { mutate: postValidatePrompt } = usePostValidatePrompt(); + const [clickPosition, setClickPosition] = useState({ x: 0, y: 0 }); + const [scrollPosition, setScrollPosition] = useState(0); + const previewRef = useRef(null); + const textareaRef = useRef(null); function checkVariables(valueToCheck: string): void { const regex = /\{([^{}]+)\}/g; @@ -131,8 +135,8 @@ export default function PromptModal({ field_name = Array.isArray( apiReturn?.frontend_node?.custom_fields?.[""], ) - ? apiReturn?.frontend_node?.custom_fields?.[""][0] ?? "" - : apiReturn?.frontend_node?.custom_fields?.[""] ?? ""; + ? (apiReturn?.frontend_node?.custom_fields?.[""][0] ?? "") + : (apiReturn?.frontend_node?.custom_fields?.[""] ?? ""); } if (apiReturn) { let inputVariables = apiReturn.input_variables ?? 
[]; @@ -172,11 +176,43 @@ export default function PromptModal({ ); } + const handlePreviewClick = (e: React.MouseEvent) => { + if (!isEdit && !readonly) { + const clickX = e.clientX; + const clickY = e.clientY; + setClickPosition({ x: clickX, y: clickY }); + setScrollPosition(e.currentTarget.scrollTop); + setIsEdit(true); + } + }; + + useEffect(() => { + if (isEdit && textareaRef.current) { + textareaRef.current.focus(); + textareaRef.current.scrollTop = scrollPosition; + + const textArea = textareaRef.current; + const { x, y } = clickPosition; + + // Use caretPositionFromPoint to get the closest text position. Does not work on Safari. + if ("caretPositionFromPoint" in document) { + let range = (document as any).caretPositionFromPoint(x, y)?.offset ?? 0; + if (range) { + const position = range; + textArea.setSelectionRange(position, position); + } + } + } else if (!isEdit && previewRef.current) { + previewRef.current.scrollTop = scrollPosition; + } + }, [isEdit, clickPosition, scrollPosition]); + return ( {}} open={modalOpen} setOpen={setModalOpen} + size="x-large" > {children} @@ -201,10 +237,11 @@ export default function PromptModal({