diff --git a/.github/scripts/check_doc_references.py b/.github/scripts/check_doc_references.py
deleted file mode 100644
index a27145b..0000000
--- a/.github/scripts/check_doc_references.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import subprocess
-import sys
-from pathlib import Path
-
-DOCS_DIR = Path("docusaurus/docs")
-REPO_ROOT = Path(".")
-
-def get_created_md_files():
- """
- Identify and return a list of Markdown (.md, .mdx) files that were added
- in the current Git branch compared to the main branch.
- The function uses `git diff` to determine the files that have been added
- (status "A") and filters them to include only Markdown files located within
- the documentation directory.
- Returns:
- list[Path]: A list of relative paths to the newly created Markdown files.
- """
- result = subprocess.run(
- ["git", "diff", "--name-status", "origin/main...HEAD"],
- stdout=subprocess.PIPE,
- text=True,
- check=True
- )
-
- created_files = []
- for line in result.stdout.strip().splitlines():
- status, filepath = line.split(maxsplit=1)
- path = Path(filepath)
- if status == "A" and path.suffix in [".md", ".mdx"] and DOCS_DIR in path.parents:
- created_files.append(path.relative_to(REPO_ROOT))
-
- return created_files
-
-def is_file_referenced(file_path):
- """
- Checks if a given file is referenced in the Git repository.
- This function searches the Git repository for occurrences of the file name
- and determines if the file is referenced in any other files.
- Args:
- file_path (Path): The path to the file to check.
- Returns:
- bool: True if the file is referenced in other files, False otherwise.
- """
- file_name = file_path.name
- result = subprocess.run(
- ["git", "grep", "-l", file_name],
- stdout=subprocess.PIPE,
- text=True
- )
-
- matches = [line for line in result.stdout.strip().splitlines() if Path(line) != file_path]
- return len(matches) > 0
-
-def main():
- created_files = get_created_md_files()
- if not created_files:
- print("No new Markdown files detected.")
- return
-
- print(f"Checking {len(created_files)} new Markdown files...")
-
- unreferenced = []
-
- for md_file in created_files:
- if not is_file_referenced(md_file):
- unreferenced.append(md_file)
-
- if unreferenced:
- print("❌ The following new files are unreferenced:")
- for f in unreferenced:
- print(f" - {f}")
- sys.exit(1)
- else:
- print("✅ All new Markdown files are referenced.")
-
-if __name__ == "__main__":
- main()
diff --git a/.github/workflows/config/markdown.links.config.json b/.github/workflows/config/markdown.links.config.json
new file mode 100644
index 0000000..3ecde5c
--- /dev/null
+++ b/.github/workflows/config/markdown.links.config.json
@@ -0,0 +1,10 @@
+{
+ "ignorePatterns": [
+ {
+ "pattern": "^https?://(localhost|127\\.0\\.0\\.1)(:\\d+)?(/.*)?$"
+ },
+ {
+ "pattern": "^https?://controller\\.url\\.sample(:\\d+)?(/.*)?$"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/.github/workflows/config/markdown_lint.json b/.github/workflows/config/markdown_lint.json
new file mode 100644
index 0000000..b1074d1
--- /dev/null
+++ b/.github/workflows/config/markdown_lint.json
@@ -0,0 +1,58 @@
+{
+ "ignorePatterns": [
+ {
+ "pattern": "^(?!http).*"
+ },
+ {
+ "pattern": ".*%2F*"
+ },
+ {
+ "pattern": ".*127.0.0.1.*"
+ },
+ {
+ "pattern" : ".*localhost.*"
+ },
+ {
+ "pattern": "^https://controller.url.sample:9443"
+ },
+ {
+ "pattern": "^https://nms.yourdomain.com"
+ },
+ {
+ "pattern" : "^https://your-org.nms.yourdomain.com"
+ },
+ {
+ "pattern" : "https://magma.github.io/magma/docs/orc8r/rds_upgrade#logs-and-validation"
+ },
+ {
+ "pattern" : "https://github.com/fbcinternal/ens_magma/tree/master/spirent_automation"
+ },
+ {
+ "pattern" : "https://github.com/magma/ci-infra/blob/master/bazel/remote_caching/Readme.md"
+ },
+ {
+ "pattern" : "https://github.com/magma/security/issues"
+ },
+ {
+ "pattern" : "https://github.com/dysinger/basebox"
+ },
+ {
+ "pattern" : "https://docs.virtlet.cloud/user-guide/real-cluster/"
+ },
+ {
+ "pattern" : "^https://www.etsi.org.*pdf"
+ },
+ {
+ "pattern" : "https://core.ac.uk/download/pdf/322886318.pdf"
+ },
+ {
+ "pattern" : "http://automation.fbmagma.ninja/"
+ },
+ {
+ "pattern" : "http://ens-spirent-test-summary.com.s3-us-west-1.amazonaws.com/sanity/hilsanitypass.svg"
+ },
+ {
+ "pattern" : "https://fb.quip.com/4tmUAtlox4Oy"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/.github/workflows/docs-fix.yml b/.github/workflows/docs-fix.yml
new file mode 100644
index 0000000..b59cac5
--- /dev/null
+++ b/.github/workflows/docs-fix.yml
@@ -0,0 +1,60 @@
+# Copyright 2022 The Magma Authors.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# owner: @magma/approvers-infra
+# purpose: Fixes the documentation if any issue is found during tests
+# remediation:
+
+name: Docs Fix (Triggered on Check Failure)
+
+on:
+ workflow_run:
+ workflows: ["Docs Check"]
+ types:
+ - completed
+
+jobs:
+ only_if_failed:
+ if: ${{ github.event.workflow_run.conclusion == 'failure' }}
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Setup Node
+ uses: actions/setup-node@v4
+ with:
+ node-version: '18'
+
+ - name: Setup Python (if needed)
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.8'
+
+ - name: Install deps
+ run: yarn install --frozen-lockfile
+
+ - name: Run precommit fix
+ run: make -C readmes precommit_fix
+
+ - name: Show diff after fix
+ run: git diff --color
+
+ - name: Create patch file
+ run: |
+ git diff > docs_fixes.patch
+ continue-on-error: true
+
+ - name: Upload auto-fix patch
+ uses: actions/upload-artifact@v4
+ with:
+ name: auto-fix-patch
+ path: docs_fixes.patch
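+ # The auto-fix-patch artifact can be downloaded from the workflow run summary and
+ # applied locally with `git apply docs_fixes.patch`.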
diff --git a/.github/workflows/docs-test.yml b/.github/workflows/docs-test.yml
new file mode 100644
index 0000000..ad9bce8
--- /dev/null
+++ b/.github/workflows/docs-test.yml
@@ -0,0 +1,113 @@
+# Copyright 2022 The Magma Authors.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# owner: @magma/approvers-infra
+# purpose: Check the documentation changes for issues and formatting errors
+# remediation:
+---
+name: Docs Check
+
+on:
+ pull_request:
+ branches: [ main ]
+ push:
+ branches: [ main ]
+
+jobs:
+ docusaurus-test:
+ name: Docusaurus Build & Log
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-node@v4
+ with:
+ node-version: '18'
+
+ - name: Install dependencies
+ run: |
+ cd docusaurus/
+ yarn install --frozen-lockfile
+
+ - name: Build and test Docusaurus
+ run: |
+ cd docusaurus/
+ yarn test
+
+ - name: Verify build output
+ run: |
+ cd docusaurus/
+ test -d build && echo "✅ Build successful"
+
+ markdown-lint:
+ name: Markdown Format Check
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Install markdownlint-cli
+ run: npm install -g markdownlint-cli
+ - name: Run markdownlint
+ run: markdownlint "docs/**/*.md" "readmes/**/*.md"
+
+ translation-consistency:
+ name: Translation Consistency Check
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+ - name: Grant execute permissions to the script
+ run: |
+ chmod +x ./.github/workflows/scripts/check_translations.sh
+ - name: Check Translation Consistency
+ run: |
+ ./.github/workflows/scripts/check_translations.sh --debug
+ shell: bash
+
+ id-filename-sync:
+ name: Docusaurus ID & Filename Sync
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Grant execute permissions to the script
+ run: |
+ chmod +x ./.github/workflows/scripts/check_id_matches_filename.sh
+ - name: Check ID matches filename
+ run: |
+ ./.github/workflows/scripts/check_id_matches_filename.sh --debug
+
+ check-symlinks:
+ name: Broken Symlinks
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Check for broken symlinks in docusaurus/
+ working-directory: ${{ github.workspace }}/docusaurus
+ run: |
+ BROKEN=$(find . -xtype l)
+ if [ -n "$BROKEN" ]; then
+ echo "❌ Broken symlinks found:"
+ echo "$BROKEN"
+ exit 1
+ else
+ echo "✅ No broken symlinks found in ./docusaurus."
+ fi
+
+ markdown-link-check:
+ name: Markdown URL Check
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - uses: gaurav-nelson/github-action-markdown-link-check@v1
+ with:
+ use-quiet-mode: 'yes'
+ use-verbose-mode: 'yes'
+ config-file: '.github/workflows/config/markdown.links.config.json'
+ folder-path: './docusaurus/docs'
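+ # Note: the config file above tells the link checker to skip localhost/127.0.0.1
+ # URLs and the controller.url.sample placeholder host used throughout the docs.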
diff --git a/.github/workflows/scripts/check_id_matches_filename.sh b/.github/workflows/scripts/check_id_matches_filename.sh
new file mode 100644
index 0000000..c6720f6
--- /dev/null
+++ b/.github/workflows/scripts/check_id_matches_filename.sh
@@ -0,0 +1,100 @@
+#!/usr/bin/env bash
+# File: ./.github/workflows/scripts/check_id_matches_filename.sh
+# Description: Ensure each doc file's frontmatter id matches its filename
+# It is possible to add more checks on the front matter information here
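+#
+# For example (hypothetical file), a doc named my_page.md is expected to declare a
+# matching id in its frontmatter:
+#   ---
+#   id: my_page
+#   title: My Page
+#   ---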
+
+DEBUG_MODE="false"
+DOCS_DIR="docusaurus/docs"
+EXTENSIONS=("*.md" "*.mdx")
+FIND_EXPR=""
+
+# Parse command line arguments
+while [ "$#" -gt 0 ]; do
+ case "$1" in
+ --debug)
+ DEBUG_MODE="true"
+ shift
+ ;;
+ *)
+ echo "❌ Error: Unknown argument: $1"
+ exit 1
+ ;;
+ esac
+done
+
+echo "🔍 Checking if doc IDs match filenames in $DOCS_DIR"
+
+if [ ! -d "$DOCS_DIR" ]; then
+ echo "❌ Error: Documentation directory not found at $DOCS_DIR"
+ exit 1
+fi
+
+MISMATCH_COUNT=0
+CHECKED_FILES=0
+
+
+# TODO: Probably extract into a dedicated function, as this is used in both check_id_matches_filename and check_translations
+# Get modified files or all files based on debug mode
+if [ "$DEBUG_MODE" = "true" ]; then
+ # Build the `-name` expression dynamically
+ for EXT in "${EXTENSIONS[@]}"; do
+ if [ -n "$FIND_EXPR" ]; then
+ FIND_EXPR="$FIND_EXPR -o -name \"$EXT\""
+ else
+ FIND_EXPR="-name \"$EXT\""
+ fi
+ done
+ # Wrap the whole expression in escaped parentheses
+ FIND_CMD="find \"$DOCS_DIR\" -type f \\( $FIND_EXPR \\) ! -path \"*/versioned_docs/*\""
+ # Evaluate and execute
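+ # The evaluated command ends up roughly as (illustrative):
+ #   find docusaurus/docs -type f \( -name "*.md" -o -name "*.mdx" \) ! -path "*/versioned_docs/*"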
+ MODIFIED_DOCS=$(eval $FIND_CMD)
+else
+ BASE_REF="${GITHUB_BASE_REF:-origin/main}"
+ MODIFIED_DOCS=$(git diff --name-only "$BASE_REF...HEAD" -- "$DOCS_DIR/**/*.md" "$DOCS_DIR/**/*.mdx" ':!versioned_docs/**')
+fi
+
+if [ -z "$MODIFIED_DOCS" ]; then
+ echo "✅ No files to check."
+ exit 0
+fi
+
+# Use a temporary file to store mismatches
+TMP_MISMATCHES=$(mktemp)
+
+for FILE in $MODIFIED_DOCS; do
+ # Only process existing files
+ if [ ! -f "$FILE" ]; then
+ continue
+ fi
+
+ CHECKED_FILES=$((CHECKED_FILES + 1))
+
+ FILENAME=$(basename "$FILE")
+ EXPECTED_ID="${FILENAME%.*}" # Strip the .md/.mdx extension to get the expected id
+ FOUND_ID=$(grep -E '^id: ' "$FILE" | head -n 1 | cut -d ' ' -f2-)
+
+ if [ "$DEBUG_MODE" = "true" ]; then
+ REL_PATH=$(echo "$FILE" | sed "s|^$DOCS_DIR/||")
+ echo "🔎 Checking: $REL_PATH (ID: $FOUND_ID)"
+ fi
+
+ if [ -n "$FOUND_ID" ] && [ "$FOUND_ID" != "$EXPECTED_ID" ]; then
+ REL_PATH=$(echo "$FILE" | sed "s|^$DOCS_DIR/||")
+ echo "$REL_PATH (id: $FOUND_ID, expected: $EXPECTED_ID)" >> "$TMP_MISMATCHES"
+ MISMATCH_COUNT=$((MISMATCH_COUNT + 1))
+ fi
+done
+
+if [ "$MISMATCH_COUNT" -eq 0 ]; then
+ echo "✅ All doc IDs match filenames (checked $CHECKED_FILES files)."
+ rm -f "$TMP_MISMATCHES"
+ exit 0
+else
+ echo "❌ ID/Filename mismatches found:"
+ cat "$TMP_MISMATCHES" | while read -r LINE; do
+ echo "- $LINE"
+ done
+ rm -f "$TMP_MISMATCHES"
+ exit 1
+fi
diff --git a/.github/workflows/scripts/check_translations.sh b/.github/workflows/scripts/check_translations.sh
new file mode 100644
index 0000000..2c033e9
--- /dev/null
+++ b/.github/workflows/scripts/check_translations.sh
@@ -0,0 +1,87 @@
+#!/usr/bin/env bash
+# File: ./.github/workflows/scripts/check_translations.sh
+# Description: Ensure any updated docs in docusaurus/docs have corresponding translations
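+#
+# For example (hypothetical language folder), an update to
+# docusaurus/docs/basics/introduction.md is expected to have a translated copy at
+# i18n/<lang>/docusaurus-plugin-content-docs/current/basics/introduction.md for every
+# language directory present under i18n/.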
+
+DEBUG_MODE="false"
+DOCS_DIR="docusaurus/docs"
+EXTENSIONS=("*.md" "*.mdx")
+FIND_EXPR=""
+
+# Parse command line arguments
+while [ "$#" -gt 0 ]; do
+ case "$1" in
+ --debug)
+ DEBUG_MODE="true"
+ shift
+ ;;
+ *)
+ echo "❌ Error: Unknown option '$1'"
+ exit 1
+ ;;
+ esac
+done
+
+echo "🔍 Checking for missing translations in $DOCS_DIR"
+
+if [ ! -d "$DOCS_DIR" ]; then
+ echo "❌ Error: Documentation directory not found at $DOCS_DIR"
+ exit 1
+fi
+
+if [ "$DEBUG_MODE" = "true" ]; then
+ echo "⚠️ DEBUG MODE: Checking ALL documentation files"
+fi
+
+# TODO: Probably extract into a dedicated function, as this is used in both check_id_matches_filename and check_translations
+# Get modified files or all files based on debug mode
+if [ "$DEBUG_MODE" = "true" ]; then
+ # Build the `-name` expression dynamically
+ for EXT in "${EXTENSIONS[@]}"; do
+ if [ -n "$FIND_EXPR" ]; then
+ FIND_EXPR="$FIND_EXPR -o -name \"$EXT\""
+ else
+ FIND_EXPR="-name \"$EXT\""
+ fi
+ done
+ # Wrap the whole expression in escaped parentheses
+ FIND_CMD="find \"$DOCS_DIR\" -type f \\( $FIND_EXPR \\) ! -path \"*/versioned_docs/*\""
+ # Evaluate and execute
+ MODIFIED_DOCS=$(eval $FIND_CMD)
+else
+ BASE_REF="${GITHUB_BASE_REF:-origin/main}"
+ MODIFIED_DOCS=$(git diff --name-only "$BASE_REF...HEAD" -- "$DOCS_DIR/**/*.md" "$DOCS_DIR/**/*.mdx" ':!versioned_docs/**')
+fi
+
+if [ -z "$MODIFIED_DOCS" ]; then
+ echo "✅ No files to check."
+ exit 0
+fi
+
+MISSING=0
+
+for file in $MODIFIED_DOCS; do
+ # Only process existing files
+ if [ ! -f "$file" ]; then
+ continue
+ fi
+
+ REL_PATH=$(echo "$file" | sed "s|^$DOCS_DIR/||")
+ echo "Checking translations for: $REL_PATH"
+
+ for lang in $(ls i18n 2>/dev/null); do
+ TRANSLATED_FILE="i18n/$lang/docusaurus-plugin-content-docs/current/$REL_PATH"
+ if [ ! -f "$TRANSLATED_FILE" ]; then
+ if [ "$MISSING" -eq 0 ]; then
+ echo "❌ Missing translations:"
+ fi
+ echo "- $TRANSLATED_FILE"
+ MISSING=$((MISSING + 1))
+ fi
+ done
+done
+
+if [ "$MISSING" -eq 0 ]; then
+ echo "✅ All translations present."
+ exit 0
+else
+ exit 1
+fi
diff --git a/docusaurus/.dockerignore b/docusaurus/.dockerignore
index cf70988..33b4ed3 100644
--- a/docusaurus/.dockerignore
+++ b/docusaurus/.dockerignore
@@ -1 +1,3 @@
-**/node_modules
+# Local development files
+node_modules
+**/yarn.lock
diff --git a/docusaurus/.gitignore b/docusaurus/.gitignore
new file mode 100644
index 0000000..a9ac97d
--- /dev/null
+++ b/docusaurus/.gitignore
@@ -0,0 +1,53 @@
+# Node modules and local data
+**/data/
+**/node_modules/
+
+# Build files
+**/build/
+**/.docusaurus/
+**/.cache/
+**/.cache-loader/
+
+# Environment variables
+**/.env
+**/.env.local
+**/.env.development.local
+**/.env.test.local
+**/.env.production.local
+
+# Logs
+**/npm-debug.log*
+**/yarn-debug.log*
+**/yarn-error.log*
+**/pnpm-debug.log*
+**/lerna-debug.log*
+
+# IDE-specific files
+**/.idea/
+**/.vscode/
+
+# TypeScript build files
+**/*.tsbuildinfo
+
+# MacOS system files
+**/.DS_Store
+
+# Linux files
+**/*.pid
+
+# Editor-specific files
+**/*.suo
+**/*.ntvs*
+**/*.njsproj
+**/*.sln
+**/*.sw*
+
+# Miscellaneous
+**/*.DS_Store
+**/*.log
+**/*.swp
+
+# Ignore any package-lock or yarn.lock if using a different package manager
+# Remove the entry for the lockfile you actually want to commit
+**/yarn.lock
+**/package-lock.json
diff --git a/docusaurus/Dockerfile b/docusaurus/Dockerfile
index 6d5d957..5c35d57 100644
--- a/docusaurus/Dockerfile
+++ b/docusaurus/Dockerfile
@@ -1,17 +1,56 @@
-# Copyright 2023 The Magma Authors.
-
+# Copyright 2025 The Magma Authors.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
-
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#==================================================================
+# Dockerfile based on Docusaurus community guide found at
+# https://docusaurus.community/knowledge/deployment/docker/
+#==================================================================
+
+#==================================================================
+# Base Node image
+#==================================================================
+# Node version 22 is the current LTS
+# The slim variant is based on a small Debian image
+FROM node:22-slim AS base
+ENV FORCE_COLOR=0
+RUN corepack enable
+WORKDIR /opt/docusaurus
+COPY package.json .
+COPY package-lock.json* .
+
+#==================================================================
+# Local deployment
+#==================================================================
+FROM base AS dev
+WORKDIR /opt/docusaurus
+EXPOSE 3000
+RUN [ ! -d "node_modules" ] && npm install --package-lock-only && npm ci
+CMD ["npm", "run", "start", "--", "--poll", "1000"]
-FROM node:14
+#==================================================================
+# Base image for production deployment
+#==================================================================
+FROM base AS prod
+WORKDIR /opt/docusaurus
+COPY . /opt/docusaurus/
+RUN npm ci
+RUN npm run build
-WORKDIR /app/website
+#==================================================================
+# Production deployment
+#==================================================================
+FROM prod AS serve
+EXPOSE 3000
+CMD ["npm", "run", "serve", "--", "--no-open"]
-COPY package.json /app/website/package.json
-RUN yarn install
+#==================================================================
+# Production deployment with Caddy automatic SSL/TLS
+#==================================================================
+FROM caddy:2-alpine AS caddy
+COPY --from=prod /opt/docusaurus/Caddyfile /etc/caddy/Caddyfile
+COPY --from=prod /opt/docusaurus/build /var/docusaurus
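+# NOTE: this stage assumes a Caddyfile is present at the top of the docusaurus/
+# directory (it is copied from the prod stage above). A minimal sketch, assuming
+# the env vars set in docker-compose.yml (illustrative only):
+#   {$DOCUSAURUS_DOMAIN} {
+#       tls {$DOCUSAURUS_EMAIL}
+#       root * /var/docusaurus
+#       file_server
+#   }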
\ No newline at end of file
diff --git a/docusaurus/Makefile b/docusaurus/Makefile
deleted file mode 100644
index f753df8..0000000
--- a/docusaurus/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-.PHONY: dev
-
-dev:
- ./create_docusaurus_website.sh
diff --git a/docusaurus/babel.config.js b/docusaurus/babel.config.js
new file mode 100644
index 0000000..e00595d
--- /dev/null
+++ b/docusaurus/babel.config.js
@@ -0,0 +1,3 @@
+module.exports = {
+ presets: [require.resolve('@docusaurus/core/lib/babel/preset')],
+};
diff --git a/docusaurus/core/Footer.js b/docusaurus/core/Footer.js
deleted file mode 100644
index e84bdf8..0000000
--- a/docusaurus/core/Footer.js
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Copyright 2020 The Magma Authors.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree.
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-const React = require('react');
-
-class Footer extends React.Component {
- docUrl(doc, language) {
- const baseUrl = this.props.config.baseUrl;
- const docsUrl = this.props.config.docsUrl;
- const docsPart = `${docsUrl ? `${docsUrl}/` : ''}`;
- const langPart = `${language ? `${language}/` : ''}`;
- return `${baseUrl}${docsPart}${langPart}${doc}`;
- }
-
- pageUrl(doc, language) {
- const baseUrl = this.props.config.baseUrl;
- return baseUrl + (language ? `${language}/` : '') + doc;
- }
-
- render() {
- return (
-
- );
- }
-}
-
-module.exports = Footer;
diff --git a/docusaurus/create_docusaurus_website.sh b/docusaurus/create_docusaurus_website.sh
index 7b983e8..9af843d 100755
--- a/docusaurus/create_docusaurus_website.sh
+++ b/docusaurus/create_docusaurus_website.sh
@@ -50,4 +50,4 @@ echo 'If you want to follow the build logs, run docker compose logs -f docusauru
spin
echo 'Navigate to http://localhost:3000/ to see the docs.'
-xdg-open 'http://localhost:3000/docs/next/basics/introduction.html' || true
+# xdg-open 'http://localhost:3000/docs/next/basics/introduction.html' || true
diff --git a/docusaurus/docker-compose.publish.yml b/docusaurus/docker-compose.publish.yml
deleted file mode 100644
index a0dc599..0000000
--- a/docusaurus/docker-compose.publish.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-version: "3.7"
-
-services:
- docusaurus:
- # Don't install or start anything, will do that manually during publish
- command: bash -c 'while true ; do echo $$(date) ; sleep 10 ; done'
diff --git a/docusaurus/docker-compose.yml b/docusaurus/docker-compose.yml
index f15e040..66ed17c 100644
--- a/docusaurus/docker-compose.yml
+++ b/docusaurus/docker-compose.yml
@@ -1,12 +1,46 @@
-version: "3.7"
+version: '3.8'
+name: "docusaurus"
services:
- docusaurus:
+ dev:
+ build:
+ context: .
+ target: dev
+ container_name: docusaurus_local
+ ports:
+ - "3000:3000"
volumes:
- - ./../docusaurus:/app/website
- - ./../readmes:/app/docs
+ - .:/opt/docusaurus
+ - ./../readmes:/opt/docusaurus/docs
+ environment:
+ - NODE_ENV=development
+
+ serve: # TODO: fix issues
+ build:
+ context: .
+ target: serve
+ container_name: docusaurus
+ ports:
+ - "3000:3000"
+ environment:
+ - NODE_ENV=production
+
+ caddy: # TODO: fix issues
+ build:
+ context: .
+ target: caddy
+ container_name: docusaurus_caddy
ports:
- - 3000:3000/tcp
- - 35729:35729/tcp
- image: magma_docusaurus
- command: bash -c 'yarn install && yarn start'
+ - "80:80"
+ - "443:443"
+ environment:
+ - NODE_ENV=production
+ - DOCUSAURUS_DOMAIN=docusaurus.example
+ - DOCUSAURUS_EMAIL=letsencrypt@docusaurus.example
+ volumes:
+ - caddy_data:/data
+ - caddy_config:/config
+
+volumes:
+ caddy_data:
+ caddy_config:
\ No newline at end of file
diff --git a/docusaurus/docs/basics/introduction.md b/docusaurus/docs/basics/introduction.md
new file mode 100644
index 0000000..cbf4a95
--- /dev/null
+++ b/docusaurus/docs/basics/introduction.md
@@ -0,0 +1,28 @@
+---
+id: version-1.0.0-introduction
+title: Introduction
+hide_title: true
+original_id: introduction
+---
+# Introduction
+
+Magma is an open-source software platform that gives network operators an open, flexible and extendable mobile core network solution. Magma enables better connectivity by:
+
+* Allowing operators to offer cellular service without vendor lock-in with a modern, open source core network
+* Enabling operators to manage their networks more efficiently with more automation, less downtime, better predictability, and more agility to add new services and applications
+* Enabling federation between existing MNOs and new infrastructure providers for expanding rural infrastructure
+* Allowing operators who are constrained with licensed spectrum to add capacity and reach by using Wi-Fi and CBRS
+
+## Magma Architecture
+
+The figure below shows the high-level Magma architecture. Magma is designed to be 3GPP generation and access network (cellular or WiFi) agnostic. It can flexibly support a radio access network with minimal development and deployment effort.
+
+Magma has three major components:
+
+* **Access Gateway:** The Access Gateway (AGW) provides network services and policy enforcement. In an LTE network, the AGW implements an evolved packet core (EPC), and a combination of an AAA and a PGW. It works with existing, unmodified commercial radio hardware.
+
+* **Orchestrator:** Orchestrator is a cloud service that provides a simple and consistent way to configure and monitor the wireless network securely. The Orchestrator can be hosted on a public/private cloud. The metrics acquired through the platform allows you to see the analytics and traffic flows of the wireless users through the Magma web UI.
+
+* **Federation Gateway:** The Federation Gateway integrates the MNO core network with Magma by using standard 3GPP interfaces to existing MNO components. It acts as a proxy between the Magma AGW and the operator's network and facilitates core functions, such as authentication, data plans, policy enforcement, and charging to stay uniform between an existing MNO network and the expanded network with Magma.
+
+
diff --git a/docusaurus/docs/basics/prerequisites.md b/docusaurus/docs/basics/prerequisites.md
new file mode 100644
index 0000000..a437f5e
--- /dev/null
+++ b/docusaurus/docs/basics/prerequisites.md
@@ -0,0 +1,103 @@
+---
+id: version-1.0.0-prerequisites
+title: Prerequisites
+hide_title: true
+original_id: prerequisites
+---
+# Prerequisites
+
+These are the prerequisites to setting up a full private LTE Magma deployment.
+Additional prerequisites for developers can be found in the developer's guide.
+
+## Development Tools
+
+Install the following tools:
+
+1. [Docker](https://www.docker.com) and Docker Compose
+2. [Homebrew](https://brew.sh/) *only* for MacOS users
+3. [VirtualBox](https://www.virtualbox.org/)
+4. [Vagrant](https://vagrantup.com)
+
+Replace `brew` with your OS-appropriate package manager as necessary:
+
+```bash
+brew install python3
+pip3 install ansible fabric3 jsonpickle requests PyYAML
+vagrant plugin install vagrant-vbguest
+```
+
+If you are on MacOS, you should start Docker for Mac and increase the memory
+allocation for the Docker engine to at least 4GB (Preferences -> Advanced).
+
+
+
+## Build/Deploy Tooling
+
+We support building the AGW and Orchestrator on MacOS and Linux host operating
+systems. Doing so on a Windows environment should be possible but has not been
+tested. You may prefer to use a Linux virtual machine if you are on a Windows
+host.
+
+First, follow the previous section on developer tools. Then, install some
+additional prerequisite tools (replace `brew` with your OS-appropriate package
+manager as necessary):
+
+```console
+$ brew install aws-iam-authenticator kubernetes-cli kubernetes-helm python3 terraform
+$ pip3 install awscli
+$ aws configure
+```
+
+Provide the access key ID and secret key for an administrator user in AWS
+(don't use the root user) when prompted by `aws configure`. Skip this step if
+you will use something else for managing AWS credentials.
+
+## Production Hardware
+
+### Access Gateways
+
+Access gateways (AGWs) can be deployed on to any AMD64 architecture machine
+which can support a Debian Linux installation. The basic system requirements
+for the AGW production hardware are:
+
+1. 2+ physical ethernet interfaces
+2. AMD64 dual-core processor around 2GHz clock speed or faster
+3. 2GB RAM
+4. 128GB-256GB SSD storage
+
+In addition, in order to build the AGW, you should have on hand:
+
+1. A USB stick with 2GB+ capacity to load a Debian Stretch ISO
+2. Peripherals (keyboard, screen) for your production AGW box for use during
+provisioning
+
+### RAN Equipment
+
+We currently have tested with the following EnodeBs:
+
+1. Baicells Nova 233 TDD Outdoor
+2. Baicells Nova 243 TDD Outdoor
+3. Assorted Baicells indoor units (for lab deployments)
+
+Support for other RAN hardware can be implemented inside the `enodebd` service
+on the AGW, but we recommend starting with one of these EnodeBs.
+
+### Orchestrator and NMS
+
+Orchestrator deployment depends on the following components:
+
+1. An AWS account
+2. A Docker image repository (e.g. Docker Hub, JFrog)
+3. A registered domain for Orchestrator endpoints
+
+We recommend deploying the Orchestrator cloud component of magma into AWS.
+Our open-source Terraform scripts target an AWS deployment environment, but if
+you are familiar with devops and are willing to roll your own, Orchestrator can
+run on any public/private cloud with a Kubernetes cluster available to use.
+The deployment documentation will assume an AWS deployment environment - if
+this is your first time using or deploying Orchestrator, we recommend that you
+follow this guide before attempting to deploy it elsewhere.
+
+You will also need a Docker image repository available to publish the various
+Orchestrator NMS containers to. We recommend using a private repository for
+this.
diff --git a/docusaurus/docs/basics/quick_start_guide.md b/docusaurus/docs/basics/quick_start_guide.md
new file mode 100644
index 0000000..1a9fdc2
--- /dev/null
+++ b/docusaurus/docs/basics/quick_start_guide.md
@@ -0,0 +1,177 @@
+---
+id: version-1.0.0-quick_start_guide
+title: Quick Start Guide
+hide_title: true
+original_id: quick_start_guide
+---
+# Quick Start Guide
+
+The quick start guide is for developing on Magma or just trying it out. Follow
+the deployment guides under Orchestrator and Access Gateway if you are
+installing Magma for a production deployment.
+
+With the [prereqs](prerequisites.md) installed, we can now set up a minimal
+end-to-end system on your development environment. In this guide, we'll start
+by running the LTE access gateway and orchestrator cloud, and then
+register your local access gateway with your local cloud for management.
+
+We will be spinning up a virtual machine and some docker containers for this
+full setup, so you'll probably want to do this on a system with at least 8GB
+of memory. Our development VMs are in the 192.168.80.0/24 address space, so
+make sure that you don't have anything running which hijacks that (e.g. VPN).
+
+In the following steps, note the prefix in terminal commands. `HOST` means to
+run the indicated command on your host machine, and `MAGMA-VM` on the `magma`
+vagrant machine under `lte/gateway`.
+
+## Provisioning the environment
+
+Go ahead and open up 2 fresh terminal tabs. Start in the first tab:
+
+### Terminal Tab 1: Provision the AGW VM
+
+The development environment virtualizes the access gateway so you don't need
+any production hardware on hand to test an end-to-end setup.
+We'll be setting up the LTE AGW VM in this tab.
+
+```bash
+HOST [magma]$ cd lte/gateway
+HOST [magma/lte/gateway]$ vagrant up magma
+```
+
+This will take a few minutes to spin up the VM. While that runs, switch over
+to...
+
+### Terminal Tab 2: Build Orchestrator
+
+Here, we'll be building the Orchestrator docker containers.
+
+```bash
+HOST [magma]$ cd orc8r/cloud/docker
+HOST [magma/orc8r/cloud/docker]$ ./build.py -a
+```
+
+This will build all the docker images for Orchestrator. The `vagrant up` from
+the first tab should finish before the image building, so you should switch
+to that tab and move on for now.
+
+## Initial Run
+
+Once `vagrant up` in the first tab finishes:
+
+### Terminal Tab 1: Build AGW from Source
+
+We will kick off the initial build of the AGW from source here.
+
+```bash
+HOST [magma/lte/gateway]$ vagrant ssh magma
+MAGMA-VM [/home/vagrant]$ cd magma/lte/gateway
+MAGMA-VM [/home/vagrant/magma/lte/gateway]$ make run
+```
+
+This will take a while (we have a lot of CXX files to build). With 2 extensive
+build jobs running, now is a good time to grab a coffee or lunch. The first
+build ever from source will take a while, but afterwards, a persistent ccache
+and Docker's native layer caching will speed up subsequent builds
+significantly.
+
+You can monitor what happens in the other tab now:
+
+### Terminal Tab 2: Start Orchestrator
+
+Once the Orchestrator build finishes, we can start the development Orchestrator
+cloud for the first time. We'll also use this time to register the local
+client certificate you'll need to access the local API gateway for your
+development stack.
+
+Starting Orchestrator is as simple as:
+
+```bash
+HOST [magma/orc8r/cloud/docker]$ docker-compose up -d
+
+Creating orc8r_postgres_1 ... done
+Creating orc8r_test_1 ... done
+Creating orc8r_maria_1 ... done
+Creating elasticsearch ... done
+Creating fluentd ... done
+Creating orc8r_kibana_1 ... done
+Creating orc8r_proxy_1 ... done
+Creating orc8r_controller_1 ... done
+```
+
+The Orchestrator application containers will bootstrap certificates on startup
+which are cached for future runs. Watch the directory `magma/.cache/test_certs`
+for a file `admin_operator.pfx` to show up (this may take a minute or 2), then:
+
+```bash
+HOST [magma/orc8r/cloud/docker]$ ls ../../../.cache/test_certs
+
+admin_operator.key.pem bootstrapper.key controller.crt rootCA.key
+admin_operator.pem certifier.key controller.csr rootCA.pem
+admin_operator.pfx certifier.pem controller.key rootCA.srl
+
+HOST [magma/orc8r/cloud/docker]$ open ../../../.cache/test_certs
+```
+
+In the Finder window that pops up, double-click `admin_operator.pfx` to add the
+local client cert to your keychain. *The password for the cert is magma*.
+In some cases, you may have to open up the Keychain app in MacOS and drag-drop
+the file into the login keychain if double-clicking doesn't work.
+
+If you use Firefox, you'll have to import this .pfx file into your browser's
+installed client certificates. See [here](https://support.globalsign.com/customer/en/portal/articles/1211486-install-client-digital-certificate---firefox-for-windows)
+for instructions. If you use Chrome or Safari, you may have to restart the
+browser before the certificate can be used.
+
+### Connecting Your Local LTE Gateway to Your Local Cloud
+
+At this point, you will have built all the code in the LTE access gateway and
+the Orchestrator cloud. All the services on the LTE access gateway and
+orchestrator cloud are running, but your gateway VM isn't yet set up to
+communicate with your local cloud.
+
+We have a fabric command set up to do this:
+
+```bash
+HOST [magma]$ cd lte/gateway
+HOST [magma/lte/gateway]$ fab -f dev_tools.py register_vm
+```
+
+This command will seed your gateway and network on Orchestrator with some
+default LTE configuration values and set your gateway VM up to talk to your
+local Orchestrator cloud. Wait a minute or 2 for the changes to propagate,
+then you can verify that things are working:
+
+```bash
+HOST [magma/lte/gateway]$ vagrant ssh magma
+
+MAGMA-VM$ sudo service magma@* stop
+MAGMA-VM$ sudo service magma@magmad restart
+MAGMA-VM$ sudo tail -f /var/log/syslog
+
+# After a minute or 2 you should see these messages:
+Sep 27 22:57:35 magma-dev magmad[6226]: [2018-09-27 22:57:35,550 INFO root] Checkin Successful!
+Sep 27 22:57:55 magma-dev magmad[6226]: [2018-09-27 22:57:55,684 INFO root] Processing config update g1
+Sep 27 22:57:55 magma-dev control_proxy[6418]: 2018-09-27T22:57:55.683Z [127.0.0.1 -> streamer-controller.magma.test,8443] "POST /magma.Streamer/GetUpdates HTTP/2" 200 7bytes 0.009s
+```
+
+## Using the NMS UI
+
+Magma provides a UI for configuring and monitoring the networks. To set up
+the NMS to talk to your local Orchestrator:
+
+```bash
+HOST [magma]$ cd nms/fbcnms-projects/magmalte
+HOST [magma/nms/fbcnms-projects/magmalte] $ docker-compose build magmalte
+HOST [magma/nms/fbcnms-projects/magmalte] $ docker-compose up -d
+HOST [magma/nms/fbcnms-projects/magmalte] $ ./scripts/dev_setup.sh
+```
+
+After this, you will be able to access the UI by visiting
+[https://localhost](https://localhost), and using the email `admin@magma.test`
+and password `password1234`. If you see a 502 Gateway Error, don't worry: the
+NMS can take up to 60 seconds to finish starting up.
+
+
+
+
diff --git a/docusaurus/docs/cwf/healthchecker.md b/docusaurus/docs/cwf/healthchecker.md
new file mode 100644
index 0000000..e641e1d
--- /dev/null
+++ b/docusaurus/docs/cwf/healthchecker.md
@@ -0,0 +1,22 @@
+---
+id: version-1.0.0-healthchecker
+title: Health Checker
+sidebar_label: Health Checker
+hide_title: true
+original_id: healthchecker
+---
+# Health Checker
+Health checker reports:
+* Gateway - Controller connectivity
+* Status for all the running services
+* Number of restarts per each service
+* Number of errors per each service
+* Internet and DNS status
+* Kernel version
+* Magma version
+
+## Usage
+```bash
+docker-compose exec magmad bash
+health_cli.py
+```
\ No newline at end of file
diff --git a/docusaurus/docs/cwf/setup.md b/docusaurus/docs/cwf/setup.md
new file mode 100644
index 0000000..3ec24c7
--- /dev/null
+++ b/docusaurus/docs/cwf/setup.md
@@ -0,0 +1,35 @@
+---
+id: version-1.0.0-setup
+title: CWAG Setup (With Vagrant)
+sidebar_label: Setup (With Vagrant)
+hide_title: true
+original_id: setup
+---
+# CWF Access Gateway Setup (With Vagrant)
+### Prerequisites
+To develop and manage a Magma VM, you must have the following applications installed locally:
+
+* Virtualbox
+* Vagrant
+* Ansible
+
+### Steps
+
+To bring up a Wifi Access Gateway (CWAG) VM using Vagrant:
+
+* Run the following command:
+
+``HOST:magma/cwf/gateway USER$ vagrant up cwag``
+
+Vagrant will bring up the VM, then Ansible will provision the VM.
+
+
+* Once the CWAG VM is up and provisioned, run the following commands:
+
+``HOST:magma/cwf/gateway USER$ vagrant ssh cwag``
+``AGW:~ USER$ cd magma/cwf/gateway/docker``
+``AGW:~/magma/cwf/gateway/docker USER$ docker-compose build --parallel``
+``AGW:~/magma/cwf/gateway/docker USER$ docker-compose up -d``
+
+After this, all the CWAG docker containers should have been brought up
+successfully.
diff --git a/docusaurus/docs/feg/deploy_build.md b/docusaurus/docs/feg/deploy_build.md
new file mode 100644
index 0000000..ecb7455
--- /dev/null
+++ b/docusaurus/docs/feg/deploy_build.md
@@ -0,0 +1,24 @@
+---
+id: version-1.0.0-deploy_build
+title: Building Federation Gateway
+hide_title: true
+original_id: deploy_build
+---
+
+# Building Federation Gateway Components
+
+Start up your Docker daemon, `cd` to where you've cloned Magma, then:
+
+```bash
+cd magma/feg/gateway/docker
+docker-compose build --parallel
+```
+
+If this is your first time building the FeG, this may take a while. When this
+job finishes, upload these images to your image registry:
+
+```bash
+../../../orc8r/tools/docker/publish.sh -r -i gateway_python
+../../../orc8r/tools/docker/publish.sh -r -i gateway_go
+../../../orc8r/tools/docker/publish.sh -r -i gateway_radius
+```
diff --git a/docusaurus/docs/feg/deploy_install.md b/docusaurus/docs/feg/deploy_install.md
new file mode 100644
index 0000000..abebc97
--- /dev/null
+++ b/docusaurus/docs/feg/deploy_install.md
@@ -0,0 +1,160 @@
+---
+id: version-1.0.0-deploy_install
+title: Installing Federation Gateway
+hide_title: true
+original_id: deploy_install
+---
+# Installing Federation Gateway
+
+## Prerequisites
+
+To install the Federation Gateway, there are three required files that are
+deployment-specific. These are described below:
+
+* `rootCA.pem` - This file should match the `rootCA.pem` of the Orchestrator
+that the Federation Gateway will connect to.
+
+* `control_proxy.yml` - This file is used to configure the `magmad`
+and `control_proxy` services to point toward the appropriate Orchestrator.
+A sample configuration is provided below. The `bootstrap_address`,
+`bootstrap_port`, `controller_address`, and `controller_port` are the
+parameters that will likely need to be modified.
+
+```
+#
+# Copyright 2020 The Magma Authors.
+
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# nghttpx config will be generated here and used
+nghttpx_config_location: /var/tmp/nghttpx.conf
+
+# Location for certs
+rootca_cert: /var/opt/magma/certs/rootCA.pem
+gateway_cert: /var/opt/magma/certs/gateway.crt
+gateway_key: /var/opt/magma/certs/gateway.key
+
+# Listening port of the proxy for local services. The port would be closed
+# for the rest of the world.
+local_port: 8443
+
+# Cloud address for reaching out to the cloud.
+cloud_address: controller.magma.test
+cloud_port: 443
+
+bootstrap_address: bootstrapper-controller.magma.test
+bootstrap_port: 443
+
+# Option to use nghttpx for proxying. If disabled, the individual
+# services would establish the TLS connections themselves.
+proxy_cloud_connections: True
+
+# Allows http_proxy usage if the environment variable is present
+allow_http_proxy: True
+```
+
+* `.env` - This file provides any deployment specific environment variables used
+in the `docker-compose.yml` of the Federation Gateway. A sample configuration
+is provided below:
+
+```
+# Copyright 2020 The Magma Authors.
+
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+COMPOSE_PROJECT_NAME=feg
+DOCKER_REGISTRY=
+DOCKER_USERNAME=
+DOCKER_PASSWORD=
+IMAGE_VERSION=latest
+
+ROOTCA_PATH=/var/opt/magma/certs/rootCA.pem
+CONTROL_PROXY_PATH=/etc/magma/control_proxy.yml
+SNOWFLAKE_PATH=/etc/snowflake
+
+CERTS_VOLUME=/var/opt/magma/certs
+CONFIGS_VOLUME=/var/opt/magma/configs
+
+# This section is unnecessary if using host networking
+S6A_LOCAL_PORT=3868
+S6A_HOST_PORT=3868
+S6A_NETWORK=sctp
+
+SWX_LOCAL_PORT=3869
+SWX_HOST_PORT=3869
+SWX_NETWORK=sctp
+
+GX_LOCAL_PORT=3870
+GX_HOST_PORT=3870
+GX_NETWORK=tcp
+
+GY_LOCAL_PORT=3871
+GY_HOST_PORT=3871
+GY_NETWORK=tcp
+```
+
+## Installation
+
+The installation is done using the `install_gateway.sh` script located at
+`magma/orc8r/tools/docker`. To install, copy that file and the three files
+described above into a directory on the install host. Then
+
+```console
+INSTALL_HOST [~/]$ sudo ./install_gateway.sh feg
+```
+
+After this completes, you should see: `Installed successfully!!`
+
+## Registration
+
+After installation, the next step is to register the gateway with the Orchestrator.
+To do so:
+
+```console
+INSTALL_HOST [~/]$ cd /var/opt/magma/docker
+INSTALL_HOST [/var/opt/magma/docker]$ docker-compose exec magmad /usr/local/bin/show_gateway_info.py
+```
+
+This will output a hardware ID and a challenge key. This information must be
+registered with the Orchestrator. At this time, NMS support for FeG
+registration is still in-progress.
+
+To register the FeG, go to the Orchestrator's APIdocs in your browser.
+**Note: It is highly encouraged to use V1 of the apidocs**
+(i.e. https://controller.url.sample:9443/apidocs/v1/).
+
+Now, create a Federation Network. This is found at `/feg` under the
+**Federation Networks** section. Then register the gateway under the
+**Federation Gateway** section at `/feg/{network_id}/gateways` using the
+network ID of the Federation Network and the hardware ID and challenge key
+from the previous step.
+
+To verify that the gateway was correctly registered, run:
+
+```console
+INSTALL_HOST [~/]$ cd /var/opt/magma/docker
+INSTALL_HOST [/var/opt/magma/docker]$ docker-compose exec magmad /usr/local/bin/checkin_cli.py
+```
+
+## Upgrades
+
+The Federation Gateway supports NMS initiated upgrades. These can be triggered
+from the NMS under the `Configure` section by updating the FeG's tier to the
+appropriate `Software Version`. After triggering the upgrade from the NMS,
+magmad on the gateway will pull down the specified docker images,
+update any static configuration, and update the docker-compose file to the
+appropriate version.
diff --git a/docusaurus/docs/feg/deploy_intro.md b/docusaurus/docs/feg/deploy_intro.md
new file mode 100644
index 0000000..128727b
--- /dev/null
+++ b/docusaurus/docs/feg/deploy_intro.md
@@ -0,0 +1,17 @@
+---
+id: version-1.0.0-deploy_intro
+title: Introduction
+hide_title: true
+original_id: deploy_intro
+---
+# Deploying Federation Gateway: Introduction
+
+These pages will walk through the full process of deploying a
+Federation Gateway, from building the various containers that you'll need
+to installing them on either a Bare-Metal or VM host. This installation guide
+targets *production* environments - if you aren't ready for this, the developer
+documentation will be up shortly.
+
+If you want to get a head start on the development setup, you can build the
+FeG containers following this guide and use docker-compose at
+`magma/feg/gateway/docker` to spin up the local version of the FeG.
diff --git a/docusaurus/docs/feg/legacy/FAQ.md b/docusaurus/docs/feg/legacy/FAQ.md
new file mode 100644
index 0000000..5125932
--- /dev/null
+++ b/docusaurus/docs/feg/legacy/FAQ.md
@@ -0,0 +1,25 @@
+---
+id: version-1.0.0-faq
+title: FAQ
+hide_title: true
+original_id: faq
+---
+# FAQ
+
+1. Do I need to run the federated gateway as an individual developer?
+
+ - It is highly unlikely you'll need this component. Only those who plan
+ to integrate with a Mobile Network Operator will need the federated gateway.
+
+2. I'm seeing 500's in `/var/log/syslog`. How do I fix this?
+
+ - Ensure your cloud VM is up and services are running
+ - Ensure that you've run `register_feg_vm` at `magma/feg/gateway` on your host machine
+
+3. I'm seeing 200's, but streamed configs at `/var/opt/magma/configs` aren't being updated?
+
+ - Ensure the directory at `/var/opt/magma/configs` exists
+ - Ensure the gateway configs in NMS are created (see [link](https://github.com/magma/magma/blob/v1.1.0/docs/Magma_Network_Management_System.pdf) for more instructions)
+ - Ensure one of the following configs exist:
+ - [Federated Gateway Network Configs](https://127.0.0.1:9443/apidocs#/Networks/post_networks__network_id__configs_federation)
+ - [Federated Gateway Configs](https://127.0.0.1:9443/apidocs#/Gateways/post_networks__network_id__gateways__gateway_id__configs_federation)
diff --git a/docusaurus/docs/feg/legacy/README.md b/docusaurus/docs/feg/legacy/README.md
new file mode 100644
index 0000000..eaf27ba
--- /dev/null
+++ b/docusaurus/docs/feg/legacy/README.md
@@ -0,0 +1,45 @@
+---
+id: version-1.0.0-readme
+title: Federated Gateway (FeG)
+sidebar_label: Overview
+hide_title: true
+original_id: readme
+---
+# Federated Gateway (FeG)
+The federated gateway provides remote procedure call (gRPC) based interfaces to standard 3GPP components, such as
+HSS (S6a, SWx), OCS (Gy), and PCRF (Gx). The exposed RPC interface provides versioning & backward compatibility,
+security (HTTP2 & TLS) as well as support for multiple programming languages. The Remote Procedures below provide
+simple, extensible, multi-language interfaces based on gRPC, which allow developers to avoid dealing with the
+complexities of 3GPP protocols. Implementing these RPC interfaces allows networks running on Magma to integrate
+with traditional 3GPP core components.
+
+
+
+The Federated Gateway supports the following features and functionalities:
+
+1. Hosting centralized control plane interface towards HSS, PCRF, OCS and MSC/VLR on behalf of distributed AGW/EPCs.
+2. Establishing diameter connection with HSS, PCRF and OCS directly as 1:1 or via DRA.
+3. Establishing SCTP/IP connection with MSC/VLR.
+4. Interfacing with the AGW over a gRPC interface by responding to remote calls from EPC (MME and Sessiond/PCEF) components,
+ converting these remote calls to 3GPP compliant messages and then sending these messages to the appropriate core network
+ components such as HSS, PCRF, OCS and MSC. Similarly, the FeG receives 3GPP compliant messages from HSS, PCRF, OCS and MSC
+ and converts these to the appropriate gRPC messages before sending them to the AGW.
+
+
+
+Please see the **[Magma Product Spec](https://github.com/magma/magma/blob/v1.1.0/docs/Magma_Specs_V1.1.pdf)** for more detailed information.
+
+## Federated Gateway Services & Tools
+
+The following services run on the federated gateway:
+ - `s6a_proxy` - translates calls from gRPC to the S6a protocol between AGW and HSS
+ - `session_proxy` - translates calls from gRPC to the Gx/Gy protocols between AGW and PCRF/OCS
+ - `csfb` - translates calls from gRPC to the CSFB protocol between AGW and VLR
+ - `swx_proxy` - translates calls from gRPC to the SWx protocol between AGW and HSS
+ - `gateway_health` - provides health updates to the orc8r to be used for
+ achieving highly available federated gateway clusters (see **[Magma Product Spec](https://github.com/magma/magma/blob/v1.1.0/docs/Magma_Specs_V1.1.pdf)**
+ for more details)
+ - `radiusd` - fetches metrics from the running radius server and exports them
+
+Associated tools for sending requests and debugging issues can be found
+at `magma/feg/gateway/tools`.
diff --git a/docusaurus/docs/feg/legacy/docker_setup.md b/docusaurus/docs/feg/legacy/docker_setup.md
new file mode 100644
index 0000000..51870d1
--- /dev/null
+++ b/docusaurus/docs/feg/legacy/docker_setup.md
@@ -0,0 +1,64 @@
+---
+id: version-1.0.0-docker_setup
+title: FeG Docker Setup
+hide_title: true
+original_id: docker_setup
+---
+# FeG Docker Setup
+
+The FeG runs each service in its own Docker container.
+Production services are defined in `docker-compose.yml`.
+Development services are defined in `docker-compose.override.yml`.
+The development `test` service is used to run unit tests and regenerate Swagger/Protobuf code.
+The development `test` service can also be used to perform other development-related procedures.
+
+## Requirements
+
+To run the FeG with docker, both docker and docker compose must be installed.
+* Follow [these steps](https://docs.docker.com/install/) to install docker
+* Follow [these steps](https://docs.docker.com/compose/install/) to install docker compose
+
+NOTE: If you are running the FeG on Mac, you will need to increase the memory
+limit of the docker daemon to at least 4GB to build the images. Otherwise,
+when building the Go image, you may see an error message similar to this:
+`/usr/local/go/pkg/tool/linux_amd64/link: signal: killed`.
+
+The `rootCA.pem` certificate must be located in the `.cache/test_certs` folder,
+so that it can be mounted into the appropriate containers from there.
+
+## Development
+
+Follow these steps to run the FeG services:
+1. `cd magma/feg/gateway/docker`
+2. `docker-compose build`
+3. `docker-compose up -d`
+
+Each service should now be running in each of its containers.
+By default, both production and development services should be running.
+To place a shell into the test container, run the command:
+
+`docker-compose exec test /bin/bash`
+
+The test container contains the mounted source code and configuration settings.
+The mounted source code and configuration settings can be changed externally
+and the changes will be reflected inside the test container.
+Run the command `make precommit` in the container before submitting a patch.
+
+To make changes to currently running FeG services, the containers must be rebuilt and restarted:
+1. `docker-compose down`
+2. `docker-compose build`
+3. `docker-compose up -d`
+
+To manage the containers, the following commands are useful:
+* `docker-compose ps` (get status of each container)
+* `docker-compose logs -f` (tail logs of all containers)
+* `docker-compose logs -f <service>` (tail logs of a particular service)
+* `docker-compose down` (stop all services)
+
+## Publishing the images
+
+To push production images to a private docker registry, use the following script:
+```
+[/magma/feg/gateway/docker]$ ../../../orc8r/tools/docker/publish.sh -r -i gateway_python
+[/magma/feg/gateway/docker]$ ../../../orc8r/tools/docker/publish.sh -r -i gateway_go
+```
diff --git a/docusaurus/docs/howtos/README_package.md b/docusaurus/docs/howtos/README_package.md
new file mode 100644
index 0000000..9a0bb62
--- /dev/null
+++ b/docusaurus/docs/howtos/README_package.md
@@ -0,0 +1,67 @@
+---
+id: version-1.0.0-readme_package
+title: Packaging
+hide_title: true
+original_id: readme_package
+---
+# Packaging
+TL;DR
+-----
+1. Running `fab dev package:git` on the host creates the package inside the
+gateway VM
+2. Commit changes to build-magma.sh and magma.lockfile.
+
+Creating a release package.
+---------------------------
+Run `fab dev package:git` under lte/gateway/ on the host to create a Magma
+release. This runs the `build-magma.sh` script with the latest commit id and
+build type as DEBUG. It builds everything, identifies dependencies (assuming
+they're specified in the setup.py properly), and creates a Debian package in the
+magma-packages/ directory on the dev VM.
+
+For a production release, you'll probably want to bump the package version --
+this can be done inside build-magma.sh. The version number should be bumped
+when there is a minor or major feature or bug fix release. If you're just
+making a minor tweak (e.g., mistake in building the package), you can also
+increment the iteration number.
+
+Testing a release package before you push it.
+---------------------------------------------
+You should always do this. In general, try your best not to release broken
+packages.
+
+1. Build the release like you normally would.
+2. Spin up a fresh prod VM or gateway machine and copy the magma_.deb
+generated above.
+3. Run `sudo apt-get install gdebi; sudo gdebi magma_.deb`
+4. A VM reload or gateway reboot will likely be required due to kernel upgrade.
+
+This will simulate the exact steps that apt-get performs in production.
+After you've done this, your environment is identical to what you'll have if
+you had released your package and run `apt-get install magma`.
+
+If you want to test upgrading versions, skip the part where you spin up a fresh
+prod VM, and instead set up a VM with whatever version you're trying to test
+upgrade from. For example, if you want to test upgrading from the currently
+released version N to the new version N+1, install magma v(N) on the prod VM,
+then install your pre-release package of magma v(N+1).
+
+Building Python dependencies.
+-----------------------------
+The `pydep` tool lets you build Python dependencies. By default,
+`build-magma.sh` figures out what Python packages we depend on, but it doesn't
+build those packages. You can manually build packages by running
+
+`./pydep finddep -b ../python/setup.py`
+
+This will figure out what Python dependencies aren't satisfied by released
+system packages and build those. You only need to do this when you've added a
+Python dependency that doesn't have a debian package already available that
+satisfies your constraints.
+
+What is magma.lockfile?
+-----------------------
+`magma.lockfile` is generated by `pydep` every time you run `build-magma.sh`
+and captures a specific set of Python packages that satisfy the Python
+dependencies of Magma. This lets us keep track of what we're actually using for
+each release.
diff --git a/docusaurus/docs/howtos/configurations.md b/docusaurus/docs/howtos/configurations.md
new file mode 100644
index 0000000..134fb8d
--- /dev/null
+++ b/docusaurus/docs/howtos/configurations.md
@@ -0,0 +1,57 @@
+---
+id: version-1.0.0-configurations
+sidebar_label: Configurations
+title: Configurations in Magma
+hide_title: true
+original_id: configurations
+---
+# Configurations in Magma
+### Cloud
+On the cloud side, service related configuration files are stored in
+`magma/{MODULE_NAME}/cloud/configs/`.
+
+#### Service Registry
+`magma/{MODULE_NAME}/cloud/configs/service_registry.yml` lists all services in
+the module and stores configurations that all services must have (i.e. port, host).
+The information is used for services routing.
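+
+A minimal entry might look like the following sketch (the service name and field
+values are illustrative; check an existing `service_registry.yml` for the exact
+schema used in your module):
+
+```yaml
+services:
+  example_service:   # hypothetical service name
+    host: localhost  # host used to route requests to the service
+    port: 9120       # port the service listens on
+```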
+
+#### Service Specific Configs
+All service specific configurations are stored in
+`magma/{MODULE_NAME}/cloud/configs/{SERVICE_NAME}.yml`.
+
+#### How To Modify Configs
+When the cloud VM is provisioned, the service specific configuration files are
+sym-linked into `/etc/magma/configs/{MODULE_NAME}/`.
+The configs can be modified in both `/etc/magma/configs/` and `/var/opt/magma/configs/`, but
+the latter takes precedence over the former.
+
+Every time a service starts, both the per-service configs and service registries
+are loaded. Restart the corresponding service after modifying configs to see the changes.
+
+### Gateway
+On the Gateway side, service related configuration files are stored in
+`magma/lte/gateway/configs/`
+
+#### Service Registry
+`magma/lte/gateway/configs/service_registry.yml` lists all services on the gateway
+ and stores configurations that all services must have.
+ The information is used for services routing.
+
+#### Service Specific Configs
+All service specific configurations are stored in
+`magma/lte/gateway/configs/{SERVICE_NAME}.yml`.
+
+#### How To Modify Configs
+When the magma VM is provisioned, the service specific configuration files are
+sym-linked into `/etc/magma/configs/`.
+The configs can be modified in both `/etc/magma/configs/` and `/var/opt/magma/configs/`;
+the latter takes priority.
+
+Restart the service to see the changes reflected.
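+
+For example, to override a single service config on the gateway (the service
+name `mobilityd` here is purely illustrative):
+
+```bash
+# Copy the provisioned config into the override directory, edit it, then restart
+sudo cp /etc/magma/configs/mobilityd.yml /var/opt/magma/configs/mobilityd.yml
+sudo vi /var/opt/magma/configs/mobilityd.yml
+sudo service magma@mobilityd restart
+```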
+
+#### How to Modify Log Level
+Log level can be modified using the `log_level` field in the configs. Alternatively,
+there is a CLI to change the log level:
+```bash
+venvsudo magma/orc8r/gateway/python/scripts/config_cli.py set_log_level {SERVICE_NAME} {LOG_LEVEL}
+```
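+
+For instance, to raise the log level of `mobilityd` to DEBUG (the service name
+and level are shown only as an illustration):
+
+```bash
+venvsudo magma/orc8r/gateway/python/scripts/config_cli.py set_log_level mobilityd DEBUG
+```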
diff --git a/docusaurus/docs/howtos/documentation_development.md b/docusaurus/docs/howtos/documentation_development.md
new file mode 100644
index 0000000..48b2c68
--- /dev/null
+++ b/docusaurus/docs/howtos/documentation_development.md
@@ -0,0 +1,68 @@
+---
+id: version-1.X.X-documentation_development
+sidebar_label: Documentation Development
+title: Documentation Development
+hide_title: true
+original_id: documentation_development
+---
+# Development
+## Local installation
+> This is required to run the documentation locally and other commands like the translation commands.
+
+Due to some issues with the `npm install` command, it is recommended to use `yarn` instead. To install `yarn` run the following command:
+```sh
+npm install -g yarn
+```
+Then run the following command to install the dependencies (at `./magma-documentation`):
+```sh
+yarn install
+```
+
+> [!TIP]
+> If you need to refresh the dependencies, run:
+> ```sh
+> rm -rf node_modules
+> rm yarn.lock
+> yarn cache clean
+> yarn install
+> ```
+> This will remove the `node_modules` folder, the `yarn.lock` file, clean the cache and install the dependencies again.
+
+## Docker development
+Because the documentation needs to run both locally and inside Docker, the `scripts.start` configuration in `package.json` has one entry for the local environment and another for the Docker environment. The local environment runs the `docusaurus start` command and the Docker environment runs the `docusaurus start --host 0.0.0.0` command.
+
+In order to deploy the Docusaurus instance locally with the Magma documentation, execute the commands below:
+
+```sh
+cd magma-documentation
+docker compose up docusaurus-dev
+```
+
+This will initialize the Docusaurus application inside a Docker container with the proper configuration and documents. The service will run as a daemon in the background, exposed through the `http://localhost:3000` URL, with hot-reloading enabled.
+
+In order to stop the Docusaurus container, execute the command below:
+
+```sh
+docker compose down
+```
+
+If any build step fails, it is possible to clean the cached files by running the command below:
+
+```sh
+docker compose down --rmi all --remove-orphans
+```
+
+If you also want to remove the volumes, add the flag `-v`/`--volumes` to the command above.
+
+
+> [!NOTE]
+> For more development information, please refer to the official [Docusaurus documentation](https://docusaurus.io/docs).
+
+### Production
+
+> To be done.
+
+```sh
+cd magma-documentation
+docker compose up docusaurus-prod
+```
diff --git a/docusaurus/docs/howtos/docusaurus.md b/docusaurus/docs/howtos/docusaurus.md
new file mode 100644
index 0000000..4838c53
--- /dev/null
+++ b/docusaurus/docs/howtos/docusaurus.md
@@ -0,0 +1,18 @@
+---
+id: version-1.0.0-docusaurus
+title: Docusaurus
+hide_title: true
+original_id: docusaurus
+---
+# Docusaurus
+### Generating the Documentation Website
+
+1. Ensure [docker](https://docs.docker.com/install/) is installed
+2. From `magma/docs`, run `./docusaurus/create_docusaurus_website.sh`
+3. Navigate to http://127.0.0.1:3000/magma/ to view a local version of the site
+
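+Combined into a shell session, the steps above look roughly like this (assuming
+Docker is already running):
+
+```bash
+cd magma/docs
+./docusaurus/create_docusaurus_website.sh
+# then browse to http://127.0.0.1:3000/magma/ for the local site
+```
+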
+### Directory Structure
+
+The documentation website is generated using [docusaurus](https://docusaurus.io/) from
+the README files stored in `docs/readmes/`. The docusaurus files needed to generate the website are
+stored in `docs/docusaurus`.
diff --git a/docusaurus/docs/lte/README_AGW.md b/docusaurus/docs/lte/README_AGW.md
new file mode 100644
index 0000000..8d2b29c
--- /dev/null
+++ b/docusaurus/docs/lte/README_AGW.md
@@ -0,0 +1,139 @@
+---
+id: version-1.0.0-readme_agw
+title: AGW Services/Sub-Components
+sidebar_label: Services/Sub-Components
+hide_title: true
+original_id: readme_agw
+---
+# AGW Services/Sub-Components
+## MME
+ MME includes S1AP, NAS and MME_APP subcomponents. MME functions include:
+
+1. S1AP external Interface with eNB
+ 1. S1AP ASN.1 encode/decode
+ 2. S1AP Procedures
+2. NAS external Interface with UE
+ 1. NAS message encode/decode
+ 2. NAS Procedures
+ 3. NAS state-machine for NAS EMM and NAS ESM protocols
+3. S11 like Interface with unified S-GW & P-GW
+ 1. Create and delete PDN Sessions
+ 2. Create/modify/delete default and dedicated bearers
+4. GRPC based S6a like interface towards FedGW
+ 1. To get authentication vector and subscriber profile to authenticate and authorize the subscriber
+ 2. To register the serving MME-id with HSS
+ 3. To receive the HSS initiated subscriber de-registration request
+ 4. To send purge request to HSS during UE de-registration
+ 5. To receive HSS reset indication
+5. GRPC based SGs like interface towards FeGW
+ 1. To support NON-EPS services for the subscriber ( CS voice and CS-SMS)
+6. Update serving GW-id for the subscriber to the FeGW
+7. Statistics to track the number of eNodeBs connected, number of registered UEs, number of connected UEs and number of idle UEs.
+8. MME APP maintains UE state machine and routes the message to appropriate modules based on UE state, context and received message.
+
+## S-PGW Control Plane
+S-PGW Control Plane functions include:
+
+1. S11 like interface with MME
+ 1. Create and delete PDN Sessions
+ 2. Create/modify/delete default and dedicated bearers
+2. Interface with MobilityD to allocate and release IP address for the subscriber during PDN connection establishment and release, respectively
+3. Interface with Sessiond/PCEF to trigger Gx and Gy session establishment for the subscriber during PDN connection establishment
+4. Establish and release GTP tunnel during bearer setup and release
+
+## Health Checker
+Health checker reports 2 kinds of health status:
+1. Access Gateway specific health which includes:
+ * Number of allocated_ips
+ * Number of core_dumps
+ * Registration_success_rate
+ * Subscriber table
+2. Generic Health status which includes:
+ * Gateway - Controller connectivity
+ * Status for all the running services
+ * Number of restarts per each service
+ * Number of errors per each service
+ * Internet and DNS status
+ * Kernel version
+ * Magma version
+
+## Mobilityd
+Mobilityd functions include:
+
+1. Interface with orchestrator to receive IP address block during system bring-up.
+2. Allocate and release IP address for the subscriber on the request from S-PGW Control Plane.
+
+## Sessiond / PCEF
+Sessiond implements the control plane for the PCEF functionality in Magma. Sessiond is responsible for the lifecycle management of the session state (credit and rules) associated with a user. It interacts with the PCEF datapath through pipelined for L2-L4 and DPId for L4-L7 policies.
+
+## Pipelined
+Pipelined is the control application that programs the OVS openflow rules. In implementation pipelined is a set of services that are chained together. These services can be chained and enabled/disabled through the REST API. The README (https://github.com/facebookincubator/magma/blob/master/README.md) describes the contract in greater detail.
+
+## PolicyDB
+PolicyDB is the service that supports static PCRF rules. This service runs in both the AGW and the orchestrator. Rules managed through the rest API are streamed to the policydb instances on the AGW. Sessiond ensures these policies are implemented as specified.
+
+## Subscriberdb
+Subscriberdb is Magma's local version of HSS. Magma uses Subscriberdb to enable LTE data services through one network node like AGW for LTE subscribers. It is deactivated for the deployments that make use of the MNO's HSS. It supports the following two S6a procedures:
+
+1. S6a: Authentication Information Request and Answer (AIR/AIA)
+2. S6a: Update Location Request and Answer (ULR/ULA)
+
+Subscriberdb functions include:
+
+1. Interface with Orchestrator to receive subscriber information such as IMSI, secret key (K), OP, and user profile during system bring-up.
+2. Generate authentication vectors using the Milenage algorithm and share these with MME.
+3. Share user profile with MME.
+
+## OVS - Data path
+OVS (http://www.openvswitch.org/) is used to implement basic PCEF functionality for user plane traffic. The control plane applications interacting with OVS are implemented in pipelined.
+
+## Enodebd
+
+Enodebd supports management of eNodeB devices that use TR-069 as the management interface. This is used both for provisioning the eNodeB and for collecting performance metrics. It supports the following data models:
+1. Device data model: TR-181 and TR-098
+2. Information data model: TR-196
+
+
+## Control Proxy
+Control proxy manages the network transport between the gateways and the controller.
+
+1. Control proxy abstracts service addressability by providing a service registry which maps a user-addressable name to its remote IP and port.
+2. All traffic is sent over HTTP/2 and encrypted using TLS. The traffic is routed to individual services by encoding the service name in the HTTP/2 `:authority:` header.
+3. Individual gRPC calls between a gateway and the controller are multiplexed over the same HTTP/2 connection, which avoids the connection setup time per RPC call.
+
+# Command Line Interfaces to Magma services
+
+Several services listed above can be configured using CLIs, located under
+magma/lte/gateway/python/scripts. These are:
+
+1. Health Checker: agw_health_cli.py
+2. Mobilityd: mobility_cli.py
+3. Sessiond: session_manager_cli.py
+4. Pipelined: pipelined_cli.py
+5. PolicyDB: policydb_cli.py
+6. Subscriberdb: subscriber_cli.py
+7. Enodebd: enodebd_cli.py
+
+Each of these CLIs can be used in the gateway VM:
+
+```bash
+vagrant@magma-dev:~$ magtivate
+(python) vagrant@magma-dev:~$ enodebd_cli.py -h
+
+usage: enodebd_cli.py [-h]
+ {get_parameter,set_parameter,config_enodeb,reboot_enodeb,get_status}
+ ...
+
+Management CLI for Enodebd
+
+optional arguments:
+ -h, --help show this help message and exit
+
+subcommands:
+ {get_parameter,set_parameter,config_enodeb,reboot_enodeb,get_status}
+ get_parameter Send GetParameterValues message
+ set_parameter Send SetParameterValues message
+ config_enodeb Configure eNodeB
+ reboot_enodeb Reboot eNodeB
+ get_status Get eNodeB status
+```
diff --git a/docusaurus/docs/lte/README_callflow.md b/docusaurus/docs/lte/README_callflow.md
new file mode 100644
index 0000000..3a9b7a5
--- /dev/null
+++ b/docusaurus/docs/lte/README_callflow.md
@@ -0,0 +1,16 @@
+---
+id: version-1.0.0-readme_callflow
+title: Building the callflow
+hide_title: true
+original_id: readme_callflow
+---
+# Building the callflow
+In order to visualize the attach call flow in Magma, this change adds a sequence
+flow diagram. The file **Attach_call_flow_in_Magma.txt** can be uploaded to
+sequencediagram.org to edit and to export the .svg. or .jpg image. The color
+scheme in the diagram is as follows:
+
+ * Green: State changes
+ * Red: Code that crosses task boundaries or modifies *emm_context* without a function call
+ * Orange: Timers and notes on which function sends out the message
+ * Blue: Code that can be optimized, renamed or is inconsequential in this call flow
diff --git a/docusaurus/docs/lte/config_agw.md b/docusaurus/docs/lte/config_agw.md
new file mode 100644
index 0000000..9aea563
--- /dev/null
+++ b/docusaurus/docs/lte/config_agw.md
@@ -0,0 +1,114 @@
+---
+id: version-1.0.0-config_agw
+title: AGW Configuration
+sidebar_label: AGW Configuration
+hide_title: true
+original_id: config_agw
+---
+# Access Gateway Configuration
+## Prerequisites
+
+Before beginning to configure your Magma Access Gateway, you will need to make
+sure that it is running all services without crashing. You will also need a
+working Orchestrator setup. Please follow the instructions in
+"[Deploying Orchestrator](../orc8r/deploy_intro.md)" for a
+successful Orchestrator installation.
+
+You also should have completed all the steps in "[Access Gateway Setup (On Bare Metal)](./setup_deb.md)".
+For this part, we strongly recommend that you SSH into the AGW box from a host
+machine instead of using the AGW directly.
+
+## Access Gateway Configuration
+
+First, copy the root CA for your Orchestrator deployment into your AGW:
+
+```bash
+HOST$ scp rootCA.pem magma@10.0.2.1:~
+HOST$ ssh magma@10.0.2.1
+
+AGW$ sudo mkdir -p /var/opt/magma/tmp/certs/
+AGW$ sudo mv rootCA.pem /var/opt/magma/tmp/certs/rootCA.pem
+```
+
+Then, point your AGW to your Orchestrator:
+
+```bash
+AGW$ sudo mkdir -p /var/opt/magma/configs
+AGW$ cd /var/opt/magma/configs
+AGW$ sudo vi control_proxy.yml
+```
+
+Put the following contents into the file:
+
+```
+cloud_address: controller.yourdomain.com
+cloud_port: 443
+bootstrap_address: bootstrapper-controller.yourdomain.com
+bootstrap_port: 443
+
+rootca_cert: /var/opt/magma/tmp/certs/rootCA.pem
+```
+
+Then restart your services to pick up the config changes:
+
+```bash
+AGW$ sudo service magma@* stop
+AGW$ sudo service magma@magmad restart
+```
+
+## Creating and Configuring Your Network
+
+Navigate to your NMS instance, `https://nms.yourdomain.com`, and log in with the
+administrator credentials you provisioned when installing Orchestrator. If this
+is a fresh Orchestrator install, you will be prompted to create your first
+network. Otherwise, select "Create Network" from the network selection icon
+at the bottom of the left sidebar. Select `lte` as the network type and fill
+the other fields as you see fit.
+
+
+
+Go back to the network management app using the app selector in the bottom left
+of the screen, then go to "Configure" in the left sidebar, followed by the
+"Network Configuration" tab at the top of the screen. Configure your RAN and
+EPC parameters which are appropriate for your hardware setup.
+
+
+
+## Registering and Configuring Your Access Gateway
+
+You need to grab the hardware secrets off your AGW:
+
+```bash
+AGW$ show_gateway_info.py
+Hardware ID:
+------------
+1576b8e7-91a0-4e8d-b19f-d06421ad72b4
+
+Challenge Key:
+-----------
+MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAECMB9zEbAlLDQLq1K8tgCLO8Kie5IloU4QuAXEjtR19jt0KTkRzTYcBK1XwA+C6ALVKFWtlxQfrPpwOwLE7GFkZv1i7Lzc6dpqLnufSlvE/Xlq4n5K877tIuNac3U/8un
+```
+
+Navigate to "Gateways" on the NMS via the left navigation bar, hit
+"Add Gateway", and fill out the form using the hardware secrets from above:
+
+
+
+After you create your gateway, hit the Edit icon in its table row, select the
+"LTE" tab, and configure your EPC/RAN parameters. Make sure you enable enodeB
+transmit.
+
+
+
+At this point, you can validate the connection between your AGW and
+Orchestrator:
+
+```bash
+AGW$ journalctl -u magma@magmad -f
+# Look for the following logs
+# INFO:root:Checkin Successful!
+# INFO:root:[SyncRPC] Got heartBeat from cloud
+# INFO:root:Processing config update gateway_id
+```
+
+If everything looks OK, you can move on to configuring your enodeB.
diff --git a/docusaurus/docs/lte/enodebd.md b/docusaurus/docs/lte/enodebd.md
new file mode 100644
index 0000000..69c4ea9
--- /dev/null
+++ b/docusaurus/docs/lte/enodebd.md
@@ -0,0 +1,188 @@
+---
+id: version-1.0.0-enodebd
+title: eNodeB Configuration
+hide_title: true
+original_id: enodebd
+---
+# eNodeB Configuration
+## Prerequisites
+
+Make sure you follow the instructions in "[Deploying Orchestrator](../orc8r/deploy_intro.md)" for successful
+installation of Orchestrator and the instructions in "[AGW Configuration](./config_agw.md)" to provision and
+configure your Access Gateway (AGW).
+
+## S1 interface
+Connect your eNodeB to the `eth1` interface of Magma gateway. Magma uses `eth1`
+as the default `S1` interface. If you have more than one eNodeB, use an L2
+switch to connect all `S1` interfaces. For debugging purposes, you may find it
+particularly useful to do the following:
+
+1. Configure a managed L2 switch (e.g. [this NETGEAR](https://www.amazon.com/NETGEAR-GS108T-200NAS-GS108Tv2-Lifetime-Protection/dp/B07PS6Z162/))
+to mirror port X and port Y to port Z.
+2. Connect port X of that switch to the `eth1` interface on your AGW.
+3. Connect the WAN interface on your enodeB to port Y on the switch.
+4. Connect your host to port Z on the switch.
+
+This will allow you to do live packet captures with Wireshark from your host to
+debug the S1 interface between the enodeB and the AGW (filter for SCTP).
+
+## Automatic configuration
+*Magma officially supports auto-configuration of the following devices:*
+* Baicells Nova-243 Outdoor FDD/TDD eNodeB
+ - Firmware Version: BaiBS_RTS_3.1.6
+* Baicells mBS1100 LTE-TDD Base Station
+ - Firmware Version: BaiStation_V100R001C00B110SPC003
+* Baicells Neutrino-244 ID FDD/TDD enodeB
+
+*Magma supports the following management protocols:*
+* TR-069 (CWMP)
+
+*Magma supports configuration of the following data models:*
+* TR-196 data model
+* TR-181 data model
+
+The Magma team plans to add support for more devices and management protocols.
+
+To handle automatic configuration of eNodeB devices on your network, Magma
+uses the `enodebd` service. The `enodebd` service is responsible for handling
+the O&M interface between Magma and any connected eNodeB. The `enodebd` service
+can be disabled if you configure your eNodeB devices manually.
+
+### Baicells
+
+Use the enodeB's management interface to set the management server URL to
+`baiomc.cloudapp.net:48080`. Magma uses DNS hijacking to point the eNodeB to
+the configuration server being run by enodebd. `baiomc.cloudapp.net:48080`
+will point to `192.88.99.142`, the IP address that the TR-069 ACS is
+being hosted on.
+
+## Provisioning Your eNodeB on NMS
+
+Get the serial number of your eNodeB; you'll need it to register the device.
+On the NMS, navigate to "eNodeB Devices" in the sidebar, and hit "Add EnodeB".
+Configure the RAN parameters as necessary. Note that fields left blank will
+be inherited from either the network or gateway LTE parameters:
+
+
+
+Then, go back to the "Gateways" page and edit the LTE configuration of your
+AGW. Enter the serial number of the enodeB you just provisioned into the
+"Registered eNodeBs" field, then hit save.
+
+### Basic Troubleshooting
+After connecting your eNodeB(s) to the gateway through the `eth1` interface, you
+may want to check a few things if auto-configuration is not working.
+
+Magma will be running a DHCP server to assign an IP address to your connected
+eNodeB. Check if an IP address gets assigned to your eNodeB by either checking
+the eNodeB UI or monitoring the `dnsd` service.
+
+```
+journalctl -u magma@dnsd -f
+# Check for a similar log
+# DHCPDISCOVER(eth1) 48:bf:74:07:68:ee
+# DHCPOFFER(eth1) 10.0.2.246 48:bf:74:07:68:ee
+# DHCPREQUEST(eth1) 10.0.2.246 48:bf:74:07:68:ee
+# DHCPACK(eth1) 10.0.2.246 48:bf:74:07:68:ee
+```
+
+Use the `enodebd_cli.py` tool to check basic status of eNodeB(s). It also allows
+for querying the value of parameters, setting them, and sending reboot requests
+to the eNodeB. The following example gets the status of all connected eNodeBs.
+
+```
+enodebd_cli.py get_all_status
+# --- eNodeB Serial: 120200002618AGP0001 ---
+# IP Address..................10.0.2.246
+# eNodeB connected.....................1
+# eNodeB Configured....................1
+# Opstate Enabled......................1
+# RF TX on.............................1
+# RF TX desired........................1
+# GPS Connected........................0
+# PTP Connected........................0
+# MME Connected........................1
+# GPS Longitude...............113.902069
+# GPS Latitude.................22.932018
+# FSM State...............Completed provisioning eNB. Awaiting new Inform.
+```
+
+It may take time for the eNodeB to start transmitting because `enodebd` will
+reboot the eNodeB to apply new configurations. Monitor the progress of `enodebd`
+using the following command:
+
+```
+journalctl -u magma@enodebd -f
+# Check for a similar log
+# INFO:root:Successfully configured CPE parameters!
+```
+
+## Manual configuration
+Manual configuration of connected eNodeB(s) is always possible. Magma was tested
+with multiple Airspan eNodeB models configured through NetSpan management
+software.
+When manually configuring eNodeBs, make sure the eNodeB configuration matches
+that of the Magma cellular configuration. Pay special attention to the
+configuration of `PLMN`, `EARFCN` and `TAC`.
+
+### Basic Troubleshooting
+When manually configuring your eNodeB, you can use the manufacturer's tools or
+interfaces to monitor and troubleshoot the eNodeB configuration.
+
+You can also listen to the `S1` interface traffic and validate a proper `S1`
+setup and handshake. Below are the `SCTP` packets exchanged between the eNodeB
+and MME.
+
+```
+Source Destination Protocol Length Info
+10.0.2.246 10.0.2.1 SCTP 66 INIT
+10.0.2.1 10.0.2.246 SCTP 298 INIT_ACK
+10.0.2.246 10.0.2.1 SCTP 278 COOKIE_ECHO
+10.0.2.1 10.0.2.246 SCTP 60 COOKIE_ACK
+10.0.2.246 10.0.2.1 S1AP 106 S1SetupRequest
+10.0.2.1 10.0.2.246 SCTP 62 SACK
+10.0.2.1 10.0.2.246 S1AP 90 S1SetupResponse
+10.0.2.246 10.0.2.1 SCTP 62 SACK
+10.0.2.246 10.0.2.1 SCTP 66 HEARTBEAT
+10.0.2.1 10.0.2.246 SCTP 66 HEARTBEAT_ACK
+```
+
+# Connecting your first user
+
+## Adding subscribers
+
+Once your eNodeB starts transmitting, UEs may attempt to attach to your
+network. Your AGW will reject these attach requests due to authentication
+failure until you add the corresponding IMSI to the subscriber database.
+
+On the NMS, go to "Subscribers", then "Add Subscriber". The SIM secrets can be
+entered either in hex or base64-encoded binary:
+
+
+
+Subscriber information will eventually propagate to the AGW. You can verify
+using the CLI command `subscriber_cli.py list`
+
+## Validating UE connectivity
+
+Validating UE connectivity can be done from the UE side, MME side, or by
+listening to traffic on the `S1` interface.
+Below is a typical UE attach procedure as captured on the `S1` interface.
+
+```
+Source Destination Protocol Info
+10.0.2.246 10.0.2.1 S1AP/NAS-EPS InitialUEMessage, Attach request, PDN connectivity request
+10.0.2.1 10.0.2.246 S1AP/NAS-EPS DownlinkNASTransport, Identity request
+10.0.2.246 10.0.2.1 S1AP/NAS-EPS UplinkNASTransport, Identity response
+10.0.2.1 10.0.2.246 S1AP/NAS-EPS DownlinkNASTransport, Authentication request
+10.0.2.246 10.0.2.1 S1AP/NAS-EPS UplinkNASTransport, Authentication response
+10.0.2.1 10.0.2.246 S1AP/NAS-EPS DownlinkNASTransport, Security mode command
+10.0.2.246 10.0.2.1 S1AP/NAS-EPS UplinkNASTransport, Security mode complete
+10.0.2.1 10.0.2.246 S1AP/NAS-EPS DownlinkNASTransport, ESM information request
+10.0.2.246 10.0.2.1 S1AP/NAS-EPS UplinkNASTransport, ESM information response
+10.0.2.1 10.0.2.246 S1AP/NAS-EPS InitialContextSetupRequest, Attach accept, Activate default EPS bearer context request
+10.0.2.246 10.0.2.1 S1AP UECapabilityInfoIndication, UECapabilityInformation
+10.0.2.246 10.0.2.1 S1AP InitialContextSetupResponse
+10.0.2.246 10.0.2.1 S1AP/NAS-EPS UplinkNASTransport, Attach complete, Activate default EPS bearer context accept
+10.0.2.1 10.0.2.246 S1AP/NAS-EPS DownlinkNASTransport, EMM information
+```
diff --git a/docusaurus/docs/lte/pipelined.md b/docusaurus/docs/lte/pipelined.md
new file mode 100644
index 0000000..bde9e9f
--- /dev/null
+++ b/docusaurus/docs/lte/pipelined.md
@@ -0,0 +1,196 @@
+---
+id: version-1.0.0-pipelined
+title: Pipelined
+hide_title: true
+original_id: pipelined
+---
+# Pipelined
+## Overview
+
+Pipelined is the control application that programs rules in Open vSwitch (OVS). In implementation, Pipelined is a set of network services that are chained together. These services can be chained and enabled/disabled through the REST API in Orchestrator.
+
+### Open vSwitch & OpenFlow
+
+[Open vSwitch (OVS)](http://docs.openvswitch.org/en/latest/intro/what-is-ovs/) is a virtual switch that implements the [OpenFlow](https://en.wikipedia.org/wiki/OpenFlow) protocol. Pipelined services program rules in OVS to implement basic PCEF functionality for user plane traffic.
+
+The OpenFlow pipeline of OVS contains 255 flow tables. Pipelined splits the tables into two categories:
+ - Main table (Table 1 - 20)
+ - Scratch table (Table 21 - 254)
+
+
+
+[*Source: OpenFlow Specification*](https://www.opennetworking.org/wp-content/uploads/2014/10/openflow-spec-v1.4.0.pdf)
+
+Each service is associated with a main table, which is used to forward traffic between different services. Services can optionally claim scratch tables, which are used for complex flow matching and processing within the same service. See [Services](#services) for a detailed breakdown of each Pipelined service.
+
+Each flow table is programmed by a single service through OpenFlow and it can contain multiple flow entries. When a packet is forwarded to a table, it is matched against the flow entries installed in the table and the highest-priority matching flow entry is selected. The actions defined in the selected flow entry will be applied to the packet.
+
+### Ryu
+
+[Ryu](https://ryu.readthedocs.io/en/latest/getting_started.html) is a Python library that provides an API wrapper for programming OVS.
+
+Pipelined services are implemented as Ryu applications (controllers) under the hood. Ryu apps are single-threaded entities that communicate using an event model. Generally, each controller is assigned a table and manages its flows.
+
+## Services
+### Static Services
+
+Static services include mandatory services (such as OAI and inout) which are always enabled, and services with a set table number. Static services can be configured in the YAML config.
+
+```
+ GTP port Local Port
+ Uplink Downlink
+ | |
+ | |
+ V V
+ -------------------------------
+ | Table 0 |
+ | GTP APP (OAI) |
+ |- sets IMSI metadata |
+ |- sets tunnel id on downlink |
+ |- sets eth src/dst on uplink |
+ -------------------------------
+ |
+ V
+ -------------------------------
+ | Table 1 |
+ | inout |
+ |- sets direction bit |
+ -------------------------------
+ |
+ V
+ -------------------------------
+ | Table 2 |
+ | ARP |
+ |- Forwards non-ARP traffic |
+ |- Responds to ARP requests w/| ---> Arp traffic - LOCAL
+ | ovs bridge MAC |
+ -------------------------------
+ |
+ V
+ -------------------------------
+ | Table 3 |
+ | access control |
+ |- Forwards normal traffic |
+ |- Drops traffic with ip |
+ | address that matches the |
+ | ip blacklist |
+ -------------------------------
+ |
+ V
+ Configurable apps managed by cloud <---> Scratch tables
+ (Tables 4-19) (Tables 21 - 254)
+ |
+ V
+ -------------------------------
+ | Table 20 |
+ | inout |
+ |- Forwards uplink traffic to |
+ | LOCAL port |
+ |- Forwards downlink traffic |
+ | to GTP port |
+ -------------------------------
+ | |
+ | |
+ V V
+ GTP port Local Port
+ downlink uplink
+
+```
+
+### Configurable Services
+
+These services can be enabled and ordered from the Orchestrator cloud. `mconfig` is used to stream the list of enabled services to the gateway.
+
+Table numbers are dynamically assigned to these services and depend on their order.
+
+```
+ -------------------------------
+ | Table X |
+ | metering |
+ |- Assigns unique flow id to |
+ | IP traffic |
+ |- Receives flow stats from |
+ | OVS and forwards to cloud |
+ -------------------------------
+
+ -------------------------------
+ | Table X |
+ | DPI |
+ |- Assigns App ID to each new |
+ | IP tuple encountered |
+ |- Optional, requires separate|
+ | DPI engine |
+ -------------------------------
+
+ ------------------------------- -------------------------------
+ | Table X | | Scratch Table 1 |
+ | enforcement | --->| redirect |
+ |- Activates/deactivates rules| |- Drop all non-HTTP traffic |
+ | for a subscriber | | for redirected subscribers |
+ | |<--- | |
+ | | | |
+ ------------------------------- -------------------------------
+ |
+ | In relay mode only -------------------------------
+ --------------------->| Scratch Table 2 |
+ | enforcement stats |
+ |- Keeps track of flow stats |
+ | and sends to sessiond |
+ | |
+ | |
+ -------------------------------
+```
+
+### Reserved registers
+
+[Nicira extension](https://ryu.readthedocs.io/en/latest/nicira_ext_ref.html#module-ryu.ofproto.nicira_ext) for OpenFlow provides additional registers (0 - 15) that can be set and matched. The table below lists the registers used in Pipelined.
+
+Register | Type | Use | Set by
+---------|------------|----------------------|-----------------------------
+metadata | Write-once | Stores IMSI | Table 0 (GTP application)
+reg0 | Scratch | Temporary Arithmetic | Any
+reg1 | Global | Direction bit | Table 1 (inout application)
+reg2 | Local | Policy number | Enforcement app
+reg3 | Local | App ID | DPI app
+reg4 | Local | Policy version number| Enforcement app
+
+## Testing
+
+### Scripts
+
+Some scripts in `/lte/gateway/python/scripts` may come in handy for testing. These scripts should be run in the virtualenv, so `magtivate` needs to be run first to enter it.
+
+- `pipelined_cli.py` can be used to make calls to the RPC API
+  - Some commands require sudo privileges. To run the script as sudo in the virtualenv, use `venvsudo pipelined_cli.py`
+ - Example:
+
+```bash
+$ ./pipelined_cli.py enforcement activate_dynamic_rule --imsi IMSI12345 --rule_id rule1 --priority 110 --hard_timeout 60
+```
+
+```bash
+$ venvsudo ./pipelined_cli.py enforcement display_flows
+```
+
+- `fake_user.py` can be used to debug Pipelined without an eNodeB. It creates a fake_user OVS port and an interface with the same name and IP (10.10.10.10). Any traffic sent through the interface will traverse the pipeline, as if it were sent from a user IP (192.168.128.200 by default).
+ - Example:
+
+```bash
+$ ./fake_user.py create --imsi IMSI12345
+$ sudo curl --interface fake_user -vvv --ipv4 http://www.google.com > /dev/null
+```
+
+
+### Unit Tests
+
+See the [Unit Test README](pipelined_tests.md) for more details.
+
+### Integration Tests
+
+Traffic integration tests cover the end to end flow of Pipelined. See the [Integration Test README](s1ap_tests.md) for more details.
+
+## Additional Readings
+
+[OpenFlow Specification](https://www.opennetworking.org/wp-content/uploads/2014/10/openflow-spec-v1.4.0.pdf)
+
+[Ryu API Doc](https://ryu.readthedocs.io/en/latest/api_ref.html)
diff --git a/docusaurus/docs/lte/pipelined_tests.md b/docusaurus/docs/lte/pipelined_tests.md
new file mode 100644
index 0000000..41be521
--- /dev/null
+++ b/docusaurus/docs/lte/pipelined_tests.md
@@ -0,0 +1,172 @@
+---
+id: version-1.0.0-pipelined_tests
+title: Pipelined testing framework
+hide_title: true
+original_id: pipelined_tests
+---
+# Pipelined testing framework
+## Overview
+The testing framework aims to isolate pipelined for better testing. This is
+achieved by running tests on a separate bridge with only some of the
+pipelined apps running. Additionally, by inserting OVS *forwarding* flows we
+isolate testing to specific tables.
+
+The framework can also be used for integration testing, using a processing
+thread with hub queues; an integ_test flag is provided in the test config.
+This means pipelined tests can work with gRPC.
+
+## Functionality breakdown
+This is a high-level explanation of what happens when running a test. One of the
+main principles in designing this framework was making it as component-based as
+possible so that it's easy to add or replace parts of the process.
+
+### Launch pipelined application directly (not as services)
+The first step is to launch the pipelined controllers that we want to test.
+By launching the Ryu applications directly instead of using services, we get
+references to the instantiated controllers and can test their functionality
+directly.
+
+### Isolate the table that is being tested
+As we want to run unit tests, it's necessary to isolate specific tables. This is
+done by inserting special *forwarding* flows that both forward all packets
+to the specified table and set the required register values that
+would have been set by the skipped tables (such as metadata and reg1).
+
+### Insert flow rules if needed (e.g. subscriber policy rules)
+Having references to pipelined controllers makes it simple to insert flows into
+OVS. The test framework provides an API to add PolicyRules for subscribers.
+
+### Using Scapy insert packets into OVS
+For inserting packets, the testing framework uses the Scapy library. A wrapper
+is provided for easier packet building and insertion. After sending packets,
+it's necessary to wait for them to be received and processed by OVS; the example
+test files have wait functions to achieve this.
+
+## Testing controller
+The testing controller is a Ryu app used for instantiating testing
+flows (table isolation) and for querying flow stats. It is only used
+for testing purposes and only runs when invoked in tests.
+
+## API variations
+Initially the testing framework was developed using REST and gRPC. The RyuRPC*,
+RyuRest* classes are still present but are deprecated. No active tests use them
+as they require a running pipelined service to function.
+The primary API is RyuDirect*; all active tests use it. gRPC calls will work
+with this framework later, after some threading fixes.
+
+## Writing a new test
+Example test files are a good place to see the framework in action; the
+`pipelined_test_util.py` file also provides convenience functions for easier and
+faster test writing.
+
+### Setup that can be used for multiple tests, this should go in `setUpClass`
+**Setup a new bridge or use the production bridge**
+```
+BRIDGE = 'testing_br'
+IFACE = 'testing_br'
+BridgeTools.create_bridge(BRIDGE, IFACE)
+```
+
+**Start ryu apps on a separate thread**
+```
+# Set the futures for pipelined controller references
+enforcement_controller_reference = Future()
+testing_controller_reference = Future()
+
+# Build a test_setup for launching ryu apps
+test_setup = TestSetup(
+ apps=[PipelinedController.Enforcement,
+ PipelinedController.Testing],
+ references={
+ PipelinedController.Enforcement:
+ enforcement_controller_reference,
+ PipelinedController.Testing:
+ testing_controller_reference
+ },
+ config={
+ 'bridge_name': cls.BRIDGE,
+ 'bridge_ip_address': '192.168.128.1',
+ 'nat_iface': 'eth2',
+ 'enodeb_iface': 'eth1'
+ },
+ mconfig=None,
+ loop=None
+)
+
+# Start the apps from the test_setup config
+cls.thread = start_ryu_app_thread(test_setup)
+
+# Wait for apps to start, retrieve references
+cls.enforcement_controller = enforcement_controller_reference.result()
+cls.testing_controller = testing_controller_reference.result()
+```
+
+### Unit test example
+**Setup basic information/constants**
+```
+# Setup subscriber info imsi, ip and a PolicyRule
+imsi = 'IMSI010000000088888'
+sub_ip = '192.168.128.74'
+flow_list1 = [FlowDescription(
+ match=FlowMatch(
+ ipv4_dst='45.10.0.0/24', direction=FlowMatch.UPLINK),
+ action=FlowDescription.PERMIT)
+]
+policy = PolicyRule(id='simple_match', priority=2, flow_list=flow_list1)
+
+pkts_matched = 256
+pkts_sent = 4096
+```
+
+**Setup the testing framework classes**
+```
+# Create a subscriber context, used for adding PolicyRules
+sub_context = RyuDirectSubscriberContext(
+ imsi, sub_ip, self.enforcement_controller
+).add_dynamic_rule(policy)
+
+# Create a table isolator from subscriber context, will set metadata/reg1,
+# forward the packets to table 5
+isolator = RyuDirectTableIsolator(
+ RyuForwardFlowArgsBuilder.from_subscriber(sub_context.cfg)
+ .build_requests(),
+ self.testing_controller
+)
+
+# Create a packet sender, an ip packet for testing our PolicyRule
+pkt_sender = ScapyPacketInjector(self.IFACE)
+packet = IPPacketBuilder()\
+ .set_ip_layer('45.10.0.0/20', sub_ip)\
+ .set_ether_layer(self.MAC_DEST, "00:00:00:00:00:00")\
+ .build()
+
+# Generate a flow query for checking the stats of the added rule
+flow_query = FlowQuery(
+ self.TID, self.testing_controller,
+ match=flow_match_to_match(flow_list1[0].match)
+)
+```
+
+**Test & Verify everything works**
+```
+# Verify aggregate table stats
+# Each FlowTest provides a query and number of packets that query should match
+# wait_after_send function ensures all packets are processed before verifying
+flow_verifier = FlowVerifier([
+ FlowTest(FlowQuery(self.TID, self.testing_controller), pkts_sent),
+ FlowTest(flow_query, pkts_matched)
+], lambda: wait_after_send(self.testing_controller))
+
+# Initialize all contexts and then send packets
+# isolator - inserts the flow forward rules that forward traffic to table 5,
+# set the required registers (metadata, reg1)
+#
+# sub_context - adds subscriber flows, activates the PolicyRule provided
+#
+# flow_verifier - gathers FlowTest stats on when initialized and when exiting
+with isolator, sub_context, flow_verifier:
+ pkt_sender.send(packet)
+
+# Asserts that conditions were met for each provided FlowTest
+flow_verifier.verify()
+```
diff --git a/docusaurus/docs/lte/redirectd.md b/docusaurus/docs/lte/redirectd.md
new file mode 100644
index 0000000..da8c1a8
--- /dev/null
+++ b/docusaurus/docs/lte/redirectd.md
@@ -0,0 +1,116 @@
+---
+id: version-1.0.0-redirectd
+title: Redirection
+hide_title: true
+original_id: redirectd
+---
+# Redirection
+### Overview
+When we enable redirection for a subscriber, the next request (next packet) is
+intercepted by the redirection server. The subscriber then completes the TCP
+handshake with the redirection server. After the TCP handshake is complete, the
+subscriber sends an HTTP GET request and receives an HTTP 302 response from
+the redirection server. It then establishes a new TCP connection with the
+redirect address provided in the 302 response, and traffic to this address
+is allowed through pipelined.
+
+HTTPS is not supported, as this isn't possible without SSL certificates.
+
+### Server details
+Redirectd runs a Flask server which sends back HTTP 302 (redirect) responses.
+The server uses Redis to look up redirect information for incoming requests.
+Pipelined saves this information for each subscriber, keyed by the source IP
+of the request. If no such information is found, the server returns a 404;
+this shouldn't happen and means the redirect info wasn't properly saved.
+
+Redirectd is also a dynamic service; it is only launched when the mconfig
+`dynamic_services` array has a 'redirectd' entry.
+
+
+### Redirection in pipelined
+EnforcementController instantiates the required flows for forwarding subscriber
+traffic to the redirection server. Pipelined also saves the redirect
+information in Redis, using the subscriber IP as the key (mobilityd is used to
+get the subscriber IP from the IMSI).
+All of this is only done when the subscriber's PolicyRule has redirection enabled.
+Example PolicyRule with enabled redirection:
+```
+policy = PolicyRule(
+ id='redirect_test',
+ priority=3,
+ flow_list=flow_list,
+ redirect=RedirectInformation(
+ support=1,
+ address_type=2,
+ server_address="http://about.sha.ddih.org/"
+ )
+)
+```
+
+*Description of added flows:*
+* Add flow to allow UDP traffic so DNS queries can go through
+* Add flows with a higher priority that allow traffic to and from the
+ redirection address provided in the redirect rule
+  - if address_type is a URL, submit a DNS query and allow access to the resolved IPs;
+    the resolved IPs are stored in a TTL cache to decrease the number of DNS requests
+  - if address_type is an IPv4 address, allow access to that redirection IP address
+  - ignore IPv6 address_type redirection, as we don't support it
+  - ignore SIP_URI address_type, as it is not implemented
+* Intercept TCP traffic from the UE and send it to the redirection server; also
+intercept TCP traffic from the redirection server and send it back to the UE.
+This is done by adding an OVS flow with a learn action (when packets from UE
+hit this flow a new flow will be instantiated to intercept traffic from
+Redirection server)
+ - Flow matching TCP traffic from the user with port 80, modify&send packets
+ to the Redirection server. The learn action will 'save' the original dst_ip
+ address by loading it into the instantiated flow
+ - Flow instantiated from the learn action, matching TCP traffic from the
+ server with the UE ip_addr/tcp_sport, modify&send packets back to UE
+* Drop other traffic (default for all subscribers)
+
+### Packet path breakdown
+```
+Packet protocol diagram (only includes fields that are being checked):
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| Source IP Address | Destination IP Address |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| TCP Source Port | TCP Destination Port |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+UE ip: 192.168.128.9
+Remote url ip: 185.199.110.8
+Redirect server ip: 192.168.128.1
+
+For the tcp handshake, initial HTTP request the traffic flow looks like this:
+
+UE sends a packet to remote url
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| 192.168.128.9 | 185.199.110.8 |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| 43040 | 80 |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+Packet gets modified in EnforcementController, sent to the Redirection server
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| 192.168.128.9 | *192.168.128.1* |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| 43040 | 80 |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+Redirection server responds, packet is sent back into OVS
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| 192.168.128.1 | 192.168.128.9 |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| 80 | 43040 |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+Packet gets modified in EnforcementController, sent back to the UE
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| *185.199.110.8* | 192.168.128.9 |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| 80 | 43040 |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+After getting a 302 response from the redirect server the traffic can go
+straight to the redirected address without being changed in pipelined
+```
diff --git a/docusaurus/docs/lte/s1ap_tests.md b/docusaurus/docs/lte/s1ap_tests.md
new file mode 100644
index 0000000..6afab66
--- /dev/null
+++ b/docusaurus/docs/lte/s1ap_tests.md
@@ -0,0 +1,110 @@
+---
+id: version-1.0.0-s1ap_tests
+title: S1AP Integration Tests
+hide_title: true
+original_id: s1ap_tests
+---
+# S1AP Integration Tests
+Current testing workflow for VM-only S1AP integration tests. We cover gateway-only tests, cloud-included tests, and some general notes.
+
+Our VM-only tests use 2 to 4 Vagrant-managed VMs hosted on the local device (laptop):
+
+- *magma*, i.e. magma-dev or gateway
+- *magma_test*, i.e. s1ap_tester
+- *cloud*
+- *datastore*
+
+## Gateway-only tests
+
+These tests use only the *magma* and *magma_test* VMs. The *magma_test* VM abstracts away the UE and eNodeB, while the *magma* VM acts as the gateway.
+
+### Gateway VM setup
+
+Spin up and provision the gateway VM, then make and start its services:
+
+1. From `magma/lte/gateway` on the host machine: `vagrant up magma && vagrant ssh magma`
+1. Now in the gateway VM: `cd $MAGMA_ROOT/lte/gateway && make run`
+
+### Test VM setup
+
+Spin up and provision the s1ap tester's VM, run make, then make again in the integ_tests directory.
+
+1. From `magma/lte/gateway` on the host machine: `vagrant up magma_test && vagrant ssh magma_test`
+1. Now in the *magma_test* VM:
+   1. `cd $MAGMA_ROOT/lte/gateway/python && make`
+   1. `cd $MAGMA_ROOT/lte/gateway/python/integ_tests && make`
+ 1. `deactivate; magtivate`
+
+You will see "-bash: deactivate: command not found" if this is the first time
+you are compiling the tester code. It can be ignored.
+
+### Run tests
+
+From `$MAGMA_ROOT/lte/gateway/python/integ_tests` on the *magma_test* VM, run
+either individual tests or the full suite of tests. A safe, non-flaky test to run is `s1aptests/test_attach_detach.py`.
+
+**Note**: after make-ing, run `deactivate; magtivate`. Make sure that you are "magtivate-d" before all tests (your command prompt will include `(python)`).
+
+* Individual test(s): `make integ_test TESTS=`
+* All tests: `make integ_test`
+
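+For example, to run just the attach/detach test mentioned above:
+
+```bash
+make integ_test TESTS=s1aptests/test_attach_detach.py
+```
+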
+**Note**: The traffic tests will fail as traffic server is not running in this
+setup. Look at the section below on running traffic tests.
+
+To run the tests when upstreaming to OAI use the environment variable `TEST_OAI_UPSTREAM` from the *magma_test* vm. This will use OAI's HSS and disable our mobilityd e.g. `TEST_OAI_UPSTREAM make integ_test`
+
+### Running uplink/downlink traffic tests
+
+1. On the *magma* VM, run, `disable-tcp-checksumming`
+
+1. On the *magma_test* VM, `disable-tcp-checksumming`
+
+1. Start the traffic server VM from the host, `vagrant up magma_trfserver && vagrant ssh magma_trfserver`
+
+1. From *magma_trfserver* VM, run `disable-tcp-checksumming && trfgen-server`
+
+Running `make integ_test` in *magma_test* VM should succeed now.
+
+## Cloud-included
+
+These tests mirror the gateway-only tests, except they make their calls to the cloud's REST API, rather than directly to the gateway over gRPC. For a given update made via the REST API (e.g. add a subscriber, update a mobility config), these tests poll the gateway over gRPC until the cloud and gateway responses match. Thus, these tests are much slower than the gateway-only tests since the gateway's polling mechanism is a bit slow.
+
+These tests use all 4 VMs.
+
+**Note**: Cloud-included tests are currently only supported for `test_attach_detach.py`, but can easily be added for the rest of our integration test suite.
+
+### Gateway setup
+
+Same as for gateway-only tests.
+
+### Magma_test setup
+
+Same as for gateway-only tests.
+
+### Cloud setup
+
+Spin up and provision the *cloud* and *datastore* VMs, then make.
+
+1. From `magma/orc8r/cloud` on the host machine: `vagrant up datastore && vagrant up cloud && vagrant ssh cloud`
+1. Now in the *cloud* VM: `cd ~/magma/orc8r/cloud && make run`
+
+### Other setup
+
+From `magma/lte/gateway` on your host device run `fab s1ap_setup_cloud`
+
+Using the *cloud* VM requires a few extra steps, which are handled by the above [fab](http://www.fabfile.org/) command. In summary:
+
+- Tell the *magma* VM to use the *cloud* VM for its REST API calls (clean up previous prod certs if any)
+- Decrease the gateway's streamer timeout
+
+### Run tests
+
+The integration tests are told to use the cloud by setting the environment variable `MAGMA_S1APTEST_USE_CLOUD` on the *magma_test* VM.
+
+So from `$MAGMA_ROOT/lte/gateway/python/integ_tests` on the *magma_test* VM run e.g. `MAGMA_S1APTEST_USE_CLOUD=1 make integ_test TESTS=s1aptests/test_attach_detach.py` to run a cloud-included version of `test_attach_detach`.
+
+## Notes
+
+- Restart the *magma* VM (`vagrant reload magma`) on an assertion error involving `ENB_S1_SETUP_RESP`. This is a known issue.
+- See *[Bindings for Magma's REST API](https://fb.quip.com/4tmUAtlox4Oy)* for notes on the Python bindings for our REST API generated by [swagger-codegen](https://github.com/swagger-api/swagger-codegen).
+- It may be cleaner to set the host using the [configuration class](https://github.com/swagger-api/swagger-codegen/blob/master/samples/client/petstore/python/petstore_api/configuration.py). This is also where we can set SSL options.
diff --git a/docusaurus/docs/lte/setup.md b/docusaurus/docs/lte/setup.md
new file mode 100644
index 0000000..c257ea5
--- /dev/null
+++ b/docusaurus/docs/lte/setup.md
@@ -0,0 +1,33 @@
+---
+id: version-1.0.0-setup
+title: AGW Setup (With Vagrant)
+sidebar_label: Setup (With Vagrant)
+hide_title: true
+original_id: setup
+---
+# Access Gateway Setup (With Vagrant)
+### Prerequisites
+To develop and manage a Magma VM, you must have the following applications installed locally:
+
+* Virtualbox
+* Vagrant
+* Ansible
+
+### Steps
+
+To bring up an Access Gateway (AGW) VM using Vagrant:
+
+* Run the following command:
+
+``HOST:magma/lte/gateway USER$ vagrant up magma``
+
+Vagrant will bring up the VM, then Ansible will provision the VM.
+
+
+* Once the Access Gateway VM is up and provisioned, run the following commands:
+
+``HOST:magma/lte/gateway USER$ vagrant ssh magma``
+``AGW:~ USER$ cd magma/lte/gateway``
+``AGW:~/magma/lte/gateway USER$ make run``
+
+Once the Access Gateway VM is running successfully, proceed to attaching the eNodeB.
diff --git a/docusaurus/docs/lte/setup_baremetal.md b/docusaurus/docs/lte/setup_baremetal.md
new file mode 100644
index 0000000..d392903
--- /dev/null
+++ b/docusaurus/docs/lte/setup_baremetal.md
@@ -0,0 +1,146 @@
+---
+id: version-1.0.0-setup_baremetal
+title: AGW Setup (Bare Metal)
+sidebar_label: Setup (Bare Metal)
+hide_title: true
+original_id: setup_baremetal
+---
+# AGW installation on baremetal
+
+
+### HW requirements
+- Ansible host
+- AGW host, 64bit-X86 machine
+  Two ethernet ports. We use enp1s0 and enp2s0 in this guide. They might have different names on your hardware, so just replace enp1s0 and enp2s0 with your current interface names throughout this guide.
+  One port is for the SGi interface (default: enp1s0) and one for the S1 interface (default: enp2s0)
+
+### Install Ansible (on Ansible host)
+- Mac Installation
+```bash
+brew install Ansible
+```
+
+- Install ansible on a RHEL/CentOS Linux based system
+```bash
+sudo yum install ansible
+```
+
+- Install ansible on a Debian/Ubuntu Linux based system
+```bash
+sudo apt-get install software-properties-common
+sudo apt-add-repository ppa:ansible/ansible
+sudo apt-get update
+sudo apt-get install ansible
+```
+
+- Install ansible using pip
+```bash
+pip install ansible
+```
+
+- Using source code
+``` bash
+cd ~
+git clone git://github.com/ansible/ansible.git
+cd ./ansible
+source ./hacking/env-setup
+```
+When running Ansible from a git checkout, remember that you will need to set up your environment every time you want to use it, or you can add it to your bashrc file:
+```bash
+echo "export ANSIBLE_HOSTS=~/ansible_hosts" >> ~/.bashrc
+echo "source ~/ansible/hacking/env-setup" >> ~/.bashrc
+```
+
+### Install Debian Stretch (on AGW host)
+
+1. Create boot USB stick
+
+- Download .iso image from [Debian mirror](https://cdimage.debian.org/cdimage/archive/9.13.0/amd64/iso-cd/debian-9.13.0-amd64-netinst.iso)
+- Create bootable usb using etcher [tutorial here](https://tutorials.ubuntu.com/tutorial/tutorial-create-a-usb-stick-on-macos#0)
+- Boot your AGW host from USB
+ (Press F11 to select boot sequence, :warning: This might be different for
+ your machine). If you see 2 options to boot from USB, select the non-UEFI
+ option.
+- Install and configure your access gateway according to your network defaults.
+  - Make sure to enable the SSH server and utilities (untick everything else)
+- Connect your SGi interface to the internet and select this port during the
+installation process to get an IP using DHCP.
+
+2. Prepare AGW
+ - Change interface names
+
+ ```bash
+ su
+
+ sed -i 's/GRUB_CMDLINE_LINUX=""/GRUB_CMDLINE_LINUX="net.ifnames=0 biosdevname=0"/g' /etc/default/grub
+
+ grub-mkconfig -o /boot/grub/grub.cfg
+ sed -i 's/enp1s0/eth0/g' /etc/network/interfaces
+
+   echo "auto eth1
+   iface eth1 inet static
+   address 10.0.2.1
+   netmask 255.255.255.0" > /etc/network/interfaces.d/eth1
+
+ reboot
+ ```
+
+ - Make magma user a sudoer on AGW host
+
+ ```bash
+ su
+ apt install -y sudo python-minimal
+ adduser magma sudo
+ echo "magma ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers
+ ```
+
+ - Generate key on your Ansible host
+
+ ``` bash
+ cd .ssh
+ ssh-keygen
+ chmod 600 id_rsa
+ ssh-add id_rsa
+ cat id_rsa.pub # copy text
+ ```
+
+ - On AGW host, add the pub key to authorized_keys
+
+ ```bash
+ su magma
+ mkdir -p ~/.ssh
+ vi ~/.ssh/authorized_keys # paste pub key inside
+ ```
+
+ - Install Magma
+
+ ``` bash
+ #Clone magma repository on Ansible host
+   git clone https://github.com/facebookincubator/magma.git ~/magma
+ ```
+
+ - Prepare for deployment
+
+ ``` bash
+ AGW_IP=10.0.2.1
+ USERNAME=magma
+ echo "[ovs_build]
+ $AGW_IP ansible_ssh_port=22 ansible_ssh_user=$USERNAME ansible_ssh_private_key_file=~/.ssh/id_rsa
+ [ovs_deploy]
+ $AGW_IP ansible_ssh_port=22 ansible_ssh_user=$USERNAME ansible_ssh_private_key_file=~/.ssh/id_rsa
+ " >> ~/magma/lte/gateway/deploy/agw_hosts
+ ```
+
+ - Build ovs
+
+ ``` bash
+ cd ~/magma/lte/gateway/deploy
+ ansible-playbook -e "MAGMA_ROOT='~/magma' OUTPUT_DIR='/tmp'" -i agw_hosts ovs_gtp.yml
+ ```
+
+ - Deploy AGW
+
+ ``` bash
+ cd ~/magma/lte/gateway/deploy
+ ansible-playbook -i agw_hosts -e "PACKAGE_LOCATION='/tmp'" ovs_deploy.yml
+ ```
diff --git a/docusaurus/docs/lte/setup_deb.md b/docusaurus/docs/lte/setup_deb.md
new file mode 100644
index 0000000..fb828d6
--- /dev/null
+++ b/docusaurus/docs/lte/setup_deb.md
@@ -0,0 +1,84 @@
+---
+id: version-1.0.0-setup_deb
+title: AGW Setup (Bare Metal)
+sidebar_label: Setup (Bare Metal)
+hide_title: true
+original_id: setup_deb
+---
+# Access Gateway Setup (On Bare Metal)
+## Prerequisites
+
+To setup a Magma Access Gateway, you will need a machine that
+satisfies the following requirements:
+
+
+- Docker host where the container AGW_DEPLOY will be built. This container can
+ be run directly on your machine or a remote host, as long as it can reach
+ your Access Gateway. The `agw_prepare.sh` script will assign the static IP
+ address 10.0.2.1 to the `eth1` interface, so on your host, assign the
+ interface that you are using to connect to the AGW the IP address `10.0.2.2`
+ with netmask `255.255.255.0`, and set the priority of this interface to
+ maximum.
+
+- AGW_HOST: 64bit-X86 machine, hardware strongly recommended (not virtualized).
+ You will need two ethernet ports. We use enp1s0 and enp2s0 in this guide.
+ They might have different names on your hardware so just replace enp1s0 and
+  enp2s0 with your current interface names throughout this guide.
+ One port is for the SGi interface (default: enp1s0) and one for the S1
+ interface (default: enp2s0). Note that the `agw_prepare.sh` script will
+ rename the `enp1s0` interface to `eth0`.
+
+## Deployment
+### 1. Create boot USB stick and install Debian on your AGW host
+
+- Download .iso image from [Debian mirror](https://cdimage.debian.org/cdimage/archive/9.13.0/amd64/iso-cd/debian-9.13.0-amd64-netinst.iso)
+- Create bootable usb using etcher [tutorial here](https://tutorials.ubuntu.com/tutorial/tutorial-create-a-usb-stick-on-macos#0)
+- Boot your AGW host from USB
+ (Press F11 to select boot sequence, :warning: This might be different for
+ your machine). If you see 2 options to boot from USB, select the non-UEFI
+ option.
+- Install and configure your access gateway according to your network defaults.
+  - Make sure to enable the SSH server and utilities (untick everything else)
+- Connect your SGi interface to the internet and select this port during the
+installation process to get an IP using DHCP.
+
+### 2. Prepare AGW_HOST
+- [AGW_HOST] Prepare AGW_HOST
+
+```bash
+su
+wget https://raw.githubusercontent.com/magma/magma/v1.0.0/lte/gateway/deploy/agw_prepare.sh
+sh agw_prepare.sh
+```
+
+A prompt will pop up asking if you want to stop the removal of linux-image-4.9.0-11-amd64; please hit: No
+
+### 3. Prepare AGW_DEPLOY
+- [AGW_DEPLOY] Build and run AGW_DEPLOY container
+
+```bash
+git clone https://github.com/magma/magma.git ~/magma
+cd ~/magma
+git fetch && git fetch --tags
+git checkout v1.0.0
+
+docker build --build-arg CACHE_DATE="$(date)" -t agw_deploy -f lte/gateway/docker/deploy/Dockerfile .
+docker run -it agw_deploy bash
+scp ~/.ssh/id_rsa.pub magma@10.0.2.1:/home/magma/.ssh/authorized_keys
+```
+
+### 4. Build openvswitch on Access Gateway
+- [AGW_DEPLOY] Run build playbook
+
+``` bash
+cd ~/magma/lte/gateway/deploy
+ansible-playbook -e "MAGMA_ROOT='~/magma' OUTPUT_DIR='/tmp'" -i agw_hosts ovs_build.yml
+```
+
+### 5. Deploy Access Gateway
+- [AGW_DEPLOY] Run deploy playbook
+
+``` bash
+cd ~/magma/lte/gateway/deploy
+ansible-playbook -i agw_hosts -e "PACKAGE_LOCATION='/tmp'" ovs_deploy.yml
+```
diff --git a/docusaurus/docs/lte/tr069.md b/docusaurus/docs/lte/tr069.md
new file mode 100644
index 0000000..ea9e3cb
--- /dev/null
+++ b/docusaurus/docs/lte/tr069.md
@@ -0,0 +1,72 @@
+---
+id: version-1.0.0-tr069
+title: Adding TR-069 support for an eNodeB
+hide_title: true
+original_id: tr069
+---
+# Add eNB TR-069 Support
+## enodebd Overview
+
+The enodebd service is responsible for any TR-069 management of eNodeB devices.
+As such, you’ll only need to modify enodebd to add support for a new eNB.
+
+At a high level, enodebd brings up a server, the tr069_server, and when the
+tr069_server receives an HTTP request, it translates the contained SOAP message
+into the corresponding model representing the message, e.g. Inform,
+GetParameterRequest, etc. It takes the message, and passes it on to the
+StateMachineManager.
+
+The StateMachineManager, as the name implies, manages different state machines.
+One state machine is used for each eNB that the Magma AGW is managing. Using
+the IP of the HTTP request, the StateMachineManager routes the request to the
+correct state machine (also called ‘Handler’).
+
+Each EnodebAcsStateMachine contains a representation of the configuration that
+we believe the eNB has. It also contains the configuration that we desire to
+set on it. These are labeled as the ‘device configuration’ and ‘desired
+configuration’. Each state machine will attempt to configure its corresponding
+eNB to the desired configuration.
+
+Configuration through TR-069 is done in the ‘Provisioning Session’. The state
+machine has a state for each step of the provisioning session, each with its
+own logic for how to process the incoming TR-069 message, and how to construct
+the TR-069 response. Most of these states are in
+enodebd/state_machines/enb_acs_states.py, since the provisioning session
+happens nearly the same way for most devices. There are differences though,
+and so there is a different state machine for each supported eNB device model,
+which can be found under enodebd/devices.
+
+
+## Adding an eNB
+To add TR-069 support for a device, you will need its TR-069 data model, as
+well as its hardware model and software version. A new state machine should be
+created under enodebd/devices.
+
+For the data model, the same parameter will have different paths for different
+devices, and may even have its value formatted differently. For these
+formatting differences, the ‘transform’ functions should be added to transform
+the parameter value from the formatting that the eNB uses, to the common
+formatting that Magma enodebd understands, and vice-versa.
+
+For constructing the desired-configuration of the eNB, there will be
+differences between devices. Attach an EnodebConfigurationPostProcessor to the
+eNB state machine, and add any logic for modifying the desired-configuration
+beyond the defaults that enodebd already does.
+
+The provisioning session can occur a little differently for each eNB. The state
+map attached to each state machine lets you customize the state machine for
+differences between devices. Some devices require rebooting for parameter
+changes to take effect, for example. You can add/remove behavior through this
+map, and also add custom states.
+
+[An example pull request for adding an eNB](https://github.com/facebookincubator/magma/commit/e1d4564f7daa7a4c1be135e8dbffe7a10bfa4e34)
+
+## Testing
+Make sure that the eNB is configured to reach out to
+baiomc.cloudapp.net:48080, so that Magma’s DNS hijacking works, and routes
+TR-069 messages through iptables to enodebd’s TR-069 server. Debugging can most
+easily be done with tcpdump, and then viewing the SOAP messages through
+Wireshark.
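+
+For example, a capture you could take on the AGW while the eNB attempts its
+provisioning session (48080 is the TR-069 port referenced above; adjust it if
+your setup differs):
+
+```bash
+# Capture TR-069 traffic to a file, then open it in Wireshark to read the SOAP messages
+sudo tcpdump -i any port 48080 -w /tmp/tr069.pcap
+```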
+
+Magma's dnsd service configures dnsmasq for DNS hijacking. See: dnsd.yml. You
+can modify dnsd.yml and restart the service for further configuration.
diff --git a/docusaurus/docs/orc8r/deploy_build.md b/docusaurus/docs/orc8r/deploy_build.md
new file mode 100644
index 0000000..d29d784
--- /dev/null
+++ b/docusaurus/docs/orc8r/deploy_build.md
@@ -0,0 +1,38 @@
+---
+id: version-1.0.0-deploy_build
+title: Building Orchestrator
+hide_title: true
+original_id: deploy_build
+---
+
+# Building Orchestrator Components
+
+Start up your Docker daemon, `cd` to where you've cloned Magma, then:
+
+```bash
+cd magma/orc8r/cloud/docker
+./build.py -a
+```
+
+If this is your first time building Orchestrator, this may take a while. We
+recommend continuing onto the next step (Terraforming cloud resources) in a
+different terminal instance or tab and coming back to this section when the
+builds are completed.
+
+When this job finishes, upload these images to your image registry:
+
+```bash
+export MAGMA_TAG=v1.0.0
+for image in proxy controller prometheus-cache config-manager grafana
+do
+ ../../../orc8r/tools/docker/publish.sh -r REGISTRY -i ${image} -v ${MAGMA_TAG}
+done
+```
+
+While we're here, you can build and publish the NMS containers as well:
+
+```bash
+cd magma/nms/fbcnms-projects/magmalte
+docker-compose build magmalte
+COMPOSE_PROJECT_NAME=magmalte ../../../orc8r/tools/docker/publish.sh -r REGISTRY -i magmalte -v ${MAGMA_TAG}
+```
diff --git a/docusaurus/docs/orc8r/deploy_dns.md b/docusaurus/docs/orc8r/deploy_dns.md
new file mode 100644
index 0000000..43addf4
--- /dev/null
+++ b/docusaurus/docs/orc8r/deploy_dns.md
@@ -0,0 +1,44 @@
+---
+id: version-1.0.0-deploy_dns
+title: DNS Resolution
+hide_title: true
+original_id: deploy_dns
+---
+# DNS Resolution
+
+In the following steps, replace `yourdomain.com` with the TLD or subdomain that
+you've chosen to host Orchestrator on. It's important that you follow the
+naming conventions for subdomains in order for your Access Gateways to
+successfully communicate with Orchestrator.
+
+First, grab the public-facing hostnames for the ELB instance fronting the
+internet-facing Orchestrator components
+(`orc8r-bootstrap-legacy`, `orc8r-clientcert-legacy`, `nginx-proxy`):
+
+```bash
+$ kubectl -n magma get svc -o='custom-columns=NAME:.metadata.name,HOSTNAME:.status.loadBalancer.ingress[0].hostname'
+
+NAME HOSTNAME
+magmalte
+nginx-proxy ELB-ADDRESS1.elb.amazonaws.com
+orc8r-bootstrap-legacy ELB-ADDRESS2.elb.amazonaws.com
+orc8r-clientcert-legacy ELB-ADDRESS3.elb.amazonaws.com
+orc8r-controller
+orc8r-graphite
+orc8r-metrics
+orc8r-prometheus-cache
+orc8r-proxy ELB-ADDRESS4.elb.amazonaws.com
+```
+
+Set up the following CNAME records for your chosen domain:
+
+| Subdomain | CNAME |
+|-----------|-------|
+| nms.yourdomain.com | nginx-proxy hostname |
+| controller.yourdomain.com | orc8r-clientcert-legacy hostname |
+| bootstrapper-controller.yourdomain.com | orc8r-bootstrap-legacy hostname |
+| api.yourdomain.com | orc8r-clientcert-legacy hostname |
+
+Wait for DNS records to propagate, then if you go to
+`https://nms.yourdomain.com`, you should be able to log in as the admin user
+that you created earlier and create your first network.
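+
+To check propagation, you can query the records directly, for example with
+`dig` (assuming you've replaced `yourdomain.com` with your real domain):
+
+```bash
+dig +short nms.yourdomain.com CNAME
+dig +short controller.yourdomain.com CNAME
+```
+
+Each query should return the corresponding ELB hostname from the table above.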
diff --git a/docusaurus/docs/orc8r/deploy_install.md b/docusaurus/docs/orc8r/deploy_install.md
new file mode 100644
index 0000000..f1b0d27
--- /dev/null
+++ b/docusaurus/docs/orc8r/deploy_install.md
@@ -0,0 +1,505 @@
+---
+id: version-1.0.0-deploy_install
+title: Installing Orchestrator
+hide_title: true
+original_id: deploy_install
+---
+# Installing Orchestrator
+
+## Creating Secrets
+
+IMPORTANT: in all the below instructions, replace `yourdomain.com` with the
+actual domain/subdomain which you've chosen to host Orchestrator on.
+
+We recommend storing the following secrets on S3 in AWS, or any similar
+object storage service on your preferred cloud provider.
+
+Start first by creating a new directory somewhere to hold the secrets while
+you create them:
+
+```bash
+mkdir -p ~/secrets/
+```
+
+### SSL Certificates
+
+You will need the following certificates and private keys:
+
+1. The public SSL certificate for your Orchestrator domain,
+with CN=*.yourdomain.com. This can be an SSL certificate chain, but it must be
+in one file
+2. The private key which corresponds to the above SSL certificate
+3. The rootCA certificate which verifies your SSL certificate.
+
+If you already have these files, you can do the following:
+
+1. Rename your public SSL certificate to `controller.crt`
+2. Rename your SSL certificate's private key to `controller.key`
+3. Rename your SSL certificate's root CA to `rootCA.pem`
+4. Put these 3 files under a subdirectory `certs`
+
+If you aren't worried about a browser warning, you can also self-sign these
+certs. Change the values of the DN prompts as necessary, but pay *very* close
+attention to the common names - these are very important to get right!
+
+```bash
+$ mkdir -p ~/secrets/certs
+$ cd ~/secrets/certs
+$ openssl genrsa -out rootCA.key 2048
+
+Generating RSA private key, 2048 bit long modulus
+
+$ openssl req -x509 -new -nodes -key rootCA.key -sha256 -days 3650 -out rootCA.pem
+
+You are about to be asked to enter information that will be incorporated
+into your certificate request.
+What you are about to enter is what is called a Distinguished Name or a DN.
+There are quite a few fields but you can leave some blank
+For some fields there will be a default value,
+If you enter '.', the field will be left blank.
+-----
+Country Name (2 letter code) []:US
+State or Province Name (full name) []:CA
+Locality Name (eg, city) []:Menlo Park
+Organization Name (eg, company) []:Facebook
+Organizational Unit Name (eg, section) []:Magma
+Common Name (eg, fully qualified host name) []:rootca.yourdomain.com
+Email Address []:admin@yourdomain.com
+
+$ openssl genrsa -out controller.key 2048
+
+Generating RSA private key, 2048 bit long modulus
+
+$ openssl req -new -key controller.key -out controller.csr
+
+You are about to be asked to enter information that will be incorporated
+into your certificate request.
+What you are about to enter is what is called a Distinguished Name or a DN.
+There are quite a few fields but you can leave some blank
+For some fields there will be a default value,
+If you enter '.', the field will be left blank.
+-----
+Country Name (2 letter code) []:US
+State or Province Name (full name) []:CA
+Locality Name (eg, city) []:Menlo Park
+Organization Name (eg, company) []:Facebook
+Organizational Unit Name (eg, section) []:Magma
+Common Name (eg, fully qualified host name) []:*.yourdomain.com
+Email Address []:admin@yourdomain.com
+
+Please enter the following 'extra' attributes
+to be sent with your certificate request
+A challenge password []:
+
+$ openssl x509 -req -in controller.csr -CA rootCA.pem -CAkey rootCA.key -CAcreateserial -out controller.crt -days 365 -sha256
+
+Signature ok
+subject=/C=US/ST=CA/L=Menlo Park/O=Facebook/OU=Magma/CN=*.yourdomain.com/emailAddress=admin@yourdomain.com
+Getting CA Private Key
+
+$ rm controller.csr rootCA.srl
+```
+
+At this point, regardless of whether you self-signed your certs or acquired
+them from a certificate provider, your `certs` subdirectory should look like
+this:
+
+```bash
+$ ls
+controller.crt controller.key rootCA.pem rootCA.key
+```
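+
+If you self-signed, you can sanity-check that the certificate chain is
+consistent before moving on:
+
+```bash
+openssl verify -CAfile rootCA.pem controller.crt
+# Expected output: controller.crt: OK
+```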
+
+We *strongly* recommend moving `rootCA.key` to a more secure location at this
+point. By default, the secrets Helm subchart described below will upload it to
+EKS as a Kubernetes secret and mount it on the controller pods. If the private
+key portion of the root CA is compromised, all TLS traffic to and from your
+cluster will be compromised.
+
+Keep rootCA.key in a place where you can access it again - you will need it to
+renew the SSL certificate for the Orchestrator controller when it expires.
+
+### Application Certificates and Keys
+
+`certifier` is the Orchestrator service which signs client certificates. All
+access to Orchestrator is authenticated by client SSL certificates, so you'll
+need to create the verifying certificate for `certifier`.
+
+Again, pay *very* close attention to the CN.
+
+```bash
+$ openssl genrsa -out certifier.key 2048
+
+Generating RSA private key, 2048 bit long modulus
+
+$ openssl req -x509 -new -nodes -key certifier.key -sha256 -days 3650 -out certifier.pem
+
+...
+Common Name (eg, fully qualified host name) []:certifier.yourdomain.com
+
+$ openssl genrsa -out bootstrapper.key 2048
+
+Generating RSA private key, 2048 bit long modulus
+
+$ ls
+bootstrapper.key certifier.key certifier.pem controller.crt controller.key rootCA.pem rootCA.key
+```
+
+The last command created a private key for the `bootstrapper` service, which
+is the mechanism by which Access Gateways acquire their client certificates
+from `certifier`.
+
+### Environment Secrets
+
+Go into the AWS management console, choose "RDS", and find the hostname of your
+orc8r RDS instance (make sure not to choose the NMS RDS instance). Note this
+down, then continue:
+
+```bash
+mkdir -p ~/secrets/envdir
+cd ~/secrets/envdir
+echo "STREAMER,SUBSCRIBERDB,METRICSD,CERTIFIER,BOOTSTRAPPER,METERINGD_RECORDS,ACCESSD,OBSIDIAN,DISPATCHER,DIRECTORYD" > CONTROLLER_SERVICES
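+# In DATABASE_SOURCE below, fill in password= with the orc8r RDS password you
+# chose when terraforming and host= with the RDS hostname you noted above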
+echo "dbname=orc8r user=orc8r password= host=" > DATABASE_SOURCE
+```
+
+### Static Configuration Files
+
+Orchestrator microservices can be configured with static YAML files. In this
+deployment, the only one you'll have to create will be for `metricsd`:
+
+```bash
+mkdir -p ~/secrets/configs/orc8r
+cd ~/secrets/configs/orc8r
+touch metricsd.yml
+```
+
+Put the following contents into `metricsd.yml`:
+
+```
+profile: "prometheus"
+
+prometheusQueryAddress: "http://orc8r-prometheus:9090"
+prometheusPushAddresses:
+ - "http://orc8r-prometheus-cache:9091/metrics"
+
+alertmanagerApiURL: "http://orc8r-alertmanager:9093/api/v2/alerts"
+prometheusConfigServiceURL: "http://orc8r-config-manager:9100"
+alertmanagerConfigServiceURL: "http://orc8r-config-manager:9101"
+```
+
+## Initial Helm Deploy
+
+Copy your secrets into the Helm subchart where you cloned Magma:
+
+```bash
+cp -r ~/secrets magma/orc8r/cloud/helm/orc8r/charts/secrets/.secrets
+```
+
+We need to set up the EKS cluster before we can `helm deploy` to it:
+
+```bash
+cd magma/orc8r/cloud/helm/orc8r
+kubectl apply -f tiller-rbac-config.yaml
+helm init --service-account tiller --history-max 200
+# Wait for tiller to become 'Running'
+kubectl get pods -n kube-system | grep tiller
+
+kubectl create namespace magma
+```
+
+Next, create a `vals.yml` somewhere in a source controlled directory that you
+own (e.g. adjacent to your terraform scripts). Fill in the values in caps
+with the correct values for your docker registry and Orchestrator hostname:
+
+```
+imagePullSecrets:
+ - name: orc8r-secrets-registry
+
+secrets:
+ create: true
+ docker:
+ registry: YOUR-DOCKER-REGISTRY
+ username: YOUR-DOCKER-USERNAME
+ password: YOUR-DOCKER-PASSWORD
+
+
+proxy:
+ image:
+ repository: YOUR-DOCKER-REGISTRY/proxy
+ tag: YOUR-CONTAINER-TAG
+
+ replicas: 2
+
+ service:
+ name: orc8r-bootstrap-legacy
+ type: LoadBalancer
+
+ spec:
+ hostname: controller.YOURDOMAIN.COM
+
+ nodeSelector:
+ worker-type: controller
+
+controller:
+ image:
+ repository: YOUR-DOCKER-REGISTRY/controller
+ tag: YOUR-CONTAINER-TAG
+
+ replicas: 2
+
+ migration:
+ new_handlers: 1
+ new_mconfigs: 1
+
+ nodeSelector:
+ worker-type: controller
+
+metrics:
+ imagePullSecrets:
+ - name: orc8r-secrets-registry
+
+ metrics:
+ volumes:
+ prometheusData:
+ volumeSpec:
+ hostPath:
+ path: /prometheusData
+ type: DirectoryOrCreate
+ prometheusConfig:
+ volumeSpec:
+ hostPath:
+ path: /configs/prometheus
+ type: DirectoryOrCreate
+
+ prometheus:
+ create: true
+ nodeSelector:
+ worker-type: metrics
+
+ configmanager:
+ create: true
+ image:
+ repository: YOUR-DOCKER-REGISTRY/config-manager
+ tag: YOUR-CONTAINER-TAG
+ nodeSelector:
+ worker-type: metrics
+
+ alertmanager:
+ create: true
+ nodeSelector:
+ worker-type: metrics
+
+ prometheusCache:
+ create: true
+ image:
+ repository: YOUR-DOCKER-REGISTRY/prometheus-cache
+ tag: YOUR-CONTAINER-TAG
+ limit: 500000
+ nodeSelector:
+ worker-type: metrics
+
+ grafana:
+ create: true
+ image:
+ repository: YOUR-DOCKER-REGISTRY/grafana
+ tag: YOUR-CONTAINER-TAG
+ nodeSelector:
+ worker-type: metrics
+
+nms:
+ imagePullSecrets:
+ - name: orc8r-secrets-registry
+
+ magmalte:
+ manifests:
+ secrets: false
+ deployment: false
+ service: false
+ rbac: false
+
+ image:
+ repository: YOUR-DOCKER-REGISTRY/magmalte
+ tag: YOUR-CONTAINER-TAG
+
+ env:
+ api_host: controller.YOURDOMAIN.COM
+ mysql_host: YOUR RDS MYSQL HOST
+ mysql_user: magma
+ mysql_pass: YOUR RDS MYSQL PASSWORD
+ nginx:
+ manifests:
+ configmap: false
+ secrets: false
+ deployment: false
+ service: false
+ rbac: false
+
+ service:
+ type: LoadBalancer
+
+ deployment:
+ spec:
+ ssl_cert_name: controller.crt
+ ssl_cert_key_name: controller.key
+```
+
+NMS won't work without a client certificate, so we've turned off those
+deployments for now. We'll create an admin cert and upgrade the deployment
+with NMS once the core Orchestrator components are up.
+
+At this point, if your `vals.yml` is good, you can do your first helm deploy:
+
+```bash
+cd magma/orc8r/cloud/helm/orc8r
+helm install --name orc8r --namespace magma . --values=PATH_TO_VALS/vals.yml
+```
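+
+You can keep an eye on the release while the pods come up, for example:
+
+```bash
+helm status orc8r
+kubectl -n magma get pods
+```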
+
+## Creating an Admin User
+
+First, find an `orc8r-controller-` pod in k8s:
+
+```bash
+$ kubectl -n magma get pods
+
+NAME READY STATUS RESTARTS AGE
+orc8r-configmanager-896d784bc-chqr7 1/1 Running 0 X
+orc8r-controller-7757567bf5-cm4wn 1/1 Running 0 X
+orc8r-controller-7757567bf5-jshpv 1/1 Running 0 X
+orc8r-alertmanager-c8dc7cdb5-crzpl 1/1 Running 0 X
+orc8r-grafana-6446b97885-ck6g8 1/1 Running 0 X
+orc8r-prometheus-6c67bcc9d8-6lx22 1/1 Running 0 X
+orc8r-prometheus-cache-6bf7648446-9t9hx 1/1 Running 0 X
+orc8r-proxy-57cf989fcc-cg54z 1/1 Running 0 X
+orc8r-proxy-57cf989fcc-xn2cw 1/1 Running 0 X
+```
+
+Then:
+
+```bash
+export CNTLR_POD=$(kubectl -n magma get pod -l app.kubernetes.io/component=controller -o jsonpath='{.items[0].metadata.name}')
+kubectl exec -it ${CNTLR_POD} bash
+
+# The following commands are to be run inside the pod
+(pod)$ cd /var/opt/magma/bin
+(pod)$ envdir /var/opt/magma/envdir ./accessc add-admin -cert admin_operator admin_operator
+(pod)$ openssl pkcs12 -export -out admin_operator.pfx -inkey admin_operator.key.pem -in admin_operator.pem
+
+Enter Export Password:
+Verifying - Enter Export Password:
+
+(pod)$ exit
+```
+
+Now on your host, copy down the client certificates for the admin operator we
+just created into the secrets directory:
+
+```bash
+cd ~/secrets/certs
+for certfile in admin_operator.pem admin_operator.key.pem admin_operator.pfx
+do
+ kubectl -n magma cp ${CNTLR_POD}:/var/opt/magma/bin/${certfile} ./${certfile}
+done
+```
+
+`admin_operator.pem` and `admin_operator.key.pem` are the files that NMS will
+use to authenticate itself with the Orchestrator API. `admin_operator.pfx` is
+for you to add to your keychain if you'd like to use the Orchestrator REST API
+directly (on MacOS, double-click this file and add it to your keychain).
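+
+As a quick smoke test of the client certificate, you can also hit the REST API
+with curl (a sketch; the `api.yourdomain.com` hostname and `/magma/networks`
+path assume the DNS records and API routing described in these docs; add
+`--cacert rootCA.pem` or `-k` if you self-signed the server certificate):
+
+```bash
+curl --cert admin_operator.pem --key admin_operator.key.pem \
+  https://api.yourdomain.com/magma/networks
+```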
+
+## Deploying NMS
+
+Now that we've got an admin operator cert, we can deploy NMS. Edit the
+`vals.yml` from above, and change the `nms` section to the following:
+
+```
+nms:
+ imagePullSecrets:
+ - name: orc8r-secrets-registry
+
+ magmalte:
+ manifests:
+ secrets: true
+ deployment: true
+ service: true
+ rbac: false
+
+ image:
+ repository: YOUR-DOCKER-REGISTRY/magmalte
+ tag: YOUR-CONTAINER-TAG
+
+ env:
+ api_host: controller.YOURDOMAIN.COM
+ mysql_host: YOUR RDS MYSQL HOST
+ mysql_user: magma
+ mysql_pass: YOUR RDS MYSQL PASSWORD
+ nginx:
+ manifests:
+ configmap: true
+ secrets: true
+ deployment: true
+ service: true
+ rbac: false
+
+ service:
+ type: LoadBalancer
+
+ deployment:
+ spec:
+ ssl_cert_name: controller.crt
+ ssl_cert_key_name: controller.key
+```
+
+You'll just flip all the `manifests` keys to `true` except `rbac`.
+
+Next, copy your `secrets` directory back to the chart (to pick up the admin
+certificate), and upload it to S3 (this step is optional, but you should have
+some plan for where you're storing these secrets).
+
+```bash
+rm -r magma/orc8r/cloud/helm/orc8r/charts/secrets/.secrets
+cp -r ~/secrets magma/orc8r/cloud/helm/orc8r/charts/secrets/.secrets
+aws s3 cp magma/orc8r/cloud/helm/orc8r/charts/secrets/.secrets s3://your-bucket --recursive
+# Delete the local secrets after you've uploaded them
+rm -r ~/secrets
+```
+
+We can upgrade the Helm deployment to include NMS components now:
+
+```bash
+cd magma/orc8r/cloud/helm/orc8r
+helm upgrade orc8r . --values=PATH_TO_VALS/vals.yml
+kubectl -n magma get pods
+```
+
+Wait for the NMS pods (`nms-magmalte`, `nms-nginx-proxy`) to transition into
+`Running` state, then create a user on the NMS:
+
+```bash
+kubectl exec -it -n magma \
+ $(kubectl -n magma get pod -l app.kubernetes.io/component=magmalte -o jsonpath='{.items[0].metadata.name}') -- \
+ yarn setAdminPassword
+```
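+
+`setAdminPassword` also accepts the admin email and password as arguments (as
+shown in the NMS setup docs), so you can create the user non-interactively,
+for example:
+
+```bash
+kubectl exec -it -n magma \
+  $(kubectl -n magma get pod -l app.kubernetes.io/component=magmalte -o jsonpath='{.items[0].metadata.name}') -- \
+  yarn setAdminPassword admin@yourdomain.com YOUR_NMS_PASSWORD
+```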
+
+## Upgrading the Deployment
+
+We recommend an upgrade procedure along these lines:
+
+1. `git checkout` the tag of the most recent release on Github
+2. Rebuild all the images and push them
+3. Update the image tags in vals.yml
+4. `aws s3 cp` the secrets bucket in S3 into `.secrets` under the secrets
+subchart in Magma
+5. Upgrade helm deployment with `helm upgrade`
+6. Delete the `.secrets` folder
+
+We've automated steps 4-6 with a fabfile under
+`magma/orc8r/cloud/helm/orc8r/fabfile.py`. You can upgrade your deployment
+using this fabfile like this:
+
+```bash
+fab deploy:PATH_TO_VALS_YML,PATH_TO_TERRAFORM_KUBECONFIG,S3_BUCKET_NAME
+```
+
+where `PATH_TO_VALS_YML` is the full path to `vals.yml` on your machine,
+`PATH_TO_TERRAFORM_KUBECONFIG` is the full path to the `kubeconfig_orc8r` file
+produced by Terraform, and `S3_BUCKET_NAME` is the name of the S3 bucket where
+you've uploaded your secrets.
diff --git a/docusaurus/docs/orc8r/deploy_intro.md b/docusaurus/docs/orc8r/deploy_intro.md
new file mode 100644
index 0000000..d75131a
--- /dev/null
+++ b/docusaurus/docs/orc8r/deploy_intro.md
@@ -0,0 +1,21 @@
+---
+id: version-1.0.0-deploy_intro
+title: Introduction
+hide_title: true
+original_id: deploy_intro
+---
+# Deploying Orchestrator: Introduction
+
+These pages will walk through the full process of spinning up a full
+Orchestrator deployment, from building the various containers that you'll need
+to deploying them onto Amazon Elastic Kubernetes Service (EKS). This
+installation guide targets *production* environments - if you aren't ready for
+this, the developer documentation will be up shortly.
+
+Familiarity with the AWS console and the Kubernetes command line is expected.
+The instructions in this section have been tested on MacOS and Linux. If you
+are deploying from a Windows host, some shell commands may require adjustments.
+
+If you want to get a head start on the development setup, you can build the
+Orchestrator containers following this guide and use docker-compose at
+`magma/orc8r/cloud/docker` to spin up the local version of Orchestrator.
diff --git a/docusaurus/docs/orc8r/deploy_terraform.md b/docusaurus/docs/orc8r/deploy_terraform.md
new file mode 100644
index 0000000..e3c7161
--- /dev/null
+++ b/docusaurus/docs/orc8r/deploy_terraform.md
@@ -0,0 +1,119 @@
+---
+id: version-1.0.0-deploy_terraform
+title: Terraforming Orchestrator on AWS
+hide_title: true
+original_id: deploy_terraform
+---
+# Terraforming Orchestrator on AWS
+
+## Pre-Terraform
+
+First, copy the contents of [orc8r/cloud/deploy/terraform](https://github.com/facebookincubator/magma/tree/master/orc8r/cloud/deploy/terraform)
+into a source-controlled directory that you control. This directory contains
+bare-bones Terraform scripts to bring up the raw AWS resources needed for
+Orchestrator. We highly recommend familiarizing yourself with [Terraform](https://www.terraform.io/)
+before continuing - the rest of this guide will assume some familiarity with
+both Terraform and the [AWS CLI](https://aws.amazon.com/cli/).
+
+Adjust the example Terraform files as you see fit. If you aren't familiar with
+Terraform yet, we recommend not changing anything here for now.
+
+Next `cd` to where you've copied the contents of the Terraform directory and
+
+```bash
+$ terraform init
+
+Initializing modules...
+Initializing the backend...
+Initializing provider plugins...
+Terraform has been successfully initialized!
+```
+
+In the AWS console, create or import a new keypair to enable SSH access to the
+worker nodes of the EKS cluster. This can be found in the EC2 dashboard under
+"Key Pairs". If you're creating a new key pair, make sure not to lose the
+private key; you won't be able to recover it from AWS.
+
+Next, create a `vars.tfvars` file in your directory, *add it to your source
+control's .ignore*, and specify your desired RDS password and the name of the
+keypair that you imported or created in the above step:
+
+```bash
+$ cat vars.tfvars
+db_password = "foobar"
+nms_db_password = "foobar"
+key_name = "my_key"
+```
+
+Check the README under the original terraform directory for additional
+variables that you can configure.
+
+Now you're ready to move on:
+
+## Applying Terraform
+
+Execute your standard Terraform workflow and wait for the resources to finish
+provisioning. If you are terraforming on an AWS account that's already being
+used for other purposes, carefully examine Terraform's planned execution before
+continuing.
+
+Note: There is a known issue with the module we use to provision the EKS
+cluster (see https://github.com/facebookincubator/magma/issues/793).
+If you see a stacktrace like the following, simply `terraform apply` again
+and the stack provisioning should succeed:
+
+```
+Error: Provider produced inconsistent final plan
+
+When expanding the plan for module.eks.aws_autoscaling_group.workers[0] to
+include new values learned so far during apply, provider "aws" produced an
+invalid new value for .initial_lifecycle_hook: planned set element
+cty.ObjectVal(map[string]cty.Value{"default_result":cty.UnknownVal(cty.String),
+"heartbeat_timeout":cty.UnknownVal(cty.Number),
+"lifecycle_transition":cty.UnknownVal(cty.String),
+"name":cty.UnknownVal(cty.String),
+"notification_metadata":cty.UnknownVal(cty.String),
+"notification_target_arn":cty.UnknownVal(cty.String),
+"role_arn":cty.UnknownVal(cty.String)}) does not correlate with any element in
+actual.
+```
+
+Once `terraform apply -var-file=vars.tfvars` finishes, there is some additional
+manual setup to perform before our EKS cluster is ready to deploy onto.
+
+First find the public IP address of the metrics instance using
+```bash
+export METRICS_IP=$(aws ec2 describe-instances --filters Name=tag:orc8r-node-type,Values=orc8r-prometheus-node --query 'Reservations[*].Instances[0].PublicIpAddress' --output text)
+echo $METRICS_IP
+```
+
+The Prometheus config manager application expects some configuration files to
+be seeded in the EBS config volume (don't forget to use the correct private
+key in `scp` with the `-i` flag):
+
+```bash
+scp -r config_defaults ec2-user@$METRICS_IP:~
+ssh ec2-user@$METRICS_IP
+[ec2-user@ ~]$ sudo cp -r config_defaults/. /configs/prometheus
+```
+
+Now that you've got your infra set up, we can move on to configuring the EKS cluster.
+
+Assuming you don't have an existing kubeconfig file in `~/.kube/config`, run
+the following. If you do, you can use the `KUBECONFIG` environment variable
+and `kubectl config view --flatten` to concatenate the kubeconfig file that
+Terraform created with your existing kubeconfig (see the sketch after the
+next code block).
+
+```bash
+cp ./kubeconfig_orc8r ~/.kube/config
+```
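+
+If you do already have a kubeconfig, one way to merge the two (a sketch; back
+up your existing config first):
+
+```bash
+KUBECONFIG=~/.kube/config:./kubeconfig_orc8r kubectl config view --flatten > /tmp/merged_kubeconfig
+mv /tmp/merged_kubeconfig ~/.kube/config
+```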
+
+Now we can set up access to the EKS cluster:
+
+```bash
+kubectl apply -f config-map-aws-auth_orc8r.yaml
+```
+
+At this point, our cluster is ready to deploy the application onto.
diff --git a/docusaurus/docs/orc8r/legacy/docker_setup.md b/docusaurus/docs/orc8r/legacy/docker_setup.md
new file mode 100644
index 0000000..84dd829
--- /dev/null
+++ b/docusaurus/docs/orc8r/legacy/docker_setup.md
@@ -0,0 +1,105 @@
+---
+id: version-1.0.0-docker_setup
+title: Docker Setup
+original_id: docker_setup
+---
+## Container setup
+
+Orc8r consists of 2 containers: one for the proxy, and one for all the
+controller services. We use supervisord to spin up multiple services within
+these containers. There are an additional 5 containers for metrics. These are
+used to monitor system and gateway metrics, but are optional if you don't need
+that for your setup.
+
+NOTE: The multiple-services-per-container model was adopted to mirror the
+legacy Vagrant setup and ease migration; we will soon migrate to a
+one-container-per-microservice model, which is more appropriate.
+
+For development, we use a postgresql container as the datastore. For
+production, it is advisable to use a hosted solution like AWS RDS.
+
+NOTE: This guide assumes that you are running the commands inside
+the `magma/orc8r/cloud/docker` directory in your host.
+
+## How to build the images
+
+Since orc8r can include modules outside the magma codebase, we use a wrapper
+python script which creates a temporary folder as the docker build context.
+The temporary folder contains all the modules necessary, and is created based
+on the module.yml config.
+
+Build the docker images using:
+```
+./build.py
+```
+To use a different module.yml file, run something similar to:
+```
+MAGMA_MODULES_FILE=../../../modules.yml ./build.py
+```
+
+NOTE: If you are running on Mac, you may need to increase the memory
+limit of the docker daemon to build the images. Otherwise, you may see an error
+message similar to this:
+`/usr/local/go/pkg/tool/linux_amd64/link: signal: killed`.
+
+## How to run
+
+To run and manage the containers, use the following commands:
+```
+docker-compose up -d
+docker-compose ps
+docker-compose down
+```
+To tail the logs from the containers, use one of these commands:
+```
+docker-compose logs -f
+docker-compose logs -f controller
+```
+To create a shell inside a container, run:
+```
+docker-compose exec controller bash
+```
+
+Similarly for the metrics containers just specify the docker-compose file
+before running a command, such as:
+```
+docker-compose -f docker-compose.metrics.yml up -d
+docker-compose -f docker-compose.metrics.yml ps
+docker-compose -f docker-compose.metrics.yml down
+```
+
+## How to run unit tests
+We use a `test` container for running the go unit tests. Use one of the
+following commands to run the tests in a clean room environment:
+```
+./build.py --tests
+./build.py -t
+```
+The `--mount` option in `build.py` can be used to spin up a test container
+with the code from individual modules mounted, so that we can run individual
+unit tests.
+
+*NOTE: make sure to run `precommit` using mount before submitting a patch*
+
+```
+./build.py -m
+[container] /magma/orc8r/cloud# make precommit
+```
+
+## How to generate code after Protobuf and Swagger changes
+The `--mount` option can also be used to run the codegen scripts for swagger
+and protobufs, after any changes in those files.
+```
+./build.py -m
+[container] /magma/orc8r/cloud# make gen
+```
+
+## Publishing the images
+
+To push the images to a private docker registry, use the following script:
+```
+../../tools/docker/publish.sh -r REGISTRY -i proxy
+../../tools/docker/publish.sh -r REGISTRY -i controller
+
+../../tools/docker/publish.sh -r REGISTRY -i proxy -u USERNAME -p /tmp/password
+```
diff --git a/docusaurus/docs/orc8r/legacy/generic_commands.md b/docusaurus/docs/orc8r/legacy/generic_commands.md
new file mode 100644
index 0000000..10799b5
--- /dev/null
+++ b/docusaurus/docs/orc8r/legacy/generic_commands.md
@@ -0,0 +1,145 @@
+---
+id: version-1.0.0-generic_commands
+sidebar_label: Generic commands framework
+title: Using the generic command framework
+hide_title: true
+original_id: generic_commands
+---
+# Using the generic command framework
+The generic command framework is a way to define commands in the gateway without having to implement all aspects of a command (such as the cloud implementation and handler). Instead, we can define the commands on the gateway, and call the REST endpoint `/networks/{network_id}/gateways/{gateway_id}/command/generic` with the command name and parameters to execute the command.
+
+## Explanation
+
+In `magmad.yml`, the generic command framework is configured with the field `generic_command_config`:
+
+```
+# magmad.yml
+
+generic_command_config:
+ module: ...
+ class: ...
+```
+
+When the gateway starts up, it looks for the class inside that module and sets it as the command executor. The purpose of the command executor is to hold a table that maps command names to async functions, execute those functions, and return the output. When the RPC method `GenericCommand` is called, it will attempt to execute the command, returning the response if successful or an error if a timeout or exception occurs. We can configure the timeout by providing a `timeout_secs` field within `generic_command_config` to determine the number of seconds before a command times out.
+
+On the cloud side, we can send a POST request to `/networks/{network_id}/gateways/{gateway_id}/command/generic` with the following request body:
+
+```
+{
+ "command": "name_of_command",
+ "params": {
+ ...
+ }
+}
+```
+
+## Using the Shell Command Executor
+
+The default provided command executor is `ShellCommandExecutor`, which resides in the module `magma.magmad.generic_command.shell_command_executor`. This command executor runs the shell commands defined under `shell_commands` in `generic_command_config`. For each shell command, we can decide if we want to allow parameters with the field `allow_params`, which will treat the command as a format string. Parameters are read from the field `shell_params` (a list of parameters) within the `params` field of the request. For example:
+
+```
+# magmad.yml
+
+generic_command_config:
+ module: magma.magmad.generic_command.shell_command_executor
+ class: ShellCommandExecutor
+ shell_commands:
+ - name: tail_syslog
+ command: "sudo tail /var/log/syslog -n 20"
+ - name: echo
+ command: "echo {}"
+ allow_params: True
+```
+
+We can then use the API endpoint to execute the command. For example:
+
+```
+POST /networks/{network_id}/gateways/{gateway_id}/command/generic
+
+{
+ "command": "echo",
+ "params": {
+ "shell_params": ["Hello world!"]
+ }
+}
+```
+
+We then get a response with the return code, stderr, and stdout:
+
+```
+{
+ "response": {
+ "returncode": 0,
+ "stderr": "",
+ "stdout": "Hello world!\n"
+ }
+}
+```
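+
+From the command line, the same request can be made with curl and your
+operator certificate (a sketch; the API hostname and `/magma` prefix below are
+examples, substitute your own deployment's values):
+
+```bash
+curl --cert admin_operator.pem --key admin_operator.key.pem \
+  -H 'Content-Type: application/json' \
+  -d '{"command": "echo", "params": {"shell_params": ["Hello world!"]}}' \
+  https://api.yourdomain.com/magma/networks/NETWORK_ID/gateways/GATEWAY_ID/command/generic
+```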
+
+## Creating a custom command executor
+
+The generic command framework is designed to be extensible. If we want more complex functionality, we can define our own command executor and configure `magmad.yml` to use it instead.
+
+All command executors must be instances of `CommandExecutor`, the abstract base class for command executors.
+
+```
+# magma/magmad/generic_command/command_executor.py
+
+class CommandExecutor(ABC):
+ """
+ Abstract class for command executors
+ """
+ def __init__(
+ self,
+ config: Dict[str, Any],
+ loop: asyncio.AbstractEventLoop,
+ ) -> None:
+ self._config = config
+ self._loop = loop
+
+ async def execute_command(
+ self,
+ command: str,
+ params: Dict[str, ParamValueT],
+ ) -> Dict[str, Any]:
+ """
+ Run the command from the dispatch table with params
+ """
+ result = await self.get_command_dispatch()[command](params)
+ return result
+
+ @abstractmethod
+ def get_command_dispatch(self) -> Dict[str, ExecutorFuncT]:
+ """
+ Returns the command dispatch table for this command executor
+ """
+ pass
+```
+
+Command executors must provide a method `get_command_dispatch`, which returns the dispatch table of commands. We can then add our own commands to the dispatch table. Command functions are coroutines that take in a dictionary of parameters as arguments and return a dictionary as a response.
+
+```
+class CustomCommandExecutor(CommandExecutor):
+ def __init__(
+ self,
+ config: Dict[str, Any],
+ loop: asyncio.AbstractEventLoop,
+ ) -> None:
+ ...
+ self._dispatch_table = {
+ "hello_world": self._handle_hello_world
+ }
+
+ def get_command_dispatch(self) -> Dict[str, ExecutorFuncT]:
+ return self._dispatch_table
+
+ async def _handle_hello_world(
+ self,
+ params: Dict[str, ParamValueT]
+ ) -> Dict[str, Any]:
+ return {
+ "hello": "world!"
+ }
+```
+
+
diff --git a/docusaurus/docs/orc8r/legacy/nms_setup.md b/docusaurus/docs/orc8r/legacy/nms_setup.md
new file mode 100644
index 0000000..b9b903f
--- /dev/null
+++ b/docusaurus/docs/orc8r/legacy/nms_setup.md
@@ -0,0 +1,32 @@
+---
+id: version-1.0.0-nms_setup
+title: Setting up the NMS
+hide_title: true
+original_id: nms_setup
+---
+# Setting up the NMS
+The NMS is the UI for managing, configuring, and monitoring networks. To set up the NMS, we will first need the orc8r to be set up (the NMS needs magma certs for the API).
+
+## Setup
+By default, the NMS looks for API certs in `magma/.cache/test_certs`, and uses `host.docker.internal:9443` as the API host. If you wish to use different API certs and/or a different API host, you can create a `.env` file within `magma/nms/fbcnms-projects/magmalte` and specify them there.
+```bash
+API_HOST=example.com
+API_CERT_FILENAME=/path/to/api_operator_cert.pem
+API_PRIVATE_KEY_FILENAME=/path/to/operator_pk.pem
+```
+
+## Running the NMS
+In the `magmalte` directory, start docker containers and create a test user:
+```bash
+HOST [magma]$ cd nms/fbcnms-projects/magmalte
+HOST [magma/nms/fbcnms-projects/magmalte]$ docker-compose up -d
+HOST [magma/nms/fbcnms-projects/magmalte]$ ./scripts/dev_setup.sh
+```
+You may get an error if you run `dev_setup.sh` immediately after `docker-compose up -d`. To resolve this, wait a bit before running the script to let migrations run.
+
+Once you have started the docker containers and created a test user using the `dev_setup.sh` script, go to https://localhost and login with test credentials `admin@magma.test` and `password1234`.
+
+Note: if you want to create a user with a name other than `admin@magma.test`, you can run `setAdminPassword` directly, like so:
+```bash
+HOST [magma/nms/fbcnms-projects/magmalte]$ docker-compose run magmalte yarn run setAdminPassword admin@magma.test password1234
+```
diff --git a/docusaurus/docs/orc8r/legacy/packer.md b/docusaurus/docs/orc8r/legacy/packer.md
new file mode 100644
index 0000000..d6ce5c9
--- /dev/null
+++ b/docusaurus/docs/orc8r/legacy/packer.md
@@ -0,0 +1,61 @@
+---
+id: version-1.0.0-packer
+title: Packer Build
+hide_title: true
+original_id: packer
+---
+# Packer Build
+## Intro
+This directory contains the files needed to create a new Amazon AMI and Vagrant
+boxes to run the magma dev and test instances on. You will need to use this if
+you want to update the base environment, for example, changing the Debian or
+kernel version.
+
+If you're looking to install additional software specific to each box when
+provisioning, you'll want to instead add that to the Ansible playbook.
+
+## Usage
+You'll need to have packer installed. You can get it here:
+https://www.packer.io/downloads.html or through your package manager. Packer
+works by creating an Amazon instance or VirtualBox VM, running some
+provisioning scripts, and then saving it as an AMI/box.
+
+The .json file defines the base image, and is later provisioned by the shell
+scripts.
+
+### Vagrant
+To upload to vagrant cloud, you'll need the upload token from lastpass.
+Once you've made your changes, run
+```
+export ATLAS_TOKEN=
+packer validate debian-stretch-virtualbox.json
+```
+and then
+```
+packer build debian-stretch-virtualbox.json
+```
+Packer is set up to handle installing the base OS and guest additions.
+
+### AWS
+Once you've made your changes, run
+```
+packer validate debian-stretch-aws.json
+```
+and then
+```
+packer build -force \
+-var "aws_access_key=YOUR_ACCESS_KEY" \
+-var "aws_secret_key=YOUR_SECRET_KEY" \
+-var "subnet=YOUR_SUBNET" \
+-var "vpc=YOUR_VPC" \
+debian-stretch-aws.json
+```
+
+where YOUR\_SUBNET and YOUR\_VPC are existing subnets and VPCs in your AWS
+region. The choice of subnet and VPC won't affect the final box; they are
+just the subnet/VPC which the box is launched into while building. The
+subnet/vpc ids should look something like "subnet-8430fce3" and "vpc-7e99b91a".
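+
+If you're not sure which ids to use, you can list the VPCs and subnets in your
+account with the AWS CLI, for example:
+
+```bash
+aws ec2 describe-vpcs --query 'Vpcs[].VpcId' --output text
+aws ec2 describe-subnets --query 'Subnets[].SubnetId' --output text
+```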
+
+After you run packer, it will spit out the AMI id. Make sure you note it down
+so you can launch instances with it. If you forget it, you can find it under
+"My AMIs" in the "Choose an Amazon Machine Image" step.
diff --git a/docusaurus/docs/orc8r/legacy/remote_cli.md b/docusaurus/docs/orc8r/legacy/remote_cli.md
new file mode 100644
index 0000000..882c6ac
--- /dev/null
+++ b/docusaurus/docs/orc8r/legacy/remote_cli.md
@@ -0,0 +1,143 @@
+---
+id: version-1.0.0-remote_cli
+title: Orchestrator Remote CLI - Creating commands guide
+sidebar_label: Orchestrator Remote CLI
+hide_title: true
+original_id: remote_cli
+---
+# Orchestrator Remote CLI - Creating commands guide
+## Creating a Command
+
+There are a couple of steps needed to implement a command.
+
+### Define rpc method in gateway
+
+Create a new RPC method in the service protobuf that you want the method to live in.
+
+```
+// magma/orc8r/protos/magmad.proto
+
+service Magmad {
+ ...
+ rpc Reboot (Void) returns (Void) {}
+ ...
+}
+```
+
+### Implement in gateway
+
+Gateway services should have a gRPC server implementation located in `rpc_servicer.py`. Within the servicer, create a function that implements this RPC method.
+
+```
+# magma/orc8r/gateway/python/magma/magmad/rpc_servicer.py
+
+class MagmadRpcServicer(magmad_pb2_grpc.MagmadServicer):
+ ...
+ def Reboot(self, _, context):
+ """
+ Reboot the gateway device
+ """
+ ...
+```
+
+### Implement gateway api in cloud
+
+Within the cloud service, create `gateway_api.go` that will call service methods. We can create a function to get a connection to the service, and use the dispatcher to forward requests to gateway services, like so:
+
+```
+// magma/orc8r/cloud/go/services/magmad/gateway_api.go
+
+func getGWMagmadClient(networkId string, gatewayId string) (protos.MagmadClient, *grpc.ClientConn, context.Context, error) {
+ ...
+ conn, ctx, err := gateway_registry.GetGatewayConnection("magmad", gwRecord.HwId.Id)
+ ...
+ return protos.NewMagmadClient(conn), conn, ctx, nil
+}
+```
+
+Using this client, we can create a function that calls the method:
+
+```
+// magma/orc8r/cloud/go/services/magmad/gateway_api.go
+
+func GatewayReboot(networkId string, gatewayId string) error {
+ client, conn, ctx, err := getGWMagmadClient(networkId, gatewayId)
+ if err != nil {
+ return err
+ }
+ defer conn.Close()
+ _, err = client.Reboot(ctx, new(protos.Void))
+ return err
+}
+```
+
+### Define REST API endpoint
+
+Each cloud service should have a `swagger` folder that documents service paths and definitions in `swagger.yml`. Add your path (and parameters or definitions, if necessary) to this file:
+
+```
+# magma/orc8r/cloud/go/services/magmad/swagger/swagger.yml
+
+...
+paths:
+ ...
+ /networks/{network_id}/gateways/{gateway_id}/command/reboot:
+ post:
+ summary: Reboot gateway device
+ tags:
+ - Commands
+ parameters:
+ - $ref: './swagger-common.yml#/parameters/network_id'
+ - $ref: './swagger-common.yml#/parameters/gateway_id'
+ responses:
+ '200':
+ description: Success
+ default:
+ $ref: './swagger-common.yml#/responses/UnexpectedError'
+```
+
+### Handler implementation
+
+Each cloud service should have an `obsidian` folder, which contains handlers in `obsidian/handlers` and generated models in `obsidian/models`.
+
+Create your handler function:
+
+```
+// magma/orc8r/cloud/go/services/magmad/obsidian/handlers/gateway_handlers.go
+
+func rebootGateway(c echo.Context) error {
+ ...
+ return c.NoContent(http.StatusOK)
+}
+```
+
+Add your handler to the list of handlers in `GetObsidianHandlers()`.
+
+```
+// magma/orc8r/cloud/go/services/magmad/obsidian/handlers/handlers.go
+
+func GetObsidianHandlers() []handlers.Handler {
+ ...
+ return []handlers.Handler{
+ ...
+ {Path: RebootGateway, Methods: handlers.POST, HandlerFunc: rebootGateway},
+ ...
+ }
+}
+```
+
+Build and see your new endpoint in the swagger UI.
+
+## Adding commands to NMS UI
+Use `MagmaAPIUrls.command()` to get the url to the command endpoint `/networks/{network_id}/gateways/{gateway_id}/command/{command_name}`.
+
+We can then make a request using that url, for example:
+```
+const url = MagmaAPIUrls.command(match, id, commandName);
+
+axios
+ .post(url)
+ .then(_resp => {
+ this.props.alert('Success');
+ })
+  .catch(error => this.props.alert(error.response.data.message));
+```
diff --git a/docusaurus/docs/orc8r/legacy/rest_apis.md b/docusaurus/docs/orc8r/legacy/rest_apis.md
new file mode 100644
index 0000000..5d9eda3
--- /dev/null
+++ b/docusaurus/docs/orc8r/legacy/rest_apis.md
@@ -0,0 +1,27 @@
+---
+id: version-1.0.0-rest_apis
+title: Swagger UI for REST APIs
+hide_title: true
+original_id: rest_apis
+---
+# Swagger UI for REST APIs
+We use [Swagger](https://swagger.io/) for defining the north bound REST APIs, and the APIs can be viewed and tested using the Swagger UI. To use the UI:
+
+```console
+HOST$ open magma/.cache/test_certs
+```
+
+This will open up a finder window. Double-click the `admin_operator.pfx` cert
+in this directory, which will open up Keychain to import the cert. The
+password for the cert is `magma`. If you use Chrome or Safari, this is all you
+need to do. If you use Firefox, copy this file to your desktop, then go to
+`Preferences -> PrivacyAndSecurity -> View Certificates -> Import` and select
+it.
+
+Linux/Windows users should replace the above steps with the system-appropriate
+method to import a client cert.
+
+You can access the orchestrator REST API at https://127.0.0.1:9443/apidocs.
+The SSL cert is self-signed, so click through any security warnings your
+browser gives you. You should be prompted for a client cert, at which point
+you should select the `admin_operator` cert that you added to Keychain above.
diff --git a/docusaurus/docs/orc8r/legacy/testing.md b/docusaurus/docs/orc8r/legacy/testing.md
new file mode 100644
index 0000000..72c092a
--- /dev/null
+++ b/docusaurus/docs/orc8r/legacy/testing.md
@@ -0,0 +1,27 @@
+---
+id: version-1.0.0-testing
+sidebar_label: Testing
+title: Testing in Orchestrator
+hide_title: true
+original_id: testing
+---
+# Testing in Orchestrator
+### Unit Tests
+One easy way to test is to run unit tests. This can be done by running:
+```
+HOST [magma/orc8r/cloud/docker]$ ./build.py --tests
+```
+
+### Run the services and check their health
+Unit tests are great for checking small logic chunks,
+but another way to test is to run the services and check their status.
+The services can be built and started by running
+```
+docker-compose up -d
+```
+
+The state of the containers can be checked by running
+```
+docker-compose ps
+docker-compose logs -f
+```
\ No newline at end of file
diff --git a/docusaurus/docusaurus.config.js b/docusaurus/docusaurus.config.js
new file mode 100644
index 0000000..990db4f
--- /dev/null
+++ b/docusaurus/docusaurus.config.js
@@ -0,0 +1,180 @@
+/**
+ * Copyright 2025 The Magma Authors.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// @ts-check
+// `@type` JSDoc annotations allow editor autocompletion and type checking
+// (when paired with `@ts-check`).
+// There are various equivalent ways to declare your Docusaurus config.
+// See: https://docusaurus.io/docs/api/docusaurus-config
+
+import {themes as prismThemes} from 'prism-react-renderer';
+
+const url = process.env.DOCUSAURUS_URL || 'https://magma.github.io'
+const baseUrl = process.env.DOCUSAURUS_BASE_URL || '/'
+
+// Security note on visibility of this secret in the source code: the API key is
+// not secured by secrecy. It is secured by a referer check for magma.github.io
+// and by a rate limit, both administered on the Algolia site. Linux Foundation
+// has a 1Password file with the login info. For debugging on your own machine
+// set the environment variable to this unsecured key:
+// f95caeb7bc059b294eec88e340e5445b
+const algoliaApiKey =
+ process.env.ALGOLIA_API_KEY || '7b4d4c984e53d3a746869d22ed9e983b';
+
+const magmaGithubUrl = 'https://github.com/magma/magma'
+
+// Path to images for header/footer
+const footerIcon = 'img/magma_icon.png'
+const favicon = 'img/icon.png'
+const magmaLogo = 'img/magma-logo.svg'
+
+/** @type {import('@docusaurus/types').Config} */
+const config = {
+ title: 'Magma Documentation',
+ tagline: 'Magma is an open-source software platform that gives network operators an open, flexible and extendable mobile core network solution.',
+ favicon: favicon,
+
+ url: url, // production url
+  baseUrl: baseUrl, // pathname under which the site is served
+
+ // GitHub pages deployment config.
+ // If you aren't using GitHub pages, you don't need these.
+ organizationName: 'magma',
+ projectName: 'magma',
+
+ onBrokenLinks: 'throw',
+ onBrokenMarkdownLinks: 'warn',
+
+ i18n: {
+ defaultLocale: 'en',
+ locales: ['en', 'es', 'pt'],
+ },
+
+ presets: [
+ [
+ 'classic',
+ /** @type {import('@docusaurus/preset-classic').Options} */
+ ({
+ docs: {
+ sidebarPath: './sidebars.js',
+ },
+ theme: {
+ customCss: './src/css/custom.css',
+ },
+ }),
+ ],
+ ],
+
+ themeConfig: /** @type {import('@docusaurus/preset-classic').ThemeConfig} */
+ (
+ {
+ image: favicon,
+ navbar: {
+ title: 'Magma',
+ logo: {
+ alt: 'Magma Logo',
+ src: magmaLogo,
+ },
+ items: [
+ {
+ type: 'docsVersionDropdown',
+ sidebarId: 'versionSidebar',
+ position: 'left',
+ label: 'Tutorial',
+ },
+ {
+ to: 'https://magmacore.org/',
+ label: 'Magma Website',
+ position: 'left'
+ },
+ {
+ to: '/',
+ label: 'Docs',
+ position: 'left'
+ },
+ {
+ to: magmaGithubUrl,
+ label: 'Code',
+ position: 'left'
+ },
+ {
+ to: 'https://github.com/magma/magma/wiki/Contributor-Guide',
+ label: 'Contributing',
+ position: 'left'
+ },
+ {
+ to: 'https://lf-magma.atlassian.net/wiki/spaces/HOME/overview?mode=global',
+ label: 'Wiki',
+ position: 'left'
+ },
+ {
+ type: 'localeDropdown',
+ position: 'right',
+ },
+ ],
+ },
+ footer: {
+ style: 'dark',
+ links: [
+ {
+ title: 'Community',
+ items: [
+ {
+ label: 'Slack',
+ href: 'https://magmacore.slack.com/ssb/redirect',
+ },
+ ],
+ },
+ ],
+ logo: {
+ alt: 'Magma Logo',
+ src: footerIcon,
+ href: url,
+ height: 100,
+ width: 100,
+ },
+ copyright: `Copyright \u{00A9} ${new Date().getFullYear()} Magma Project. Built with Docusaurus.`,
+ },
+ prism: {
+ theme: prismThemes.github,
+ darkTheme: prismThemes.dracula,
+ defaultLanguage: 'bash',
+ // magicComments: [],
+ },
+ colorMode: {
+ defaultMode: 'light',
+ disableSwitch: false,
+ respectPrefersColorScheme: true,
+ },
+
+ // Enable Algolia DocSearch Functionality within Docusaurus
+ algolia: {
+ appId: 'magma',
+ apiKey: algoliaApiKey,
+ indexName: 'magma',
+ },
+ }
+ ),
+
+ // Add custom scripts here that would be placed in