diff --git a/.claude-plugin/plugin.json b/.claude-plugin/plugin.json index a90aa3d1e..fac3c8986 100644 --- a/.claude-plugin/plugin.json +++ b/.claude-plugin/plugin.json @@ -1,7 +1,7 @@ { "name": "superpowers", - "description": "Core skills library for Claude Code: TDD, debugging, collaboration patterns, and proven techniques", - "version": "3.2.3", + "description": "Comprehensive skills library for Claude Code: TDD, debugging, collaboration, automation, document creation (Word/PDF/Excel/PowerPoint), creative tools, business research, productivity workflows, and AI prompt engineering for Veo3, Midjourney, DALL-E, Flux, Stable Diffusion, Claude, ChatGPT, and Gemini", + "version": "3.7.0", "author": { "name": "Jesse Vincent", "email": "jesse@fsck.com" @@ -9,5 +9,5 @@ "homepage": "https://github.com/obra/superpowers", "repository": "https://github.com/obra/superpowers", "license": "MIT", - "keywords": ["skills", "tdd", "debugging", "collaboration", "best-practices", "workflows"] + "keywords": ["skills", "tdd", "debugging", "collaboration", "best-practices", "workflows", "playwright", "ios", "automation", "productivity", "documents", "pdf", "word", "excel", "powerpoint", "creative", "business", "research", "notebooklm", "prompt-engineering", "ai-prompts", "veo3", "midjourney", "dalle", "stable-diffusion"] } diff --git a/README.md b/README.md index 6776255a0..f4a767b36 100644 --- a/README.md +++ b/README.md @@ -8,6 +8,12 @@ A comprehensive skills library of proven techniques, patterns, and workflows for - **Debugging Skills** - Systematic debugging, root cause tracing, verification - **Collaboration Skills** - Brainstorming, planning, code review, parallel agents - **Development Skills** - Git worktrees, finishing branches, subagent workflows +- **Automation Skills** - Playwright browser testing, iOS simulator automation +- **Productivity Skills** - File organization, Gmail automation, Notion integration +- **Document Skills** - Word, PDF, Excel, PowerPoint creation and manipulation +- **Creative & Media Skills** - Visual design, image enhancement, GIFs, video downloads +- **Business & Research Skills** - Lead research, competitor analysis, NotebookLM integration +- **AI Prompt Engineering** - Expert techniques for Veo3, Midjourney, DALL-E, Flux, Stable Diffusion, Claude, ChatGPT, Gemini - **Meta Skills** - Creating, testing, and sharing skills Plus: @@ -105,6 +111,36 @@ Skills activate automatically when relevant. 
For example: - **testing-skills-with-subagents** - Validate skill quality - **using-superpowers** - Introduction to the skills system +**Automation** (`skills/automation/`) +- **playwright-browser-automation** - Browser testing and automation with Playwright +- **ios-simulator-testing** - iOS app testing with accessibility-first navigation + +**Productivity** (`skills/productivity/`) +- **file-organizer** - Intelligent file and folder organization with duplicate detection +- **gmail-intelligence** - Analyze Gmail data, process email threads, and automate workflows +- **notion-template-processor** - Fill Notion database templates and deliver via email + +**Document Skills** (`skills/documents/`) +- **docx** - Create and edit Word documents with tracked changes and formatting +- **pdf** - Extract text/tables, create, merge, and split PDFs +- **xlsx** - Create Excel spreadsheets with formulas and data analysis +- **pptx** - Create PowerPoint presentations with layouts and charts + +**Creative & Media** (`skills/creative/`) +- **canvas-design** - Visual art creation in PNG and PDF formats +- **image-enhancer** - Upscale and improve image resolution and clarity +- **slack-gif-creator** - Create animated GIFs optimized for Slack +- **theme-factory** - Apply professional themes to documents and slides +- **video-downloader** - Download videos from multiple platforms + +**Business & Research** (`skills/business/`) +- **lead-research-assistant** - Identify and qualify potential business leads +- **competitive-ads-extractor** - Analyze competitor advertising strategies +- **notebooklm** - Query NotebookLM for source-grounded, citation-backed answers + +**AI Prompt Engineering** (`skills/`) +- **prompt-engineer** - Expert prompt engineering for video generation (Veo3), image creation (Midjourney, DALL-E, Flux, Stable Diffusion), and conversational AI (Claude, ChatGPT, Gemini) with platform-specific techniques, parameters, and best practices + ### Commands All commands are thin wrappers that activate the corresponding skill: diff --git a/create-skill-zips.sh b/create-skill-zips.sh new file mode 100755 index 000000000..d4f4c7048 --- /dev/null +++ b/create-skill-zips.sh @@ -0,0 +1,56 @@ +#!/bin/bash +# Script to create ZIP files for all Claude Code skills + +SKILLS_DIR="/home/user/superpowers/skills" +OUTPUT_DIR="/home/user/superpowers/skill-zips" + +# Create output directory +mkdir -p "$OUTPUT_DIR" + +echo "Creating ZIP files for all skills..." +echo "======================================" + +# Counter +count=0 + +# Loop through each directory in skills/ (excluding the 'commands' folder) +for skill_dir in "$SKILLS_DIR"/*; do + # Skip if not a directory + if [ ! -d "$skill_dir" ]; then + continue + fi + + # Get the skill name (directory name) + skill_name=$(basename "$skill_dir") + + # Skip the 'commands' folder as it's not a skill + if [ "$skill_name" = "commands" ]; then + echo "Skipping: $skill_name (not a skill)" + continue + fi + + # Check if SKILL.md exists + if [ ! -f "$skill_dir/SKILL.md" ]; then + echo "WARNING: $skill_name missing SKILL.md - skipping" + continue + fi + + # Create ZIP file + zip_file="$OUTPUT_DIR/${skill_name}.zip" + + echo -n "Creating $skill_name.zip... " + + # Change to skills directory and zip the skill folder + cd "$SKILLS_DIR" || exit 1 + zip -r -q "$zip_file" "$skill_name" + + if [ $? 
-eq 0 ]; then + echo "✓ Done" + ((count++)) + else + echo "✗ Failed" + fi +done + +echo "======================================" +echo "Created $count skill ZIP files in $OUTPUT_DIR" diff --git a/skill-zips/README.md b/skill-zips/README.md new file mode 100644 index 000000000..456d0dacf --- /dev/null +++ b/skill-zips/README.md @@ -0,0 +1,108 @@ +# Superpowers Skill ZIP Files + +This directory contains pre-packaged ZIP files for all Superpowers skills, ready for installation in Claude Desktop or Claude Code. + +## Installation for Claude Desktop + +### Option 1: Install Individual Skills + +Copy the skill ZIP file to your Claude Desktop skills directory: + +```bash +# On macOS +cp <skill-name>.zip ~/Library/Application\ Support/Claude/skills/ + +# On Linux +cp <skill-name>.zip ~/.config/Claude/skills/ + +# On Windows +copy <skill-name>.zip %APPDATA%\Claude\skills\ +``` + +Then extract it in the skills directory. + +### Option 2: Install All Skills + +```bash +# On macOS +cp *.zip ~/Library/Application\ Support/Claude/skills/ +cd ~/Library/Application\ Support/Claude/skills/ +for f in *.zip; do unzip -q "$f"; done + +# On Linux +cp *.zip ~/.config/Claude/skills/ +cd ~/.config/Claude/skills/ +for f in *.zip; do unzip -q "$f"; done +``` + +## Available Skills (37 Total) + +### Testing (3 skills) +- `test-driven-development.zip` - RED-GREEN-REFACTOR cycle +- `condition-based-waiting.zip` - Async test patterns +- `testing-anti-patterns.zip` - Common pitfalls to avoid + +### Debugging (4 skills) +- `systematic-debugging.zip` - 4-phase root cause process +- `root-cause-tracing.zip` - Find the real problem +- `verification-before-completion.zip` - Ensure it's actually fixed +- `defense-in-depth.zip` - Multiple validation layers + +### Collaboration (9 skills) +- `brainstorming.zip` - Socratic design refinement +- `writing-plans.zip` - Detailed implementation plans +- `executing-plans.zip` - Batch execution with checkpoints +- `dispatching-parallel-agents.zip` - Concurrent subagent workflows +- `requesting-code-review.zip` - Pre-review checklist +- `receiving-code-review.zip` - Responding to feedback +- `using-git-worktrees.zip` - Parallel development branches +- `finishing-a-development-branch.zip` - Merge/PR decision workflow +- `subagent-driven-development.zip` - Fast iteration with quality gates + +### Automation (2 skills) +- `playwright-browser-automation.zip` - Browser testing with Playwright +- `ios-simulator-testing.zip` - iOS app testing with accessibility automation + +### Productivity (3 skills) +- `file-organizer.zip` - Intelligent file and folder organization with duplicate detection +- `gmail-intelligence.zip` - Analyze Gmail data, process email threads, and automate workflows +- `notion-template-processor.zip` - Fill Notion database templates and deliver via email + +### Document Skills (4 skills) +- `docx.zip` - Create and edit Word documents with tracked changes and formatting +- `pdf.zip` - Extract text/tables, create, merge, and split PDFs +- `xlsx.zip` - Create Excel spreadsheets with formulas and data analysis +- `pptx.zip` - Create PowerPoint presentations with layouts and charts + +### Creative & Media (5 skills) +- `canvas-design.zip` - Visual art creation in PNG and PDF formats +- `image-enhancer.zip` - Upscale and improve image resolution and clarity +- `slack-gif-creator.zip` - Create animated GIFs optimized for Slack +- `theme-factory.zip` - Apply professional themes to documents and slides +- `video-downloader.zip` - Download videos from multiple platforms + +### Business & Research (3 skills) +- 
`lead-research-assistant.zip` - Identify and qualify potential business leads +- `competitive-ads-extractor.zip` - Analyze competitor advertising strategies +- `notebooklm.zip` - Query NotebookLM for source-grounded, citation-backed answers + +### Meta (4 skills) +- `writing-skills.zip` - Create new skills following best practices +- `sharing-skills.zip` - Contribute skills back via branch and PR +- `testing-skills-with-subagents.zip` - Validate skill quality +- `using-superpowers.zip` - Introduction to the skills system + +## Regenerating ZIP Files + +If you need to regenerate these ZIP files (after making changes to skills): + +```bash +cd /home/user/superpowers +./create-skill-zips.sh +``` + +This will recreate all ZIP files in this directory. + +## License + +See individual skill licenses. Most skills are MIT licensed. diff --git a/skill-zips/brainstorming.zip b/skill-zips/brainstorming.zip new file mode 100644 index 000000000..09c3150cd Binary files /dev/null and b/skill-zips/brainstorming.zip differ diff --git a/skill-zips/canvas-design.zip b/skill-zips/canvas-design.zip new file mode 100644 index 000000000..1ea478b7b Binary files /dev/null and b/skill-zips/canvas-design.zip differ diff --git a/skill-zips/competitive-ads-extractor.zip b/skill-zips/competitive-ads-extractor.zip new file mode 100644 index 000000000..bd91b8903 Binary files /dev/null and b/skill-zips/competitive-ads-extractor.zip differ diff --git a/skill-zips/condition-based-waiting.zip b/skill-zips/condition-based-waiting.zip new file mode 100644 index 000000000..637cd6744 Binary files /dev/null and b/skill-zips/condition-based-waiting.zip differ diff --git a/skill-zips/defense-in-depth.zip b/skill-zips/defense-in-depth.zip new file mode 100644 index 000000000..9ca87524c Binary files /dev/null and b/skill-zips/defense-in-depth.zip differ diff --git a/skill-zips/dispatching-parallel-agents.zip b/skill-zips/dispatching-parallel-agents.zip new file mode 100644 index 000000000..163fbd289 Binary files /dev/null and b/skill-zips/dispatching-parallel-agents.zip differ diff --git a/skill-zips/docx.zip b/skill-zips/docx.zip new file mode 100644 index 000000000..a5f5a8887 Binary files /dev/null and b/skill-zips/docx.zip differ diff --git a/skill-zips/executing-plans.zip b/skill-zips/executing-plans.zip new file mode 100644 index 000000000..7412fcea9 Binary files /dev/null and b/skill-zips/executing-plans.zip differ diff --git a/skill-zips/file-organizer.zip b/skill-zips/file-organizer.zip new file mode 100644 index 000000000..319459205 Binary files /dev/null and b/skill-zips/file-organizer.zip differ diff --git a/skill-zips/finishing-a-development-branch.zip b/skill-zips/finishing-a-development-branch.zip new file mode 100644 index 000000000..be98b6cb3 Binary files /dev/null and b/skill-zips/finishing-a-development-branch.zip differ diff --git a/skill-zips/gmail-intelligence.zip b/skill-zips/gmail-intelligence.zip new file mode 100644 index 000000000..d4af3d012 Binary files /dev/null and b/skill-zips/gmail-intelligence.zip differ diff --git a/skill-zips/image-enhancer.zip b/skill-zips/image-enhancer.zip new file mode 100644 index 000000000..1581f3852 Binary files /dev/null and b/skill-zips/image-enhancer.zip differ diff --git a/skill-zips/ios-simulator-testing.zip b/skill-zips/ios-simulator-testing.zip new file mode 100644 index 000000000..500afa8ad Binary files /dev/null and b/skill-zips/ios-simulator-testing.zip differ diff --git a/skill-zips/landing-page-expert.zip b/skill-zips/landing-page-expert.zip new file mode 100644 
index 000000000..7670a2ffa Binary files /dev/null and b/skill-zips/landing-page-expert.zip differ diff --git a/skill-zips/lead-research-assistant.zip b/skill-zips/lead-research-assistant.zip new file mode 100644 index 000000000..1042f3d2e Binary files /dev/null and b/skill-zips/lead-research-assistant.zip differ diff --git a/skill-zips/notebooklm.zip b/skill-zips/notebooklm.zip new file mode 100644 index 000000000..a8e50b49a Binary files /dev/null and b/skill-zips/notebooklm.zip differ diff --git a/skill-zips/notion-template-processor.zip b/skill-zips/notion-template-processor.zip new file mode 100644 index 000000000..df6903f21 Binary files /dev/null and b/skill-zips/notion-template-processor.zip differ diff --git a/skill-zips/pdf.zip b/skill-zips/pdf.zip new file mode 100644 index 000000000..2c0eb3db0 Binary files /dev/null and b/skill-zips/pdf.zip differ diff --git a/skill-zips/playwright-browser-automation.zip b/skill-zips/playwright-browser-automation.zip new file mode 100644 index 000000000..7a8ce518d Binary files /dev/null and b/skill-zips/playwright-browser-automation.zip differ diff --git a/skill-zips/pptx.zip b/skill-zips/pptx.zip new file mode 100644 index 000000000..a7e32bf4f Binary files /dev/null and b/skill-zips/pptx.zip differ diff --git a/skill-zips/prompt-engineer.zip b/skill-zips/prompt-engineer.zip new file mode 100644 index 000000000..c27b58db2 Binary files /dev/null and b/skill-zips/prompt-engineer.zip differ diff --git a/skill-zips/receiving-code-review.zip b/skill-zips/receiving-code-review.zip new file mode 100644 index 000000000..c23441875 Binary files /dev/null and b/skill-zips/receiving-code-review.zip differ diff --git a/skill-zips/requesting-code-review.zip b/skill-zips/requesting-code-review.zip new file mode 100644 index 000000000..fd5a44180 Binary files /dev/null and b/skill-zips/requesting-code-review.zip differ diff --git a/skill-zips/root-cause-tracing.zip b/skill-zips/root-cause-tracing.zip new file mode 100644 index 000000000..55f8b7416 Binary files /dev/null and b/skill-zips/root-cause-tracing.zip differ diff --git a/skill-zips/sharing-skills.zip b/skill-zips/sharing-skills.zip new file mode 100644 index 000000000..7ec1fcc11 Binary files /dev/null and b/skill-zips/sharing-skills.zip differ diff --git a/skill-zips/slack-gif-creator.zip b/skill-zips/slack-gif-creator.zip new file mode 100644 index 000000000..ac3195988 Binary files /dev/null and b/skill-zips/slack-gif-creator.zip differ diff --git a/skill-zips/subagent-driven-development.zip b/skill-zips/subagent-driven-development.zip new file mode 100644 index 000000000..9711c4301 Binary files /dev/null and b/skill-zips/subagent-driven-development.zip differ diff --git a/skill-zips/systematic-debugging.zip b/skill-zips/systematic-debugging.zip new file mode 100644 index 000000000..8d0975a46 Binary files /dev/null and b/skill-zips/systematic-debugging.zip differ diff --git a/skill-zips/test-driven-development.zip b/skill-zips/test-driven-development.zip new file mode 100644 index 000000000..348ae2c3b Binary files /dev/null and b/skill-zips/test-driven-development.zip differ diff --git a/skill-zips/testing-anti-patterns.zip b/skill-zips/testing-anti-patterns.zip new file mode 100644 index 000000000..8c1844d49 Binary files /dev/null and b/skill-zips/testing-anti-patterns.zip differ diff --git a/skill-zips/testing-skills-with-subagents.zip b/skill-zips/testing-skills-with-subagents.zip new file mode 100644 index 000000000..3fe284d4c Binary files /dev/null and b/skill-zips/testing-skills-with-subagents.zip 
differ diff --git a/skill-zips/theme-factory.zip b/skill-zips/theme-factory.zip new file mode 100644 index 000000000..5a4e535a0 Binary files /dev/null and b/skill-zips/theme-factory.zip differ diff --git a/skill-zips/using-git-worktrees.zip b/skill-zips/using-git-worktrees.zip new file mode 100644 index 000000000..a17bc25e9 Binary files /dev/null and b/skill-zips/using-git-worktrees.zip differ diff --git a/skill-zips/using-superpowers.zip b/skill-zips/using-superpowers.zip new file mode 100644 index 000000000..724903df7 Binary files /dev/null and b/skill-zips/using-superpowers.zip differ diff --git a/skill-zips/verification-before-completion.zip b/skill-zips/verification-before-completion.zip new file mode 100644 index 000000000..cd1fa025d Binary files /dev/null and b/skill-zips/verification-before-completion.zip differ diff --git a/skill-zips/video-downloader.zip b/skill-zips/video-downloader.zip new file mode 100644 index 000000000..918e72061 Binary files /dev/null and b/skill-zips/video-downloader.zip differ diff --git a/skill-zips/writing-plans.zip b/skill-zips/writing-plans.zip new file mode 100644 index 000000000..f338ad879 Binary files /dev/null and b/skill-zips/writing-plans.zip differ diff --git a/skill-zips/writing-skills.zip b/skill-zips/writing-skills.zip new file mode 100644 index 000000000..4680db952 Binary files /dev/null and b/skill-zips/writing-skills.zip differ diff --git a/skill-zips/xlsx.zip b/skill-zips/xlsx.zip new file mode 100644 index 000000000..3acc700e5 Binary files /dev/null and b/skill-zips/xlsx.zip differ diff --git a/skills/canvas-design/LICENSE.txt b/skills/canvas-design/LICENSE.txt new file mode 100644 index 000000000..7a4a3ea24 --- /dev/null +++ b/skills/canvas-design/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/skills/canvas-design/SKILL.md b/skills/canvas-design/SKILL.md new file mode 100644 index 000000000..9f63fee82 --- /dev/null +++ b/skills/canvas-design/SKILL.md @@ -0,0 +1,130 @@ +--- +name: canvas-design +description: Create beautiful visual art in .png and .pdf documents using design philosophy. You should use this skill when the user asks to create a poster, piece of art, design, or other static piece. Create original visual designs, never copying existing artists' work to avoid copyright violations. +license: Complete terms in LICENSE.txt +--- + +These are instructions for creating design philosophies - aesthetic movements that are then EXPRESSED VISUALLY. Output only .md files, .pdf files, and .png files. + +Complete this in two steps: +1. Design Philosophy Creation (.md file) +2. 
Express by creating it on a canvas (.pdf file or .png file) + +First, undertake this task: + +## DESIGN PHILOSOPHY CREATION + +To begin, create a VISUAL PHILOSOPHY (not layouts or templates) that will be interpreted through: +- Form, space, color, composition +- Images, graphics, shapes, patterns +- Minimal text as visual accent + +### THE CRITICAL UNDERSTANDING +- What is received: Some subtle input or instructions by the user that should be taken into account, but used as a foundation; it should not constrain creative freedom. +- What is created: A design philosophy/aesthetic movement. +- What happens next: Then, the same version receives the philosophy and EXPRESSES IT VISUALLY - creating artifacts that are 90% visual design, 10% essential text. + +Consider this approach: +- Write a manifesto for an art movement +- The next phase involves making the artwork + +The philosophy must emphasize: Visual expression. Spatial communication. Artistic interpretation. Minimal words. + +### HOW TO GENERATE A VISUAL PHILOSOPHY + +**Name the movement** (1-2 words): "Brutalist Joy" / "Chromatic Silence" / "Metabolist Dreams" + +**Articulate the philosophy** (4-6 paragraphs - concise but complete): + +To capture the VISUAL essence, express how the philosophy manifests through: +- Space and form +- Color and material +- Scale and rhythm +- Composition and balance +- Visual hierarchy + +**CRITICAL GUIDELINES:** +- **Avoid redundancy**: Each design aspect should be mentioned once. Avoid repeating points about color theory, spatial relationships, or typographic principles unless adding new depth. +- **Emphasize craftsmanship REPEATEDLY**: The philosophy MUST stress multiple times that the final work should appear as though it took countless hours to create, was labored over with care, and comes from someone at the absolute top of their field. This framing is essential - repeat phrases like "meticulously crafted," "the product of deep expertise," "painstaking attention," "master-level execution." +- **Leave creative space**: Remain specific about the aesthetic direction, but concise enough that the next Claude has room to make interpretive choices, also at an extremely high level of craftsmanship. + +The philosophy must guide the next version to express ideas VISUALLY, not through text. Information lives in design, not paragraphs. + +### PHILOSOPHY EXAMPLES + +**"Concrete Poetry"** +Philosophy: Communication through monumental form and bold geometry. +Visual expression: Massive color blocks, sculptural typography (huge single words, tiny labels), Brutalist spatial divisions, Polish poster energy meets Le Corbusier. Ideas expressed through visual weight and spatial tension, not explanation. Text as rare, powerful gesture - never paragraphs, only essential words integrated into the visual architecture. Every element placed with the precision of a master craftsman. + +**"Chromatic Language"** +Philosophy: Color as the primary information system. +Visual expression: Geometric precision where color zones create meaning. Typography minimal - small sans-serif labels letting chromatic fields communicate. Think Josef Albers' interaction meets data visualization. Information encoded spatially and chromatically. Words only to anchor what color already shows. The result of painstaking chromatic calibration. + +**"Analog Meditation"** +Philosophy: Quiet visual contemplation through texture and breathing room. +Visual expression: Paper grain, ink bleeds, vast negative space. Photography and illustration dominate. 
Typography whispered (small, restrained, serving the visual). Japanese photobook aesthetic. Images breathe across pages. Text appears sparingly - short phrases, never explanatory blocks. Each composition balanced with the care of a meditation practice. + +**"Organic Systems"** +Philosophy: Natural clustering and modular growth patterns. +Visual expression: Rounded forms, organic arrangements, color from nature through architecture. Information shown through visual diagrams, spatial relationships, iconography. Text only for key labels floating in space. The composition tells the story through expert spatial orchestration. + +**"Geometric Silence"** +Philosophy: Pure order and restraint. +Visual expression: Grid-based precision, bold photography or stark graphics, dramatic negative space. Typography precise but minimal - small essential text, large quiet zones. Swiss formalism meets Brutalist material honesty. Structure communicates, not words. Every alignment the work of countless refinements. + +*These are condensed examples. The actual design philosophy should be 4-6 substantial paragraphs.* + +### ESSENTIAL PRINCIPLES +- **VISUAL PHILOSOPHY**: Create an aesthetic worldview to be expressed through design +- **MINIMAL TEXT**: Always emphasize that text is sparse, essential-only, integrated as visual element - never lengthy +- **SPATIAL EXPRESSION**: Ideas communicate through space, form, color, composition - not paragraphs +- **ARTISTIC FREEDOM**: The next Claude interprets the philosophy visually - provide creative room +- **PURE DESIGN**: This is about making ART OBJECTS, not documents with decoration +- **EXPERT CRAFTSMANSHIP**: Repeatedly emphasize the final work must look meticulously crafted, labored over with care, the product of countless hours by someone at the top of their field + +**The design philosophy should be 4-6 paragraphs long.** Fill it with poetic design philosophy that brings together the core vision. Avoid repeating the same points. Keep the design philosophy generic without mentioning the intention of the art, as if it can be used wherever. Output the design philosophy as a .md file. + +--- + +## DEDUCING THE SUBTLE REFERENCE + +**CRITICAL STEP**: Before creating the canvas, identify the subtle conceptual thread from the original request. + +**THE ESSENTIAL PRINCIPLE**: +The topic is a **subtle, niche reference embedded within the art itself** - not always literal, always sophisticated. Someone familiar with the subject should feel it intuitively, while others simply experience a masterful abstract composition. The design philosophy provides the aesthetic language. The deduced topic provides the soul - the quiet conceptual DNA woven invisibly into form, color, and composition. + +This is **VERY IMPORTANT**: The reference must be refined so it enhances the work's depth without announcing itself. Think like a jazz musician quoting another song - only those who know will catch it, but everyone appreciates the music. + +--- + +## CANVAS CREATION + +With both the philosophy and the conceptual framework established, express it on a canvas. Take a moment to gather thoughts and clear the mind. Use the design philosophy created and the instructions below to craft a masterpiece, embodying all aspects of the philosophy with expert craftsmanship. + +**IMPORTANT**: For any type of content, even if the user requests something for a movie/game/book, the approach should still be sophisticated. 
Never lose sight of the idea that this should be art, not something that's cartoony or amateur. + +To create museum or magazine quality work, use the design philosophy as the foundation. Create one single page, highly visual, design-forward PDF or PNG output (unless asked for more pages). Generally use repeating patterns and perfect shapes. Treat the abstract philosophical design as if it were a scientific bible, borrowing the visual language of systematic observation—dense accumulation of marks, repeated elements, or layered patterns that build meaning through patient repetition and reward sustained viewing. Add sparse, clinical typography and systematic reference markers that suggest this could be a diagram from an imaginary discipline, treating the invisible subject with the same reverence typically reserved for documenting observable phenomena. Anchor the piece with simple phrase(s) or details positioned subtly, using a limited color palette that feels intentional and cohesive. Embrace the paradox of using analytical visual language to express ideas about human experience: the result should feel like an artifact that proves something ephemeral can be studied, mapped, and understood through careful attention. This is true art. + +**Text as a contextual element**: Text is always minimal and visual-first, but let context guide whether that means whisper-quiet labels or bold typographic gestures. A punk venue poster might have larger, more aggressive type than a minimalist ceramics studio identity. Most of the time, the font should be thin. All use of fonts must be design-forward and prioritize visual communication. Regardless of text scale, nothing falls off the page and nothing overlaps. Every element must be contained within the canvas boundaries with proper margins. Check carefully that all text, graphics, and visual elements have breathing room and clear separation. This is non-negotiable for professional execution. **IMPORTANT: Use different fonts if writing text. Search the `./canvas-fonts` directory. Regardless of approach, sophistication is non-negotiable.** + +Download and use whatever fonts are needed to make this a reality. Get creative by making the typography actually part of the art itself -- if the art is abstract, bring the font onto the canvas, not typeset digitally. + +To push boundaries, follow design instinct/intuition while using the philosophy as a guiding principle. Embrace ultimate design freedom and choice. Push aesthetics and design to the frontier. + +**CRITICAL**: To achieve human-crafted quality (not AI-generated), create work that looks like it took countless hours. Make it appear as though someone at the absolute top of their field labored over every detail with painstaking care. Ensure the composition, spacing, color choices, typography - everything screams expert-level craftsmanship. Double-check that nothing overlaps, formatting is flawless, every detail perfect. Create something that could be shown to people to prove expertise and rank as undeniably impressive. + +Output the final result as a single, downloadable .pdf or .png file, alongside the design philosophy used as a .md file. + +--- + +## FINAL STEP + +**IMPORTANT**: The user ALREADY said "It isn't perfect enough. It must be pristine, a masterpiece of craftsmanship, as if it were about to be displayed in a museum." 
+ +**CRITICAL**: To refine the work, avoid adding more graphics; instead refine what has been created and make it extremely crisp, respecting the design philosophy and the principles of minimalism entirely. Rather than adding a fun filter or refactoring a font, consider how to make the existing composition more cohesive with the art. If the instinct is to call a new function or draw a new shape, STOP and instead ask: "How can I make what's already here more of a piece of art?" + +Take a second pass. Go back to the code and refine/polish further to make this a philosophically designed masterpiece. + +## MULTI-PAGE OPTION + +To create additional pages when requested, create more creative pages along the same lines as the design philosophy but distinctly different as well. Bundle those pages in the same .pdf or many .pngs. Treat the first page as just a single page in a whole coffee table book waiting to be filled. Make the next pages unique twists and memories of the original. Have them almost tell a story in a very tasteful way. Exercise full creative freedom. \ No newline at end of file diff --git a/skills/canvas-design/canvas-fonts/ArsenalSC-OFL.txt b/skills/canvas-design/canvas-fonts/ArsenalSC-OFL.txt new file mode 100644 index 000000000..1dad6ca6d --- /dev/null +++ b/skills/canvas-design/canvas-fonts/ArsenalSC-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2012 The Arsenal Project Authors (andrij.design@gmail.com) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. 
+ +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/skills/canvas-design/canvas-fonts/ArsenalSC-Regular.ttf b/skills/canvas-design/canvas-fonts/ArsenalSC-Regular.ttf new file mode 100644 index 000000000..fe5409b22 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/ArsenalSC-Regular.ttf differ diff --git a/skills/canvas-design/canvas-fonts/BigShoulders-Bold.ttf b/skills/canvas-design/canvas-fonts/BigShoulders-Bold.ttf new file mode 100644 index 000000000..fc5f8fdde Binary files /dev/null and b/skills/canvas-design/canvas-fonts/BigShoulders-Bold.ttf differ diff --git a/skills/canvas-design/canvas-fonts/BigShoulders-OFL.txt b/skills/canvas-design/canvas-fonts/BigShoulders-OFL.txt new file mode 100644 index 000000000..b220280e7 --- /dev/null +++ b/skills/canvas-design/canvas-fonts/BigShoulders-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2019 The Big Shoulders Project Authors (https://github.com/xotypeco/big_shoulders) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. 
+This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. 
The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/skills/canvas-design/canvas-fonts/BigShoulders-Regular.ttf b/skills/canvas-design/canvas-fonts/BigShoulders-Regular.ttf new file mode 100644 index 000000000..de8308ce3 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/BigShoulders-Regular.ttf differ diff --git a/skills/canvas-design/canvas-fonts/Boldonse-OFL.txt b/skills/canvas-design/canvas-fonts/Boldonse-OFL.txt new file mode 100644 index 000000000..1890cb1c2 --- /dev/null +++ b/skills/canvas-design/canvas-fonts/Boldonse-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2024 The Boldonse Project Authors (https://github.com/googlefonts/boldonse) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. 
+ +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/skills/canvas-design/canvas-fonts/Boldonse-Regular.ttf b/skills/canvas-design/canvas-fonts/Boldonse-Regular.ttf new file mode 100644 index 000000000..43fa30aff Binary files /dev/null and b/skills/canvas-design/canvas-fonts/Boldonse-Regular.ttf differ diff --git a/skills/canvas-design/canvas-fonts/BricolageGrotesque-Bold.ttf b/skills/canvas-design/canvas-fonts/BricolageGrotesque-Bold.ttf new file mode 100644 index 000000000..f3b1deda1 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/BricolageGrotesque-Bold.ttf differ diff --git a/skills/canvas-design/canvas-fonts/BricolageGrotesque-OFL.txt b/skills/canvas-design/canvas-fonts/BricolageGrotesque-OFL.txt new file mode 100644 index 000000000..fc2b2167c --- /dev/null +++ b/skills/canvas-design/canvas-fonts/BricolageGrotesque-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2022 The Bricolage Grotesque Project Authors (https://github.com/ateliertriay/bricolage) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. 
+This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. 
The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/skills/canvas-design/canvas-fonts/BricolageGrotesque-Regular.ttf b/skills/canvas-design/canvas-fonts/BricolageGrotesque-Regular.ttf new file mode 100644 index 000000000..0674ae3e4 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/BricolageGrotesque-Regular.ttf differ diff --git a/skills/canvas-design/canvas-fonts/CrimsonPro-Bold.ttf b/skills/canvas-design/canvas-fonts/CrimsonPro-Bold.ttf new file mode 100644 index 000000000..58730fb4c Binary files /dev/null and b/skills/canvas-design/canvas-fonts/CrimsonPro-Bold.ttf differ diff --git a/skills/canvas-design/canvas-fonts/CrimsonPro-Italic.ttf b/skills/canvas-design/canvas-fonts/CrimsonPro-Italic.ttf new file mode 100644 index 000000000..786a1bd66 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/CrimsonPro-Italic.ttf differ diff --git a/skills/canvas-design/canvas-fonts/CrimsonPro-OFL.txt b/skills/canvas-design/canvas-fonts/CrimsonPro-OFL.txt new file mode 100644 index 000000000..f976fdc91 --- /dev/null +++ b/skills/canvas-design/canvas-fonts/CrimsonPro-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2018 The Crimson Pro Project Authors (https://github.com/Fonthausen/CrimsonPro) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). 
+ +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. 
diff --git a/skills/canvas-design/canvas-fonts/CrimsonPro-Regular.ttf b/skills/canvas-design/canvas-fonts/CrimsonPro-Regular.ttf new file mode 100644 index 000000000..f5666b9be Binary files /dev/null and b/skills/canvas-design/canvas-fonts/CrimsonPro-Regular.ttf differ diff --git a/skills/canvas-design/canvas-fonts/DMMono-OFL.txt b/skills/canvas-design/canvas-fonts/DMMono-OFL.txt new file mode 100644 index 000000000..5b17f0c62 --- /dev/null +++ b/skills/canvas-design/canvas-fonts/DMMono-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2020 The DM Mono Project Authors (https://www.github.com/googlefonts/dm-mono) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. 
+ +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/skills/canvas-design/canvas-fonts/DMMono-Regular.ttf b/skills/canvas-design/canvas-fonts/DMMono-Regular.ttf new file mode 100644 index 000000000..7efe813da Binary files /dev/null and b/skills/canvas-design/canvas-fonts/DMMono-Regular.ttf differ diff --git a/skills/canvas-design/canvas-fonts/EricaOne-OFL.txt b/skills/canvas-design/canvas-fonts/EricaOne-OFL.txt new file mode 100644 index 000000000..490d01201 --- /dev/null +++ b/skills/canvas-design/canvas-fonts/EricaOne-OFL.txt @@ -0,0 +1,94 @@ +Copyright (c) 2011 by LatinoType Limitada (luciano@latinotype.com), +with Reserved Font Names "Erica One" + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. 
This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. 
diff --git a/skills/canvas-design/canvas-fonts/EricaOne-Regular.ttf b/skills/canvas-design/canvas-fonts/EricaOne-Regular.ttf new file mode 100644 index 000000000..8bd91d117 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/EricaOne-Regular.ttf differ diff --git a/skills/canvas-design/canvas-fonts/GeistMono-Bold.ttf b/skills/canvas-design/canvas-fonts/GeistMono-Bold.ttf new file mode 100644 index 000000000..736ff7c3b Binary files /dev/null and b/skills/canvas-design/canvas-fonts/GeistMono-Bold.ttf differ diff --git a/skills/canvas-design/canvas-fonts/GeistMono-OFL.txt b/skills/canvas-design/canvas-fonts/GeistMono-OFL.txt new file mode 100644 index 000000000..679a685a2 --- /dev/null +++ b/skills/canvas-design/canvas-fonts/GeistMono-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2024 The Geist Project Authors (https://github.com/vercel/geist-font.git) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. 
These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/skills/canvas-design/canvas-fonts/GeistMono-Regular.ttf b/skills/canvas-design/canvas-fonts/GeistMono-Regular.ttf new file mode 100644 index 000000000..1a30262ab Binary files /dev/null and b/skills/canvas-design/canvas-fonts/GeistMono-Regular.ttf differ diff --git a/skills/canvas-design/canvas-fonts/Gloock-OFL.txt b/skills/canvas-design/canvas-fonts/Gloock-OFL.txt new file mode 100644 index 000000000..363acd33d --- /dev/null +++ b/skills/canvas-design/canvas-fonts/Gloock-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2022 The Gloock Project Authors (https://github.com/duartp/gloock) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. 
The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. 
diff --git a/skills/canvas-design/canvas-fonts/Gloock-Regular.ttf b/skills/canvas-design/canvas-fonts/Gloock-Regular.ttf new file mode 100644 index 000000000..3e58c4e45 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/Gloock-Regular.ttf differ diff --git a/skills/canvas-design/canvas-fonts/IBMPlexMono-Bold.ttf b/skills/canvas-design/canvas-fonts/IBMPlexMono-Bold.ttf new file mode 100644 index 000000000..247979cae Binary files /dev/null and b/skills/canvas-design/canvas-fonts/IBMPlexMono-Bold.ttf differ diff --git a/skills/canvas-design/canvas-fonts/IBMPlexMono-OFL.txt b/skills/canvas-design/canvas-fonts/IBMPlexMono-OFL.txt new file mode 100644 index 000000000..e423b7478 --- /dev/null +++ b/skills/canvas-design/canvas-fonts/IBMPlexMono-OFL.txt @@ -0,0 +1,93 @@ +Copyright © 2017 IBM Corp. with Reserved Font Name "Plex" + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. 
These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/skills/canvas-design/canvas-fonts/IBMPlexMono-Regular.ttf b/skills/canvas-design/canvas-fonts/IBMPlexMono-Regular.ttf new file mode 100644 index 000000000..601ae945e Binary files /dev/null and b/skills/canvas-design/canvas-fonts/IBMPlexMono-Regular.ttf differ diff --git a/skills/canvas-design/canvas-fonts/IBMPlexSerif-Bold.ttf b/skills/canvas-design/canvas-fonts/IBMPlexSerif-Bold.ttf new file mode 100644 index 000000000..78f6e500d Binary files /dev/null and b/skills/canvas-design/canvas-fonts/IBMPlexSerif-Bold.ttf differ diff --git a/skills/canvas-design/canvas-fonts/IBMPlexSerif-BoldItalic.ttf b/skills/canvas-design/canvas-fonts/IBMPlexSerif-BoldItalic.ttf new file mode 100644 index 000000000..369b89d26 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/IBMPlexSerif-BoldItalic.ttf differ diff --git a/skills/canvas-design/canvas-fonts/IBMPlexSerif-Italic.ttf b/skills/canvas-design/canvas-fonts/IBMPlexSerif-Italic.ttf new file mode 100644 index 000000000..a4d859a77 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/IBMPlexSerif-Italic.ttf differ diff --git a/skills/canvas-design/canvas-fonts/IBMPlexSerif-Regular.ttf b/skills/canvas-design/canvas-fonts/IBMPlexSerif-Regular.ttf new file mode 100644 index 000000000..35f454cea Binary files /dev/null and b/skills/canvas-design/canvas-fonts/IBMPlexSerif-Regular.ttf differ diff --git a/skills/canvas-design/canvas-fonts/InstrumentSans-Bold.ttf b/skills/canvas-design/canvas-fonts/InstrumentSans-Bold.ttf new file mode 100644 index 000000000..f602dcef2 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/InstrumentSans-Bold.ttf differ diff --git a/skills/canvas-design/canvas-fonts/InstrumentSans-BoldItalic.ttf 
b/skills/canvas-design/canvas-fonts/InstrumentSans-BoldItalic.ttf new file mode 100644 index 000000000..122b27305 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/InstrumentSans-BoldItalic.ttf differ diff --git a/skills/canvas-design/canvas-fonts/InstrumentSans-Italic.ttf b/skills/canvas-design/canvas-fonts/InstrumentSans-Italic.ttf new file mode 100644 index 000000000..4b98fb8dd Binary files /dev/null and b/skills/canvas-design/canvas-fonts/InstrumentSans-Italic.ttf differ diff --git a/skills/canvas-design/canvas-fonts/InstrumentSans-OFL.txt b/skills/canvas-design/canvas-fonts/InstrumentSans-OFL.txt new file mode 100644 index 000000000..4bb99142f --- /dev/null +++ b/skills/canvas-design/canvas-fonts/InstrumentSans-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2022 The Instrument Sans Project Authors (https://github.com/Instrument/instrument-sans) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. 
These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/skills/canvas-design/canvas-fonts/InstrumentSans-Regular.ttf b/skills/canvas-design/canvas-fonts/InstrumentSans-Regular.ttf new file mode 100644 index 000000000..14c6113cd Binary files /dev/null and b/skills/canvas-design/canvas-fonts/InstrumentSans-Regular.ttf differ diff --git a/skills/canvas-design/canvas-fonts/InstrumentSerif-Italic.ttf b/skills/canvas-design/canvas-fonts/InstrumentSerif-Italic.ttf new file mode 100644 index 000000000..8fa958d9b Binary files /dev/null and b/skills/canvas-design/canvas-fonts/InstrumentSerif-Italic.ttf differ diff --git a/skills/canvas-design/canvas-fonts/InstrumentSerif-Regular.ttf b/skills/canvas-design/canvas-fonts/InstrumentSerif-Regular.ttf new file mode 100644 index 000000000..976303184 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/InstrumentSerif-Regular.ttf differ diff --git a/skills/canvas-design/canvas-fonts/Italiana-OFL.txt b/skills/canvas-design/canvas-fonts/Italiana-OFL.txt new file mode 100644 index 000000000..ba8af215b --- /dev/null +++ b/skills/canvas-design/canvas-fonts/Italiana-OFL.txt @@ -0,0 +1,93 @@ +Copyright (c) 2011, Santiago Orozco (hi@typemade.mx), with Reserved Font Name "Italiana". + +This Font Software is licensed under the SIL Open Font License, Version 1.1. 
+This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. 
The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/skills/canvas-design/canvas-fonts/Italiana-Regular.ttf b/skills/canvas-design/canvas-fonts/Italiana-Regular.ttf new file mode 100644 index 000000000..a9b828c0f Binary files /dev/null and b/skills/canvas-design/canvas-fonts/Italiana-Regular.ttf differ diff --git a/skills/canvas-design/canvas-fonts/JetBrainsMono-Bold.ttf b/skills/canvas-design/canvas-fonts/JetBrainsMono-Bold.ttf new file mode 100644 index 000000000..1926c804b Binary files /dev/null and b/skills/canvas-design/canvas-fonts/JetBrainsMono-Bold.ttf differ diff --git a/skills/canvas-design/canvas-fonts/JetBrainsMono-OFL.txt b/skills/canvas-design/canvas-fonts/JetBrainsMono-OFL.txt new file mode 100644 index 000000000..5ceee0025 --- /dev/null +++ b/skills/canvas-design/canvas-fonts/JetBrainsMono-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2020 The JetBrains Mono Project Authors (https://github.com/JetBrains/JetBrainsMono) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). 
+ +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. 
diff --git a/skills/canvas-design/canvas-fonts/JetBrainsMono-Regular.ttf b/skills/canvas-design/canvas-fonts/JetBrainsMono-Regular.ttf new file mode 100644 index 000000000..436c982ff Binary files /dev/null and b/skills/canvas-design/canvas-fonts/JetBrainsMono-Regular.ttf differ diff --git a/skills/canvas-design/canvas-fonts/Jura-Light.ttf b/skills/canvas-design/canvas-fonts/Jura-Light.ttf new file mode 100644 index 000000000..dffbb3397 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/Jura-Light.ttf differ diff --git a/skills/canvas-design/canvas-fonts/Jura-Medium.ttf b/skills/canvas-design/canvas-fonts/Jura-Medium.ttf new file mode 100644 index 000000000..4bf91a339 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/Jura-Medium.ttf differ diff --git a/skills/canvas-design/canvas-fonts/Jura-OFL.txt b/skills/canvas-design/canvas-fonts/Jura-OFL.txt new file mode 100644 index 000000000..64ad4c67d --- /dev/null +++ b/skills/canvas-design/canvas-fonts/Jura-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2019 The Jura Project Authors (https://github.com/ossobuffo/jura) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. 
+ +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/skills/canvas-design/canvas-fonts/LibreBaskerville-OFL.txt b/skills/canvas-design/canvas-fonts/LibreBaskerville-OFL.txt new file mode 100644 index 000000000..8c531fa56 --- /dev/null +++ b/skills/canvas-design/canvas-fonts/LibreBaskerville-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2012 The Libre Baskerville Project Authors (https://github.com/impallari/Libre-Baskerville) with Reserved Font Name Libre Baskerville. + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. 
The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. 
diff --git a/skills/canvas-design/canvas-fonts/LibreBaskerville-Regular.ttf b/skills/canvas-design/canvas-fonts/LibreBaskerville-Regular.ttf new file mode 100644 index 000000000..c1abc2645 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/LibreBaskerville-Regular.ttf differ diff --git a/skills/canvas-design/canvas-fonts/Lora-Bold.ttf b/skills/canvas-design/canvas-fonts/Lora-Bold.ttf new file mode 100644 index 000000000..edae21eb6 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/Lora-Bold.ttf differ diff --git a/skills/canvas-design/canvas-fonts/Lora-BoldItalic.ttf b/skills/canvas-design/canvas-fonts/Lora-BoldItalic.ttf new file mode 100644 index 000000000..12dea8c6f Binary files /dev/null and b/skills/canvas-design/canvas-fonts/Lora-BoldItalic.ttf differ diff --git a/skills/canvas-design/canvas-fonts/Lora-Italic.ttf b/skills/canvas-design/canvas-fonts/Lora-Italic.ttf new file mode 100644 index 000000000..e24b69b26 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/Lora-Italic.ttf differ diff --git a/skills/canvas-design/canvas-fonts/Lora-OFL.txt b/skills/canvas-design/canvas-fonts/Lora-OFL.txt new file mode 100644 index 000000000..4cf1b950d --- /dev/null +++ b/skills/canvas-design/canvas-fonts/Lora-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2011 The Lora Project Authors (https://github.com/cyrealtype/Lora-Cyrillic), with Reserved Font Name "Lora". + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. 
+ +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/skills/canvas-design/canvas-fonts/Lora-Regular.ttf b/skills/canvas-design/canvas-fonts/Lora-Regular.ttf new file mode 100644 index 000000000..dc751db00 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/Lora-Regular.ttf differ diff --git a/skills/canvas-design/canvas-fonts/NationalPark-Bold.ttf b/skills/canvas-design/canvas-fonts/NationalPark-Bold.ttf new file mode 100644 index 000000000..f4d7c021b Binary files /dev/null and b/skills/canvas-design/canvas-fonts/NationalPark-Bold.ttf differ diff --git a/skills/canvas-design/canvas-fonts/NationalPark-OFL.txt b/skills/canvas-design/canvas-fonts/NationalPark-OFL.txt new file mode 100644 index 000000000..f4ec3fba9 --- /dev/null +++ b/skills/canvas-design/canvas-fonts/NationalPark-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2025 The National Park Project Authors (https://github.com/benhoepner/National-Park) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. 
+This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. 
The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/skills/canvas-design/canvas-fonts/NationalPark-Regular.ttf b/skills/canvas-design/canvas-fonts/NationalPark-Regular.ttf new file mode 100644 index 000000000..e4cbfbf5e Binary files /dev/null and b/skills/canvas-design/canvas-fonts/NationalPark-Regular.ttf differ diff --git a/skills/canvas-design/canvas-fonts/NothingYouCouldDo-OFL.txt b/skills/canvas-design/canvas-fonts/NothingYouCouldDo-OFL.txt new file mode 100644 index 000000000..c81eccdee --- /dev/null +++ b/skills/canvas-design/canvas-fonts/NothingYouCouldDo-OFL.txt @@ -0,0 +1,93 @@ +Copyright (c) 2010, Kimberly Geswein (kimberlygeswein.com) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. 
+ +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/skills/canvas-design/canvas-fonts/NothingYouCouldDo-Regular.ttf b/skills/canvas-design/canvas-fonts/NothingYouCouldDo-Regular.ttf new file mode 100644 index 000000000..b086bced9 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/NothingYouCouldDo-Regular.ttf differ diff --git a/skills/canvas-design/canvas-fonts/Outfit-Bold.ttf b/skills/canvas-design/canvas-fonts/Outfit-Bold.ttf new file mode 100644 index 000000000..f9f2f72af Binary files /dev/null and b/skills/canvas-design/canvas-fonts/Outfit-Bold.ttf differ diff --git a/skills/canvas-design/canvas-fonts/Outfit-OFL.txt b/skills/canvas-design/canvas-fonts/Outfit-OFL.txt new file mode 100644 index 000000000..fd0cb995c --- /dev/null +++ b/skills/canvas-design/canvas-fonts/Outfit-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2021 The Outfit Project Authors (https://github.com/Outfitio/Outfit-Fonts) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. 
+This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. 
The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/skills/canvas-design/canvas-fonts/Outfit-Regular.ttf b/skills/canvas-design/canvas-fonts/Outfit-Regular.ttf new file mode 100644 index 000000000..3939ab246 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/Outfit-Regular.ttf differ diff --git a/skills/canvas-design/canvas-fonts/PixelifySans-Medium.ttf b/skills/canvas-design/canvas-fonts/PixelifySans-Medium.ttf new file mode 100644 index 000000000..95cd37253 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/PixelifySans-Medium.ttf differ diff --git a/skills/canvas-design/canvas-fonts/PixelifySans-OFL.txt b/skills/canvas-design/canvas-fonts/PixelifySans-OFL.txt new file mode 100644 index 000000000..b02d1b676 --- /dev/null +++ b/skills/canvas-design/canvas-fonts/PixelifySans-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2021 The Pixelify Sans Project Authors (https://github.com/eifetx/Pixelify-Sans) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). 
+ +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/skills/canvas-design/canvas-fonts/PoiretOne-OFL.txt b/skills/canvas-design/canvas-fonts/PoiretOne-OFL.txt new file mode 100644 index 000000000..607bdad3f --- /dev/null +++ b/skills/canvas-design/canvas-fonts/PoiretOne-OFL.txt @@ -0,0 +1,93 @@ +Copyright (c) 2011, Denis Masharov (denis.masharov@gmail.com) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. 
+This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. 
The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/skills/canvas-design/canvas-fonts/PoiretOne-Regular.ttf b/skills/canvas-design/canvas-fonts/PoiretOne-Regular.ttf new file mode 100644 index 000000000..b339511b0 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/PoiretOne-Regular.ttf differ diff --git a/skills/canvas-design/canvas-fonts/RedHatMono-Bold.ttf b/skills/canvas-design/canvas-fonts/RedHatMono-Bold.ttf new file mode 100644 index 000000000..a6e3cf157 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/RedHatMono-Bold.ttf differ diff --git a/skills/canvas-design/canvas-fonts/RedHatMono-OFL.txt b/skills/canvas-design/canvas-fonts/RedHatMono-OFL.txt new file mode 100644 index 000000000..16cf394bb --- /dev/null +++ b/skills/canvas-design/canvas-fonts/RedHatMono-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2024 The Red Hat Project Authors (https://github.com/RedHatOfficial/RedHatFont) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). 
+ +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/skills/canvas-design/canvas-fonts/RedHatMono-Regular.ttf b/skills/canvas-design/canvas-fonts/RedHatMono-Regular.ttf new file mode 100644 index 000000000..3bf6a698b Binary files /dev/null and b/skills/canvas-design/canvas-fonts/RedHatMono-Regular.ttf differ diff --git a/skills/canvas-design/canvas-fonts/Silkscreen-OFL.txt b/skills/canvas-design/canvas-fonts/Silkscreen-OFL.txt new file mode 100644 index 000000000..a1fe7d5fb --- /dev/null +++ b/skills/canvas-design/canvas-fonts/Silkscreen-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2001 The Silkscreen Project Authors (https://github.com/googlefonts/silkscreen) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. 
+This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. 
The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/skills/canvas-design/canvas-fonts/Silkscreen-Regular.ttf b/skills/canvas-design/canvas-fonts/Silkscreen-Regular.ttf new file mode 100644 index 000000000..8abaa7c50 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/Silkscreen-Regular.ttf differ diff --git a/skills/canvas-design/canvas-fonts/SmoochSans-Medium.ttf b/skills/canvas-design/canvas-fonts/SmoochSans-Medium.ttf new file mode 100644 index 000000000..0af9ead07 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/SmoochSans-Medium.ttf differ diff --git a/skills/canvas-design/canvas-fonts/SmoochSans-OFL.txt b/skills/canvas-design/canvas-fonts/SmoochSans-OFL.txt new file mode 100644 index 000000000..4c2f033ac --- /dev/null +++ b/skills/canvas-design/canvas-fonts/SmoochSans-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2016 The Smooch Sans Project Authors (https://github.com/googlefonts/smooch-sans) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). 
+ +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/skills/canvas-design/canvas-fonts/Tektur-Medium.ttf b/skills/canvas-design/canvas-fonts/Tektur-Medium.ttf new file mode 100644 index 000000000..34fc79719 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/Tektur-Medium.ttf differ diff --git a/skills/canvas-design/canvas-fonts/Tektur-OFL.txt b/skills/canvas-design/canvas-fonts/Tektur-OFL.txt new file mode 100644 index 000000000..2cad55f1b --- /dev/null +++ b/skills/canvas-design/canvas-fonts/Tektur-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2023 The Tektur Project Authors (https://www.github.com/hyvyys/Tektur) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. 
+This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. 
The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/skills/canvas-design/canvas-fonts/Tektur-Regular.ttf b/skills/canvas-design/canvas-fonts/Tektur-Regular.ttf new file mode 100644 index 000000000..f280fba40 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/Tektur-Regular.ttf differ diff --git a/skills/canvas-design/canvas-fonts/WorkSans-Bold.ttf b/skills/canvas-design/canvas-fonts/WorkSans-Bold.ttf new file mode 100644 index 000000000..5c9798929 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/WorkSans-Bold.ttf differ diff --git a/skills/canvas-design/canvas-fonts/WorkSans-BoldItalic.ttf b/skills/canvas-design/canvas-fonts/WorkSans-BoldItalic.ttf new file mode 100644 index 000000000..54418b8a6 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/WorkSans-BoldItalic.ttf differ diff --git a/skills/canvas-design/canvas-fonts/WorkSans-Italic.ttf b/skills/canvas-design/canvas-fonts/WorkSans-Italic.ttf new file mode 100644 index 000000000..40529b68f Binary files /dev/null and b/skills/canvas-design/canvas-fonts/WorkSans-Italic.ttf differ diff --git a/skills/canvas-design/canvas-fonts/WorkSans-OFL.txt b/skills/canvas-design/canvas-fonts/WorkSans-OFL.txt new file mode 100644 index 000000000..070f3416c --- /dev/null +++ b/skills/canvas-design/canvas-fonts/WorkSans-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2019 The Work Sans Project Authors (https://github.com/weiweihuanghuang/Work-Sans) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. 
+ +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. 
diff --git a/skills/canvas-design/canvas-fonts/WorkSans-Regular.ttf b/skills/canvas-design/canvas-fonts/WorkSans-Regular.ttf new file mode 100644 index 000000000..d24586cc0 Binary files /dev/null and b/skills/canvas-design/canvas-fonts/WorkSans-Regular.ttf differ diff --git a/skills/canvas-design/canvas-fonts/YoungSerif-OFL.txt b/skills/canvas-design/canvas-fonts/YoungSerif-OFL.txt new file mode 100644 index 000000000..f09443cbe --- /dev/null +++ b/skills/canvas-design/canvas-fonts/YoungSerif-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2023 The Young Serif Project Authors (https://github.com/noirblancrouge/YoungSerif) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. 
+ +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/skills/canvas-design/canvas-fonts/YoungSerif-Regular.ttf b/skills/canvas-design/canvas-fonts/YoungSerif-Regular.ttf new file mode 100644 index 000000000..f454fbedd Binary files /dev/null and b/skills/canvas-design/canvas-fonts/YoungSerif-Regular.ttf differ diff --git a/skills/competitive-ads-extractor/SKILL.md b/skills/competitive-ads-extractor/SKILL.md new file mode 100644 index 000000000..d1a173971 --- /dev/null +++ b/skills/competitive-ads-extractor/SKILL.md @@ -0,0 +1,293 @@ +--- +name: competitive-ads-extractor +description: Extracts and analyzes competitors' ads from ad libraries (Facebook, LinkedIn, etc.) to understand what messaging, problems, and creative approaches are working. Helps inspire and improve your own ad campaigns. +--- + +# Competitive Ads Extractor + +This skill extracts your competitors' ads from ad libraries and analyzes what's working—the problems they're highlighting, use cases they're targeting, and copy/creative that's resonating. + +## When to Use This Skill + +- Researching competitor ad strategies +- Finding inspiration for your own ads +- Understanding market positioning +- Identifying successful ad patterns +- Analyzing messaging that works +- Discovering new use cases or pain points +- Planning ad campaigns with proven concepts + +## What This Skill Does + +1. **Extracts Ads**: Scrapes ads from Facebook Ad Library, LinkedIn, etc. +2. **Captures Screenshots**: Saves visual copies of all ads +3. **Analyzes Messaging**: Identifies problems, use cases, and value props +4. **Categorizes Ads**: Groups by theme, audience, or format +5. **Identifies Patterns**: Finds common successful approaches +6. 
**Provides Insights**: Explains why certain ads likely perform well + +## How to Use + +### Basic Extraction + +``` +Extract all current ads from [Competitor Name] on Facebook Ad Library +``` + +``` +Scrape ads from [Company] and analyze their messaging +``` + +### Specific Analysis + +``` +Get all ads from [Competitor] focusing on their messaging +about [specific problem]. What pain points are they highlighting? +``` + +### Competitive Set + +``` +Extract ads from these 5 competitors: [list]. +Compare their approaches and tell me what's working. +``` + +### Specific Platform + +``` +Get LinkedIn ads from [Competitor] and analyze their +B2B positioning strategy +``` + +## Example + +**User**: "Extract ads from Notion on Facebook Ad Library and tell me what messaging is working for them." + +**Process**: +``` +Accessing Facebook Ad Library... +Searching for: Notion +Found: 23 active ads + +Extracting screenshots... +[████████████████████] 100% + +Saved to: competitor-ads/notion/ +- ad-001-collaboration.png +- ad-002-productivity.png +- ad-003-templates.png +... + +Analyzing messaging... +``` + +**Output**: +``` +# Notion Ad Analysis + +## Overview +- Total Ads: 23 active +- Primary Themes: Productivity (35%), Collaboration (30%), + Templates (20%), AI Features (15%) +- Ad Formats: Static images (60%), Video (40%) +- CTA Patterns: "Try for free", "Get started" + +## Key Problems They're Highlighting + +1. **Scattered Information** (8 ads) + Copy: "Stop switching between 10 different tools" + Why it works: Direct pain point many teams face + +2. **Meeting Overload** (5 ads) + Copy: "Replace unnecessary meetings with async updates" + Why it works: Post-COVID remote work pain point + +3. **Lost Documentation** (4 ads) + Copy: "Never ask 'where is that doc?' again" + Why it works: Universal workplace frustration + +## Successful Creative Patterns + +### Pattern 1: Before/After Split +- Shows chaotic tool landscape → Clean Notion workspace +- Used in 6 high-performing ads +- Visual metaphor is immediately clear + +### Pattern 2: Feature Showcase +- GIF of actual product usage +- Shows specific feature in 5 seconds +- Used for new features (AI, templates) + +### Pattern 3: Social Proof +- "Join 20M users" messaging +- Customer logos +- Used in 4 ads targeting enterprise + +## Copy That's Working + +Best Headlines: +1. "Your team's knowledge, finally in one place" + → Benefit-focused, addresses pain directly + +2. "The all-in-one workspace" + → Clear positioning, broad appeal + +3. "AI that actually helps you work" + → Addresses AI skepticism, practical angle + +Best Body Copy Patterns: +- Short sentences (under 10 words) +- Focus on outcomes not features +- Include specific numbers ("Cut meetings by 50%") + +## Audience Targeting Insights + +Based on ad variations: +- Startup founders: Solo productivity angle +- Team leads: Collaboration and alignment +- Enterprise: Security and compliance mentions +- Students: Free plan, templates, organization + +## Recommendations for Your Ads + +1. **Test the "tool sprawl" pain point** + → Strong resonance based on their ad frequency + +2. **Use product screenshots over abstract visuals** + → All their top ads show actual UI + +3. **Lead with the problem, not the solution** + → "Tired of X?" performs better than "Introducing Y" + +4. **Keep copy under 100 characters** + → Their shortest ads seem most frequent + +5. 
**Test before/after visual formats** + → Proven pattern in their creative + +## Files Saved +- All ads: ~/competitor-ads/notion/ +- Analysis: ~/competitor-ads/notion/analysis.md +- Best performers: ~/competitor-ads/notion/top-10/ +``` + +**Inspired by:** Sumant Subrahmanya's use case from Lenny's Newsletter + +## What You Can Learn + +### Messaging Analysis +- What problems they emphasize +- How they position against competition +- Value propositions that resonate +- Target audience segments + +### Creative Patterns +- Visual styles that work +- Video vs. static image performance +- Color schemes and branding +- Layout patterns + +### Copy Formulas +- Headline structures +- Call-to-action patterns +- Length and tone +- Emotional triggers + +### Campaign Strategy +- Seasonal campaigns +- Product launch approaches +- Feature announcement tactics +- Retargeting patterns + +## Best Practices + +### Legal & Ethical +✓ Only use for research and inspiration +✓ Don't copy ads directly +✓ Respect intellectual property +✓ Use insights to inform original creative +✗ Don't plagiarize copy or steal designs + +### Analysis Tips +1. **Look for patterns**: What themes repeat? +2. **Track over time**: Save ads monthly to see evolution +3. **Test hypotheses**: Adapt successful patterns for your brand +4. **Segment by audience**: Different messages for different targets +5. **Compare platforms**: LinkedIn vs Facebook messaging differs + +## Advanced Features + +### Trend Tracking +``` +Compare [Competitor]'s ads from Q1 vs Q2. +What messaging has changed? +``` + +### Multi-Competitor Analysis +``` +Extract ads from [Company A], [Company B], [Company C]. +What are the common patterns? Where do they differ? +``` + +### Industry Benchmarks +``` +Show me ad patterns across the top 10 project management +tools. What problems do they all focus on? +``` + +### Format Analysis +``` +Analyze video ads vs static image ads from [Competitor]. +Which gets more engagement? (if data available) +``` + +## Common Workflows + +### Ad Campaign Planning +1. Extract competitor ads +2. Identify successful patterns +3. Note gaps in their messaging +4. Brainstorm unique angles +5. Draft test ad variations + +### Positioning Research +1. Get ads from 5 competitors +2. Map their positioning +3. Find underserved angles +4. Develop differentiated messaging +5. Test against their approaches + +### Creative Inspiration +1. Extract ads by theme +2. Analyze visual patterns +3. Note color and layout trends +4. Adapt successful patterns +5. Create original variations + +## Tips for Success + +1. **Regular Monitoring**: Check monthly for changes +2. **Broad Research**: Look at adjacent competitors too +3. **Save Everything**: Build a reference library +4. **Test Insights**: Run your own experiments +5. **Track Performance**: A/B test inspired concepts +6. **Stay Original**: Use for inspiration, not copying +7. **Multiple Platforms**: Compare Facebook, LinkedIn, TikTok, etc. 
+ +## Output Formats + +- **Screenshots**: All ads saved as images +- **Analysis Report**: Markdown summary of insights +- **Spreadsheet**: CSV with ad copy, CTAs, themes +- **Presentation**: Visual deck of top performers +- **Pattern Library**: Categorized by approach + +## Related Use Cases + +- Writing better ad copy for your campaigns +- Understanding market positioning +- Finding content gaps in your messaging +- Discovering new use cases for your product +- Planning product marketing strategy +- Inspiring social media content + diff --git a/skills/docx/LICENSE.txt b/skills/docx/LICENSE.txt new file mode 100644 index 000000000..c55ab4222 --- /dev/null +++ b/skills/docx/LICENSE.txt @@ -0,0 +1,30 @@ +© 2025 Anthropic, PBC. All rights reserved. + +LICENSE: Use of these materials (including all code, prompts, assets, files, +and other components of this Skill) is governed by your agreement with +Anthropic regarding use of Anthropic's services. If no separate agreement +exists, use is governed by Anthropic's Consumer Terms of Service or +Commercial Terms of Service, as applicable: +https://www.anthropic.com/legal/consumer-terms +https://www.anthropic.com/legal/commercial-terms +Your applicable agreement is referred to as the "Agreement." "Services" are +as defined in the Agreement. + +ADDITIONAL RESTRICTIONS: Notwithstanding anything in the Agreement to the +contrary, users may not: + +- Extract these materials from the Services or retain copies of these + materials outside the Services +- Reproduce or copy these materials, except for temporary copies created + automatically during authorized use of the Services +- Create derivative works based on these materials +- Distribute, sublicense, or transfer these materials to any third party +- Make, offer to sell, sell, or import any inventions embodied in these + materials +- Reverse engineer, decompile, or disassemble these materials + +The receipt, viewing, or possession of these materials does not convey or +imply any license or right beyond those expressly granted above. + +Anthropic retains all right, title, and interest in these materials, +including all copyrights, patents, and other intellectual property rights. diff --git a/skills/docx/SKILL.md b/skills/docx/SKILL.md new file mode 100644 index 000000000..664663895 --- /dev/null +++ b/skills/docx/SKILL.md @@ -0,0 +1,197 @@ +--- +name: docx +description: "Comprehensive document creation, editing, and analysis with support for tracked changes, comments, formatting preservation, and text extraction. When Claude needs to work with professional documents (.docx files) for: (1) Creating new documents, (2) Modifying or editing content, (3) Working with tracked changes, (4) Adding comments, or any other document tasks" +license: Proprietary. LICENSE.txt has complete terms +--- + +# DOCX creation, editing, and analysis + +## Overview + +A user may ask you to create, edit, or analyze the contents of a .docx file. A .docx file is essentially a ZIP archive containing XML files and other resources that you can read or edit. You have different tools and workflows available for different tasks. 
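Since the package is just ZIP + XML, you can confirm the structure directly before choosing a workflow. A minimal sketch (the filename `example.docx` is only a placeholder; the part names it prints, such as `word/document.xml`, are the standard OOXML locations listed in the next section):

```python
# Minimal sketch: inspect a .docx as the ZIP archive it is.
# "example.docx" is a placeholder path; any Word document will do.
import zipfile

with zipfile.ZipFile("example.docx") as docx:
    for name in docx.namelist():
        print(name)  # e.g. word/document.xml, word/styles.xml, word/media/image1.png
    body = docx.read("word/document.xml").decode("utf-8")
    print(body[:200])  # first bytes of the main document's raw OOXML
```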
+
+## Workflow Decision Tree
+
+### Reading/Analyzing Content
+Use "Text extraction" or "Raw XML access" sections below
+
+### Creating New Document
+Use "Creating a new Word document" workflow
+
+### Editing Existing Document
+- **Your own document + simple changes**
+  Use "Basic OOXML editing" workflow
+
+- **Someone else's document**
+  Use **"Redlining workflow"** (recommended default)
+
+- **Legal, academic, business, or government docs**
+  Use **"Redlining workflow"** (required)
+
+## Reading and analyzing content
+
+### Text extraction
+If you just need to read the text contents of a document, you should convert the document to markdown using pandoc. Pandoc provides excellent support for preserving document structure and can show tracked changes:
+
+```bash
+# Convert document to markdown with tracked changes
+pandoc --track-changes=all path-to-file.docx -o output.md
+# Options: --track-changes=accept/reject/all
+```
+
+### Raw XML access
+You need raw XML access for: comments, complex formatting, document structure, embedded media, and metadata. For any of these features, you'll need to unpack a document and read its raw XML contents.
+
+#### Unpacking a file
+`python ooxml/scripts/unpack.py <file.docx>`
+
+#### Key file structures
+* `word/document.xml` - Main document contents
+* `word/comments.xml` - Comments referenced in document.xml
+* `word/media/` - Embedded images and media files
+* Tracked changes use `<w:ins>` (insertions) and `<w:del>` (deletions) tags
+
+## Creating a new Word document
+
+When creating a new Word document from scratch, use **docx-js**, which allows you to create Word documents using JavaScript/TypeScript.
+
+### Workflow
+1. **MANDATORY - READ ENTIRE FILE**: Read [`docx-js.md`](docx-js.md) (~500 lines) completely from start to finish. **NEVER set any range limits when reading this file.** Read the full file content for detailed syntax, critical formatting rules, and best practices before proceeding with document creation.
+2. Create a JavaScript/TypeScript file using Document, Paragraph, TextRun components (You can assume all dependencies are installed, but if not, refer to the dependencies section below)
+3. Export as .docx using Packer.toBuffer()
+
+## Editing an existing Word document
+
+When editing an existing Word document, use the **Document library** (a Python library for OOXML manipulation). The library automatically handles infrastructure setup and provides methods for document manipulation. For complex scenarios, you can access the underlying DOM directly through the library.
+
+### Workflow
+1. **MANDATORY - READ ENTIRE FILE**: Read [`ooxml.md`](ooxml.md) (~600 lines) completely from start to finish. **NEVER set any range limits when reading this file.** Read the full file content for the Document library API and XML patterns for directly editing document files.
+2. Unpack the document: `python ooxml/scripts/unpack.py <file.docx>`
+3. Create and run a Python script using the Document library (see "Document Library" section in ooxml.md)
+4. Pack the final document: `python ooxml/scripts/pack.py <unpacked-dir> <output.docx>`
+
+The Document library provides both high-level methods for common operations and direct DOM access for complex scenarios.
+
+## Redlining workflow for document review
+
+This workflow allows you to plan comprehensive tracked changes using markdown before implementing them in OOXML. **CRITICAL**: For complete tracked changes, you must implement ALL changes systematically.
+
+**Batching Strategy**: Group related changes into batches of 3-10 changes.
This makes debugging manageable while maintaining efficiency. Test each batch before moving to the next. + +**Principle: Minimal, Precise Edits** +When implementing tracked changes, only mark text that actually changes. Repeating unchanged text makes edits harder to review and appears unprofessional. Break replacements into: [unchanged text] + [deletion] + [insertion] + [unchanged text]. Preserve the original run's RSID for unchanged text by extracting the `` element from the original and reusing it. + +Example - Changing "30 days" to "60 days" in a sentence: +```python +# BAD - Replaces entire sentence +'The term is 30 days.The term is 60 days.' + +# GOOD - Only marks what changed, preserves original for unchanged text +'The term is 3060 days.' +``` + +### Tracked changes workflow + +1. **Get markdown representation**: Convert document to markdown with tracked changes preserved: + ```bash + pandoc --track-changes=all path-to-file.docx -o current.md + ``` + +2. **Identify and group changes**: Review the document and identify ALL changes needed, organizing them into logical batches: + + **Location methods** (for finding changes in XML): + - Section/heading numbers (e.g., "Section 3.2", "Article IV") + - Paragraph identifiers if numbered + - Grep patterns with unique surrounding text + - Document structure (e.g., "first paragraph", "signature block") + - **DO NOT use markdown line numbers** - they don't map to XML structure + + **Batch organization** (group 3-10 related changes per batch): + - By section: "Batch 1: Section 2 amendments", "Batch 2: Section 5 updates" + - By type: "Batch 1: Date corrections", "Batch 2: Party name changes" + - By complexity: Start with simple text replacements, then tackle complex structural changes + - Sequential: "Batch 1: Pages 1-3", "Batch 2: Pages 4-6" + +3. **Read documentation and unpack**: + - **MANDATORY - READ ENTIRE FILE**: Read [`ooxml.md`](ooxml.md) (~600 lines) completely from start to finish. **NEVER set any range limits when reading this file.** Pay special attention to the "Document Library" and "Tracked Change Patterns" sections. + - **Unpack the document**: `python ooxml/scripts/unpack.py ` + - **Note the suggested RSID**: The unpack script will suggest an RSID to use for your tracked changes. Copy this RSID for use in step 4b. + +4. **Implement changes in batches**: Group changes logically (by section, by type, or by proximity) and implement them together in a single script. This approach: + - Makes debugging easier (smaller batch = easier to isolate errors) + - Allows incremental progress + - Maintains efficiency (batch size of 3-10 changes works well) + + **Suggested batch groupings:** + - By document section (e.g., "Section 3 changes", "Definitions", "Termination clause") + - By change type (e.g., "Date changes", "Party name updates", "Legal term replacements") + - By proximity (e.g., "Changes on pages 1-3", "Changes in first half of document") + + For each batch of related changes: + + **a. Map text to XML**: Grep for text in `word/document.xml` to verify how text is split across `` elements. + + **b. Create and run script**: Use `get_node` to find nodes, implement changes, then `doc.save()`. See **"Document Library"** section in ooxml.md for patterns. + + **Note**: Always grep `word/document.xml` immediately before writing a script to get current line numbers and verify text content. Line numbers change after each script run. + +5. 
**Pack the document**: After all batches are complete, convert the unpacked directory back to .docx: + ```bash + python ooxml/scripts/pack.py unpacked reviewed-document.docx + ``` + +6. **Final verification**: Do a comprehensive check of the complete document: + - Convert final document to markdown: + ```bash + pandoc --track-changes=all reviewed-document.docx -o verification.md + ``` + - Verify ALL changes were applied correctly: + ```bash + grep "original phrase" verification.md # Should NOT find it + grep "replacement phrase" verification.md # Should find it + ``` + - Check that no unintended changes were introduced + + +## Converting Documents to Images + +To visually analyze Word documents, convert them to images using a two-step process: + +1. **Convert DOCX to PDF**: + ```bash + soffice --headless --convert-to pdf document.docx + ``` + +2. **Convert PDF pages to JPEG images**: + ```bash + pdftoppm -jpeg -r 150 document.pdf page + ``` + This creates files like `page-1.jpg`, `page-2.jpg`, etc. + +Options: +- `-r 150`: Sets resolution to 150 DPI (adjust for quality/size balance) +- `-jpeg`: Output JPEG format (use `-png` for PNG if preferred) +- `-f N`: First page to convert (e.g., `-f 2` starts from page 2) +- `-l N`: Last page to convert (e.g., `-l 5` stops at page 5) +- `page`: Prefix for output files + +Example for specific range: +```bash +pdftoppm -jpeg -r 150 -f 2 -l 5 document.pdf page # Converts only pages 2-5 +``` + +## Code Style Guidelines +**IMPORTANT**: When generating code for DOCX operations: +- Write concise code +- Avoid verbose variable names and redundant operations +- Avoid unnecessary print statements + +## Dependencies + +Required dependencies (install if not available): + +- **pandoc**: `sudo apt-get install pandoc` (for text extraction) +- **docx**: `npm install -g docx` (for creating new documents) +- **LibreOffice**: `sudo apt-get install libreoffice` (for PDF conversion) +- **Poppler**: `sudo apt-get install poppler-utils` (for pdftoppm to convert PDF to images) +- **defusedxml**: `pip install defusedxml` (for secure XML parsing) \ No newline at end of file diff --git a/skills/docx/docx-js.md b/skills/docx/docx-js.md new file mode 100644 index 000000000..c6d7b2ddd --- /dev/null +++ b/skills/docx/docx-js.md @@ -0,0 +1,350 @@ +# DOCX Library Tutorial + +Generate .docx files with JavaScript/TypeScript. + +**Important: Read this entire document before starting.** Critical formatting rules and common pitfalls are covered throughout - skipping sections may result in corrupted files or rendering issues. 
+ +## Setup +Assumes docx is already installed globally +If not installed: `npm install -g docx` + +```javascript +const { Document, Packer, Paragraph, TextRun, Table, TableRow, TableCell, ImageRun, Media, + Header, Footer, AlignmentType, PageOrientation, LevelFormat, ExternalHyperlink, + InternalHyperlink, TableOfContents, HeadingLevel, BorderStyle, WidthType, TabStopType, + TabStopPosition, UnderlineType, ShadingType, VerticalAlign, SymbolRun, PageNumber, + FootnoteReferenceRun, Footnote, PageBreak } = require('docx'); + +// Create & Save +const doc = new Document({ sections: [{ children: [/* content */] }] }); +Packer.toBuffer(doc).then(buffer => fs.writeFileSync("doc.docx", buffer)); // Node.js +Packer.toBlob(doc).then(blob => { /* download logic */ }); // Browser +``` + +## Text & Formatting +```javascript +// IMPORTANT: Never use \n for line breaks - always use separate Paragraph elements +// ❌ WRONG: new TextRun("Line 1\nLine 2") +// ✅ CORRECT: new Paragraph({ children: [new TextRun("Line 1")] }), new Paragraph({ children: [new TextRun("Line 2")] }) + +// Basic text with all formatting options +new Paragraph({ + alignment: AlignmentType.CENTER, + spacing: { before: 200, after: 200 }, + indent: { left: 720, right: 720 }, + children: [ + new TextRun({ text: "Bold", bold: true }), + new TextRun({ text: "Italic", italics: true }), + new TextRun({ text: "Underlined", underline: { type: UnderlineType.DOUBLE, color: "FF0000" } }), + new TextRun({ text: "Colored", color: "FF0000", size: 28, font: "Arial" }), // Arial default + new TextRun({ text: "Highlighted", highlight: "yellow" }), + new TextRun({ text: "Strikethrough", strike: true }), + new TextRun({ text: "x2", superScript: true }), + new TextRun({ text: "H2O", subScript: true }), + new TextRun({ text: "SMALL CAPS", smallCaps: true }), + new SymbolRun({ char: "2022", font: "Symbol" }), // Bullet • + new SymbolRun({ char: "00A9", font: "Arial" }) // Copyright © - Arial for symbols + ] +}) +``` + +## Styles & Professional Formatting + +```javascript +const doc = new Document({ + styles: { + default: { document: { run: { font: "Arial", size: 24 } } }, // 12pt default + paragraphStyles: [ + // Document title style - override built-in Title style + { id: "Title", name: "Title", basedOn: "Normal", + run: { size: 56, bold: true, color: "000000", font: "Arial" }, + paragraph: { spacing: { before: 240, after: 120 }, alignment: AlignmentType.CENTER } }, + // IMPORTANT: Override built-in heading styles by using their exact IDs + { id: "Heading1", name: "Heading 1", basedOn: "Normal", next: "Normal", quickFormat: true, + run: { size: 32, bold: true, color: "000000", font: "Arial" }, // 16pt + paragraph: { spacing: { before: 240, after: 240 }, outlineLevel: 0 } }, // Required for TOC + { id: "Heading2", name: "Heading 2", basedOn: "Normal", next: "Normal", quickFormat: true, + run: { size: 28, bold: true, color: "000000", font: "Arial" }, // 14pt + paragraph: { spacing: { before: 180, after: 180 }, outlineLevel: 1 } }, + // Custom styles use your own IDs + { id: "myStyle", name: "My Style", basedOn: "Normal", + run: { size: 28, bold: true, color: "000000" }, + paragraph: { spacing: { after: 120 }, alignment: AlignmentType.CENTER } } + ], + characterStyles: [{ id: "myCharStyle", name: "My Char Style", + run: { color: "FF0000", bold: true, underline: { type: UnderlineType.SINGLE } } }] + }, + sections: [{ + properties: { page: { margin: { top: 1440, right: 1440, bottom: 1440, left: 1440 } } }, + children: [ + new Paragraph({ heading: 
HeadingLevel.TITLE, children: [new TextRun("Document Title")] }), // Uses overridden Title style + new Paragraph({ heading: HeadingLevel.HEADING_1, children: [new TextRun("Heading 1")] }), // Uses overridden Heading1 style + new Paragraph({ style: "myStyle", children: [new TextRun("Custom paragraph style")] }), + new Paragraph({ children: [ + new TextRun("Normal with "), + new TextRun({ text: "custom char style", style: "myCharStyle" }) + ]}) + ] + }] +}); +``` + +**Professional Font Combinations:** +- **Arial (Headers) + Arial (Body)** - Most universally supported, clean and professional +- **Times New Roman (Headers) + Arial (Body)** - Classic serif headers with modern sans-serif body +- **Georgia (Headers) + Verdana (Body)** - Optimized for screen reading, elegant contrast + +**Key Styling Principles:** +- **Override built-in styles**: Use exact IDs like "Heading1", "Heading2", "Heading3" to override Word's built-in heading styles +- **HeadingLevel constants**: `HeadingLevel.HEADING_1` uses "Heading1" style, `HeadingLevel.HEADING_2` uses "Heading2" style, etc. +- **Include outlineLevel**: Set `outlineLevel: 0` for H1, `outlineLevel: 1` for H2, etc. to ensure TOC works correctly +- **Use custom styles** instead of inline formatting for consistency +- **Set a default font** using `styles.default.document.run.font` - Arial is universally supported +- **Establish visual hierarchy** with different font sizes (titles > headers > body) +- **Add proper spacing** with `before` and `after` paragraph spacing +- **Use colors sparingly**: Default to black (000000) and shades of gray for titles and headings (heading 1, heading 2, etc.) +- **Set consistent margins** (1440 = 1 inch is standard) + + +## Lists (ALWAYS USE PROPER LISTS - NEVER USE UNICODE BULLETS) +```javascript +// Bullets - ALWAYS use the numbering config, NOT unicode symbols +// CRITICAL: Use LevelFormat.BULLET constant, NOT the string "bullet" +const doc = new Document({ + numbering: { + config: [ + { reference: "bullet-list", + levels: [{ level: 0, format: LevelFormat.BULLET, text: "•", alignment: AlignmentType.LEFT, + style: { paragraph: { indent: { left: 720, hanging: 360 } } } }] }, + { reference: "first-numbered-list", + levels: [{ level: 0, format: LevelFormat.DECIMAL, text: "%1.", alignment: AlignmentType.LEFT, + style: { paragraph: { indent: { left: 720, hanging: 360 } } } }] }, + { reference: "second-numbered-list", // Different reference = restarts at 1 + levels: [{ level: 0, format: LevelFormat.DECIMAL, text: "%1.", alignment: AlignmentType.LEFT, + style: { paragraph: { indent: { left: 720, hanging: 360 } } } }] } + ] + }, + sections: [{ + children: [ + // Bullet list items + new Paragraph({ numbering: { reference: "bullet-list", level: 0 }, + children: [new TextRun("First bullet point")] }), + new Paragraph({ numbering: { reference: "bullet-list", level: 0 }, + children: [new TextRun("Second bullet point")] }), + // Numbered list items + new Paragraph({ numbering: { reference: "first-numbered-list", level: 0 }, + children: [new TextRun("First numbered item")] }), + new Paragraph({ numbering: { reference: "first-numbered-list", level: 0 }, + children: [new TextRun("Second numbered item")] }), + // ⚠️ CRITICAL: Different reference = INDEPENDENT list that restarts at 1 + // Same reference = CONTINUES previous numbering + new Paragraph({ numbering: { reference: "second-numbered-list", level: 0 }, + children: [new TextRun("Starts at 1 again (because different reference)")] }) + ] + }] +}); + +// ⚠️ CRITICAL NUMBERING RULE: Each 
reference creates an INDEPENDENT numbered list +// - Same reference = continues numbering (1, 2, 3... then 4, 5, 6...) +// - Different reference = restarts at 1 (1, 2, 3... then 1, 2, 3...) +// Use unique reference names for each separate numbered section! + +// ⚠️ CRITICAL: NEVER use unicode bullets - they create fake lists that don't work properly +// new TextRun("• Item") // WRONG +// new SymbolRun({ char: "2022" }) // WRONG +// ✅ ALWAYS use numbering config with LevelFormat.BULLET for real Word lists +``` + +## Tables +```javascript +// Complete table with margins, borders, headers, and bullet points +const tableBorder = { style: BorderStyle.SINGLE, size: 1, color: "CCCCCC" }; +const cellBorders = { top: tableBorder, bottom: tableBorder, left: tableBorder, right: tableBorder }; + +new Table({ + columnWidths: [4680, 4680], // ⚠️ CRITICAL: Set column widths at table level - values in DXA (twentieths of a point) + margins: { top: 100, bottom: 100, left: 180, right: 180 }, // Set once for all cells + rows: [ + new TableRow({ + tableHeader: true, + children: [ + new TableCell({ + borders: cellBorders, + width: { size: 4680, type: WidthType.DXA }, // ALSO set width on each cell + // ⚠️ CRITICAL: Always use ShadingType.CLEAR to prevent black backgrounds in Word. + shading: { fill: "D5E8F0", type: ShadingType.CLEAR }, + verticalAlign: VerticalAlign.CENTER, + children: [new Paragraph({ + alignment: AlignmentType.CENTER, + children: [new TextRun({ text: "Header", bold: true, size: 22 })] + })] + }), + new TableCell({ + borders: cellBorders, + width: { size: 4680, type: WidthType.DXA }, // ALSO set width on each cell + shading: { fill: "D5E8F0", type: ShadingType.CLEAR }, + children: [new Paragraph({ + alignment: AlignmentType.CENTER, + children: [new TextRun({ text: "Bullet Points", bold: true, size: 22 })] + })] + }) + ] + }), + new TableRow({ + children: [ + new TableCell({ + borders: cellBorders, + width: { size: 4680, type: WidthType.DXA }, // ALSO set width on each cell + children: [new Paragraph({ children: [new TextRun("Regular data")] })] + }), + new TableCell({ + borders: cellBorders, + width: { size: 4680, type: WidthType.DXA }, // ALSO set width on each cell + children: [ + new Paragraph({ + numbering: { reference: "bullet-list", level: 0 }, + children: [new TextRun("First bullet point")] + }), + new Paragraph({ + numbering: { reference: "bullet-list", level: 0 }, + children: [new TextRun("Second bullet point")] + }) + ] + }) + ] + }) + ] +}) +``` + +**IMPORTANT: Table Width & Borders** +- Use BOTH `columnWidths: [width1, width2, ...]` array AND `width: { size: X, type: WidthType.DXA }` on each cell +- Values in DXA (twentieths of a point): 1440 = 1 inch, Letter usable width = 9360 DXA (with 1" margins) +- Apply borders to individual `TableCell` elements, NOT the `Table` itself + +**Precomputed Column Widths (Letter size with 1" margins = 9360 DXA total):** +- **2 columns:** `columnWidths: [4680, 4680]` (equal width) +- **3 columns:** `columnWidths: [3120, 3120, 3120]` (equal width) + +## Links & Navigation +```javascript +// TOC (requires headings) - CRITICAL: Use HeadingLevel only, NOT custom styles +// ❌ WRONG: new Paragraph({ heading: HeadingLevel.HEADING_1, style: "customHeader", children: [new TextRun("Title")] }) +// ✅ CORRECT: new Paragraph({ heading: HeadingLevel.HEADING_1, children: [new TextRun("Title")] }) +new TableOfContents("Table of Contents", { hyperlink: true, headingStyleRange: "1-3" }), + +// External link +new Paragraph({ + children: [new ExternalHyperlink({ + 
children: [new TextRun({ text: "Google", style: "Hyperlink" })], + link: "https://www.google.com" + })] +}), + +// Internal link & bookmark +new Paragraph({ + children: [new InternalHyperlink({ + children: [new TextRun({ text: "Go to Section", style: "Hyperlink" })], + anchor: "section1" + })] +}), +new Paragraph({ + children: [new TextRun("Section Content")], + bookmark: { id: "section1", name: "section1" } +}), +``` + +## Images & Media +```javascript +// Basic image with sizing & positioning +// CRITICAL: Always specify 'type' parameter - it's REQUIRED for ImageRun +new Paragraph({ + alignment: AlignmentType.CENTER, + children: [new ImageRun({ + type: "png", // NEW REQUIREMENT: Must specify image type (png, jpg, jpeg, gif, bmp, svg) + data: fs.readFileSync("image.png"), + transformation: { width: 200, height: 150, rotation: 0 }, // rotation in degrees + altText: { title: "Logo", description: "Company logo", name: "Name" } // IMPORTANT: All three fields are required + })] +}) +``` + +## Page Breaks +```javascript +// Manual page break +new Paragraph({ children: [new PageBreak()] }), + +// Page break before paragraph +new Paragraph({ + pageBreakBefore: true, + children: [new TextRun("This starts on a new page")] +}) + +// ⚠️ CRITICAL: NEVER use PageBreak standalone - it will create invalid XML that Word cannot open +// ❌ WRONG: new PageBreak() +// ✅ CORRECT: new Paragraph({ children: [new PageBreak()] }) +``` + +## Headers/Footers & Page Setup +```javascript +const doc = new Document({ + sections: [{ + properties: { + page: { + margin: { top: 1440, right: 1440, bottom: 1440, left: 1440 }, // 1440 = 1 inch + size: { orientation: PageOrientation.LANDSCAPE }, + pageNumbers: { start: 1, formatType: "decimal" } // "upperRoman", "lowerRoman", "upperLetter", "lowerLetter" + } + }, + headers: { + default: new Header({ children: [new Paragraph({ + alignment: AlignmentType.RIGHT, + children: [new TextRun("Header Text")] + })] }) + }, + footers: { + default: new Footer({ children: [new Paragraph({ + alignment: AlignmentType.CENTER, + children: [new TextRun("Page "), new TextRun({ children: [PageNumber.CURRENT] }), new TextRun(" of "), new TextRun({ children: [PageNumber.TOTAL_PAGES] })] + })] }) + }, + children: [/* content */] + }] +}); +``` + +## Tabs +```javascript +new Paragraph({ + tabStops: [ + { type: TabStopType.LEFT, position: TabStopPosition.MAX / 4 }, + { type: TabStopType.CENTER, position: TabStopPosition.MAX / 2 }, + { type: TabStopType.RIGHT, position: TabStopPosition.MAX * 3 / 4 } + ], + children: [new TextRun("Left\tCenter\tRight")] +}) +``` + +## Constants & Quick Reference +- **Underlines:** `SINGLE`, `DOUBLE`, `WAVY`, `DASH` +- **Borders:** `SINGLE`, `DOUBLE`, `DASHED`, `DOTTED` +- **Numbering:** `DECIMAL` (1,2,3), `UPPER_ROMAN` (I,II,III), `LOWER_LETTER` (a,b,c) +- **Tabs:** `LEFT`, `CENTER`, `RIGHT`, `DECIMAL` +- **Symbols:** `"2022"` (•), `"00A9"` (©), `"00AE"` (®), `"2122"` (™), `"00B0"` (°), `"F070"` (✓), `"F0FC"` (✗) + +## Critical Issues & Common Mistakes +- **CRITICAL: PageBreak must ALWAYS be inside a Paragraph** - standalone PageBreak creates invalid XML that Word cannot open +- **ALWAYS use ShadingType.CLEAR for table cell shading** - Never use ShadingType.SOLID (causes black background). 
+- Measurements in DXA (1440 = 1 inch) | Each table cell needs ≥1 Paragraph | TOC requires HeadingLevel styles only +- **ALWAYS use custom styles** with Arial font for professional appearance and proper visual hierarchy +- **ALWAYS set a default font** using `styles.default.document.run.font` - Arial recommended +- **ALWAYS use columnWidths array for tables** + individual cell widths for compatibility +- **NEVER use unicode symbols for bullets** - always use proper numbering configuration with `LevelFormat.BULLET` constant (NOT the string "bullet") +- **NEVER use \n for line breaks anywhere** - always use separate Paragraph elements for each line +- **ALWAYS use TextRun objects within Paragraph children** - never use text property directly on Paragraph +- **CRITICAL for images**: ImageRun REQUIRES `type` parameter - always specify "png", "jpg", "jpeg", "gif", "bmp", or "svg" +- **CRITICAL for bullets**: Must use `LevelFormat.BULLET` constant, not string "bullet", and include `text: "•"` for the bullet character +- **CRITICAL for numbering**: Each numbering reference creates an INDEPENDENT list. Same reference = continues numbering (1,2,3 then 4,5,6). Different reference = restarts at 1 (1,2,3 then 1,2,3). Use unique reference names for each separate numbered section! +- **CRITICAL for TOC**: When using TableOfContents, headings must use HeadingLevel ONLY - do NOT add custom styles to heading paragraphs or TOC will break +- **Tables**: Set `columnWidths` array + individual cell widths, apply borders to cells not table +- **Set table margins at TABLE level** for consistent cell padding (avoids repetition per cell) \ No newline at end of file diff --git a/skills/docx/ooxml.md b/skills/docx/ooxml.md new file mode 100644 index 000000000..7677e7b83 --- /dev/null +++ b/skills/docx/ooxml.md @@ -0,0 +1,610 @@ +# Office Open XML Technical Reference + +**Important: Read this entire document before starting.** This document covers: +- [Technical Guidelines](#technical-guidelines) - Schema compliance rules and validation requirements +- [Document Content Patterns](#document-content-patterns) - XML patterns for headings, lists, tables, formatting, etc. 
+- [Document Library (Python)](#document-library-python) - Recommended approach for OOXML manipulation with automatic infrastructure setup +- [Tracked Changes (Redlining)](#tracked-changes-redlining) - XML patterns for implementing tracked changes + +## Technical Guidelines + +### Schema Compliance +- **Element ordering in ``**: ``, ``, ``, ``, `` +- **Whitespace**: Add `xml:space='preserve'` to `` elements with leading/trailing spaces +- **Unicode**: Escape characters in ASCII content: `"` becomes `“` + - **Character encoding reference**: Curly quotes `""` become `“”`, apostrophe `'` becomes `’`, em-dash `—` becomes `—` +- **Tracked changes**: Use `` and `` tags with `w:author="Claude"` outside `` elements + - **Critical**: `` closes with ``, `` closes with `` - never mix + - **RSIDs must be 8-digit hex**: Use values like `00AB1234` (only 0-9, A-F characters) + - **trackRevisions placement**: Add `` after `` in settings.xml +- **Images**: Add to `word/media/`, reference in `document.xml`, set dimensions to prevent overflow + +## Document Content Patterns + +### Basic Structure +```xml + + Text content + +``` + +### Headings and Styles +```xml + + + + + + Document Title + + + + + Section Heading + +``` + +### Text Formatting +```xml + +Bold + +Italic + +Underlined + +Highlighted +``` + +### Lists +```xml + + + + + + + + First item + + + + + + + + + + New list item 1 + + + + + + + + + + + Bullet item + +``` + +### Tables +```xml + + + + + + + + + + + + Cell 1 + + + + Cell 2 + + + +``` + +### Layout +```xml + + + + + + + + + + + + New Section Title + + + + + + + + + + Centered text + + + + + + + + Monospace text + + + + + + + This text is Courier New + + and this text uses default font + +``` + +## File Updates + +When adding content, update these files: + +**`word/_rels/document.xml.rels`:** +```xml + + +``` + +**`[Content_Types].xml`:** +```xml + + +``` + +### Images +**CRITICAL**: Calculate dimensions to prevent page overflow and maintain aspect ratio. + +```xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +### Links (Hyperlinks) + +**IMPORTANT**: All hyperlinks (both internal and external) require the Hyperlink style to be defined in styles.xml. Without this style, links will look like regular text instead of blue underlined clickable links. + +**External Links:** +```xml + + + + + Link Text + + + + + +``` + +**Internal Links:** + +```xml + + + + + Link Text + + + + + +Target content + +``` + +**Hyperlink Style (required in styles.xml):** +```xml + + + + + + + + + + +``` + +## Document Library (Python) + +Use the Document class from `scripts/document.py` for all tracked changes and comments. It automatically handles infrastructure setup (people.xml, RSIDs, settings.xml, comment files, relationships, content types). Only use direct XML manipulation for complex scenarios not supported by the library. 
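To make the overall shape of a session concrete before the detailed sections below, here is a minimal sketch using only the calls documented in this file. The unpacked directory name, the search text, and the replacement wording are assumptions for illustration; real edits should preserve the original run's `<w:rPr>` and mark only the text that actually changes, as described under "Creating Tracked Changes":

```python
# Minimal sketch of the typical flow: open, locate, edit with tracked changes, save.
# Assumes the document was unpacked into ./unpacked and contains the text "30 days".
from scripts.document import Document

doc = Document('unpacked', author="Claude")
editor = doc["word/document.xml"]

# Find the run holding the text to change.
node = editor.get_node(tag="w:r", contains="30 days")

# Replace it with a tracked deletion + insertion; the library auto-injects
# w:id, w:author, w:date and related attributes on the new elements.
editor.replace_node(
    node,
    '<w:del><w:r><w:delText>30 days</w:delText></w:r></w:del>'
    '<w:ins><w:r><w:t>45 days</w:t></w:r></w:ins>'
)

doc.save()  # validates and copies the result back to the original directory
```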
+**Working with Unicode and Entities:**
+- **Searching**: Both entity notation and Unicode characters work - `contains="&#8220;Company"` and `contains="\u201cCompany"` find the same text
+- **Replacing**: Use either entities (`&#8220;`) or Unicode (`\u201c`) - both work and will be converted appropriately based on the file's encoding (ascii → entities, utf-8 → Unicode)
+
+### Initialization
+
+**Find the docx skill root** (directory containing `scripts/` and `ooxml/`):
+```bash
+# Search for document.py to locate the skill root
+# Note: /mnt/skills is used here as an example; check your context for the actual location
+find /mnt/skills -name "document.py" -path "*/docx/scripts/*" 2>/dev/null | head -1
+# Example output: /mnt/skills/docx/scripts/document.py
+# Skill root is: /mnt/skills/docx
+```
+
+**Run your script with PYTHONPATH** set to the docx skill root:
+```bash
+PYTHONPATH=/mnt/skills/docx python your_script.py
+```
+
+**In your script**, import from the skill root:
+```python
+from scripts.document import Document, DocxXMLEditor
+
+# Basic initialization (automatically creates temp copy and sets up infrastructure)
+doc = Document('unpacked')
+
+# Customize author and initials
+doc = Document('unpacked', author="John Doe", initials="JD")
+
+# Enable track revisions mode
+doc = Document('unpacked', track_revisions=True)
+
+# Specify custom RSID (auto-generated if not provided)
+doc = Document('unpacked', rsid="07DC5ECB")
+```
+
+### Creating Tracked Changes
+
+**CRITICAL**: Only mark text that actually changes. Keep ALL unchanged text outside `<w:ins>`/`<w:del>` tags. Marking unchanged text makes edits unprofessional and harder to review.
+
+**Attribute Handling**: The Document class auto-injects attributes (w:id, w:date, w:rsidR, w:rsidDel, w16du:dateUtc, xml:space) into new elements. When preserving unchanged text from the original document, copy the original `<w:r>` element with its existing attributes to maintain document integrity.
+ +**Method Selection Guide**: +- **Adding your own changes to regular text**: Use `replace_node()` with ``/`` tags, or `suggest_deletion()` for removing entire `` or `` elements +- **Partially modifying another author's tracked change**: Use `replace_node()` to nest your changes inside their ``/`` +- **Completely rejecting another author's insertion**: Use `revert_insertion()` on the `` element (NOT `suggest_deletion()`) +- **Completely rejecting another author's deletion**: Use `revert_deletion()` on the `` element to restore deleted content using tracked changes + +```python +# Minimal edit - change one word: "The report is monthly" → "The report is quarterly" +# Original: The report is monthly +node = doc["word/document.xml"].get_node(tag="w:r", contains="The report is monthly") +rpr = tags[0].toxml() if (tags := node.getElementsByTagName("w:rPr")) else "" +replacement = f'{rpr}The report is {rpr}monthly{rpr}quarterly' +doc["word/document.xml"].replace_node(node, replacement) + +# Minimal edit - change number: "within 30 days" → "within 45 days" +# Original: within 30 days +node = doc["word/document.xml"].get_node(tag="w:r", contains="within 30 days") +rpr = tags[0].toxml() if (tags := node.getElementsByTagName("w:rPr")) else "" +replacement = f'{rpr}within {rpr}30{rpr}45{rpr} days' +doc["word/document.xml"].replace_node(node, replacement) + +# Complete replacement - preserve formatting even when replacing all text +node = doc["word/document.xml"].get_node(tag="w:r", contains="apple") +rpr = tags[0].toxml() if (tags := node.getElementsByTagName("w:rPr")) else "" +replacement = f'{rpr}apple{rpr}banana orange' +doc["word/document.xml"].replace_node(node, replacement) + +# Insert new content (no attributes needed - auto-injected) +node = doc["word/document.xml"].get_node(tag="w:r", contains="existing text") +doc["word/document.xml"].insert_after(node, 'new text') + +# Partially delete another author's insertion +# Original: quarterly financial report +# Goal: Delete only "financial" to make it "quarterly report" +node = doc["word/document.xml"].get_node(tag="w:ins", attrs={"w:id": "5"}) +# IMPORTANT: Preserve w:author="Jane Smith" on the outer to maintain authorship +replacement = ''' + quarterly + financial + report +''' +doc["word/document.xml"].replace_node(node, replacement) + +# Change part of another author's insertion +# Original: in silence, safe and sound +# Goal: Change "safe and sound" to "soft and unbound" +node = doc["word/document.xml"].get_node(tag="w:ins", attrs={"w:id": "8"}) +replacement = f''' + in silence, + + + soft and unbound + + + safe and sound +''' +doc["word/document.xml"].replace_node(node, replacement) + +# Delete entire run (use only when deleting all content; use replace_node for partial deletions) +node = doc["word/document.xml"].get_node(tag="w:r", contains="text to delete") +doc["word/document.xml"].suggest_deletion(node) + +# Delete entire paragraph (in-place, handles both regular and numbered list paragraphs) +para = doc["word/document.xml"].get_node(tag="w:p", contains="paragraph to delete") +doc["word/document.xml"].suggest_deletion(para) + +# Add new numbered list item +target_para = doc["word/document.xml"].get_node(tag="w:p", contains="existing list item") +pPr = tags[0].toxml() if (tags := target_para.getElementsByTagName("w:pPr")) else "" +new_item = f'{pPr}New item' +tracked_para = DocxXMLEditor.suggest_paragraph(new_item) +doc["word/document.xml"].insert_after(target_para, tracked_para) +# Optional: add spacing paragraph before content for 
better visual separation +# spacing = DocxXMLEditor.suggest_paragraph('') +# doc["word/document.xml"].insert_after(target_para, spacing + tracked_para) +``` + +### Adding Comments + +```python +# Add comment spanning two existing tracked changes +# Note: w:id is auto-generated. Only search by w:id if you know it from XML inspection +start_node = doc["word/document.xml"].get_node(tag="w:del", attrs={"w:id": "1"}) +end_node = doc["word/document.xml"].get_node(tag="w:ins", attrs={"w:id": "2"}) +doc.add_comment(start=start_node, end=end_node, text="Explanation of this change") + +# Add comment on a paragraph +para = doc["word/document.xml"].get_node(tag="w:p", contains="paragraph text") +doc.add_comment(start=para, end=para, text="Comment on this paragraph") + +# Add comment on newly created tracked change +# First create the tracked change +node = doc["word/document.xml"].get_node(tag="w:r", contains="old") +new_nodes = doc["word/document.xml"].replace_node( + node, + 'oldnew' +) +# Then add comment on the newly created elements +# new_nodes[0] is the , new_nodes[1] is the +doc.add_comment(start=new_nodes[0], end=new_nodes[1], text="Changed old to new per requirements") + +# Reply to existing comment +doc.reply_to_comment(parent_comment_id=0, text="I agree with this change") +``` + +### Rejecting Tracked Changes + +**IMPORTANT**: Use `revert_insertion()` to reject insertions and `revert_deletion()` to restore deletions using tracked changes. Use `suggest_deletion()` only for regular unmarked content. + +```python +# Reject insertion (wraps it in deletion) +# Use this when another author inserted text that you want to delete +ins = doc["word/document.xml"].get_node(tag="w:ins", attrs={"w:id": "5"}) +nodes = doc["word/document.xml"].revert_insertion(ins) # Returns [ins] + +# Reject deletion (creates insertion to restore deleted content) +# Use this when another author deleted text that you want to restore +del_elem = doc["word/document.xml"].get_node(tag="w:del", attrs={"w:id": "3"}) +nodes = doc["word/document.xml"].revert_deletion(del_elem) # Returns [del_elem, new_ins] + +# Reject all insertions in a paragraph +para = doc["word/document.xml"].get_node(tag="w:p", contains="paragraph text") +nodes = doc["word/document.xml"].revert_insertion(para) # Returns [para] + +# Reject all deletions in a paragraph +para = doc["word/document.xml"].get_node(tag="w:p", contains="paragraph text") +nodes = doc["word/document.xml"].revert_deletion(para) # Returns [para] +``` + +### Inserting Images + +**CRITICAL**: The Document class works with a temporary copy at `doc.unpacked_path`. Always copy images to this temp directory, not the original unpacked folder. 
+ +```python +from PIL import Image +import shutil, os + +# Initialize document first +doc = Document('unpacked') + +# Copy image and calculate full-width dimensions with aspect ratio +media_dir = os.path.join(doc.unpacked_path, 'word/media') +os.makedirs(media_dir, exist_ok=True) +shutil.copy('image.png', os.path.join(media_dir, 'image1.png')) +img = Image.open(os.path.join(media_dir, 'image1.png')) +width_emus = int(6.5 * 914400) # 6.5" usable width, 914400 EMUs/inch +height_emus = int(width_emus * img.size[1] / img.size[0]) + +# Add relationship and content type +rels_editor = doc['word/_rels/document.xml.rels'] +next_rid = rels_editor.get_next_rid() +rels_editor.append_to(rels_editor.dom.documentElement, + f'') +doc['[Content_Types].xml'].append_to(doc['[Content_Types].xml'].dom.documentElement, + '') + +# Insert image +node = doc["word/document.xml"].get_node(tag="w:p", line_number=100) +doc["word/document.xml"].insert_after(node, f''' + + + + + + + + + + + + + + + + + +''') +``` + +### Getting Nodes + +```python +# By text content +node = doc["word/document.xml"].get_node(tag="w:p", contains="specific text") + +# By line range +para = doc["word/document.xml"].get_node(tag="w:p", line_number=range(100, 150)) + +# By attributes +node = doc["word/document.xml"].get_node(tag="w:del", attrs={"w:id": "1"}) + +# By exact line number (must be line number where tag opens) +para = doc["word/document.xml"].get_node(tag="w:p", line_number=42) + +# Combine filters +node = doc["word/document.xml"].get_node(tag="w:r", line_number=range(40, 60), contains="text") + +# Disambiguate when text appears multiple times - add line_number range +node = doc["word/document.xml"].get_node(tag="w:r", contains="Section", line_number=range(2400, 2500)) +``` + +### Saving + +```python +# Save with automatic validation (copies back to original directory) +doc.save() # Validates by default, raises error if validation fails + +# Save to different location +doc.save('modified-unpacked') + +# Skip validation (debugging only - needing this in production indicates XML issues) +doc.save(validate=False) +``` + +### Direct DOM Manipulation + +For complex scenarios not covered by the library: + +```python +# Access any XML file +editor = doc["word/document.xml"] +editor = doc["word/comments.xml"] + +# Direct DOM access (defusedxml.minidom.Document) +node = doc["word/document.xml"].get_node(tag="w:p", line_number=5) +parent = node.parentNode +parent.removeChild(node) +parent.appendChild(node) # Move to end + +# General document manipulation (without tracked changes) +old_node = doc["word/document.xml"].get_node(tag="w:p", contains="original text") +doc["word/document.xml"].replace_node(old_node, "replacement text") + +# Multiple insertions - use return value to maintain order +node = doc["word/document.xml"].get_node(tag="w:r", line_number=100) +nodes = doc["word/document.xml"].insert_after(node, "A") +nodes = doc["word/document.xml"].insert_after(nodes[-1], "B") +nodes = doc["word/document.xml"].insert_after(nodes[-1], "C") +# Results in: original_node, A, B, C +``` + +## Tracked Changes (Redlining) + +**Use the Document class above for all tracked changes.** The patterns below are for reference when constructing replacement XML strings. + +### Validation Rules +The validator checks that the document text matches the original after reverting Claude's changes. 
+
+## Tracked Changes (Redlining)
+
+**Use the Document class above for all tracked changes.** The patterns below are for reference when constructing replacement XML strings.
+
+### Validation Rules
+The validator checks that the document text matches the original after reverting Claude's changes. This means:
+- **NEVER modify text inside another author's `<w:ins>` or `<w:del>` tags**
+- **ALWAYS use nested deletions** to remove another author's insertions
+- **Every edit must be properly tracked** with `<w:ins>` or `<w:del>` tags
+
+### Tracked Change Patterns
+
+**CRITICAL RULES**:
+1. Never modify the content inside another author's tracked changes. Always use nested deletions.
+2. **XML Structure**: Always place `<w:ins>` and `<w:del>` at paragraph level containing complete `<w:r>` elements. Never nest them inside `<w:r>` elements - this creates invalid XML that breaks document processing.
+
+In the patterns below, the `w:id`, `w:author`, and `w:date` values are illustrative placeholders; use your own author name and unique ids.
+
+**Text Insertion:**
+```xml
+<w:ins w:id="100" w:author="Claude" w:date="2025-01-01T00:00:00Z">
+  <w:r>
+    <w:t>inserted text</w:t>
+  </w:r>
+</w:ins>
+```
+
+**Text Deletion:**
+```xml
+<w:del w:id="101" w:author="Claude" w:date="2025-01-01T00:00:00Z">
+  <w:r>
+    <w:delText>deleted text</w:delText>
+  </w:r>
+</w:del>
+```
+
+**Deleting Another Author's Insertion (MUST use nested structure):**
+```xml
+<w:ins w:id="50" w:author="Original Author" w:date="2024-06-01T00:00:00Z">
+  <w:del w:id="102" w:author="Claude" w:date="2025-01-01T00:00:00Z">
+    <w:r>
+      <w:delText>monthly</w:delText>
+    </w:r>
+  </w:del>
+</w:ins>
+<w:ins w:id="103" w:author="Claude" w:date="2025-01-01T00:00:00Z">
+  <w:r>
+    <w:t>weekly</w:t>
+  </w:r>
+</w:ins>
+```
+
+**Restoring Another Author's Deletion:**
+```xml
+<w:del w:id="60" w:author="Original Author" w:date="2024-06-01T00:00:00Z">
+  <w:r>
+    <w:delText>within 30 days</w:delText>
+  </w:r>
+</w:del>
+<w:ins w:id="104" w:author="Claude" w:date="2025-01-01T00:00:00Z">
+  <w:r>
+    <w:t>within 30 days</w:t>
+  </w:r>
+</w:ins>
+```
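+
+When you need to build these replacement strings from Python (for example, to pass to `replace_node()` as shown earlier), a small helper keeps the markup consistent. A minimal sketch - the helper name and the default id/author/date values are illustrative:
+
+```python
+from xml.sax.saxutils import escape
+
+def tracked_replacement(old_text, new_text, author="Claude",
+                        date="2025-01-01T00:00:00Z", del_id=900, ins_id=901):
+    """Build a <w:del> + <w:ins> pair for replace_node(); ids must be unique in the document."""
+    return (
+        f'<w:del w:id="{del_id}" w:author="{author}" w:date="{date}">'
+        f'<w:r><w:delText xml:space="preserve">{escape(old_text)}</w:delText></w:r></w:del>'
+        f'<w:ins w:id="{ins_id}" w:author="{author}" w:date="{date}">'
+        f'<w:r><w:t xml:space="preserve">{escape(new_text)}</w:t></w:r></w:ins>'
+    )
+
+# Usage: replace a run of regular, untracked content and keep the change tracked
+# (for another author's insertion, use revert_insertion() or a nested deletion instead)
+node = doc["word/document.xml"].get_node(tag="w:r", contains="monthly")
+new_nodes = doc["word/document.xml"].replace_node(node, tracked_replacement("monthly", "weekly"))
+```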
\ No newline at end of file
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-chart.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-chart.xsd new file mode 100644 index 000000000..6454ef9a9 --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-chart.xsd @@ -0,0 +1,1499 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd new file mode 100644 index 000000000..afa4f463e --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd @@ -0,0 +1,146 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd new file mode 100644 index 000000000..64e66b8ab --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd @@ -0,0 +1,1085 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd new file mode 100644 index 000000000..687eea829 --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd @@ -0,0 +1,11 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-main.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-main.xsd new file mode 100644 index 000000000..6ac81b06b --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-main.xsd @@ -0,0 +1,3081 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-picture.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-picture.xsd new file mode 100644 index 000000000..1dbf05140 --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-picture.xsd @@ -0,0 +1,23 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd new file mode 100644 index 000000000..f1af17db4 --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd @@ -0,0 +1,185 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd new file mode 100644 index 000000000..0a185ab6e --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd @@ -0,0 +1,287 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/pml.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/pml.xsd new file mode 100644 index 000000000..14ef48886 --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/pml.xsd @@ -0,0 +1,1676 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd new file mode 100644 index 000000000..c20f3bf14 --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd @@ -0,0 +1,28 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd new file mode 100644 index 000000000..ac6025226 --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd @@ -0,0 +1,144 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd new file mode 100644 index 000000000..424b8ba8d --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd @@ -0,0 +1,174 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd new file mode 100644 index 000000000..2bddce292 --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd @@ -0,0 +1,25 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd new file mode 100644 index 000000000..8a8c18ba2 --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd @@ -0,0 +1,18 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd new file mode 100644 index 000000000..5c42706a0 --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd @@ -0,0 +1,59 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd new file mode 100644 index 000000000..853c341c8 --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd @@ -0,0 +1,56 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd new file mode 100644 index 000000000..da835ee82 --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd @@ -0,0 +1,195 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-math.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-math.xsd new file mode 100644 index 000000000..87ad2658f --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-math.xsd @@ -0,0 +1,582 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd new file mode 100644 index 000000000..9e86f1b2b --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd @@ -0,0 +1,25 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/sml.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/sml.xsd new file mode 100644 index 000000000..d0be42e75 --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/sml.xsd @@ -0,0 +1,4439 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-main.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-main.xsd new file mode 100644 index 000000000..8821dd183 --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-main.xsd @@ -0,0 +1,570 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd new file mode 100644 index 000000000..ca2575c75 --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd @@ -0,0 +1,509 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd new file mode 100644 index 000000000..dd079e603 --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd @@ -0,0 +1,12 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd new file mode 100644 index 000000000..3dd6cf625 --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd @@ -0,0 +1,108 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd new file mode 100644 index 000000000..f1041e34e --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd @@ -0,0 +1,96 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/wml.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/wml.xsd new file mode 100644 index 000000000..9c5b7a633 --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/wml.xsd @@ -0,0 +1,3646 @@
diff --git a/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/xml.xsd b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/xml.xsd new file mode 100644 index 000000000..0f13678d8 --- /dev/null +++ b/skills/docx/ooxml/schemas/ISO-IEC29500-4_2016/xml.xsd @@ -0,0 +1,116 @@
+ See http://www.w3.org/XML/1998/namespace.html and
+ http://www.w3.org/TR/REC-xml for information about this namespace.
+ + This schema document describes the XML namespace, in a form + suitable for import by other schema documents. + + Note that local names in this namespace are intended to be defined + only by the World Wide Web Consortium or its subgroups. The + following names are currently defined in this namespace and should + not be used with conflicting semantics by any Working Group, + specification, or document instance: + + base (as an attribute name): denotes an attribute whose value + provides a URI to be used as the base for interpreting any + relative URIs in the scope of the element on which it + appears; its value is inherited. This name is reserved + by virtue of its definition in the XML Base specification. + + lang (as an attribute name): denotes an attribute whose value + is a language code for the natural language of the content of + any element; its value is inherited. This name is reserved + by virtue of its definition in the XML specification. + + space (as an attribute name): denotes an attribute whose + value is a keyword indicating what whitespace processing + discipline is intended for the content of the element; its + value is inherited. This name is reserved by virtue of its + definition in the XML specification. + + Father (in any context at all): denotes Jon Bosak, the chair of + the original XML Working Group. This name is reserved by + the following decision of the W3C XML Plenary and + XML Coordination groups: + + In appreciation for his vision, leadership and dedication + the W3C XML Plenary on this 10th day of February, 2000 + reserves for Jon Bosak in perpetuity the XML name + xml:Father + + + + + This schema defines attributes and an attribute group + suitable for use by + schemas wishing to allow xml:base, xml:lang or xml:space attributes + on elements they define. + + To enable this, such a schema must import this schema + for the XML namespace, e.g. as follows: + <schema . . .> + . . . + <import namespace="http://www.w3.org/XML/1998/namespace" + schemaLocation="http://www.w3.org/2001/03/xml.xsd"/> + + Subsequently, qualified reference to any of the attributes + or the group defined below will have the desired effect, e.g. + + <type . . .> + . . . + <attributeGroup ref="xml:specialAttrs"/> + + will define a type which will schema-validate an instance + element with any of those attributes + + + + In keeping with the XML Schema WG's standard versioning + policy, this schema document will persist at + http://www.w3.org/2001/03/xml.xsd. + At the date of issue it can also be found at + http://www.w3.org/2001/xml.xsd. + The schema document at that URI may however change in the future, + in order to remain compatible with the latest version of XML Schema + itself. In other words, if the XML Schema namespace changes, the version + of this document at + http://www.w3.org/2001/xml.xsd will change + accordingly; the version at + http://www.w3.org/2001/03/xml.xsd will not change. + + + + + + In due course, we should install the relevant ISO 2- and 3-letter + codes as the enumerated possible values . . . + + + + + + + + + + + + + + + See http://www.w3.org/TR/xmlbase/ for + information about this attribute. 
diff --git a/skills/docx/ooxml/schemas/ecma/fouth-edition/opc-contentTypes.xsd b/skills/docx/ooxml/schemas/ecma/fouth-edition/opc-contentTypes.xsd new file mode 100644 index 000000000..a6de9d273 --- /dev/null +++ b/skills/docx/ooxml/schemas/ecma/fouth-edition/opc-contentTypes.xsd @@ -0,0 +1,42 @@
diff --git a/skills/docx/ooxml/schemas/ecma/fouth-edition/opc-coreProperties.xsd b/skills/docx/ooxml/schemas/ecma/fouth-edition/opc-coreProperties.xsd new file mode 100644 index 000000000..10e978b66 --- /dev/null +++ b/skills/docx/ooxml/schemas/ecma/fouth-edition/opc-coreProperties.xsd @@ -0,0 +1,50 @@
diff --git a/skills/docx/ooxml/schemas/ecma/fouth-edition/opc-digSig.xsd b/skills/docx/ooxml/schemas/ecma/fouth-edition/opc-digSig.xsd new file mode 100644 index 000000000..4248bf7a3 --- /dev/null +++ b/skills/docx/ooxml/schemas/ecma/fouth-edition/opc-digSig.xsd @@ -0,0 +1,49 @@
diff --git a/skills/docx/ooxml/schemas/ecma/fouth-edition/opc-relationships.xsd b/skills/docx/ooxml/schemas/ecma/fouth-edition/opc-relationships.xsd new file mode 100644 index 000000000..564974671 --- /dev/null +++ b/skills/docx/ooxml/schemas/ecma/fouth-edition/opc-relationships.xsd @@ -0,0 +1,33 @@
diff --git a/skills/docx/ooxml/schemas/mce/mc.xsd b/skills/docx/ooxml/schemas/mce/mc.xsd new file mode 100644 index 000000000..ef725457c --- /dev/null +++ b/skills/docx/ooxml/schemas/mce/mc.xsd @@ -0,0 +1,75 @@
diff --git a/skills/docx/ooxml/schemas/microsoft/wml-2010.xsd b/skills/docx/ooxml/schemas/microsoft/wml-2010.xsd new file mode 100644 index 000000000..f65f77773 --- /dev/null +++ b/skills/docx/ooxml/schemas/microsoft/wml-2010.xsd @@ -0,0 +1,560 @@
diff --git a/skills/docx/ooxml/schemas/microsoft/wml-2012.xsd b/skills/docx/ooxml/schemas/microsoft/wml-2012.xsd new file mode 100644 index 000000000..6b00755a9 --- /dev/null +++ b/skills/docx/ooxml/schemas/microsoft/wml-2012.xsd @@ -0,0 +1,67 @@
+ + + diff --git a/skills/docx/ooxml/schemas/microsoft/wml-2018.xsd b/skills/docx/ooxml/schemas/microsoft/wml-2018.xsd new file mode 100644 index 000000000..f321d333a --- /dev/null +++ b/skills/docx/ooxml/schemas/microsoft/wml-2018.xsd @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git a/skills/docx/ooxml/schemas/microsoft/wml-cex-2018.xsd b/skills/docx/ooxml/schemas/microsoft/wml-cex-2018.xsd new file mode 100644 index 000000000..364c6a9b8 --- /dev/null +++ b/skills/docx/ooxml/schemas/microsoft/wml-cex-2018.xsd @@ -0,0 +1,20 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/skills/docx/ooxml/schemas/microsoft/wml-cid-2016.xsd b/skills/docx/ooxml/schemas/microsoft/wml-cid-2016.xsd new file mode 100644 index 000000000..fed9d15b7 --- /dev/null +++ b/skills/docx/ooxml/schemas/microsoft/wml-cid-2016.xsd @@ -0,0 +1,13 @@ + + + + + + + + + + + + + diff --git a/skills/docx/ooxml/schemas/microsoft/wml-sdtdatahash-2020.xsd b/skills/docx/ooxml/schemas/microsoft/wml-sdtdatahash-2020.xsd new file mode 100644 index 000000000..680cf1540 --- /dev/null +++ b/skills/docx/ooxml/schemas/microsoft/wml-sdtdatahash-2020.xsd @@ -0,0 +1,4 @@ + + + + diff --git a/skills/docx/ooxml/schemas/microsoft/wml-symex-2015.xsd b/skills/docx/ooxml/schemas/microsoft/wml-symex-2015.xsd new file mode 100644 index 000000000..89ada9083 --- /dev/null +++ b/skills/docx/ooxml/schemas/microsoft/wml-symex-2015.xsd @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/skills/docx/ooxml/scripts/pack.py b/skills/docx/ooxml/scripts/pack.py new file mode 100755 index 000000000..68bc0886f --- /dev/null +++ b/skills/docx/ooxml/scripts/pack.py @@ -0,0 +1,159 @@ +#!/usr/bin/env python3 +""" +Tool to pack a directory into a .docx, .pptx, or .xlsx file with XML formatting undone. + +Example usage: + python pack.py [--force] +""" + +import argparse +import shutil +import subprocess +import sys +import tempfile +import defusedxml.minidom +import zipfile +from pathlib import Path + + +def main(): + parser = argparse.ArgumentParser(description="Pack a directory into an Office file") + parser.add_argument("input_directory", help="Unpacked Office document directory") + parser.add_argument("output_file", help="Output Office file (.docx/.pptx/.xlsx)") + parser.add_argument("--force", action="store_true", help="Skip validation") + args = parser.parse_args() + + try: + success = pack_document( + args.input_directory, args.output_file, validate=not args.force + ) + + # Show warning if validation was skipped + if args.force: + print("Warning: Skipped validation, file may be corrupt", file=sys.stderr) + # Exit with error if validation failed + elif not success: + print("Contents would produce a corrupt file.", file=sys.stderr) + print("Please validate XML before repacking.", file=sys.stderr) + print("Use --force to skip validation and pack anyway.", file=sys.stderr) + sys.exit(1) + + except ValueError as e: + sys.exit(f"Error: {e}") + + +def pack_document(input_dir, output_file, validate=False): + """Pack a directory into an Office file (.docx/.pptx/.xlsx). 
+ + Args: + input_dir: Path to unpacked Office document directory + output_file: Path to output Office file + validate: If True, validates with soffice (default: False) + + Returns: + bool: True if successful, False if validation failed + """ + input_dir = Path(input_dir) + output_file = Path(output_file) + + if not input_dir.is_dir(): + raise ValueError(f"{input_dir} is not a directory") + if output_file.suffix.lower() not in {".docx", ".pptx", ".xlsx"}: + raise ValueError(f"{output_file} must be a .docx, .pptx, or .xlsx file") + + # Work in temporary directory to avoid modifying original + with tempfile.TemporaryDirectory() as temp_dir: + temp_content_dir = Path(temp_dir) / "content" + shutil.copytree(input_dir, temp_content_dir) + + # Process XML files to remove pretty-printing whitespace + for pattern in ["*.xml", "*.rels"]: + for xml_file in temp_content_dir.rglob(pattern): + condense_xml(xml_file) + + # Create final Office file as zip archive + output_file.parent.mkdir(parents=True, exist_ok=True) + with zipfile.ZipFile(output_file, "w", zipfile.ZIP_DEFLATED) as zf: + for f in temp_content_dir.rglob("*"): + if f.is_file(): + zf.write(f, f.relative_to(temp_content_dir)) + + # Validate if requested + if validate: + if not validate_document(output_file): + output_file.unlink() # Delete the corrupt file + return False + + return True + + +def validate_document(doc_path): + """Validate document by converting to HTML with soffice.""" + # Determine the correct filter based on file extension + match doc_path.suffix.lower(): + case ".docx": + filter_name = "html:HTML" + case ".pptx": + filter_name = "html:impress_html_Export" + case ".xlsx": + filter_name = "html:HTML (StarCalc)" + + with tempfile.TemporaryDirectory() as temp_dir: + try: + result = subprocess.run( + [ + "soffice", + "--headless", + "--convert-to", + filter_name, + "--outdir", + temp_dir, + str(doc_path), + ], + capture_output=True, + timeout=10, + text=True, + ) + if not (Path(temp_dir) / f"{doc_path.stem}.html").exists(): + error_msg = result.stderr.strip() or "Document validation failed" + print(f"Validation error: {error_msg}", file=sys.stderr) + return False + return True + except FileNotFoundError: + print("Warning: soffice not found. 
Skipping validation.", file=sys.stderr) + return True + except subprocess.TimeoutExpired: + print("Validation error: Timeout during conversion", file=sys.stderr) + return False + except Exception as e: + print(f"Validation error: {e}", file=sys.stderr) + return False + + +def condense_xml(xml_file): + """Strip unnecessary whitespace and remove comments.""" + with open(xml_file, "r", encoding="utf-8") as f: + dom = defusedxml.minidom.parse(f) + + # Process each element to remove whitespace and comments + for element in dom.getElementsByTagName("*"): + # Skip w:t elements and their processing + if element.tagName.endswith(":t"): + continue + + # Remove whitespace-only text nodes and comment nodes + for child in list(element.childNodes): + if ( + child.nodeType == child.TEXT_NODE + and child.nodeValue + and child.nodeValue.strip() == "" + ) or child.nodeType == child.COMMENT_NODE: + element.removeChild(child) + + # Write back the condensed XML + with open(xml_file, "wb") as f: + f.write(dom.toxml(encoding="UTF-8")) + + +if __name__ == "__main__": + main() diff --git a/skills/docx/ooxml/scripts/unpack.py b/skills/docx/ooxml/scripts/unpack.py new file mode 100755 index 000000000..493879881 --- /dev/null +++ b/skills/docx/ooxml/scripts/unpack.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +"""Unpack and format XML contents of Office files (.docx, .pptx, .xlsx)""" + +import random +import sys +import defusedxml.minidom +import zipfile +from pathlib import Path + +# Get command line arguments +assert len(sys.argv) == 3, "Usage: python unpack.py <input_file> <output_dir>" +input_file, output_dir = sys.argv[1], sys.argv[2] + +# Extract and format +output_path = Path(output_dir) +output_path.mkdir(parents=True, exist_ok=True) +zipfile.ZipFile(input_file).extractall(output_path) + +# Pretty print all XML files +xml_files = list(output_path.rglob("*.xml")) + list(output_path.rglob("*.rels")) +for xml_file in xml_files: + content = xml_file.read_text(encoding="utf-8") + dom = defusedxml.minidom.parseString(content) + xml_file.write_bytes(dom.toprettyxml(indent=" ", encoding="ascii")) + +# For .docx files, suggest an RSID for tracked changes +if input_file.endswith(".docx"): + suggested_rsid = "".join(random.choices("0123456789ABCDEF", k=8)) + print(f"Suggested RSID for edit session: {suggested_rsid}") diff --git a/skills/docx/ooxml/scripts/validate.py b/skills/docx/ooxml/scripts/validate.py new file mode 100755 index 000000000..508c5891f --- /dev/null +++ b/skills/docx/ooxml/scripts/validate.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python3 +""" +Command line tool to validate Office document XML files against XSD schemas and tracked changes.
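+For .docx this runs schema and redlining (tracked-changes) validation; for .pptx, schema validation only.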
+ +Usage: + python validate.py --original +""" + +import argparse +import sys +from pathlib import Path + +from validation import DOCXSchemaValidator, PPTXSchemaValidator, RedliningValidator + + +def main(): + parser = argparse.ArgumentParser(description="Validate Office document XML files") + parser.add_argument( + "unpacked_dir", + help="Path to unpacked Office document directory", + ) + parser.add_argument( + "--original", + required=True, + help="Path to original file (.docx/.pptx/.xlsx)", + ) + parser.add_argument( + "-v", + "--verbose", + action="store_true", + help="Enable verbose output", + ) + args = parser.parse_args() + + # Validate paths + unpacked_dir = Path(args.unpacked_dir) + original_file = Path(args.original) + file_extension = original_file.suffix.lower() + assert unpacked_dir.is_dir(), f"Error: {unpacked_dir} is not a directory" + assert original_file.is_file(), f"Error: {original_file} is not a file" + assert file_extension in [".docx", ".pptx", ".xlsx"], ( + f"Error: {original_file} must be a .docx, .pptx, or .xlsx file" + ) + + # Run validations + match file_extension: + case ".docx": + validators = [DOCXSchemaValidator, RedliningValidator] + case ".pptx": + validators = [PPTXSchemaValidator] + case _: + print(f"Error: Validation not supported for file type {file_extension}") + sys.exit(1) + + # Run validators + success = True + for V in validators: + validator = V(unpacked_dir, original_file, verbose=args.verbose) + if not validator.validate(): + success = False + + if success: + print("All validations PASSED!") + + sys.exit(0 if success else 1) + + +if __name__ == "__main__": + main() diff --git a/skills/docx/ooxml/scripts/validation/__init__.py b/skills/docx/ooxml/scripts/validation/__init__.py new file mode 100644 index 000000000..db092ece7 --- /dev/null +++ b/skills/docx/ooxml/scripts/validation/__init__.py @@ -0,0 +1,15 @@ +""" +Validation modules for Word document processing. +""" + +from .base import BaseSchemaValidator +from .docx import DOCXSchemaValidator +from .pptx import PPTXSchemaValidator +from .redlining import RedliningValidator + +__all__ = [ + "BaseSchemaValidator", + "DOCXSchemaValidator", + "PPTXSchemaValidator", + "RedliningValidator", +] diff --git a/skills/docx/ooxml/scripts/validation/base.py b/skills/docx/ooxml/scripts/validation/base.py new file mode 100644 index 000000000..0681b199c --- /dev/null +++ b/skills/docx/ooxml/scripts/validation/base.py @@ -0,0 +1,951 @@ +""" +Base validator with common validation logic for document files. 
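+Shared checks: XML well-formedness, namespace declarations, unique IDs, relationship/file references, content-type declarations, and XSD validation against the bundled schemas.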
+""" + +import re +from pathlib import Path + +import lxml.etree + + +class BaseSchemaValidator: + """Base validator with common validation logic for document files.""" + + # Elements whose 'id' attributes must be unique within their file + # Format: element_name -> (attribute_name, scope) + # scope can be 'file' (unique within file) or 'global' (unique across all files) + UNIQUE_ID_REQUIREMENTS = { + # Word elements + "comment": ("id", "file"), # Comment IDs in comments.xml + "commentrangestart": ("id", "file"), # Must match comment IDs + "commentrangeend": ("id", "file"), # Must match comment IDs + "bookmarkstart": ("id", "file"), # Bookmark start IDs + "bookmarkend": ("id", "file"), # Bookmark end IDs + # Note: ins and del (track changes) can share IDs when part of same revision + # PowerPoint elements + "sldid": ("id", "file"), # Slide IDs in presentation.xml + "sldmasterid": ("id", "global"), # Slide master IDs must be globally unique + "sldlayoutid": ("id", "global"), # Slide layout IDs must be globally unique + "cm": ("authorid", "file"), # Comment author IDs + # Excel elements + "sheet": ("sheetid", "file"), # Sheet IDs in workbook.xml + "definedname": ("id", "file"), # Named range IDs + # Drawing/Shape elements (all formats) + "cxnsp": ("id", "file"), # Connection shape IDs + "sp": ("id", "file"), # Shape IDs + "pic": ("id", "file"), # Picture IDs + "grpsp": ("id", "file"), # Group shape IDs + } + + # Mapping of element names to expected relationship types + # Subclasses should override this with format-specific mappings + ELEMENT_RELATIONSHIP_TYPES = {} + + # Unified schema mappings for all Office document types + SCHEMA_MAPPINGS = { + # Document type specific schemas + "word": "ISO-IEC29500-4_2016/wml.xsd", # Word documents + "ppt": "ISO-IEC29500-4_2016/pml.xsd", # PowerPoint presentations + "xl": "ISO-IEC29500-4_2016/sml.xsd", # Excel spreadsheets + # Common file types + "[Content_Types].xml": "ecma/fouth-edition/opc-contentTypes.xsd", + "app.xml": "ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd", + "core.xml": "ecma/fouth-edition/opc-coreProperties.xsd", + "custom.xml": "ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd", + ".rels": "ecma/fouth-edition/opc-relationships.xsd", + # Word-specific files + "people.xml": "microsoft/wml-2012.xsd", + "commentsIds.xml": "microsoft/wml-cid-2016.xsd", + "commentsExtensible.xml": "microsoft/wml-cex-2018.xsd", + "commentsExtended.xml": "microsoft/wml-2012.xsd", + # Chart files (common across document types) + "chart": "ISO-IEC29500-4_2016/dml-chart.xsd", + # Theme files (common across document types) + "theme": "ISO-IEC29500-4_2016/dml-main.xsd", + # Drawing and media files + "drawing": "ISO-IEC29500-4_2016/dml-main.xsd", + } + + # Unified namespace constants + MC_NAMESPACE = "http://schemas.openxmlformats.org/markup-compatibility/2006" + XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace" + + # Common OOXML namespaces used across validators + PACKAGE_RELATIONSHIPS_NAMESPACE = ( + "http://schemas.openxmlformats.org/package/2006/relationships" + ) + OFFICE_RELATIONSHIPS_NAMESPACE = ( + "http://schemas.openxmlformats.org/officeDocument/2006/relationships" + ) + CONTENT_TYPES_NAMESPACE = ( + "http://schemas.openxmlformats.org/package/2006/content-types" + ) + + # Folders where we should clean ignorable namespaces + MAIN_CONTENT_FOLDERS = {"word", "ppt", "xl"} + + # All allowed OOXML namespaces (superset of all document types) + OOXML_NAMESPACES = { + "http://schemas.openxmlformats.org/officeDocument/2006/math", + 
"http://schemas.openxmlformats.org/officeDocument/2006/relationships", + "http://schemas.openxmlformats.org/schemaLibrary/2006/main", + "http://schemas.openxmlformats.org/drawingml/2006/main", + "http://schemas.openxmlformats.org/drawingml/2006/chart", + "http://schemas.openxmlformats.org/drawingml/2006/chartDrawing", + "http://schemas.openxmlformats.org/drawingml/2006/diagram", + "http://schemas.openxmlformats.org/drawingml/2006/picture", + "http://schemas.openxmlformats.org/drawingml/2006/spreadsheetDrawing", + "http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing", + "http://schemas.openxmlformats.org/wordprocessingml/2006/main", + "http://schemas.openxmlformats.org/presentationml/2006/main", + "http://schemas.openxmlformats.org/spreadsheetml/2006/main", + "http://schemas.openxmlformats.org/officeDocument/2006/sharedTypes", + "http://www.w3.org/XML/1998/namespace", + } + + def __init__(self, unpacked_dir, original_file, verbose=False): + self.unpacked_dir = Path(unpacked_dir).resolve() + self.original_file = Path(original_file) + self.verbose = verbose + + # Set schemas directory + self.schemas_dir = Path(__file__).parent.parent.parent / "schemas" + + # Get all XML and .rels files + patterns = ["*.xml", "*.rels"] + self.xml_files = [ + f for pattern in patterns for f in self.unpacked_dir.rglob(pattern) + ] + + if not self.xml_files: + print(f"Warning: No XML files found in {self.unpacked_dir}") + + def validate(self): + """Run all validation checks and return True if all pass.""" + raise NotImplementedError("Subclasses must implement the validate method") + + def validate_xml(self): + """Validate that all XML files are well-formed.""" + errors = [] + + for xml_file in self.xml_files: + try: + # Try to parse the XML file + lxml.etree.parse(str(xml_file)) + except lxml.etree.XMLSyntaxError as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {e.lineno}: {e.msg}" + ) + except Exception as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Unexpected error: {str(e)}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} XML violations:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - All XML files are well-formed") + return True + + def validate_namespaces(self): + """Validate that namespace prefixes in Ignorable attributes are declared.""" + errors = [] + + for xml_file in self.xml_files: + try: + root = lxml.etree.parse(str(xml_file)).getroot() + declared = set(root.nsmap.keys()) - {None} # Exclude default namespace + + for attr_val in [ + v for k, v in root.attrib.items() if k.endswith("Ignorable") + ]: + undeclared = set(attr_val.split()) - declared + errors.extend( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Namespace '{ns}' in Ignorable but not declared" + for ns in undeclared + ) + except lxml.etree.XMLSyntaxError: + continue + + if errors: + print(f"FAILED - {len(errors)} namespace issues:") + for error in errors: + print(error) + return False + if self.verbose: + print("PASSED - All namespace prefixes properly declared") + return True + + def validate_unique_ids(self): + """Validate that specific IDs are unique according to OOXML requirements.""" + errors = [] + global_ids = {} # Track globally unique IDs across all files + + for xml_file in self.xml_files: + try: + root = lxml.etree.parse(str(xml_file)).getroot() + file_ids = {} # Track IDs that must be unique within this file + + # Remove all mc:AlternateContent elements from the tree + 
mc_elements = root.xpath( + ".//mc:AlternateContent", namespaces={"mc": self.MC_NAMESPACE} + ) + for elem in mc_elements: + elem.getparent().remove(elem) + + # Now check IDs in the cleaned tree + for elem in root.iter(): + # Get the element name without namespace + tag = ( + elem.tag.split("}")[-1].lower() + if "}" in elem.tag + else elem.tag.lower() + ) + + # Check if this element type has ID uniqueness requirements + if tag in self.UNIQUE_ID_REQUIREMENTS: + attr_name, scope = self.UNIQUE_ID_REQUIREMENTS[tag] + + # Look for the specified attribute + id_value = None + for attr, value in elem.attrib.items(): + attr_local = ( + attr.split("}")[-1].lower() + if "}" in attr + else attr.lower() + ) + if attr_local == attr_name: + id_value = value + break + + if id_value is not None: + if scope == "global": + # Check global uniqueness + if id_value in global_ids: + prev_file, prev_line, prev_tag = global_ids[ + id_value + ] + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {elem.sourceline}: Global ID '{id_value}' in <{tag}> " + f"already used in {prev_file} at line {prev_line} in <{prev_tag}>" + ) + else: + global_ids[id_value] = ( + xml_file.relative_to(self.unpacked_dir), + elem.sourceline, + tag, + ) + elif scope == "file": + # Check file-level uniqueness + key = (tag, attr_name) + if key not in file_ids: + file_ids[key] = {} + + if id_value in file_ids[key]: + prev_line = file_ids[key][id_value] + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {elem.sourceline}: Duplicate {attr_name}='{id_value}' in <{tag}> " + f"(first occurrence at line {prev_line})" + ) + else: + file_ids[key][id_value] = elem.sourceline + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} ID uniqueness violations:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - All required IDs are unique") + return True + + def validate_file_references(self): + """ + Validate that all .rels files properly reference files and that all files are referenced. 
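+        Broken targets and unreferenced parts both cause Office applications to treat the package as corrupt.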
+ """ + errors = [] + + # Find all .rels files + rels_files = list(self.unpacked_dir.rglob("*.rels")) + + if not rels_files: + if self.verbose: + print("PASSED - No .rels files found") + return True + + # Get all files in the unpacked directory (excluding reference files) + all_files = [] + for file_path in self.unpacked_dir.rglob("*"): + if ( + file_path.is_file() + and file_path.name != "[Content_Types].xml" + and not file_path.name.endswith(".rels") + ): # This file is not referenced by .rels + all_files.append(file_path.resolve()) + + # Track all files that are referenced by any .rels file + all_referenced_files = set() + + if self.verbose: + print( + f"Found {len(rels_files)} .rels files and {len(all_files)} target files" + ) + + # Check each .rels file + for rels_file in rels_files: + try: + # Parse relationships file + rels_root = lxml.etree.parse(str(rels_file)).getroot() + + # Get the directory where this .rels file is located + rels_dir = rels_file.parent + + # Find all relationships and their targets + referenced_files = set() + broken_refs = [] + + for rel in rels_root.findall( + ".//ns:Relationship", + namespaces={"ns": self.PACKAGE_RELATIONSHIPS_NAMESPACE}, + ): + target = rel.get("Target") + if target and not target.startswith( + ("http", "mailto:") + ): # Skip external URLs + # Resolve the target path relative to the .rels file location + if rels_file.name == ".rels": + # Root .rels file - targets are relative to unpacked_dir + target_path = self.unpacked_dir / target + else: + # Other .rels files - targets are relative to their parent's parent + # e.g., word/_rels/document.xml.rels -> targets relative to word/ + base_dir = rels_dir.parent + target_path = base_dir / target + + # Normalize the path and check if it exists + try: + target_path = target_path.resolve() + if target_path.exists() and target_path.is_file(): + referenced_files.add(target_path) + all_referenced_files.add(target_path) + else: + broken_refs.append((target, rel.sourceline)) + except (OSError, ValueError): + broken_refs.append((target, rel.sourceline)) + + # Report broken references + if broken_refs: + rel_path = rels_file.relative_to(self.unpacked_dir) + for broken_ref, line_num in broken_refs: + errors.append( + f" {rel_path}: Line {line_num}: Broken reference to {broken_ref}" + ) + + except Exception as e: + rel_path = rels_file.relative_to(self.unpacked_dir) + errors.append(f" Error parsing {rel_path}: {e}") + + # Check for unreferenced files (files that exist but are not referenced anywhere) + unreferenced_files = set(all_files) - all_referenced_files + + if unreferenced_files: + for unref_file in sorted(unreferenced_files): + unref_rel_path = unref_file.relative_to(self.unpacked_dir) + errors.append(f" Unreferenced file: {unref_rel_path}") + + if errors: + print(f"FAILED - Found {len(errors)} relationship validation errors:") + for error in errors: + print(error) + print( + "CRITICAL: These errors will cause the document to appear corrupt. " + + "Broken references MUST be fixed, " + + "and unreferenced files MUST be referenced or removed." + ) + return False + else: + if self.verbose: + print( + "PASSED - All references are valid and all files are properly referenced" + ) + return True + + def validate_all_relationship_ids(self): + """ + Validate that all r:id attributes in XML files reference existing IDs + in their corresponding .rels files, and optionally validate relationship types. 
+ """ + import lxml.etree + + errors = [] + + # Process each XML file that might contain r:id references + for xml_file in self.xml_files: + # Skip .rels files themselves + if xml_file.suffix == ".rels": + continue + + # Determine the corresponding .rels file + # For dir/file.xml, it's dir/_rels/file.xml.rels + rels_dir = xml_file.parent / "_rels" + rels_file = rels_dir / f"{xml_file.name}.rels" + + # Skip if there's no corresponding .rels file (that's okay) + if not rels_file.exists(): + continue + + try: + # Parse the .rels file to get valid relationship IDs and their types + rels_root = lxml.etree.parse(str(rels_file)).getroot() + rid_to_type = {} + + for rel in rels_root.findall( + f".//{{{self.PACKAGE_RELATIONSHIPS_NAMESPACE}}}Relationship" + ): + rid = rel.get("Id") + rel_type = rel.get("Type", "") + if rid: + # Check for duplicate rIds + if rid in rid_to_type: + rels_rel_path = rels_file.relative_to(self.unpacked_dir) + errors.append( + f" {rels_rel_path}: Line {rel.sourceline}: " + f"Duplicate relationship ID '{rid}' (IDs must be unique)" + ) + # Extract just the type name from the full URL + type_name = ( + rel_type.split("/")[-1] if "/" in rel_type else rel_type + ) + rid_to_type[rid] = type_name + + # Parse the XML file to find all r:id references + xml_root = lxml.etree.parse(str(xml_file)).getroot() + + # Find all elements with r:id attributes + for elem in xml_root.iter(): + # Check for r:id attribute (relationship ID) + rid_attr = elem.get(f"{{{self.OFFICE_RELATIONSHIPS_NAMESPACE}}}id") + if rid_attr: + xml_rel_path = xml_file.relative_to(self.unpacked_dir) + elem_name = ( + elem.tag.split("}")[-1] if "}" in elem.tag else elem.tag + ) + + # Check if the ID exists + if rid_attr not in rid_to_type: + errors.append( + f" {xml_rel_path}: Line {elem.sourceline}: " + f"<{elem_name}> references non-existent relationship '{rid_attr}' " + f"(valid IDs: {', '.join(sorted(rid_to_type.keys())[:5])}{'...' if len(rid_to_type) > 5 else ''})" + ) + # Check if we have type expectations for this element + elif self.ELEMENT_RELATIONSHIP_TYPES: + expected_type = self._get_expected_relationship_type( + elem_name + ) + if expected_type: + actual_type = rid_to_type[rid_attr] + # Check if the actual type matches or contains the expected type + if expected_type not in actual_type.lower(): + errors.append( + f" {xml_rel_path}: Line {elem.sourceline}: " + f"<{elem_name}> references '{rid_attr}' which points to '{actual_type}' " + f"but should point to a '{expected_type}' relationship" + ) + + except Exception as e: + xml_rel_path = xml_file.relative_to(self.unpacked_dir) + errors.append(f" Error processing {xml_rel_path}: {e}") + + if errors: + print(f"FAILED - Found {len(errors)} relationship ID reference errors:") + for error in errors: + print(error) + print("\nThese ID mismatches will cause the document to appear corrupt!") + return False + else: + if self.verbose: + print("PASSED - All relationship ID references are valid") + return True + + def _get_expected_relationship_type(self, element_name): + """ + Get the expected relationship type for an element. + First checks the explicit mapping, then tries pattern detection. 
+ """ + # Normalize element name to lowercase + elem_lower = element_name.lower() + + # Check explicit mapping first + if elem_lower in self.ELEMENT_RELATIONSHIP_TYPES: + return self.ELEMENT_RELATIONSHIP_TYPES[elem_lower] + + # Try pattern detection for common patterns + # Pattern 1: Elements ending in "Id" often expect a relationship of the prefix type + if elem_lower.endswith("id") and len(elem_lower) > 2: + # e.g., "sldId" -> "sld", "sldMasterId" -> "sldMaster" + prefix = elem_lower[:-2] # Remove "id" + # Check if this might be a compound like "sldMasterId" + if prefix.endswith("master"): + return prefix.lower() + elif prefix.endswith("layout"): + return prefix.lower() + else: + # Simple case like "sldId" -> "slide" + # Common transformations + if prefix == "sld": + return "slide" + return prefix.lower() + + # Pattern 2: Elements ending in "Reference" expect a relationship of the prefix type + if elem_lower.endswith("reference") and len(elem_lower) > 9: + prefix = elem_lower[:-9] # Remove "reference" + return prefix.lower() + + return None + + def validate_content_types(self): + """Validate that all content files are properly declared in [Content_Types].xml.""" + errors = [] + + # Find [Content_Types].xml file + content_types_file = self.unpacked_dir / "[Content_Types].xml" + if not content_types_file.exists(): + print("FAILED - [Content_Types].xml file not found") + return False + + try: + # Parse and get all declared parts and extensions + root = lxml.etree.parse(str(content_types_file)).getroot() + declared_parts = set() + declared_extensions = set() + + # Get Override declarations (specific files) + for override in root.findall( + f".//{{{self.CONTENT_TYPES_NAMESPACE}}}Override" + ): + part_name = override.get("PartName") + if part_name is not None: + declared_parts.add(part_name.lstrip("/")) + + # Get Default declarations (by extension) + for default in root.findall( + f".//{{{self.CONTENT_TYPES_NAMESPACE}}}Default" + ): + extension = default.get("Extension") + if extension is not None: + declared_extensions.add(extension.lower()) + + # Root elements that require content type declaration + declarable_roots = { + "sld", + "sldLayout", + "sldMaster", + "presentation", # PowerPoint + "document", # Word + "workbook", + "worksheet", # Excel + "theme", # Common + } + + # Common media file extensions that should be declared + media_extensions = { + "png": "image/png", + "jpg": "image/jpeg", + "jpeg": "image/jpeg", + "gif": "image/gif", + "bmp": "image/bmp", + "tiff": "image/tiff", + "wmf": "image/x-wmf", + "emf": "image/x-emf", + } + + # Get all files in the unpacked directory + all_files = list(self.unpacked_dir.rglob("*")) + all_files = [f for f in all_files if f.is_file()] + + # Check all XML files for Override declarations + for xml_file in self.xml_files: + path_str = str(xml_file.relative_to(self.unpacked_dir)).replace( + "\\", "/" + ) + + # Skip non-content files + if any( + skip in path_str + for skip in [".rels", "[Content_Types]", "docProps/", "_rels/"] + ): + continue + + try: + root_tag = lxml.etree.parse(str(xml_file)).getroot().tag + root_name = root_tag.split("}")[-1] if "}" in root_tag else root_tag + + if root_name in declarable_roots and path_str not in declared_parts: + errors.append( + f" {path_str}: File with <{root_name}> root not declared in [Content_Types].xml" + ) + + except Exception: + continue # Skip unparseable files + + # Check all non-XML files for Default extension declarations + for file_path in all_files: + # Skip XML files and metadata files (already 
checked above) + if file_path.suffix.lower() in {".xml", ".rels"}: + continue + if file_path.name == "[Content_Types].xml": + continue + if "_rels" in file_path.parts or "docProps" in file_path.parts: + continue + + extension = file_path.suffix.lstrip(".").lower() + if extension and extension not in declared_extensions: + # Check if it's a known media extension that should be declared + if extension in media_extensions: + relative_path = file_path.relative_to(self.unpacked_dir) + errors.append( + f' {relative_path}: File with extension \'{extension}\' not declared in [Content_Types].xml - should add: ' + ) + + except Exception as e: + errors.append(f" Error parsing [Content_Types].xml: {e}") + + if errors: + print(f"FAILED - Found {len(errors)} content type declaration errors:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print( + "PASSED - All content files are properly declared in [Content_Types].xml" + ) + return True + + def validate_file_against_xsd(self, xml_file, verbose=False): + """Validate a single XML file against XSD schema, comparing with original. + + Args: + xml_file: Path to XML file to validate + verbose: Enable verbose output + + Returns: + tuple: (is_valid, new_errors_set) where is_valid is True/False/None (skipped) + """ + # Resolve both paths to handle symlinks + xml_file = Path(xml_file).resolve() + unpacked_dir = self.unpacked_dir.resolve() + + # Validate current file + is_valid, current_errors = self._validate_single_file_xsd( + xml_file, unpacked_dir + ) + + if is_valid is None: + return None, set() # Skipped + elif is_valid: + return True, set() # Valid, no errors + + # Get errors from original file for this specific file + original_errors = self._get_original_file_errors(xml_file) + + # Compare with original (both are guaranteed to be sets here) + assert current_errors is not None + new_errors = current_errors - original_errors + + if new_errors: + if verbose: + relative_path = xml_file.relative_to(unpacked_dir) + print(f"FAILED - {relative_path}: {len(new_errors)} new error(s)") + for error in list(new_errors)[:3]: + truncated = error[:250] + "..." if len(error) > 250 else error + print(f" - {truncated}") + return False, new_errors + else: + # All errors existed in original + if verbose: + print( + f"PASSED - No new errors (original had {len(current_errors)} errors)" + ) + return True, set() + + def validate_against_xsd(self): + """Validate XML files against XSD schemas, showing only new errors compared to original.""" + new_errors = [] + original_error_count = 0 + valid_count = 0 + skipped_count = 0 + + for xml_file in self.xml_files: + relative_path = str(xml_file.relative_to(self.unpacked_dir)) + is_valid, new_file_errors = self.validate_file_against_xsd( + xml_file, verbose=False + ) + + if is_valid is None: + skipped_count += 1 + continue + elif is_valid and not new_file_errors: + valid_count += 1 + continue + elif is_valid: + # Had errors but all existed in original + original_error_count += 1 + valid_count += 1 + continue + + # Has new errors + new_errors.append(f" {relative_path}: {len(new_file_errors)} new error(s)") + for error in list(new_file_errors)[:3]: # Show first 3 errors + new_errors.append( + f" - {error[:250]}..." 
if len(error) > 250 else f" - {error}" + ) + + # Print summary + if self.verbose: + print(f"Validated {len(self.xml_files)} files:") + print(f" - Valid: {valid_count}") + print(f" - Skipped (no schema): {skipped_count}") + if original_error_count: + print(f" - With original errors (ignored): {original_error_count}") + print( + f" - With NEW errors: {len(new_errors) > 0 and len([e for e in new_errors if not e.startswith(' ')]) or 0}" + ) + + if new_errors: + print("\nFAILED - Found NEW validation errors:") + for error in new_errors: + print(error) + return False + else: + if self.verbose: + print("\nPASSED - No new XSD validation errors introduced") + return True + + def _get_schema_path(self, xml_file): + """Determine the appropriate schema path for an XML file.""" + # Check exact filename match + if xml_file.name in self.SCHEMA_MAPPINGS: + return self.schemas_dir / self.SCHEMA_MAPPINGS[xml_file.name] + + # Check .rels files + if xml_file.suffix == ".rels": + return self.schemas_dir / self.SCHEMA_MAPPINGS[".rels"] + + # Check chart files + if "charts/" in str(xml_file) and xml_file.name.startswith("chart"): + return self.schemas_dir / self.SCHEMA_MAPPINGS["chart"] + + # Check theme files + if "theme/" in str(xml_file) and xml_file.name.startswith("theme"): + return self.schemas_dir / self.SCHEMA_MAPPINGS["theme"] + + # Check if file is in a main content folder and use appropriate schema + if xml_file.parent.name in self.MAIN_CONTENT_FOLDERS: + return self.schemas_dir / self.SCHEMA_MAPPINGS[xml_file.parent.name] + + return None + + def _clean_ignorable_namespaces(self, xml_doc): + """Remove attributes and elements not in allowed namespaces.""" + # Create a clean copy + xml_string = lxml.etree.tostring(xml_doc, encoding="unicode") + xml_copy = lxml.etree.fromstring(xml_string) + + # Remove attributes not in allowed namespaces + for elem in xml_copy.iter(): + attrs_to_remove = [] + + for attr in elem.attrib: + # Check if attribute is from a namespace other than allowed ones + if "{" in attr: + ns = attr.split("}")[0][1:] + if ns not in self.OOXML_NAMESPACES: + attrs_to_remove.append(attr) + + # Remove collected attributes + for attr in attrs_to_remove: + del elem.attrib[attr] + + # Remove elements not in allowed namespaces + self._remove_ignorable_elements(xml_copy) + + return lxml.etree.ElementTree(xml_copy) + + def _remove_ignorable_elements(self, root): + """Recursively remove all elements not in allowed namespaces.""" + elements_to_remove = [] + + # Find elements to remove + for elem in list(root): + # Skip non-element nodes (comments, processing instructions, etc.) + if not hasattr(elem, "tag") or callable(elem.tag): + continue + + tag_str = str(elem.tag) + if tag_str.startswith("{"): + ns = tag_str.split("}")[0][1:] + if ns not in self.OOXML_NAMESPACES: + elements_to_remove.append(elem) + continue + + # Recursively clean child elements + self._remove_ignorable_elements(elem) + + # Remove collected elements + for elem in elements_to_remove: + root.remove(elem) + + def _preprocess_for_mc_ignorable(self, xml_doc): + """Preprocess XML to handle mc:Ignorable attribute properly.""" + # Remove mc:Ignorable attributes before validation + root = xml_doc.getroot() + + # Remove mc:Ignorable attribute from root + if f"{{{self.MC_NAMESPACE}}}Ignorable" in root.attrib: + del root.attrib[f"{{{self.MC_NAMESPACE}}}Ignorable"] + + return xml_doc + + def _validate_single_file_xsd(self, xml_file, base_path): + """Validate a single XML file against XSD schema. 
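+        Template tags, the mc:Ignorable attribute, and (for main content parts) non-OOXML markup are stripped before validation.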
Returns (is_valid, errors_set).""" + schema_path = self._get_schema_path(xml_file) + if not schema_path: + return None, None # Skip file + + try: + # Load schema + with open(schema_path, "rb") as xsd_file: + parser = lxml.etree.XMLParser() + xsd_doc = lxml.etree.parse( + xsd_file, parser=parser, base_url=str(schema_path) + ) + schema = lxml.etree.XMLSchema(xsd_doc) + + # Load and preprocess XML + with open(xml_file, "r") as f: + xml_doc = lxml.etree.parse(f) + + xml_doc, _ = self._remove_template_tags_from_text_nodes(xml_doc) + xml_doc = self._preprocess_for_mc_ignorable(xml_doc) + + # Clean ignorable namespaces if needed + relative_path = xml_file.relative_to(base_path) + if ( + relative_path.parts + and relative_path.parts[0] in self.MAIN_CONTENT_FOLDERS + ): + xml_doc = self._clean_ignorable_namespaces(xml_doc) + + # Validate + if schema.validate(xml_doc): + return True, set() + else: + errors = set() + for error in schema.error_log: + # Store normalized error message (without line numbers for comparison) + errors.add(error.message) + return False, errors + + except Exception as e: + return False, {str(e)} + + def _get_original_file_errors(self, xml_file): + """Get XSD validation errors from a single file in the original document. + + Args: + xml_file: Path to the XML file in unpacked_dir to check + + Returns: + set: Set of error messages from the original file + """ + import tempfile + import zipfile + + # Resolve both paths to handle symlinks (e.g., /var vs /private/var on macOS) + xml_file = Path(xml_file).resolve() + unpacked_dir = self.unpacked_dir.resolve() + relative_path = xml_file.relative_to(unpacked_dir) + + with tempfile.TemporaryDirectory() as temp_dir: + temp_path = Path(temp_dir) + + # Extract original file + with zipfile.ZipFile(self.original_file, "r") as zip_ref: + zip_ref.extractall(temp_path) + + # Find corresponding file in original + original_xml_file = temp_path / relative_path + + if not original_xml_file.exists(): + # File didn't exist in original, so no original errors + return set() + + # Validate the specific file in original + is_valid, errors = self._validate_single_file_xsd( + original_xml_file, temp_path + ) + return errors if errors else set() + + def _remove_template_tags_from_text_nodes(self, xml_doc): + """Remove template tags from XML text nodes and collect warnings. + + Template tags follow the pattern {{ ... }} and are used as placeholders + for content replacement. They should be removed from text content before + XSD validation while preserving XML structure. 
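+        Text inside w:t elements is left untouched; only the text/tail of other elements is cleaned.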
+ + Returns: + tuple: (cleaned_xml_doc, warnings_list) + """ + warnings = [] + template_pattern = re.compile(r"\{\{[^}]*\}\}") + + # Create a copy of the document to avoid modifying the original + xml_string = lxml.etree.tostring(xml_doc, encoding="unicode") + xml_copy = lxml.etree.fromstring(xml_string) + + def process_text_content(text, content_type): + if not text: + return text + matches = list(template_pattern.finditer(text)) + if matches: + for match in matches: + warnings.append( + f"Found template tag in {content_type}: {match.group()}" + ) + return template_pattern.sub("", text) + return text + + # Process all text nodes in the document + for elem in xml_copy.iter(): + # Skip processing if this is a w:t element + if not hasattr(elem, "tag") or callable(elem.tag): + continue + tag_str = str(elem.tag) + if tag_str.endswith("}t") or tag_str == "t": + continue + + elem.text = process_text_content(elem.text, "text content") + elem.tail = process_text_content(elem.tail, "tail content") + + return lxml.etree.ElementTree(xml_copy), warnings + + +if __name__ == "__main__": + raise RuntimeError("This module should not be run directly.") diff --git a/skills/docx/ooxml/scripts/validation/docx.py b/skills/docx/ooxml/scripts/validation/docx.py new file mode 100644 index 000000000..602c47087 --- /dev/null +++ b/skills/docx/ooxml/scripts/validation/docx.py @@ -0,0 +1,274 @@ +""" +Validator for Word document XML files against XSD schemas. +""" + +import re +import tempfile +import zipfile + +import lxml.etree + +from .base import BaseSchemaValidator + + +class DOCXSchemaValidator(BaseSchemaValidator): + """Validator for Word document XML files against XSD schemas.""" + + # Word-specific namespace + WORD_2006_NAMESPACE = "http://schemas.openxmlformats.org/wordprocessingml/2006/main" + + # Word-specific element to relationship type mappings + # Start with empty mapping - add specific cases as we discover them + ELEMENT_RELATIONSHIP_TYPES = {} + + def validate(self): + """Run all validation checks and return True if all pass.""" + # Test 0: XML well-formedness + if not self.validate_xml(): + return False + + # Test 1: Namespace declarations + all_valid = True + if not self.validate_namespaces(): + all_valid = False + + # Test 2: Unique IDs + if not self.validate_unique_ids(): + all_valid = False + + # Test 3: Relationship and file reference validation + if not self.validate_file_references(): + all_valid = False + + # Test 4: Content type declarations + if not self.validate_content_types(): + all_valid = False + + # Test 5: XSD schema validation + if not self.validate_against_xsd(): + all_valid = False + + # Test 6: Whitespace preservation + if not self.validate_whitespace_preservation(): + all_valid = False + + # Test 7: Deletion validation + if not self.validate_deletions(): + all_valid = False + + # Test 8: Insertion validation + if not self.validate_insertions(): + all_valid = False + + # Test 9: Relationship ID reference validation + if not self.validate_all_relationship_ids(): + all_valid = False + + # Count and compare paragraphs + self.compare_paragraph_counts() + + return all_valid + + def validate_whitespace_preservation(self): + """ + Validate that w:t elements with whitespace have xml:space='preserve'. 
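+        Without xml:space='preserve', Word may collapse the leading/trailing spaces when the document is re-saved.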
+ """ + errors = [] + + for xml_file in self.xml_files: + # Only check document.xml files + if xml_file.name != "document.xml": + continue + + try: + root = lxml.etree.parse(str(xml_file)).getroot() + + # Find all w:t elements + for elem in root.iter(f"{{{self.WORD_2006_NAMESPACE}}}t"): + if elem.text: + text = elem.text + # Check if text starts or ends with whitespace + if re.match(r"^\s.*", text) or re.match(r".*\s$", text): + # Check if xml:space="preserve" attribute exists + xml_space_attr = f"{{{self.XML_NAMESPACE}}}space" + if ( + xml_space_attr not in elem.attrib + or elem.attrib[xml_space_attr] != "preserve" + ): + # Show a preview of the text + text_preview = ( + repr(text)[:50] + "..." + if len(repr(text)) > 50 + else repr(text) + ) + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {elem.sourceline}: w:t element with whitespace missing xml:space='preserve': {text_preview}" + ) + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} whitespace preservation violations:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - All whitespace is properly preserved") + return True + + def validate_deletions(self): + """ + Validate that w:t elements are not within w:del elements. + For some reason, XSD validation does not catch this, so we do it manually. + """ + errors = [] + + for xml_file in self.xml_files: + # Only check document.xml files + if xml_file.name != "document.xml": + continue + + try: + root = lxml.etree.parse(str(xml_file)).getroot() + + # Find all w:t elements that are descendants of w:del elements + namespaces = {"w": self.WORD_2006_NAMESPACE} + xpath_expression = ".//w:del//w:t" + problematic_t_elements = root.xpath( + xpath_expression, namespaces=namespaces + ) + for t_elem in problematic_t_elements: + if t_elem.text: + # Show a preview of the text + text_preview = ( + repr(t_elem.text)[:50] + "..." 
+ if len(repr(t_elem.text)) > 50 + else repr(t_elem.text) + ) + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {t_elem.sourceline}: found within : {text_preview}" + ) + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} deletion validation violations:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - No w:t elements found within w:del elements") + return True + + def count_paragraphs_in_unpacked(self): + """Count the number of paragraphs in the unpacked document.""" + count = 0 + + for xml_file in self.xml_files: + # Only check document.xml files + if xml_file.name != "document.xml": + continue + + try: + root = lxml.etree.parse(str(xml_file)).getroot() + # Count all w:p elements + paragraphs = root.findall(f".//{{{self.WORD_2006_NAMESPACE}}}p") + count = len(paragraphs) + except Exception as e: + print(f"Error counting paragraphs in unpacked document: {e}") + + return count + + def count_paragraphs_in_original(self): + """Count the number of paragraphs in the original docx file.""" + count = 0 + + try: + # Create temporary directory to unpack original + with tempfile.TemporaryDirectory() as temp_dir: + # Unpack original docx + with zipfile.ZipFile(self.original_file, "r") as zip_ref: + zip_ref.extractall(temp_dir) + + # Parse document.xml + doc_xml_path = temp_dir + "/word/document.xml" + root = lxml.etree.parse(doc_xml_path).getroot() + + # Count all w:p elements + paragraphs = root.findall(f".//{{{self.WORD_2006_NAMESPACE}}}p") + count = len(paragraphs) + + except Exception as e: + print(f"Error counting paragraphs in original document: {e}") + + return count + + def validate_insertions(self): + """ + Validate that w:delText elements are not within w:ins elements. + w:delText is only allowed in w:ins if nested within a w:del. + """ + errors = [] + + for xml_file in self.xml_files: + if xml_file.name != "document.xml": + continue + + try: + root = lxml.etree.parse(str(xml_file)).getroot() + namespaces = {"w": self.WORD_2006_NAMESPACE} + + # Find w:delText in w:ins that are NOT within w:del + invalid_elements = root.xpath( + ".//w:ins//w:delText[not(ancestor::w:del)]", + namespaces=namespaces + ) + + for elem in invalid_elements: + text_preview = ( + repr(elem.text or "")[:50] + "..." 
+ if len(repr(elem.text or "")) > 50 + else repr(elem.text or "") + ) + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {elem.sourceline}: within : {text_preview}" + ) + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} insertion validation violations:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - No w:delText elements within w:ins elements") + return True + + def compare_paragraph_counts(self): + """Compare paragraph counts between original and new document.""" + original_count = self.count_paragraphs_in_original() + new_count = self.count_paragraphs_in_unpacked() + + diff = new_count - original_count + diff_str = f"+{diff}" if diff > 0 else str(diff) + print(f"\nParagraphs: {original_count} → {new_count} ({diff_str})") + + +if __name__ == "__main__": + raise RuntimeError("This module should not be run directly.") diff --git a/skills/docx/ooxml/scripts/validation/pptx.py b/skills/docx/ooxml/scripts/validation/pptx.py new file mode 100644 index 000000000..66d5b1e2d --- /dev/null +++ b/skills/docx/ooxml/scripts/validation/pptx.py @@ -0,0 +1,315 @@ +""" +Validator for PowerPoint presentation XML files against XSD schemas. +""" + +import re + +from .base import BaseSchemaValidator + + +class PPTXSchemaValidator(BaseSchemaValidator): + """Validator for PowerPoint presentation XML files against XSD schemas.""" + + # PowerPoint presentation namespace + PRESENTATIONML_NAMESPACE = ( + "http://schemas.openxmlformats.org/presentationml/2006/main" + ) + + # PowerPoint-specific element to relationship type mappings + ELEMENT_RELATIONSHIP_TYPES = { + "sldid": "slide", + "sldmasterid": "slidemaster", + "notesmasterid": "notesmaster", + "sldlayoutid": "slidelayout", + "themeid": "theme", + "tablestyleid": "tablestyles", + } + + def validate(self): + """Run all validation checks and return True if all pass.""" + # Test 0: XML well-formedness + if not self.validate_xml(): + return False + + # Test 1: Namespace declarations + all_valid = True + if not self.validate_namespaces(): + all_valid = False + + # Test 2: Unique IDs + if not self.validate_unique_ids(): + all_valid = False + + # Test 3: UUID ID validation + if not self.validate_uuid_ids(): + all_valid = False + + # Test 4: Relationship and file reference validation + if not self.validate_file_references(): + all_valid = False + + # Test 5: Slide layout ID validation + if not self.validate_slide_layout_ids(): + all_valid = False + + # Test 6: Content type declarations + if not self.validate_content_types(): + all_valid = False + + # Test 7: XSD schema validation + if not self.validate_against_xsd(): + all_valid = False + + # Test 8: Notes slide reference validation + if not self.validate_notes_slide_references(): + all_valid = False + + # Test 9: Relationship ID reference validation + if not self.validate_all_relationship_ids(): + all_valid = False + + # Test 10: Duplicate slide layout references validation + if not self.validate_no_duplicate_slide_layouts(): + all_valid = False + + return all_valid + + def validate_uuid_ids(self): + """Validate that ID attributes that look like UUIDs contain only hex values.""" + import lxml.etree + + errors = [] + # UUID pattern: 8-4-4-4-12 hex digits with optional braces/hyphens + uuid_pattern = re.compile( + 
r"^[\{\(]?[0-9A-Fa-f]{8}-?[0-9A-Fa-f]{4}-?[0-9A-Fa-f]{4}-?[0-9A-Fa-f]{4}-?[0-9A-Fa-f]{12}[\}\)]?$" + ) + + for xml_file in self.xml_files: + try: + root = lxml.etree.parse(str(xml_file)).getroot() + + # Check all elements for ID attributes + for elem in root.iter(): + for attr, value in elem.attrib.items(): + # Check if this is an ID attribute + attr_name = attr.split("}")[-1].lower() + if attr_name == "id" or attr_name.endswith("id"): + # Check if value looks like a UUID (has the right length and pattern structure) + if self._looks_like_uuid(value): + # Validate that it contains only hex characters in the right positions + if not uuid_pattern.match(value): + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {elem.sourceline}: ID '{value}' appears to be a UUID but contains invalid hex characters" + ) + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} UUID ID validation errors:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - All UUID-like IDs contain valid hex values") + return True + + def _looks_like_uuid(self, value): + """Check if a value has the general structure of a UUID.""" + # Remove common UUID delimiters + clean_value = value.strip("{}()").replace("-", "") + # Check if it's 32 hex-like characters (could include invalid hex chars) + return len(clean_value) == 32 and all(c.isalnum() for c in clean_value) + + def validate_slide_layout_ids(self): + """Validate that sldLayoutId elements in slide masters reference valid slide layouts.""" + import lxml.etree + + errors = [] + + # Find all slide master files + slide_masters = list(self.unpacked_dir.glob("ppt/slideMasters/*.xml")) + + if not slide_masters: + if self.verbose: + print("PASSED - No slide masters found") + return True + + for slide_master in slide_masters: + try: + # Parse the slide master file + root = lxml.etree.parse(str(slide_master)).getroot() + + # Find the corresponding _rels file for this slide master + rels_file = slide_master.parent / "_rels" / f"{slide_master.name}.rels" + + if not rels_file.exists(): + errors.append( + f" {slide_master.relative_to(self.unpacked_dir)}: " + f"Missing relationships file: {rels_file.relative_to(self.unpacked_dir)}" + ) + continue + + # Parse the relationships file + rels_root = lxml.etree.parse(str(rels_file)).getroot() + + # Build a set of valid relationship IDs that point to slide layouts + valid_layout_rids = set() + for rel in rels_root.findall( + f".//{{{self.PACKAGE_RELATIONSHIPS_NAMESPACE}}}Relationship" + ): + rel_type = rel.get("Type", "") + if "slideLayout" in rel_type: + valid_layout_rids.add(rel.get("Id")) + + # Find all sldLayoutId elements in the slide master + for sld_layout_id in root.findall( + f".//{{{self.PRESENTATIONML_NAMESPACE}}}sldLayoutId" + ): + r_id = sld_layout_id.get( + f"{{{self.OFFICE_RELATIONSHIPS_NAMESPACE}}}id" + ) + layout_id = sld_layout_id.get("id") + + if r_id and r_id not in valid_layout_rids: + errors.append( + f" {slide_master.relative_to(self.unpacked_dir)}: " + f"Line {sld_layout_id.sourceline}: sldLayoutId with id='{layout_id}' " + f"references r:id='{r_id}' which is not found in slide layout relationships" + ) + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {slide_master.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} slide layout ID validation 
errors:") + for error in errors: + print(error) + print( + "Remove invalid references or add missing slide layouts to the relationships file." + ) + return False + else: + if self.verbose: + print("PASSED - All slide layout IDs reference valid slide layouts") + return True + + def validate_no_duplicate_slide_layouts(self): + """Validate that each slide has exactly one slideLayout reference.""" + import lxml.etree + + errors = [] + slide_rels_files = list(self.unpacked_dir.glob("ppt/slides/_rels/*.xml.rels")) + + for rels_file in slide_rels_files: + try: + root = lxml.etree.parse(str(rels_file)).getroot() + + # Find all slideLayout relationships + layout_rels = [ + rel + for rel in root.findall( + f".//{{{self.PACKAGE_RELATIONSHIPS_NAMESPACE}}}Relationship" + ) + if "slideLayout" in rel.get("Type", "") + ] + + if len(layout_rels) > 1: + errors.append( + f" {rels_file.relative_to(self.unpacked_dir)}: has {len(layout_rels)} slideLayout references" + ) + + except Exception as e: + errors.append( + f" {rels_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print("FAILED - Found slides with duplicate slideLayout references:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - All slides have exactly one slideLayout reference") + return True + + def validate_notes_slide_references(self): + """Validate that each notesSlide file is referenced by only one slide.""" + import lxml.etree + + errors = [] + notes_slide_references = {} # Track which slides reference each notesSlide + + # Find all slide relationship files + slide_rels_files = list(self.unpacked_dir.glob("ppt/slides/_rels/*.xml.rels")) + + if not slide_rels_files: + if self.verbose: + print("PASSED - No slide relationship files found") + return True + + for rels_file in slide_rels_files: + try: + # Parse the relationships file + root = lxml.etree.parse(str(rels_file)).getroot() + + # Find all notesSlide relationships + for rel in root.findall( + f".//{{{self.PACKAGE_RELATIONSHIPS_NAMESPACE}}}Relationship" + ): + rel_type = rel.get("Type", "") + if "notesSlide" in rel_type: + target = rel.get("Target", "") + if target: + # Normalize the target path to handle relative paths + normalized_target = target.replace("../", "") + + # Track which slide references this notesSlide + slide_name = rels_file.stem.replace( + ".xml", "" + ) # e.g., "slide1" + + if normalized_target not in notes_slide_references: + notes_slide_references[normalized_target] = [] + notes_slide_references[normalized_target].append( + (slide_name, rels_file) + ) + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {rels_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + # Check for duplicate references + for target, references in notes_slide_references.items(): + if len(references) > 1: + slide_names = [ref[0] for ref in references] + errors.append( + f" Notes slide '{target}' is referenced by multiple slides: {', '.join(slide_names)}" + ) + for slide_name, rels_file in references: + errors.append(f" - {rels_file.relative_to(self.unpacked_dir)}") + + if errors: + print( + f"FAILED - Found {len([e for e in errors if not e.startswith(' ')])} notes slide reference validation errors:" + ) + for error in errors: + print(error) + print("Each slide may optionally have its own slide file.") + return False + else: + if self.verbose: + print("PASSED - All notes slide references are unique") + return True + + +if __name__ == "__main__": + raise RuntimeError("This module should not be run 
directly.") diff --git a/skills/docx/ooxml/scripts/validation/redlining.py b/skills/docx/ooxml/scripts/validation/redlining.py new file mode 100644 index 000000000..7ed425edf --- /dev/null +++ b/skills/docx/ooxml/scripts/validation/redlining.py @@ -0,0 +1,279 @@ +""" +Validator for tracked changes in Word documents. +""" + +import subprocess +import tempfile +import zipfile +from pathlib import Path + + +class RedliningValidator: + """Validator for tracked changes in Word documents.""" + + def __init__(self, unpacked_dir, original_docx, verbose=False): + self.unpacked_dir = Path(unpacked_dir) + self.original_docx = Path(original_docx) + self.verbose = verbose + self.namespaces = { + "w": "http://schemas.openxmlformats.org/wordprocessingml/2006/main" + } + + def validate(self): + """Main validation method that returns True if valid, False otherwise.""" + # Verify unpacked directory exists and has correct structure + modified_file = self.unpacked_dir / "word" / "document.xml" + if not modified_file.exists(): + print(f"FAILED - Modified document.xml not found at {modified_file}") + return False + + # First, check if there are any tracked changes by Claude to validate + try: + import xml.etree.ElementTree as ET + + tree = ET.parse(modified_file) + root = tree.getroot() + + # Check for w:del or w:ins tags authored by Claude + del_elements = root.findall(".//w:del", self.namespaces) + ins_elements = root.findall(".//w:ins", self.namespaces) + + # Filter to only include changes by Claude + claude_del_elements = [ + elem + for elem in del_elements + if elem.get(f"{{{self.namespaces['w']}}}author") == "Claude" + ] + claude_ins_elements = [ + elem + for elem in ins_elements + if elem.get(f"{{{self.namespaces['w']}}}author") == "Claude" + ] + + # Redlining validation is only needed if tracked changes by Claude have been used. 
+ if not claude_del_elements and not claude_ins_elements: + if self.verbose: + print("PASSED - No tracked changes by Claude found.") + return True + + except Exception: + # If we can't parse the XML, continue with full validation + pass + + # Create temporary directory for unpacking original docx + with tempfile.TemporaryDirectory() as temp_dir: + temp_path = Path(temp_dir) + + # Unpack original docx + try: + with zipfile.ZipFile(self.original_docx, "r") as zip_ref: + zip_ref.extractall(temp_path) + except Exception as e: + print(f"FAILED - Error unpacking original docx: {e}") + return False + + original_file = temp_path / "word" / "document.xml" + if not original_file.exists(): + print( + f"FAILED - Original document.xml not found in {self.original_docx}" + ) + return False + + # Parse both XML files using xml.etree.ElementTree for redlining validation + try: + import xml.etree.ElementTree as ET + + modified_tree = ET.parse(modified_file) + modified_root = modified_tree.getroot() + original_tree = ET.parse(original_file) + original_root = original_tree.getroot() + except ET.ParseError as e: + print(f"FAILED - Error parsing XML files: {e}") + return False + + # Remove Claude's tracked changes from both documents + self._remove_claude_tracked_changes(original_root) + self._remove_claude_tracked_changes(modified_root) + + # Extract and compare text content + modified_text = self._extract_text_content(modified_root) + original_text = self._extract_text_content(original_root) + + if modified_text != original_text: + # Show detailed character-level differences for each paragraph + error_message = self._generate_detailed_diff( + original_text, modified_text + ) + print(error_message) + return False + + if self.verbose: + print("PASSED - All changes by Claude are properly tracked") + return True + + def _generate_detailed_diff(self, original_text, modified_text): + """Generate detailed word-level differences using git word diff.""" + error_parts = [ + "FAILED - Document text doesn't match after removing Claude's tracked changes", + "", + "Likely causes:", + " 1. Modified text inside another author's or tags", + " 2. Made edits without proper tracked changes", + " 3. 
Didn't nest inside when deleting another's insertion", + "", + "For pre-redlined documents, use correct patterns:", + " - To reject another's INSERTION: Nest inside their ", + " - To restore another's DELETION: Add new AFTER their ", + "", + ] + + # Show git word diff + git_diff = self._get_git_word_diff(original_text, modified_text) + if git_diff: + error_parts.extend(["Differences:", "============", git_diff]) + else: + error_parts.append("Unable to generate word diff (git not available)") + + return "\n".join(error_parts) + + def _get_git_word_diff(self, original_text, modified_text): + """Generate word diff using git with character-level precision.""" + try: + with tempfile.TemporaryDirectory() as temp_dir: + temp_path = Path(temp_dir) + + # Create two files + original_file = temp_path / "original.txt" + modified_file = temp_path / "modified.txt" + + original_file.write_text(original_text, encoding="utf-8") + modified_file.write_text(modified_text, encoding="utf-8") + + # Try character-level diff first for precise differences + result = subprocess.run( + [ + "git", + "diff", + "--word-diff=plain", + "--word-diff-regex=.", # Character-by-character diff + "-U0", # Zero lines of context - show only changed lines + "--no-index", + str(original_file), + str(modified_file), + ], + capture_output=True, + text=True, + ) + + if result.stdout.strip(): + # Clean up the output - remove git diff header lines + lines = result.stdout.split("\n") + # Skip the header lines (diff --git, index, +++, ---, @@) + content_lines = [] + in_content = False + for line in lines: + if line.startswith("@@"): + in_content = True + continue + if in_content and line.strip(): + content_lines.append(line) + + if content_lines: + return "\n".join(content_lines) + + # Fallback to word-level diff if character-level is too verbose + result = subprocess.run( + [ + "git", + "diff", + "--word-diff=plain", + "-U0", # Zero lines of context + "--no-index", + str(original_file), + str(modified_file), + ], + capture_output=True, + text=True, + ) + + if result.stdout.strip(): + lines = result.stdout.split("\n") + content_lines = [] + in_content = False + for line in lines: + if line.startswith("@@"): + in_content = True + continue + if in_content and line.strip(): + content_lines.append(line) + return "\n".join(content_lines) + + except (subprocess.CalledProcessError, FileNotFoundError, Exception): + # Git not available or other error, return None to use fallback + pass + + return None + + def _remove_claude_tracked_changes(self, root): + """Remove tracked changes authored by Claude from the XML root.""" + ins_tag = f"{{{self.namespaces['w']}}}ins" + del_tag = f"{{{self.namespaces['w']}}}del" + author_attr = f"{{{self.namespaces['w']}}}author" + + # Remove w:ins elements + for parent in root.iter(): + to_remove = [] + for child in parent: + if child.tag == ins_tag and child.get(author_attr) == "Claude": + to_remove.append(child) + for elem in to_remove: + parent.remove(elem) + + # Unwrap content in w:del elements where author is "Claude" + deltext_tag = f"{{{self.namespaces['w']}}}delText" + t_tag = f"{{{self.namespaces['w']}}}t" + + for parent in root.iter(): + to_process = [] + for child in parent: + if child.tag == del_tag and child.get(author_attr) == "Claude": + to_process.append((child, list(parent).index(child))) + + # Process in reverse order to maintain indices + for del_elem, del_index in reversed(to_process): + # Convert w:delText to w:t before moving + for elem in del_elem.iter(): + if elem.tag == deltext_tag: + elem.tag 
= t_tag + + # Move all children of w:del to its parent before removing w:del + for child in reversed(list(del_elem)): + parent.insert(del_index, child) + parent.remove(del_elem) + + def _extract_text_content(self, root): + """Extract text content from Word XML, preserving paragraph structure. + + Empty paragraphs are skipped to avoid false positives when tracked + insertions add only structural elements without text content. + """ + p_tag = f"{{{self.namespaces['w']}}}p" + t_tag = f"{{{self.namespaces['w']}}}t" + + paragraphs = [] + for p_elem in root.findall(f".//{p_tag}"): + # Get all text elements within this paragraph + text_parts = [] + for t_elem in p_elem.findall(f".//{t_tag}"): + if t_elem.text: + text_parts.append(t_elem.text) + paragraph_text = "".join(text_parts) + # Skip empty paragraphs - they don't affect content validation + if paragraph_text: + paragraphs.append(paragraph_text) + + return "\n".join(paragraphs) + + +if __name__ == "__main__": + raise RuntimeError("This module should not be run directly.") diff --git a/skills/docx/scripts/__init__.py b/skills/docx/scripts/__init__.py new file mode 100755 index 000000000..bf9c56272 --- /dev/null +++ b/skills/docx/scripts/__init__.py @@ -0,0 +1 @@ +# Make scripts directory a package for relative imports in tests diff --git a/skills/docx/scripts/document.py b/skills/docx/scripts/document.py new file mode 100755 index 000000000..ae9328ddf --- /dev/null +++ b/skills/docx/scripts/document.py @@ -0,0 +1,1276 @@ +#!/usr/bin/env python3 +""" +Library for working with Word documents: comments, tracked changes, and editing. + +Usage: + from skills.docx.scripts.document import Document + + # Initialize + doc = Document('workspace/unpacked') + doc = Document('workspace/unpacked', author="John Doe", initials="JD") + + # Find nodes + node = doc["word/document.xml"].get_node(tag="w:del", attrs={"w:id": "1"}) + node = doc["word/document.xml"].get_node(tag="w:p", line_number=10) + + # Add comments + doc.add_comment(start=node, end=node, text="Comment text") + doc.reply_to_comment(parent_comment_id=0, text="Reply text") + + # Suggest tracked changes + doc["word/document.xml"].suggest_deletion(node) # Delete content + doc["word/document.xml"].revert_insertion(ins_node) # Reject insertion + doc["word/document.xml"].revert_deletion(del_node) # Reject deletion + + # Save + doc.save() +""" + +import html +import random +import shutil +import tempfile +from datetime import datetime, timezone +from pathlib import Path + +from defusedxml import minidom +from ooxml.scripts.pack import pack_document +from ooxml.scripts.validation.docx import DOCXSchemaValidator +from ooxml.scripts.validation.redlining import RedliningValidator + +from .utilities import XMLEditor + +# Path to template files +TEMPLATE_DIR = Path(__file__).parent / "templates" + + +class DocxXMLEditor(XMLEditor): + """XMLEditor that automatically applies RSID, author, and date to new elements. + + Automatically adds attributes to elements that support them when inserting new content: + - w:rsidR, w:rsidRDefault, w:rsidP (for w:p and w:r elements) + - w:author and w:date (for w:ins, w:del, w:comment elements) + - w:id (for w:ins and w:del elements) + + Attributes: + dom (defusedxml.minidom.Document): The DOM document for direct manipulation + """ + + def __init__( + self, xml_path, rsid: str, author: str = "Claude", initials: str = "C" + ): + """Initialize with required RSID and optional author. 
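+
+        Instances are normally obtained via Document.__getitem__ rather than built
+        directly; a direct construction looks like this (path and RSID illustrative):
+
+            editor = DocxXMLEditor("unpacked/word/document.xml", rsid="00AB12CD")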
+ + Args: + xml_path: Path to XML file to edit + rsid: RSID to automatically apply to new elements + author: Author name for tracked changes and comments (default: "Claude") + initials: Author initials (default: "C") + """ + super().__init__(xml_path) + self.rsid = rsid + self.author = author + self.initials = initials + + def _get_next_change_id(self): + """Get the next available change ID by checking all tracked change elements.""" + max_id = -1 + for tag in ("w:ins", "w:del"): + elements = self.dom.getElementsByTagName(tag) + for elem in elements: + change_id = elem.getAttribute("w:id") + if change_id: + try: + max_id = max(max_id, int(change_id)) + except ValueError: + pass + return max_id + 1 + + def _ensure_w16du_namespace(self): + """Ensure w16du namespace is declared on the root element.""" + root = self.dom.documentElement + if not root.hasAttribute("xmlns:w16du"): # type: ignore + root.setAttribute( # type: ignore + "xmlns:w16du", + "http://schemas.microsoft.com/office/word/2023/wordml/word16du", + ) + + def _ensure_w16cex_namespace(self): + """Ensure w16cex namespace is declared on the root element.""" + root = self.dom.documentElement + if not root.hasAttribute("xmlns:w16cex"): # type: ignore + root.setAttribute( # type: ignore + "xmlns:w16cex", + "http://schemas.microsoft.com/office/word/2018/wordml/cex", + ) + + def _ensure_w14_namespace(self): + """Ensure w14 namespace is declared on the root element.""" + root = self.dom.documentElement + if not root.hasAttribute("xmlns:w14"): # type: ignore + root.setAttribute( # type: ignore + "xmlns:w14", + "http://schemas.microsoft.com/office/word/2010/wordml", + ) + + def _inject_attributes_to_nodes(self, nodes): + """Inject RSID, author, and date attributes into DOM nodes where applicable. + + Adds attributes to elements that support them: + - w:r: gets w:rsidR (or w:rsidDel if inside w:del) + - w:p: gets w:rsidR, w:rsidRDefault, w:rsidP, w14:paraId, w14:textId + - w:t: gets xml:space="preserve" if text has leading/trailing whitespace + - w:ins, w:del: get w:id, w:author, w:date, w16du:dateUtc + - w:comment: gets w:author, w:date, w:initials + - w16cex:commentExtensible: gets w16cex:dateUtc + + Args: + nodes: List of DOM nodes to process + """ + from datetime import datetime, timezone + + timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ") + + def is_inside_deletion(elem): + """Check if element is inside a w:del element.""" + parent = elem.parentNode + while parent: + if parent.nodeType == parent.ELEMENT_NODE and parent.tagName == "w:del": + return True + parent = parent.parentNode + return False + + def add_rsid_to_p(elem): + if not elem.hasAttribute("w:rsidR"): + elem.setAttribute("w:rsidR", self.rsid) + if not elem.hasAttribute("w:rsidRDefault"): + elem.setAttribute("w:rsidRDefault", self.rsid) + if not elem.hasAttribute("w:rsidP"): + elem.setAttribute("w:rsidP", self.rsid) + # Add w14:paraId and w14:textId if not present + if not elem.hasAttribute("w14:paraId"): + self._ensure_w14_namespace() + elem.setAttribute("w14:paraId", _generate_hex_id()) + if not elem.hasAttribute("w14:textId"): + self._ensure_w14_namespace() + elem.setAttribute("w14:textId", _generate_hex_id()) + + def add_rsid_to_r(elem): + # Use w:rsidDel for inside , otherwise w:rsidR + if is_inside_deletion(elem): + if not elem.hasAttribute("w:rsidDel"): + elem.setAttribute("w:rsidDel", self.rsid) + else: + if not elem.hasAttribute("w:rsidR"): + elem.setAttribute("w:rsidR", self.rsid) + + def add_tracked_change_attrs(elem): + # Auto-assign w:id if 
not present + if not elem.hasAttribute("w:id"): + elem.setAttribute("w:id", str(self._get_next_change_id())) + if not elem.hasAttribute("w:author"): + elem.setAttribute("w:author", self.author) + if not elem.hasAttribute("w:date"): + elem.setAttribute("w:date", timestamp) + # Add w16du:dateUtc for tracked changes (same as w:date since we generate UTC timestamps) + if elem.tagName in ("w:ins", "w:del") and not elem.hasAttribute( + "w16du:dateUtc" + ): + self._ensure_w16du_namespace() + elem.setAttribute("w16du:dateUtc", timestamp) + + def add_comment_attrs(elem): + if not elem.hasAttribute("w:author"): + elem.setAttribute("w:author", self.author) + if not elem.hasAttribute("w:date"): + elem.setAttribute("w:date", timestamp) + if not elem.hasAttribute("w:initials"): + elem.setAttribute("w:initials", self.initials) + + def add_comment_extensible_date(elem): + # Add w16cex:dateUtc for comment extensible elements + if not elem.hasAttribute("w16cex:dateUtc"): + self._ensure_w16cex_namespace() + elem.setAttribute("w16cex:dateUtc", timestamp) + + def add_xml_space_to_t(elem): + # Add xml:space="preserve" to w:t if text has leading/trailing whitespace + if ( + elem.firstChild + and elem.firstChild.nodeType == elem.firstChild.TEXT_NODE + ): + text = elem.firstChild.data + if text and (text[0].isspace() or text[-1].isspace()): + if not elem.hasAttribute("xml:space"): + elem.setAttribute("xml:space", "preserve") + + for node in nodes: + if node.nodeType != node.ELEMENT_NODE: + continue + + # Handle the node itself + if node.tagName == "w:p": + add_rsid_to_p(node) + elif node.tagName == "w:r": + add_rsid_to_r(node) + elif node.tagName == "w:t": + add_xml_space_to_t(node) + elif node.tagName in ("w:ins", "w:del"): + add_tracked_change_attrs(node) + elif node.tagName == "w:comment": + add_comment_attrs(node) + elif node.tagName == "w16cex:commentExtensible": + add_comment_extensible_date(node) + + # Process descendants (getElementsByTagName doesn't return the element itself) + for elem in node.getElementsByTagName("w:p"): + add_rsid_to_p(elem) + for elem in node.getElementsByTagName("w:r"): + add_rsid_to_r(elem) + for elem in node.getElementsByTagName("w:t"): + add_xml_space_to_t(elem) + for tag in ("w:ins", "w:del"): + for elem in node.getElementsByTagName(tag): + add_tracked_change_attrs(elem) + for elem in node.getElementsByTagName("w:comment"): + add_comment_attrs(elem) + for elem in node.getElementsByTagName("w16cex:commentExtensible"): + add_comment_extensible_date(elem) + + def replace_node(self, elem, new_content): + """Replace node with automatic attribute injection.""" + nodes = super().replace_node(elem, new_content) + self._inject_attributes_to_nodes(nodes) + return nodes + + def insert_after(self, elem, xml_content): + """Insert after with automatic attribute injection.""" + nodes = super().insert_after(elem, xml_content) + self._inject_attributes_to_nodes(nodes) + return nodes + + def insert_before(self, elem, xml_content): + """Insert before with automatic attribute injection.""" + nodes = super().insert_before(elem, xml_content) + self._inject_attributes_to_nodes(nodes) + return nodes + + def append_to(self, elem, xml_content): + """Append to with automatic attribute injection.""" + nodes = super().append_to(elem, xml_content) + self._inject_attributes_to_nodes(nodes) + return nodes + + def revert_insertion(self, elem): + """Reject an insertion by wrapping its content in a deletion. + + Wraps all runs inside w:ins in w:del, converting w:t to w:delText. 
+ Can process a single w:ins element or a container element with multiple w:ins. + + Args: + elem: Element to process (w:ins, w:p, w:body, etc.) + + Returns: + list: List containing the processed element(s) + + Raises: + ValueError: If the element contains no w:ins elements + + Example: + # Reject a single insertion + ins = doc["word/document.xml"].get_node(tag="w:ins", attrs={"w:id": "5"}) + doc["word/document.xml"].revert_insertion(ins) + + # Reject all insertions in a paragraph + para = doc["word/document.xml"].get_node(tag="w:p", line_number=42) + doc["word/document.xml"].revert_insertion(para) + """ + # Collect insertions + ins_elements = [] + if elem.tagName == "w:ins": + ins_elements.append(elem) + else: + ins_elements.extend(elem.getElementsByTagName("w:ins")) + + # Validate that there are insertions to reject + if not ins_elements: + raise ValueError( + f"revert_insertion requires w:ins elements. " + f"The provided element <{elem.tagName}> contains no insertions. " + ) + + # Process all insertions - wrap all children in w:del + for ins_elem in ins_elements: + runs = list(ins_elem.getElementsByTagName("w:r")) + if not runs: + continue + + # Create deletion wrapper + del_wrapper = self.dom.createElement("w:del") + + # Process each run + for run in runs: + # Convert w:t → w:delText and w:rsidR → w:rsidDel + if run.hasAttribute("w:rsidR"): + run.setAttribute("w:rsidDel", run.getAttribute("w:rsidR")) + run.removeAttribute("w:rsidR") + elif not run.hasAttribute("w:rsidDel"): + run.setAttribute("w:rsidDel", self.rsid) + + for t_elem in list(run.getElementsByTagName("w:t")): + del_text = self.dom.createElement("w:delText") + # Copy ALL child nodes (not just firstChild) to handle entities + while t_elem.firstChild: + del_text.appendChild(t_elem.firstChild) + for i in range(t_elem.attributes.length): + attr = t_elem.attributes.item(i) + del_text.setAttribute(attr.name, attr.value) + t_elem.parentNode.replaceChild(del_text, t_elem) + + # Move all children from ins to del wrapper + while ins_elem.firstChild: + del_wrapper.appendChild(ins_elem.firstChild) + + # Add del wrapper back to ins + ins_elem.appendChild(del_wrapper) + + # Inject attributes to the deletion wrapper + self._inject_attributes_to_nodes([del_wrapper]) + + return [elem] + + def revert_deletion(self, elem): + """Reject a deletion by re-inserting the deleted content. + + Creates w:ins elements after each w:del, copying deleted content and + converting w:delText back to w:t. + Can process a single w:del element or a container element with multiple w:del. + + Args: + elem: Element to process (w:del, w:p, w:body, etc.) + + Returns: + list: If elem is w:del, returns [elem, new_ins]. Otherwise returns [elem]. + + Raises: + ValueError: If the element contains no w:del elements + + Example: + # Reject a single deletion - returns [w:del, w:ins] + del_elem = doc["word/document.xml"].get_node(tag="w:del", attrs={"w:id": "3"}) + nodes = doc["word/document.xml"].revert_deletion(del_elem) + + # Reject all deletions in a paragraph - returns [para] + para = doc["word/document.xml"].get_node(tag="w:p", line_number=42) + nodes = doc["word/document.xml"].revert_deletion(para) + """ + # Collect deletions FIRST - before we modify the DOM + del_elements = [] + is_single_del = elem.tagName == "w:del" + + if is_single_del: + del_elements.append(elem) + else: + del_elements.extend(elem.getElementsByTagName("w:del")) + + # Validate that there are deletions to reject + if not del_elements: + raise ValueError( + f"revert_deletion requires w:del elements. 
" + f"The provided element <{elem.tagName}> contains no deletions. " + ) + + # Track created insertion (only relevant if elem is a single w:del) + created_insertion = None + + # Process all deletions - create insertions that copy the deleted content + for del_elem in del_elements: + # Clone the deleted runs and convert them to insertions + runs = list(del_elem.getElementsByTagName("w:r")) + if not runs: + continue + + # Create insertion wrapper + ins_elem = self.dom.createElement("w:ins") + + for run in runs: + # Clone the run + new_run = run.cloneNode(True) + + # Convert w:delText → w:t + for del_text in list(new_run.getElementsByTagName("w:delText")): + t_elem = self.dom.createElement("w:t") + # Copy ALL child nodes (not just firstChild) to handle entities + while del_text.firstChild: + t_elem.appendChild(del_text.firstChild) + for i in range(del_text.attributes.length): + attr = del_text.attributes.item(i) + t_elem.setAttribute(attr.name, attr.value) + del_text.parentNode.replaceChild(t_elem, del_text) + + # Update run attributes: w:rsidDel → w:rsidR + if new_run.hasAttribute("w:rsidDel"): + new_run.setAttribute("w:rsidR", new_run.getAttribute("w:rsidDel")) + new_run.removeAttribute("w:rsidDel") + elif not new_run.hasAttribute("w:rsidR"): + new_run.setAttribute("w:rsidR", self.rsid) + + ins_elem.appendChild(new_run) + + # Insert the new insertion after the deletion + nodes = self.insert_after(del_elem, ins_elem.toxml()) + + # If processing a single w:del, track the created insertion + if is_single_del and nodes: + created_insertion = nodes[0] + + # Return based on input type + if is_single_del and created_insertion: + return [elem, created_insertion] + else: + return [elem] + + @staticmethod + def suggest_paragraph(xml_content: str) -> str: + """Transform paragraph XML to add tracked change wrapping for insertion. + + Wraps runs in and adds to w:rPr in w:pPr for numbered lists. + + Args: + xml_content: XML string containing a element + + Returns: + str: Transformed XML with tracked change wrapping + """ + wrapper = f'{xml_content}' + doc = minidom.parseString(wrapper) + para = doc.getElementsByTagName("w:p")[0] + + # Ensure w:pPr exists + pPr_list = para.getElementsByTagName("w:pPr") + if not pPr_list: + pPr = doc.createElement("w:pPr") + para.insertBefore( + pPr, para.firstChild + ) if para.firstChild else para.appendChild(pPr) + else: + pPr = pPr_list[0] + + # Ensure w:rPr exists in w:pPr + rPr_list = pPr.getElementsByTagName("w:rPr") + if not rPr_list: + rPr = doc.createElement("w:rPr") + pPr.appendChild(rPr) + else: + rPr = rPr_list[0] + + # Add to w:rPr + ins_marker = doc.createElement("w:ins") + rPr.insertBefore( + ins_marker, rPr.firstChild + ) if rPr.firstChild else rPr.appendChild(ins_marker) + + # Wrap all non-pPr children in + ins_wrapper = doc.createElement("w:ins") + for child in [c for c in para.childNodes if c.nodeName != "w:pPr"]: + para.removeChild(child) + ins_wrapper.appendChild(child) + para.appendChild(ins_wrapper) + + return para.toxml() + + def suggest_deletion(self, elem): + """Mark a w:r or w:p element as deleted with tracked changes (in-place DOM manipulation). 
+ + For w:r: wraps in , converts to , preserves w:rPr + For w:p (regular): wraps content in , converts to + For w:p (numbered list): adds to w:rPr in w:pPr, wraps content in + + Args: + elem: A w:r or w:p DOM element without existing tracked changes + + Returns: + Element: The modified element + + Raises: + ValueError: If element has existing tracked changes or invalid structure + """ + if elem.nodeName == "w:r": + # Check for existing w:delText + if elem.getElementsByTagName("w:delText"): + raise ValueError("w:r element already contains w:delText") + + # Convert w:t → w:delText + for t_elem in list(elem.getElementsByTagName("w:t")): + del_text = self.dom.createElement("w:delText") + # Copy ALL child nodes (not just firstChild) to handle entities + while t_elem.firstChild: + del_text.appendChild(t_elem.firstChild) + # Preserve attributes like xml:space + for i in range(t_elem.attributes.length): + attr = t_elem.attributes.item(i) + del_text.setAttribute(attr.name, attr.value) + t_elem.parentNode.replaceChild(del_text, t_elem) + + # Update run attributes: w:rsidR → w:rsidDel + if elem.hasAttribute("w:rsidR"): + elem.setAttribute("w:rsidDel", elem.getAttribute("w:rsidR")) + elem.removeAttribute("w:rsidR") + elif not elem.hasAttribute("w:rsidDel"): + elem.setAttribute("w:rsidDel", self.rsid) + + # Wrap in w:del + del_wrapper = self.dom.createElement("w:del") + parent = elem.parentNode + parent.insertBefore(del_wrapper, elem) + parent.removeChild(elem) + del_wrapper.appendChild(elem) + + # Inject attributes to the deletion wrapper + self._inject_attributes_to_nodes([del_wrapper]) + + return del_wrapper + + elif elem.nodeName == "w:p": + # Check for existing tracked changes + if elem.getElementsByTagName("w:ins") or elem.getElementsByTagName("w:del"): + raise ValueError("w:p element already contains tracked changes") + + # Check if it's a numbered list item + pPr_list = elem.getElementsByTagName("w:pPr") + is_numbered = pPr_list and pPr_list[0].getElementsByTagName("w:numPr") + + if is_numbered: + # Add to w:rPr in w:pPr + pPr = pPr_list[0] + rPr_list = pPr.getElementsByTagName("w:rPr") + + if not rPr_list: + rPr = self.dom.createElement("w:rPr") + pPr.appendChild(rPr) + else: + rPr = rPr_list[0] + + # Add marker + del_marker = self.dom.createElement("w:del") + rPr.insertBefore( + del_marker, rPr.firstChild + ) if rPr.firstChild else rPr.appendChild(del_marker) + + # Convert w:t → w:delText in all runs + for t_elem in list(elem.getElementsByTagName("w:t")): + del_text = self.dom.createElement("w:delText") + # Copy ALL child nodes (not just firstChild) to handle entities + while t_elem.firstChild: + del_text.appendChild(t_elem.firstChild) + # Preserve attributes like xml:space + for i in range(t_elem.attributes.length): + attr = t_elem.attributes.item(i) + del_text.setAttribute(attr.name, attr.value) + t_elem.parentNode.replaceChild(del_text, t_elem) + + # Update run attributes: w:rsidR → w:rsidDel + for run in elem.getElementsByTagName("w:r"): + if run.hasAttribute("w:rsidR"): + run.setAttribute("w:rsidDel", run.getAttribute("w:rsidR")) + run.removeAttribute("w:rsidR") + elif not run.hasAttribute("w:rsidDel"): + run.setAttribute("w:rsidDel", self.rsid) + + # Wrap all non-pPr children in + del_wrapper = self.dom.createElement("w:del") + for child in [c for c in elem.childNodes if c.nodeName != "w:pPr"]: + elem.removeChild(child) + del_wrapper.appendChild(child) + elem.appendChild(del_wrapper) + + # Inject attributes to the deletion wrapper + self._inject_attributes_to_nodes([del_wrapper]) + + 
return elem + + else: + raise ValueError(f"Element must be w:r or w:p, got {elem.nodeName}") + + +def _generate_hex_id() -> str: + """Generate random 8-character hex ID for para/durable IDs. + + Values are constrained to be less than 0x7FFFFFFF per OOXML spec: + - paraId must be < 0x80000000 + - durableId must be < 0x7FFFFFFF + We use the stricter constraint (0x7FFFFFFF) for both. + """ + return f"{random.randint(1, 0x7FFFFFFE):08X}" + + +def _generate_rsid() -> str: + """Generate random 8-character hex RSID.""" + return "".join(random.choices("0123456789ABCDEF", k=8)) + + +class Document: + """Manages comments in unpacked Word documents.""" + + def __init__( + self, + unpacked_dir, + rsid=None, + track_revisions=False, + author="Claude", + initials="C", + ): + """ + Initialize with path to unpacked Word document directory. + Automatically sets up comment infrastructure (people.xml, RSIDs). + + Args: + unpacked_dir: Path to unpacked DOCX directory (must contain word/ subdirectory) + rsid: Optional RSID to use for all comment elements. If not provided, one will be generated. + track_revisions: If True, enables track revisions in settings.xml (default: False) + author: Default author name for comments (default: "Claude") + initials: Default author initials for comments (default: "C") + """ + self.original_path = Path(unpacked_dir) + + if not self.original_path.exists() or not self.original_path.is_dir(): + raise ValueError(f"Directory not found: {unpacked_dir}") + + # Create temporary directory with subdirectories for unpacked content and baseline + self.temp_dir = tempfile.mkdtemp(prefix="docx_") + self.unpacked_path = Path(self.temp_dir) / "unpacked" + shutil.copytree(self.original_path, self.unpacked_path) + + # Pack original directory into temporary .docx for validation baseline (outside unpacked dir) + self.original_docx = Path(self.temp_dir) / "original.docx" + pack_document(self.original_path, self.original_docx, validate=False) + + self.word_path = self.unpacked_path / "word" + + # Generate RSID if not provided + self.rsid = rsid if rsid else _generate_rsid() + print(f"Using RSID: {self.rsid}") + + # Set default author and initials + self.author = author + self.initials = initials + + # Cache for lazy-loaded editors + self._editors = {} + + # Comment file paths + self.comments_path = self.word_path / "comments.xml" + self.comments_extended_path = self.word_path / "commentsExtended.xml" + self.comments_ids_path = self.word_path / "commentsIds.xml" + self.comments_extensible_path = self.word_path / "commentsExtensible.xml" + + # Load existing comments and determine next ID (before setup modifies files) + self.existing_comments = self._load_existing_comments() + self.next_comment_id = self._get_next_comment_id() + + # Convenient access to document.xml editor (semi-private) + self._document = self["word/document.xml"] + + # Setup tracked changes infrastructure + self._setup_tracking(track_revisions=track_revisions) + + # Add author to people.xml + self._add_author_to_people(author) + + def __getitem__(self, xml_path: str) -> DocxXMLEditor: + """ + Get or create a DocxXMLEditor for the specified XML file. 
+ + Enables lazy-loaded editors with bracket notation: + node = doc["word/document.xml"].get_node(tag="w:p", line_number=42) + + Args: + xml_path: Relative path to XML file (e.g., "word/document.xml", "word/comments.xml") + + Returns: + DocxXMLEditor instance for the specified file + + Raises: + ValueError: If the file does not exist + + Example: + # Get node from document.xml + node = doc["word/document.xml"].get_node(tag="w:del", attrs={"w:id": "1"}) + + # Get node from comments.xml + comment = doc["word/comments.xml"].get_node(tag="w:comment", attrs={"w:id": "0"}) + """ + if xml_path not in self._editors: + file_path = self.unpacked_path / xml_path + if not file_path.exists(): + raise ValueError(f"XML file not found: {xml_path}") + # Use DocxXMLEditor with RSID, author, and initials for all editors + self._editors[xml_path] = DocxXMLEditor( + file_path, rsid=self.rsid, author=self.author, initials=self.initials + ) + return self._editors[xml_path] + + def add_comment(self, start, end, text: str) -> int: + """ + Add a comment spanning from one element to another. + + Args: + start: DOM element for the starting point + end: DOM element for the ending point + text: Comment content + + Returns: + The comment ID that was created + + Example: + start_node = cm.get_document_node(tag="w:del", id="1") + end_node = cm.get_document_node(tag="w:ins", id="2") + cm.add_comment(start=start_node, end=end_node, text="Explanation") + """ + comment_id = self.next_comment_id + para_id = _generate_hex_id() + durable_id = _generate_hex_id() + timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ") + + # Add comment ranges to document.xml immediately + self._document.insert_before(start, self._comment_range_start_xml(comment_id)) + + # If end node is a paragraph, append comment markup inside it + # Otherwise insert after it (for run-level anchors) + if end.tagName == "w:p": + self._document.append_to(end, self._comment_range_end_xml(comment_id)) + else: + self._document.insert_after(end, self._comment_range_end_xml(comment_id)) + + # Add to comments.xml immediately + self._add_to_comments_xml( + comment_id, para_id, text, self.author, self.initials, timestamp + ) + + # Add to commentsExtended.xml immediately + self._add_to_comments_extended_xml(para_id, parent_para_id=None) + + # Add to commentsIds.xml immediately + self._add_to_comments_ids_xml(para_id, durable_id) + + # Add to commentsExtensible.xml immediately + self._add_to_comments_extensible_xml(durable_id) + + # Update existing_comments so replies work + self.existing_comments[comment_id] = {"para_id": para_id} + + self.next_comment_id += 1 + return comment_id + + def reply_to_comment( + self, + parent_comment_id: int, + text: str, + ) -> int: + """ + Add a reply to an existing comment. 
+ + Args: + parent_comment_id: The w:id of the parent comment to reply to + text: Reply text + + Returns: + The comment ID that was created for the reply + + Example: + cm.reply_to_comment(parent_comment_id=0, text="I agree with this change") + """ + if parent_comment_id not in self.existing_comments: + raise ValueError(f"Parent comment with id={parent_comment_id} not found") + + parent_info = self.existing_comments[parent_comment_id] + comment_id = self.next_comment_id + para_id = _generate_hex_id() + durable_id = _generate_hex_id() + timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ") + + # Add comment ranges to document.xml immediately + parent_start_elem = self._document.get_node( + tag="w:commentRangeStart", attrs={"w:id": str(parent_comment_id)} + ) + parent_ref_elem = self._document.get_node( + tag="w:commentReference", attrs={"w:id": str(parent_comment_id)} + ) + + self._document.insert_after( + parent_start_elem, self._comment_range_start_xml(comment_id) + ) + parent_ref_run = parent_ref_elem.parentNode + self._document.insert_after( + parent_ref_run, f'' + ) + self._document.insert_after( + parent_ref_run, self._comment_ref_run_xml(comment_id) + ) + + # Add to comments.xml immediately + self._add_to_comments_xml( + comment_id, para_id, text, self.author, self.initials, timestamp + ) + + # Add to commentsExtended.xml immediately (with parent) + self._add_to_comments_extended_xml( + para_id, parent_para_id=parent_info["para_id"] + ) + + # Add to commentsIds.xml immediately + self._add_to_comments_ids_xml(para_id, durable_id) + + # Add to commentsExtensible.xml immediately + self._add_to_comments_extensible_xml(durable_id) + + # Update existing_comments so replies work + self.existing_comments[comment_id] = {"para_id": para_id} + + self.next_comment_id += 1 + return comment_id + + def __del__(self): + """Clean up temporary directory on deletion.""" + if hasattr(self, "temp_dir") and Path(self.temp_dir).exists(): + shutil.rmtree(self.temp_dir) + + def validate(self) -> None: + """ + Validate the document against XSD schema and redlining rules. + + Raises: + ValueError: If validation fails. + """ + # Create validators with current state + schema_validator = DOCXSchemaValidator( + self.unpacked_path, self.original_docx, verbose=False + ) + redlining_validator = RedliningValidator( + self.unpacked_path, self.original_docx, verbose=False + ) + + # Run validations + if not schema_validator.validate(): + raise ValueError("Schema validation failed") + if not redlining_validator.validate(): + raise ValueError("Redlining validation failed") + + def save(self, destination=None, validate=True) -> None: + """ + Save all modified XML files to disk and copy to destination directory. + + This persists all changes made via add_comment() and reply_to_comment(). + + Args: + destination: Optional path to save to. If None, saves back to original directory. + validate: If True, validates document before saving (default: True). 
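+
+        Example (destination path is illustrative):
+            doc.save()                          # write back to the original directory
+            doc.save("output/unpacked")         # write to a different directory
+            doc.save(validate=False)            # skip validation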
+ """ + # Only ensure comment relationships and content types if comment files exist + if self.comments_path.exists(): + self._ensure_comment_relationships() + self._ensure_comment_content_types() + + # Save all modified XML files in temp directory + for editor in self._editors.values(): + editor.save() + + # Validate by default + if validate: + self.validate() + + # Copy contents from temp directory to destination (or original directory) + target_path = Path(destination) if destination else self.original_path + shutil.copytree(self.unpacked_path, target_path, dirs_exist_ok=True) + + # ==================== Private: Initialization ==================== + + def _get_next_comment_id(self): + """Get the next available comment ID.""" + if not self.comments_path.exists(): + return 0 + + editor = self["word/comments.xml"] + max_id = -1 + for comment_elem in editor.dom.getElementsByTagName("w:comment"): + comment_id = comment_elem.getAttribute("w:id") + if comment_id: + try: + max_id = max(max_id, int(comment_id)) + except ValueError: + pass + return max_id + 1 + + def _load_existing_comments(self): + """Load existing comments from files to enable replies.""" + if not self.comments_path.exists(): + return {} + + editor = self["word/comments.xml"] + existing = {} + + for comment_elem in editor.dom.getElementsByTagName("w:comment"): + comment_id = comment_elem.getAttribute("w:id") + if not comment_id: + continue + + # Find para_id from the w:p element within the comment + para_id = None + for p_elem in comment_elem.getElementsByTagName("w:p"): + para_id = p_elem.getAttribute("w14:paraId") + if para_id: + break + + if not para_id: + continue + + existing[int(comment_id)] = {"para_id": para_id} + + return existing + + # ==================== Private: Setup Methods ==================== + + def _setup_tracking(self, track_revisions=False): + """Set up comment infrastructure in unpacked directory. 
+ + Args: + track_revisions: If True, enables track revisions in settings.xml + """ + # Create or update word/people.xml + people_file = self.word_path / "people.xml" + self._update_people_xml(people_file) + + # Update XML files + self._add_content_type_for_people(self.unpacked_path / "[Content_Types].xml") + self._add_relationship_for_people( + self.word_path / "_rels" / "document.xml.rels" + ) + + # Always add RSID to settings.xml, optionally enable trackRevisions + self._update_settings( + self.word_path / "settings.xml", track_revisions=track_revisions + ) + + def _update_people_xml(self, path): + """Create people.xml if it doesn't exist.""" + if not path.exists(): + # Copy from template + shutil.copy(TEMPLATE_DIR / "people.xml", path) + + def _add_content_type_for_people(self, path): + """Add people.xml content type to [Content_Types].xml if not already present.""" + editor = self["[Content_Types].xml"] + + if self._has_override(editor, "/word/people.xml"): + return + + # Add Override element + root = editor.dom.documentElement + override_xml = '' + editor.append_to(root, override_xml) + + def _add_relationship_for_people(self, path): + """Add people.xml relationship to document.xml.rels if not already present.""" + editor = self["word/_rels/document.xml.rels"] + + if self._has_relationship(editor, "people.xml"): + return + + root = editor.dom.documentElement + root_tag = root.tagName # type: ignore + prefix = root_tag.split(":")[0] + ":" if ":" in root_tag else "" + next_rid = editor.get_next_rid() + + # Create the relationship entry + rel_xml = f'<{prefix}Relationship Id="{next_rid}" Type="http://schemas.microsoft.com/office/2011/relationships/people" Target="people.xml"/>' + editor.append_to(root, rel_xml) + + def _update_settings(self, path, track_revisions=False): + """Add RSID and optionally enable track revisions in settings.xml. 
+ + Args: + path: Path to settings.xml + track_revisions: If True, adds trackRevisions element + + Places elements per OOXML schema order: + - trackRevisions: early (before defaultTabStop) + - rsids: late (after compat) + """ + editor = self["word/settings.xml"] + root = editor.get_node(tag="w:settings") + prefix = root.tagName.split(":")[0] if ":" in root.tagName else "w" + + # Conditionally add trackRevisions if requested + if track_revisions: + track_revisions_exists = any( + elem.tagName == f"{prefix}:trackRevisions" + for elem in editor.dom.getElementsByTagName(f"{prefix}:trackRevisions") + ) + + if not track_revisions_exists: + track_rev_xml = f"<{prefix}:trackRevisions/>" + # Try to insert before documentProtection, defaultTabStop, or at start + inserted = False + for tag in [f"{prefix}:documentProtection", f"{prefix}:defaultTabStop"]: + elements = editor.dom.getElementsByTagName(tag) + if elements: + editor.insert_before(elements[0], track_rev_xml) + inserted = True + break + if not inserted: + # Insert as first child of settings + if root.firstChild: + editor.insert_before(root.firstChild, track_rev_xml) + else: + editor.append_to(root, track_rev_xml) + + # Always check if rsids section exists + rsids_elements = editor.dom.getElementsByTagName(f"{prefix}:rsids") + + if not rsids_elements: + # Add new rsids section + rsids_xml = f'''<{prefix}:rsids> + <{prefix}:rsidRoot {prefix}:val="{self.rsid}"/> + <{prefix}:rsid {prefix}:val="{self.rsid}"/> +''' + + # Try to insert after compat, before clrSchemeMapping, or before closing tag + inserted = False + compat_elements = editor.dom.getElementsByTagName(f"{prefix}:compat") + if compat_elements: + editor.insert_after(compat_elements[0], rsids_xml) + inserted = True + + if not inserted: + clr_elements = editor.dom.getElementsByTagName( + f"{prefix}:clrSchemeMapping" + ) + if clr_elements: + editor.insert_before(clr_elements[0], rsids_xml) + inserted = True + + if not inserted: + editor.append_to(root, rsids_xml) + else: + # Check if this rsid already exists + rsids_elem = rsids_elements[0] + rsid_exists = any( + elem.getAttribute(f"{prefix}:val") == self.rsid + for elem in rsids_elem.getElementsByTagName(f"{prefix}:rsid") + ) + + if not rsid_exists: + rsid_xml = f'<{prefix}:rsid {prefix}:val="{self.rsid}"/>' + editor.append_to(rsids_elem, rsid_xml) + + # ==================== Private: XML File Creation ==================== + + def _add_to_comments_xml( + self, comment_id, para_id, text, author, initials, timestamp + ): + """Add a single comment to comments.xml.""" + if not self.comments_path.exists(): + shutil.copy(TEMPLATE_DIR / "comments.xml", self.comments_path) + + editor = self["word/comments.xml"] + root = editor.get_node(tag="w:comments") + + escaped_text = ( + text.replace("&", "&").replace("<", "<").replace(">", ">") + ) + # Note: w:rsidR, w:rsidRDefault, w:rsidP on w:p, w:rsidR on w:r, + # and w:author, w:date, w:initials on w:comment are automatically added by DocxXMLEditor + comment_xml = f''' + + + {escaped_text} + +''' + editor.append_to(root, comment_xml) + + def _add_to_comments_extended_xml(self, para_id, parent_para_id): + """Add a single comment to commentsExtended.xml.""" + if not self.comments_extended_path.exists(): + shutil.copy( + TEMPLATE_DIR / "commentsExtended.xml", self.comments_extended_path + ) + + editor = self["word/commentsExtended.xml"] + root = editor.get_node(tag="w15:commentsEx") + + if parent_para_id: + xml = f'' + else: + xml = f'' + editor.append_to(root, xml) + + def _add_to_comments_ids_xml(self, 
para_id, durable_id): + """Add a single comment to commentsIds.xml.""" + if not self.comments_ids_path.exists(): + shutil.copy(TEMPLATE_DIR / "commentsIds.xml", self.comments_ids_path) + + editor = self["word/commentsIds.xml"] + root = editor.get_node(tag="w16cid:commentsIds") + + xml = f'' + editor.append_to(root, xml) + + def _add_to_comments_extensible_xml(self, durable_id): + """Add a single comment to commentsExtensible.xml.""" + if not self.comments_extensible_path.exists(): + shutil.copy( + TEMPLATE_DIR / "commentsExtensible.xml", self.comments_extensible_path + ) + + editor = self["word/commentsExtensible.xml"] + root = editor.get_node(tag="w16cex:commentsExtensible") + + xml = f'' + editor.append_to(root, xml) + + # ==================== Private: XML Fragments ==================== + + def _comment_range_start_xml(self, comment_id): + """Generate XML for comment range start.""" + return f'' + + def _comment_range_end_xml(self, comment_id): + """Generate XML for comment range end with reference run. + + Note: w:rsidR is automatically added by DocxXMLEditor. + """ + return f''' + + + +''' + + def _comment_ref_run_xml(self, comment_id): + """Generate XML for comment reference run. + + Note: w:rsidR is automatically added by DocxXMLEditor. + """ + return f''' + + +''' + + # ==================== Private: Metadata Updates ==================== + + def _has_relationship(self, editor, target): + """Check if a relationship with given target exists.""" + for rel_elem in editor.dom.getElementsByTagName("Relationship"): + if rel_elem.getAttribute("Target") == target: + return True + return False + + def _has_override(self, editor, part_name): + """Check if an override with given part name exists.""" + for override_elem in editor.dom.getElementsByTagName("Override"): + if override_elem.getAttribute("PartName") == part_name: + return True + return False + + def _has_author(self, editor, author): + """Check if an author already exists in people.xml.""" + for person_elem in editor.dom.getElementsByTagName("w15:person"): + if person_elem.getAttribute("w15:author") == author: + return True + return False + + def _add_author_to_people(self, author): + """Add author to people.xml (called during initialization).""" + people_path = self.word_path / "people.xml" + + # people.xml should already exist from _setup_tracking + if not people_path.exists(): + raise ValueError("people.xml should exist after _setup_tracking") + + editor = self["word/people.xml"] + root = editor.get_node(tag="w15:people") + + # Check if author already exists + if self._has_author(editor, author): + return + + # Add author with proper XML escaping to prevent injection + escaped_author = html.escape(author, quote=True) + person_xml = f''' + +''' + editor.append_to(root, person_xml) + + def _ensure_comment_relationships(self): + """Ensure word/_rels/document.xml.rels has comment relationships.""" + editor = self["word/_rels/document.xml.rels"] + + if self._has_relationship(editor, "comments.xml"): + return + + root = editor.dom.documentElement + root_tag = root.tagName # type: ignore + prefix = root_tag.split(":")[0] + ":" if ":" in root_tag else "" + next_rid_num = int(editor.get_next_rid()[3:]) + + # Add relationship elements + rels = [ + ( + next_rid_num, + "http://schemas.openxmlformats.org/officeDocument/2006/relationships/comments", + "comments.xml", + ), + ( + next_rid_num + 1, + "http://schemas.microsoft.com/office/2011/relationships/commentsExtended", + "commentsExtended.xml", + ), + ( + next_rid_num + 2, + 
"http://schemas.microsoft.com/office/2016/09/relationships/commentsIds", + "commentsIds.xml", + ), + ( + next_rid_num + 3, + "http://schemas.microsoft.com/office/2018/08/relationships/commentsExtensible", + "commentsExtensible.xml", + ), + ] + + for rel_id, rel_type, target in rels: + rel_xml = f'<{prefix}Relationship Id="rId{rel_id}" Type="{rel_type}" Target="{target}"/>' + editor.append_to(root, rel_xml) + + def _ensure_comment_content_types(self): + """Ensure [Content_Types].xml has comment content types.""" + editor = self["[Content_Types].xml"] + + if self._has_override(editor, "/word/comments.xml"): + return + + root = editor.dom.documentElement + + # Add Override elements + overrides = [ + ( + "/word/comments.xml", + "application/vnd.openxmlformats-officedocument.wordprocessingml.comments+xml", + ), + ( + "/word/commentsExtended.xml", + "application/vnd.openxmlformats-officedocument.wordprocessingml.commentsExtended+xml", + ), + ( + "/word/commentsIds.xml", + "application/vnd.openxmlformats-officedocument.wordprocessingml.commentsIds+xml", + ), + ( + "/word/commentsExtensible.xml", + "application/vnd.openxmlformats-officedocument.wordprocessingml.commentsExtensible+xml", + ), + ] + + for part_name, content_type in overrides: + override_xml = ( + f'' + ) + editor.append_to(root, override_xml) diff --git a/skills/docx/scripts/templates/comments.xml b/skills/docx/scripts/templates/comments.xml new file mode 100644 index 000000000..b5dace0ef --- /dev/null +++ b/skills/docx/scripts/templates/comments.xml @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/skills/docx/scripts/templates/commentsExtended.xml b/skills/docx/scripts/templates/commentsExtended.xml new file mode 100644 index 000000000..b4cf23e35 --- /dev/null +++ b/skills/docx/scripts/templates/commentsExtended.xml @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/skills/docx/scripts/templates/commentsExtensible.xml b/skills/docx/scripts/templates/commentsExtensible.xml new file mode 100644 index 000000000..e32a05e0c --- /dev/null +++ b/skills/docx/scripts/templates/commentsExtensible.xml @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/skills/docx/scripts/templates/commentsIds.xml b/skills/docx/scripts/templates/commentsIds.xml new file mode 100644 index 000000000..d04bc8e06 --- /dev/null +++ b/skills/docx/scripts/templates/commentsIds.xml @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/skills/docx/scripts/templates/people.xml b/skills/docx/scripts/templates/people.xml new file mode 100644 index 000000000..a839cafeb --- /dev/null +++ b/skills/docx/scripts/templates/people.xml @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/skills/docx/scripts/utilities.py b/skills/docx/scripts/utilities.py new file mode 100755 index 000000000..d92dae611 --- /dev/null +++ b/skills/docx/scripts/utilities.py @@ -0,0 +1,374 @@ +#!/usr/bin/env python3 +""" +Utilities for editing OOXML documents. + +This module provides XMLEditor, a tool for manipulating XML files with support for +line-number-based node finding and DOM manipulation. Each element is automatically +annotated with its original line and column position during parsing. 
+ +Example usage: + editor = XMLEditor("document.xml") + + # Find node by line number or range + elem = editor.get_node(tag="w:r", line_number=519) + elem = editor.get_node(tag="w:p", line_number=range(100, 200)) + + # Find node by text content + elem = editor.get_node(tag="w:p", contains="specific text") + + # Find node by attributes + elem = editor.get_node(tag="w:r", attrs={"w:id": "target"}) + + # Combine filters + elem = editor.get_node(tag="w:p", line_number=range(1, 50), contains="text") + + # Replace, insert, or manipulate + new_elem = editor.replace_node(elem, "new text") + editor.insert_after(new_elem, "more") + + # Save changes + editor.save() +""" + +import html +from pathlib import Path +from typing import Optional, Union + +import defusedxml.minidom +import defusedxml.sax + + +class XMLEditor: + """ + Editor for manipulating OOXML XML files with line-number-based node finding. + + This class parses XML files and tracks the original line and column position + of each element. This enables finding nodes by their line number in the original + file, which is useful when working with Read tool output. + + Attributes: + xml_path: Path to the XML file being edited + encoding: Detected encoding of the XML file ('ascii' or 'utf-8') + dom: Parsed DOM tree with parse_position attributes on elements + """ + + def __init__(self, xml_path): + """ + Initialize with path to XML file and parse with line number tracking. + + Args: + xml_path: Path to XML file to edit (str or Path) + + Raises: + ValueError: If the XML file does not exist + """ + self.xml_path = Path(xml_path) + if not self.xml_path.exists(): + raise ValueError(f"XML file not found: {xml_path}") + + with open(self.xml_path, "rb") as f: + header = f.read(200).decode("utf-8", errors="ignore") + self.encoding = "ascii" if 'encoding="ascii"' in header else "utf-8" + + parser = _create_line_tracking_parser() + self.dom = defusedxml.minidom.parse(str(self.xml_path), parser) + + def get_node( + self, + tag: str, + attrs: Optional[dict[str, str]] = None, + line_number: Optional[Union[int, range]] = None, + contains: Optional[str] = None, + ): + """ + Get a DOM element by tag and identifier. + + Finds an element by either its line number in the original file or by + matching attribute values. Exactly one match must be found. + + Args: + tag: The XML tag name (e.g., "w:del", "w:ins", "w:r") + attrs: Dictionary of attribute name-value pairs to match (e.g., {"w:id": "1"}) + line_number: Line number (int) or line range (range) in original XML file (1-indexed) + contains: Text string that must appear in any text node within the element. + Supports both entity notation (“) and Unicode characters (\u201c). 
+ + Returns: + defusedxml.minidom.Element: The matching DOM element + + Raises: + ValueError: If node not found or multiple matches found + + Example: + elem = editor.get_node(tag="w:r", line_number=519) + elem = editor.get_node(tag="w:r", line_number=range(100, 200)) + elem = editor.get_node(tag="w:del", attrs={"w:id": "1"}) + elem = editor.get_node(tag="w:p", attrs={"w14:paraId": "12345678"}) + elem = editor.get_node(tag="w:commentRangeStart", attrs={"w:id": "0"}) + elem = editor.get_node(tag="w:p", contains="specific text") + elem = editor.get_node(tag="w:t", contains="“Agreement") # Entity notation + elem = editor.get_node(tag="w:t", contains="\u201cAgreement") # Unicode character + """ + matches = [] + for elem in self.dom.getElementsByTagName(tag): + # Check line_number filter + if line_number is not None: + parse_pos = getattr(elem, "parse_position", (None,)) + elem_line = parse_pos[0] + + # Handle both single line number and range + if isinstance(line_number, range): + if elem_line not in line_number: + continue + else: + if elem_line != line_number: + continue + + # Check attrs filter + if attrs is not None: + if not all( + elem.getAttribute(attr_name) == attr_value + for attr_name, attr_value in attrs.items() + ): + continue + + # Check contains filter + if contains is not None: + elem_text = self._get_element_text(elem) + # Normalize the search string: convert HTML entities to Unicode characters + # This allows searching for both "“Rowan" and ""Rowan" + normalized_contains = html.unescape(contains) + if normalized_contains not in elem_text: + continue + + # If all applicable filters passed, this is a match + matches.append(elem) + + if not matches: + # Build descriptive error message + filters = [] + if line_number is not None: + line_str = ( + f"lines {line_number.start}-{line_number.stop - 1}" + if isinstance(line_number, range) + else f"line {line_number}" + ) + filters.append(f"at {line_str}") + if attrs is not None: + filters.append(f"with attributes {attrs}") + if contains is not None: + filters.append(f"containing '{contains}'") + + filter_desc = " ".join(filters) if filters else "" + base_msg = f"Node not found: <{tag}> {filter_desc}".strip() + + # Add helpful hint based on filters used + if contains: + hint = "Text may be split across elements or use different wording." + elif line_number: + hint = "Line numbers may have changed if document was modified." + elif attrs: + hint = "Verify attribute values are correct." + else: + hint = "Try adding filters (attrs, line_number, or contains)." + + raise ValueError(f"{base_msg}. {hint}") + if len(matches) > 1: + raise ValueError( + f"Multiple nodes found: <{tag}>. " + f"Add more filters (attrs, line_number, or contains) to narrow the search." + ) + return matches[0] + + def _get_element_text(self, elem): + """ + Recursively extract all text content from an element. + + Skips text nodes that contain only whitespace (spaces, tabs, newlines), + which typically represent XML formatting rather than document content. 
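+
+        For example, a run serialized as "<w:r>\n  <w:t>Hello</w:t>\n</w:r>" yields
+        "Hello"; the indentation-only text nodes between elements are skipped.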
+ + Args: + elem: defusedxml.minidom.Element to extract text from + + Returns: + str: Concatenated text from all non-whitespace text nodes within the element + """ + text_parts = [] + for node in elem.childNodes: + if node.nodeType == node.TEXT_NODE: + # Skip whitespace-only text nodes (XML formatting) + if node.data.strip(): + text_parts.append(node.data) + elif node.nodeType == node.ELEMENT_NODE: + text_parts.append(self._get_element_text(node)) + return "".join(text_parts) + + def replace_node(self, elem, new_content): + """ + Replace a DOM element with new XML content. + + Args: + elem: defusedxml.minidom.Element to replace + new_content: String containing XML to replace the node with + + Returns: + List[defusedxml.minidom.Node]: All inserted nodes + + Example: + new_nodes = editor.replace_node(old_elem, "text") + """ + parent = elem.parentNode + nodes = self._parse_fragment(new_content) + for node in nodes: + parent.insertBefore(node, elem) + parent.removeChild(elem) + return nodes + + def insert_after(self, elem, xml_content): + """ + Insert XML content after a DOM element. + + Args: + elem: defusedxml.minidom.Element to insert after + xml_content: String containing XML to insert + + Returns: + List[defusedxml.minidom.Node]: All inserted nodes + + Example: + new_nodes = editor.insert_after(elem, "text") + """ + parent = elem.parentNode + next_sibling = elem.nextSibling + nodes = self._parse_fragment(xml_content) + for node in nodes: + if next_sibling: + parent.insertBefore(node, next_sibling) + else: + parent.appendChild(node) + return nodes + + def insert_before(self, elem, xml_content): + """ + Insert XML content before a DOM element. + + Args: + elem: defusedxml.minidom.Element to insert before + xml_content: String containing XML to insert + + Returns: + List[defusedxml.minidom.Node]: All inserted nodes + + Example: + new_nodes = editor.insert_before(elem, "text") + """ + parent = elem.parentNode + nodes = self._parse_fragment(xml_content) + for node in nodes: + parent.insertBefore(node, elem) + return nodes + + def append_to(self, elem, xml_content): + """ + Append XML content as a child of a DOM element. + + Args: + elem: defusedxml.minidom.Element to append to + xml_content: String containing XML to append + + Returns: + List[defusedxml.minidom.Node]: All inserted nodes + + Example: + new_nodes = editor.append_to(elem, "text") + """ + nodes = self._parse_fragment(xml_content) + for node in nodes: + elem.appendChild(node) + return nodes + + def get_next_rid(self): + """Get the next available rId for relationships files.""" + max_id = 0 + for rel_elem in self.dom.getElementsByTagName("Relationship"): + rel_id = rel_elem.getAttribute("Id") + if rel_id.startswith("rId"): + try: + max_id = max(max_id, int(rel_id[3:])) + except ValueError: + pass + return f"rId{max_id + 1}" + + def save(self): + """ + Save the edited XML back to the file. + + Serializes the DOM tree and writes it back to the original file path, + preserving the original encoding (ascii or utf-8). + """ + content = self.dom.toxml(encoding=self.encoding) + self.xml_path.write_bytes(content) + + def _parse_fragment(self, xml_content): + """ + Parse XML fragment and return list of imported nodes. 
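+
+        Example (fragment is illustrative):
+            nodes = editor._parse_fragment("<w:r><w:t>Hi</w:t></w:r>")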
+ + Args: + xml_content: String containing XML fragment + + Returns: + List of defusedxml.minidom.Node objects imported into this document + + Raises: + AssertionError: If fragment contains no element nodes + """ + # Extract namespace declarations from the root document element + root_elem = self.dom.documentElement + namespaces = [] + if root_elem and root_elem.attributes: + for i in range(root_elem.attributes.length): + attr = root_elem.attributes.item(i) + if attr.name.startswith("xmlns"): # type: ignore + namespaces.append(f'{attr.name}="{attr.value}"') # type: ignore + + ns_decl = " ".join(namespaces) + wrapper = f"{xml_content}" + fragment_doc = defusedxml.minidom.parseString(wrapper) + nodes = [ + self.dom.importNode(child, deep=True) + for child in fragment_doc.documentElement.childNodes # type: ignore + ] + elements = [n for n in nodes if n.nodeType == n.ELEMENT_NODE] + assert elements, "Fragment must contain at least one element" + return nodes + + +def _create_line_tracking_parser(): + """ + Create a SAX parser that tracks line and column numbers for each element. + + Monkey patches the SAX content handler to store the current line and column + position from the underlying expat parser onto each element as a parse_position + attribute (line, column) tuple. + + Returns: + defusedxml.sax.xmlreader.XMLReader: Configured SAX parser + """ + + def set_content_handler(dom_handler): + def startElementNS(name, tagName, attrs): + orig_start_cb(name, tagName, attrs) + cur_elem = dom_handler.elementStack[-1] + cur_elem.parse_position = ( + parser._parser.CurrentLineNumber, # type: ignore + parser._parser.CurrentColumnNumber, # type: ignore + ) + + orig_start_cb = dom_handler.startElementNS + dom_handler.startElementNS = startElementNS + orig_set_content_handler(dom_handler) + + parser = defusedxml.sax.make_parser() + orig_set_content_handler = parser.setContentHandler + parser.setContentHandler = set_content_handler # type: ignore + return parser diff --git a/skills/file-organizer/SKILL.md b/skills/file-organizer/SKILL.md new file mode 100644 index 000000000..66762b834 --- /dev/null +++ b/skills/file-organizer/SKILL.md @@ -0,0 +1,433 @@ +--- +name: file-organizer +description: Intelligently organizes your files and folders across your computer by understanding context, finding duplicates, suggesting better structures, and automating cleanup tasks. Reduces cognitive load and keeps your digital workspace tidy without manual effort. +--- + +# File Organizer + +This skill acts as your personal organization assistant, helping you maintain a clean, logical file structure across your computer without the mental overhead of constant manual organization. + +## When to Use This Skill + +- Your Downloads folder is a chaotic mess +- You can't find files because they're scattered everywhere +- You have duplicate files taking up space +- Your folder structure doesn't make sense anymore +- You want to establish better organization habits +- You're starting a new project and need a good structure +- You're cleaning up before archiving old projects + +## What This Skill Does + +1. **Analyzes Current Structure**: Reviews your folders and files to understand what you have +2. **Finds Duplicates**: Identifies duplicate files across your system +3. **Suggests Organization**: Proposes logical folder structures based on your content +4. **Automates Cleanup**: Moves, renames, and organizes files with your approval +5. **Maintains Context**: Makes smart decisions based on file types, dates, and content +6. 
**Reduces Clutter**: Identifies old files you probably don't need anymore + +## How to Use + +### From Your Home Directory + +``` +cd ~ +``` + +Then run Claude Code and ask for help: + +``` +Help me organize my Downloads folder +``` + +``` +Find duplicate files in my Documents folder +``` + +``` +Review my project directories and suggest improvements +``` + +### Specific Organization Tasks + +``` +Organize these downloads into proper folders based on what they are +``` + +``` +Find duplicate files and help me decide which to keep +``` + +``` +Clean up old files I haven't touched in 6+ months +``` + +``` +Create a better folder structure for my [work/projects/photos/etc] +``` + +## Instructions + +When a user requests file organization help: + +1. **Understand the Scope** + + Ask clarifying questions: + - Which directory needs organization? (Downloads, Documents, entire home folder?) + - What's the main problem? (Can't find things, duplicates, too messy, no structure?) + - Any files or folders to avoid? (Current projects, sensitive data?) + - How aggressively to organize? (Conservative vs. comprehensive cleanup) + +2. **Analyze Current State** + + Review the target directory: + ```bash + # Get overview of current structure + ls -la [target_directory] + + # Check file types and sizes + find [target_directory] -type f -exec file {} \; | head -20 + + # Identify largest files + du -sh [target_directory]/* | sort -rh | head -20 + + # Count file types + find [target_directory] -type f | sed 's/.*\.//' | sort | uniq -c | sort -rn + ``` + + Summarize findings: + - Total files and folders + - File type breakdown + - Size distribution + - Date ranges + - Obvious organization issues + +3. **Identify Organization Patterns** + + Based on the files, determine logical groupings: + + **By Type**: + - Documents (PDFs, DOCX, TXT) + - Images (JPG, PNG, SVG) + - Videos (MP4, MOV) + - Archives (ZIP, TAR, DMG) + - Code/Projects (directories with code) + - Spreadsheets (XLSX, CSV) + - Presentations (PPTX, KEY) + + **By Purpose**: + - Work vs. Personal + - Active vs. Archive + - Project-specific + - Reference materials + - Temporary/scratch files + + **By Date**: + - Current year/month + - Previous years + - Very old (archive candidates) + +4. **Find Duplicates** + + When requested, search for duplicates: + ```bash + # Find exact duplicates by hash + find [directory] -type f -exec md5 {} \; | sort | uniq -d + + # Find files with same name + find [directory] -type f -printf '%f\n' | sort | uniq -d + + # Find similar-sized files + find [directory] -type f -printf '%s %p\n' | sort -n + ``` + + For each set of duplicates: + - Show all file paths + - Display sizes and modification dates + - Recommend which to keep (usually newest or best-named) + - **Important**: Always ask for confirmation before deleting + +5. **Propose Organization Plan** + + Present a clear plan before making changes: + + ```markdown + # Organization Plan for [Directory] + + ## Current State + - X files across Y folders + - [Size] total + - File types: [breakdown] + - Issues: [list problems] + + ## Proposed Structure + + ``` + [Directory]/ + ├── Work/ + │ ├── Projects/ + │ ├── Documents/ + │ └── Archive/ + ├── Personal/ + │ ├── Photos/ + │ ├── Documents/ + │ └── Media/ + └── Downloads/ + ├── To-Sort/ + └── Archive/ + ``` + + ## Changes I'll Make + + 1. **Create new folders**: [list] + 2. **Move files**: + - X PDFs → Work/Documents/ + - Y images → Personal/Photos/ + - Z old files → Archive/ + 3. **Rename files**: [any renaming patterns] + 4. 
**Delete**: [duplicates or trash files] + + ## Files Needing Your Decision + + - [List any files you're unsure about] + + Ready to proceed? (yes/no/modify) + ``` + +6. **Execute Organization** + + After approval, organize systematically: + + ```bash + # Create folder structure + mkdir -p "path/to/new/folders" + + # Move files with clear logging + mv "old/path/file.pdf" "new/path/file.pdf" + + # Rename files with consistent patterns + # Example: "YYYY-MM-DD - Description.ext" + ``` + + **Important Rules**: + - Always confirm before deleting anything + - Log all moves for potential undo + - Preserve original modification dates + - Handle filename conflicts gracefully + - Stop and ask if you encounter unexpected situations + +7. **Provide Summary and Maintenance Tips** + + After organizing: + + ```markdown + # Organization Complete! ✨ + + ## What Changed + + - Created [X] new folders + - Organized [Y] files + - Freed [Z] GB by removing duplicates + - Archived [W] old files + + ## New Structure + + [Show the new folder tree] + + ## Maintenance Tips + + To keep this organized: + + 1. **Weekly**: Sort new downloads + 2. **Monthly**: Review and archive completed projects + 3. **Quarterly**: Check for new duplicates + 4. **Yearly**: Archive old files + + ## Quick Commands for You + + ```bash + # Find files modified this week + find . -type f -mtime -7 + + # Sort downloads by type + [custom command for their setup] + + # Find duplicates + [custom command] + ``` + + Want to organize another folder? + ``` + +## Examples + +### Example 1: Organizing Downloads (From Justin Dielmann) + +**User**: "My Downloads folder is a mess with 500+ files. Help me organize it." + +**Process**: +1. Analyzes Downloads folder +2. Finds patterns: work docs, personal photos, installers, random PDFs +3. Proposes structure: + - Downloads/ + - Work/ + - Personal/ + - Installers/ (DMG, PKG files) + - Archive/ + - ToSort/ (things needing decisions) +4. Asks for confirmation +5. Moves files intelligently based on content and names +6. Results: 500 files → 5 organized folders + +### Example 2: Finding and Removing Duplicates + +**User**: "Find duplicate files in my Documents and help me decide which to keep." + +**Output**: +```markdown +# Found 23 Sets of Duplicates (156 MB total) + +## Duplicate Set 1: "proposal.pdf" +- `/Documents/proposal.pdf` (2.3 MB, modified: 2024-03-15) +- `/Documents/old/proposal.pdf` (2.3 MB, modified: 2024-03-15) +- `/Desktop/proposal.pdf` (2.3 MB, modified: 2024-03-10) + +**Recommendation**: Keep `/Documents/proposal.pdf` (most recent in correct location) +Delete the other 2 copies? + +[Continue for all duplicates...] +``` + +### Example 3: Restructuring Projects Folder + +**User**: "Review my ~/Projects directory and suggest improvements." + +**Output**: +```markdown +# Analysis of ~/Projects + +## Current Structure Issues +- Mix of active and archived projects (3+ years old) +- No consistent naming convention +- Some projects at root, others in random subfolders +- Duplicate folders (project-name, project-name-old, project-name-v2) + +## Proposed Structure + +``` +Projects/ +├── Active/ +│ ├── client-work/ +│ ├── side-projects/ +│ └── learning/ +├── Archive/ +│ ├── 2022/ +│ ├── 2023/ +│ └── 2024/ +└── Templates/ +``` + +## Specific Changes +1. Move 12 projects not touched since 2022 → Archive/ +2. Consolidate 4 duplicate project folders +3. Rename projects to consistent format: "client-name-project-name" +4. Create Archive for old work + +Want me to implement this? 
+``` + +### Example 4: Organizing Photos by Date + +**User**: "Organize my photo folders by year and month." + +**Output**: Creates structure like: +``` +Photos/ +├── 2023/ +│ ├── 01-January/ +│ ├── 02-February/ +│ └── ... +├── 2024/ +│ ├── 01-January/ +│ └── ... +└── Unsorted/ +``` + +Then moves photos based on EXIF data or file modification dates. + +## Common Organization Tasks + +### Downloads Cleanup +``` +Organize my Downloads folder - move documents to Documents, +images to Pictures, keep installers separate, and archive files +older than 3 months. +``` + +### Project Organization +``` +Review my Projects folder structure and help me separate active +projects from old ones I should archive. +``` + +### Duplicate Removal +``` +Find all duplicate files in my Documents folder and help me +decide which ones to keep. +``` + +### Desktop Cleanup +``` +My Desktop is covered in files. Help me organize everything into +my Documents folder properly. +``` + +### Photo Organization +``` +Organize all photos in this folder by date (year/month) based +on when they were taken. +``` + +### Work/Personal Separation +``` +Help me separate my work files from personal files across my +Documents folder. +``` + +## Pro Tips + +1. **Start Small**: Begin with one messy folder (like Downloads) to build trust +2. **Regular Maintenance**: Run weekly cleanup on Downloads +3. **Consistent Naming**: Use "YYYY-MM-DD - Description" format for important files +4. **Archive Aggressively**: Move old projects to Archive instead of deleting +5. **Keep Active Separate**: Maintain clear boundaries between active and archived work +6. **Trust the Process**: Let Claude handle the cognitive load of where things go + +## Best Practices + +### Folder Naming +- Use clear, descriptive names +- Avoid spaces (use hyphens or underscores) +- Be specific: "client-proposals" not "docs" +- Use prefixes for ordering: "01-current", "02-archive" + +### File Naming +- Include dates: "2024-10-17-meeting-notes.md" +- Be descriptive: "q3-financial-report.xlsx" +- Avoid version numbers in names (use version control instead) +- Remove download artifacts: "document-final-v2 (1).pdf" → "document.pdf" + +### When to Archive +- Projects not touched in 6+ months +- Completed work that might be referenced later +- Old versions after migration to new systems +- Files you're hesitant to delete (archive first) + +## Related Use Cases + +- Setting up organization for a new computer +- Preparing files for backup/archiving +- Cleaning up before storage cleanup +- Organizing shared team folders +- Structuring new project directories + diff --git a/skills/gmail-intelligence/SKILL.md b/skills/gmail-intelligence/SKILL.md new file mode 100644 index 000000000..1c3f94a29 --- /dev/null +++ b/skills/gmail-intelligence/SKILL.md @@ -0,0 +1,231 @@ +--- +name: gmail-intelligence +description: Transform Gmail into a business intelligence system for MTL Craft Cocktails. Answer questions about emails, detect leads, draft responses, and track client communications with context about the cocktail catering business. +--- + +# Gmail Intelligence for MTL Craft Cocktails + +You are a specialized Gmail intelligence agent for MTL Craft Cocktails, a bilingual mobile cocktail bar catering company in Montreal. Your role is to answer natural language questions about emails, detect and score leads, draft professional responses, and provide business intelligence. + +## Core Capabilities + +1. 
**Natural Language Email Search** + - Answer questions like "Did Alex pick a black or wood bar?" + - Find specific details from client communications + - Search across emails efficiently + +2. **Lead Detection & Scoring** + - Identify wedding, corporate, and private event leads + - Score leads based on: budget, guest count, date proximity, service complexity + - Categorize by event type automatically + +3. **Client Memory & Tracking** + - Remember client preferences and past interactions + - Track conversation history across email threads + - Build persistent client profiles + +4. **Professional Email Drafting** + - Generate bilingual responses (French/English) + - Apply MTL Craft Cocktails brand voice + - Include accurate pricing and package details + +5. **Business Intelligence** + - Report on unpaid invoices + - Identify high-priority leads + - Track follow-up requirements + +## When to Use This Skill + +Use this skill when: +- Searching for specific information in MTL Craft Cocktails Gmail +- Analyzing leads or client communications +- Drafting email responses for the cocktail business +- Generating business intelligence reports +- Tracking client preferences and event details + +## Business Context + +**Company**: MTL Craft Cocktails +**Email**: info@mtlcraftcocktails.com +**Services**: Mobile bar catering (weddings, corporate, private events) +**Location**: Montreal, QC (bilingual: French/English) +**Volume**: 8,390+ messages, 5,263 threads + +## Technical Integration + +This skill works with: +- **RUBE MCP**: Real Gmail access via Composio +- **Mem0**: Persistent client memory storage +- **Anthropic Claude**: Deep email content analysis +- **Agency Swarm**: Multi-agent orchestration + +## Instructions + +### 1. Email Search & Analysis + +When answering questions about emails: + +``` +1. Use SEARCH_TOOLS to find relevant Gmail tools +2. Search with specific queries (sender, keywords, date ranges) +3. Analyze email content for the requested information +4. Provide confident answers with source citations +``` + +**Example Query**: "What color bar did Alex Curtis want?" + +**Process**: +- Search: `from:alexandercurtis (bar OR black OR wood OR color)` +- Extract: Find specific mentions in email body +- Cite: Include date, sender, and message ID +- Confidence: State 100% if found in email text + +### 2. Lead Detection + +For lead scoring, reference: `references/lead-scoring.md` + +**Triggers**: Words like "wedding", "event", "quote", "corporate", "private party" + +**Score Calculation**: +- Budget tier (1-3 points) +- Guest count (1-3 points) +- Date urgency (1-3 points) +- Service complexity (1-3 points) + +**Output Format**: +``` +Lead: [Name] +Type: [Wedding/Corporate/Private] +Score: [X/12] - [Hot/Warm/Cold] +Budget: [Estimated range] +Guest Count: [Number] +Event Date: [Date] +Next Action: [Specific follow-up] +``` + +### 3. Email Drafting + +For email responses, reference: +- `references/brand-voice.md` - Communication style +- `references/email-templates.md` - Standard responses +- `references/pricing-packages.md` - Accurate pricing + +**Drafting Process**: +``` +1. Analyze the incoming email context +2. Apply MTL Craft Cocktails brand voice (professional, warm, bilingual) +3. Include accurate pricing from pricing-packages.md +4. Structure: Greeting → Answer → Next Steps → Signature +5. Flag for approval before sending +``` + +### 4. 
Client Memory + +When tracking client information: + +**Store in Mem0**: +- Event preferences (bar color, cocktail choices) +- Communication history summaries +- Lead score and status +- Follow-up requirements +- Budget and package selections + +**Retrieve Before Drafting**: +- Check Mem0 for existing client profile +- Reference past conversations +- Maintain context continuity + +### 5. Business Intelligence + +For BI queries, reference: `references/business-queries.md` + +**Common Reports**: +- Unpaid invoices (search: `label:Invoice_To_Pay`) +- High-priority leads (score ≥8/12) +- Follow-up needed (search: `label:Follow_Up_Needed`) +- Recent leads by type (last 30 days) + +## Auto-Labeling System + +Apply Gmail labels automatically: + +**Lead Labels**: +- `Lead_Wedding` - Wedding inquiries +- `Lead_Corporate` - Corporate events +- `Lead_Private` - Private parties + +**Status Labels**: +- `Active_Client` - Confirmed bookings +- `Follow_Up_Needed` - Requires response +- `Question_Answered` - Resolved inquiries + +**Financial Labels**: +- `Invoice_To_Pay` - Outstanding invoices +- `Invoice_Paid` - Paid invoices + +**Other Labels**: +- `Supplier` - Vendor communications +- `Personal` - Non-business emails + +## Error Handling + +If search returns no results: +1. Try broader search terms +2. Check date range assumptions +3. Suggest alternative search strategies +4. Don't hallucinate - state "No results found" + +If pricing is requested: +1. ALWAYS reference `references/pricing-packages.md` +2. NEVER estimate or guess prices +3. Clarify package details if ambiguous + +## Quality Standards + +- **Accuracy**: Cite specific emails as sources +- **Confidence**: State confidence level (0-100%) +- **No Hallucination**: Only report information found in emails +- **Brand Consistency**: Follow MTL Craft Cocktails voice +- **Bilingual**: Support French and English inquiries + +## Progressive Disclosure + +This SKILL.md provides core instructions (~5k tokens). Additional detailed references load as needed: + +- `references/lead-scoring.md` - Detailed scoring algorithm +- `references/brand-voice.md` - Complete style guide +- `references/email-templates.md` - Response templates +- `references/pricing-packages.md` - Full pricing structure +- `references/business-queries.md` - BI report templates +- `references/cocktail-menu.md` - Complete drink menu and recipes + +Load these files when specific tasks require their detailed information. + +## Example Workflow + +**User**: "Did Alex Curtis choose the black or wood bar for his October 18th wedding?" + +**Agent Process**: +1. Use RUBE_SEARCH_TOOLS → GMAIL_SEARCH_PEOPLE for "Alex Curtis" +2. Find email addresses: alexandercurtis@icloud.com, alexandercurtis@me.com +3. Use GMAIL_FETCH_EMAILS with query: `from:alexandercurtis (bar OR black OR wood)` +4. Analyze email content for bar color mention +5. Extract: "black bar with light wood top" from August 24-25 emails +6. Store in Mem0: "Alex Curtis - October 18 wedding - black bar with wood top" +7. Return confident answer with source citations + +**Response**: +``` +Alex Curtis chose the BLACK bar with light wood top for his October 18, 2025 wedding. + +Source: Email from Alex Curtis on August 24, 2025 +Message: "Let me know the dimensions of your black bar (with the light wood top)" + +Confirmed: August 25, 2025 - "You have it in black with the light wood top yea?" 
+ +Confidence: 100% +``` + +--- + +*This skill integrates with RUBE MCP (Composio), Mem0, and Agency Swarm to provide real-time Gmail intelligence for MTL Craft Cocktails business operations.* diff --git a/skills/image-enhancer/SKILL.md b/skills/image-enhancer/SKILL.md new file mode 100644 index 000000000..eeb9fac33 --- /dev/null +++ b/skills/image-enhancer/SKILL.md @@ -0,0 +1,99 @@ +--- +name: image-enhancer +description: Improves the quality of images, especially screenshots, by enhancing resolution, sharpness, and clarity. Perfect for preparing images for presentations, documentation, or social media posts. +--- + +# Image Enhancer + +This skill takes your images and screenshots and makes them look better—sharper, clearer, and more professional. + +## When to Use This Skill + +- Improving screenshot quality for blog posts or documentation +- Enhancing images before sharing on social media +- Preparing images for presentations or reports +- Upscaling low-resolution images +- Sharpening blurry photos +- Cleaning up compressed images + +## What This Skill Does + +1. **Analyzes Image Quality**: Checks resolution, sharpness, and compression artifacts +2. **Enhances Resolution**: Upscales images intelligently +3. **Improves Sharpness**: Enhances edges and details +4. **Reduces Artifacts**: Cleans up compression artifacts and noise +5. **Optimizes for Use Case**: Adjusts based on intended use (web, print, social media) + +## How to Use + +### Basic Enhancement + +``` +Improve the image quality of screenshot.png +``` + +``` +Enhance all images in this folder +``` + +### Specific Improvements + +``` +Upscale this image to 4K resolution +``` + +``` +Sharpen this blurry screenshot +``` + +``` +Reduce compression artifacts in this image +``` + +### Batch Processing + +``` +Improve the quality of all PNG files in this directory +``` + +## Example + +**User**: "Improve the image quality of screenshot-2024.png" + +**Output**: +``` +Analyzing screenshot-2024.png... + +Current specs: +- Resolution: 1920x1080 +- Format: PNG +- Quality: Good, but slight blur + +Enhancements applied: +✓ Upscaled to 2560x1440 (retina) +✓ Sharpened edges +✓ Enhanced text clarity +✓ Optimized file size + +Saved as: screenshot-2024-enhanced.png +Original preserved as: screenshot-2024-original.png +``` + +**Inspired by:** Lenny Rachitsky's workflow from his newsletter - used for screenshots in his articles + +## Tips + +- Always keeps original files as backup +- Works best with screenshots and digital images +- Can batch process entire folders +- Specify output format if needed (PNG for quality, JPG for smaller size) +- For social media, mention the platform for optimal sizing + +## Common Use Cases + +- **Blog Posts**: Enhance screenshots before publishing +- **Documentation**: Make UI screenshots crystal clear +- **Social Media**: Optimize images for Twitter, LinkedIn, Instagram +- **Presentations**: Upscale images for large screens +- **Print Materials**: Increase resolution for physical media + diff --git a/skills/ios-simulator-testing/SKILL.md b/skills/ios-simulator-testing/SKILL.md new file mode 100644 index 000000000..6fc69d9c7 --- /dev/null +++ b/skills/ios-simulator-testing/SKILL.md @@ -0,0 +1,1035 @@ +--- +name: iOS Simulator Skill +description: Build, test, and automate iOS apps with accessibility-driven navigation. 13 production-ready scripts including ultra token-efficient Xcode build automation, log monitoring, intelligent simulator selection, and semantic UI navigation. 
Use when user wants to test iOS apps, navigate simulator UI, build Xcode projects, or automate iOS testing. +license: Apache-2.0 +--- + +# iOS Simulator Skill + +Build, test, and automate iOS applications with progressive disclosure and accessibility-first navigation. This skill provides 12 production-ready scripts for the complete iOS development lifecycle. + +## What This Skill Does + +Instead of pixel-based navigation (fragile, breaks on UI changes), use semantic navigation that understands what elements mean: + +```bash +# ❌ Fragile - breaks if UI changes +idb ui tap 320 400 # What's at those coordinates? + +# ✅ Robust - finds by meaning +python scripts/navigator.py --find-text "Login" --tap +``` + +## Prerequisites + +Verify your environment is ready: + +```bash +bash scripts/sim_health_check.sh +``` + +**Requires:** +- macOS 12+ +- Xcode Command Line Tools +- Python 3 +- IDB (optional but recommended) + +## ⚠️ Important: Use Skill Scripts, Not Raw Tools + +**Always use these skill scripts instead of running `xcrun simctl`, `idb`, or `xcodebuild` directly.** + +Why? This skill provides: +- ✅ **Semantic navigation** - Find elements by meaning, not coordinates +- ✅ **Progressive disclosure** - Minimal output by default, details on demand +- ✅ **Structured data** - Consistent JSON output, not raw CLI text +- ✅ **Error handling** - Clear, actionable error messages +- ✅ **Token efficiency** - Optimized for AI agents (5-10 tokens vs 400+) + +**What you lose by using raw tools:** +- Coordinate-based navigation (fragile, breaks on UI changes) +- Massive token consumption (entire build logs, full accessibility trees) +- Inconsistent output formats +- Generic error messages + +**Example - Find and tap a button:** +```bash +# ❌ Fragile - uses raw coordinates +idb ui tap 320 400 # Which element is this? Will it work next week? + +# ✅ Robust - semantic navigation with skill script +python scripts/navigator.py --find-text "Login" --tap +``` + +The 12 scripts in this skill cover all common workflows. **Only use raw tools if you need something not covered by these scripts.** + +## Configuration (Optional) + +The skill **automatically learns your simulator preferences**. No setup required! + +### Auto-Learning Behavior + +After each successful build, the skill remembers which simulator was used: + +```json +# Created at: .claude/skills/ios-simulator-skill/config.json +{ + "device": { + "last_used_simulator": "iPhone 16 Pro", + "last_used_at": "2025-10-18T13:36:18Z" + } +} +``` + +**Next time you build without `--simulator`, it uses the remembered device automatically.** + +### Simulator Selection Priority + +1. `--simulator` CLI flag ← One-off override +2. `preferred_simulator` in config ← Manual preference (always used) +3. `last_used_simulator` in config ← Auto-learned from successful builds +4. Auto-detect first available iPhone +5. Generic iOS Simulator ← Fallback + +### Manual Preference (Optional) + +To always use a specific simulator, edit the config: + +```json +{ + "device": { + "preferred_simulator": "iPhone 15 Pro Max" + } +} +``` + +**Config location**: `.claude/skills/ios-simulator-skill/config.json` (created automatically on first build) + +## Quick Navigation + +**First time?** → Start with screen mapping +**Know what you want?** → Jump to the right script + +## 12 Production Scripts + +### Build & Development (2 scripts) + +#### 1. 
Build & Test Automation - "Build and run tests" + +Build Xcode projects with **ultra token-efficient progressive disclosure**: + +```bash +# Build project (ultra-minimal output: 5-10 tokens) +python scripts/build_and_test.py --project MyApp.xcodeproj +# Output: Build: SUCCESS (0 errors, 3 warnings) [xcresult-20251018-143052] + +# Get error details on demand +python scripts/build_and_test.py --get-errors xcresult-20251018-143052 + +# Get warning details +python scripts/build_and_test.py --get-warnings xcresult-20251018-143052 + +# Get full build log +python scripts/build_and_test.py --get-log xcresult-20251018-143052 + +# Get everything as JSON +python scripts/build_and_test.py --get-all xcresult-20251018-143052 --json + +# List recent builds +python scripts/build_and_test.py --list-xcresults +``` + +**Traditional Options:** +```bash +# Build workspace with specific scheme +python scripts/build_and_test.py --workspace MyApp.xcworkspace --scheme MyApp + +# Run tests +python scripts/build_and_test.py --project MyApp.xcodeproj --test + +# Clean build with simulator selection +python scripts/build_and_test.py --project MyApp.xcodeproj --clean --simulator "iPhone 15 Pro" + +# Verbose mode (for debugging) +python scripts/build_and_test.py --project MyApp.xcodeproj --verbose +``` + +**Output (default - ultra-minimal):** +``` +Build: SUCCESS (0 errors, 3 warnings) [xcresult-20251018-143052] +``` + +**Output (progressive disclosure - get warnings):** +``` +Warnings (3): + +1. 'UIWebView' is deprecated + Location: LoginView.swift:line 45 + +2. Unused variable 'tempValue' + Location: DataModel.swift:line 112 + +3. ... +``` + +**Output (on failure):** +``` +Build: FAILED (2 errors, 1 warnings) [xcresult-20251018-143100] +``` + +**Then get error details:** +```bash +python scripts/build_and_test.py --get-errors xcresult-20251018-143100 +``` + +**Key Features:** +- ✅ **Ultra token-efficient**: Default output is 5-10 tokens +- ✅ **Progressive disclosure**: Load error/warning/log details only when needed +- ✅ **Native xcresult**: Uses Apple's official result bundle format +- ✅ **Structured data**: JSON output via xcresulttool +- ✅ **Cached results**: Access build details hours/days later + +**Options:** + +*Build/Test:* +- `--project` or `--workspace` - Xcode project/workspace path +- `--scheme` - Build scheme (auto-detected if not specified) +- `--configuration` - Debug or Release (default: Debug) +- `--clean` - Clean before building +- `--test` - Run test suite +- `--suite` - Specific test suite to run +- `--simulator` - Target simulator name + +*Progressive Disclosure:* +- `--get-errors XCRESULT_ID` - Get error details +- `--get-warnings XCRESULT_ID` - Get warning details +- `--get-log XCRESULT_ID` - Get full build log +- `--get-all XCRESULT_ID` - Get complete details +- `--list-xcresults` - List recent build results + +*Output:* +- `--verbose` - Show detailed output +- `--json` - Output as JSON + +**Use when:** You need to build your app or run automated tests. + +**Integrations:** +- Combines with `sim_health_check.sh` to verify environment first +- Uses `app_launcher.py` to install built app +- Works with `test_recorder.py` for test documentation + +**Progressive Disclosure Workflow:** +1. Build returns minimal result + xcresult ID +2. Agent sees build failed +3. Agent requests error details using xcresult ID +4. Agent gets structured error list +5. Agent fixes errors and rebuilds + +--- + +#### 2. 
Log Monitor - "Watch app logs in real-time" + +Monitor simulator logs with intelligent filtering and error detection: + +```bash +# Monitor app logs in real-time (follow mode) +python scripts/log_monitor.py --app com.myapp.MyApp --follow + +# Capture logs for specific duration +python scripts/log_monitor.py --app com.myapp.MyApp --duration 30s + +# Show errors and warnings only from last 5 minutes +python scripts/log_monitor.py --severity error,warning --last 5m + +# Save logs to file +python scripts/log_monitor.py --app com.myapp.MyApp --duration 1m --output logs/ + +# Verbose output with full log lines +python scripts/log_monitor.py --app com.myapp.MyApp --duration 30s --verbose +``` + +**Output:** +``` +Logs for: com.myapp.MyApp +Total lines: 342 +Errors: 2, Warnings: 5, Info: 87 + +Top Errors (2): + ❌ Network request failed: timeout after 30s + ❌ Image loading failed: invalid URL + +Top Warnings (5): + ⚠️ Deprecated API usage: UIWebView + ⚠️ Main thread performance warning: 2.3s + ⚠️ Memory warning received +``` + +**Options:** +- `--app` - App bundle ID to filter logs +- `--severity` - Filter by severity (error,warning,info,debug) +- `--follow` - Continuous streaming (Ctrl+C to stop) +- `--duration` - Capture duration (e.g., 30s, 5m, 1h) +- `--last` - Show logs from last N minutes +- `--output` - Save logs to directory +- `--verbose` - Show detailed log lines +- `--json` - Output as JSON + +**Use when:** You need to debug issues, monitor app behavior, or capture logs during testing. + +**Integrations:** +- Enhanced version of `app_state_capture.py` log capture +- Runs alongside `test_recorder.py` for comprehensive test documentation +- Complements `navigator.py` - see logs while interacting with UI + +--- + +### Navigation & Interaction (5 scripts) + +#### 3. Screen Mapper - "What's on this screen?" + +See current screen in 5 lines: + +```bash +python scripts/screen_mapper.py +``` + +**Output:** +``` +Screen: LoginViewController (45 elements, 7 interactive) +Buttons: "Login", "Cancel", "Forgot Password" +TextFields: 2 (0 filled) +Navigation: NavBar: "Sign In" +Focusable: 7 elements +``` + +**Options:** +- `--verbose` - Full element breakdown +- `--hints` - Navigation suggestions +- `--json` - Complete analysis object + +**Use when:** You need to understand what's currently visible and where to navigate next. + +--- + +#### 4. Navigator - "Tap and interact with specific elements" + +Find and interact with UI elements by meaning: + +```bash +# Find and tap a button +python scripts/navigator.py --find-text "Login" --tap + +# Enter text into first text field +python scripts/navigator.py --find-type TextField --index 0 --enter-text "user@test.com" + +# Tap by accessibility ID +python scripts/navigator.py --find-id "submitButton" --tap + +# List all tappable elements +python scripts/navigator.py --list +``` + +**Finding strategies (in order of preference):** +1. By text (fuzzy matching): `--find-text "Button text"` +2. By type: `--find-type TextField` +3. By accessibility ID: `--find-id "elementID"` +4. By coordinates (fallback): `--tap-at 200,400` + +**Output:** +``` +Tapped: Button "Login" at (320, 450) +Entered text in: TextField "Username" +Not found: text='Submit' +``` + +**Use when:** You need to find specific elements and interact with them (tap, type). + +--- + +#### 5. 
Gesture Controller - "Swipe, scroll, and complex gestures" + +Perform navigation gestures: + +```bash +# Directional swipes +python scripts/gesture.py --swipe up|down|left|right + +# Scroll multiple times +python scripts/gesture.py --scroll down --scroll-amount 3 + +# Pull to refresh +python scripts/gesture.py --refresh + +# Pinch to zoom +python scripts/gesture.py --pinch in|out + +# Long press +python scripts/gesture.py --long-press 200,300 --duration 2.0 + +# Custom swipe +python scripts/gesture.py --swipe-from 100,500 --swipe-to 100,100 +``` + +**Output:** +``` +Swiped up +Scrolled down (3x) +Performed pull to refresh +``` + +**Use when:** You need to navigate using gestures (scrolling lists, dismissing overlays, etc.). + +--- + +#### 6. Keyboard Controller - "Type and press buttons" + +Text entry and hardware button control: + +```bash +# Type text (fast) +python scripts/keyboard.py --type "hello@example.com" + +# Type slowly (for animations) +python scripts/keyboard.py --type "slow typing" --slow + +# Press special keys +python scripts/keyboard.py --key return # Submit +python scripts/keyboard.py --key delete # Delete character +python scripts/keyboard.py --key tab # Next field +python scripts/keyboard.py --key space # Space +python scripts/keyboard.py --key up|down|left|right # Arrow keys + +# Press hardware buttons +python scripts/keyboard.py --button home # Go home +python scripts/keyboard.py --button lock # Lock device +python scripts/keyboard.py --button volume-up # Volume up +python scripts/keyboard.py --button screenshot # Take screenshot + +# Key sequences +python scripts/keyboard.py --key-sequence return,return,delete + +# Clear field +python scripts/keyboard.py --clear + +# Dismiss keyboard +python scripts/keyboard.py --dismiss +``` + +**Output:** +``` +Typed: "hello@example.com" +Pressed return +Pressed home button +``` + +**Use when:** You need to enter text or press buttons (including hardware). + +--- + +#### 7. App Launcher - "Start/stop apps and manage installation" + +App lifecycle control: + +```bash +# Launch app by bundle ID +python scripts/app_launcher.py --launch com.example.app + +# Terminate app +python scripts/app_launcher.py --terminate com.example.app + +# Restart app +python scripts/app_launcher.py --restart com.example.app + +# Install app from bundle +python scripts/app_launcher.py --install /path/to/app.app + +# Uninstall app +python scripts/app_launcher.py --uninstall com.example.app + +# Open deep link +python scripts/app_launcher.py --open-url "myapp://profile/123" + +# List installed apps +python scripts/app_launcher.py --list + +# Check app state +python scripts/app_launcher.py --state com.example.app +``` + +**Output:** +``` +Launched com.example.app (PID: 12345) +Installed /path/to/app.app +Opened URL: myapp://profile/123 +Installed apps (15): + com.example.app: My App (v1.0.0) + ... +``` + +**Use when:** You need to control app lifecycle or manage app installation. + +--- + +### Testing & Analysis (5 scripts) + +#### 8. 
Accessibility Auditor - "Check WCAG compliance" + +Find accessibility issues: + +```bash +# Quick audit (default - top 3 issues) +python scripts/accessibility_audit.py + +# Full detailed report +python scripts/accessibility_audit.py --verbose + +# Save report to file +python scripts/accessibility_audit.py --output audit.json +``` + +**Output (default):** +``` +Elements: 45, Issues: 7 +Critical: 2, Warning: 3, Info: 2 + +Top issues: + [critical] missing_label (2x) - Add accessibilityLabel + [warning] missing_hint (3x) - Add accessibilityHint + [info] no_identifier (2x) - Add accessibilityIdentifier for testing +``` + +**Checks for:** +- **Critical:** Missing labels on buttons, empty buttons, images without alt text +- **Warnings:** Missing hints on controls, small touch targets (< 44x44pt) +- **Info:** Missing automation identifiers, deep nesting (> 5 levels) + +**Exit codes:** +- 0 = No critical issues (pass) +- 1 = Critical issues found (fail) + +**Use when:** You need to verify accessibility compliance or find UI issues. + +--- + +#### 9. Visual Differ - "Compare screenshots for visual changes" + +Pixel-by-pixel screenshot comparison: + +```bash +# Compare two screenshots +python scripts/visual_diff.py baseline.png current.png + +# With custom threshold (1% = 0.01) +python scripts/visual_diff.py baseline.png current.png --threshold 0.02 + +# Detailed output +python scripts/visual_diff.py baseline.png current.png --details +``` + +**Output:** +``` +Difference: 0.5% (PASS) +Changed pixels: 1,234 +Artifacts saved to: ./ +``` + +**Generated artifacts:** +- `diff.png` - Changes highlighted in red +- `side-by-side.png` - Baseline and current comparison +- `diff-report.json` - Detailed metrics + +**Use when:** You need to detect visual regressions or compare UI states. + +--- + +#### 10. Test Recorder - "Document test execution automatically" + +Record test steps with screenshots and accessibility snapshots: + +```bash +# Start recording (call from Python or use as module) +python scripts/test_recorder.py --test-name "Login Flow" --output test-reports/ +``` + +**Then in your test code:** +```python +from scripts.test_recorder import TestRecorder + +recorder = TestRecorder("Login Flow", output_dir="test-reports/") + +# Record each step +recorder.step("Launch app") +recorder.step("Tap login button") +recorder.step("Enter credentials", metadata={"user": "test@example.com"}) +recorder.step("Verify login", assertion="Home screen visible") + +# Generate report +recorder.generate_report() +``` + +**Output structure:** +``` +test-reports/login-flow-TIMESTAMP/ +├── report.md (Markdown with screenshots) +├── metadata.json (Complete timing data) +├── screenshots/ (Numbered screenshots per step) +└── accessibility/ (UI trees per step) +``` + +**Use when:** You need to document test execution with visual proof and timing data. + +--- + +#### 11. 
App State Capture - "Create debugging snapshots" + +Capture complete app state for bug reproduction: + +```bash +# Capture everything +python scripts/app_state_capture.py --app-bundle-id com.example.app + +# Custom output location and log lines +python scripts/app_state_capture.py \ + --app-bundle-id com.example.app \ + --output bug-reports/ \ + --log-lines 200 +``` + +**Output:** +``` +State captured: app-state-TIMESTAMP/ +Issues found: 2 errors, 1 warning +Elements: 45 +``` + +**Captures:** +- Screenshot of current screen +- Full accessibility tree (UI hierarchy) +- Recent app logs (filtered by app) +- Device information +- Error/warning counts +- Markdown summary + +**Generated files:** +``` +app-state-TIMESTAMP/ +├── screenshot.png +├── accessibility-tree.json (full UI hierarchy) +├── app-logs.txt (recent logs) +├── device-info.json +├── summary.json (metadata) +└── summary.md (human-readable) +``` + +**Use when:** You need to capture the complete state for debugging or bug reports. + +--- + +#### 12. Environment Health Check - "Verify everything is set up correctly" + +Verify your environment before testing: + +```bash +bash scripts/sim_health_check.sh +``` + +**Checks (8 total):** +1. macOS version +2. Xcode Command Line Tools +3. simctl availability +4. IDB installation +5. Python 3 installation +6. Available simulators +7. Booted simulators +8. Python packages (Pillow for visual_diff) + +**Output:** +``` +✓ macOS detected (version 14.1.1) +✓ Xcode Command Line Tools installed +✓ simctl is available +⚠ IDB not found (optional, for advanced features) +✓ Python 3 is installed (3.11.0) +✓ Found 6 available simulator(s) +⚠ No simulators currently booted +✓ Pillow (PIL) installed +``` + +**Exit codes:** +- 0 = Ready to test +- 1 = Fix issues before testing + +**Use when:** Starting fresh or troubleshooting environment problems. + +--- + +## Complete Workflow Examples + +### Example 1: Login Automation + +```bash +# 1. Check environment +bash scripts/sim_health_check.sh + +# 2. Launch app +python scripts/app_launcher.py --launch com.example.app + +# 3. See what's on screen +python scripts/screen_mapper.py + +# 4. Fill login form +python scripts/navigator.py --find-type TextField --index 0 --enter-text "user@test.com" +python scripts/navigator.py --find-type SecureTextField --enter-text "password123" + +# 5. Submit +python scripts/navigator.py --find-text "Login" --tap + +# 6. Check for accessibility issues +python scripts/accessibility_audit.py +``` + +### Example 2: Scroll and Verify + +```bash +# See current screen +python scripts/screen_mapper.py + +# Scroll down to see more +python scripts/gesture.py --scroll down --scroll-amount 3 + +# Find and tap a result +python scripts/navigator.py --find-text "Search Result" --tap +``` + +### Example 3: Visual Regression Testing + +```bash +# Capture baseline +python scripts/app_state_capture.py --output baseline/ + +# Make changes... 
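+# (For example, rebuild and reinstall after a UI change; the project name and
+#  app path below are placeholders, per scripts 1 and 7:)
+#   python scripts/build_and_test.py --project MyApp.xcodeproj
+#   python scripts/app_launcher.py --install /path/to/MyApp.app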
+ +# Capture current state +python scripts/app_state_capture.py --output current/ + +# Compare visually +python scripts/visual_diff.py baseline/screenshot.png current/screenshot.png --threshold 0.02 +``` + +### Example 4: Full Test Documentation + +```bash +# Start recording +python scripts/test_recorder.py --test-name "User Registration" --output test-reports/ + +# In your test (Python): +# recorder.step("View registration form") +# recorder.step("Enter name", metadata={"name": "John Doe"}) +# recorder.step("Enter email", metadata={"email": "john@example.com"}) +# recorder.step("Submit form") +# recorder.step("Verify confirmation", assertion="Success message visible") +# recorder.generate_report() + +# Get complete report in: test-reports/user-registration-TIMESTAMP/report.md +``` + +### Example 5: Debug a Problem + +```bash +# Capture everything for analysis +python scripts/app_state_capture.py \ + --app-bundle-id com.example.app \ + --output bug-reports/ \ + --log-lines 200 + +# Creates bug-reports/app-state-TIMESTAMP/ with: +# - Current screenshot +# - Full UI hierarchy +# - App logs +# - Device info +# - summary.md (human-readable) +``` + +--- + +## Decision Tree + +``` +Want to... + +├─ Build your Xcode app or run tests? +│ └─ python scripts/build_and_test.py --project MyApp.xcodeproj +│ +├─ Watch app logs in real-time? +│ └─ python scripts/log_monitor.py --app com.app.id --follow +│ +├─ See what's on screen? +│ └─ python scripts/screen_mapper.py +│ +├─ Tap a button or enter text? +│ └─ python scripts/navigator.py --find-text "..." --tap +│ +├─ Scroll or swipe? +│ └─ python scripts/gesture.py --scroll down +│ +├─ Type or press keys? +│ └─ python scripts/keyboard.py --type "..." +│ +├─ Launch/stop an app? +│ └─ python scripts/app_launcher.py --launch com.app.id +│ +├─ Check accessibility? +│ └─ python scripts/accessibility_audit.py +│ +├─ Compare screenshots? +│ └─ python scripts/visual_diff.py baseline.png current.png +│ +├─ Document a test? +│ └─ python scripts/test_recorder.py --test-name "Test Name" +│ +├─ Debug a problem? +│ └─ python scripts/app_state_capture.py --app-bundle-id com.app.id +│ +├─ Pick which simulator to use? +│ └─ python scripts/simulator_selector.py --suggest +│ +└─ Verify environment? + └─ bash scripts/sim_health_check.sh +``` + +--- + +## Selecting a Simulator - The Smart Way + +When you first start testing, Claude can **automatically suggest the best simulator for you**: + +```bash +# Get top 4 recommended simulators +python scripts/simulator_selector.py --suggest +``` + +**Output example:** +``` +Available Simulators: + +1. iPhone 16 Pro (iOS 18.0) + Recommended, Latest iOS, #1 common model + UDID: 67A99DF0-27BD-4507-A3DE-B7D8C38F764A + +2. iPhone 16 Pro Max (iOS 18.0) + Latest iOS, #1 common model + UDID: 3CF3A78B-F899-4C50-A158-3707C6E16E15 + +3. iPhone 16 (iOS 18.0) + Latest iOS, #2 common model + UDID: 20D618BD-AB45-41E5-8A4C-C11890E49205 + +4. iPhone 15 Pro (iOS 17.5) + #1 common model + UDID: E4190DEA-B937-4331-A58E-15C747722308 +``` + +### How Claude Helps + +When Claude sees this, it will: +1. **Parse the suggestions** as JSON +2. **Ask you to pick** from the top options +3. **Boot your choice** automatically +4. **Remember your preference** for next time + +**How it ranks simulators:** + +1. **Recently used** ← If you picked iPhone 16 Pro last time, it suggests it first +2. **Latest iOS version** ← Testing on current iOS is important +3. **Common models** ← iPhone 16 Pro, iPhone 15, iPhone SE (best for testing) +4. 
**Currently booted** ← If one's already running, suggest it + +### Using the Selector + +```bash +# Get top N suggestions (default: 4) +python scripts/simulator_selector.py --suggest --count 3 + +# Get suggestions as JSON for programmatic use +python scripts/simulator_selector.py --suggest --json + +# List all available simulators +python scripts/simulator_selector.py --list + +# Boot a specific simulator +python scripts/simulator_selector.py --boot 67A99DF0-27BD-4507-A3DE-B7D8C38F764A +``` + +### Auto-Learning + +The skill **remembers which simulator you used last time**: + +```json +# Saved in: .claude/skills/ios-simulator-skill/config.json +{ + "device": { + "last_used_simulator": "iPhone 16 Pro", + "last_used_at": "2025-10-18T16:50:00Z" + } +} +``` + +Next time you test, the selector will suggest your previous device first. + +--- + +## Token Efficiency + +All scripts are optimized for minimal output: + +| Operation | Raw Output | Skill Output | Savings | +|-----------|-----------|-------------|---------| +| Screen analysis | 200+ lines | 5 lines | 97.5% | +| Find & tap | 100+ lines | 1 line | 99% | +| Type text | 50+ lines | 1 line | 98% | +| Login flow | 400+ lines | 15 lines | 96% | + +**Default modes:** +- ✅ Minimal output (3-5 lines) +- ✅ `--verbose` for details +- ✅ `--json` for machine-readable format + +--- + +## Accessibility-First Philosophy + +**Why semantic navigation instead of coordinates?** + +```bash +# Fragile - breaks on any UI change +idb ui tap 320 400 + +# Robust - works even if layout changes +python scripts/navigator.py --find-text "Login" --tap +``` + +**Benefits:** +- Works across different screen sizes +- Survives UI redesigns +- Matches human understanding of the app +- Faster (no pixel processing) +- More reliable (structured data) + +--- + +## When to Use Raw Tools (Advanced) + +The 12 scripts in this skill cover all standard workflows. Raw tools should only be used for edge cases not covered: + +```bash +# ✅ Covered by skill - use script +python scripts/navigator.py --find-text "Login" --tap + +# ⚠️ Not covered - only then use raw tool +idb ui tap 320 400 # Only if you absolutely need coordinates + +# ✅ Covered by skill - use script +python scripts/app_launcher.py --launch com.example.app + +# ⚠️ Not covered - only then use raw tool +xcrun simctl launch booted com.example.app # Bypass all skill benefits +``` + +**Benefits you get with skill scripts:** +- Robust semantic navigation (survives UI changes) +- Token-efficient output (5-10 tokens vs 400+) +- Structured error messages (clear fixes) +- Consistent output across all scripts + +**What you lose with raw tools:** +- Fragile coordinate-based navigation +- Massive token consumption +- Unstructured output +- Generic error messages + +**Rule of thumb:** If one of the 12 scripts can do the job, use it. Never use raw tools for standard operations. 
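+
+The skill scripts also compose cleanly in shell. Here is a minimal sketch of the
+progressive-disclosure loop from script 1, driven from bash: the project name is a
+placeholder, and the xcresult ID pattern simply mirrors the example output shown
+above (loosen it if your format differs):
+
+```bash
+# Build, print the one-line summary, and fetch error details only on failure
+result=$(python scripts/build_and_test.py --project MyApp.xcodeproj)
+echo "$result"
+
+if [[ "$result" == *FAILED* ]]; then
+  # Pull the bracketed ID, e.g. [xcresult-20251018-143100]
+  id=$(echo "$result" | grep -oE 'xcresult-[0-9]{8}-[0-9]{6}')
+  python scripts/build_and_test.py --get-errors "$id"
+  exit 1
+fi
+```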
+
+---
+
+## Help for Each Script
+
+All scripts provide detailed help:
+
+```bash
+# Simulator Selection
+python scripts/simulator_selector.py --help
+
+# Development & Testing
+python scripts/build_and_test.py --help
+python scripts/log_monitor.py --help
+
+# Navigation & Interaction
+python scripts/screen_mapper.py --help
+python scripts/navigator.py --help
+python scripts/gesture.py --help
+python scripts/keyboard.py --help
+python scripts/app_launcher.py --help
+
+# Testing & Analysis
+python scripts/accessibility_audit.py --help
+python scripts/visual_diff.py --help
+python scripts/test_recorder.py --help
+python scripts/app_state_capture.py --help
+
+# Environment
+bash scripts/sim_health_check.sh --help
+```
+
+---
+
+## Best Practices
+
+1. **Always start with screen mapping** - Understand what's on screen before navigating
+2. **Use semantic finding** - Find by text or type, not coordinates
+3. **Verify state after actions** - Use screen_mapper to confirm navigation worked
+4. **Minimize verbose output** - Only use `--verbose` when debugging
+5. **Capture state for bugs** - Use app_state_capture for reproduction
+6. **Check accessibility** - Run accessibility_audit after major changes
+7. **Use test_recorder** - Document important workflows for team reference
+
+---
+
+## Troubleshooting
+
+**Environment issues?**
+```bash
+bash scripts/sim_health_check.sh
+```
+
+**Element not found?**
+```bash
+python scripts/screen_mapper.py --verbose
+# See all elements, then try partial text matching
+```
+
+**App won't launch?**
+```bash
+python scripts/app_launcher.py --list  # Find correct bundle ID
+python scripts/app_launcher.py --launch com.your.app
+```
+
+**Gesture not working?**
+- Ensure simulator is in foreground
+- Try smaller swipe distances
+- Check screen dimensions
+
+---
+
+## Next Steps
+
+1. Run `bash scripts/sim_health_check.sh` to verify your environment
+2. **Get simulator recommendations**: `python scripts/simulator_selector.py --suggest`
+3. Claude will ask which simulator you want to use (top 3-4 recommended)
+4. Launch your app: `python scripts/app_launcher.py --launch com.your.app`
+5. Map the screen: `python scripts/screen_mapper.py`
+6. Start navigating with `python scripts/navigator.py`
+
+For detailed documentation on each script, see `CLAUDE.md` and the `references/` directory.
+
+---
+
+**Built for AI agents. Optimized for humans. Made for building and testing iOS apps efficiently.**
diff --git a/skills/ios-simulator-testing/examples/login_flow.py b/skills/ios-simulator-testing/examples/login_flow.py
new file mode 100755
index 000000000..a96a71710
--- /dev/null
+++ b/skills/ios-simulator-testing/examples/login_flow.py
@@ -0,0 +1,176 @@
+#!/usr/bin/env python3
+"""
+Example: Complete Login Flow Navigation
+
+Demonstrates how to use the iOS Simulator Navigator tools
+to automate a typical login workflow.
+ +This example shows: +- Launching an app +- Mapping the screen +- Finding and interacting with elements +- Entering credentials +- Navigating to authenticated state +""" + +import subprocess +import sys +import time +from pathlib import Path + +# Add scripts directory to path +scripts_dir = Path(__file__).parent.parent / "scripts" +sys.path.insert(0, str(scripts_dir)) + + +def run_command(cmd: list) -> tuple: + """Run command and return (success, output).""" + try: + result = subprocess.run(cmd, capture_output=True, text=True, check=True) + return (True, result.stdout.strip()) + except subprocess.CalledProcessError as e: + return (False, e.stderr.strip()) + + +def print_step(step_num: int, description: str): + """Print step header.""" + print(f"\n{'='*60}") + print(f"Step {step_num}: {description}") + print("=" * 60) + + +def main(): + """Execute complete login flow.""" + + # Configuration + APP_BUNDLE_ID = "com.example.app" # Change to your app + + print("iOS Simulator Navigator - Login Flow Example") + print("=" * 60) + + # Step 1: Launch the app + print_step(1, "Launch App") + success, output = run_command( + ["python", str(scripts_dir / "app_launcher.py"), "--launch", APP_BUNDLE_ID] + ) + + if success: + print(f"✓ {output}") + else: + print(f"✗ Failed to launch: {output}") + sys.exit(1) + + # Wait for app to load + time.sleep(2) + + # Step 2: Map the login screen + print_step(2, "Map Login Screen") + success, output = run_command(["python", str(scripts_dir / "screen_mapper.py")]) + + if success: + print(output) + else: + print(f"✗ Failed to map screen: {output}") + sys.exit(1) + + # Step 3: Enter email + print_step(3, "Enter Email Address") + success, output = run_command( + [ + "python", + str(scripts_dir / "navigator.py"), + "--find-type", + "TextField", + "--index", + "0", + "--enter-text", + "test@example.com", + ] + ) + + if success: + print(f"✓ {output}") + else: + print(f"✗ Failed to enter email: {output}") + sys.exit(1) + + # Step 4: Enter password + print_step(4, "Enter Password") + success, output = run_command( + [ + "python", + str(scripts_dir / "navigator.py"), + "--find-type", + "SecureTextField", + "--enter-text", + "password123", + ] + ) + + if success: + print(f"✓ {output}") + else: + print(f"✗ Failed to enter password: {output}") + sys.exit(1) + + # Step 5: Tap Login button + print_step(5, "Tap Login Button") + success, output = run_command( + ["python", str(scripts_dir / "navigator.py"), "--find-text", "Login", "--tap"] + ) + + if success: + print(f"✓ {output}") + else: + print(f"✗ Failed to tap login: {output}") + sys.exit(1) + + # Wait for login to complete + print("\nWaiting for login to complete...") + time.sleep(3) + + # Step 6: Verify we're logged in + print_step(6, "Verify Logged In") + success, output = run_command(["python", str(scripts_dir / "screen_mapper.py")]) + + if success: + print(output) + if "Home" in output or "Dashboard" in output: + print("\n✓ Successfully logged in!") + else: + print("\n⚠ Login may not have succeeded (no Home/Dashboard screen detected)") + else: + print(f"✗ Failed to verify: {output}") + sys.exit(1) + + # Optional: Navigate to profile + print_step(7, "Navigate to Profile (Optional)") + success, output = run_command( + ["python", str(scripts_dir / "navigator.py"), "--find-text", "Profile", "--tap"] + ) + + if success: + print(f"✓ {output}") + time.sleep(1) + + # Map profile screen + success, output = run_command(["python", str(scripts_dir / "screen_mapper.py")]) + if success: + print(f"\nProfile Screen:\n{output}") + else: + 
print(f"⚠ Profile navigation skipped: {output}") + + print("\n" + "=" * 60) + print("Login flow complete!") + print("=" * 60) + + +if __name__ == "__main__": + try: + main() + except KeyboardInterrupt: + print("\n\nInterrupted by user") + sys.exit(1) + except Exception as e: + print(f"\n\nError: {e}") + sys.exit(1) diff --git a/skills/ios-simulator-testing/scripts/accessibility_audit.py b/skills/ios-simulator-testing/scripts/accessibility_audit.py new file mode 100755 index 000000000..6a9c900e6 --- /dev/null +++ b/skills/ios-simulator-testing/scripts/accessibility_audit.py @@ -0,0 +1,282 @@ +#!/usr/bin/env python3 +""" +iOS Simulator Accessibility Audit + +Scans the current simulator screen for accessibility compliance issues. +Optimized for minimal token output while maintaining functionality. + +Usage: python scripts/accessibility_audit.py [options] +""" + +import argparse +import json +import subprocess +import sys +from dataclasses import asdict, dataclass +from typing import Any + +from common import flatten_tree, get_accessibility_tree + + +@dataclass +class Issue: + """Represents an accessibility issue.""" + + severity: str # critical, warning, info + rule: str + element_type: str + issue: str + fix: str + + def to_dict(self) -> dict: + """Convert to dictionary for JSON serialization.""" + return asdict(self) + + +class AccessibilityAuditor: + """Performs accessibility audits on iOS simulator screens.""" + + # Critical rules that block users + CRITICAL_RULES = { + "missing_label": lambda e: e.get("type") in ["Button", "Link"] and not e.get("AXLabel"), + "empty_button": lambda e: e.get("type") == "Button" + and not (e.get("AXLabel") or e.get("AXValue")), + "image_no_alt": lambda e: e.get("type") == "Image" and not e.get("AXLabel"), + } + + # Warnings that degrade UX + WARNING_RULES = { + "missing_hint": lambda e: e.get("type") in ["Slider", "TextField"] and not e.get("help"), + "missing_traits": lambda e: e.get("type") and not e.get("traits"), + } + + # Info level suggestions + INFO_RULES = { + "no_identifier": lambda e: not e.get("AXUniqueId"), + "deep_nesting": lambda e: e.get("depth", 0) > 5, + } + + def __init__(self, udid: str | None = None): + """Initialize auditor with optional device UDID.""" + self.udid = udid + + def get_accessibility_tree(self) -> dict: + """Fetch accessibility tree from simulator using shared utility.""" + return get_accessibility_tree(self.udid, nested=True) + + @staticmethod + def _is_small_target(element: dict) -> bool: + """Check if touch target is too small (< 44x44 points).""" + frame = element.get("frame", {}) + width = frame.get("width", 0) + height = frame.get("height", 0) + return width < 44 or height < 44 + + def _flatten_tree(self, node: dict, depth: int = 0) -> list[dict]: + """Flatten nested accessibility tree for easier processing using shared utility.""" + return flatten_tree(node, depth) + + def audit_element(self, element: dict) -> list[Issue]: + """Audit a single element for accessibility issues.""" + issues = [] + + # Check critical rules + for rule_name, rule_func in self.CRITICAL_RULES.items(): + if rule_func(element): + issues.append( + Issue( + severity="critical", + rule=rule_name, + element_type=element.get("type", "Unknown"), + issue=self._get_issue_description(rule_name), + fix=self._get_fix_suggestion(rule_name), + ) + ) + + # Check warnings (skip if critical issues found) + if not issues: + for rule_name, rule_func in self.WARNING_RULES.items(): + if rule_func(element): + issues.append( + Issue( + severity="warning", + 
rule=rule_name, + element_type=element.get("type", "Unknown"), + issue=self._get_issue_description(rule_name), + fix=self._get_fix_suggestion(rule_name), + ) + ) + + # Check info level (only if verbose or no other issues) + if not issues: + for rule_name, rule_func in self.INFO_RULES.items(): + if rule_func(element): + issues.append( + Issue( + severity="info", + rule=rule_name, + element_type=element.get("type", "Unknown"), + issue=self._get_issue_description(rule_name), + fix=self._get_fix_suggestion(rule_name), + ) + ) + + return issues + + def _get_issue_description(self, rule: str) -> str: + """Get human-readable issue description.""" + descriptions = { + "missing_label": "Interactive element missing accessibility label", + "empty_button": "Button has no text or label", + "image_no_alt": "Image missing alternative text", + "missing_hint": "Complex control missing hint", + "small_touch_target": "Touch target smaller than 44x44pt", + "missing_traits": "Element missing accessibility traits", + "no_identifier": "Missing accessibility identifier", + "deep_nesting": "Deeply nested (>5 levels)", + } + return descriptions.get(rule, "Accessibility issue") + + def _get_fix_suggestion(self, rule: str) -> str: + """Get fix suggestion for issue.""" + fixes = { + "missing_label": "Add accessibilityLabel", + "empty_button": "Set button title or accessibilityLabel", + "image_no_alt": "Add accessibilityLabel with description", + "missing_hint": "Add accessibilityHint", + "small_touch_target": "Increase to minimum 44x44pt", + "missing_traits": "Set appropriate accessibilityTraits", + "no_identifier": "Add accessibilityIdentifier for testing", + "deep_nesting": "Simplify view hierarchy", + } + return fixes.get(rule, "Review accessibility") + + def audit(self, verbose: bool = False) -> dict[str, Any]: + """Perform full accessibility audit.""" + # Get accessibility tree + tree = self.get_accessibility_tree() + + # Flatten for processing + elements = self._flatten_tree(tree) + + # Audit each element + all_issues = [] + for element in elements: + issues = self.audit_element(element) + for issue in issues: + issue_dict = issue.to_dict() + # Add minimal element info for context + issue_dict["element"] = { + "type": element.get("type", "Unknown"), + "label": element.get("AXLabel", "")[:30] if element.get("AXLabel") else None, + } + all_issues.append(issue_dict) + + # Count by severity + critical = len([i for i in all_issues if i["severity"] == "critical"]) + warning = len([i for i in all_issues if i["severity"] == "warning"]) + info = len([i for i in all_issues if i["severity"] == "info"]) + + # Build result (token-optimized) + result = { + "summary": { + "total": len(elements), + "issues": len(all_issues), + "critical": critical, + "warning": warning, + "info": info, + } + } + + if verbose: + # Full details only if requested + result["issues"] = all_issues + else: + # Default: top issues only (token-efficient) + result["top_issues"] = self._get_top_issues(all_issues) + + return result + + def _get_top_issues(self, issues: list[dict]) -> list[dict]: + """Get top 3 issues grouped by type (token-efficient).""" + if not issues: + return [] + + # Group by rule + grouped = {} + for issue in issues: + rule = issue["rule"] + if rule not in grouped: + grouped[rule] = { + "severity": issue["severity"], + "rule": rule, + "count": 0, + "fix": issue["fix"], + } + grouped[rule]["count"] += 1 + + # Sort by severity and count + severity_order = {"critical": 0, "warning": 1, "info": 2} + sorted_issues = sorted( + 
grouped.values(), key=lambda x: (severity_order[x["severity"]], -x["count"]) + ) + + return sorted_issues[:3] + + +def main(): + """Main entry point.""" + parser = argparse.ArgumentParser( + description="Audit iOS simulator screen for accessibility issues" + ) + parser.add_argument("--udid", help="Device UDID (uses booted device if not specified)") + parser.add_argument("--output", help="Save JSON report to file") + parser.add_argument( + "--verbose", action="store_true", help="Include all issue details (increases output)" + ) + + args = parser.parse_args() + + # Perform audit + auditor = AccessibilityAuditor(udid=args.udid) + + try: + result = auditor.audit(verbose=args.verbose) + except Exception as e: + print(f"Error: {e}") + sys.exit(1) + + # Output results + if args.output: + # Save to file + with open(args.output, "w") as f: + json.dump(result, f, indent=2) + # Print minimal summary + summary = result["summary"] + print(f"Audit complete: {summary['issues']} issues ({summary['critical']} critical)") + print(f"Report saved to: {args.output}") + # Print to stdout (token-optimized by default) + elif args.verbose: + print(json.dumps(result, indent=2)) + else: + # Ultra-compact output + summary = result["summary"] + print(f"Elements: {summary['total']}, Issues: {summary['issues']}") + print( + f"Critical: {summary['critical']}, Warning: {summary['warning']}, Info: {summary['info']}" + ) + + if result.get("top_issues"): + print("\nTop issues:") + for issue in result["top_issues"]: + print( + f" [{issue['severity']}] {issue['rule']} ({issue['count']}x) - {issue['fix']}" + ) + + # Exit with error if critical issues found + if result["summary"]["critical"] > 0: + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/skills/ios-simulator-testing/scripts/app_launcher.py b/skills/ios-simulator-testing/scripts/app_launcher.py new file mode 100755 index 000000000..be3967930 --- /dev/null +++ b/skills/ios-simulator-testing/scripts/app_launcher.py @@ -0,0 +1,312 @@ +#!/usr/bin/env python3 +""" +iOS App Launcher - App Lifecycle Control + +Launches, terminates, and manages iOS apps in the simulator. +Handles deep links and app switching. + +Usage: python scripts/app_launcher.py --launch com.example.app +""" + +import argparse +import contextlib +import subprocess +import sys +import time + +from common import build_simctl_command + + +class AppLauncher: + """Controls app lifecycle on iOS simulator.""" + + def __init__(self, udid: str | None = None): + """Initialize app launcher.""" + self.udid = udid + + def launch(self, bundle_id: str, wait_for_debugger: bool = False) -> tuple[bool, int | None]: + """ + Launch an app. + + Args: + bundle_id: App bundle identifier + wait_for_debugger: Wait for debugger attachment + + Returns: + (success, pid) tuple + """ + cmd = build_simctl_command("launch", self.udid, bundle_id) + + if wait_for_debugger: + cmd.insert(3, "--wait-for-debugger") # Insert after "launch" operation + + try: + result = subprocess.run(cmd, capture_output=True, text=True, check=True) + # Parse PID from output if available + pid = None + if result.stdout: + # Output format: "com.example.app: " + parts = result.stdout.strip().split(":") + if len(parts) > 1: + with contextlib.suppress(ValueError): + pid = int(parts[1].strip()) + return (True, pid) + except subprocess.CalledProcessError: + return (False, None) + + def terminate(self, bundle_id: str) -> bool: + """ + Terminate an app. 
+ + Args: + bundle_id: App bundle identifier + + Returns: + Success status + """ + cmd = build_simctl_command("terminate", self.udid, bundle_id) + + try: + subprocess.run(cmd, capture_output=True, check=True) + return True + except subprocess.CalledProcessError: + return False + + def install(self, app_path: str) -> bool: + """ + Install an app. + + Args: + app_path: Path to .app bundle + + Returns: + Success status + """ + cmd = build_simctl_command("install", self.udid, app_path) + + try: + subprocess.run(cmd, capture_output=True, check=True) + return True + except subprocess.CalledProcessError: + return False + + def uninstall(self, bundle_id: str) -> bool: + """ + Uninstall an app. + + Args: + bundle_id: App bundle identifier + + Returns: + Success status + """ + cmd = build_simctl_command("uninstall", self.udid, bundle_id) + + try: + subprocess.run(cmd, capture_output=True, check=True) + return True + except subprocess.CalledProcessError: + return False + + def open_url(self, url: str) -> bool: + """ + Open URL (for deep linking). + + Args: + url: URL to open (http://, myapp://, etc.) + + Returns: + Success status + """ + cmd = build_simctl_command("openurl", self.udid, url) + + try: + subprocess.run(cmd, capture_output=True, check=True) + return True + except subprocess.CalledProcessError: + return False + + def list_apps(self) -> list[dict[str, str]]: + """ + List installed apps. + + Returns: + List of app info dictionaries + """ + cmd = build_simctl_command("listapps", self.udid) + + try: + result = subprocess.run(cmd, capture_output=True, text=True, check=True) + + # Parse plist output using plutil to convert to JSON + plist_data = result.stdout + + # Use plutil to convert plist to JSON + convert_cmd = ["plutil", "-convert", "json", "-o", "-", "-"] + convert_result = subprocess.run( + convert_cmd, check=False, input=plist_data, capture_output=True, text=True + ) + + apps = [] + if convert_result.returncode == 0: + import json + + try: + data = json.loads(convert_result.stdout) + for bundle_id, app_info in data.items(): + # Skip system internal apps that are hidden + if app_info.get("ApplicationType") == "Hidden": + continue + + apps.append( + { + "bundle_id": bundle_id, + "name": app_info.get( + "CFBundleDisplayName", app_info.get("CFBundleName", bundle_id) + ), + "path": app_info.get("Path", ""), + "version": app_info.get("CFBundleVersion", "Unknown"), + "type": app_info.get("ApplicationType", "User"), + } + ) + except json.JSONDecodeError: + pass + + return apps + except subprocess.CalledProcessError: + return [] + + def get_app_state(self, bundle_id: str) -> str: + """ + Get app state (running, suspended, etc.). + + Args: + bundle_id: App bundle identifier + + Returns: + State string or 'unknown' + """ + # Check if app is running by trying to get its PID + cmd = build_simctl_command("spawn", self.udid, "launchctl", "list") + + try: + result = subprocess.run(cmd, capture_output=True, text=True, check=True) + if bundle_id in result.stdout: + return "running" + return "not running" + except subprocess.CalledProcessError: + return "unknown" + + def restart_app(self, bundle_id: str, delay: float = 1.0) -> bool: + """ + Restart an app (terminate then launch). 
+ + Args: + bundle_id: App bundle identifier + delay: Delay between terminate and launch + + Returns: + Success status + """ + # Terminate + self.terminate(bundle_id) + time.sleep(delay) + + # Launch + success, _ = self.launch(bundle_id) + return success + + +def main(): + """Main entry point.""" + parser = argparse.ArgumentParser(description="Control iOS app lifecycle") + + # Actions + parser.add_argument("--launch", help="Launch app by bundle ID") + parser.add_argument("--terminate", help="Terminate app by bundle ID") + parser.add_argument("--restart", help="Restart app by bundle ID") + parser.add_argument("--install", help="Install app from .app path") + parser.add_argument("--uninstall", help="Uninstall app by bundle ID") + parser.add_argument("--open-url", help="Open URL (deep link)") + parser.add_argument("--list", action="store_true", help="List installed apps") + parser.add_argument("--state", help="Get app state by bundle ID") + + # Options + parser.add_argument( + "--wait-for-debugger", action="store_true", help="Wait for debugger when launching" + ) + parser.add_argument("--udid", help="Device UDID") + + args = parser.parse_args() + + launcher = AppLauncher(udid=args.udid) + + # Execute requested action + if args.launch: + success, pid = launcher.launch(args.launch, args.wait_for_debugger) + if success: + if pid: + print(f"Launched {args.launch} (PID: {pid})") + else: + print(f"Launched {args.launch}") + else: + print(f"Failed to launch {args.launch}") + sys.exit(1) + + elif args.terminate: + if launcher.terminate(args.terminate): + print(f"Terminated {args.terminate}") + else: + print(f"Failed to terminate {args.terminate}") + sys.exit(1) + + elif args.restart: + if launcher.restart_app(args.restart): + print(f"Restarted {args.restart}") + else: + print(f"Failed to restart {args.restart}") + sys.exit(1) + + elif args.install: + if launcher.install(args.install): + print(f"Installed {args.install}") + else: + print(f"Failed to install {args.install}") + sys.exit(1) + + elif args.uninstall: + if launcher.uninstall(args.uninstall): + print(f"Uninstalled {args.uninstall}") + else: + print(f"Failed to uninstall {args.uninstall}") + sys.exit(1) + + elif args.open_url: + if launcher.open_url(args.open_url): + print(f"Opened URL: {args.open_url}") + else: + print(f"Failed to open URL: {args.open_url}") + sys.exit(1) + + elif args.list: + apps = launcher.list_apps() + if apps: + print(f"Installed apps ({len(apps)}):") + for app in apps[:10]: # Limit for token efficiency + print(f" {app['bundle_id']}: {app['name']} (v{app['version']})") + if len(apps) > 10: + print(f" ... and {len(apps) - 10} more") + else: + print("No apps found or failed to list") + + elif args.state: + state = launcher.get_app_state(args.state) + print(f"{args.state}: {state}") + + else: + parser.print_help() + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/skills/ios-simulator-testing/scripts/app_state_capture.py b/skills/ios-simulator-testing/scripts/app_state_capture.py new file mode 100755 index 000000000..571ab2ef9 --- /dev/null +++ b/skills/ios-simulator-testing/scripts/app_state_capture.py @@ -0,0 +1,292 @@ +#!/usr/bin/env python3 +""" +App State Capture for iOS Simulator + +Captures complete app state including screenshot, accessibility tree, and logs. +Optimized for minimal token output. 
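+
+Each capture is written to a timestamped directory (app-state-YYYYMMDD-HHMMSS/)
+containing screenshot.png, accessibility-tree.json, app-logs.txt (only when an app
+bundle ID is supplied), device-info.json, and a summary in both JSON and Markdown.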
+ +Usage: python scripts/app_state_capture.py [options] +""" + +import argparse +import json +import subprocess +import sys +from datetime import datetime +from pathlib import Path + +from common import count_elements, get_accessibility_tree + + +class AppStateCapture: + """Captures comprehensive app state for debugging.""" + + def __init__(self, app_bundle_id: str | None = None, udid: str | None = None): + """ + Initialize state capture. + + Args: + app_bundle_id: Optional app bundle ID for log filtering + udid: Optional device UDID (uses booted if not specified) + """ + self.app_bundle_id = app_bundle_id + self.udid = udid + + def capture_screenshot(self, output_path: Path) -> bool: + """Capture screenshot of current screen.""" + cmd = ["xcrun", "simctl", "io"] + + if self.udid: + cmd.append(self.udid) + else: + cmd.append("booted") + + cmd.extend(["screenshot", str(output_path)]) + + try: + subprocess.run(cmd, capture_output=True, check=True) + return True + except subprocess.CalledProcessError: + return False + + def capture_accessibility_tree(self, output_path: Path) -> dict: + """Capture accessibility tree using shared utility.""" + try: + # Use shared utility to fetch tree + tree = get_accessibility_tree(self.udid, nested=True) + + # Save tree + with open(output_path, "w") as f: + json.dump(tree, f, indent=2) + + # Return summary using shared utility + return {"captured": True, "element_count": count_elements(tree)} + except Exception as e: + return {"captured": False, "error": str(e)} + + def capture_logs(self, output_path: Path, line_limit: int = 100) -> dict: + """Capture recent app logs.""" + if not self.app_bundle_id: + # Can't capture logs without app ID + return {"captured": False, "reason": "No app bundle ID specified"} + + # Get app name from bundle ID (simplified) + app_name = self.app_bundle_id.split(".")[-1] + + cmd = ["xcrun", "simctl", "spawn"] + + if self.udid: + cmd.append(self.udid) + else: + cmd.append("booted") + + cmd.extend( + [ + "log", + "show", + "--predicate", + f'process == "{app_name}"', + "--last", + "1m", # Last 1 minute + "--style", + "compact", + ] + ) + + try: + result = subprocess.run(cmd, check=False, capture_output=True, text=True, timeout=5) + logs = result.stdout + + # Limit lines for token efficiency + lines = logs.split("\n") + if len(lines) > line_limit: + lines = lines[-line_limit:] + + # Save logs + with open(output_path, "w") as f: + f.write("\n".join(lines)) + + # Analyze for issues + warning_count = sum(1 for line in lines if "warning" in line.lower()) + error_count = sum(1 for line in lines if "error" in line.lower()) + + return { + "captured": True, + "lines": len(lines), + "warnings": warning_count, + "errors": error_count, + } + except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as e: + return {"captured": False, "error": str(e)} + + def capture_device_info(self) -> dict: + """Get device information.""" + cmd = ["xcrun", "simctl", "list", "devices", "booted"] + + if self.udid: + # Specific device info + cmd = ["xcrun", "simctl", "list", "devices"] + + try: + result = subprocess.run(cmd, capture_output=True, text=True, check=True) + + # Parse output for device info (simplified) + lines = result.stdout.split("\n") + device_info = {} + + for line in lines: + if "iPhone" in line or "iPad" in line: + # Extract device name and state + parts = line.strip().split("(") + if parts: + device_info["name"] = parts[0].strip() + if len(parts) > 2: + device_info["udid"] = parts[1].replace(")", "").strip() + device_info["state"] = 
parts[2].replace(")", "").strip() + break + + return device_info + except subprocess.CalledProcessError: + return {} + + def capture_all(self, output_dir: str, log_lines: int = 100) -> dict: + """ + Capture complete app state. + + Args: + output_dir: Directory to save artifacts + log_lines: Number of log lines to capture + + Returns: + Summary of captured state + """ + # Create output directory + output_path = Path(output_dir) + timestamp = datetime.now().strftime("%Y%m%d-%H%M%S") + capture_dir = output_path / f"app-state-{timestamp}" + capture_dir.mkdir(parents=True, exist_ok=True) + + summary = {"timestamp": datetime.now().isoformat(), "output_dir": str(capture_dir)} + + # Capture screenshot + screenshot_path = capture_dir / "screenshot.png" + if self.capture_screenshot(screenshot_path): + summary["screenshot"] = "screenshot.png" + + # Capture accessibility tree + accessibility_path = capture_dir / "accessibility-tree.json" + tree_info = self.capture_accessibility_tree(accessibility_path) + summary["accessibility"] = tree_info + + # Capture logs (if app ID provided) + if self.app_bundle_id: + logs_path = capture_dir / "app-logs.txt" + log_info = self.capture_logs(logs_path, log_lines) + summary["logs"] = log_info + + # Get device info + device_info = self.capture_device_info() + if device_info: + summary["device"] = device_info + # Save device info + with open(capture_dir / "device-info.json", "w") as f: + json.dump(device_info, f, indent=2) + + # Save summary + with open(capture_dir / "summary.json", "w") as f: + json.dump(summary, f, indent=2) + + # Create markdown summary + self._create_summary_md(capture_dir, summary) + + return summary + + def _create_summary_md(self, capture_dir: Path, summary: dict) -> None: + """Create markdown summary file.""" + md_path = capture_dir / "summary.md" + + with open(md_path, "w") as f: + f.write("# App State Capture\n\n") + f.write(f"**Timestamp:** {summary['timestamp']}\n\n") + + if "device" in summary: + f.write("## Device\n") + device = summary["device"] + f.write(f"- Name: {device.get('name', 'Unknown')}\n") + f.write(f"- UDID: {device.get('udid', 'N/A')}\n") + f.write(f"- State: {device.get('state', 'Unknown')}\n\n") + + f.write("## Screenshot\n") + f.write("![Current Screen](screenshot.png)\n\n") + + if "accessibility" in summary: + acc = summary["accessibility"] + f.write("## Accessibility\n") + if acc.get("captured"): + f.write(f"- Elements: {acc.get('element_count', 0)}\n") + else: + f.write(f"- Error: {acc.get('error', 'Unknown')}\n") + f.write("\n") + + if "logs" in summary: + logs = summary["logs"] + f.write("## Logs\n") + if logs.get("captured"): + f.write(f"- Lines: {logs.get('lines', 0)}\n") + f.write(f"- Warnings: {logs.get('warnings', 0)}\n") + f.write(f"- Errors: {logs.get('errors', 0)}\n") + else: + f.write(f"- {logs.get('reason', logs.get('error', 'Not captured'))}\n") + f.write("\n") + + f.write("## Files\n") + f.write("- `screenshot.png` - Current screen\n") + f.write("- `accessibility-tree.json` - Full UI hierarchy\n") + if self.app_bundle_id: + f.write("- `app-logs.txt` - Recent app logs\n") + f.write("- `device-info.json` - Device details\n") + f.write("- `summary.json` - Complete capture metadata\n") + + +def main(): + """Main entry point.""" + parser = argparse.ArgumentParser(description="Capture complete app state for debugging") + parser.add_argument( + "--app-bundle-id", help="App bundle ID for log filtering (e.g., com.example.app)" + ) + parser.add_argument( + "--output", default=".", help="Output directory (default: 
current directory)" + ) + parser.add_argument( + "--log-lines", type=int, default=100, help="Number of log lines to capture (default: 100)" + ) + parser.add_argument("--udid", help="Device UDID (uses booted if not specified)") + + args = parser.parse_args() + + # Create capturer + capturer = AppStateCapture(app_bundle_id=args.app_bundle_id, udid=args.udid) + + # Capture state + try: + summary = capturer.capture_all(output_dir=args.output, log_lines=args.log_lines) + + # Token-efficient output + print(f"State captured: {summary['output_dir']}/") + + # Report any issues found + if "logs" in summary and summary["logs"].get("captured"): + logs = summary["logs"] + if logs["errors"] > 0 or logs["warnings"] > 0: + print(f"Issues found: {logs['errors']} errors, {logs['warnings']} warnings") + + if "accessibility" in summary and summary["accessibility"].get("captured"): + print(f"Elements: {summary['accessibility']['element_count']}") + + except Exception as e: + print(f"Error: {e}") + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/skills/ios-simulator-testing/scripts/build_and_test.py b/skills/ios-simulator-testing/scripts/build_and_test.py new file mode 100755 index 000000000..455030da9 --- /dev/null +++ b/skills/ios-simulator-testing/scripts/build_and_test.py @@ -0,0 +1,310 @@ +#!/usr/bin/env python3 +""" +Build and Test Automation for Xcode Projects + +Ultra token-efficient build automation with progressive disclosure via xcresult bundles. + +Features: +- Minimal default output (5-10 tokens) +- Progressive disclosure for error/warning/log details +- Native xcresult bundle support +- Clean modular architecture + +Usage Examples: + # Build (minimal output) + python scripts/build_and_test.py --project MyApp.xcodeproj + # Output: Build: SUCCESS (0 errors, 3 warnings) [xcresult-20251018-143052] + + # Get error details + python scripts/build_and_test.py --get-errors xcresult-20251018-143052 + + # Get warnings + python scripts/build_and_test.py --get-warnings xcresult-20251018-143052 + + # Get build log + python scripts/build_and_test.py --get-log xcresult-20251018-143052 + + # Get everything as JSON + python scripts/build_and_test.py --get-all xcresult-20251018-143052 --json + + # List recent builds + python scripts/build_and_test.py --list-xcresults + + # Verbose mode (for debugging) + python scripts/build_and_test.py --project MyApp.xcodeproj --verbose +""" + +import argparse +import sys +from pathlib import Path + +# Import our modular components +from xcode import BuildRunner, OutputFormatter, XCResultCache, XCResultParser + + +def main(): + """Main entry point.""" + parser = argparse.ArgumentParser( + description="Build and test Xcode projects with progressive disclosure", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Build project (minimal output) + python scripts/build_and_test.py --project MyApp.xcodeproj + + # Run tests + python scripts/build_and_test.py --project MyApp.xcodeproj --test + + # Get error details from previous build + python scripts/build_and_test.py --get-errors xcresult-20251018-143052 + + # Get all details as JSON + python scripts/build_and_test.py --get-all xcresult-20251018-143052 --json + + # List recent builds + python scripts/build_and_test.py --list-xcresults + """, + ) + + # Build/test mode arguments + build_group = parser.add_argument_group("Build/Test Options") + project_group = build_group.add_mutually_exclusive_group() + project_group.add_argument("--project", help="Path to .xcodeproj file") + 
project_group.add_argument("--workspace", help="Path to .xcworkspace file") + + build_group.add_argument("--scheme", help="Build scheme (auto-detected if not specified)") + build_group.add_argument( + "--configuration", + default="Debug", + choices=["Debug", "Release"], + help="Build configuration (default: Debug)", + ) + build_group.add_argument("--simulator", help="Simulator name (default: iPhone 15)") + build_group.add_argument("--clean", action="store_true", help="Clean before building") + build_group.add_argument("--test", action="store_true", help="Run tests") + build_group.add_argument("--suite", help="Specific test suite to run") + + # Progressive disclosure arguments + disclosure_group = parser.add_argument_group("Progressive Disclosure Options") + disclosure_group.add_argument( + "--get-errors", metavar="XCRESULT_ID", help="Get error details from xcresult" + ) + disclosure_group.add_argument( + "--get-warnings", metavar="XCRESULT_ID", help="Get warning details from xcresult" + ) + disclosure_group.add_argument( + "--get-log", metavar="XCRESULT_ID", help="Get build log from xcresult" + ) + disclosure_group.add_argument( + "--get-all", metavar="XCRESULT_ID", help="Get all details from xcresult" + ) + disclosure_group.add_argument( + "--list-xcresults", action="store_true", help="List recent xcresult bundles" + ) + + # Output options + output_group = parser.add_argument_group("Output Options") + output_group.add_argument("--verbose", action="store_true", help="Show detailed output") + output_group.add_argument("--json", action="store_true", help="Output as JSON") + + args = parser.parse_args() + + # Initialize cache + cache = XCResultCache() + + # Handle list mode + if args.list_xcresults: + xcresults = cache.list() + if args.json: + import json + + print(json.dumps(xcresults, indent=2)) + elif not xcresults: + print("No xcresult bundles found") + else: + print(f"Recent XCResult bundles ({len(xcresults)}):") + print() + for xc in xcresults: + print(f" {xc['id']}") + print(f" Created: {xc['created']}") + print(f" Size: {xc['size_mb']} MB") + print() + return 0 + + # Handle retrieval modes + xcresult_id = args.get_errors or args.get_warnings or args.get_log or args.get_all + + if xcresult_id: + xcresult_path = cache.get_path(xcresult_id) + + if not xcresult_path or not xcresult_path.exists(): + print(f"Error: XCResult bundle not found: {xcresult_id}", file=sys.stderr) + print("Use --list-xcresults to see available bundles", file=sys.stderr) + return 1 + + # Load cached stderr for progressive disclosure + cached_stderr = cache.get_stderr(xcresult_id) + parser = XCResultParser(xcresult_path, stderr=cached_stderr) + + # Get errors + if args.get_errors: + errors = parser.get_errors() + if args.json: + import json + + print(json.dumps(errors, indent=2)) + else: + print(OutputFormatter.format_errors(errors)) + return 0 + + # Get warnings + if args.get_warnings: + warnings = parser.get_warnings() + if args.json: + import json + + print(json.dumps(warnings, indent=2)) + else: + print(OutputFormatter.format_warnings(warnings)) + return 0 + + # Get log + if args.get_log: + log = parser.get_build_log() + if log: + print(OutputFormatter.format_log(log)) + else: + print("No build log available", file=sys.stderr) + return 1 + return 0 + + # Get all + if args.get_all: + error_count, warning_count = parser.count_issues() + errors = parser.get_errors() + warnings = parser.get_warnings() + build_log = parser.get_build_log() + + if args.json: + import json + + data = { + "xcresult_id": xcresult_id, + 
"error_count": error_count, + "warning_count": warning_count, + "errors": errors, + "warnings": warnings, + "log_preview": build_log[:1000] if build_log else None, + } + print(json.dumps(data, indent=2)) + else: + print(f"XCResult: {xcresult_id}") + print(f"Errors: {error_count}, Warnings: {warning_count}") + print() + if errors: + print(OutputFormatter.format_errors(errors, limit=10)) + print() + if warnings: + print(OutputFormatter.format_warnings(warnings, limit=10)) + print() + if build_log: + print("Build Log (last 30 lines):") + print(OutputFormatter.format_log(build_log, lines=30)) + return 0 + + # Build/test mode + if not args.project and not args.workspace: + # Try to auto-detect in current directory + cwd = Path.cwd() + projects = list(cwd.glob("*.xcodeproj")) + workspaces = list(cwd.glob("*.xcworkspace")) + + if workspaces: + args.workspace = str(workspaces[0]) + elif projects: + args.project = str(projects[0]) + else: + parser.error("No project or workspace specified and none found in current directory") + + # Initialize builder + builder = BuildRunner( + project_path=args.project, + workspace_path=args.workspace, + scheme=args.scheme, + configuration=args.configuration, + simulator=args.simulator, + cache=cache, + ) + + # Execute build or test + if args.test: + success, xcresult_id, stderr = builder.test(test_suite=args.suite) + else: + success, xcresult_id, stderr = builder.build(clean=args.clean) + + if not xcresult_id and not stderr: + print("Error: Build/test failed without creating xcresult or error output", file=sys.stderr) + return 1 + + # Save stderr to cache for progressive disclosure + if xcresult_id and stderr: + cache.save_stderr(xcresult_id, stderr) + + # Parse results + xcresult_path = cache.get_path(xcresult_id) if xcresult_id else None + parser = XCResultParser(xcresult_path, stderr=stderr) + error_count, warning_count = parser.count_issues() + + # Format output + status = "SUCCESS" if success else "FAILED" + + # Generate hints for failed builds + hints = None + if not success: + errors = parser.get_errors() + hints = OutputFormatter.generate_hints(errors) + + if args.verbose: + # Verbose mode with error/warning details + errors = parser.get_errors() if error_count > 0 else None + warnings = parser.get_warnings() if warning_count > 0 else None + + output = OutputFormatter.format_verbose( + status=status, + error_count=error_count, + warning_count=warning_count, + xcresult_id=xcresult_id or "N/A", + errors=errors, + warnings=warnings, + ) + print(output) + elif args.json: + # JSON mode + data = { + "success": success, + "xcresult_id": xcresult_id or None, + "error_count": error_count, + "warning_count": warning_count, + } + if hints: + data["hints"] = hints + import json + + print(json.dumps(data, indent=2)) + else: + # Minimal mode (default) + output = OutputFormatter.format_minimal( + status=status, + error_count=error_count, + warning_count=warning_count, + xcresult_id=xcresult_id or "N/A", + hints=hints, + ) + print(output) + + # Exit with appropriate code + return 0 if success else 1 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/skills/ios-simulator-testing/scripts/common/__init__.py b/skills/ios-simulator-testing/scripts/common/__init__.py new file mode 100644 index 000000000..e024478dd --- /dev/null +++ b/skills/ios-simulator-testing/scripts/common/__init__.py @@ -0,0 +1,27 @@ +""" +Common utilities shared across iOS simulator scripts. 
+ +This module centralizes genuinely reused code patterns to eliminate duplication +while respecting Jackson's Law - no over-abstraction, only truly shared logic. + +Organization: +- idb_utils: IDB-specific operations (accessibility tree, element manipulation) +- device_utils: Command building for simctl and IDB +""" + +from .device_utils import build_idb_command, build_simctl_command +from .idb_utils import ( + count_elements, + flatten_tree, + get_accessibility_tree, + get_screen_size, +) + +__all__ = [ + "build_idb_command", + "build_simctl_command", + "count_elements", + "flatten_tree", + "get_accessibility_tree", + "get_screen_size", +] diff --git a/skills/ios-simulator-testing/scripts/common/device_utils.py b/skills/ios-simulator-testing/scripts/common/device_utils.py new file mode 100644 index 000000000..653a89287 --- /dev/null +++ b/skills/ios-simulator-testing/scripts/common/device_utils.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python3 +""" +Shared device and simulator utilities. + +Common patterns for interacting with simulators via xcrun simctl and IDB. +Standardizes command building and device targeting to prevent errors. + +Follows Jackson's Law - only extracts genuinely reused patterns. + +Used by: +- app_launcher.py (8 call sites) - App lifecycle commands +- Multiple scripts (15+ locations) - IDB command building +""" + + +def build_simctl_command( + operation: str, + udid: str | None = None, + *args, +) -> list[str]: + """ + Build xcrun simctl command with proper device handling. + + Standardizes command building to prevent device targeting bugs. + Automatically uses "booted" if no UDID provided. + + Used by: + - app_launcher.py: launch, terminate, install, uninstall, openurl, listapps, spawn + - Multiple scripts: generic simctl operations + + Args: + operation: simctl operation (launch, terminate, install, etc.) + udid: Device UDID (uses 'booted' if None) + *args: Additional command arguments + + Returns: + Complete command list ready for subprocess.run() + + Examples: + # Launch app on booted simulator + cmd = build_simctl_command("launch", None, "com.app.bundle") + # Returns: ["xcrun", "simctl", "launch", "booted", "com.app.bundle"] + + # Launch on specific device + cmd = build_simctl_command("launch", "ABC123", "com.app.bundle") + # Returns: ["xcrun", "simctl", "launch", "ABC123", "com.app.bundle"] + + # Install app on specific device + cmd = build_simctl_command("install", "ABC123", "/path/to/app.app") + # Returns: ["xcrun", "simctl", "install", "ABC123", "/path/to/app.app"] + """ + cmd = ["xcrun", "simctl", operation] + + # Add device (booted or specific UDID) + cmd.append(udid if udid else "booted") + + # Add remaining arguments + cmd.extend(str(arg) for arg in args) + + return cmd + + +def build_idb_command( + operation: str, + udid: str | None = None, + *args, +) -> list[str]: + """ + Build IDB command with proper device targeting. + + Standardizes IDB command building across all scripts using IDB. + Handles device UDID consistently. 
+ + Used by: + - navigator.py: ui tap, ui text, ui describe-all + - gesture.py: ui swipe, ui tap + - keyboard.py: ui key, ui text, ui tap + - And more: 15+ locations + + Args: + operation: IDB operation path (e.g., "ui tap", "ui text", "ui describe-all") + udid: Device UDID (omits --udid flag if None, IDB uses booted by default) + *args: Additional command arguments + + Returns: + Complete command list ready for subprocess.run() + + Examples: + # Tap on booted simulator + cmd = build_idb_command("ui tap", None, "200", "400") + # Returns: ["idb", "ui", "tap", "200", "400"] + + # Tap on specific device + cmd = build_idb_command("ui tap", "ABC123", "200", "400") + # Returns: ["idb", "ui", "tap", "200", "400", "--udid", "ABC123"] + + # Get accessibility tree + cmd = build_idb_command("ui describe-all", "ABC123", "--json", "--nested") + # Returns: ["idb", "ui", "describe-all", "--json", "--nested", "--udid", "ABC123"] + + # Enter text + cmd = build_idb_command("ui text", None, "hello world") + # Returns: ["idb", "ui", "text", "hello world"] + """ + # Split operation into parts (e.g., "ui tap" -> ["ui", "tap"]) + cmd = ["idb"] + operation.split() + + # Add arguments + cmd.extend(str(arg) for arg in args) + + # Add device targeting if specified (optional for IDB, uses booted by default) + if udid: + cmd.extend(["--udid", udid]) + + return cmd diff --git a/skills/ios-simulator-testing/scripts/common/idb_utils.py b/skills/ios-simulator-testing/scripts/common/idb_utils.py new file mode 100644 index 000000000..ecf7037d2 --- /dev/null +++ b/skills/ios-simulator-testing/scripts/common/idb_utils.py @@ -0,0 +1,180 @@ +#!/usr/bin/env python3 +""" +Shared IDB utility functions. + +This module provides common IDB operations used across multiple scripts. +Follows Jackson's Law - only shared code that's truly reused, not speculative. + +Used by: +- navigator.py - Accessibility tree navigation +- screen_mapper.py - UI element analysis +- accessibility_audit.py - WCAG compliance checking +- test_recorder.py - Test documentation +- app_state_capture.py - State snapshots +- gesture.py - Touch gesture operations +""" + +import json +import subprocess +import sys + + +def get_accessibility_tree(udid: str | None = None, nested: bool = True) -> dict: + """ + Fetch accessibility tree from IDB. + + The accessibility tree represents the complete UI hierarchy of the current + screen, with all element properties needed for semantic navigation. + + Args: + udid: Device UDID (uses booted simulator if None) + nested: Include nested structure (default True). If False, returns flat array. + + Returns: + Root element of accessibility tree as dict. + Structure: { + "type": "Window", + "AXLabel": "App Name", + "frame": {"x": 0, "y": 0, "width": 390, "height": 844}, + "children": [...] 
+ } + + Raises: + SystemExit: If IDB command fails or returns invalid JSON + + Example: + tree = get_accessibility_tree("UDID123") + # Root is Window element with all children nested + """ + cmd = ["idb", "ui", "describe-all", "--json"] + if nested: + cmd.append("--nested") + if udid: + cmd.extend(["--udid", udid]) + + try: + result = subprocess.run(cmd, capture_output=True, text=True, check=True) + tree_data = json.loads(result.stdout) + + # IDB returns array format, extract first element (root) + if isinstance(tree_data, list) and len(tree_data) > 0: + return tree_data[0] + return tree_data + except subprocess.CalledProcessError as e: + print(f"Error: Failed to get accessibility tree: {e.stderr}", file=sys.stderr) + sys.exit(1) + except json.JSONDecodeError: + print("Error: Invalid JSON from idb", file=sys.stderr) + sys.exit(1) + + +def flatten_tree(node: dict, depth: int = 0, elements: list[dict] | None = None) -> list[dict]: + """ + Flatten nested accessibility tree into list of elements. + + Converts the hierarchical accessibility tree into a flat list where each + element includes its depth for context. + + Used by: + - navigator.py - Element finding + - screen_mapper.py - Element analysis + - accessibility_audit.py - Audit scanning + + Args: + node: Root node of tree (typically from get_accessibility_tree) + depth: Current depth (used internally, start at 0) + elements: Accumulator list (used internally, start as None) + + Returns: + Flat list of elements, each with "depth" key indicating nesting level. + Structure of each element: { + "type": "Button", + "AXLabel": "Login", + "frame": {...}, + "depth": 2, + ... + } + + Example: + tree = get_accessibility_tree() + flat = flatten_tree(tree) + for elem in flat: + print(f"{' ' * elem['depth']}{elem.get('type')}: {elem.get('AXLabel')}") + """ + if elements is None: + elements = [] + + # Add current node with depth tracking + node_copy = node.copy() + node_copy["depth"] = depth + elements.append(node_copy) + + # Process children recursively + for child in node.get("children", []): + flatten_tree(child, depth + 1, elements) + + return elements + + +def count_elements(node: dict) -> int: + """ + Count total elements in tree (recursive). + + Traverses entire tree counting all elements for reporting purposes. + + Used by: + - test_recorder.py - Element counting per step + - screen_mapper.py - Summary statistics + + Args: + node: Root node of tree + + Returns: + Total element count including root and all descendants + + Example: + tree = get_accessibility_tree() + total = count_elements(tree) + print(f"Screen has {total} elements") + """ + count = 1 + for child in node.get("children", []): + count += count_elements(child) + return count + + +def get_screen_size(udid: str | None = None) -> tuple[int, int]: + """ + Get screen dimensions from accessibility tree. + + Extracts the screen size from the root element's frame. Useful for + gesture calculations and coordinate normalization. + + Used by: + - gesture.py - Gesture positioning + - Potentially: screenshot positioning, screen-aware scaling + + Args: + udid: Device UDID (uses booted if None) + + Returns: + (width, height) tuple. Defaults to (390, 844) if detection fails + or tree cannot be accessed. 
+ + Example: + width, height = get_screen_size() + center_x = width // 2 + center_y = height // 2 + """ + DEFAULT_WIDTH = 390 # iPhone 14 + DEFAULT_HEIGHT = 844 + + try: + tree = get_accessibility_tree(udid, nested=False) + frame = tree.get("frame", {}) + width = int(frame.get("width", DEFAULT_WIDTH)) + height = int(frame.get("height", DEFAULT_HEIGHT)) + return (width, height) + except Exception: + # Silently fall back to defaults if tree access fails + return (DEFAULT_WIDTH, DEFAULT_HEIGHT) diff --git a/skills/ios-simulator-testing/scripts/gesture.py b/skills/ios-simulator-testing/scripts/gesture.py new file mode 100755 index 000000000..0482e70dc --- /dev/null +++ b/skills/ios-simulator-testing/scripts/gesture.py @@ -0,0 +1,334 @@ +#!/usr/bin/env python3 +""" +iOS Gesture Controller - Swipes and Complex Gestures + +Performs navigation gestures like swipes, scrolls, and pinches. +Token-efficient output for common navigation patterns. + +This script handles touch gestures for iOS simulator automation. It provides +directional swipes, multi-swipe scrolling, pull-to-refresh, and pinch gestures. +Automatically detects screen size from the device for accurate gesture positioning. + +Key Features: +- Directional swipes (up, down, left, right) +- Multi-swipe scrolling with customizable amount +- Pull-to-refresh gesture +- Pinch to zoom (in/out) +- Custom swipe between any two points +- Drag and drop simulation +- Auto-detects screen dimensions from device + +Usage Examples: + # Simple directional swipe + python scripts/gesture.py --swipe up --udid + + # Scroll down multiple times + python scripts/gesture.py --scroll down --scroll-amount 3 --udid + + # Pull to refresh + python scripts/gesture.py --refresh --udid + + # Custom swipe coordinates + python scripts/gesture.py --swipe-from 100,500 --swipe-to 100,100 --udid + + # Pinch to zoom + python scripts/gesture.py --pinch out --udid + + # Long press at coordinates + python scripts/gesture.py --long-press 200,300 --duration 2.0 --udid + +Output Format: + Swiped up + Scrolled down (3x) + Performed pull to refresh + +Gesture Details: +- Swipes use 70% of screen by default (configurable) +- Scrolls are multiple small 30% swipes with delays +- Start points are offset from edges for reliability +- Screen size auto-detected from accessibility tree root element +- Falls back to iPhone 14 dimensions (390x844) if detection fails + +Technical Details: +- Uses `idb ui swipe x1 y1 x2 y2` for gesture execution +- Duration parameter converts to milliseconds for IDB +- Automatically fetches screen size on initialization +- Parses IDB accessibility tree to get root frame dimensions +- All coordinates calculated as fractions of screen size for device independence +""" + +import argparse +import subprocess +import sys +import time + +from common import get_screen_size + + +class GestureController: + """Performs gestures on iOS simulator.""" + + # Standard screen dimensions (will be detected if possible) + DEFAULT_WIDTH = 390 # iPhone 14 + DEFAULT_HEIGHT = 844 + + def __init__(self, udid: str | None = None): + """Initialize gesture controller.""" + self.udid = udid + self.screen_size = self._get_screen_size() + + def _get_screen_size(self) -> tuple[int, int]: + """Try to detect screen size from device using shared utility.""" + return get_screen_size(self.udid) + + def swipe(self, direction: str, distance_ratio: float = 0.7) -> bool: + """ + Perform directional swipe. 
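+        Start and end points are computed as fractions of the detected screen size,
+        so the same call behaves consistently across simulator sizes.
+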
+ + Args: + direction: up, down, left, right + distance_ratio: How far to swipe (0.0-1.0 of screen) + + Returns: + Success status + """ + width, height = self.screen_size + center_x = width // 2 + center_y = height // 2 + + # Calculate swipe coordinates based on direction + if direction == "up": + start = (center_x, int(height * 0.7)) + end = (center_x, int(height * (1 - distance_ratio + 0.3))) + elif direction == "down": + start = (center_x, int(height * 0.3)) + end = (center_x, int(height * (distance_ratio - 0.3 + 0.3))) + elif direction == "left": + start = (int(width * 0.8), center_y) + end = (int(width * (1 - distance_ratio + 0.2)), center_y) + elif direction == "right": + start = (int(width * 0.2), center_y) + end = (int(width * (distance_ratio - 0.2 + 0.2)), center_y) + else: + return False + + return self.swipe_between(start, end) + + def swipe_between( + self, start: tuple[int, int], end: tuple[int, int], duration: float = 0.3 + ) -> bool: + """ + Swipe between two points. + + Args: + start: Starting coordinates (x, y) + end: Ending coordinates (x, y) + duration: Swipe duration in seconds + + Returns: + Success status + """ + cmd = ["idb", "ui", "swipe"] + cmd.extend([str(start[0]), str(start[1]), str(end[0]), str(end[1])]) + + # IDB doesn't support duration directly, but we can add delay + if duration != 0.3: + cmd.extend(["--duration", str(int(duration * 1000))]) + + if self.udid: + cmd.extend(["--udid", self.udid]) + + try: + subprocess.run(cmd, capture_output=True, check=True) + return True + except subprocess.CalledProcessError: + return False + + def scroll(self, direction: str, amount: int = 3) -> bool: + """ + Perform multiple small swipes to scroll. + + Args: + direction: up, down + amount: Number of small swipes + + Returns: + Success status + """ + for _ in range(amount): + if not self.swipe(direction, distance_ratio=0.3): + return False + time.sleep(0.2) # Small delay between swipes + return True + + def tap_and_hold(self, x: int, y: int, duration: float = 2.0) -> bool: + """ + Long press at coordinates. + + Args: + x, y: Coordinates + duration: Hold duration in seconds + + Returns: + Success status + """ + # IDB doesn't have native long press, simulate with tap + # In real implementation, might need to use different approach + cmd = ["idb", "ui", "tap", str(x), str(y)] + + if self.udid: + cmd.extend(["--udid", self.udid]) + + try: + subprocess.run(cmd, capture_output=True, check=True) + # Simulate hold with delay + time.sleep(duration) + return True + except subprocess.CalledProcessError: + return False + + def pinch(self, direction: str = "out", center: tuple[int, int] | None = None) -> bool: + """ + Perform pinch gesture (zoom in/out). 
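+        Approximated here with two sequential swipes moving away from (direction "out")
+        or toward (direction "in") the center point, rather than a true multi-touch gesture.
+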
+ + Args: + direction: 'in' (zoom out) or 'out' (zoom in) + center: Center point for pinch + + Returns: + Success status + """ + if not center: + width, height = self.screen_size + center = (width // 2, height // 2) + + # Calculate pinch points + offset = 100 if direction == "out" else 50 + + if direction == "out": + # Zoom in - fingers move apart + start1 = (center[0] - 20, center[1] - 20) + end1 = (center[0] - offset, center[1] - offset) + start2 = (center[0] + 20, center[1] + 20) + end2 = (center[0] + offset, center[1] + offset) + else: + # Zoom out - fingers move together + start1 = (center[0] - offset, center[1] - offset) + end1 = (center[0] - 20, center[1] - 20) + start2 = (center[0] + offset, center[1] + offset) + end2 = (center[0] + 20, center[1] + 20) + + # Perform two swipes simultaneously (simulated) + success1 = self.swipe_between(start1, end1) + success2 = self.swipe_between(start2, end2) + + return success1 and success2 + + def drag_and_drop(self, start: tuple[int, int], end: tuple[int, int]) -> bool: + """ + Drag element from one position to another. + + Args: + start: Starting coordinates + end: Ending coordinates + + Returns: + Success status + """ + # Use slow swipe to simulate drag + return self.swipe_between(start, end, duration=1.0) + + def refresh(self) -> bool: + """Pull to refresh gesture.""" + width, _ = self.screen_size + start = (width // 2, 100) + end = (width // 2, 400) + return self.swipe_between(start, end) + + +def main(): + """Main entry point.""" + parser = argparse.ArgumentParser(description="Perform gestures on iOS simulator") + + # Gesture options + parser.add_argument( + "--swipe", choices=["up", "down", "left", "right"], help="Perform directional swipe" + ) + parser.add_argument("--swipe-from", help="Custom swipe start coordinates (x,y)") + parser.add_argument("--swipe-to", help="Custom swipe end coordinates (x,y)") + parser.add_argument( + "--scroll", choices=["up", "down"], help="Scroll in direction (multiple small swipes)" + ) + parser.add_argument( + "--scroll-amount", type=int, default=3, help="Number of scroll swipes (default: 3)" + ) + parser.add_argument("--long-press", help="Long press at coordinates (x,y)") + parser.add_argument( + "--duration", type=float, default=2.0, help="Duration for long press in seconds" + ) + parser.add_argument( + "--pinch", choices=["in", "out"], help="Pinch gesture (in=zoom out, out=zoom in)" + ) + parser.add_argument("--refresh", action="store_true", help="Pull to refresh gesture") + parser.add_argument("--udid", help="Device UDID") + + args = parser.parse_args() + + controller = GestureController(udid=args.udid) + + # Execute requested gesture + if args.swipe: + if controller.swipe(args.swipe): + print(f"Swiped {args.swipe}") + else: + print(f"Failed to swipe {args.swipe}") + sys.exit(1) + + elif args.swipe_from and args.swipe_to: + # Custom swipe + start = tuple(map(int, args.swipe_from.split(","))) + end = tuple(map(int, args.swipe_to.split(","))) + + if controller.swipe_between(start, end): + print(f"Swiped from {start} to {end}") + else: + print("Failed to swipe") + sys.exit(1) + + elif args.scroll: + if controller.scroll(args.scroll, args.scroll_amount): + print(f"Scrolled {args.scroll} ({args.scroll_amount}x)") + else: + print(f"Failed to scroll {args.scroll}") + sys.exit(1) + + elif args.long_press: + coords = tuple(map(int, args.long_press.split(","))) + if controller.tap_and_hold(coords[0], coords[1], args.duration): + print(f"Long pressed at {coords} for {args.duration}s") + else: + print("Failed to 
long press") + sys.exit(1) + + elif args.pinch: + if controller.pinch(args.pinch): + action = "Zoomed in" if args.pinch == "out" else "Zoomed out" + print(action) + else: + print(f"Failed to pinch {args.pinch}") + sys.exit(1) + + elif args.refresh: + if controller.refresh(): + print("Performed pull to refresh") + else: + print("Failed to refresh") + sys.exit(1) + + else: + parser.print_help() + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/skills/ios-simulator-testing/scripts/keyboard.py b/skills/ios-simulator-testing/scripts/keyboard.py new file mode 100755 index 000000000..b88a74cf1 --- /dev/null +++ b/skills/ios-simulator-testing/scripts/keyboard.py @@ -0,0 +1,379 @@ +#!/usr/bin/env python3 +""" +iOS Keyboard Controller - Text Entry and Hardware Buttons + +Handles keyboard input, special keys, and hardware button simulation. +Token-efficient text entry and navigation control. + +This script provides text input and hardware button control for iOS simulator +automation. It handles both typing text strings and pressing special keys like +return, delete, tab, etc. Also controls hardware buttons like home and lock. + +Key Features: +- Type text strings into focused elements +- Press special keys (return, delete, tab, space, arrows) +- Hardware button simulation (home, lock, volume, screenshot) +- Character-by-character typing with delays (for animations) +- Multiple key press support +- iOS HID key code mapping for reliability + +Usage Examples: + # Type text into focused field + python scripts/keyboard.py --type "hello@example.com" --udid + + # Press return key to submit + python scripts/keyboard.py --key return --udid + + # Press delete 3 times + python scripts/keyboard.py --key delete --key delete --key delete --udid + + # Press home button + python scripts/keyboard.py --button home --udid + + # Press lock button + python scripts/keyboard.py --button lock --udid + + # Type with delay between characters (for animations) + python scripts/keyboard.py --type "slow typing" --delay 0.1 --udid + +Output Format: + Typed: "hello@example.com" + Pressed return + Pressed home button + +Special Keys Supported: +- return/enter: Submit forms, new lines (HID code 40) +- delete/backspace: Remove characters (HID code 42) +- tab: Navigate between fields (HID code 43) +- space: Space character (HID code 44) +- escape: Cancel/dismiss (HID code 41) +- up/down/left/right: Arrow keys (HID codes 82/81/80/79) + +Hardware Buttons Supported: +- home: Return to home screen +- lock/power: Lock device +- volume-up/volume-down: Volume control +- ringer: Toggle mute +- screenshot: Capture screen + +Technical Details: +- Uses `idb ui text` for typing text strings +- Uses `idb ui key ` for special keys with iOS HID codes +- HID codes from Apple's UIKeyboardHIDUsage specification +- Hardware buttons use `xcrun simctl` button actions +- Text entry works on currently focused element +- Special keys are integers (40=Return, 42=Delete, etc.) 
+""" + +import argparse +import subprocess +import sys +import time + + +class KeyboardController: + """Controls keyboard and hardware buttons on iOS simulator.""" + + # Special key mappings to iOS HID key codes + # See: https://developer.apple.com/documentation/uikit/uikeyboardhidusage + SPECIAL_KEYS = { + "return": 40, + "enter": 40, + "delete": 42, + "backspace": 42, + "tab": 43, + "space": 44, + "escape": 41, + "up": 82, + "down": 81, + "left": 80, + "right": 79, + } + + # Hardware button mappings + HARDWARE_BUTTONS = { + "home": "HOME", + "lock": "LOCK", + "volume-up": "VOLUME_UP", + "volume-down": "VOLUME_DOWN", + "ringer": "RINGER", + "power": "LOCK", # Alias + "screenshot": "SCREENSHOT", + } + + def __init__(self, udid: str | None = None): + """Initialize keyboard controller.""" + self.udid = udid + + def type_text(self, text: str, delay: float = 0.0) -> bool: + """ + Type text into current focus. + + Args: + text: Text to type + delay: Delay between characters (for slow typing effect) + + Returns: + Success status + """ + if delay > 0: + # Type character by character with delay + for char in text: + if not self._type_single(char): + return False + time.sleep(delay) + return True + # Type all at once (efficient) + return self._type_single(text) + + def _type_single(self, text: str) -> bool: + """Type text using IDB.""" + cmd = ["idb", "ui", "text", text] + if self.udid: + cmd.extend(["--udid", self.udid]) + + try: + subprocess.run(cmd, capture_output=True, check=True) + return True + except subprocess.CalledProcessError: + return False + + def press_key(self, key: str, count: int = 1) -> bool: + """ + Press a special key. + + Args: + key: Key name (return, delete, tab, etc.) + count: Number of times to press + + Returns: + Success status + """ + # Map key name to IDB key code + key_code = self.SPECIAL_KEYS.get(key.lower()) + if not key_code: + # Try as literal integer key code + try: + key_code = int(key) + except ValueError: + return False + + cmd = ["idb", "ui", "key", str(key_code)] + if self.udid: + cmd.extend(["--udid", self.udid]) + + try: + for _ in range(count): + subprocess.run(cmd, capture_output=True, check=True) + if count > 1: + time.sleep(0.1) # Small delay for multiple presses + return True + except subprocess.CalledProcessError: + return False + + def press_key_sequence(self, keys: list[str]) -> bool: + """ + Press a sequence of keys. + + Args: + keys: List of key names + + Returns: + Success status + """ + cmd_base = ["idb", "ui", "key-sequence"] + + # Map keys to codes + mapped_keys = [] + for key in keys: + mapped = self.SPECIAL_KEYS.get(key.lower()) + if mapped is None: + # Try as integer + try: + mapped = int(key) + except ValueError: + return False + mapped_keys.append(str(mapped)) + + cmd = cmd_base + mapped_keys + + if self.udid: + cmd.extend(["--udid", self.udid]) + + try: + subprocess.run(cmd, capture_output=True, check=True) + return True + except subprocess.CalledProcessError: + return False + + def press_hardware_button(self, button: str) -> bool: + """ + Press hardware button. + + Args: + button: Button name (home, lock, volume-up, etc.) 
+ + Returns: + Success status + """ + button_code = self.HARDWARE_BUTTONS.get(button.lower()) + if not button_code: + return False + + cmd = ["idb", "ui", "button", button_code] + if self.udid: + cmd.extend(["--udid", self.udid]) + + try: + subprocess.run(cmd, capture_output=True, check=True) + return True + except subprocess.CalledProcessError: + return False + + def clear_text(self, select_all: bool = True) -> bool: + """ + Clear text in current field. + + Args: + select_all: Use Cmd+A to select all first + + Returns: + Success status + """ + if select_all: + # Select all then delete + # Note: This might need adjustment for iOS keyboard shortcuts + success = self.press_key_combo(["cmd", "a"]) + if success: + return self.press_key("delete") + else: + # Just delete multiple times + return self.press_key("delete", count=50) + return None + + def press_key_combo(self, keys: list[str]) -> bool: + """ + Press key combination (like Cmd+A). + + Args: + keys: List of keys to press together + + Returns: + Success status + """ + # IDB doesn't directly support key combos + # This is a workaround - may need platform-specific handling + if "cmd" in keys or "command" in keys: + # Handle common shortcuts + if "a" in keys: + # Select all - might work with key sequence + return self.press_key_sequence(["command", "a"]) + if "c" in keys: + return self.press_key_sequence(["command", "c"]) + if "v" in keys: + return self.press_key_sequence(["command", "v"]) + if "x" in keys: + return self.press_key_sequence(["command", "x"]) + + # Try as sequence + return self.press_key_sequence(keys) + + def dismiss_keyboard(self) -> bool: + """Dismiss on-screen keyboard.""" + # Common ways to dismiss keyboard on iOS + # Try Done button first, then Return + success = self.press_key("return") + if not success: + # Try tapping outside (would need coordinate) + pass + return success + + +def main(): + """Main entry point.""" + parser = argparse.ArgumentParser(description="Control keyboard and hardware buttons") + + # Text input + parser.add_argument("--type", help="Type text into current focus") + parser.add_argument("--slow", action="store_true", help="Type slowly (character by character)") + + # Special keys + parser.add_argument("--key", help="Press special key (return, delete, tab, space, etc.)") + parser.add_argument("--key-sequence", help="Press key sequence (comma-separated)") + parser.add_argument("--count", type=int, default=1, help="Number of times to press key") + + # Hardware buttons + parser.add_argument( + "--button", + choices=["home", "lock", "volume-up", "volume-down", "ringer", "screenshot"], + help="Press hardware button", + ) + + # Other operations + parser.add_argument("--clear", action="store_true", help="Clear current text field") + parser.add_argument("--dismiss", action="store_true", help="Dismiss keyboard") + + parser.add_argument("--udid", help="Device UDID") + + args = parser.parse_args() + + controller = KeyboardController(udid=args.udid) + + # Execute requested action + if args.type: + delay = 0.1 if args.slow else 0.0 + if controller.type_text(args.type, delay): + if args.slow: + print(f'Typed: "{args.type}" (slowly)') + else: + print(f'Typed: "{args.type}"') + else: + print("Failed to type text") + sys.exit(1) + + elif args.key: + if controller.press_key(args.key, args.count): + if args.count > 1: + print(f"Pressed {args.key} ({args.count}x)") + else: + print(f"Pressed {args.key}") + else: + print(f"Failed to press {args.key}") + sys.exit(1) + + elif args.key_sequence: + keys = 
args.key_sequence.split(",") + if controller.press_key_sequence(keys): + print(f"Pressed sequence: {' -> '.join(keys)}") + else: + print("Failed to press key sequence") + sys.exit(1) + + elif args.button: + if controller.press_hardware_button(args.button): + print(f"Pressed {args.button} button") + else: + print(f"Failed to press {args.button}") + sys.exit(1) + + elif args.clear: + if controller.clear_text(): + print("Cleared text field") + else: + print("Failed to clear text") + sys.exit(1) + + elif args.dismiss: + if controller.dismiss_keyboard(): + print("Dismissed keyboard") + else: + print("Failed to dismiss keyboard") + sys.exit(1) + + else: + parser.print_help() + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/skills/ios-simulator-testing/scripts/log_monitor.py b/skills/ios-simulator-testing/scripts/log_monitor.py new file mode 100755 index 000000000..e827f8dc6 --- /dev/null +++ b/skills/ios-simulator-testing/scripts/log_monitor.py @@ -0,0 +1,486 @@ +#!/usr/bin/env python3 +""" +iOS Simulator Log Monitoring and Analysis + +Real-time log streaming from iOS simulators with intelligent filtering, error detection, +and token-efficient summarization. Enhanced version of app_state_capture.py's log capture. + +Features: +- Real-time log streaming from booted simulators +- Smart filtering by app bundle ID, subsystem, category, severity +- Error/warning classification and deduplication +- Duration-based or continuous follow mode +- Token-efficient summaries with full logs saved to file +- Integration with test_recorder and app_state_capture + +Usage Examples: + # Monitor app logs in real-time (follow mode) + python scripts/log_monitor.py --app com.myapp.MyApp --follow + + # Capture logs for specific duration + python scripts/log_monitor.py --app com.myapp.MyApp --duration 30s + + # Extract errors and warnings only from last 5 minutes + python scripts/log_monitor.py --severity error,warning --last 5m + + # Save logs to file + python scripts/log_monitor.py --app com.myapp.MyApp --duration 1m --output logs/ + + # Verbose output with full log lines + python scripts/log_monitor.py --app com.myapp.MyApp --verbose +""" + +import argparse +import json +import re +import signal +import subprocess +import sys +from datetime import datetime, timedelta +from pathlib import Path + + +class LogMonitor: + """Monitor and analyze iOS simulator logs with intelligent filtering.""" + + def __init__( + self, + app_bundle_id: str | None = None, + device_udid: str | None = None, + severity_filter: list[str] | None = None, + ): + """ + Initialize log monitor. + + Args: + app_bundle_id: Filter logs by app bundle ID + device_udid: Device UDID (uses booted if not specified) + severity_filter: List of severities to include (error, warning, info, debug) + """ + self.app_bundle_id = app_bundle_id + self.device_udid = device_udid or "booted" + self.severity_filter = severity_filter or ["error", "warning", "info", "debug"] + + # Log storage + self.log_lines: list[str] = [] + self.errors: list[str] = [] + self.warnings: list[str] = [] + self.info_messages: list[str] = [] + + # Statistics + self.error_count = 0 + self.warning_count = 0 + self.info_count = 0 + self.debug_count = 0 + self.total_lines = 0 + + # Deduplication + self.seen_messages: set[str] = set() + + # Process control + self.log_process: subprocess.Popen | None = None + self.interrupted = False + + def parse_time_duration(self, duration_str: str) -> float: + """ + Parse duration string to seconds. 
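+        For example, "30s" returns 30, "5m" returns 300, and "1h" returns 3600.
+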
+ + Args: + duration_str: Duration like "30s", "5m", "1h" + + Returns: + Duration in seconds + """ + match = re.match(r"(\d+)([smh])", duration_str.lower()) + if not match: + raise ValueError( + f"Invalid duration format: {duration_str}. Use format like '30s', '5m', '1h'" + ) + + value, unit = match.groups() + value = int(value) + + if unit == "s": + return value + if unit == "m": + return value * 60 + if unit == "h": + return value * 3600 + + return 0 + + def classify_log_line(self, line: str) -> str | None: + """ + Classify log line by severity. + + Args: + line: Log line to classify + + Returns: + Severity level (error, warning, info, debug) or None + """ + line_lower = line.lower() + + # Error patterns + error_patterns = [ + r"\berror\b", + r"\bfault\b", + r"\bfailed\b", + r"\bexception\b", + r"\bcrash\b", + r"❌", + ] + + # Warning patterns + warning_patterns = [r"\bwarning\b", r"\bwarn\b", r"\bdeprecated\b", r"⚠️"] + + # Info patterns + info_patterns = [r"\binfo\b", r"\bnotice\b", r"ℹ️"] + + for pattern in error_patterns: + if re.search(pattern, line_lower): + return "error" + + for pattern in warning_patterns: + if re.search(pattern, line_lower): + return "warning" + + for pattern in info_patterns: + if re.search(pattern, line_lower): + return "info" + + return "debug" + + def deduplicate_message(self, line: str) -> bool: + """ + Check if message is duplicate. + + Args: + line: Log line + + Returns: + True if this is a new message, False if duplicate + """ + # Create signature by removing timestamps and process IDs + signature = re.sub(r"\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2}", "", line) + signature = re.sub(r"\[\d+\]", "", signature) + signature = re.sub(r"\s+", " ", signature).strip() + + if signature in self.seen_messages: + return False + + self.seen_messages.add(signature) + return True + + def process_log_line(self, line: str): + """ + Process a single log line. + + Args: + line: Log line to process + """ + if not line.strip(): + return + + self.total_lines += 1 + self.log_lines.append(line) + + # Classify severity + severity = self.classify_log_line(line) + + # Skip if not in filter + if severity not in self.severity_filter: + return + + # Deduplicate (for errors and warnings) + if severity in ["error", "warning"] and not self.deduplicate_message(line): + return + + # Store by severity + if severity == "error": + self.error_count += 1 + self.errors.append(line) + elif severity == "warning": + self.warning_count += 1 + self.warnings.append(line) + elif severity == "info": + self.info_count += 1 + if len(self.info_messages) < 20: # Keep only recent info + self.info_messages.append(line) + else: # debug + self.debug_count += 1 + + def stream_logs( + self, + follow: bool = False, + duration: float | None = None, + last_minutes: float | None = None, + ) -> bool: + """ + Stream logs from simulator. 
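+        Builds an `xcrun simctl spawn ... log stream` command, adding a process-name
+        predicate when an app bundle ID is set and a --start timestamp when reading
+        historical logs.
+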
+ + Args: + follow: Follow mode (continuous streaming) + duration: Capture duration in seconds + last_minutes: Show logs from last N minutes + + Returns: + True if successful + """ + # Build log stream command + cmd = ["xcrun", "simctl", "spawn", self.device_udid, "log", "stream"] + + # Add filters + if self.app_bundle_id: + # Filter by process name (extracted from bundle ID) + app_name = self.app_bundle_id.split(".")[-1] + cmd.extend(["--predicate", f'processImagePath CONTAINS "{app_name}"']) + + # Add time filter for historical logs + if last_minutes: + start_time = datetime.now() - timedelta(minutes=last_minutes) + time_str = start_time.strftime("%Y-%m-%d %H:%M:%S") + cmd.extend(["--start", time_str]) + + # Setup signal handler for graceful interruption + def signal_handler(sig, frame): + self.interrupted = True + if self.log_process: + self.log_process.terminate() + + signal.signal(signal.SIGINT, signal_handler) + + try: + # Start log streaming process + self.log_process = subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + bufsize=1, # Line buffered + ) + + # Track start time for duration + start_time = datetime.now() + + # Process log lines + for line in iter(self.log_process.stdout.readline, ""): + if not line: + break + + # Process the line + self.process_log_line(line.rstrip()) + + # Print in follow mode + if follow: + severity = self.classify_log_line(line) + if severity in self.severity_filter: + print(line.rstrip()) + + # Check duration + if duration and (datetime.now() - start_time).total_seconds() >= duration: + break + + # Check if interrupted + if self.interrupted: + break + + # Wait for process to finish + self.log_process.wait() + return True + + except Exception as e: + print(f"Error streaming logs: {e}", file=sys.stderr) + return False + + finally: + if self.log_process: + self.log_process.terminate() + + def get_summary(self, verbose: bool = False) -> str: + """ + Get log summary. + + Args: + verbose: Include full log details + + Returns: + Formatted summary string + """ + lines = [] + + # Header + if self.app_bundle_id: + lines.append(f"Logs for: {self.app_bundle_id}") + else: + lines.append("Logs for: All processes") + + # Statistics + lines.append(f"Total lines: {self.total_lines}") + lines.append( + f"Errors: {self.error_count}, Warnings: {self.warning_count}, Info: {self.info_count}" + ) + + # Top issues + if self.errors: + lines.append(f"\nTop Errors ({len(self.errors)}):") + for error in self.errors[:5]: # Show first 5 + lines.append(f" ❌ {error[:120]}") # Truncate long lines + + if self.warnings: + lines.append(f"\nTop Warnings ({len(self.warnings)}):") + for warning in self.warnings[:5]: # Show first 5 + lines.append(f" ⚠️ {warning[:120]}") + + # Verbose output + if verbose and self.log_lines: + lines.append("\n=== Recent Log Lines ===") + for line in self.log_lines[-50:]: # Last 50 lines + lines.append(line) + + return "\n".join(lines) + + def get_json_output(self) -> dict: + """Get log results as JSON.""" + return { + "app_bundle_id": self.app_bundle_id, + "device_udid": self.device_udid, + "statistics": { + "total_lines": self.total_lines, + "errors": self.error_count, + "warnings": self.warning_count, + "info": self.info_count, + "debug": self.debug_count, + }, + "errors": self.errors[:20], # Limit to 20 + "warnings": self.warnings[:20], + "sample_logs": self.log_lines[-50:], # Last 50 lines + } + + def save_logs(self, output_dir: str) -> str: + """ + Save logs to file. 
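+        Writes all captured lines to "<app>-<timestamp>.log" and a JSON summary
+        to "<app>-<timestamp>-summary.json" in the output directory.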
+ + Args: + output_dir: Directory to save logs + + Returns: + Path to saved log file + """ + # Create output directory + output_path = Path(output_dir) + output_path.mkdir(parents=True, exist_ok=True) + + # Generate filename with timestamp + timestamp = datetime.now().strftime("%Y%m%d-%H%M%S") + app_name = self.app_bundle_id.split(".")[-1] if self.app_bundle_id else "simulator" + log_file = output_path / f"{app_name}-{timestamp}.log" + + # Write all log lines + with open(log_file, "w") as f: + f.write("\n".join(self.log_lines)) + + # Also save JSON summary + json_file = output_path / f"{app_name}-{timestamp}-summary.json" + with open(json_file, "w") as f: + json.dump(self.get_json_output(), f, indent=2) + + return str(log_file) + + +def main(): + """Main entry point.""" + parser = argparse.ArgumentParser( + description="Monitor and analyze iOS simulator logs", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Monitor app in real-time + python scripts/log_monitor.py --app com.myapp.MyApp --follow + + # Capture logs for 30 seconds + python scripts/log_monitor.py --app com.myapp.MyApp --duration 30s + + # Show errors/warnings from last 5 minutes + python scripts/log_monitor.py --severity error,warning --last 5m + + # Save logs to file + python scripts/log_monitor.py --app com.myapp.MyApp --duration 1m --output logs/ + """, + ) + + # Filtering options + parser.add_argument( + "--app", dest="app_bundle_id", help="App bundle ID to filter logs (e.g., com.myapp.MyApp)" + ) + parser.add_argument("--device-udid", help="Device UDID (uses booted if not specified)") + parser.add_argument( + "--severity", help="Comma-separated severity levels (error,warning,info,debug)" + ) + + # Time options + time_group = parser.add_mutually_exclusive_group() + time_group.add_argument( + "--follow", action="store_true", help="Follow mode (continuous streaming)" + ) + time_group.add_argument("--duration", help="Capture duration (e.g., 30s, 5m, 1h)") + time_group.add_argument( + "--last", dest="last_minutes", help="Show logs from last N minutes (e.g., 5m)" + ) + + # Output options + parser.add_argument("--output", help="Save logs to directory") + parser.add_argument("--verbose", action="store_true", help="Show detailed output") + parser.add_argument("--json", action="store_true", help="Output as JSON") + + args = parser.parse_args() + + # Parse severity filter + severity_filter = None + if args.severity: + severity_filter = [s.strip().lower() for s in args.severity.split(",")] + + # Initialize monitor + monitor = LogMonitor( + app_bundle_id=args.app_bundle_id, + device_udid=args.device_udid, + severity_filter=severity_filter, + ) + + # Parse duration + duration = None + if args.duration: + duration = monitor.parse_time_duration(args.duration) + + # Parse last minutes + last_minutes = None + if args.last_minutes: + last_minutes = monitor.parse_time_duration(args.last_minutes) / 60 + + # Stream logs + print("Monitoring logs...", file=sys.stderr) + if args.app_bundle_id: + print(f"App: {args.app_bundle_id}", file=sys.stderr) + + success = monitor.stream_logs(follow=args.follow, duration=duration, last_minutes=last_minutes) + + if not success: + sys.exit(1) + + # Save logs if requested + if args.output: + log_file = monitor.save_logs(args.output) + print(f"\nLogs saved to: {log_file}", file=sys.stderr) + + # Output results + if not args.follow: # Don't show summary in follow mode + if args.json: + print(json.dumps(monitor.get_json_output(), indent=2)) + else: + print("\n" + 
monitor.get_summary(verbose=args.verbose)) + + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/skills/ios-simulator-testing/scripts/navigator.py b/skills/ios-simulator-testing/scripts/navigator.py new file mode 100755 index 000000000..ef66f1c20 --- /dev/null +++ b/skills/ios-simulator-testing/scripts/navigator.py @@ -0,0 +1,397 @@ +#!/usr/bin/env python3 +""" +iOS Simulator Navigator - Smart Element Finder and Interactor + +Finds and interacts with UI elements using accessibility data. +Prioritizes structured navigation over pixel-based interaction. + +This script is the core automation tool for iOS simulator navigation. It finds +UI elements by text, type, or accessibility ID and performs actions on them +(tap, enter text). Uses semantic element finding instead of fragile pixel coordinates. + +Key Features: +- Find elements by text (fuzzy or exact matching) +- Find elements by type (Button, TextField, etc.) +- Find elements by accessibility identifier +- Tap elements at their center point +- Enter text into text fields +- List all tappable elements on screen +- Automatic element caching for performance + +Usage Examples: + # Find and tap a button by text + python scripts/navigator.py --find-text "Login" --tap --udid + + # Enter text into first text field + python scripts/navigator.py --find-type TextField --index 0 --enter-text "username" --udid + + # Tap element by accessibility ID + python scripts/navigator.py --find-id "submitButton" --tap --udid + + # List all interactive elements + python scripts/navigator.py --list --udid + + # Tap at specific coordinates (fallback) + python scripts/navigator.py --tap-at 200,400 --udid + +Output Format: + Tapped: Button "Login" at (320, 450) + Entered text in: TextField "Username" + Not found: text='Submit' + +Navigation Priority (best to worst): + 1. Find by accessibility label/text (most reliable) + 2. Find by element type + index (good for forms) + 3. Find by accessibility ID (precise but app-specific) + 4. 
Tap at coordinates (last resort, fragile) + +Technical Details: +- Uses IDB's accessibility tree via `idb ui describe-all --json --nested` +- Caches tree for multiple operations (call with force_refresh to update) +- Finds elements by parsing tree recursively +- Calculates tap coordinates from element frame center +- Uses `idb ui tap` for tapping, `idb ui text` for text entry +- Extracts data from AXLabel, AXValue, and AXUniqueId fields +""" + +import argparse +import json +import subprocess +import sys +from dataclasses import dataclass + +from common import flatten_tree, get_accessibility_tree + + +@dataclass +class Element: + """Represents a UI element from accessibility tree.""" + + type: str + label: str | None + value: str | None + identifier: str | None + frame: dict[str, float] + traits: list[str] + enabled: bool = True + + @property + def center(self) -> tuple[int, int]: + """Calculate center point for tapping.""" + x = int(self.frame["x"] + self.frame["width"] / 2) + y = int(self.frame["y"] + self.frame["height"] / 2) + return (x, y) + + @property + def description(self) -> str: + """Human-readable description.""" + label = self.label or self.value or self.identifier or "Unnamed" + return f'{self.type} "{label}"' + + +class Navigator: + """Navigates iOS apps using accessibility data.""" + + def __init__(self, udid: str | None = None): + """Initialize navigator with optional device UDID.""" + self.udid = udid + self._tree_cache = None + + def get_accessibility_tree(self, force_refresh: bool = False) -> dict: + """Get accessibility tree (cached for efficiency).""" + if self._tree_cache and not force_refresh: + return self._tree_cache + + # Delegate to shared utility + self._tree_cache = get_accessibility_tree(self.udid, nested=True) + return self._tree_cache + + def _flatten_tree(self, node: dict, elements: list[Element] | None = None) -> list[Element]: + """Flatten accessibility tree into list of elements.""" + if elements is None: + elements = [] + + # Create element from node + if node.get("type"): + element = Element( + type=node.get("type", "Unknown"), + label=node.get("AXLabel"), + value=node.get("AXValue"), + identifier=node.get("AXUniqueId"), + frame=node.get("frame", {}), + traits=node.get("traits", []), + enabled=node.get("enabled", True), + ) + elements.append(element) + + # Process children + for child in node.get("children", []): + self._flatten_tree(child, elements) + + return elements + + def find_element( + self, + text: str | None = None, + element_type: str | None = None, + identifier: str | None = None, + index: int = 0, + fuzzy: bool = True, + ) -> Element | None: + """ + Find element by various criteria. + + Args: + text: Text to search in label/value + element_type: Type of element (Button, TextField, etc.) 
+ identifier: Accessibility identifier + index: Which matching element to return (0-based) + fuzzy: Use fuzzy matching for text + + Returns: + Element if found, None otherwise + """ + tree = self.get_accessibility_tree() + elements = self._flatten_tree(tree) + + matches = [] + + for elem in elements: + # Skip disabled elements + if not elem.enabled: + continue + + # Check type + if element_type and elem.type != element_type: + continue + + # Check identifier (exact match) + if identifier and elem.identifier != identifier: + continue + + # Check text (in label or value) + if text: + elem_text = (elem.label or "") + " " + (elem.value or "") + if fuzzy: + if text.lower() not in elem_text.lower(): + continue + elif text not in (elem.label, elem.value): + continue + + matches.append(elem) + + if matches and index < len(matches): + return matches[index] + + return None + + def tap(self, element: Element) -> bool: + """Tap on an element.""" + x, y = element.center + return self.tap_at(x, y) + + def tap_at(self, x: int, y: int) -> bool: + """Tap at specific coordinates.""" + cmd = ["idb", "ui", "tap", str(x), str(y)] + if self.udid: + cmd.extend(["--udid", self.udid]) + + try: + subprocess.run(cmd, capture_output=True, check=True) + return True + except subprocess.CalledProcessError: + return False + + def enter_text(self, text: str, element: Element | None = None) -> bool: + """ + Enter text into element or current focus. + + Args: + text: Text to enter + element: Optional element to tap first + + Returns: + Success status + """ + # Tap element if provided + if element: + if not self.tap(element): + return False + # Small delay for focus + import time + + time.sleep(0.5) + + # Enter text + cmd = ["idb", "ui", "text", text] + if self.udid: + cmd.extend(["--udid", self.udid]) + + try: + subprocess.run(cmd, capture_output=True, check=True) + return True + except subprocess.CalledProcessError: + return False + + def find_and_tap( + self, + text: str | None = None, + element_type: str | None = None, + identifier: str | None = None, + index: int = 0, + ) -> tuple[bool, str]: + """ + Find element and tap it. + + Returns: + (success, message) tuple + """ + element = self.find_element(text, element_type, identifier, index) + + if not element: + criteria = [] + if text: + criteria.append(f"text='{text}'") + if element_type: + criteria.append(f"type={element_type}") + if identifier: + criteria.append(f"id={identifier}") + return (False, f"Not found: {', '.join(criteria)}") + + if self.tap(element): + return (True, f"Tapped: {element.description} at {element.center}") + return (False, f"Failed to tap: {element.description}") + + def find_and_enter_text( + self, + text_to_enter: str, + find_text: str | None = None, + element_type: str | None = "TextField", + identifier: str | None = None, + index: int = 0, + ) -> tuple[bool, str]: + """ + Find element and enter text into it. 
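+        Targets the first enabled TextField when no other find criteria are provided.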
+ + Returns: + (success, message) tuple + """ + element = self.find_element(find_text, element_type, identifier, index) + + if not element: + return (False, "TextField not found") + + if self.enter_text(text_to_enter, element): + return (True, f"Entered text in: {element.description}") + return (False, "Failed to enter text") + + +def main(): + """Main entry point.""" + parser = argparse.ArgumentParser(description="Navigate iOS apps using accessibility data") + + # Finding options + parser.add_argument("--find-text", help="Find element by text (fuzzy match)") + parser.add_argument("--find-exact", help="Find element by exact text") + parser.add_argument("--find-type", help="Element type (Button, TextField, etc.)") + parser.add_argument("--find-id", help="Accessibility identifier") + parser.add_argument("--index", type=int, default=0, help="Which match to use (0-based)") + + # Action options + parser.add_argument("--tap", action="store_true", help="Tap the found element") + parser.add_argument("--tap-at", help="Tap at coordinates (x,y)") + parser.add_argument("--enter-text", help="Enter text into element") + + # Other options + parser.add_argument("--udid", help="Device UDID") + parser.add_argument("--list", action="store_true", help="List all tappable elements") + + args = parser.parse_args() + + navigator = Navigator(udid=args.udid) + + # List mode + if args.list: + tree = navigator.get_accessibility_tree() + elements = navigator._flatten_tree(tree) + + # Filter to tappable elements + tappable = [ + e + for e in elements + if e.enabled and e.type in ["Button", "Link", "Cell", "TextField", "SecureTextField"] + ] + + print(f"Tappable elements ({len(tappable)}):") + for elem in tappable[:10]: # Limit output for tokens + print(f" {elem.type}: \"{elem.label or elem.value or 'Unnamed'}\" {elem.center}") + + if len(tappable) > 10: + print(f" ... 
and {len(tappable) - 10} more") + sys.exit(0) + + # Direct tap at coordinates + if args.tap_at: + coords = args.tap_at.split(",") + if len(coords) != 2: + print("Error: --tap-at requires x,y format") + sys.exit(1) + + x, y = int(coords[0]), int(coords[1]) + if navigator.tap_at(x, y): + print(f"Tapped at ({x}, {y})") + else: + print(f"Failed to tap at ({x}, {y})") + sys.exit(1) + + # Find and tap + elif args.tap: + text = args.find_text or args.find_exact + fuzzy = args.find_text is not None + + success, message = navigator.find_and_tap( + text=text, element_type=args.find_type, identifier=args.find_id, index=args.index + ) + + print(message) + if not success: + sys.exit(1) + + # Find and enter text + elif args.enter_text: + text = args.find_text or args.find_exact + + success, message = navigator.find_and_enter_text( + text_to_enter=args.enter_text, + find_text=text, + element_type=args.find_type or "TextField", + identifier=args.find_id, + index=args.index, + ) + + print(message) + if not success: + sys.exit(1) + + # Just find (no action) + else: + text = args.find_text or args.find_exact + fuzzy = args.find_text is not None + + element = navigator.find_element( + text=text, + element_type=args.find_type, + identifier=args.find_id, + index=args.index, + fuzzy=fuzzy, + ) + + if element: + print(f"Found: {element.description} at {element.center}") + else: + print("Element not found") + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/skills/ios-simulator-testing/scripts/screen_mapper.py b/skills/ios-simulator-testing/scripts/screen_mapper.py new file mode 100755 index 000000000..34f5c12a7 --- /dev/null +++ b/skills/ios-simulator-testing/scripts/screen_mapper.py @@ -0,0 +1,282 @@ +#!/usr/bin/env python3 +""" +iOS Screen Mapper - Current Screen Analyzer + +Maps the current screen's UI elements for navigation decisions. +Provides token-efficient summaries of available interactions. + +This script analyzes the iOS simulator screen using IDB's accessibility tree +and provides a compact, actionable summary of what's currently visible and +interactive on the screen. Perfect for AI agents making navigation decisions. + +Key Features: +- Token-efficient output (5-7 lines by default) +- Identifies buttons, text fields, navigation elements +- Counts interactive and focusable elements +- Progressive detail with --verbose flag +- Navigation hints with --hints flag + +Usage Examples: + # Quick summary (default) + python scripts/screen_mapper.py --udid + + # Detailed element breakdown + python scripts/screen_mapper.py --udid --verbose + + # Include navigation suggestions + python scripts/screen_mapper.py --udid --hints + + # Full JSON output for parsing + python scripts/screen_mapper.py --udid --json + +Output Format (default): + Screen: LoginViewController (45 elements, 7 interactive) + Buttons: "Login", "Cancel", "Forgot Password" + TextFields: 2 (0 filled) + Navigation: NavBar: "Sign In" + Focusable: 7 elements + +Technical Details: +- Uses IDB's accessibility tree via `idb ui describe-all --json --nested` +- Parses IDB's array format: [{ root element with children }] +- Identifies element types: Button, TextField, NavigationBar, TabBar, etc. +- Extracts labels from AXLabel, AXValue, and AXUniqueId fields +""" + +import argparse +import json +import subprocess +import sys +from collections import defaultdict + +from common import get_accessibility_tree + + +class ScreenMapper: + """ + Analyzes current screen for navigation decisions. 
+ + This class fetches the iOS accessibility tree from IDB and analyzes it + to provide actionable summaries for navigation. It categorizes elements + by type, counts interactive elements, and identifies key UI patterns. + + Attributes: + udid (Optional[str]): Device UDID to target, or None for booted device + INTERACTIVE_TYPES (Set[str]): Element types that users can interact with + + Design Philosophy: + - Token efficiency: Provide minimal but complete information + - Progressive disclosure: Summary by default, details on request + - Navigation-focused: Highlight elements relevant for automation + """ + + # Element types we care about for navigation + # These are the accessibility element types that indicate user interaction points + INTERACTIVE_TYPES = { + "Button", + "Link", + "TextField", + "SecureTextField", + "Cell", + "Switch", + "Slider", + "Stepper", + "SegmentedControl", + "TabBar", + "NavigationBar", + "Toolbar", + } + + def __init__(self, udid: str | None = None): + """ + Initialize screen mapper. + + Args: + udid: Optional device UDID. If None, uses booted simulator. + + Example: + mapper = ScreenMapper(udid="656DC652-1C9F-4AB2-AD4F-F38E65976BDA") + mapper = ScreenMapper() # Uses booted device + """ + self.udid = udid + + def get_accessibility_tree(self) -> dict: + """ + Fetch accessibility tree from iOS simulator via IDB. + + Delegates to shared utility for consistent tree fetching across all scripts. + """ + return get_accessibility_tree(self.udid, nested=True) + + def analyze_tree(self, node: dict, depth: int = 0) -> dict: + """Analyze accessibility tree for navigation info.""" + analysis = { + "elements_by_type": defaultdict(list), + "total_elements": 0, + "interactive_elements": 0, + "text_fields": [], + "buttons": [], + "navigation": {}, + "screen_name": None, + "focusable": 0, + } + + self._analyze_recursive(node, analysis, depth) + + # Post-process for clean output + analysis["elements_by_type"] = dict(analysis["elements_by_type"]) + + return analysis + + def _analyze_recursive(self, node: dict, analysis: dict, depth: int): + """Recursively analyze tree nodes.""" + elem_type = node.get("type") + label = node.get("AXLabel", "") + value = node.get("AXValue", "") + identifier = node.get("AXUniqueId", "") + + # Count element + if elem_type: + analysis["total_elements"] += 1 + + # Track by type + if elem_type in self.INTERACTIVE_TYPES: + analysis["interactive_elements"] += 1 + + # Store concise info (label only, not full node) + elem_info = label or value or identifier or "Unnamed" + analysis["elements_by_type"][elem_type].append(elem_info) + + # Special handling for common types + if elem_type == "Button": + analysis["buttons"].append(elem_info) + elif elem_type in ("TextField", "SecureTextField"): + analysis["text_fields"].append( + {"type": elem_type, "label": elem_info, "has_value": bool(value)} + ) + elif elem_type == "NavigationBar": + analysis["navigation"]["nav_title"] = label or "Navigation" + elif elem_type == "TabBar": + # Count tab items + tab_count = len(node.get("children", [])) + analysis["navigation"]["tab_count"] = tab_count + + # Track focusable elements + if node.get("enabled", False) and elem_type in self.INTERACTIVE_TYPES: + analysis["focusable"] += 1 + + # Try to identify screen name from view controller + if not analysis["screen_name"] and identifier: + if "ViewController" in identifier or "Screen" in identifier: + analysis["screen_name"] = identifier + + # Process children + for child in node.get("children", []): + self._analyze_recursive(child, 
analysis, depth + 1) + + def format_summary(self, analysis: dict, verbose: bool = False) -> str: + """Format analysis as token-efficient summary.""" + lines = [] + + # Screen identification (1 line) + screen = analysis["screen_name"] or "Unknown Screen" + total = analysis["total_elements"] + interactive = analysis["interactive_elements"] + lines.append(f"Screen: {screen} ({total} elements, {interactive} interactive)") + + # Buttons summary (1 line) + if analysis["buttons"]: + button_list = ", ".join(f'"{b}"' for b in analysis["buttons"][:5]) + if len(analysis["buttons"]) > 5: + button_list += f" +{len(analysis['buttons']) - 5} more" + lines.append(f"Buttons: {button_list}") + + # Text fields summary (1 line) + if analysis["text_fields"]: + field_count = len(analysis["text_fields"]) + [f["type"] for f in analysis["text_fields"]] + filled = sum(1 for f in analysis["text_fields"] if f["has_value"]) + lines.append(f"TextFields: {field_count} ({filled} filled)") + + # Navigation summary (1 line) + nav_parts = [] + if "nav_title" in analysis["navigation"]: + nav_parts.append(f"NavBar: \"{analysis['navigation']['nav_title']}\"") + if "tab_count" in analysis["navigation"]: + nav_parts.append(f"TabBar: {analysis['navigation']['tab_count']} tabs") + if nav_parts: + lines.append(f"Navigation: {', '.join(nav_parts)}") + + # Focusable count (1 line) + lines.append(f"Focusable: {analysis['focusable']} elements") + + # Verbose mode adds element type breakdown + if verbose: + lines.append("\nElements by type:") + for elem_type, items in analysis["elements_by_type"].items(): + if items: # Only show types that exist + lines.append(f" {elem_type}: {len(items)}") + for item in items[:3]: # Show first 3 + lines.append(f" - {item}") + if len(items) > 3: + lines.append(f" ... 
+{len(items) - 3} more") + + return "\n".join(lines) + + def get_navigation_hints(self, analysis: dict) -> list[str]: + """Generate navigation hints based on screen analysis.""" + hints = [] + + # Check for common patterns + if "Login" in str(analysis.get("buttons", [])): + hints.append("Login screen detected - find TextFields for credentials") + + if analysis["text_fields"]: + unfilled = [f for f in analysis["text_fields"] if not f["has_value"]] + if unfilled: + hints.append(f"{len(unfilled)} empty text field(s) - may need input") + + if not analysis["buttons"] and not analysis["text_fields"]: + hints.append("No interactive elements - try swiping or going back") + + if "tab_count" in analysis.get("navigation", {}): + hints.append(f"Tab bar available with {analysis['navigation']['tab_count']} tabs") + + return hints + + +def main(): + """Main entry point.""" + parser = argparse.ArgumentParser(description="Map current screen UI elements") + parser.add_argument("--verbose", action="store_true", help="Show detailed element breakdown") + parser.add_argument("--json", action="store_true", help="Output raw JSON analysis") + parser.add_argument("--hints", action="store_true", help="Include navigation hints") + parser.add_argument("--udid", help="Device UDID") + + args = parser.parse_args() + + # Create mapper and analyze + mapper = ScreenMapper(udid=args.udid) + tree = mapper.get_accessibility_tree() + analysis = mapper.analyze_tree(tree) + + # Output based on format + if args.json: + # Full JSON (verbose) + print(json.dumps(analysis, indent=2, default=str)) + else: + # Token-efficient summary (default) + summary = mapper.format_summary(analysis, verbose=args.verbose) + print(summary) + + # Add hints if requested + if args.hints: + hints = mapper.get_navigation_hints(analysis) + if hints: + print("\nHints:") + for hint in hints: + print(f" - {hint}") + + +if __name__ == "__main__": + main() diff --git a/skills/ios-simulator-testing/scripts/sim_health_check.sh b/skills/ios-simulator-testing/scripts/sim_health_check.sh new file mode 100644 index 000000000..5ca10b957 --- /dev/null +++ b/skills/ios-simulator-testing/scripts/sim_health_check.sh @@ -0,0 +1,239 @@ +#!/usr/bin/env bash +# +# iOS Simulator Testing Environment Health Check +# +# Verifies that all required tools and dependencies are properly installed +# and configured for iOS simulator testing. +# +# Usage: bash scripts/sim_health_check.sh [--help] + +set -e + +# Color codes for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Check flags +SHOW_HELP=false + +# Parse arguments +for arg in "$@"; do + case $arg in + --help|-h) + SHOW_HELP=true + shift + ;; + esac +done + +if [ "$SHOW_HELP" = true ]; then + cat < /dev/null; then + XCODE_PATH=$(xcode-select -p 2>/dev/null || echo "not found") + if [ "$XCODE_PATH" != "not found" ]; then + XCODE_VERSION=$(xcodebuild -version 2>/dev/null | head -n 1 || echo "Unknown") + check_passed "Xcode Command Line Tools installed" + echo " Path: $XCODE_PATH" + echo " Version: $XCODE_VERSION" + else + check_failed "Xcode Command Line Tools path not set" + echo " Run: xcode-select --install" + fi +else + check_failed "xcrun command not found" + echo " Install Xcode Command Line Tools: xcode-select --install" +fi +echo "" + +# Check 3: simctl availability +echo -e "${BLUE}[3/8]${NC} Checking simctl (Simulator Control)..." 
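+# simctl comes with the Xcode Command Line Tools; a quick manual check is:
+#   xcrun simctl list devices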
+if command -v xcrun &> /dev/null && xcrun simctl help &> /dev/null; then + check_passed "simctl is available" +else + check_failed "simctl not available" + echo " simctl comes with Xcode Command Line Tools" +fi +echo "" + +# Check 4: IDB installation +echo -e "${BLUE}[4/8]${NC} Checking IDB (iOS Development Bridge)..." +if command -v idb &> /dev/null; then + IDB_PATH=$(which idb) + IDB_VERSION=$(idb --version 2>/dev/null || echo "Unknown") + check_passed "IDB is installed" + echo " Path: $IDB_PATH" + echo " Version: $IDB_VERSION" +else + check_warning "IDB not found in PATH" + echo " IDB is optional but provides advanced UI automation" + echo " Install: https://fbidb.io/docs/installation" + echo " Recommended: brew tap facebook/fb && brew install idb-companion" +fi +echo "" + +# Check 5: Python 3 installation +echo -e "${BLUE}[5/8]${NC} Checking Python 3..." +if command -v python3 &> /dev/null; then + PYTHON_VERSION=$(python3 --version | cut -d' ' -f2) + check_passed "Python 3 is installed (version $PYTHON_VERSION)" +else + check_failed "Python 3 not found" + echo " Python 3 is required for testing scripts" + echo " Install: brew install python3" +fi +echo "" + +# Check 6: Available simulators +echo -e "${BLUE}[6/8]${NC} Checking available iOS Simulators..." +if command -v xcrun &> /dev/null; then + SIMULATOR_COUNT=$(xcrun simctl list devices available 2>/dev/null | grep -c "iPhone\|iPad" || echo "0") + + if [ "$SIMULATOR_COUNT" -gt 0 ]; then + check_passed "Found $SIMULATOR_COUNT available simulator(s)" + + # Show first 5 simulators + echo "" + echo " Available simulators (showing up to 5):" + xcrun simctl list devices available 2>/dev/null | grep "iPhone\|iPad" | head -5 | while read -r line; do + echo " - $line" + done + else + check_warning "No simulators found" + echo " Create simulators via Xcode or simctl" + echo " Example: xcrun simctl create 'iPhone 15' 'iPhone 15'" + fi +else + check_failed "Cannot check simulators (simctl not available)" +fi +echo "" + +# Check 7: Booted simulators +echo -e "${BLUE}[7/8]${NC} Checking booted simulators..." +if command -v xcrun &> /dev/null; then + BOOTED_SIMS=$(xcrun simctl list devices booted 2>/dev/null | grep -c "iPhone\|iPad" || echo "0") + + if [ "$BOOTED_SIMS" -gt 0 ]; then + check_passed "$BOOTED_SIMS simulator(s) currently booted" + + echo "" + echo " Booted simulators:" + xcrun simctl list devices booted 2>/dev/null | grep "iPhone\|iPad" | while read -r line; do + echo " - $line" + done + else + check_warning "No simulators currently booted" + echo " Boot a simulator to begin testing" + echo " Example: xcrun simctl boot " + echo " Or: open -a Simulator" + fi +else + check_failed "Cannot check booted simulators (simctl not available)" +fi +echo "" + +# Check 8: Required Python packages (optional check) +echo -e "${BLUE}[8/8]${NC} Checking Python packages..." 
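+# Pillow is needed by visual_diff.py; the check below mirrors its "import PIL".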
+if command -v python3 &> /dev/null; then + MISSING_PACKAGES=() + + # Check for PIL/Pillow (for visual_diff.py) + if python3 -c "import PIL" 2>/dev/null; then + check_passed "Pillow (PIL) installed - visual diff available" + else + MISSING_PACKAGES+=("pillow") + check_warning "Pillow (PIL) not installed - visual diff won't work" + fi + + if [ ${#MISSING_PACKAGES[@]} -gt 0 ]; then + echo "" + echo " Install missing packages:" + echo " pip3 install ${MISSING_PACKAGES[*]}" + fi +else + check_warning "Cannot check Python packages (Python 3 not available)" +fi +echo "" + +# Summary +echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" +echo -e "${BLUE} Summary${NC}" +echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" +echo "" +echo -e "Checks passed: ${GREEN}$CHECKS_PASSED${NC}" +if [ "$CHECKS_FAILED" -gt 0 ]; then + echo -e "Checks failed: ${RED}$CHECKS_FAILED${NC}" + echo "" + echo -e "${YELLOW}Action required:${NC} Fix the failed checks above before testing" + exit 1 +else + echo "" + echo -e "${GREEN}✓ Environment is ready for iOS simulator testing${NC}" + echo "" + echo "Next steps:" + echo " 1. Boot a simulator: open -a Simulator" + echo " 2. Launch your app: xcrun simctl launch booted " + echo " 3. Run accessibility audit: python scripts/accessibility_audit.py" + exit 0 +fi diff --git a/skills/ios-simulator-testing/scripts/simulator_selector.py b/skills/ios-simulator-testing/scripts/simulator_selector.py new file mode 100755 index 000000000..d11254961 --- /dev/null +++ b/skills/ios-simulator-testing/scripts/simulator_selector.py @@ -0,0 +1,375 @@ +#!/usr/bin/env python3 +""" +Intelligent Simulator Selector + +Suggests the best available iOS simulators based on: +- Recently used (from config) +- Latest iOS version +- Common models for testing +- Boot status + +Usage Examples: + # Get suggestions for user selection + python scripts/simulator_selector.py --suggest + + # List all available simulators + python scripts/simulator_selector.py --list + + # Boot a specific simulator + python scripts/simulator_selector.py --boot "67A99DF0-27BD-4507-A3DE-B7D8C38F764A" + + # Get suggestions as JSON for programmatic use + python scripts/simulator_selector.py --suggest --json +""" + +import argparse +import json +import re +import subprocess +import sys +from datetime import datetime +from pathlib import Path +from typing import Optional + +# Try to import config from build_and_test if available +try: + from xcode.config import Config +except ImportError: + Config = None + + +class SimulatorInfo: + """Information about an iOS simulator.""" + + def __init__( + self, + name: str, + udid: str, + ios_version: str, + status: str, + ): + """Initialize simulator info.""" + self.name = name + self.udid = udid + self.ios_version = ios_version + self.status = status + self.reasons: list[str] = [] + + def to_dict(self) -> dict: + """Convert to dictionary.""" + return { + "device": self.name, + "udid": self.udid, + "ios": self.ios_version, + "status": self.status, + "reasons": self.reasons, + } + + +class SimulatorSelector: + """Intelligent simulator selection.""" + + # Common iPhone models ranked by testing priority + COMMON_MODELS = [ + "iPhone 16 Pro", + "iPhone 16", + "iPhone 15 Pro", + "iPhone 15", + "iPhone SE (3rd generation)", + ] + + def __init__(self): + """Initialize selector.""" + self.simulators: list[SimulatorInfo] = [] + self.config: dict | None = None + self.last_used_simulator: str | None = None + + # Load config if available + if Config: + try: + 
config = Config.load() + self.last_used_simulator = config.get_preferred_simulator() + except Exception: + pass + + def list_simulators(self) -> list[SimulatorInfo]: + """ + List all available simulators. + + Returns: + List of SimulatorInfo objects + """ + try: + result = subprocess.run( + ["xcrun", "simctl", "list", "devices", "--json"], + capture_output=True, + text=True, + check=True, + ) + + data = json.loads(result.stdout) + simulators = [] + + # Parse devices by iOS version + for runtime, devices in data.get("devices", {}).items(): + # Extract iOS version from runtime (e.g., "com.apple.CoreSimulator.SimRuntime.iOS-18-0") + ios_version_match = re.search(r"iOS-(\d+-\d+)", runtime) + if not ios_version_match: + continue + + ios_version = ios_version_match.group(1).replace("-", ".") + + for device in devices: + name = device.get("name", "") + udid = device.get("udid", "") + is_available = device.get("isAvailable", False) + + if not is_available or "iPhone" not in name: + continue + + status = device.get("state", "").capitalize() + sim_info = SimulatorInfo(name, udid, ios_version, status) + simulators.append(sim_info) + + self.simulators = simulators + return simulators + + except subprocess.CalledProcessError as e: + print(f"Error listing simulators: {e.stderr}", file=sys.stderr) + return [] + except json.JSONDecodeError as e: + print(f"Error parsing simulator list: {e}", file=sys.stderr) + return [] + + def get_suggestions(self, count: int = 4) -> list[SimulatorInfo]: + """ + Get top N suggested simulators. + + Ranking factors: + 1. Recently used (from config) + 2. Latest iOS version + 3. Common models + 4. Boot status (Booted preferred) + + Args: + count: Number of suggestions to return + + Returns: + List of suggested SimulatorInfo objects + """ + if not self.simulators: + return [] + + # Score each simulator + scored = [] + for sim in self.simulators: + score = self._score_simulator(sim) + scored.append((score, sim)) + + # Sort by score (descending) + scored.sort(key=lambda x: x[0], reverse=True) + + # Return top N + suggestions = [sim for _, sim in scored[:count]] + + # Add reasons to each suggestion + for i, sim in enumerate(suggestions, 1): + if i == 1: + sim.reasons.append("Recommended") + + # Check if recently used + if self.last_used_simulator and self.last_used_simulator == sim.name: + sim.reasons.append("Recently used") + + # Check if latest iOS + latest_ios = max(s.ios_version for s in self.simulators) + if sim.ios_version == latest_ios: + sim.reasons.append("Latest iOS") + + # Check if common model + for j, model in enumerate(self.COMMON_MODELS): + if model in sim.name: + sim.reasons.append(f"#{j+1} common model") + break + + # Check if booted + if sim.status == "Booted": + sim.reasons.append("Currently running") + + return suggestions + + def _score_simulator(self, sim: SimulatorInfo) -> float: + """ + Score a simulator for ranking. + + Higher score = better recommendation. 
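+        Weights: recently used +100, latest iOS +50, common model up to +30,
+        currently booted +10, plus a small numeric-iOS-version tie-breaker.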
+ + Args: + sim: Simulator to score + + Returns: + Score value + """ + score = 0.0 + + # Recently used gets highest priority (100 points) + if self.last_used_simulator and self.last_used_simulator == sim.name: + score += 100 + + # Latest iOS version (50 points) + latest_ios = max(s.ios_version for s in self.simulators) + if sim.ios_version == latest_ios: + score += 50 + + # Common models (30-20 points based on ranking) + for i, model in enumerate(self.COMMON_MODELS): + if model in sim.name: + score += 30 - (i * 2) # Higher ranking models get more points + break + + # Currently booted (10 points) + if sim.status == "Booted": + score += 10 + + # iOS version number (minor factor for breaking ties) + ios_numeric = float(sim.ios_version.replace(".", "")) + score += ios_numeric * 0.1 + + return score + + def boot_simulator(self, udid: str) -> bool: + """ + Boot a simulator. + + Args: + udid: Simulator UDID + + Returns: + True if successful, False otherwise + """ + try: + subprocess.run( + ["xcrun", "simctl", "boot", udid], + capture_output=True, + check=True, + ) + return True + except subprocess.CalledProcessError as e: + print(f"Error booting simulator: {e.stderr}", file=sys.stderr) + return False + + +def format_suggestions(suggestions: list[SimulatorInfo], json_format: bool = False) -> str: + """ + Format suggestions for output. + + Args: + suggestions: List of suggestions + json_format: If True, output as JSON + + Returns: + Formatted string + """ + if json_format: + data = {"suggestions": [s.to_dict() for s in suggestions]} + return json.dumps(data, indent=2) + + if not suggestions: + return "No simulators available" + + lines = ["Available Simulators:\n"] + for i, sim in enumerate(suggestions, 1): + lines.append(f"{i}. {sim.name} (iOS {sim.ios_version})") + if sim.reasons: + lines.append(f" {', '.join(sim.reasons)}") + lines.append(f" UDID: {sim.udid}") + lines.append("") + + return "\n".join(lines) + + +def main(): + """Main entry point.""" + parser = argparse.ArgumentParser( + description="Intelligent iOS simulator selector", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Get suggestions for user selection + python scripts/simulator_selector.py --suggest + + # List all available simulators + python scripts/simulator_selector.py --list + + # Boot a specific simulator + python scripts/simulator_selector.py --boot + + # Get suggestions as JSON + python scripts/simulator_selector.py --suggest --json + """, + ) + + parser.add_argument( + "--suggest", + action="store_true", + help="Get top simulator suggestions", + ) + parser.add_argument( + "--list", + action="store_true", + help="List all available simulators", + ) + parser.add_argument( + "--boot", + metavar="UDID", + help="Boot specific simulator by UDID", + ) + parser.add_argument( + "--json", + action="store_true", + help="Output as JSON", + ) + parser.add_argument( + "--count", + type=int, + default=4, + help="Number of suggestions (default: 4)", + ) + + args = parser.parse_args() + + selector = SimulatorSelector() + + if args.boot: + # Boot specific simulator + success = selector.boot_simulator(args.boot) + if success: + print(f"Booted simulator: {args.boot}") + return 0 + return 1 + + if args.list: + # List all simulators + simulators = selector.list_simulators() + output = format_suggestions(simulators, args.json) + print(output) + return 0 + + if args.suggest: + # Get suggestions + selector.list_simulators() + suggestions = selector.get_suggestions(args.count) + output = 
format_suggestions(suggestions, args.json) + print(output) + return 0 + + # Default: show suggestions + selector.list_simulators() + suggestions = selector.get_suggestions(args.count) + output = format_suggestions(suggestions, args.json) + print(output) + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/skills/ios-simulator-testing/scripts/test_recorder.py b/skills/ios-simulator-testing/scripts/test_recorder.py new file mode 100755 index 000000000..67f152a3a --- /dev/null +++ b/skills/ios-simulator-testing/scripts/test_recorder.py @@ -0,0 +1,233 @@ +#!/usr/bin/env python3 +""" +Test Recorder for iOS Simulator Testing + +Records test execution with automatic screenshots and documentation. +Optimized for minimal token output during execution. + +Usage: + As a script: python scripts/test_recorder.py --test-name "Test Name" --output dir/ + As a module: from scripts.test_recorder import TestRecorder +""" + +import argparse +import json +import subprocess +import time +from datetime import datetime +from pathlib import Path + +from common import count_elements, get_accessibility_tree + + +class TestRecorder: + """Records test execution with screenshots and accessibility snapshots.""" + + def __init__(self, test_name: str, output_dir: str = "test-artifacts", udid: str | None = None): + """ + Initialize test recorder. + + Args: + test_name: Name of the test being recorded + output_dir: Directory for test artifacts + udid: Optional device UDID (uses booted if not specified) + """ + self.test_name = test_name + self.udid = udid + self.start_time = time.time() + self.steps: list[dict] = [] + self.current_step = 0 + + # Create timestamped output directory + timestamp = datetime.now().strftime("%Y%m%d-%H%M%S") + safe_name = test_name.lower().replace(" ", "-") + self.output_dir = Path(output_dir) / f"{safe_name}-{timestamp}" + self.output_dir.mkdir(parents=True, exist_ok=True) + + # Create subdirectories + self.screenshots_dir = self.output_dir / "screenshots" + self.screenshots_dir.mkdir(exist_ok=True) + self.accessibility_dir = self.output_dir / "accessibility" + self.accessibility_dir.mkdir(exist_ok=True) + + # Token-efficient output + print(f"Recording: {test_name}") + print(f"Output: {self.output_dir}/") + + def step(self, description: str, assertion: str | None = None, metadata: dict | None = None): + """ + Record a test step with automatic screenshot. 
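+        Each call saves a numbered screenshot and accessibility snapshot, e.g.
+        "screenshots/001-launch-app.png" for step("Launch app").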
+ + Args: + description: Step description + assertion: Optional assertion to verify + metadata: Optional metadata for the step + """ + self.current_step += 1 + step_time = time.time() - self.start_time + + # Format step number with padding + step_num = f"{self.current_step:03d}" + safe_desc = description.lower().replace(" ", "-")[:30] + + # Capture screenshot + screenshot_path = self.screenshots_dir / f"{step_num}-{safe_desc}.png" + self._capture_screenshot(screenshot_path) + + # Capture accessibility tree + accessibility_path = self.accessibility_dir / f"{step_num}-{safe_desc}.json" + element_count = self._capture_accessibility(accessibility_path) + + # Store step data + step_data = { + "number": self.current_step, + "description": description, + "timestamp": step_time, + "screenshot": screenshot_path.name, + "accessibility": accessibility_path.name, + "element_count": element_count, + } + + if assertion: + step_data["assertion"] = assertion + step_data["assertion_passed"] = True # Would verify in real implementation + + if metadata: + step_data["metadata"] = metadata + + self.steps.append(step_data) + + # Token-efficient output (single line) + status = "✓" if not assertion or step_data.get("assertion_passed") else "✗" + print(f"{status} Step {self.current_step}: {description} ({step_time:.1f}s)") + + def _capture_screenshot(self, output_path: Path) -> bool: + """Capture screenshot using simctl.""" + cmd = ["xcrun", "simctl", "io"] + + if self.udid: + cmd.append(self.udid) + else: + cmd.append("booted") + + cmd.extend(["screenshot", str(output_path)]) + + try: + subprocess.run(cmd, capture_output=True, check=True) + return True + except subprocess.CalledProcessError: + return False + + def _capture_accessibility(self, output_path: Path) -> int: + """Capture accessibility tree and return element count.""" + try: + # Use shared utility to fetch tree + tree = get_accessibility_tree(self.udid, nested=True) + + # Save tree + with open(output_path, "w") as f: + json.dump(tree, f, indent=2) + + # Count elements using shared utility + return count_elements(tree) + except Exception: + return 0 + + def generate_report(self) -> dict[str, str]: + """ + Generate markdown test report. 
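+        Writes report.md and metadata.json into the recorder's output directory.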
+ + Returns: + Dictionary with paths to generated files + """ + duration = time.time() - self.start_time + report_path = self.output_dir / "report.md" + + # Generate markdown + with open(report_path, "w") as f: + f.write(f"# Test Report: {self.test_name}\n\n") + f.write(f"**Date:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n") + f.write(f"**Duration:** {duration:.1f} seconds\n") + f.write(f"**Steps:** {len(self.steps)}\n\n") + + # Steps section + f.write("## Test Steps\n\n") + for step in self.steps: + f.write( + f"### Step {step['number']}: {step['description']} ({step['timestamp']:.1f}s)\n\n" + ) + f.write(f"![Screenshot](screenshots/{step['screenshot']})\n\n") + + if step.get("assertion"): + status = "✓" if step.get("assertion_passed") else "✗" + f.write(f"**Assertion:** {step['assertion']} {status}\n\n") + + if step.get("metadata"): + f.write("**Metadata:**\n") + for key, value in step["metadata"].items(): + f.write(f"- {key}: {value}\n") + f.write("\n") + + f.write(f"**Accessibility Elements:** {step['element_count']}\n\n") + f.write("---\n\n") + + # Summary + f.write("## Summary\n\n") + f.write(f"- Total steps: {len(self.steps)}\n") + f.write(f"- Duration: {duration:.1f}s\n") + f.write(f"- Screenshots: {len(self.steps)}\n") + f.write(f"- Accessibility snapshots: {len(self.steps)}\n") + + # Save metadata JSON + metadata_path = self.output_dir / "metadata.json" + with open(metadata_path, "w") as f: + json.dump( + { + "test_name": self.test_name, + "duration": duration, + "steps": self.steps, + "timestamp": datetime.now().isoformat(), + }, + f, + indent=2, + ) + + # Token-efficient output + print(f"Report: {report_path}") + + return { + "markdown_path": str(report_path), + "metadata_path": str(metadata_path), + "output_dir": str(self.output_dir), + } + + +def main(): + """Main entry point for command-line usage.""" + parser = argparse.ArgumentParser( + description="Record test execution with screenshots and documentation" + ) + parser.add_argument("--test-name", required=True, help="Name of the test being recorded") + parser.add_argument( + "--output", default="test-artifacts", help="Output directory for test artifacts" + ) + parser.add_argument("--udid", help="Device UDID (uses booted if not specified)") + + args = parser.parse_args() + + # Create recorder + TestRecorder(test_name=args.test_name, output_dir=args.output, udid=args.udid) + + print("Test recorder initialized. Use the following methods:") + print(' recorder.step("description") - Record a test step') + print(" recorder.generate_report() - Generate final report") + print() + print("Example:") + print(' recorder.step("Launch app")') + print(' recorder.step("Enter credentials", metadata={"user": "test"})') + print(' recorder.step("Verify login", assertion="Home screen visible")') + print(" recorder.generate_report()") + + +if __name__ == "__main__": + main() diff --git a/skills/ios-simulator-testing/scripts/visual_diff.py b/skills/ios-simulator-testing/scripts/visual_diff.py new file mode 100755 index 000000000..23cb33a90 --- /dev/null +++ b/skills/ios-simulator-testing/scripts/visual_diff.py @@ -0,0 +1,235 @@ +#!/usr/bin/env python3 +""" +Visual Diff Tool for iOS Simulator Screenshots + +Compares two screenshots pixel-by-pixel to detect visual changes. +Optimized for minimal token output. 
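+Exits 0 when the changed-pixel ratio is within the threshold (1% by default) and 1
+otherwise, so it can be used as a pass/fail gate in test scripts.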
+ +Usage: python scripts/visual_diff.py baseline.png current.png [options] +""" + +import argparse +import json +import os +import sys +from pathlib import Path + +try: + from PIL import Image, ImageChops, ImageDraw +except ImportError: + print("Error: Pillow not installed. Run: pip3 install pillow") + sys.exit(1) + + +class VisualDiffer: + """Performs visual comparison between screenshots.""" + + def __init__(self, threshold: float = 0.01): + """ + Initialize differ with threshold. + + Args: + threshold: Maximum acceptable difference ratio (0.01 = 1%) + """ + self.threshold = threshold + + def compare(self, baseline_path: str, current_path: str) -> dict: + """ + Compare two images and return difference metrics. + + Args: + baseline_path: Path to baseline image + current_path: Path to current image + + Returns: + Dictionary with comparison results + """ + # Load images + try: + baseline = Image.open(baseline_path) + current = Image.open(current_path) + except FileNotFoundError as e: + print(f"Error: Image not found - {e}") + sys.exit(1) + except Exception as e: + print(f"Error: Failed to load image - {e}") + sys.exit(1) + + # Verify dimensions match + if baseline.size != current.size: + return { + "error": "Image dimensions do not match", + "baseline_size": baseline.size, + "current_size": current.size, + } + + # Convert to RGB if needed + if baseline.mode != "RGB": + baseline = baseline.convert("RGB") + if current.mode != "RGB": + current = current.convert("RGB") + + # Calculate difference + diff = ImageChops.difference(baseline, current) + + # Calculate metrics + total_pixels = baseline.size[0] * baseline.size[1] + diff_pixels = self._count_different_pixels(diff) + diff_percentage = (diff_pixels / total_pixels) * 100 + + # Determine pass/fail + passed = diff_percentage <= (self.threshold * 100) + + return { + "dimensions": baseline.size, + "total_pixels": total_pixels, + "different_pixels": diff_pixels, + "difference_percentage": round(diff_percentage, 2), + "threshold_percentage": self.threshold * 100, + "passed": passed, + "verdict": "PASS" if passed else "FAIL", + } + + def _count_different_pixels(self, diff_image: Image.Image) -> int: + """Count number of pixels that are different.""" + # Convert to grayscale for easier processing + diff_gray = diff_image.convert("L") + + # Count non-zero pixels (different) + pixels = diff_gray.getdata() + return sum(1 for pixel in pixels if pixel > 10) # Threshold for noise + + def generate_diff_image(self, baseline_path: str, current_path: str, output_path: str) -> None: + """Generate highlighted difference image.""" + baseline = Image.open(baseline_path).convert("RGB") + current = Image.open(current_path).convert("RGB") + + # Create difference image + diff = ImageChops.difference(baseline, current) + + # Enhance differences with red overlay + diff_enhanced = Image.new("RGB", baseline.size) + for x in range(baseline.size[0]): + for y in range(baseline.size[1]): + diff_pixel = diff.getpixel((x, y)) + if sum(diff_pixel) > 30: # Threshold for visibility + # Highlight in red + diff_enhanced.putpixel((x, y), (255, 0, 0)) + else: + # Keep original + diff_enhanced.putpixel((x, y), current.getpixel((x, y))) + + diff_enhanced.save(output_path) + + def generate_side_by_side( + self, baseline_path: str, current_path: str, output_path: str + ) -> None: + """Generate side-by-side comparison image.""" + baseline = Image.open(baseline_path) + current = Image.open(current_path) + + # Create combined image + width = baseline.size[0] * 2 + 10 # 10px separator + 
height = max(baseline.size[1], current.size[1]) + combined = Image.new("RGB", (width, height), color=(128, 128, 128)) + + # Paste images + combined.paste(baseline, (0, 0)) + combined.paste(current, (baseline.size[0] + 10, 0)) + + combined.save(output_path) + + +def main(): + """Main entry point.""" + parser = argparse.ArgumentParser(description="Compare screenshots for visual differences") + parser.add_argument("baseline", help="Path to baseline screenshot") + parser.add_argument("current", help="Path to current screenshot") + parser.add_argument( + "--output", + default=".", + help="Output directory for diff artifacts (default: current directory)", + ) + parser.add_argument( + "--threshold", + type=float, + default=0.01, + help="Acceptable difference threshold (0.01 = 1%%, default: 0.01)", + ) + parser.add_argument( + "--details", action="store_true", help="Show detailed output (increases tokens)" + ) + + args = parser.parse_args() + + # Create output directory if needed + output_dir = Path(args.output) + output_dir.mkdir(parents=True, exist_ok=True) + + # Initialize differ + differ = VisualDiffer(threshold=args.threshold) + + # Perform comparison + result = differ.compare(args.baseline, args.current) + + # Handle dimension mismatch + if "error" in result: + print(f"Error: {result['error']}") + print(f"Baseline: {result['baseline_size']}") + print(f"Current: {result['current_size']}") + sys.exit(1) + + # Generate artifacts + diff_image_path = output_dir / "diff.png" + comparison_image_path = output_dir / "side-by-side.png" + + try: + differ.generate_diff_image(args.baseline, args.current, str(diff_image_path)) + differ.generate_side_by_side(args.baseline, args.current, str(comparison_image_path)) + except Exception as e: + print(f"Warning: Could not generate images - {e}") + + # Output results (token-optimized) + if args.details: + # Detailed output + report = { + "summary": { + "baseline": args.baseline, + "current": args.current, + "threshold": args.threshold, + "passed": result["passed"], + }, + "results": result, + "artifacts": { + "diff_image": str(diff_image_path), + "comparison_image": str(comparison_image_path), + }, + } + print(json.dumps(report, indent=2)) + else: + # Minimal output (default) + print(f"Difference: {result['difference_percentage']}% ({result['verdict']})") + if result["different_pixels"] > 0: + print(f"Changed pixels: {result['different_pixels']:,}") + print(f"Artifacts saved to: {output_dir}/") + + # Save JSON report + report_path = output_dir / "diff-report.json" + with open(report_path, "w") as f: + json.dump( + { + "baseline": os.path.basename(args.baseline), + "current": os.path.basename(args.current), + "results": result, + "artifacts": {"diff": "diff.png", "comparison": "side-by-side.png"}, + }, + f, + indent=2, + ) + + # Exit with error if test failed + sys.exit(0 if result["passed"] else 1) + + +if __name__ == "__main__": + main() diff --git a/skills/ios-simulator-testing/scripts/xcode/__init__.py b/skills/ios-simulator-testing/scripts/xcode/__init__.py new file mode 100644 index 000000000..450f7ee00 --- /dev/null +++ b/skills/ios-simulator-testing/scripts/xcode/__init__.py @@ -0,0 +1,13 @@ +""" +Xcode build automation module. + +Provides structured, modular access to xcodebuild and xcresult functionality. 
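+Config supplies simulator and build preferences, BuildRunner drives xcodebuild and
+produces xcresult bundles, and XCResultCache manages where those bundles are stored.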
+""" + +from .builder import BuildRunner +from .cache import XCResultCache +from .config import Config +from .reporter import OutputFormatter +from .xcresult import XCResultParser + +__all__ = ["BuildRunner", "Config", "OutputFormatter", "XCResultCache", "XCResultParser"] diff --git a/skills/ios-simulator-testing/scripts/xcode/builder.py b/skills/ios-simulator-testing/scripts/xcode/builder.py new file mode 100644 index 000000000..590c82be4 --- /dev/null +++ b/skills/ios-simulator-testing/scripts/xcode/builder.py @@ -0,0 +1,381 @@ +""" +Xcode build execution. + +Handles xcodebuild command construction and execution with xcresult generation. +""" + +import re +import subprocess +import sys +from pathlib import Path + +from .cache import XCResultCache +from .config import Config + + +class BuildRunner: + """ + Execute xcodebuild commands with xcresult bundle generation. + + Handles scheme auto-detection, command construction, and build/test execution. + """ + + def __init__( + self, + project_path: str | None = None, + workspace_path: str | None = None, + scheme: str | None = None, + configuration: str = "Debug", + simulator: str | None = None, + cache: XCResultCache | None = None, + ): + """ + Initialize build runner. + + Args: + project_path: Path to .xcodeproj + workspace_path: Path to .xcworkspace + scheme: Build scheme (auto-detected if not provided) + configuration: Build configuration (Debug/Release) + simulator: Simulator name + cache: XCResult cache (creates default if not provided) + """ + self.project_path = project_path + self.workspace_path = workspace_path + self.scheme = scheme + self.configuration = configuration + self.simulator = simulator + self.cache = cache or XCResultCache() + + def auto_detect_scheme(self) -> str | None: + """ + Auto-detect build scheme from project/workspace. + + Returns: + Detected scheme name or None + """ + cmd = ["xcodebuild", "-list"] + + if self.workspace_path: + cmd.extend(["-workspace", self.workspace_path]) + elif self.project_path: + cmd.extend(["-project", self.project_path]) + else: + return None + + try: + result = subprocess.run(cmd, capture_output=True, text=True, check=True) + + # Parse schemes from output + in_schemes_section = False + for line in result.stdout.split("\n"): + line = line.strip() + + if "Schemes:" in line: + in_schemes_section = True + continue + + if in_schemes_section and line and not line.startswith("Build"): + # First scheme in list + return line + + except subprocess.CalledProcessError as e: + print(f"Error auto-detecting scheme: {e}", file=sys.stderr) + + return None + + def get_simulator_destination(self) -> str: + """ + Get xcodebuild destination string. + + Uses config preferences with fallback to auto-detection. + + Priority: + 1. --simulator CLI flag (self.simulator) + 2. Config preferred_simulator + 3. Config last_used_simulator + 4. Auto-detect first iPhone + 5. 
Generic iOS Simulator + + Returns: + Destination string for -destination flag + """ + # Priority 1: CLI flag + if self.simulator: + return f"platform=iOS Simulator,name={self.simulator}" + + # Priority 2-3: Config preferences + try: + # Determine project directory from project/workspace path + project_dir = None + if self.project_path: + project_dir = Path(self.project_path).parent + elif self.workspace_path: + project_dir = Path(self.workspace_path).parent + + config = Config.load(project_dir=project_dir) + preferred = config.get_preferred_simulator() + + if preferred: + # Check if preferred simulator exists + if self._simulator_exists(preferred): + return f"platform=iOS Simulator,name={preferred}" + print(f"Warning: Preferred simulator '{preferred}' not available", file=sys.stderr) + if config.should_fallback_to_any_iphone(): + print("Falling back to auto-detection...", file=sys.stderr) + else: + # Strict mode: don't fallback + return f"platform=iOS Simulator,name={preferred}" + + except Exception as e: + print(f"Warning: Could not load config: {e}", file=sys.stderr) + + # Priority 4-5: Auto-detect + return self._auto_detect_simulator() + + def _simulator_exists(self, name: str) -> bool: + """ + Check if simulator with given name exists and is available. + + Args: + name: Simulator name (e.g., "iPhone 16 Pro") + + Returns: + True if simulator exists and is available + """ + try: + result = subprocess.run( + ["xcrun", "simctl", "list", "devices", "available", "iOS"], + capture_output=True, + text=True, + check=True, + ) + + # Check if simulator name appears in available devices + return any(name in line and "(" in line for line in result.stdout.split("\n")) + + except subprocess.CalledProcessError: + return False + + def _extract_simulator_name_from_destination(self, destination: str) -> str | None: + """ + Extract simulator name from destination string. + + Args: + destination: Destination string (e.g., "platform=iOS Simulator,name=iPhone 16 Pro") + + Returns: + Simulator name or None + """ + # Pattern: name= + match = re.search(r"name=([^,]+)", destination) + if match: + return match.group(1).strip() + return None + + def _auto_detect_simulator(self) -> str: + """ + Auto-detect best available iOS simulator. + + Returns: + Destination string for -destination flag + """ + try: + result = subprocess.run( + ["xcrun", "simctl", "list", "devices", "available", "iOS"], + capture_output=True, + text=True, + check=True, + ) + + # Parse available simulators, prefer latest iPhone + # Looking for lines like: "iPhone 16 Pro (12345678-1234-1234-1234-123456789012) (Shutdown)" + for line in result.stdout.split("\n"): + if "iPhone" in line and "(" in line: + # Extract device name + name = line.split("(")[0].strip() + if name: + return f"platform=iOS Simulator,name={name}" + + # Fallback to generic iOS Simulator if no iPhone found + return "generic/platform=iOS Simulator" + + except subprocess.CalledProcessError as e: + print(f"Warning: Could not auto-detect simulator: {e}", file=sys.stderr) + return "generic/platform=iOS Simulator" + + def build(self, clean: bool = False) -> tuple[bool, str, str]: + """ + Build the project. + + Args: + clean: Perform clean build + + Returns: + Tuple of (success: bool, xcresult_id: str, stderr: str) + """ + # Auto-detect scheme if needed + if not self.scheme: + self.scheme = self.auto_detect_scheme() + if not self.scheme: + print("Error: Could not auto-detect scheme. 
Use --scheme", file=sys.stderr) + return (False, "", "") + + # Generate xcresult ID and path + xcresult_id = self.cache.generate_id() + xcresult_path = self.cache.get_path(xcresult_id) + + # Build command + cmd = ["xcodebuild", "-quiet"] # Suppress verbose output + + if clean: + cmd.append("clean") + + cmd.append("build") + + if self.workspace_path: + cmd.extend(["-workspace", self.workspace_path]) + elif self.project_path: + cmd.extend(["-project", self.project_path]) + else: + print("Error: No project or workspace specified", file=sys.stderr) + return (False, "", "") + + cmd.extend( + [ + "-scheme", + self.scheme, + "-configuration", + self.configuration, + "-destination", + self.get_simulator_destination(), + "-resultBundlePath", + str(xcresult_path), + ] + ) + + # Execute build + try: + result = subprocess.run( + cmd, capture_output=True, text=True, check=False # Don't raise on non-zero exit + ) + + success = result.returncode == 0 + + # xcresult bundle should be created even on failure + if not xcresult_path.exists(): + print("Warning: xcresult bundle was not created", file=sys.stderr) + return (success, "", result.stderr) + + # Auto-update config with last used simulator (on success only) + if success: + try: + # Determine project directory from project/workspace path + project_dir = None + if self.project_path: + project_dir = Path(self.project_path).parent + elif self.workspace_path: + project_dir = Path(self.workspace_path).parent + + config = Config.load(project_dir=project_dir) + destination = self.get_simulator_destination() + simulator_name = self._extract_simulator_name_from_destination(destination) + + if simulator_name: + config.update_last_used_simulator(simulator_name) + config.save() + + except Exception as e: + # Don't fail build if config update fails + print(f"Warning: Could not update config: {e}", file=sys.stderr) + + return (success, xcresult_id, result.stderr) + + except Exception as e: + print(f"Error executing build: {e}", file=sys.stderr) + return (False, "", str(e)) + + def test(self, test_suite: str | None = None) -> tuple[bool, str, str]: + """ + Run tests. + + Args: + test_suite: Specific test suite to run + + Returns: + Tuple of (success: bool, xcresult_id: str, stderr: str) + """ + # Auto-detect scheme if needed + if not self.scheme: + self.scheme = self.auto_detect_scheme() + if not self.scheme: + print("Error: Could not auto-detect scheme. 
Use --scheme", file=sys.stderr) + return (False, "", "") + + # Generate xcresult ID and path + xcresult_id = self.cache.generate_id() + xcresult_path = self.cache.get_path(xcresult_id) + + # Build command + cmd = ["xcodebuild", "-quiet", "test"] + + if self.workspace_path: + cmd.extend(["-workspace", self.workspace_path]) + elif self.project_path: + cmd.extend(["-project", self.project_path]) + else: + print("Error: No project or workspace specified", file=sys.stderr) + return (False, "", "") + + cmd.extend( + [ + "-scheme", + self.scheme, + "-destination", + self.get_simulator_destination(), + "-resultBundlePath", + str(xcresult_path), + ] + ) + + if test_suite: + cmd.extend(["-only-testing", test_suite]) + + # Execute tests + try: + result = subprocess.run(cmd, capture_output=True, text=True, check=False) + + success = result.returncode == 0 + + # xcresult bundle should be created even on failure + if not xcresult_path.exists(): + print("Warning: xcresult bundle was not created", file=sys.stderr) + return (success, "", result.stderr) + + # Auto-update config with last used simulator (on success only) + if success: + try: + # Determine project directory from project/workspace path + project_dir = None + if self.project_path: + project_dir = Path(self.project_path).parent + elif self.workspace_path: + project_dir = Path(self.workspace_path).parent + + config = Config.load(project_dir=project_dir) + destination = self.get_simulator_destination() + simulator_name = self._extract_simulator_name_from_destination(destination) + + if simulator_name: + config.update_last_used_simulator(simulator_name) + config.save() + + except Exception as e: + # Don't fail test if config update fails + print(f"Warning: Could not update config: {e}", file=sys.stderr) + + return (success, xcresult_id, result.stderr) + + except Exception as e: + print(f"Error executing tests: {e}", file=sys.stderr) + return (False, "", str(e)) diff --git a/skills/ios-simulator-testing/scripts/xcode/cache.py b/skills/ios-simulator-testing/scripts/xcode/cache.py new file mode 100644 index 000000000..02f4d0737 --- /dev/null +++ b/skills/ios-simulator-testing/scripts/xcode/cache.py @@ -0,0 +1,204 @@ +""" +XCResult cache management. + +Handles storage, retrieval, and lifecycle of xcresult bundles for progressive disclosure. +""" + +import shutil +from datetime import datetime +from pathlib import Path + + +class XCResultCache: + """ + Manage xcresult bundle cache for progressive disclosure. + + Stores xcresult bundles with timestamp-based IDs and provides + retrieval and cleanup operations. + """ + + # Default cache directory + DEFAULT_CACHE_DIR = Path.home() / ".ios-simulator-skill" / "xcresults" + + def __init__(self, cache_dir: Path | None = None): + """ + Initialize cache manager. + + Args: + cache_dir: Custom cache directory (uses default if not specified) + """ + self.cache_dir = cache_dir or self.DEFAULT_CACHE_DIR + self.cache_dir.mkdir(parents=True, exist_ok=True) + + def generate_id(self, prefix: str = "xcresult") -> str: + """ + Generate timestamped xcresult ID. + + Args: + prefix: ID prefix (default: "xcresult") + + Returns: + ID string like "xcresult-20251018-143052" + """ + timestamp = datetime.now().strftime("%Y%m%d-%H%M%S") + return f"{prefix}-{timestamp}" + + def get_path(self, xcresult_id: str) -> Path: + """ + Get full path for xcresult ID. 
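+
+        Accepts IDs with or without the .xcresult extension and resolves them
+        inside the cache directory.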
+ + Args: + xcresult_id: XCResult ID + + Returns: + Path to xcresult bundle + """ + # Handle both with and without .xcresult extension + if xcresult_id.endswith(".xcresult"): + return self.cache_dir / xcresult_id + return self.cache_dir / f"{xcresult_id}.xcresult" + + def exists(self, xcresult_id: str) -> bool: + """ + Check if xcresult bundle exists. + + Args: + xcresult_id: XCResult ID + + Returns: + True if bundle exists + """ + return self.get_path(xcresult_id).exists() + + def save(self, source_path: Path, xcresult_id: str | None = None) -> str: + """ + Save xcresult bundle to cache. + + Args: + source_path: Source xcresult bundle path + xcresult_id: Optional custom ID (generates if not provided) + + Returns: + xcresult ID + """ + if not source_path.exists(): + raise FileNotFoundError(f"Source xcresult not found: {source_path}") + + # Generate ID if not provided + if not xcresult_id: + xcresult_id = self.generate_id() + + # Get destination path + dest_path = self.get_path(xcresult_id) + + # Copy xcresult bundle (it's a directory) + if dest_path.exists(): + shutil.rmtree(dest_path) + + shutil.copytree(source_path, dest_path) + + return xcresult_id + + def list(self, limit: int = 10) -> list[dict]: + """ + List recent xcresult bundles. + + Args: + limit: Maximum number to return + + Returns: + List of xcresult metadata dicts + """ + if not self.cache_dir.exists(): + return [] + + results = [] + for path in sorted( + self.cache_dir.glob("*.xcresult"), key=lambda p: p.stat().st_mtime, reverse=True + )[:limit]: + # Calculate bundle size + size_bytes = sum(f.stat().st_size for f in path.rglob("*") if f.is_file()) + + results.append( + { + "id": path.stem, + "path": str(path), + "created": datetime.fromtimestamp(path.stat().st_mtime).isoformat(), + "size_mb": round(size_bytes / (1024 * 1024), 2), + } + ) + + return results + + def cleanup(self, keep_recent: int = 20) -> int: + """ + Clean up old xcresult bundles. + + Args: + keep_recent: Number of recent bundles to keep + + Returns: + Number of bundles removed + """ + if not self.cache_dir.exists(): + return 0 + + # Get all bundles sorted by modification time + all_bundles = sorted( + self.cache_dir.glob("*.xcresult"), key=lambda p: p.stat().st_mtime, reverse=True + ) + + # Remove old bundles + removed = 0 + for bundle_path in all_bundles[keep_recent:]: + shutil.rmtree(bundle_path) + removed += 1 + + return removed + + def get_size_mb(self, xcresult_id: str) -> float: + """ + Get size of xcresult bundle in MB. + + Args: + xcresult_id: XCResult ID + + Returns: + Size in MB + """ + path = self.get_path(xcresult_id) + if not path.exists(): + return 0.0 + + size_bytes = sum(f.stat().st_size for f in path.rglob("*") if f.is_file()) + return round(size_bytes / (1024 * 1024), 2) + + def save_stderr(self, xcresult_id: str, stderr: str) -> None: + """ + Save stderr output alongside xcresult bundle. + + Args: + xcresult_id: XCResult ID + stderr: stderr output from xcodebuild + """ + if not stderr: + return + + stderr_path = self.cache_dir / f"{xcresult_id}.stderr" + stderr_path.write_text(stderr, encoding="utf-8") + + def get_stderr(self, xcresult_id: str) -> str: + """ + Retrieve cached stderr output. 
+ + Args: + xcresult_id: XCResult ID + + Returns: + stderr content or empty string if not found + """ + stderr_path = self.cache_dir / f"{xcresult_id}.stderr" + if not stderr_path.exists(): + return "" + + return stderr_path.read_text(encoding="utf-8") diff --git a/skills/ios-simulator-testing/scripts/xcode/config.py b/skills/ios-simulator-testing/scripts/xcode/config.py new file mode 100644 index 000000000..52abe1bda --- /dev/null +++ b/skills/ios-simulator-testing/scripts/xcode/config.py @@ -0,0 +1,178 @@ +""" +Configuration management for iOS Simulator Skill. + +Handles loading, validation, and auto-updating of project-local config files. +""" + +import json +import sys +from datetime import datetime +from pathlib import Path +from typing import Any + + +class Config: + """ + Project-local configuration with auto-learning. + + Config file location: .claude/skills//config.json + + The skill directory name is auto-detected from the installation location, + so configs work regardless of what users name the skill directory. + + Auto-updates last_used_simulator after successful builds. + """ + + DEFAULT_CONFIG = { + "device": { + "preferred_simulator": None, + "preferred_os_version": None, + "fallback_to_any_iphone": True, + "last_used_simulator": None, + "last_used_at": None, + } + } + + def __init__(self, data: dict[str, Any], config_path: Path): + """ + Initialize config. + + Args: + data: Config data dict + config_path: Path to config file + """ + self.data = data + self.config_path = config_path + + @staticmethod + def load(project_dir: Path | None = None) -> "Config": + """ + Load config from project directory. + + Args: + project_dir: Project root (defaults to cwd) + + Returns: + Config instance (creates default if not found) + + Note: + The skill directory name is auto-detected from the installation location, + so configs work regardless of what users name the skill directory. + """ + if project_dir is None: + project_dir = Path.cwd() + + # Auto-detect skill directory name from actual installation location + # This file is at: skill/scripts/xcode/config.py + # Navigate up to skill/ directory and use its name + skill_root = Path(__file__).parent.parent.parent # xcode/ -> scripts/ -> skill/ + skill_name = skill_root.name + + config_path = project_dir / ".claude" / "skills" / skill_name / "config.json" + + # Load existing config + if config_path.exists(): + try: + with open(config_path) as f: + data = json.load(f) + + # Merge with defaults (in case new fields added) + merged = Config._merge_with_defaults(data) + return Config(merged, config_path) + + except json.JSONDecodeError as e: + print(f"Warning: Invalid JSON in {config_path}: {e}", file=sys.stderr) + print("Using default config", file=sys.stderr) + return Config(Config.DEFAULT_CONFIG.copy(), config_path) + except Exception as e: + print(f"Warning: Could not load config: {e}", file=sys.stderr) + return Config(Config.DEFAULT_CONFIG.copy(), config_path) + + # Return default config (will be created on first save) + return Config(Config.DEFAULT_CONFIG.copy(), config_path) + + @staticmethod + def _merge_with_defaults(data: dict[str, Any]) -> dict[str, Any]: + """ + Merge user config with defaults. + + Args: + data: User config data + + Returns: + Merged config with all default fields + """ + merged = Config.DEFAULT_CONFIG.copy() + + # Deep merge device section + if "device" in data: + merged["device"].update(data["device"]) + + return merged + + def save(self) -> None: + """ + Save config to file atomically. 
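+
+        A saved file typically looks like this (illustrative values; the
+        last_used fields are filled in after a successful build):
+
+            {
+              "device": {
+                "preferred_simulator": null,
+                "preferred_os_version": null,
+                "fallback_to_any_iphone": true,
+                "last_used_simulator": "iPhone 16 Pro",
+                "last_used_at": "2025-10-18T14:30:52Z"
+              }
+            }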
+ + Uses temp file + rename for atomic writes. + Creates parent directories if needed. + """ + try: + # Create parent directories + self.config_path.parent.mkdir(parents=True, exist_ok=True) + + # Atomic write: temp file + rename + temp_path = self.config_path.with_suffix(".tmp") + + with open(temp_path, "w") as f: + json.dump(self.data, f, indent=2) + f.write("\n") # Trailing newline + + # Atomic rename + temp_path.replace(self.config_path) + + except Exception as e: + print(f"Warning: Could not save config: {e}", file=sys.stderr) + + def update_last_used_simulator(self, name: str) -> None: + """ + Update last used simulator and timestamp. + + Args: + name: Simulator name (e.g., "iPhone 16 Pro") + """ + self.data["device"]["last_used_simulator"] = name + self.data["device"]["last_used_at"] = datetime.utcnow().isoformat() + "Z" + + def get_preferred_simulator(self) -> str | None: + """ + Get preferred simulator. + + Returns: + Simulator name or None + + Priority: + 1. preferred_simulator (manual preference) + 2. last_used_simulator (auto-learned) + 3. None (use auto-detection) + """ + device = self.data.get("device", {}) + + # Manual preference takes priority + if device.get("preferred_simulator"): + return device["preferred_simulator"] + + # Auto-learned preference + if device.get("last_used_simulator"): + return device["last_used_simulator"] + + return None + + def should_fallback_to_any_iphone(self) -> bool: + """ + Check if fallback to any iPhone is enabled. + + Returns: + True if should fallback, False otherwise + """ + return self.data.get("device", {}).get("fallback_to_any_iphone", True) diff --git a/skills/ios-simulator-testing/scripts/xcode/reporter.py b/skills/ios-simulator-testing/scripts/xcode/reporter.py new file mode 100644 index 000000000..5ca5cce4d --- /dev/null +++ b/skills/ios-simulator-testing/scripts/xcode/reporter.py @@ -0,0 +1,291 @@ +""" +Build/test output formatting. + +Provides multiple output formats with progressive disclosure support. +""" + +import json + + +class OutputFormatter: + """ + Format build/test results for display. + + Supports ultra-minimal default output, verbose mode, and JSON output. + """ + + @staticmethod + def format_minimal( + status: str, + error_count: int, + warning_count: int, + xcresult_id: str, + test_info: dict | None = None, + hints: list[str] | None = None, + ) -> str: + """ + Format ultra-minimal output (5-10 tokens). 
+ + Args: + status: Build status (SUCCESS/FAILED) + error_count: Number of errors + warning_count: Number of warnings + xcresult_id: XCResult bundle ID + test_info: Optional test results dict + hints: Optional list of actionable hints + + Returns: + Minimal formatted string + + Example: + Build: SUCCESS (0 errors, 3 warnings) [xcresult-20251018-143052] + Tests: PASS (12/12 passed, 4.2s) [xcresult-20251018-143052] + """ + lines = [] + + if test_info: + # Test mode + total = test_info.get("total", 0) + passed = test_info.get("passed", 0) + failed = test_info.get("failed", 0) + duration = test_info.get("duration", 0.0) + + test_status = "PASS" if failed == 0 else "FAIL" + lines.append( + f"Tests: {test_status} ({passed}/{total} passed, {duration:.1f}s) [{xcresult_id}]" + ) + else: + # Build mode + lines.append( + f"Build: {status} ({error_count} errors, {warning_count} warnings) [{xcresult_id}]" + ) + + # Add hints if provided and build failed + if hints and status == "FAILED": + lines.append("") + lines.extend(hints) + + return "\n".join(lines) + + @staticmethod + def format_errors(errors: list[dict], limit: int = 10) -> str: + """ + Format error details. + + Args: + errors: List of error dicts + limit: Maximum errors to show + + Returns: + Formatted error list + """ + if not errors: + return "No errors found." + + lines = [f"Errors ({len(errors)}):"] + lines.append("") + + for i, error in enumerate(errors[:limit], 1): + message = error.get("message", "Unknown error") + location = error.get("location", {}) + + # Format location + loc_parts = [] + if location.get("file"): + file_path = location["file"].replace("file://", "") + loc_parts.append(file_path) + if location.get("line"): + loc_parts.append(f"line {location['line']}") + + location_str = ":".join(loc_parts) if loc_parts else "unknown location" + + lines.append(f"{i}. {message}") + lines.append(f" Location: {location_str}") + lines.append("") + + if len(errors) > limit: + lines.append(f"... and {len(errors) - limit} more errors") + + return "\n".join(lines) + + @staticmethod + def format_warnings(warnings: list[dict], limit: int = 10) -> str: + """ + Format warning details. + + Args: + warnings: List of warning dicts + limit: Maximum warnings to show + + Returns: + Formatted warning list + """ + if not warnings: + return "No warnings found." + + lines = [f"Warnings ({len(warnings)}):"] + lines.append("") + + for i, warning in enumerate(warnings[:limit], 1): + message = warning.get("message", "Unknown warning") + location = warning.get("location", {}) + + # Format location + loc_parts = [] + if location.get("file"): + file_path = location["file"].replace("file://", "") + loc_parts.append(file_path) + if location.get("line"): + loc_parts.append(f"line {location['line']}") + + location_str = ":".join(loc_parts) if loc_parts else "unknown location" + + lines.append(f"{i}. {message}") + lines.append(f" Location: {location_str}") + lines.append("") + + if len(warnings) > limit: + lines.append(f"... and {len(warnings) - limit} more warnings") + + return "\n".join(lines) + + @staticmethod + def format_log(log: str, lines: int = 50) -> str: + """ + Format build log (show last N lines). + + Args: + log: Full build log + lines: Number of lines to show + + Returns: + Formatted log excerpt + """ + if not log: + return "No build log available." + + log_lines = log.strip().split("\n") + + if len(log_lines) <= lines: + return log + + # Show last N lines + excerpt = log_lines[-lines:] + return f"... 
(showing last {lines} lines of {len(log_lines)})\n\n" + "\n".join(excerpt) + + @staticmethod + def format_json(data: dict) -> str: + """ + Format data as JSON. + + Args: + data: Data to format + + Returns: + Pretty-printed JSON string + """ + return json.dumps(data, indent=2) + + @staticmethod + def generate_hints(errors: list[dict]) -> list[str]: + """ + Generate actionable hints based on error types. + + Args: + errors: List of error dicts + + Returns: + List of hint strings + """ + hints = [] + error_types: set[str] = set() + + # Collect error types + for error in errors: + error_type = error.get("type", "unknown") + error_types.add(error_type) + + # Generate hints based on error types + if "provisioning" in error_types: + hints.append("Provisioning profile issue detected:") + hints.append(" • Ensure you have a valid provisioning profile for iOS Simulator") + hints.append( + ' • For simulator builds, use CODE_SIGN_IDENTITY="" CODE_SIGNING_REQUIRED=NO' + ) + hints.append(" • Or specify simulator explicitly: --simulator 'iPhone 16 Pro'") + + if "signing" in error_types: + hints.append("Code signing issue detected:") + hints.append(" • For simulator builds, code signing is not required") + hints.append(" • Ensure build settings target iOS Simulator, not physical device") + hints.append(" • Check destination: platform=iOS Simulator,name=") + + if not error_types or "build" in error_types: + # Generic hints when error type is unknown + if any("destination" in error.get("message", "").lower() for error in errors): + hints.append("Device selection issue detected:") + hints.append(" • List available simulators: xcrun simctl list devices available") + hints.append(" • Specify simulator: --simulator 'iPhone 16 Pro'") + + return hints + + @staticmethod + def format_verbose( + status: str, + error_count: int, + warning_count: int, + xcresult_id: str, + errors: list[dict] | None = None, + warnings: list[dict] | None = None, + test_info: dict | None = None, + ) -> str: + """ + Format verbose output with error/warning details. + + Args: + status: Build status + error_count: Error count + warning_count: Warning count + xcresult_id: XCResult ID + errors: Optional error list + warnings: Optional warning list + test_info: Optional test results + + Returns: + Verbose formatted output + """ + lines = [] + + # Header + if test_info: + total = test_info.get("total", 0) + passed = test_info.get("passed", 0) + failed = test_info.get("failed", 0) + duration = test_info.get("duration", 0.0) + + test_status = "PASS" if failed == 0 else "FAIL" + lines.append(f"Tests: {test_status}") + lines.append(f" Total: {total}") + lines.append(f" Passed: {passed}") + lines.append(f" Failed: {failed}") + lines.append(f" Duration: {duration:.1f}s") + else: + lines.append(f"Build: {status}") + + lines.append(f"XCResult: {xcresult_id}") + lines.append("") + + # Errors + if errors and len(errors) > 0: + lines.append(OutputFormatter.format_errors(errors, limit=5)) + lines.append("") + + # Warnings + if warnings and len(warnings) > 0: + lines.append(OutputFormatter.format_warnings(warnings, limit=5)) + lines.append("") + + # Summary + lines.append(f"Summary: {error_count} errors, {warning_count} warnings") + + return "\n".join(lines) diff --git a/skills/ios-simulator-testing/scripts/xcode/xcresult.py b/skills/ios-simulator-testing/scripts/xcode/xcresult.py new file mode 100644 index 000000000..9bb7b7bc7 --- /dev/null +++ b/skills/ios-simulator-testing/scripts/xcode/xcresult.py @@ -0,0 +1,404 @@ +""" +XCResult bundle parser. 
+ +Extracts structured data from xcresult bundles using xcresulttool. +""" + +import json +import re +import subprocess +import sys +from pathlib import Path +from typing import Any + + +class XCResultParser: + """ + Parse xcresult bundles to extract build/test data. + + Uses xcresulttool to extract structured JSON data from Apple's + xcresult bundle format. + """ + + def __init__(self, xcresult_path: Path, stderr: str = ""): + """ + Initialize parser. + + Args: + xcresult_path: Path to xcresult bundle + stderr: Optional stderr output for fallback parsing + """ + self.xcresult_path = xcresult_path + self.stderr = stderr + + if xcresult_path and not xcresult_path.exists(): + raise FileNotFoundError(f"XCResult bundle not found: {xcresult_path}") + + def get_build_results(self) -> dict | None: + """ + Get build results as JSON. + + Returns: + Parsed JSON dict or None on error + """ + return self._run_xcresulttool(["get", "build-results"]) + + def get_test_results(self) -> dict | None: + """ + Get test results summary as JSON. + + Returns: + Parsed JSON dict or None on error + """ + return self._run_xcresulttool(["get", "test-results", "summary"]) + + def get_build_log(self) -> str | None: + """ + Get build log as plain text. + + Returns: + Build log string or None on error + """ + result = self._run_xcresulttool(["get", "log", "--type", "build"], parse_json=False) + return result if result else None + + def count_issues(self) -> tuple[int, int]: + """ + Count errors and warnings from build results. + + Returns: + Tuple of (error_count, warning_count) + """ + error_count = 0 + warning_count = 0 + + build_results = self.get_build_results() + + if build_results: + try: + # Try top-level errors/warnings first (newer xcresult format) + if "errors" in build_results and isinstance(build_results.get("errors"), list): + error_count = len(build_results["errors"]) + if "warnings" in build_results and isinstance(build_results.get("warnings"), list): + warning_count = len(build_results["warnings"]) + + # If not found, try legacy format: actions[0].buildResult.issues + if error_count == 0 and warning_count == 0: + actions = build_results.get("actions", {}).get("_values", []) + if actions: + build_result = actions[0].get("buildResult", {}) + issues = build_result.get("issues", {}) + + # Count errors + error_summaries = issues.get("errorSummaries", {}).get("_values", []) + error_count = len(error_summaries) + + # Count warnings + warning_summaries = issues.get("warningSummaries", {}).get("_values", []) + warning_count = len(warning_summaries) + + except (KeyError, IndexError, TypeError) as e: + print(f"Warning: Could not parse issue counts from xcresult: {e}", file=sys.stderr) + + # If no errors found in xcresult but stderr available, count stderr errors + if error_count == 0 and self.stderr: + stderr_errors = self._parse_stderr_errors() + error_count = len(stderr_errors) + + return (error_count, warning_count) + + def get_errors(self) -> list[dict]: + """ + Get detailed error information. 
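+
+        Each entry has the shape (illustrative values):
+        {"message": "cannot find 'foo' in scope", "type": "compilation",
+         "location": {"file": "/path/App.swift", "line": 12, "column": 3}}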
+ + Returns: + List of error dicts with message, file, line info + """ + build_results = self.get_build_results() + errors = [] + + # Try to get errors from xcresult + if build_results: + try: + # Try top-level errors first (newer xcresult format) + if "errors" in build_results and isinstance(build_results.get("errors"), list): + for error in build_results["errors"]: + errors.append( + { + "message": error.get("message", "Unknown error"), + "type": error.get("issueType", "error"), + "location": self._extract_location_from_url(error.get("sourceURL")), + } + ) + + # If not found, try legacy format: actions[0].buildResult.issues + if not errors: + actions = build_results.get("actions", {}).get("_values", []) + if actions: + build_result = actions[0].get("buildResult", {}) + issues = build_result.get("issues", {}) + error_summaries = issues.get("errorSummaries", {}).get("_values", []) + + for error in error_summaries: + errors.append( + { + "message": error.get("message", {}).get( + "_value", "Unknown error" + ), + "type": error.get("issueType", {}).get("_value", "error"), + "location": self._extract_location(error), + } + ) + + except (KeyError, IndexError, TypeError) as e: + print(f"Warning: Could not parse errors from xcresult: {e}", file=sys.stderr) + + # If no errors found in xcresult but stderr available, parse stderr + if not errors and self.stderr: + errors = self._parse_stderr_errors() + + return errors + + def get_warnings(self) -> list[dict]: + """ + Get detailed warning information. + + Returns: + List of warning dicts with message, file, line info + """ + build_results = self.get_build_results() + if not build_results: + return [] + + warnings = [] + + try: + # Try top-level warnings first (newer xcresult format) + if "warnings" in build_results and isinstance(build_results.get("warnings"), list): + for warning in build_results["warnings"]: + warnings.append( + { + "message": warning.get("message", "Unknown warning"), + "type": warning.get("issueType", "warning"), + "location": self._extract_location_from_url(warning.get("sourceURL")), + } + ) + + # If not found, try legacy format: actions[0].buildResult.issues + if not warnings: + actions = build_results.get("actions", {}).get("_values", []) + if not actions: + return [] + + build_result = actions[0].get("buildResult", {}) + issues = build_result.get("issues", {}) + warning_summaries = issues.get("warningSummaries", {}).get("_values", []) + + for warning in warning_summaries: + warnings.append( + { + "message": warning.get("message", {}).get("_value", "Unknown warning"), + "type": warning.get("issueType", {}).get("_value", "warning"), + "location": self._extract_location(warning), + } + ) + + except (KeyError, IndexError, TypeError) as e: + print(f"Warning: Could not parse warnings: {e}", file=sys.stderr) + + return warnings + + def _extract_location(self, issue: dict) -> dict: + """ + Extract file location from issue. 
+ + Args: + issue: Issue dict from xcresult + + Returns: + Location dict with file, line, column + """ + location = {"file": None, "line": None, "column": None} + + try: + doc_location = issue.get("documentLocationInCreatingWorkspace", {}) + location["file"] = doc_location.get("url", {}).get("_value") + location["line"] = doc_location.get("startingLineNumber", {}).get("_value") + location["column"] = doc_location.get("startingColumnNumber", {}).get("_value") + except (KeyError, TypeError): + pass + + return location + + def _extract_location_from_url(self, source_url: str | None) -> dict: + """ + Extract file location from sourceURL (newer xcresult format). + + Args: + source_url: Source URL like "file:///path/to/file.swift#StartingLineNumber=134&..." + + Returns: + Location dict with file, line, column + """ + location = {"file": None, "line": None, "column": None} + + if not source_url: + return location + + try: + # Split URL and fragment + if "#" in source_url: + file_part, fragment = source_url.split("#", 1) + + # Extract file path + location["file"] = file_part.replace("file://", "") + + # Parse fragment parameters + params = {} + for param in fragment.split("&"): + if "=" in param: + key, value = param.split("=", 1) + params[key] = value + + # Extract line and column + location["line"] = ( + int(params.get("StartingLineNumber", 0)) + 1 + if "StartingLineNumber" in params + else None + ) + location["column"] = ( + int(params.get("StartingColumnNumber", 0)) + 1 + if "StartingColumnNumber" in params + else None + ) + else: + # No fragment, just file path + location["file"] = source_url.replace("file://", "") + + except (ValueError, AttributeError): + pass + + return location + + def _run_xcresulttool(self, args: list[str], parse_json: bool = True) -> Any | None: + """ + Run xcresulttool command. + + Args: + args: Command arguments (after 'xcresulttool') + parse_json: Whether to parse output as JSON + + Returns: + Parsed JSON dict, plain text, or None on error + """ + if not self.xcresult_path: + return None + + cmd = ["xcrun", "xcresulttool"] + args + ["--path", str(self.xcresult_path)] + + try: + result = subprocess.run(cmd, capture_output=True, text=True, check=True) + + if parse_json: + return json.loads(result.stdout) + return result.stdout + + except subprocess.CalledProcessError as e: + print(f"Error running xcresulttool: {e}", file=sys.stderr) + print(f"stderr: {e.stderr}", file=sys.stderr) + return None + except json.JSONDecodeError as e: + print(f"Error parsing JSON from xcresulttool: {e}", file=sys.stderr) + return None + + def _parse_stderr_errors(self) -> list[dict]: + """ + Parse common errors from stderr output as fallback. 
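+
+        Recognizes Swift/Clang compiler diagnostics, top-level xcodebuild
+        errors, provisioning-profile and code-signing failures, and
+        "No profiles for ..." messages.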
+ + Returns: + List of error dicts parsed from stderr + """ + errors = [] + + if not self.stderr: + return errors + + # Pattern 0: Swift/Clang compilation errors (e.g., "/path/file.swift:135:59: error: message") + compilation_error_pattern = ( + r"^(?P[^:]+):(?P\d+):(?P\d+):\s*error:\s*(?P.+?)$" + ) + for match in re.finditer(compilation_error_pattern, self.stderr, re.MULTILINE): + errors.append( + { + "message": match.group("message").strip(), + "type": "compilation", + "location": { + "file": match.group("file"), + "line": int(match.group("line")), + "column": int(match.group("column")), + }, + } + ) + + # Pattern 1: xcodebuild top-level errors (e.g., "xcodebuild: error: Unable to find...") + xcodebuild_error_pattern = r"xcodebuild:\s*error:\s*(?P.*?)(?:\n\n|\Z)" + for match in re.finditer(xcodebuild_error_pattern, self.stderr, re.DOTALL): + message = match.group("message").strip() + # Clean up multi-line messages + message = " ".join(line.strip() for line in message.split("\n") if line.strip()) + errors.append( + { + "message": message, + "type": "build", + "location": {"file": None, "line": None, "column": None}, + } + ) + + # Pattern 2: Provisioning profile errors + provisioning_pattern = r"error:.*?provisioning profile.*?(?:doesn't|does not|cannot).*?(?P.*?)(?:\n|$)" + for match in re.finditer(provisioning_pattern, self.stderr, re.IGNORECASE): + errors.append( + { + "message": f"Provisioning profile error: {match.group('message').strip()}", + "type": "provisioning", + "location": {"file": None, "line": None, "column": None}, + } + ) + + # Pattern 3: Code signing errors + signing_pattern = r"error:.*?(?:code sign|signing).*?(?P.*?)(?:\n|$)" + for match in re.finditer(signing_pattern, self.stderr, re.IGNORECASE): + errors.append( + { + "message": f"Code signing error: {match.group('message').strip()}", + "type": "signing", + "location": {"file": None, "line": None, "column": None}, + } + ) + + # Pattern 4: Generic compilation errors (but not if already captured) + if not errors: + generic_error_pattern = r"^(?:\*\*\s)?(?:error|❌):\s*(?P.*?)(?:\n|$)" + for match in re.finditer(generic_error_pattern, self.stderr, re.MULTILINE): + message = match.group("message").strip() + errors.append( + { + "message": message, + "type": "build", + "location": {"file": None, "line": None, "column": None}, + } + ) + + # Pattern 5: Specific "No profiles" error + if "No profiles for" in self.stderr: + no_profile_pattern = r"No profiles for '(?P.*?)' were found" + for match in re.finditer(no_profile_pattern, self.stderr): + errors.append( + { + "message": f"No provisioning profile found for bundle ID '{match.group('bundle_id')}'", + "type": "provisioning", + "location": {"file": None, "line": None, "column": None}, + } + ) + + return errors diff --git a/skills/landing-page-expert/README.md b/skills/landing-page-expert/README.md new file mode 100644 index 000000000..35d9df5ad --- /dev/null +++ b/skills/landing-page-expert/README.md @@ -0,0 +1,158 @@ +# Landing Page Expert - Autonomous Copywriting System + +## What Is This? + +An intelligent copywriting system that combines 11 world-class marketing frameworks into one autonomous skill. You describe what you need, it selects the perfect frameworks and generates professional copy instantly. 
+ +## Quick Start + +**Simple Request:** +``` +"Write a LinkedIn post for my coaching business" +``` + +**Detailed Request:** +``` +"Create a landing page for MTL Craft Cocktails: +- $1,200 corporate mixology workshops +- Target: HR managers at tech companies +- Unique value: Hosts enjoy their event instead of bartending" +``` + +**Complex Request:** +``` +"Build a launch campaign: landing page, 5-email sequence, 10 social posts" +``` + +The system handles everything automatically. + +## What Makes This Different + +**Traditional Tools:** +- Give you templates +- Make you choose frameworks +- Require marketing expertise + +**This System:** +- Analyzes your context automatically +- Selects optimal frameworks intelligently +- Generates professional copy instantly +- Works for ANY business, ANY format + +## The 11 Expert Frameworks + +1. **Caleb Ralston** - Depth-first strategy, trust architecture +2. **Alex Hormozi** - Value equation, grand slam offers +3. **Russell Brunson** - Story selling, perfect webinar +4. **Donald Miller** - StoryBrand clarity framework +5. **Dan Kennedy** - Direct response, PAS formula +6. **Gary Vaynerchuk** - Document don't create, jabs and hooks +7. **Simon Sinek** - Start with why, golden circle +8. **Seth Godin** - Purple cow, tribes, permission marketing +9. **Rory Sutherland** - Behavioral economics, psychological value +10. **Chris Do** - Value-based pricing, consultative sales +11. **Chris Voss** - Tactical empathy, calibrated questions + +## What It Generates + +- Landing pages +- Email sequences +- Social media posts (all platforms) +- UGC video scripts +- Blog posts +- VSL scripts +- Webinar scripts +- Sales pages +- Ad copy + +## How It Works + +**1. Make Request** → You describe what you need + +**2. Automatic Analysis** → System extracts: +- Business type +- Target audience +- Format needed +- Goal +- Price point + +**3. Intelligent Selection** → System chooses 1-3 optimal frameworks using `SELECTION-ENGINE.md` + +**4. Copy Generation** → Professional copy created using selected frameworks + +**5. Brief Explanation** → Which frameworks were used and why + +## File Structure + +``` +/landing-page-expert/ +├── SKILL.md # Main documentation +├── SELECTION-ENGINE.md # Autonomous decision logic +├── framework-combinations.md # Proven framework mixing +├── copy-templates.md # Real-world examples +├── README.md # This file +└── frameworks/ + ├── caleb-ralston.md # Complete methodology + ├── alex-hormozi.md # Complete methodology + ├── russell-brunson.md # Complete methodology + └── additional-experts.md # Remaining 8 experts +``` + +## Usage Tips + +**You DON'T need to:** +- Choose which framework to use +- Understand marketing theory +- Figure out structure +- Know what tone to use + +**You ONLY need to:** +- Describe your business/offer +- State what format you need +- Let the system work + +## Example Outputs + +**Request:** "LinkedIn post for corporate event planning" + +**System Output:** +- Automatically selects: Ralston (depth) + StoryBrand (clarity) +- Generates 250-word professional post +- Explains framework choice briefly +- Offers A/B variation if requested + +## Platform Compatibility + +Works across: +- Claude Web Chat ✓ +- Claude Desktop ✓ +- Claude Code ✓ +- Custom API apps ✓ + +See `PLATFORM-ARCHITECTURE.md` for deployment details. 
+ +## Related Skills + +- **copywriting-interview-mode** - Deep discovery when context unclear +- **content-research-writer** - Research-heavy content + +## Success Indicators + +The skill is working correctly when: +- Framework selection happens automatically +- Copy generated in single response +- Framework choice briefly explained +- Output matches requested format +- Copy ready to use immediately + +## Support + +For questions about: +- **How to use:** Read this README +- **How it works:** Read SKILL.md +- **Framework details:** Read individual framework files +- **Selection logic:** Read SELECTION-ENGINE.md + +## License + +For personal and commercial use. Attribution appreciated but not required. diff --git a/skills/landing-page-expert/SELECTION-ENGINE.md b/skills/landing-page-expert/SELECTION-ENGINE.md new file mode 100644 index 000000000..c35861bde --- /dev/null +++ b/skills/landing-page-expert/SELECTION-ENGINE.md @@ -0,0 +1,309 @@ +# Autonomous Framework Selection Engine + +## Purpose + +This file contains the decision logic that enables Claude to automatically select the optimal framework combination for any copywriting request **without asking the user**. + +## Core Principle + +**Extract context → Match to decision trees → Select frameworks → Generate copy** + +Never ask "Which framework would you like?" - that's what this engine determines automatically. + +## Context Extraction Rules + +### Automatic Detection Patterns + +From any user request, extract: + +1. **Business Type** + - Keywords: coaching, consulting, SaaS, e-commerce, service business, B2B, B2C + - If unclear: Infer from description + +2. **Format Needed** + - Keywords: landing page, email, post, Instagram, LinkedIn, Twitter, video script, ad + - Default: If unspecified and describing offer → landing page + +3. **Price Point** + - $0-$50: Low-ticket + - $50-$500: Mid-ticket + - $500-$2,000: Premium + - $2,000+: High-ticket + - If unspecified: Infer from business type + +4. **Target Audience Awareness** + - Cold (unaware): Needs education + - Warm (problem-aware): Needs solution + - Hot (solution-aware): Needs offer + +5. 
**Primary Goal** + - Awareness: Make them know you exist + - Consideration: Make them understand value + - Conversion: Make them buy/book/sign up + +## Framework Selection Decision Trees + +### Decision Tree 1: By Business Type + +**Coaching/Consulting/High-Ticket Services ($2,000+)** +- Primary: Caleb Ralston (depth, trust, authority) +- Secondary: Donald Miller (clarity) +- Tertiary: Chris Voss (tactical empathy) +- **Why:** High-ticket requires trust-building and depth + +**SaaS/Tech Products** +- Primary: Donald Miller (clarity above all) +- Secondary: Alex Hormozi (value equation) +- Tertiary: Russell Brunson (demo to close) +- **Why:** Complex products need simple explanations + +**E-commerce/Physical Products** +- Primary: Alex Hormozi (value stacking) +- Secondary: Russell Brunson (story-driven desire) +- Tertiary: Dan Kennedy (urgency and scarcity) +- **Why:** Must overcome price resistance quickly + +**B2B Services** +- Primary: Donald Miller (executive clarity) +- Secondary: Simon Sinek (why-driven) +- Tertiary: Rory Sutherland (psychological reframing) +- **Why:** Decision-makers need ROI clarity + +**Creative Services/Agencies** +- Primary: Chris Do (value-based positioning) +- Secondary: Caleb Ralston (depth and expertise) +- Tertiary: Seth Godin (remarkable positioning) +- **Why:** Must demonstrate unique value + +**Low-Ticket Digital Products ($0-$100)** +- Primary: Russell Brunson (quick desire building) +- Secondary: Dan Kennedy (direct response) +- Tertiary: Gary Vaynerchuk (social proof) +- **Why:** Volume play, need fast conversions + +### Decision Tree 2: By Content Format + +**Landing Pages** +- Framework mix: StoryBrand (structure) + Hormozi (offer) + One expert for personality +- **Structure:** Clear hero section, problem/solution, value stack, CTA +- **Length:** 1,500-3,000 words for high-ticket, 800-1,500 for low-ticket + +**Email Sequences** +- Day 1: Caleb Ralston or Sinek (build connection, establish why) +- Day 2-3: StoryBrand or Brunson (tell story, show transformation) +- Day 4-5: Hormozi (stack value, overcome objections) +- Day 6-7: Kennedy + Voss (urgency + final objection handling) + +**LinkedIn Posts (Authority Building)** +- Primary: Caleb Ralston (depth-first, thoughtful) +- Secondary: Seth Godin (thought-provoking) +- Format: 150-300 words, starts with hook, ends with insight + +**Instagram Posts/Stories** +- Primary: Gary Vaynerchuk (document, authentic) +- Secondary: Russell Brunson (micro-story) +- Format: Visual-first, 50-150 words, casual tone + +**Twitter/X Threads** +- Primary: Caleb Ralston (waterfall method) +- Secondary: Gary Vaynerchuk (value bombs) +- Format: Hook tweet → 5-10 value tweets → CTA + +**Video Sales Letters (VSL)** +- Primary: Russell Brunson (perfect webinar structure) +- Secondary: Alex Hormozi (value equation) +- Structure: Origin story → Content → Pitch + +**Blog Posts (SEO/Authority)** +- Primary: Seth Godin (remarkable insights) +- Secondary: Caleb Ralston (depth) +- Length: 1,500-2,500 words, educational + +**Paid Ads (FB/Google)** +- Primary: Dan Kennedy (direct response) +- Secondary: Hormozi (attention-grabbing value) +- Format: Hook in 3 seconds, clear CTA + +### Decision Tree 3: By Customer Journey Stage + +**Awareness Stage (Cold Audience)** +- Primary: Gary Vaynerchuk or Seth Godin (jab, give value) +- Secondary: Caleb Ralston (demonstrate depth) +- Goal: Make them know you exist and care +- **No selling - just value and positioning** + +**Consideration Stage (Warm Audience)** +- Primary: Donald Miller (clarify 
the solution) +- Secondary: Russell Brunson (show transformation) +- Goal: Make them understand you're the solution +- **Soft pitch, heavy on value demonstration** + +**Decision Stage (Hot Audience)** +- Primary: Alex Hormozi (stack value, overcome objections) +- Secondary: Dan Kennedy (urgency, direct response) +- Tertiary: Chris Voss (handle final objections) +- Goal: Make them take action now +- **Direct offer, clear CTA, remove friction** + +### Decision Tree 4: By Price Point + +**Low-Ticket ($0-$100)** +- Speed matters: Kennedy + Brunson + GaryVee +- Quick decision required +- Volume play +- Emphasize: Impulse, testimonials, guarantee + +**Mid-Ticket ($100-$500)** +- Balance speed and trust: StoryBrand + Hormozi + Ralston +- Some consideration time +- Need clear value demonstration +- Emphasize: ROI, comparisons, case studies + +**Premium ($500-$2,000)** +- Trust building: Ralston + StoryBrand + Hormozi +- Considered purchase +- Need authority positioning +- Emphasize: Transformation, expertise, results + +**High-Ticket ($2,000+)** +- Deep trust required: Ralston + Voss + Do +- Long sales cycle +- Need relationship building +- Emphasize: Partnership, depth, understanding + +## Proven Framework Combinations + +### The "Trust Stack" (High-Ticket Services) +- Caleb Ralston: Establish depth and authority +- Chris Voss: Show you understand their situation +- Chris Do: Position value over price +- **Use for:** Coaching, consulting, agency services + +### The "Clarity Stack" (Complex Products) +- Donald Miller: Make it simple +- Alex Hormozi: Show undeniable value +- Russell Brunson: Demo the transformation +- **Use for:** SaaS, software, technical products + +### The "Volume Stack" (Low-Ticket Digital) +- Russell Brunson: Quick desire building +- Dan Kennedy: Urgency and scarcity +- Gary Vaynerchuk: Social proof +- **Use for:** Courses, ebooks, low-ticket offers + +### The "Authority Stack" (Personal Brands) +- Caleb Ralston: Depth-first content +- Seth Godin: Remarkable insights +- Simon Sinek: Why-driven messaging +- **Use for:** Thought leaders, experts, creators + +### The "E-commerce Stack" +- Alex Hormozi: Value stacking +- Dan Kennedy: Urgency tactics +- Russell Brunson: Story-driven desire +- **Use for:** Physical products, drop shipping + +## Selection Algorithm + +``` +1. EXTRACT CONTEXT + ├─ Business type + ├─ Format + ├─ Price point + ├─ Audience temperature + └─ Primary goal + +2. CONSULT DECISION TREES + ├─ Match business type → Get primary framework + ├─ Match format → Get structure requirements + ├─ Match journey stage → Get messaging approach + └─ Match price point → Get emphasis elements + +3. SELECT 1-3 FRAMEWORKS + ├─ Primary (drives core message) + ├─ Secondary (provides structure or support) + └─ Tertiary (adds personality or handles objections) + +4. GENERATE COPY + ├─ Apply frameworks in order + ├─ Optimize for format + ├─ Include all required elements + └─ Maintain authentic voice + +5. EXPLAIN BRIEFLY + ├─ Which frameworks used + ├─ Why they fit (1 sentence each) + └─ Offer variations if requested +``` + +## Critical Rules + +### Always Automatic +- Never ask which framework to use +- Never present framework options for user to choose +- Never say "Would you like me to use X framework?" 
+ +### Exception (Only Ask If): +- Business/offer is completely unclear +- Target audience is ambiguous beyond inference +- Multiple conflicting goals are stated + +### Default Behaviors +- **If price unknown:** Infer from business type +- **If audience unknown:** Assume warm (problem-aware) +- **If tone unclear:** Match business type (formal for B2B, casual for B2C) +- **If length unspecified:** Match format standards + +## Platform-Specific Optimization + +### LinkedIn +- Professional tone +- Thought leadership angle +- 150-300 words +- Hook → Insight → Application + +### Instagram +- Visual-first +- Casual, authentic tone +- 50-150 words +- Story-driven + +### Twitter/X +- Concise, punchy +- Thread format for depth +- Hook in first tweet +- Value in middle tweets +- CTA in final tweet + +### Landing Pages +- Clear hero section +- Problem → Solution → Value → Social Proof → CTA +- Length varies by price point +- Multiple CTAs + +### Email +- Subject line = Curiosity + Benefit +- Body = Story or Value +- Single CTA +- Conversational tone + +## Quality Checks + +Before delivering copy, verify: +- [ ] Frameworks selected match context +- [ ] Copy follows format requirements +- [ ] All conversion elements present (if conversion goal) +- [ ] Tone matches business type +- [ ] Length appropriate for format +- [ ] CTA clear and specific +- [ ] Brief framework explanation included + +## Evolution Protocol + +This selection engine improves over time by: +1. Observing which combinations user prefers +2. Testing new combinations for edge cases +3. Adding new frameworks as they're proven +4. Refining decision trees based on outcomes + +The engine is designed to be **deterministic but adaptive** - same inputs yield same outputs, but the logic improves with use. diff --git a/skills/landing-page-expert/SKILL.md b/skills/landing-page-expert/SKILL.md new file mode 100644 index 000000000..25b07f87d --- /dev/null +++ b/skills/landing-page-expert/SKILL.md @@ -0,0 +1,190 @@ +--- +name: landing-page-expert +description: Use when creating any marketing copy - autonomously selects optimal frameworks from 11 experts (Hormozi, Brunson, Ralston, etc.) and generates professional copy for landing pages, emails, social posts, and more +--- + +# Landing Page Expert - Autonomous Copywriting System + +## Overview + +This skill combines methodologies from 11 world-class marketing experts into an intelligent system that automatically selects the perfect framework combination for any copywriting task. + +**The system works autonomously** - you describe what you need, and it handles framework selection, copy generation, and optimization automatically. + +## Integrated Expert Frameworks + +1. **Caleb Ralston** - Brand Journey, Waterfall Method, Depth-First Strategy +2. **Alex Hormozi** - Value Equation, Grand Slam Offers, Offer Stacking +3. **Russell Brunson** - Story Selling, Perfect Webinar, Epiphany Bridge +4. **Donald Miller (StoryBrand)** - 7-Part Framework, Clarity First +5. **Dan Kennedy** - PAS Formula, Direct Response Marketing +6. **Gary Vaynerchuk** - Document Don't Create, Jab Jab Right Hook +7. **Simon Sinek** - Start With Why, Golden Circle +8. **Seth Godin** - Purple Cow, Permission Marketing, Tribes +9. **Rory Sutherland** - Behavioral Economics, Psychological Value +10. **Chris Do** - Value-Based Pricing, Consultative Sales +11. 
**Chris Voss** - Tactical Empathy, Calibrated Questions + +## What This Skill Generates + +- **Landing Pages** - Full conversion-optimized pages +- **Email Sequences** - 3-7 email nurture campaigns +- **Social Media Posts** - Platform-optimized content (LinkedIn, Instagram, Twitter, TikTok, etc.) +- **UGC Video Scripts** - For Arcads, HeyGen, etc. +- **Blog Posts** - Authority-building long-form content +- **VSL Scripts** - Video sales letters +- **Webinar Scripts** - Russell Brunson's Perfect Webinar format +- **Sales Pages** - High-converting offer pages +- **Ad Copy** - Paid advertising copy + +## How It Works (Autonomous Operation) + +### Step 1: Automatic Context Analysis +When you make a request, the system automatically extracts: +- Business type and industry +- Target audience and awareness level +- Format needed (landing page, email, social post, etc.) +- Goal (awareness, consideration, conversion) +- Price point and offer complexity +- Unique value proposition + +### Step 2: Intelligent Framework Selection +Using decision trees in `SELECTION-ENGINE.md`, the system: +- Matches your context to proven framework combinations +- Selects 1-3 optimal frameworks +- Considers format-specific requirements +- Applies platform optimization rules + +### Step 3: Copy Generation +The system: +- Generates professional copy using selected frameworks +- Optimizes for platform/format +- Includes conversion elements +- Provides brief framework explanation + +### Step 4: Optional Refinement +You can request: +- A/B test variations +- Tone adjustments +- Length modifications +- Different framework combinations + +## Usage Examples + +### Simple Request: +``` +"Write a LinkedIn post for my coaching business" +``` +System automatically: Analyzes → Selects Ralston + Sinek → Generates + +### Detailed Request: +``` +"Create a landing page for MTL Craft Cocktails: +- $1,200 corporate mixology workshops +- Target: HR managers at tech companies +- Unique value: Hosts enjoy their event instead of bartending" +``` +System automatically: Analyzes → Selects StoryBrand + Hormozi + Ralston → Generates + +### Complex Campaign: +``` +"Build a launch campaign: +- Landing page +- 5-email sequence +- 10 social posts across LinkedIn and Instagram" +``` +System automatically: Analyzes → Selects optimal frameworks per format → Generates entire campaign + +## Key Files in This Skill + +- **SELECTION-ENGINE.md** - Autonomous decision logic for framework selection +- **framework-combinations.md** - Proven framework mixing strategies +- **copy-templates.md** - Real-world examples and templates +- **frameworks/caleb-ralston.md** - Complete Caleb Ralston methodology +- **frameworks/alex-hormozi.md** - Complete Alex Hormozi methodology +- **frameworks/russell-brunson.md** - Complete Russell Brunson methodology +- **frameworks/additional-experts.md** - Remaining 8 expert methodologies + +## What Makes This Different + +**Traditional copywriting tools:** +- Give you templates to fill in +- Make you choose frameworks +- Require marketing knowledge +- Generic, one-size-fits-all approach + +**This autonomous system:** +- Analyzes your specific situation automatically +- Selects optimal frameworks intelligently +- Generates professional copy instantly +- Adapts to any business, any format +- Explains why it works + +## Instructions for Claude + +When a user requests copywriting: + +1. 
**Extract Context Automatically** (never ask unless critical info missing) + - Business type and offer + - Target audience + - Format needed + - Goal/objective + - Price point (if relevant) + +2. **Consult SELECTION-ENGINE.md** + - Match context to decision trees + - Select 1-3 optimal frameworks + - Identify any platform-specific requirements + +3. **Generate Copy** + - Apply selected frameworks + - Optimize for format/platform + - Include conversion elements + - Maintain authentic voice + +4. **Provide Brief Explanation** + - Which frameworks were used + - Why they fit this situation + - Key principles applied + +5. **Offer Refinement Options** + - A/B test variations + - Tone adjustments + - Alternative framework combinations + +## Never Ask These Questions + +The system should automatically determine: +- Which framework to use (that's what SELECTION-ENGINE.md is for) +- How to structure copy (frameworks provide structure) +- What tone to use (infer from business context) +- Length (match format standards) + +Only ask clarifying questions if: +- Business/offer is unclear +- Target audience is ambiguous +- Format is unspecified + +## Success Metrics + +This skill is working correctly when: +- Framework selection happens automatically +- Copy is generated in single response +- Framework choice is briefly explained +- Output matches requested format +- User can immediately use the copy + +## Related Skills + +- **copywriting-interview-mode** - For in-depth discovery when context is unclear +- **content-research-writer** - For research-heavy content creation + +## Platform Compatibility + +This skill works across: +- Claude Web Chat +- Claude Desktop (via MCP) +- Claude Code (command-line) +- API integrations (with skill loader) + +See `PLATFORM-ARCHITECTURE.md` and `QUICK-START.md` for deployment details. diff --git a/skills/landing-page-expert/framework-combinations.md b/skills/landing-page-expert/framework-combinations.md new file mode 100644 index 000000000..647f15d21 --- /dev/null +++ b/skills/landing-page-expert/framework-combinations.md @@ -0,0 +1,298 @@ +# Framework Combinations Guide + +## Purpose + +This file shows proven combinations of expert frameworks for different situations. Use these as starting points - the SELECTION-ENGINE.md determines optimal combinations automatically. + +## By Business Type + +### High-Ticket Coaching/Consulting ($2,000+) +**Primary Stack:** +- Caleb Ralston (depth and trust) +- Chris Voss (tactical empathy) +- Donald Miller (clarity) + +**Why:** High-ticket requires deep trust. Ralston builds authority through depth, Voss handles objections with empathy, StoryBrand keeps message clear. + +**Format Applications:** +- Landing pages: StoryBrand structure + Ralston depth +- Emails: Ralston thought leadership + Voss objection handling +- Social: Ralston depth-first posts + +### SaaS/Tech Products +**Primary Stack:** +- Donald Miller (clarity first) +- Alex Hormozi (value equation) +- Russell Brunson (demo to desire) + +**Why:** Complex products need simple explanations. StoryBrand clarifies, Hormozi quantifies value, Brunson creates desire through story. + +**Format Applications:** +- Landing pages: StoryBrand 7-part framework +- Product pages: Hormozi value stacking +- Webinars: Brunson perfect webinar + +### E-commerce/Physical Products +**Primary Stack:** +- Alex Hormozi (value stacking) +- Dan Kennedy (urgency) +- Russell Brunson (story-driven desire) + +**Why:** Must overcome price resistance quickly. 
Hormozi stacks value, Kennedy creates urgency, Brunson builds emotional connection. + +**Format Applications:** +- Product pages: Hormozi offer stack +- Email: Kennedy PAS formula +- Social: Brunson micro-stories + +### Creative Services/Agencies +**Primary Stack:** +- Chris Do (value positioning) +- Caleb Ralston (depth demonstration) +- Seth Godin (remarkable positioning) + +**Why:** Must demonstrate unique value. Do positions on value not time, Ralston shows expertise through depth, Godin differentiates. + +**Format Applications:** +- Proposals: Do value conversation +- Case studies: Ralston depth +- Positioning: Godin purple cow + +### B2B Services +**Primary Stack:** +- Donald Miller (executive clarity) +- Simon Sinek (purpose-driven) +- Rory Sutherland (ROI reframing) + +**Why:** Decision-makers need clarity and ROI. StoryBrand simplifies, Sinek connects to purpose, Sutherland reframes value. + +**Format Applications:** +- Sales materials: StoryBrand clarity +- Pitch decks: Sinek why-driven +- Proposals: Sutherland value reframing + +## By Content Format + +### Landing Pages + +**Low-Ticket ($0-$100):** +- Russell Brunson (hook-story-offer) +- Alex Hormozi (value stack) +- Dan Kennedy (urgency) + +**Mid-Ticket ($100-$2,000):** +- Donald Miller (StoryBrand structure) +- Alex Hormozi (offer stack) +- Caleb Ralston (trust elements) + +**High-Ticket ($2,000+):** +- Donald Miller (clarity) +- Caleb Ralston (depth and trust) +- Chris Voss (objection handling) + +### Email Sequences + +**Welcome Sequence (5-7 emails):** +- Day 1: Simon Sinek (why you exist) +- Day 2-3: Russell Brunson (epiphany bridge) +- Day 4-5: Alex Hormozi (value stacking) +- Day 6-7: Dan Kennedy (urgency) + Chris Voss (objections) + +**Sales Sequence:** +- Email 1: Caleb Ralston (depth content) +- Email 2: Russell Brunson (story) +- Email 3: Alex Hormozi (offer) +- Email 4: Dan Kennedy (urgency) +- Email 5: Chris Voss (final objections) + +### Social Media Posts + +**LinkedIn (Authority):** +- Primary: Caleb Ralston (depth-first) +- Secondary: Seth Godin (remarkable insights) +- Format: Thoughtful, substantial, professional + +**Instagram (Connection):** +- Primary: Gary Vaynerchuk (document, authentic) +- Secondary: Russell Brunson (micro-stories) +- Format: Visual, casual, story-driven + +**Twitter/X (Engagement):** +- Primary: Caleb Ralston (waterfall method) +- Secondary: Seth Godin (provocative) +- Format: Threads, quick insights + +### Video Content + +**VSLs (Video Sales Letters):** +- Russell Brunson (perfect webinar structure) +- Alex Hormozi (value demonstration) +- Dan Kennedy (direct response close) + +**Educational Content:** +- Caleb Ralston (depth and substance) +- Gary Vaynerchuk (document approach) +- Seth Godin (remarkable angles) + +## By Customer Journey Stage + +### Awareness (Cold Audience) + +**Goal:** Make them know you exist + +**Stack:** +- Gary Vaynerchuk (jab - give value) +- Seth Godin (remarkable positioning) +- Caleb Ralston (depth demonstration) + +**Content Types:** +- Educational posts +- Thought leadership +- Value-first content +- No selling + +### Consideration (Warm Audience) + +**Goal:** Make them understand you're the solution + +**Stack:** +- Donald Miller (clarify the transformation) +- Russell Brunson (show the journey) +- Simon Sinek (connect to why) + +**Content Types:** +- Case studies +- Demo content +- Comparison content +- Soft pitch + +### Decision (Hot Audience) + +**Goal:** Make them take action now + +**Stack:** +- Alex Hormozi (stack value, remove risk) +- Dan 
Kennedy (create urgency) +- Chris Voss (handle objections) + +**Content Types:** +- Sales pages +- Offer presentations +- Direct CTAs +- Clear next steps + +## By Goal/Objective + +### Building Authority +- Caleb Ralston (depth content) +- Seth Godin (remarkable positioning) +- Simon Sinek (purpose-driven) + +### Generating Leads +- Russell Brunson (hook-story-offer) +- Dan Kennedy (direct response) +- Gary Vaynerchuk (value-first) + +### Converting Sales +- Alex Hormozi (value stacking) +- Dan Kennedy (urgency) +- Chris Voss (objection handling) + +### Building Community +- Seth Godin (tribes) +- Simon Sinek (purpose) +- Gary Vaynerchuk (authenticity) + +### Premium Positioning +- Chris Do (value-based) +- Rory Sutherland (psychological value) +- Caleb Ralston (depth and authority) + +## Special Combinations + +### The "Trust Stack" (Complex B2B Sales) +1. Caleb Ralston - Establish expertise through depth +2. Chris Voss - Show understanding through empathy +3. Chris Do - Position on value not price +4. Donald Miller - Keep message crystal clear + +**Use For:** Long sales cycles, complex solutions, high-trust requirements + +### The "Rapid Convert Stack" (E-commerce) +1. Russell Brunson - Quick desire building +2. Alex Hormozi - Value stacking +3. Dan Kennedy - Urgency and scarcity +4. Gary Vaynerchuk - Social proof + +**Use For:** Volume plays, impulse purchases, competitive markets + +### The "Thought Leader Stack" (Personal Brands) +1. Caleb Ralston - Depth-first content +2. Simon Sinek - Purpose-driven messaging +3. Seth Godin - Remarkable positioning +4. Gary Vaynerchuk - Authentic documentation + +**Use For:** Building personal brands, thought leadership, community + +### The "Clarity Stack" (Complex Products) +1. Donald Miller - Simplify the message +2. Alex Hormozi - Quantify the value +3. Russell Brunson - Demonstrate transformation +4. Rory Sutherland - Reframe psychological barriers + +**Use For:** SaaS, technical products, innovative solutions + +## Mixing Rules + +### Do Combine: +- Trust + Value (Ralston + Hormozi) +- Story + Structure (Brunson + StoryBrand) +- Clarity + Urgency (StoryBrand + Kennedy) +- Depth + Empathy (Ralston + Voss) +- Purpose + Community (Sinek + Godin) + +### Don't Combine: +- Too many storytellers (Brunson + GaryVee + Godin = confusing) +- Conflicting tonalities (Kennedy urgency + Ralston depth = jarring) +- Competing structures (StoryBrand + Perfect Webinar = too complex) + +### Maximum Frameworks Per Piece: +- Short-form (social posts): 1-2 frameworks +- Mid-form (emails, blogs): 2-3 frameworks +- Long-form (landing pages, VSLs): 3-4 frameworks + +## Selection Priority + +**Always Start With:** +1. Identify business type +2. Identify format +3. Identify goal +4. Identify price point +5. Consult decision trees in SELECTION-ENGINE.md + +**Then:** +- Select primary framework (drives core message) +- Add secondary framework (provides structure) +- Add tertiary framework (handles objections or adds personality) + +## Testing Combinations + +To test if a combination works: +1. Do the frameworks complement or compete? +2. Does each serve a distinct purpose? +3. Is the message clear or confusing? +4. Does it match the business context? +5. Does it feel natural or forced? + +If any answer is concerning, simplify. + +## Evolution + +These combinations evolve based on: +- What converts best +- New frameworks added +- Market changes +- Platform updates +- User feedback + +The system adapts but the core principles remain: clarity, value, trust, urgency. 
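## Worked Example (Illustrative Sketch)

To make the selection priority above concrete, here is a minimal Python sketch of how the decision tables in this file could be encoded. It is an illustration only, not the actual logic: the function name, the business-type keys, and the price thresholds are assumptions, it omits the goal and awareness-stage inputs, and the real decision trees live in `SELECTION-ENGINE.md`.

```python
# Illustrative sketch only -- the real decision trees live in SELECTION-ENGINE.md.
# Keys, thresholds, and the fallback stack are assumptions that mirror the tables above.

def select_frameworks(business_type: str, fmt: str, price: float) -> list[str]:
    """Return a primary/secondary/tertiary framework stack for a piece of copy."""
    # 1. For landing pages, price point drives the stacks from "By Content Format".
    if fmt == "landing_page":
        if price >= 2000:
            return ["Donald Miller", "Caleb Ralston", "Chris Voss"]
        if price >= 100:
            return ["Donald Miller", "Alex Hormozi", "Caleb Ralston"]
        return ["Russell Brunson", "Alex Hormozi", "Dan Kennedy"]

    # 2. Otherwise fall back to the business-type stacks from "By Business Type".
    stacks = {
        "coaching": ["Caleb Ralston", "Chris Voss", "Donald Miller"],
        "saas": ["Donald Miller", "Alex Hormozi", "Russell Brunson"],
        "ecommerce": ["Alex Hormozi", "Dan Kennedy", "Russell Brunson"],
        "agency": ["Chris Do", "Caleb Ralston", "Seth Godin"],
        "b2b": ["Donald Miller", "Simon Sinek", "Rory Sutherland"],
    }
    stack = stacks.get(business_type, ["Donald Miller", "Alex Hormozi"])  # generic fallback

    # 3. Respect the "Maximum Frameworks Per Piece" rule for short-form copy.
    if fmt in ("social_post", "ad"):
        stack = stack[:2]
    return stack

print(select_frameworks("coaching", "landing_page", price=2500))
# -> ['Donald Miller', 'Caleb Ralston', 'Chris Voss']
```

The point of the sketch is the shape of the decision, not the specific mapping: format and price narrow the options first, business type fills in the rest, and the short-form cap keeps the piece from carrying too many frameworks.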
diff --git a/skills/landing-page-expert/frameworks/additional-experts.md b/skills/landing-page-expert/frameworks/additional-experts.md new file mode 100644 index 000000000..21670b304 --- /dev/null +++ b/skills/landing-page-expert/frameworks/additional-experts.md @@ -0,0 +1,428 @@ +# Additional Expert Frameworks + +This file contains methodologies from 8 additional marketing experts. Each provides unique value for specific situations. + +--- + +## Donald Miller (StoryBrand) + +### Core Philosophy +"If you confuse, you'll lose. Clarity trumps persuasion." + +### The StoryBrand 7-Part Framework + +**1. A CHARACTER** (Your Customer) +- Start with the hero (the customer, not you) +- Define their desire clearly +- Make it specific and relatable + +**2. HAS A PROBLEM** (External, Internal, Philosophical) +- **External:** The surface problem (need website) +- **Internal:** How the problem makes them feel (frustrated, overwhelmed) +- **Philosophical:** Why it's unjust (small businesses deserve great marketing too) + +**3. MEETS A GUIDE** (Your Brand) +- You are Yoda, not Luke +- Show empathy ("I understand...") +- Show authority ("We've helped 500+ companies...") + +**4. WHO GIVES THEM A PLAN** (Your Process) +- **Process Plan:** Steps to buy/use your product +- **Agreement Plan:** Alleviate fears ("30-day guarantee") +- Make it simple (3-5 steps max) + +**5. CALLS THEM TO ACTION** (Direct + Transitional) +- **Direct CTA:** "Buy Now" "Schedule Call" +- **Transitional CTA:** "Download Guide" "Watch Demo" +- Always include both + +**6. THAT ENDS IN SUCCESS** (Paint the Vision) +- Show the promised land +- Specific outcomes +- Emotional benefits + +**7. AND HELPS THEM AVOID FAILURE** (Stake the Stakes) +- What happens if they don't act? +- The cost of inaction +- Not fear-mongering, just honest stakes + +### When to Use +- Complex products needing clarity +- Confused target audiences +- Any business struggling to articulate value +- B2B services + +### BrandScript Template +``` +CHARACTER: [Who is your customer?] +PROBLEM: [External/Internal/Philosophical] +GUIDE: [Empathy + Authority statement] +PLAN: [3-step process] +DIRECT CTA: [Clear action] +TRANSITIONAL CTA: [Low-risk option] +SUCCESS: [Specific positive outcome] +FAILURE: [Stakes of inaction] +``` + +--- + +## Dan Kennedy + +### Core Philosophy +"There are no boring products, only boring marketing. Direct response wins." + +### Key Methodologies + +**1. The PAS Formula (Problem-Agitate-Solve)** +- **Problem:** State the problem clearly +- **Agitate:** Make them feel the pain +- **Solve:** Present your solution + +**2. Reason-Why Marketing** +- Always give them a reason why +- "Why is this on sale?" → "We overstocked" +- "Why should I trust you?" → "We've been in business 30 years" +- "Why this price?" → "We're celebrating our anniversary" + +**3. The Message-to-Market Match** +- Right message + Right market + Right timing = Success +- Get any one wrong = Failure +- Example: Selling snow shovels in summer = wrong timing + +**4. The Herd Theory** +- People follow other people +- Show social proof aggressively +- "Join 10,000+ customers" +- Testimonials are mandatory + +**5. Takeaway Selling** +- "This might not be for everyone..." +- "We only work with committed clients..." 
+- Make them qualify themselves +- Works for premium positioning + +### When to Use +- Direct response campaigns +- Urgency-driven offers +- Local businesses +- Service businesses +- Any situation requiring fast decisions + +--- + +## Gary Vaynerchuk + +### Core Philosophy +"Document, don't create. Provide value without asking for anything in return." + +### Key Methodologies + +**1. Jab, Jab, Jab, Right Hook** +- **Jabs:** Give value (content, entertainment, education) +- **Right Hook:** Ask for the sale +- Ratio: 90% jabs, 10% right hooks +- Build goodwill before asking + +**2. Document Don't Create** +- Stop creating perfect content +- Document your real journey +- Behind-the-scenes beats polished +- Authenticity > production value + +**3. The $1.80 Instagram Strategy** +- Find 10 hashtags in your niche +- Leave your "2 cents" on the top 9 posts for each +- 10 hashtags × 9 posts × $0.02 = $1.80 of value +- Builds community and visibility + +**4. Pillar Content Strategy** +- Create one long-form pillar piece +- Cut it into 10-20 micro-content pieces +- Distribute across all platforms +- Maximize ROI per content piece + +**5. Platform-Native Content** +- Don't cross-post the same content +- Tailor content for each platform +- LinkedIn = professional insights +- Instagram = visual stories +- Twitter = quick thoughts +- TikTok = entertainment + +### When to Use +- Building personal brands +- Community-focused businesses +- Social media marketing +- Awareness stage content +- Authentic, relatable brands + +--- + +## Simon Sinek + +### Core Philosophy +"People don't buy what you do, they buy why you do it." + +### Key Methodologies + +**1. The Golden Circle** +- **WHY:** Your purpose, cause, belief (innermost circle) +- **HOW:** Your process, values, differentiators (middle circle) +- **WHAT:** Your products/services (outer circle) + +**Most companies communicate:** What → How → Why +**Inspiring companies communicate:** Why → How → What + +**2. Start With Why Framework** +``` +WHY: Why does your company exist? +(Beyond making money - the mission) + +HOW: How do you do it differently? +(Your values, your approach) + +WHAT: What do you sell? +(The products/services) +``` + +**Example:** +- **WHY:** We believe in empowering small businesses +- **HOW:** By making enterprise-level tools accessible +- **WHAT:** We sell marketing software + +**3. The Celery Test** +- If someone advises you to buy Oreos and celery +- And your WHY is health +- You only buy the celery +- Use your WHY to filter decisions + +### When to Use +- Mission-driven brands +- Purpose-driven marketing +- B2B positioning +- Leadership messaging +- Brand differentiation +- Building loyal communities + +--- + +## Seth Godin + +### Core Philosophy +"Be remarkable. In a crowded marketplace, fitting in is failing." + +### Key Methodologies + +**1. Purple Cow** +- In a field of brown cows, be purple +- Remarkable = worth making a remark about +- Don't appeal to everyone, delight the few +- Spread through word of mouth + +**2. Permission Marketing** +- Anticipated: They want to hear from you +- Personal: Messages are relevant to them +- Relevant: Content matters to them +- Don't interrupt, get permission first + +**3. Tribes** +- People want to belong +- Give them something to gather around +- Lead a movement, not just sell a product +- Create insider language and identity + +**4. 
The Dip** +- Every new thing hits a dip (hard middle part) +- Winners push through +- Losers quit too early or too late +- Know when to quit vs when to push + +**5. Small is the New Big** +- Don't need massive scale +- 1,000 true fans can sustain you +- Niche beats mass market +- Micro-communities are powerful + +### When to Use +- Differentiation challenges +- Building communities +- Permission-based marketing +- Email marketing strategy +- Niche positioning +- Thought leadership + +--- + +## Rory Sutherland + +### Core Philosophy +"The opposite of a good idea can also be a good idea. Perceived value matters more than actual value." + +### Key Methodologies + +**1. Psychological Value** +- Value is perception, not reality +- Red Bull could taste better, but the small can makes it seem potent +- First class is just seats, but the experience is everything +- Price affects perceived quality + +**2. The Alchemy of Marketing** +- Small changes in perception = massive changes in behavior +- Reframe problems as opportunities +- Example: Uber made waiting for a car less annoying by showing you where it is + +**3. Behavioral Economics in Marketing** +- **Anchoring:** Show high price first +- **Scarcity:** Limited availability increases value +- **Social Proof:** Others' behavior influences decisions +- **Loss Aversion:** Fear of losing > desire for gaining + +**4. Costly Signaling** +- Expensive marketing signals quality +- A bad product can't afford great ads (so great ads = confidence) +- Luxury brands never discount (signals exclusive value) + +**5. Make the Wait Better** +- Can't make flight faster? Make waiting better (airport lounges) +- Can't lower price? Increase perceived value +- Reframe the problem + +### When to Use +- Behavioral change campaigns +- Premium positioning +- Overcoming price objections +- Differentiation through psychology +- Luxury or high-end products + +--- + +## Chris Do + +### Core Philosophy +"Don't sell time, sell transformation. Value-based pricing beats hourly rates." + +### Key Methodologies + +**1. The Futur's Pricing Framework** +- Don't charge for time +- Charge for value delivered +- "What's the ROI?" beats "How long will it take?" + +**2. The Value Conversation** +``` +Question 1: "What happens if we do this right?" +(Uncover the upside) + +Question 2: "What happens if we do nothing?" +(Uncover the cost of inaction) + +Question 3: "What happens if we do this wrong?" +(Uncover the risk) +``` + +**3. The Consultative Sale** +- Don't pitch, diagnose +- Ask questions, don't tell +- Uncover the real problem +- Present solution to THAT problem + +**4. The Pricing Range** +- Never give single price +- Give 3 options (good, better, best) +- Anchor high, sell middle +- Most pick middle option + +**5. Win Without Pitching** +- Position yourself as expert first +- Let them chase you +- Advise, don't pitch +- Charge for the advice (paid discovery) + +### When to Use +- Service businesses (agencies, consultants) +- Premium positioning +- B2B sales +- High-ticket offers +- Creative services +- Selling expertise + +--- + +## Chris Voss + +### Core Philosophy +"Tactical empathy is the ability to recognize emotions and respond to them in a way that validates them." + +### Key Methodologies + +**1. Tactical Empathy** +- Recognize their emotion +- Label it +- Validate it +- Example: "It seems like you're frustrated with..." + +**2. Calibrated Questions** +- Questions that give them control but guide the conversation +- "How am I supposed to do that?" 
+- "What about this is important to you?" +- "How would you like me to proceed?" + +**3. Mirroring** +- Repeat last 1-3 words they said +- Shows you're listening +- Encourages them to elaborate +- "...elaborate?" +- Them: "It needs to be better" +- You: "...better?" +- Them: "Yeah, we need faster results" + +**4. The Accusation Audit** +- List all negative things they might think +- Preemptively address them +- "You probably think this is too expensive..." +- "You're probably wondering if this actually works..." +- Takes power away from objections + +**5. "No"-Oriented Questions** +- "Is now a bad time to talk?" +- "Have you given up on finding a solution?" +- Easier to say "no" than "yes" +- "No" gives feeling of control + +**6. The "That's Right" Moment** +- Summarize their situation so perfectly they say "that's right" +- "So what you're saying is..." +- When they say "that's right" = breakthrough +- ("You're right" = they want to end conversation) + +### When to Use +- Objection handling +- High-stakes sales +- Complex negotiations +- Overcoming skepticism +- Trust-building +- Consultative selling + +--- + +## Combining Multiple Frameworks + +### The Trust + Value Stack (High-Ticket) +1. Ralston: Build depth and trust +2. Voss: Show you understand with tactical empathy +3. Hormozi: Present irresistible offer + +### The Story + Clarity Stack (Info Products) +1. Brunson: Epiphany bridge story +2. StoryBrand: Clear framework +3. Kennedy: Direct response urgency + +### The Authority + Community Stack (Personal Brand) +1. Sinek: Start with why +2. Godin: Build tribe around it +3. GaryVee: Document the journey + +### The Value Positioning Stack (Premium Services) +1. Sutherland: Psychological reframing +2. Do: Value-based pricing conversation +3. Voss: Objection handling diff --git a/skills/landing-page-expert/frameworks/alex-hormozi.md b/skills/landing-page-expert/frameworks/alex-hormozi.md new file mode 100644 index 000000000..46e4108b0 --- /dev/null +++ b/skills/landing-page-expert/frameworks/alex-hormozi.md @@ -0,0 +1,231 @@ +# Alex Hormozi Framework + +## Overview + +Alex Hormozi built his fortune on creating "irresistible offers" that make people feel stupid saying no. His framework focuses on maximizing perceived value while minimizing perceived risk and effort. + +**Core Philosophy:** The offer is everything. Make it so good they'd feel stupid saying no. + +## Key Methodologies + +### 1. The Value Equation + +**Value = (Dream Outcome × Perceived Likelihood of Achievement) / (Time Delay × Effort and Sacrifice)** + +**To increase value, you must:** +1. **Increase Dream Outcome** - Make the result more desirable +2. **Increase Perceived Likelihood** - Prove it works (testimonials, guarantees, proof) +3. **Decrease Time Delay** - Make it happen faster +4. **Decrease Effort & Sacrifice** - Make it easier + +**Application:** Evaluate every element of your offer against this equation. + +### 2. 
Grand Slam Offer + +**The components of an irresistible offer:** + +**Core Offer:** +- The main thing they're buying +- The transformation promised + +**Bonuses (Stack):** +- Additional valuable items +- Solve related problems +- Increase perceived value dramatically + +**Guarantee:** +- Remove or reverse risk +- Make it safer than not buying +- Examples: Money-back, performance-based, conditional + +**Scarcity/Urgency:** +- Limited quantity +- Limited time +- Legitimate scarcity only + +**Price Anchoring:** +- Show the value of each component +- Total value >> actual price +- Make price seem like a steal + +### 3. Offer Stacking + +**How to stack value:** + +1. **Start with core offer** ($X value) +2. **Add bonus 1** - Solves problem A ($Y value) +3. **Add bonus 2** - Solves problem B ($Z value) +4. **Add bonus 3** - Accelerates results ($W value) +5. **Add guarantee** - Removes risk (Priceless) + +**Total Value: $X + $Y + $Z + $W** +**Your Price: Much less than total** + +**The psychology:** Each addition makes saying "no" harder. + +### 4. The Value Ladder + +**Structure of ascending offers:** + +**Bottom:** Free content (build trust) +**Step 2:** Low-ticket offer $7-$100 (convert) +**Step 3:** Mid-ticket offer $500-$2,000 (profit) +**Top:** High-ticket offer $5,000+ (transform) + +**Purpose:** Meet people where they are, ascend them as trust builds. + +### 5. The 4 Ways to Grow Any Business + +1. **Get more customers** (traffic/leads) +2. **Increase average purchase value** (raise prices, upsells) +3. **Increase purchase frequency** (retention, repeat purchases) +4. **Decrease churn** (better product, better onboarding) + +**Application:** Focus on 1-2 at a time. Small improvements in each = exponential growth. + +### 6. Market Sophistication Levels + +**Level 1: Direct Claim** +- "Lose weight" - First to market, simple claim works + +**Level 2: Quantified Claim** +- "Lose 30 pounds in 30 days" - Need to be more specific + +**Level 3: Mechanism** +- "Keto diet helps you lose 30 pounds" - Explain how + +**Level 4: New Mechanism** +- "Intermittent fasting + keto" - Combine or improve + +**Level 5: Identification with Prospect** +- "For busy moms who've tried everything" - Empathy-based + +**Your offer sophistication must match market sophistication.** + +### 7. The Negative ROI Offer + +**Make the offer so good, NOT taking it costs them money.** + +**Example:** +"I'll help you generate $50,000 in new revenue. If I don't, you pay nothing. If I do, you pay me $10,000." + +**Risk reversal:** They lose money by NOT taking the offer. + +**Application:** Calculate the ROI of your service. Make your guarantee around it. 
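Because the value equation and the checklist's "10x" rule are plain arithmetic, a short sketch can make them concrete. This is a minimal illustration, assuming invented 1-10 scores and dollar figures; only the formula itself and the 10x threshold come from the framework above.

```python
# Illustrative numbers only -- the scores and prices below are assumptions, not Hormozi's data.

def perceived_value(dream_outcome: float, likelihood: float,
                    time_delay: float, effort: float) -> float:
    """Value = (Dream Outcome x Perceived Likelihood) / (Time Delay x Effort and Sacrifice)."""
    return (dream_outcome * likelihood) / (time_delay * effort)

# Same offer scored before and after adding proof and faster onboarding (1-10 scales).
before = perceived_value(dream_outcome=8, likelihood=4, time_delay=6, effort=5)
after = perceived_value(dream_outcome=8, likelihood=9, time_delay=2, effort=3)
print(round(before, 2), round(after, 2))  # 1.07 vs 12.0 -- proof and speed dominate

# The checklist's rule of thumb: total stacked value should exceed price by 10x or more.
stack = {"core offer": 4000, "bonus 1": 1500, "bonus 2": 1000, "bonus 3": 750}
price = 700
print(sum(stack.values()) / price >= 10)  # True: 7250 / 700 is roughly 10.4x
```

Note how the denominator moves the score more than the headline promise does: raising perceived likelihood and cutting time and effort multiplies value without changing the dream outcome at all.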
+ +## When to Use Hormozi's Framework + +### Best For: +- Offers with clear ROI +- Services with measurable outcomes +- High-ticket sales +- Competitive markets +- Price-sensitive audiences + +### Format Applications: +- Landing pages (offer stacking) +- Sales pages +- Webinar pitches +- Email sequences (value building) +- VSLs (value demonstration) + +### Avoid When: +- Intangible outcomes +- Brand/awareness stage +- Cannot quantify value +- Ultra-premium positioning (value stacking can cheapen) + +## Combining with Other Frameworks + +### Hormozi + StoryBrand +- StoryBrand: Clarify the message +- Hormozi: Stack the value +- **Result:** Clear + Irresistible + +### Hormozi + Brunson +- Brunson: Story-driven desire +- Hormozi: Value-stacked offer +- **Result:** Emotional + Logical appeal + +### Hormozi + Ralston +- Ralston: Build trust through depth +- Hormozi: Present irresistible offer +- **Result:** Trust + Value + +## Content Templates + +### The Value Stack Landing Page +``` +HEADLINE: [Dream Outcome in X Timeframe] + +SUBHEADLINE: [Without Pain Point] + +SECTION 1: The Problem +- What they're experiencing +- Why it's frustrating +- The cost of inaction + +SECTION 2: The Solution (Core Offer) +- What you provide +- How it solves the problem +- The transformation + +SECTION 3: What You Get (The Stack) +Component 1: [Name] ($X,XXX value) +├─ What it is +└─ What problem it solves + +Component 2: [Name] ($X,XXX value) +├─ What it is +└─ What problem it solves + +Component 3: [Name] ($X,XXX value) +├─ What it is +└─ What problem it solves + +Total Value: $XX,XXX +Your Investment: $X,XXX + +SECTION 4: Proof +- Testimonials +- Case studies +- Results + +SECTION 5: Guarantee +- Zero risk explanation +- What happens if it doesn't work +- Why you can offer this + +SECTION 6: Scarcity +- Why limited +- What happens when it's gone + +SECTION 7: Final CTA +- Clear next step +- Deadline reminder +``` + +## Key Principles Summary + +1. **The offer is everything** - Master this first +2. **Value > Price** - Perceived value must dwarf price +3. **Proof matters** - Claims without proof = worthless +4. **Remove risk** - Guarantee aggressively +5. **Stack intelligently** - Each bonus solves a real problem +6. **Urgency must be real** - Fake scarcity kills trust +7. **Test and optimize** - Continuous improvement + +## The Hormozi Checklist + +Before launching an offer: +- [ ] Is the dream outcome crystal clear? +- [ ] Have I maximized perceived likelihood? (proof, testimonials) +- [ ] Have I minimized time delay? (how fast can they get results) +- [ ] Have I minimized effort? (how easy is it) +- [ ] Does each bonus solve a real related problem? +- [ ] Is my guarantee strong enough to remove risk? +- [ ] Is my scarcity legitimate? +- [ ] Does total value exceed price by 10x or more? +- [ ] Would I feel stupid saying no to this? + +If any answer is "no," improve the offer. diff --git a/skills/landing-page-expert/frameworks/caleb-ralston.md b/skills/landing-page-expert/frameworks/caleb-ralston.md new file mode 100644 index 000000000..83484faea --- /dev/null +++ b/skills/landing-page-expert/frameworks/caleb-ralston.md @@ -0,0 +1,307 @@ +# Caleb Ralston Framework + +## Overview + +Caleb Ralston is a brand strategist known for **depth-first content strategy** and building trust through demonstration of expertise rather than promotional tactics. + +**Core Philosophy:** Go deep before going wide. Build authority through substance, not volume. + +## Key Methodologies + +### 1. 
The Brand Journey Framework + +**Four foundational questions that define your positioning:** + +1. **Where are you taking people?** + - What's the destination/transformation? + - What does success look like? + - Paint the vision clearly + +2. **Why should they trust you to take them there?** + - Your unique expertise/experience + - Proof you've done it before + - Credibility markers + +3. **What makes your path different?** + - Your contrarian approach + - What you do that others don't + - Your unique methodology + +4. **Who is this journey for (and not for)?** + - Specific ideal customer + - Clear exclusions + - Niche clarity + +**Application:** Start every positioning exercise with these four questions. They form the foundation of all messaging. + +### 2. The Waterfall Method + +**Turn one deep piece of content into 50+ derivative assets.** + +**The Process:** +1. Create one comprehensive, valuable piece (2,000-5,000 words) +2. Extract key insights, quotes, frameworks +3. Repurpose into multiple formats: + - Twitter/X threads + - LinkedIn posts + - Instagram carousels + - Email sequences + - Video scripts + - Podcast talking points + +**Why it works:** Depth creates authority. Repurposing creates reach. You get both. + +**Example Structure:** +``` +Deep Article (3,000 words) +├─ 10 LinkedIn posts (key insights) +├─ 5 Twitter threads (frameworks) +├─ 15 Instagram carousels (visual breakdowns) +├─ 7 emails (story + lessons) +├─ 3 video scripts (demonstrations) +└─ 10 quotes/graphics +``` + +### 3. Depth vs Width Strategy + +**Most creators go wide (lots of surface-level content). Ralston goes deep.** + +**Width Strategy (Common but Weak):** +- Post daily across all platforms +- Cover many topics shallowly +- Chase trends and virality +- Result: High volume, low authority + +**Depth Strategy (Ralston's Approach):** +- Post less frequently but with substance +- Master specific topics deeply +- Create evergreen, referenceable content +- Result: Lower volume, high authority + +**The Depth-First Ladder:** +1. Start: One platform, one topic, deep content +2. Build: Establish expertise through consistent depth +3. Expand: Once known for that topic, add related topics +4. Scale: With authority established, expand platforms + +**Never go wide before going deep.** + +### 4. The Two-Column Exercise + +**Find your contrarian positioning by mapping conventional wisdom vs your truth.** + +**How to do it:** +``` +CONVENTIONAL WISDOM | MY CONTRARIAN TRUTH +------------------------------|------------------------- +"Post every day" | "Post when you have something valuable to say" +"Be everywhere" | "Master one platform first" +"Volume = success" | "Depth = authority" +``` + +**This exercise reveals:** +- Your unique positioning +- Your controversial takes +- Your message differentiation +- Content angles that stand out + +**Application:** Use these contrarian positions as hooks and core messages. + +### 5. The Accordion Method + +**Strategic expansion and contraction of focus.** + +**The Rhythm:** +- **Contract:** Go deep on one topic for 3-6 months +- **Expand:** Broaden to related topics once authority established +- **Contract:** Go deep on the expanded topic +- **Expand:** Continue the rhythm + +**Example:** +``` +Month 1-3: Deep on "Email marketing for coaches" +Month 4-6: Expand to "Entire marketing funnel for coaches" +Month 7-9: Deep on "Webinar strategies for coaches" +Month 10-12: Expand to "Complete client acquisition for coaches" +``` + +**Why it works:** Depth builds authority. 
Expansion leverages that authority. Alternating creates sustainable growth. + +### 6. Association-Based Branding + +**Your brand is defined by your associations more than your claims.** + +**The Principle:** +- Don't say you're an expert +- Associate with recognized experts +- Don't claim authority +- Create content with authorities + +**How to build associations:** +1. **Collaborate:** Interview recognized experts +2. **Reference:** Cite and credit top people in your field +3. **Engage:** Thoughtfully comment on authority figures' content +4. **Learn publicly:** Share what you're learning from experts +5. **Contribute:** Add value to conversations led by authorities + +**The Result:** You become known by the company you keep. + +### 7. The Trust Architecture + +**Build trust before asking for the sale.** + +**The Sequence:** +1. **Demonstrate expertise** (show you know the topic deeply) +2. **Provide value** (give away your best ideas) +3. **Show understanding** (prove you get their situation) +4. **Offer solution** (present your service/product) + +**Most people skip to step 4. Ralston's method requires 1-3 first.** + +**Content Ratio:** +- 80% value, education, demonstration +- 20% offer, sell, promote + +**Why it works:** People buy from those they trust. Trust is built through generosity and demonstration, not claims. + +### 8. The Depth-First Content Framework + +**Every piece of content should:** + +1. **Teach Something Substantial** + - Not surface-level tips + - Actual frameworks or methods + - Something they can implement + +2. **Demonstrate Your Thinking** + - Show how you approach problems + - Reveal your unique perspective + - Let them see your depth + +3. **Be Referenceable** + - They can come back to it + - They can share it + - It has lasting value + +**The Depth Test:** +Could someone: +- Learn something genuinely new? +- Implement it today? +- Reference it in the future? +- Share it with someone else? + +If no to any of these, go deeper. 
+ +## When to Use Ralston's Framework + +### Best For: +- **High-ticket services** ($2,000+) +- **Coaching and consulting** +- **B2B services requiring trust** +- **Authority/thought leadership building** +- **Complex services** +- **Long sales cycles** + +### Format Applications: +- Long-form LinkedIn posts +- In-depth blog articles +- Educational email sequences +- Authority-building content +- Webinar content +- Podcast episodes + +### Avoid When: +- Need quick conversions +- Low-ticket impulse purchases +- Volume-based businesses +- Trend-chasing required + +## Combining with Other Frameworks + +### Ralston + Hormozi (High-Ticket Services) +- Ralston: Build depth and trust +- Hormozi: Stack value when presenting offer +- **Sequence:** Depth content → Value demonstration → Grand Slam Offer + +### Ralston + StoryBrand (Consulting) +- Ralston: Establish authority through depth +- StoryBrand: Clarify the transformation message +- **Result:** Clear message + demonstrated expertise + +### Ralston + Voss (Complex Sales) +- Ralston: Show depth and understanding +- Voss: Handle objections with tactical empathy +- **Result:** Trust + objection handling + +## Content Templates + +### The Depth-First LinkedIn Post +``` +HOOK (contrarian or insightful statement) + +CONTEXT (set up the problem or observation) + +FRAMEWORK (teach a specific method or approach) +├─ Step 1 +├─ Step 2 +└─ Step 3 + +EXAMPLE (show it in action) + +INSIGHT (the deeper lesson) + +APPLICATION (how they can use this) +``` + +### The Waterfall Base Article +``` +TITLE: How to [Transformation] Using [Method] + +INTRODUCTION +├─ The common way (that doesn't work) +├─ The contrarian way (that does) +└─ What you'll learn + +SECTION 1: The Problem (Go Deep) +SECTION 2: The Solution Framework (Go Deep) +SECTION 3: Implementation (Go Deep) +SECTION 4: Common Mistakes (Go Deep) + +CONCLUSION +└─ Summary + One clear next step +``` + +## Key Principles Summary + +1. **Depth beats width** - Go deep before going wide +2. **Demonstration beats claims** - Show don't tell +3. **Value beats promotion** - Give before asking +4. **Association beats assertion** - Build through connections +5. **Substance beats volume** - Quality over quantity +6. **Trust beats tactics** - Long game wins +7. **Contrarian beats conventional** - Stand out through difference + +## Ralston's Content Checklist + +Before publishing, ask: +- [ ] Is this substantial enough to be referenceable? +- [ ] Does it demonstrate my thinking/expertise? +- [ ] Can someone implement this today? +- [ ] Does it build trust or just promote? +- [ ] Is it deep enough to stand out? +- [ ] Does it show a contrarian or unique perspective? +- [ ] Would I want to read this 6 months from now? + +If any answer is "no," go deeper. + +## The Ralston Test + +**Ask yourself:** +"If someone consumed all my content, would they: +1. Know my unique approach? +2. Trust me to help them? +3. See me as an authority? +4. Understand my contrarian positions? +5. Have learned substantial, implementable methods?" + +**If no to any, you need more depth.** diff --git a/skills/landing-page-expert/frameworks/russell-brunson.md b/skills/landing-page-expert/frameworks/russell-brunson.md new file mode 100644 index 000000000..4a04ef05b --- /dev/null +++ b/skills/landing-page-expert/frameworks/russell-brunson.md @@ -0,0 +1,278 @@ +# Russell Brunson Framework + +## Overview + +Russell Brunson is the founder of ClickFunnels and pioneer of the "funnel hacker" movement. 
His framework centers on story-driven selling and psychological triggers that guide prospects through buying decisions. + +**Core Philosophy:** Stories sell. Facts tell, stories sell. Every sale is preceded by an emotional story. + +## Key Methodologies + +### 1. The Perfect Webinar Framework + +**The 90-minute structure that converts:** + +**MINUTES 1-15: The Big Idea** +- Origin story (your journey) +- The one big promise +- Build desire for outcome + +**MINUTES 15-60: The Content (3 Secrets)** +- **Secret #1:** Reframe the vehicle (how to achieve result) +- **Secret #2:** Address internal beliefs (why they can do it) +- **Secret #3:** Address external roadblocks (what's been stopping them) + +**MINUTES 60-75: The Stack (Present Offer)** +- Component-by-component value build +- Total value >> Price +- Risk reversal (guarantee) + +**MINUTES 75-90: Close** +- Urgency/scarcity +- FAQ handling +- Final CTA + +**Why it works:** Educates first, sells second. By the time you pitch, they're already convinced. + +### 2. The Epiphany Bridge + +**Turn your breakthrough moments into their aha moments.** + +**The Story Structure:** +1. **The Backstory** - Where were you before? +2. **The Desire** - What did you want? +3. **The Wall** - What was blocking you? +4. **The Epiphany** - Your breakthrough moment +5. **The Plan** - What you discovered +6. **The Result** - Where you are now +7. **Their Bridge** - How they can have the same epiphany + +**Application:** Every piece of selling copy should contain an epiphany bridge story. + +**The Psychology:** When they experience your epiphany, they believe in your solution. + +### 3. The Value Ladder (Ascension Model) + +**Structure of offers:** + +**BAIT:** Free + shipping book, lead magnet (break even or small loss) +**FRONTEND:** $7-$100 product (convert traffic to buyers) +**MIDDLE:** $500-$2,000 product (first profit) +**BACKEND:** $5,000+ high-ticket (transformation) + +**Purpose:** Start relationship low-risk, increase commitment as trust builds. + +### 4. The Soap Opera Sequence (Email) + +**5-email sequence for new subscribers:** + +**Email 1: Set the stage** +- Who you are +- Why they should listen +- What they'll learn +- Cliffhanger for email 2 + +**Email 2: Episode 2** +- Continue the story +- Provide value +- Build connection +- Cliffhanger for email 3 + +**Email 3: Epiphany** +- Your breakthrough moment +- The key insight +- How it changed everything +- Cliffhanger for email 4 + +**Email 4: Hidden benefits** +- Additional value they didn't expect +- Surprising insights +- Cliffhanger for email 5 + +**Email 5: Urgency/CTA** +- Time to take action +- Special offer +- Clear call-to-action +- Deadline + +**Why it works:** Creates addiction to your emails like a TV soap opera. + +### 5. The False Belief Patterns + +**Three levels of false beliefs to address:** + +**Level 1: Vehicle (The What)** +- False Belief: "Funnels don't work" +- Your Reframe: "Here's proof funnels work when done right" + +**Level 2: Internal (The Who)** +- False Belief: "This might work for others, but not for me" +- Your Reframe: "Here's why you CAN do this" + +**Level 3: External (The How)** +- False Belief: "I don't have time/money/tech skills" +- Your Reframe: "Here's how we remove those obstacles" + +**Application:** Identify all three levels of false beliefs your prospect has. Address each with stories. + +### 6. 
The Hook, Story, Offer Framework + +**Universal structure for all content:** + +**HOOK:** Grab attention (first 3 seconds) +- Question +- Bold statement +- Surprising fact +- Controversy + +**STORY:** Build connection & desire +- Epiphany bridge +- Customer success story +- Origin story +- Analogies/metaphors + +**OFFER:** Present solution +- What they get +- Value stack +- Risk reversal +- Clear CTA + +**Use this for:** Ads, posts, videos, emails, landing pages. + +### 7. The Attractive Character + +**Every brand needs a personality people connect with:** + +**The 4 Types:** +1. **The Leader** - Teach and guide (Tony Robbins) +2. **The Adventurer** - Discover and share (Bear Grylls) +3. **The Reporter** - Interview and share insights (Tim Ferriss) +4. **The Reluctant Hero** - Ordinary person, extraordinary results (Katniss) + +**Elements of an Attractive Character:** +- Backstory (where you came from) +- Parables (stories that teach) +- Character flaws (relatability) +- Polarize (strong opinions) +- Use "us vs them" positioning + +**Why it works:** People buy from people, not companies. + +### 8. The Question Loop (Open Loops) + +**Keep attention by opening questions and delaying answers.** + +**How to use:** +1. Ask intriguing question +2. Provide partial answer +3. Promise full answer if they keep reading/watching +4. Close loop at the end + +**Example:** +"I made $100K in 30 days using this weird funnel hack. I'll show you exactly what it was at the end of this post..." + +**Application:** Use in subject lines, hooks, throughout copy to maintain engagement. + +## When to Use Brunson's Framework + +### Best For: +- Story-driven markets +- Info products +- Courses and coaching +- Webinar funnels +- Email marketing +- Community-based brands + +### Format Applications: +- Webinars (Perfect Webinar) +- VSLs (Hook-Story-Offer) +- Email sequences (Soap Opera) +- Social media (Attractive Character) +- Landing pages (Epiphany Bridge) + +### Avoid When: +- Ultra-corporate B2B +- Commodity products +- Audiences resistant to story-selling + +## Combining with Other Frameworks + +### Brunson + Hormozi +- Brunson: Story-driven desire +- Hormozi: Value stacking +- **Result:** Emotional + Logical appeal + +### Brunson + Ralston +- Brunson: Story to build connection +- Ralston: Depth to build authority +- **Result:** Connection + Credibility + +### Brunson + StoryBrand +- Brunson: Personal epiphany stories +- StoryBrand: Brand story clarity +- **Result:** Personal + Brand story + +## Content Templates + +### The Epiphany Bridge Post +``` +HOOK: [Intriguing statement about your transformation] + +BACKSTORY: [Where you were before] +"I used to [struggle]..." + +DESIRE: [What you wanted] +"All I wanted was [goal]..." + +THE WALL: [What was blocking you] +"But every time I tried, [obstacle]..." + +THE EPIPHANY: [Your breakthrough] +"Then one day, I realized [insight]..." + +THE PLAN: [What you discovered] +"Here's what I discovered: [framework]..." + +THE RESULT: [Where you are now] +"Now, [current state]..." + +THE BRIDGE: [How they can cross] +"You can have this same epiphany by [offer/CTA]..." +``` + +### The Perfect Webinar Script +``` +SLIDE 1: Title + Big Promise +SLIDES 2-5: Origin Story +SLIDES 6-15: Secret #1 (Vehicle) +SLIDES 16-25: Secret #2 (Internal) +SLIDES 26-35: Secret #3 (External) +SLIDES 36-45: The Stack (Offer) +SLIDES 46-50: Objection Handling +SLIDE 51: Final CTA +``` + +## Key Principles Summary + +1. **Stories sell** - Facts tell, stories sell +2. 
**Epiphany drives belief** - Share your breakthrough +3. **Address false beliefs** - Handle objections via story +4. **Open loops** - Keep them engaged +5. **Attractive character** - Be relatable and real +6. **Ascension model** - Start small, build trust +7. **Hook-Story-Offer** - Universal framework + +## The Brunson Checklist + +Before launching content/offer: +- [ ] Does it start with a compelling hook? +- [ ] Have I shared an epiphany bridge story? +- [ ] Have I addressed vehicle false beliefs? +- [ ] Have I addressed internal false beliefs? +- [ ] Have I addressed external false beliefs? +- [ ] Is my attractive character clear? +- [ ] Have I created open loops? +- [ ] Does it end with a clear offer/CTA? +- [ ] Would this keep someone watching/reading? + +If any answer is "no," add more story. diff --git a/skills/lead-research-assistant/SKILL.md b/skills/lead-research-assistant/SKILL.md new file mode 100644 index 000000000..aa63d957d --- /dev/null +++ b/skills/lead-research-assistant/SKILL.md @@ -0,0 +1,199 @@ +--- +name: lead-research-assistant +description: Identifies high-quality leads for your product or service by analyzing your business, searching for target companies, and providing actionable contact strategies. Perfect for sales, business development, and marketing professionals. +--- + +# Lead Research Assistant + +This skill helps you identify and qualify potential leads for your business by analyzing your product/service, understanding your ideal customer profile, and providing actionable outreach strategies. + +## When to Use This Skill + +- Finding potential customers or clients for your product/service +- Building a list of companies to reach out to for partnerships +- Identifying target accounts for sales outreach +- Researching companies that match your ideal customer profile +- Preparing for business development activities + +## What This Skill Does + +1. **Understands Your Business**: Analyzes your product/service, value proposition, and target market +2. **Identifies Target Companies**: Finds companies that match your ideal customer profile based on: + - Industry and sector + - Company size and location + - Technology stack and tools they use + - Growth stage and funding + - Pain points your product solves +3. **Prioritizes Leads**: Ranks companies based on fit score and relevance +4. **Provides Contact Strategies**: Suggests how to approach each lead with personalized messaging +5. **Enriches Data**: Gathers relevant information about decision-makers and company context + +## How to Use + +### Basic Usage + +Simply describe your product/service and what you're looking for: + +``` +I'm building [product description]. Find me 10 companies in [location/industry] +that would be good leads for this. +``` + +### With Your Codebase + +For even better results, run this from your product's source code directory: + +``` +Look at what I'm building in this repository and identify the top 10 companies +in [location/industry] that would benefit from this product. +``` + +### Advanced Usage + +For more targeted research: + +``` +My product: [description] +Ideal customer profile: +- Industry: [industry] +- Company size: [size range] +- Location: [location] +- Current pain points: [pain points] +- Technologies they use: [tech stack] + +Find me 20 qualified leads with contact strategies for each. +``` + +## Instructions + +When a user requests lead research: + +1. 
**Understand the Product/Service** + - If in a code directory, analyze the codebase to understand the product + - Ask clarifying questions about the value proposition + - Identify key features and benefits + - Understand what problems it solves + +2. **Define Ideal Customer Profile** + - Determine target industries and sectors + - Identify company size ranges + - Consider geographic preferences + - Understand relevant pain points + - Note any technology requirements + +3. **Research and Identify Leads** + - Search for companies matching the criteria + - Look for signals of need (job postings, tech stack, recent news) + - Consider growth indicators (funding, expansion, hiring) + - Identify companies with complementary products/services + - Check for budget indicators + +4. **Prioritize and Score** + - Create a fit score (1-10) for each lead + - Consider factors like: + - Alignment with ICP + - Signals of immediate need + - Budget availability + - Competitive landscape + - Timing indicators + +5. **Provide Actionable Output** + + For each lead, provide: + - **Company Name** and website + - **Why They're a Good Fit**: Specific reasons based on their business + - **Priority Score**: 1-10 with explanation + - **Decision Maker**: Role/title to target (e.g., "VP of Engineering") + - **Contact Strategy**: Personalized approach suggestions + - **Value Proposition**: How your product solves their specific problem + - **Conversation Starters**: Specific points to mention in outreach + - **LinkedIn URL**: If available, for easy connection + +6. **Format the Output** + + Present results in a clear, scannable format: + + ```markdown + # Lead Research Results + + ## Summary + - Total leads found: [X] + - High priority (8-10): [X] + - Medium priority (5-7): [X] + - Average fit score: [X] + + --- + + ## Lead 1: [Company Name] + + **Website**: [URL] + **Priority Score**: [X/10] + **Industry**: [Industry] + **Size**: [Employee count/revenue range] + + **Why They're a Good Fit**: + [2-3 specific reasons based on their business] + + **Target Decision Maker**: [Role/Title] + **LinkedIn**: [URL if available] + + **Value Proposition for Them**: + [Specific benefit for this company] + + **Outreach Strategy**: + [Personalized approach - mention specific pain points, recent company news, or relevant context] + + **Conversation Starters**: + - [Specific point 1] + - [Specific point 2] + + --- + + [Repeat for each lead] + ``` + +7. **Offer Next Steps** + - Suggest saving results to a CSV for CRM import + - Offer to draft personalized outreach messages + - Recommend prioritization based on timing + - Suggest follow-up research for top leads + +## Examples + +### Example 1: From Lenny's Newsletter + +**User**: "I'm building a tool that masks sensitive data in AI coding assistant queries. Find potential leads." + +**Output**: Creates a prioritized list of companies that: +- Use AI coding assistants (Copilot, Cursor, etc.) +- Handle sensitive data (fintech, healthcare, legal) +- Have evidence in their GitHub repos of using coding agents +- May have accidentally exposed sensitive data in code +- Includes LinkedIn URLs of relevant decision-makers + +### Example 2: Local Business + +**User**: "I run a consulting practice for remote team productivity. Find me 10 companies in the Bay Area that recently went remote." 
+ +**Output**: Identifies companies that: +- Recently posted remote job listings +- Announced remote-first policies +- Are hiring distributed teams +- Show signs of remote work challenges +- Provides personalized outreach strategies for each + +## Tips for Best Results + +- **Be specific** about your product and its unique value +- **Run from your codebase** if applicable for automatic context +- **Provide context** about your ideal customer profile +- **Specify constraints** like industry, location, or company size +- **Request follow-up** research on promising leads for deeper insights + +## Related Use Cases + +- Drafting personalized outreach emails after identifying leads +- Building a CRM-ready CSV of qualified prospects +- Researching specific companies in detail +- Analyzing competitor customer bases +- Identifying partnership opportunities diff --git a/skills/notebooklm b/skills/notebooklm new file mode 160000 index 000000000..d8cc39683 --- /dev/null +++ b/skills/notebooklm @@ -0,0 +1 @@ +Subproject commit d8cc396835298671e2e047c8b78a9bdaf2444b6e diff --git a/skills/notion-template-processor/SKILL.md b/skills/notion-template-processor/SKILL.md new file mode 100644 index 000000000..ebdd8ee30 --- /dev/null +++ b/skills/notion-template-processor/SKILL.md @@ -0,0 +1,301 @@ +--- +name: Notion Template Processor +description: Fills Notion database templates with data and delivers via email using Notion MCP integration +allowed-tools: + - MCP + - API + - Bash +--- + +# Notion Template Processor + +This skill enables automated template processing using Notion databases and delivery via email. It leverages the Notion MCP server for seamless integration with Notion workspaces, allowing you to fill templates with data and send the results via email. + +## When to Use This Skill + +Activate this skill when you need to: +- Fill out templates stored in Notion databases +- Automate document generation from structured data +- Send templated content via email +- Process client proposals, reports, or form responses +- Generate personalized communications from database records + +## Capabilities + +### Template Processing +- **Database Query**: Search and retrieve templates from Notion databases +- **Dynamic Filling**: Replace placeholders with data (manual input or from other sources) +- **Conditional Logic**: Show/hide sections based on data values +- **Multi-part Templates**: Handle complex documents with multiple sections + +### Notion Integration (MCP) +- **Database Operations**: Query, filter, and update Notion databases +- **Page Management**: Create, read, update, and archive pages +- **Content Blocks**: Manipulate text, lists, tables, and rich content +- **Property Management**: Handle all Notion property types (text, number, date, select, etc.) 
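As a concrete illustration of the database query and dynamic filling capabilities above, here is a minimal Python sketch. It is an assumption-laden example: the skill itself performs these steps through the Notion MCP server, while the sketch uses the official `notion-client` SDK instead, and the token, database ID, and property names are placeholders you would replace with your own.

```python
# A minimal sketch, assuming the official notion-client Python SDK.
# The skill normally runs these operations via the Notion MCP server; database ID,
# token, and property names ("status", etc.) are placeholders, not fixed values.
import re
from notion_client import Client

notion = Client(auth="YOUR_INTEGRATION_TOKEN")  # token from developers.notion.com

# 1. Find a published template page in the templates database.
results = notion.databases.query(
    database_id="YOUR_TEMPLATES_DATABASE_ID",
    filter={"property": "status", "select": {"equals": "Published"}},
)["results"]
template_page_id = results[0]["id"]  # page whose blocks hold the {{placeholder}} text

# 2. Fill {{placeholder}} slots (syntax described under Template Format below).
def fill(template_text: str, data: dict[str, str]) -> str:
    """Replace each {{name}} with data[name], leaving unknown placeholders intact."""
    return re.sub(r"\{\{(\w+)\}\}", lambda m: data.get(m.group(1), m.group(0)), template_text)

# In practice the text would be pulled from the template page's blocks first.
print(fill("Proposal for {{client_name}}, budget {{budget}}",
           {"client_name": "Acme Corp", "budget": "$50,000"}))
# -> Proposal for Acme Corp, budget $50,000
```

The filled text would then be converted to HTML or PDF and handed to the email delivery step described next.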
+ +### Email Delivery +- **SMTP Integration**: Send via any SMTP server +- **Rich HTML**: Convert Notion content to formatted HTML emails +- **Attachment Support**: Include PDFs, documents, or additional files +- **Template Rendering**: Send rendered templates or raw content + +### Data Sources +- **Manual Input**: Accept data directly in conversation +- **API Integration**: Pull data from external services +- **Database Lookup**: Retrieve information from other Notion databases +- **File Parsing**: Extract data from uploaded documents + +## How to Use + +### Basic Template Filling +``` +Use the notion-template-processor skill to fill the "Client Proposal" template +in my Notion workspace with: +- Client Name: Acme Corp +- Project Scope: Website redesign +- Budget: $50,000 +- Timeline: 3 months + +Then email the filled template to john@acmecorp.com with subject "Acme Corp Proposal" +``` + +### Advanced Workflow +``` +Query my Notion CRM database for clients where status = "Qualified". +For each client, fill out the "Project Proposal" template using their company +information, attach relevant case studies from Notion, and email it from my +sales account with personalized subject lines. +``` + +### Template Creation +``` +Create a new template page in my "Templates" Notion database called "Meeting Summary" +with placeholders for: +- Meeting Date +- Attendees +- Key Decisions +- Action Items +- Next Steps + +Save it for future use with the notion-template-processor skill. +``` + +## Template Format + +### Required Template Structure +Each template page in Notion must have: + +**Required Properties:** +- `template_id`: Unique identifier for the template +- `template_type`: Type of template (proposal, report, email, etc.) +- `status`: Must be "Published" to be available + +**Content Structure:** +- Use `{{placeholder_name}}` syntax for dynamic content +- Include sections marked with `{% if condition %}` for conditional logic +- Use standard Notion blocks (paragraphs, headings, lists, tables) + +### Example Template Content: +``` +# Project Proposal - {{client_name}} + +## Client Information +- **Company**: {{client_name}} +- **Contact**: {{contact_email}} +- **Budget**: {{budget}} + +## Project Overview +{{project_description}} + +{% if has_attachments %} +## Attachments +{{attachments_list}} +{% endif %} + +## Next Steps +{{next_steps}} +``` + +## Input Format + +### Template Selection +- **By Name**: "Use the 'Client Proposal' template" +- **By Database**: "From my 'Templates' database, use template_id 'proposal-001'" +- **By Page URL**: "Use the template at https://notion.so/page/..." + +### Data Input Methods +- **Structured**: Key-value pairs (Name: Value) +- **JSON**: Complete data objects +- **YAML**: For complex hierarchical data +- **From Database**: Reference other Notion databases + +### Email Configuration +- **Recipient**: Single email or list +- **Subject**: Template with placeholders +- **Sender**: Authenticated account +- **Attachments**: File references or generated content + +## Output Format + +### Success Response +``` +✅ Template filled successfully! 
+📧 Email sent to john@company.com +🔗 Link to generated page: https://notion.so/generated-page-id +📎 Attachments: proposal.pdf, case-study.pdf +``` + +### Error Handling +``` +❌ Template not found: "Client Proposal" +💡 Try: "List available templates in my workspace" + +❌ Missing required data: client_name +💡 Required fields: client_name, budget, timeline +``` + +## Email Integration + +### Supported Methods +- **SMTP**: Direct server connection +- **API Services**: SendGrid, Mailgun, Amazon SES +- **OAuth**: Gmail, Outlook integration + +### Email Templates +Convert Notion content to: +- **Plain Text**: Simple text emails +- **HTML**: Rich formatted emails +- **Markdown**: GitHub-style formatting +- **PDF**: Attached document generation + +### Delivery Options +- **Immediate**: Send right after filling +- **Scheduled**: Queue for later delivery +- **Batch**: Send multiple emails in sequence +- **Conditional**: Send only with certain data values + +## Integration with Other Skills + +This skill composes well with: +- **Database skills**: For data source integration +- **Document skills**: For attachment generation +- **API skills**: For external data fetching +- **Formatting skills**: For content preprocessing + +## Example Workflows + +### Sales Proposal Automation +1. Lead qualified in CRM database +2. Pull client data from Notion +3. Fill proposal template +4. Attach case studies from Notion +5. Email personalized proposal + +### Report Generation +1. Query project metrics from database +2. Fill monthly report template +3. Convert to PDF format +4. Email to stakeholders with charts + +### Client Onboarding +1. New client form submitted +2. Fill welcome template +3. Attach company documents +4. Send personalized onboarding email + +## Security & Permissions + +### Notion Access +- **Workspace Access**: Requires integration token with read/write permissions +- **Database Access**: Specific database-level permissions +- **Page Permissions**: Respects Notion's sharing settings + +### Email Security +- **SMTP Encryption**: TLS/SSL support +- **API Security**: Secure token storage +- **Privacy**: No data logging or retention +- **Consent**: Only send emails with user approval + +## Best Practices + +### Template Design +- Use clear placeholder naming convention +- Include validation rules in templates +- Test templates with sample data first +- Version control template changes + +### Workflow Planning +- Test complete end-to-end process before production use +- Set up error handling for missing templates/data +- Monitor email delivery success rates +- Keep templates updated with current needs + +### Performance +- Cache frequently used templates +- Batch process multiple emails when possible +- Use database indexes for template queries +- Monitor API rate limits + +## Limitations + +### Notion MCP Constraints +- Requires active Notion integration token +- Limited by Notion API rate limits +- Some advanced formatting may not translate perfectly + +### Email Constraints +- SMTP server limitations (daily/hourly limits) +- Attachment size restrictions +- Recipient authentication requirements + +### Template Constraints +- Complex conditional logic limited by MCP capabilities +- Rich media rendering depends on export options +- Real-time collaboration features not supported + +## Troubleshooting + +### Template Not Found +```bash +# Check available templates +curl -X POST https://notion-api-endpoint/search \ + -H "Authorization: Bearer YOUR_TOKEN" \ + -d '{"query": "template"}' +``` + +### Email 
Delivery Issues +- Verify SMTP server configuration +- Check sender authentication +- Review spam filters for content +- Confirm recipient validity + +### Permission Errors +- Refresh Notion integration token +- Verify database sharing permissions +- Check workspace access levels + +## Getting Started + +1. **Setup Notion Integration** + - Create integration at https://developers.notion.com + - Generate API token + - Share target databases with integration + +2. **Create Template Database** + - Create Notion database for templates + - Add required properties (template_id, status, etc.) + - Populate with template pages + +3. **Configure Email Settings** + - Choose delivery method (SMTP/API/OAuth) + - Store credentials securely + - Test connection with sample email + +4. **Test the Skill** + ``` + Hey Claude, use the notion-template-processor to fill a test template + and send it to my email address for verification. + ``` + +This skill provides a complete solution for template processing and automated email delivery using Notion's powerful database and content management capabilities. diff --git a/skills/pdf/LICENSE.txt b/skills/pdf/LICENSE.txt new file mode 100644 index 000000000..c55ab4222 --- /dev/null +++ b/skills/pdf/LICENSE.txt @@ -0,0 +1,30 @@ +© 2025 Anthropic, PBC. All rights reserved. + +LICENSE: Use of these materials (including all code, prompts, assets, files, +and other components of this Skill) is governed by your agreement with +Anthropic regarding use of Anthropic's services. If no separate agreement +exists, use is governed by Anthropic's Consumer Terms of Service or +Commercial Terms of Service, as applicable: +https://www.anthropic.com/legal/consumer-terms +https://www.anthropic.com/legal/commercial-terms +Your applicable agreement is referred to as the "Agreement." "Services" are +as defined in the Agreement. + +ADDITIONAL RESTRICTIONS: Notwithstanding anything in the Agreement to the +contrary, users may not: + +- Extract these materials from the Services or retain copies of these + materials outside the Services +- Reproduce or copy these materials, except for temporary copies created + automatically during authorized use of the Services +- Create derivative works based on these materials +- Distribute, sublicense, or transfer these materials to any third party +- Make, offer to sell, sell, or import any inventions embodied in these + materials +- Reverse engineer, decompile, or disassemble these materials + +The receipt, viewing, or possession of these materials does not convey or +imply any license or right beyond those expressly granted above. + +Anthropic retains all right, title, and interest in these materials, +including all copyrights, patents, and other intellectual property rights. diff --git a/skills/pdf/SKILL.md b/skills/pdf/SKILL.md new file mode 100644 index 000000000..f6a22ddf8 --- /dev/null +++ b/skills/pdf/SKILL.md @@ -0,0 +1,294 @@ +--- +name: pdf +description: Comprehensive PDF manipulation toolkit for extracting text and tables, creating new PDFs, merging/splitting documents, and handling forms. When Claude needs to fill in a PDF form or programmatically process, generate, or analyze PDF documents at scale. +license: Proprietary. LICENSE.txt has complete terms +--- + +# PDF Processing Guide + +## Overview + +This guide covers essential PDF processing operations using Python libraries and command-line tools. For advanced features, JavaScript libraries, and detailed examples, see reference.md. 
If you need to fill out a PDF form, read forms.md and follow its instructions. + +## Quick Start + +```python +from pypdf import PdfReader, PdfWriter + +# Read a PDF +reader = PdfReader("document.pdf") +print(f"Pages: {len(reader.pages)}") + +# Extract text +text = "" +for page in reader.pages: + text += page.extract_text() +``` + +## Python Libraries + +### pypdf - Basic Operations + +#### Merge PDFs +```python +from pypdf import PdfWriter, PdfReader + +writer = PdfWriter() +for pdf_file in ["doc1.pdf", "doc2.pdf", "doc3.pdf"]: + reader = PdfReader(pdf_file) + for page in reader.pages: + writer.add_page(page) + +with open("merged.pdf", "wb") as output: + writer.write(output) +``` + +#### Split PDF +```python +reader = PdfReader("input.pdf") +for i, page in enumerate(reader.pages): + writer = PdfWriter() + writer.add_page(page) + with open(f"page_{i+1}.pdf", "wb") as output: + writer.write(output) +``` + +#### Extract Metadata +```python +reader = PdfReader("document.pdf") +meta = reader.metadata +print(f"Title: {meta.title}") +print(f"Author: {meta.author}") +print(f"Subject: {meta.subject}") +print(f"Creator: {meta.creator}") +``` + +#### Rotate Pages +```python +reader = PdfReader("input.pdf") +writer = PdfWriter() + +page = reader.pages[0] +page.rotate(90) # Rotate 90 degrees clockwise +writer.add_page(page) + +with open("rotated.pdf", "wb") as output: + writer.write(output) +``` + +### pdfplumber - Text and Table Extraction + +#### Extract Text with Layout +```python +import pdfplumber + +with pdfplumber.open("document.pdf") as pdf: + for page in pdf.pages: + text = page.extract_text() + print(text) +``` + +#### Extract Tables +```python +with pdfplumber.open("document.pdf") as pdf: + for i, page in enumerate(pdf.pages): + tables = page.extract_tables() + for j, table in enumerate(tables): + print(f"Table {j+1} on page {i+1}:") + for row in table: + print(row) +``` + +#### Advanced Table Extraction +```python +import pandas as pd + +with pdfplumber.open("document.pdf") as pdf: + all_tables = [] + for page in pdf.pages: + tables = page.extract_tables() + for table in tables: + if table: # Check if table is not empty + df = pd.DataFrame(table[1:], columns=table[0]) + all_tables.append(df) + +# Combine all tables +if all_tables: + combined_df = pd.concat(all_tables, ignore_index=True) + combined_df.to_excel("extracted_tables.xlsx", index=False) +``` + +### reportlab - Create PDFs + +#### Basic PDF Creation +```python +from reportlab.lib.pagesizes import letter +from reportlab.pdfgen import canvas + +c = canvas.Canvas("hello.pdf", pagesize=letter) +width, height = letter + +# Add text +c.drawString(100, height - 100, "Hello World!") +c.drawString(100, height - 120, "This is a PDF created with reportlab") + +# Add a line +c.line(100, height - 140, 400, height - 140) + +# Save +c.save() +``` + +#### Create PDF with Multiple Pages +```python +from reportlab.lib.pagesizes import letter +from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, PageBreak +from reportlab.lib.styles import getSampleStyleSheet + +doc = SimpleDocTemplate("report.pdf", pagesize=letter) +styles = getSampleStyleSheet() +story = [] + +# Add content +title = Paragraph("Report Title", styles['Title']) +story.append(title) +story.append(Spacer(1, 12)) + +body = Paragraph("This is the body of the report. 
" * 20, styles['Normal']) +story.append(body) +story.append(PageBreak()) + +# Page 2 +story.append(Paragraph("Page 2", styles['Heading1'])) +story.append(Paragraph("Content for page 2", styles['Normal'])) + +# Build PDF +doc.build(story) +``` + +## Command-Line Tools + +### pdftotext (poppler-utils) +```bash +# Extract text +pdftotext input.pdf output.txt + +# Extract text preserving layout +pdftotext -layout input.pdf output.txt + +# Extract specific pages +pdftotext -f 1 -l 5 input.pdf output.txt # Pages 1-5 +``` + +### qpdf +```bash +# Merge PDFs +qpdf --empty --pages file1.pdf file2.pdf -- merged.pdf + +# Split pages +qpdf input.pdf --pages . 1-5 -- pages1-5.pdf +qpdf input.pdf --pages . 6-10 -- pages6-10.pdf + +# Rotate pages +qpdf input.pdf output.pdf --rotate=+90:1 # Rotate page 1 by 90 degrees + +# Remove password +qpdf --password=mypassword --decrypt encrypted.pdf decrypted.pdf +``` + +### pdftk (if available) +```bash +# Merge +pdftk file1.pdf file2.pdf cat output merged.pdf + +# Split +pdftk input.pdf burst + +# Rotate +pdftk input.pdf rotate 1east output rotated.pdf +``` + +## Common Tasks + +### Extract Text from Scanned PDFs +```python +# Requires: pip install pytesseract pdf2image +import pytesseract +from pdf2image import convert_from_path + +# Convert PDF to images +images = convert_from_path('scanned.pdf') + +# OCR each page +text = "" +for i, image in enumerate(images): + text += f"Page {i+1}:\n" + text += pytesseract.image_to_string(image) + text += "\n\n" + +print(text) +``` + +### Add Watermark +```python +from pypdf import PdfReader, PdfWriter + +# Create watermark (or load existing) +watermark = PdfReader("watermark.pdf").pages[0] + +# Apply to all pages +reader = PdfReader("document.pdf") +writer = PdfWriter() + +for page in reader.pages: + page.merge_page(watermark) + writer.add_page(page) + +with open("watermarked.pdf", "wb") as output: + writer.write(output) +``` + +### Extract Images +```bash +# Using pdfimages (poppler-utils) +pdfimages -j input.pdf output_prefix + +# This extracts all images as output_prefix-000.jpg, output_prefix-001.jpg, etc. +``` + +### Password Protection +```python +from pypdf import PdfReader, PdfWriter + +reader = PdfReader("input.pdf") +writer = PdfWriter() + +for page in reader.pages: + writer.add_page(page) + +# Add password +writer.encrypt("userpassword", "ownerpassword") + +with open("encrypted.pdf", "wb") as output: + writer.write(output) +``` + +## Quick Reference + +| Task | Best Tool | Command/Code | +|------|-----------|--------------| +| Merge PDFs | pypdf | `writer.add_page(page)` | +| Split PDFs | pypdf | One page per file | +| Extract text | pdfplumber | `page.extract_text()` | +| Extract tables | pdfplumber | `page.extract_tables()` | +| Create PDFs | reportlab | Canvas or Platypus | +| Command line merge | qpdf | `qpdf --empty --pages ...` | +| OCR scanned PDFs | pytesseract | Convert to image first | +| Fill PDF forms | pdf-lib or pypdf (see forms.md) | See forms.md | + +## Next Steps + +- For advanced pypdfium2 usage, see reference.md +- For JavaScript libraries (pdf-lib), see reference.md +- If you need to fill out a PDF form, follow the instructions in forms.md +- For troubleshooting guides, see reference.md diff --git a/skills/pdf/forms.md b/skills/pdf/forms.md new file mode 100644 index 000000000..4e234506d --- /dev/null +++ b/skills/pdf/forms.md @@ -0,0 +1,205 @@ +**CRITICAL: You MUST complete these steps in order. 
Do not skip ahead to writing code.**
+
+If you need to fill out a PDF form, first check to see if the PDF has fillable form fields. Run this script from this file's directory:
+ `python scripts/check_fillable_fields.py [pdf file]`, and depending on the result go to either the "Fillable fields" or the "Non-fillable fields" section below and follow its instructions.
+
+# Fillable fields
+If the PDF has fillable form fields:
+- Run this script from this file's directory: `python scripts/extract_form_field_info.py [input pdf] [output json]`. It will create a JSON file with a list of fields in this format:
+```
+[
+  {
+    "field_id": (unique ID for the field),
+    "page": (page number, 1-based),
+    "rect": ([left, bottom, right, top] bounding box in PDF coordinates, y=0 is the bottom of the page),
+    "type": ("text", "checkbox", "radio_group", or "choice"),
+  },
+  // Checkboxes have "checked_value" and "unchecked_value" properties:
+  {
+    "field_id": (unique ID for the field),
+    "page": (page number, 1-based),
+    "type": "checkbox",
+    "checked_value": (Set the field to this value to check the checkbox),
+    "unchecked_value": (Set the field to this value to uncheck the checkbox),
+  },
+  // Radio groups have a "radio_options" list with the possible choices.
+  {
+    "field_id": (unique ID for the field),
+    "page": (page number, 1-based),
+    "type": "radio_group",
+    "radio_options": [
+      {
+        "value": (set the field to this value to select this radio option),
+        "rect": (bounding box for the radio button for this option)
+      },
+      // Other radio options
+    ]
+  },
+  // Multiple choice fields have a "choice_options" list with the possible choices:
+  {
+    "field_id": (unique ID for the field),
+    "page": (page number, 1-based),
+    "type": "choice",
+    "choice_options": [
+      {
+        "value": (set the field to this value to select this option),
+        "text": (display text of the option)
+      },
+      // Other choice options
+    ],
+  }
+]
+```
+- Convert the PDF to PNGs (one image for each page) with this script (run from this file's directory):
+`python scripts/convert_pdf_to_images.py [input pdf] [output directory]`
+Then analyze the images to determine the purpose of each form field (make sure to convert the bounding box PDF coordinates to image coordinates).
+- Create a `field_values.json` file in this format with the values to be entered for each field:
+```
+[
+  {
+    "field_id": "last_name", // Must match the field_id from `extract_form_field_info.py`
+    "description": "The user's last name",
+    "page": 1, // Must match the "page" value in the extracted field info JSON
+    "value": "Simpson"
+  },
+  {
+    "field_id": "Checkbox12",
+    "description": "Checkbox to be checked if the user is 18 or over",
+    "page": 1,
+    "value": "/On" // If this is a checkbox, use its "checked_value" value to check it. If it's a radio button group, use one of the "value" values in "radio_options".
+  },
+  // more fields
+]
+```
+- Run the `fill_fillable_fields.py` script from this file's directory to create a filled-in PDF:
+`python scripts/fill_fillable_fields.py [input pdf] [field_values.json] [output pdf]`
+This script will verify that the field IDs and values you provide are valid; if it prints error messages, correct the appropriate fields and try again.
+
+# Non-fillable fields
+If the PDF doesn't have fillable form fields, you'll need to visually determine where the data should be added and create text annotations. Follow the steps below *exactly*. You MUST perform all of these steps to ensure that the form is accurately completed. Details for each step are below.
+- Convert the PDF to PNG images and determine field bounding boxes.
+- Create a JSON file with field information and validation images showing the bounding boxes.
+- Validate the bounding boxes.
+- Use the bounding boxes to fill in the form.
+
+### Step 1: Visual Analysis (REQUIRED)
+- Convert the PDF to PNG images. Run this script from this file's directory:
+`python scripts/convert_pdf_to_images.py [input pdf] [output directory]`
+The script will create a PNG image for each page in the PDF.
+- Carefully examine each PNG image and identify all form fields and areas where the user should enter data. For each form field where the user should enter text, determine bounding boxes for both the form field label and the area where the user should enter text. The label and entry bounding boxes MUST NOT INTERSECT; the text entry box should only include the area where data should be entered. Usually this area will be immediately to the side, above, or below its label. Entry bounding boxes must be tall and wide enough to contain their text.
+
+These are some examples of form structures that you might see:
+
+*Label inside box*
+```
+┌────────────────────────┐
+│ Name:                  │
+└────────────────────────┘
+```
+The input area should be to the right of the "Name" label and extend to the edge of the box.
+
+*Label before line*
+```
+Email: _______________________
+```
+The input area should be above the line and include its entire width.
+
+*Label under line*
+```
+_________________________
+Name
+```
+The input area should be above the line and include the entire width of the line. This is common for signature and date fields.
+
+*Label above line*
+```
+Please enter any special requests:
+________________________________________________
+```
+The input area should extend from the bottom of the label to the line, and should include the entire width of the line.
+
+*Checkboxes*
+```
+Are you a US citizen? Yes □ No □
+```
+For checkboxes:
+- Look for small square boxes (□) - these are the actual checkboxes to target. They may be to the left or right of their labels.
+- Distinguish between label text ("Yes", "No") and the clickable checkbox squares.
+- The entry bounding box should cover ONLY the small square, not the text label.
+
+### Step 2: Create fields.json and validation images (REQUIRED)
+- Create a file named `fields.json` with information for the form fields and bounding boxes in this format:
+```
+{
+  "pages": [
+    {
+      "page_number": 1,
+      "image_width": (first page image width in pixels),
+      "image_height": (first page image height in pixels),
+    },
+    {
+      "page_number": 2,
+      "image_width": (second page image width in pixels),
+      "image_height": (second page image height in pixels),
+    }
+    // additional pages
+  ],
+  "form_fields": [
+    // Example for a text field.
+    {
+      "page_number": 1,
+      "description": "The user's last name should be entered here",
+      // Bounding boxes are [left, top, right, bottom]. The bounding boxes for the label and text entry should not overlap.
+      "field_label": "Last name",
+      "label_bounding_box": [30, 125, 95, 142],
+      "entry_bounding_box": [100, 125, 280, 142],
+      "entry_text": {
+        "text": "Johnson", // This text will be added as an annotation at the entry_bounding_box location
+        "font_size": 14, // optional, defaults to 14
+        "font_color": "000000", // optional, RRGGBB format, defaults to 000000 (black)
+      }
+    },
+    // Example for a checkbox. TARGET THE SQUARE for the entry bounding box, NOT THE TEXT
+    {
+      "page_number": 2,
+      "description": "Checkbox that should be checked if the user is over 18",
+      "entry_bounding_box": [140, 525, 155, 540], // Small box over checkbox square
+      "field_label": "Yes",
+      "label_bounding_box": [100, 525, 132, 540], // Box containing "Yes" text
+      // Use "X" to check a checkbox.
+      "entry_text": {
+        "text": "X",
+      }
+    }
+    // additional form field entries
+  ]
+}
+```
+
+Create validation images by running this script from this file's directory for each page:
+`python scripts/create_validation_image.py [page number] [fields.json file] [input image path] [output image path]`
+
+The validation images will have red rectangles where text should be entered, and blue rectangles covering label text.
+
+### Step 3: Validate Bounding Boxes (REQUIRED)
+#### Automated intersection check
+- Verify that none of the bounding boxes intersect and that the entry bounding boxes are tall enough by checking the fields.json file with the `check_bounding_boxes.py` script (run from this file's directory):
+`python scripts/check_bounding_boxes.py [fields.json]`
+
+If there are errors, reanalyze the relevant fields, adjust the bounding boxes, and iterate until there are no remaining errors. Remember: label (blue) bounding boxes should contain text labels, entry (red) boxes should not.
+
+#### Manual image inspection
+**CRITICAL: Do not proceed without visually inspecting validation images**
+- Red rectangles must ONLY cover input areas
+- Red rectangles MUST NOT contain any text
+- Blue rectangles should contain label text
+- For checkboxes:
+  - Red rectangle MUST be centered on the checkbox square
+  - Blue rectangle should cover the text label for the checkbox
+
+- If any rectangles look wrong, fix fields.json, regenerate the validation images, and verify again. Repeat this process until the bounding boxes are fully accurate.
+
+### Step 4: Add annotations to the PDF
+Run this script from this file's directory to create a filled-out PDF using the information in fields.json:
+`python scripts/fill_pdf_form_with_annotations.py [input pdf] [fields.json] [output pdf]`
diff --git a/skills/pdf/reference.md b/skills/pdf/reference.md
new file mode 100644
index 000000000..41400bf4f
--- /dev/null
+++ b/skills/pdf/reference.md
@@ -0,0 +1,612 @@
+# PDF Processing Advanced Reference
+
+This document contains advanced PDF processing features, detailed examples, and additional libraries not covered in the main skill instructions.
+
+## pypdfium2 Library (Apache/BSD License)
+
+### Overview
+pypdfium2 is a Python binding for PDFium (Chromium's PDF library). It's excellent for fast PDF rendering, image generation, and serves as a PyMuPDF replacement.
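+
+### Installation
+pypdfium2 is distributed on PyPI (`pip install pypdfium2`). As a minimal sketch, assuming the package is installed and `document.pdf` stands in for your own file, you can confirm the binding imports and can open a document:
+```python
+import pypdfium2 as pdfium
+
+# Open a document and report its page count before doing heavier rendering work
+pdf = pdfium.PdfDocument("document.pdf")
+print(f"Pages: {len(pdf)}")
+```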
+ +### Render PDF to Images +```python +import pypdfium2 as pdfium +from PIL import Image + +# Load PDF +pdf = pdfium.PdfDocument("document.pdf") + +# Render page to image +page = pdf[0] # First page +bitmap = page.render( + scale=2.0, # Higher resolution + rotation=0 # No rotation +) + +# Convert to PIL Image +img = bitmap.to_pil() +img.save("page_1.png", "PNG") + +# Process multiple pages +for i, page in enumerate(pdf): + bitmap = page.render(scale=1.5) + img = bitmap.to_pil() + img.save(f"page_{i+1}.jpg", "JPEG", quality=90) +``` + +### Extract Text with pypdfium2 +```python +import pypdfium2 as pdfium + +pdf = pdfium.PdfDocument("document.pdf") +for i, page in enumerate(pdf): + text = page.get_text() + print(f"Page {i+1} text length: {len(text)} chars") +``` + +## JavaScript Libraries + +### pdf-lib (MIT License) + +pdf-lib is a powerful JavaScript library for creating and modifying PDF documents in any JavaScript environment. + +#### Load and Manipulate Existing PDF +```javascript +import { PDFDocument } from 'pdf-lib'; +import fs from 'fs'; + +async function manipulatePDF() { + // Load existing PDF + const existingPdfBytes = fs.readFileSync('input.pdf'); + const pdfDoc = await PDFDocument.load(existingPdfBytes); + + // Get page count + const pageCount = pdfDoc.getPageCount(); + console.log(`Document has ${pageCount} pages`); + + // Add new page + const newPage = pdfDoc.addPage([600, 400]); + newPage.drawText('Added by pdf-lib', { + x: 100, + y: 300, + size: 16 + }); + + // Save modified PDF + const pdfBytes = await pdfDoc.save(); + fs.writeFileSync('modified.pdf', pdfBytes); +} +``` + +#### Create Complex PDFs from Scratch +```javascript +import { PDFDocument, rgb, StandardFonts } from 'pdf-lib'; +import fs from 'fs'; + +async function createPDF() { + const pdfDoc = await PDFDocument.create(); + + // Add fonts + const helveticaFont = await pdfDoc.embedFont(StandardFonts.Helvetica); + const helveticaBold = await pdfDoc.embedFont(StandardFonts.HelveticaBold); + + // Add page + const page = pdfDoc.addPage([595, 842]); // A4 size + const { width, height } = page.getSize(); + + // Add text with styling + page.drawText('Invoice #12345', { + x: 50, + y: height - 50, + size: 18, + font: helveticaBold, + color: rgb(0.2, 0.2, 0.8) + }); + + // Add rectangle (header background) + page.drawRectangle({ + x: 40, + y: height - 100, + width: width - 80, + height: 30, + color: rgb(0.9, 0.9, 0.9) + }); + + // Add table-like content + const items = [ + ['Item', 'Qty', 'Price', 'Total'], + ['Widget', '2', '$50', '$100'], + ['Gadget', '1', '$75', '$75'] + ]; + + let yPos = height - 150; + items.forEach(row => { + let xPos = 50; + row.forEach(cell => { + page.drawText(cell, { + x: xPos, + y: yPos, + size: 12, + font: helveticaFont + }); + xPos += 120; + }); + yPos -= 25; + }); + + const pdfBytes = await pdfDoc.save(); + fs.writeFileSync('created.pdf', pdfBytes); +} +``` + +#### Advanced Merge and Split Operations +```javascript +import { PDFDocument } from 'pdf-lib'; +import fs from 'fs'; + +async function mergePDFs() { + // Create new document + const mergedPdf = await PDFDocument.create(); + + // Load source PDFs + const pdf1Bytes = fs.readFileSync('doc1.pdf'); + const pdf2Bytes = fs.readFileSync('doc2.pdf'); + + const pdf1 = await PDFDocument.load(pdf1Bytes); + const pdf2 = await PDFDocument.load(pdf2Bytes); + + // Copy pages from first PDF + const pdf1Pages = await mergedPdf.copyPages(pdf1, pdf1.getPageIndices()); + pdf1Pages.forEach(page => mergedPdf.addPage(page)); + + // Copy specific pages from 
second PDF (pages 0, 2, 4) + const pdf2Pages = await mergedPdf.copyPages(pdf2, [0, 2, 4]); + pdf2Pages.forEach(page => mergedPdf.addPage(page)); + + const mergedPdfBytes = await mergedPdf.save(); + fs.writeFileSync('merged.pdf', mergedPdfBytes); +} +``` + +### pdfjs-dist (Apache License) + +PDF.js is Mozilla's JavaScript library for rendering PDFs in the browser. + +#### Basic PDF Loading and Rendering +```javascript +import * as pdfjsLib from 'pdfjs-dist'; + +// Configure worker (important for performance) +pdfjsLib.GlobalWorkerOptions.workerSrc = './pdf.worker.js'; + +async function renderPDF() { + // Load PDF + const loadingTask = pdfjsLib.getDocument('document.pdf'); + const pdf = await loadingTask.promise; + + console.log(`Loaded PDF with ${pdf.numPages} pages`); + + // Get first page + const page = await pdf.getPage(1); + const viewport = page.getViewport({ scale: 1.5 }); + + // Render to canvas + const canvas = document.createElement('canvas'); + const context = canvas.getContext('2d'); + canvas.height = viewport.height; + canvas.width = viewport.width; + + const renderContext = { + canvasContext: context, + viewport: viewport + }; + + await page.render(renderContext).promise; + document.body.appendChild(canvas); +} +``` + +#### Extract Text with Coordinates +```javascript +import * as pdfjsLib from 'pdfjs-dist'; + +async function extractText() { + const loadingTask = pdfjsLib.getDocument('document.pdf'); + const pdf = await loadingTask.promise; + + let fullText = ''; + + // Extract text from all pages + for (let i = 1; i <= pdf.numPages; i++) { + const page = await pdf.getPage(i); + const textContent = await page.getTextContent(); + + const pageText = textContent.items + .map(item => item.str) + .join(' '); + + fullText += `\n--- Page ${i} ---\n${pageText}`; + + // Get text with coordinates for advanced processing + const textWithCoords = textContent.items.map(item => ({ + text: item.str, + x: item.transform[4], + y: item.transform[5], + width: item.width, + height: item.height + })); + } + + console.log(fullText); + return fullText; +} +``` + +#### Extract Annotations and Forms +```javascript +import * as pdfjsLib from 'pdfjs-dist'; + +async function extractAnnotations() { + const loadingTask = pdfjsLib.getDocument('annotated.pdf'); + const pdf = await loadingTask.promise; + + for (let i = 1; i <= pdf.numPages; i++) { + const page = await pdf.getPage(i); + const annotations = await page.getAnnotations(); + + annotations.forEach(annotation => { + console.log(`Annotation type: ${annotation.subtype}`); + console.log(`Content: ${annotation.contents}`); + console.log(`Coordinates: ${JSON.stringify(annotation.rect)}`); + }); + } +} +``` + +## Advanced Command-Line Operations + +### poppler-utils Advanced Features + +#### Extract Text with Bounding Box Coordinates +```bash +# Extract text with bounding box coordinates (essential for structured data) +pdftotext -bbox-layout document.pdf output.xml + +# The XML output contains precise coordinates for each text element +``` + +#### Advanced Image Conversion +```bash +# Convert to PNG images with specific resolution +pdftoppm -png -r 300 document.pdf output_prefix + +# Convert specific page range with high resolution +pdftoppm -png -r 600 -f 1 -l 3 document.pdf high_res_pages + +# Convert to JPEG with quality setting +pdftoppm -jpeg -jpegopt quality=85 -r 200 document.pdf jpeg_output +``` + +#### Extract Embedded Images +```bash +# Extract all embedded images with metadata +pdfimages -j -p document.pdf page_images + +# List image info 
without extracting +pdfimages -list document.pdf + +# Extract images in their original format +pdfimages -all document.pdf images/img +``` + +### qpdf Advanced Features + +#### Complex Page Manipulation +```bash +# Split PDF into groups of pages +qpdf --split-pages=3 input.pdf output_group_%02d.pdf + +# Extract specific pages with complex ranges +qpdf input.pdf --pages input.pdf 1,3-5,8,10-end -- extracted.pdf + +# Merge specific pages from multiple PDFs +qpdf --empty --pages doc1.pdf 1-3 doc2.pdf 5-7 doc3.pdf 2,4 -- combined.pdf +``` + +#### PDF Optimization and Repair +```bash +# Optimize PDF for web (linearize for streaming) +qpdf --linearize input.pdf optimized.pdf + +# Remove unused objects and compress +qpdf --optimize-level=all input.pdf compressed.pdf + +# Attempt to repair corrupted PDF structure +qpdf --check input.pdf +qpdf --fix-qdf damaged.pdf repaired.pdf + +# Show detailed PDF structure for debugging +qpdf --show-all-pages input.pdf > structure.txt +``` + +#### Advanced Encryption +```bash +# Add password protection with specific permissions +qpdf --encrypt user_pass owner_pass 256 --print=none --modify=none -- input.pdf encrypted.pdf + +# Check encryption status +qpdf --show-encryption encrypted.pdf + +# Remove password protection (requires password) +qpdf --password=secret123 --decrypt encrypted.pdf decrypted.pdf +``` + +## Advanced Python Techniques + +### pdfplumber Advanced Features + +#### Extract Text with Precise Coordinates +```python +import pdfplumber + +with pdfplumber.open("document.pdf") as pdf: + page = pdf.pages[0] + + # Extract all text with coordinates + chars = page.chars + for char in chars[:10]: # First 10 characters + print(f"Char: '{char['text']}' at x:{char['x0']:.1f} y:{char['y0']:.1f}") + + # Extract text by bounding box (left, top, right, bottom) + bbox_text = page.within_bbox((100, 100, 400, 200)).extract_text() +``` + +#### Advanced Table Extraction with Custom Settings +```python +import pdfplumber +import pandas as pd + +with pdfplumber.open("complex_table.pdf") as pdf: + page = pdf.pages[0] + + # Extract tables with custom settings for complex layouts + table_settings = { + "vertical_strategy": "lines", + "horizontal_strategy": "lines", + "snap_tolerance": 3, + "intersection_tolerance": 15 + } + tables = page.extract_tables(table_settings) + + # Visual debugging for table extraction + img = page.to_image(resolution=150) + img.save("debug_layout.png") +``` + +### reportlab Advanced Features + +#### Create Professional Reports with Tables +```python +from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph +from reportlab.lib.styles import getSampleStyleSheet +from reportlab.lib import colors + +# Sample data +data = [ + ['Product', 'Q1', 'Q2', 'Q3', 'Q4'], + ['Widgets', '120', '135', '142', '158'], + ['Gadgets', '85', '92', '98', '105'] +] + +# Create PDF with table +doc = SimpleDocTemplate("report.pdf") +elements = [] + +# Add title +styles = getSampleStyleSheet() +title = Paragraph("Quarterly Sales Report", styles['Title']) +elements.append(title) + +# Add table with advanced styling +table = Table(data) +table.setStyle(TableStyle([ + ('BACKGROUND', (0, 0), (-1, 0), colors.grey), + ('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke), + ('ALIGN', (0, 0), (-1, -1), 'CENTER'), + ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'), + ('FONTSIZE', (0, 0), (-1, 0), 14), + ('BOTTOMPADDING', (0, 0), (-1, 0), 12), + ('BACKGROUND', (0, 1), (-1, -1), colors.beige), + ('GRID', (0, 0), (-1, -1), 1, colors.black) +])) 
+elements.append(table) + +doc.build(elements) +``` + +## Complex Workflows + +### Extract Figures/Images from PDF + +#### Method 1: Using pdfimages (fastest) +```bash +# Extract all images with original quality +pdfimages -all document.pdf images/img +``` + +#### Method 2: Using pypdfium2 + Image Processing +```python +import pypdfium2 as pdfium +from PIL import Image +import numpy as np + +def extract_figures(pdf_path, output_dir): + pdf = pdfium.PdfDocument(pdf_path) + + for page_num, page in enumerate(pdf): + # Render high-resolution page + bitmap = page.render(scale=3.0) + img = bitmap.to_pil() + + # Convert to numpy for processing + img_array = np.array(img) + + # Simple figure detection (non-white regions) + mask = np.any(img_array != [255, 255, 255], axis=2) + + # Find contours and extract bounding boxes + # (This is simplified - real implementation would need more sophisticated detection) + + # Save detected figures + # ... implementation depends on specific needs +``` + +### Batch PDF Processing with Error Handling +```python +import os +import glob +from pypdf import PdfReader, PdfWriter +import logging + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +def batch_process_pdfs(input_dir, operation='merge'): + pdf_files = glob.glob(os.path.join(input_dir, "*.pdf")) + + if operation == 'merge': + writer = PdfWriter() + for pdf_file in pdf_files: + try: + reader = PdfReader(pdf_file) + for page in reader.pages: + writer.add_page(page) + logger.info(f"Processed: {pdf_file}") + except Exception as e: + logger.error(f"Failed to process {pdf_file}: {e}") + continue + + with open("batch_merged.pdf", "wb") as output: + writer.write(output) + + elif operation == 'extract_text': + for pdf_file in pdf_files: + try: + reader = PdfReader(pdf_file) + text = "" + for page in reader.pages: + text += page.extract_text() + + output_file = pdf_file.replace('.pdf', '.txt') + with open(output_file, 'w', encoding='utf-8') as f: + f.write(text) + logger.info(f"Extracted text from: {pdf_file}") + + except Exception as e: + logger.error(f"Failed to extract text from {pdf_file}: {e}") + continue +``` + +### Advanced PDF Cropping +```python +from pypdf import PdfWriter, PdfReader + +reader = PdfReader("input.pdf") +writer = PdfWriter() + +# Crop page (left, bottom, right, top in points) +page = reader.pages[0] +page.mediabox.left = 50 +page.mediabox.bottom = 50 +page.mediabox.right = 550 +page.mediabox.top = 750 + +writer.add_page(page) +with open("cropped.pdf", "wb") as output: + writer.write(output) +``` + +## Performance Optimization Tips + +### 1. For Large PDFs +- Use streaming approaches instead of loading entire PDF in memory +- Use `qpdf --split-pages` for splitting large files +- Process pages individually with pypdfium2 + +### 2. For Text Extraction +- `pdftotext -bbox-layout` is fastest for plain text extraction +- Use pdfplumber for structured data and tables +- Avoid `pypdf.extract_text()` for very large documents + +### 3. For Image Extraction +- `pdfimages` is much faster than rendering pages +- Use low resolution for previews, high resolution for final output + +### 4. For Form Filling +- pdf-lib maintains form structure better than most alternatives +- Pre-validate form fields before processing + +### 5. 
Memory Management +```python +# Process PDFs in chunks +def process_large_pdf(pdf_path, chunk_size=10): + reader = PdfReader(pdf_path) + total_pages = len(reader.pages) + + for start_idx in range(0, total_pages, chunk_size): + end_idx = min(start_idx + chunk_size, total_pages) + writer = PdfWriter() + + for i in range(start_idx, end_idx): + writer.add_page(reader.pages[i]) + + # Process chunk + with open(f"chunk_{start_idx//chunk_size}.pdf", "wb") as output: + writer.write(output) +``` + +## Troubleshooting Common Issues + +### Encrypted PDFs +```python +# Handle password-protected PDFs +from pypdf import PdfReader + +try: + reader = PdfReader("encrypted.pdf") + if reader.is_encrypted: + reader.decrypt("password") +except Exception as e: + print(f"Failed to decrypt: {e}") +``` + +### Corrupted PDFs +```bash +# Use qpdf to repair +qpdf --check corrupted.pdf +qpdf --replace-input corrupted.pdf +``` + +### Text Extraction Issues +```python +# Fallback to OCR for scanned PDFs +import pytesseract +from pdf2image import convert_from_path + +def extract_text_with_ocr(pdf_path): + images = convert_from_path(pdf_path) + text = "" + for i, image in enumerate(images): + text += pytesseract.image_to_string(image) + return text +``` + +## License Information + +- **pypdf**: BSD License +- **pdfplumber**: MIT License +- **pypdfium2**: Apache/BSD License +- **reportlab**: BSD License +- **poppler-utils**: GPL-2 License +- **qpdf**: Apache License +- **pdf-lib**: MIT License +- **pdfjs-dist**: Apache License \ No newline at end of file diff --git a/skills/pdf/scripts/check_bounding_boxes.py b/skills/pdf/scripts/check_bounding_boxes.py new file mode 100644 index 000000000..7443660c0 --- /dev/null +++ b/skills/pdf/scripts/check_bounding_boxes.py @@ -0,0 +1,70 @@ +from dataclasses import dataclass +import json +import sys + + +# Script to check that the `fields.json` file that Claude creates when analyzing PDFs +# does not have overlapping bounding boxes. See forms.md. + + +@dataclass +class RectAndField: + rect: list[float] + rect_type: str + field: dict + + +# Returns a list of messages that are printed to stdout for Claude to read. +def get_bounding_box_messages(fields_json_stream) -> list[str]: + messages = [] + fields = json.load(fields_json_stream) + messages.append(f"Read {len(fields['form_fields'])} fields") + + def rects_intersect(r1, r2): + disjoint_horizontal = r1[0] >= r2[2] or r1[2] <= r2[0] + disjoint_vertical = r1[1] >= r2[3] or r1[3] <= r2[1] + return not (disjoint_horizontal or disjoint_vertical) + + rects_and_fields = [] + for f in fields["form_fields"]: + rects_and_fields.append(RectAndField(f["label_bounding_box"], "label", f)) + rects_and_fields.append(RectAndField(f["entry_bounding_box"], "entry", f)) + + has_error = False + for i, ri in enumerate(rects_and_fields): + # This is O(N^2); we can optimize if it becomes a problem. 
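+        # Compare this rectangle against every later one; only boxes on the same page can actually conflict.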
+ for j in range(i + 1, len(rects_and_fields)): + rj = rects_and_fields[j] + if ri.field["page_number"] == rj.field["page_number"] and rects_intersect(ri.rect, rj.rect): + has_error = True + if ri.field is rj.field: + messages.append(f"FAILURE: intersection between label and entry bounding boxes for `{ri.field['description']}` ({ri.rect}, {rj.rect})") + else: + messages.append(f"FAILURE: intersection between {ri.rect_type} bounding box for `{ri.field['description']}` ({ri.rect}) and {rj.rect_type} bounding box for `{rj.field['description']}` ({rj.rect})") + if len(messages) >= 20: + messages.append("Aborting further checks; fix bounding boxes and try again") + return messages + if ri.rect_type == "entry": + if "entry_text" in ri.field: + font_size = ri.field["entry_text"].get("font_size", 14) + entry_height = ri.rect[3] - ri.rect[1] + if entry_height < font_size: + has_error = True + messages.append(f"FAILURE: entry bounding box height ({entry_height}) for `{ri.field['description']}` is too short for the text content (font size: {font_size}). Increase the box height or decrease the font size.") + if len(messages) >= 20: + messages.append("Aborting further checks; fix bounding boxes and try again") + return messages + + if not has_error: + messages.append("SUCCESS: All bounding boxes are valid") + return messages + +if __name__ == "__main__": + if len(sys.argv) != 2: + print("Usage: check_bounding_boxes.py [fields.json]") + sys.exit(1) + # Input file should be in the `fields.json` format described in forms.md. + with open(sys.argv[1]) as f: + messages = get_bounding_box_messages(f) + for msg in messages: + print(msg) diff --git a/skills/pdf/scripts/check_bounding_boxes_test.py b/skills/pdf/scripts/check_bounding_boxes_test.py new file mode 100644 index 000000000..1dbb463c8 --- /dev/null +++ b/skills/pdf/scripts/check_bounding_boxes_test.py @@ -0,0 +1,226 @@ +import unittest +import json +import io +from check_bounding_boxes import get_bounding_box_messages + + +# Currently this is not run automatically in CI; it's just for documentation and manual checking. 
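+# Run it manually from this directory with: python check_bounding_boxes_test.py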
+class TestGetBoundingBoxMessages(unittest.TestCase): + + def create_json_stream(self, data): + """Helper to create a JSON stream from data""" + return io.StringIO(json.dumps(data)) + + def test_no_intersections(self): + """Test case with no bounding box intersections""" + data = { + "form_fields": [ + { + "description": "Name", + "page_number": 1, + "label_bounding_box": [10, 10, 50, 30], + "entry_bounding_box": [60, 10, 150, 30] + }, + { + "description": "Email", + "page_number": 1, + "label_bounding_box": [10, 40, 50, 60], + "entry_bounding_box": [60, 40, 150, 60] + } + ] + } + + stream = self.create_json_stream(data) + messages = get_bounding_box_messages(stream) + self.assertTrue(any("SUCCESS" in msg for msg in messages)) + self.assertFalse(any("FAILURE" in msg for msg in messages)) + + def test_label_entry_intersection_same_field(self): + """Test intersection between label and entry of the same field""" + data = { + "form_fields": [ + { + "description": "Name", + "page_number": 1, + "label_bounding_box": [10, 10, 60, 30], + "entry_bounding_box": [50, 10, 150, 30] # Overlaps with label + } + ] + } + + stream = self.create_json_stream(data) + messages = get_bounding_box_messages(stream) + self.assertTrue(any("FAILURE" in msg and "intersection" in msg for msg in messages)) + self.assertFalse(any("SUCCESS" in msg for msg in messages)) + + def test_intersection_between_different_fields(self): + """Test intersection between bounding boxes of different fields""" + data = { + "form_fields": [ + { + "description": "Name", + "page_number": 1, + "label_bounding_box": [10, 10, 50, 30], + "entry_bounding_box": [60, 10, 150, 30] + }, + { + "description": "Email", + "page_number": 1, + "label_bounding_box": [40, 20, 80, 40], # Overlaps with Name's boxes + "entry_bounding_box": [160, 10, 250, 30] + } + ] + } + + stream = self.create_json_stream(data) + messages = get_bounding_box_messages(stream) + self.assertTrue(any("FAILURE" in msg and "intersection" in msg for msg in messages)) + self.assertFalse(any("SUCCESS" in msg for msg in messages)) + + def test_different_pages_no_intersection(self): + """Test that boxes on different pages don't count as intersecting""" + data = { + "form_fields": [ + { + "description": "Name", + "page_number": 1, + "label_bounding_box": [10, 10, 50, 30], + "entry_bounding_box": [60, 10, 150, 30] + }, + { + "description": "Email", + "page_number": 2, + "label_bounding_box": [10, 10, 50, 30], # Same coordinates but different page + "entry_bounding_box": [60, 10, 150, 30] + } + ] + } + + stream = self.create_json_stream(data) + messages = get_bounding_box_messages(stream) + self.assertTrue(any("SUCCESS" in msg for msg in messages)) + self.assertFalse(any("FAILURE" in msg for msg in messages)) + + def test_entry_height_too_small(self): + """Test that entry box height is checked against font size""" + data = { + "form_fields": [ + { + "description": "Name", + "page_number": 1, + "label_bounding_box": [10, 10, 50, 30], + "entry_bounding_box": [60, 10, 150, 20], # Height is 10 + "entry_text": { + "font_size": 14 # Font size larger than height + } + } + ] + } + + stream = self.create_json_stream(data) + messages = get_bounding_box_messages(stream) + self.assertTrue(any("FAILURE" in msg and "height" in msg for msg in messages)) + self.assertFalse(any("SUCCESS" in msg for msg in messages)) + + def test_entry_height_adequate(self): + """Test that adequate entry box height passes""" + data = { + "form_fields": [ + { + "description": "Name", + "page_number": 1, + "label_bounding_box": 
[10, 10, 50, 30], + "entry_bounding_box": [60, 10, 150, 30], # Height is 20 + "entry_text": { + "font_size": 14 # Font size smaller than height + } + } + ] + } + + stream = self.create_json_stream(data) + messages = get_bounding_box_messages(stream) + self.assertTrue(any("SUCCESS" in msg for msg in messages)) + self.assertFalse(any("FAILURE" in msg for msg in messages)) + + def test_default_font_size(self): + """Test that default font size is used when not specified""" + data = { + "form_fields": [ + { + "description": "Name", + "page_number": 1, + "label_bounding_box": [10, 10, 50, 30], + "entry_bounding_box": [60, 10, 150, 20], # Height is 10 + "entry_text": {} # No font_size specified, should use default 14 + } + ] + } + + stream = self.create_json_stream(data) + messages = get_bounding_box_messages(stream) + self.assertTrue(any("FAILURE" in msg and "height" in msg for msg in messages)) + self.assertFalse(any("SUCCESS" in msg for msg in messages)) + + def test_no_entry_text(self): + """Test that missing entry_text doesn't cause height check""" + data = { + "form_fields": [ + { + "description": "Name", + "page_number": 1, + "label_bounding_box": [10, 10, 50, 30], + "entry_bounding_box": [60, 10, 150, 20] # Small height but no entry_text + } + ] + } + + stream = self.create_json_stream(data) + messages = get_bounding_box_messages(stream) + self.assertTrue(any("SUCCESS" in msg for msg in messages)) + self.assertFalse(any("FAILURE" in msg for msg in messages)) + + def test_multiple_errors_limit(self): + """Test that error messages are limited to prevent excessive output""" + fields = [] + # Create many overlapping fields + for i in range(25): + fields.append({ + "description": f"Field{i}", + "page_number": 1, + "label_bounding_box": [10, 10, 50, 30], # All overlap + "entry_bounding_box": [20, 15, 60, 35] # All overlap + }) + + data = {"form_fields": fields} + + stream = self.create_json_stream(data) + messages = get_bounding_box_messages(stream) + # Should abort after ~20 messages + self.assertTrue(any("Aborting" in msg for msg in messages)) + # Should have some FAILURE messages but not hundreds + failure_count = sum(1 for msg in messages if "FAILURE" in msg) + self.assertGreater(failure_count, 0) + self.assertLess(len(messages), 30) # Should be limited + + def test_edge_touching_boxes(self): + """Test that boxes touching at edges don't count as intersecting""" + data = { + "form_fields": [ + { + "description": "Name", + "page_number": 1, + "label_bounding_box": [10, 10, 50, 30], + "entry_bounding_box": [50, 10, 150, 30] # Touches at x=50 + } + ] + } + + stream = self.create_json_stream(data) + messages = get_bounding_box_messages(stream) + self.assertTrue(any("SUCCESS" in msg for msg in messages)) + self.assertFalse(any("FAILURE" in msg for msg in messages)) + + +if __name__ == '__main__': + unittest.main() diff --git a/skills/pdf/scripts/check_fillable_fields.py b/skills/pdf/scripts/check_fillable_fields.py new file mode 100644 index 000000000..dc43d1821 --- /dev/null +++ b/skills/pdf/scripts/check_fillable_fields.py @@ -0,0 +1,12 @@ +import sys +from pypdf import PdfReader + + +# Script for Claude to run to determine whether a PDF has fillable form fields. See forms.md. 
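+# Usage: python check_fillable_fields.py [pdf file]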
+ + +reader = PdfReader(sys.argv[1]) +if (reader.get_fields()): + print("This PDF has fillable form fields") +else: + print("This PDF does not have fillable form fields; you will need to visually determine where to enter data") diff --git a/skills/pdf/scripts/convert_pdf_to_images.py b/skills/pdf/scripts/convert_pdf_to_images.py new file mode 100644 index 000000000..f8a4ec524 --- /dev/null +++ b/skills/pdf/scripts/convert_pdf_to_images.py @@ -0,0 +1,35 @@ +import os +import sys + +from pdf2image import convert_from_path + + +# Converts each page of a PDF to a PNG image. + + +def convert(pdf_path, output_dir, max_dim=1000): + images = convert_from_path(pdf_path, dpi=200) + + for i, image in enumerate(images): + # Scale image if needed to keep width/height under `max_dim` + width, height = image.size + if width > max_dim or height > max_dim: + scale_factor = min(max_dim / width, max_dim / height) + new_width = int(width * scale_factor) + new_height = int(height * scale_factor) + image = image.resize((new_width, new_height)) + + image_path = os.path.join(output_dir, f"page_{i+1}.png") + image.save(image_path) + print(f"Saved page {i+1} as {image_path} (size: {image.size})") + + print(f"Converted {len(images)} pages to PNG images") + + +if __name__ == "__main__": + if len(sys.argv) != 3: + print("Usage: convert_pdf_to_images.py [input pdf] [output directory]") + sys.exit(1) + pdf_path = sys.argv[1] + output_directory = sys.argv[2] + convert(pdf_path, output_directory) diff --git a/skills/pdf/scripts/create_validation_image.py b/skills/pdf/scripts/create_validation_image.py new file mode 100644 index 000000000..4913f8f8d --- /dev/null +++ b/skills/pdf/scripts/create_validation_image.py @@ -0,0 +1,41 @@ +import json +import sys + +from PIL import Image, ImageDraw + + +# Creates "validation" images with rectangles for the bounding box information that +# Claude creates when determining where to add text annotations in PDFs. See forms.md. + + +def create_validation_image(page_number, fields_json_path, input_path, output_path): + # Input file should be in the `fields.json` format described in forms.md. + with open(fields_json_path, 'r') as f: + data = json.load(f) + + img = Image.open(input_path) + draw = ImageDraw.Draw(img) + num_boxes = 0 + + for field in data["form_fields"]: + if field["page_number"] == page_number: + entry_box = field['entry_bounding_box'] + label_box = field['label_bounding_box'] + # Draw red rectangle over entry bounding box and blue rectangle over the label. 
+ draw.rectangle(entry_box, outline='red', width=2) + draw.rectangle(label_box, outline='blue', width=2) + num_boxes += 2 + + img.save(output_path) + print(f"Created validation image at {output_path} with {num_boxes} bounding boxes") + + +if __name__ == "__main__": + if len(sys.argv) != 5: + print("Usage: create_validation_image.py [page number] [fields.json file] [input image path] [output image path]") + sys.exit(1) + page_number = int(sys.argv[1]) + fields_json_path = sys.argv[2] + input_image_path = sys.argv[3] + output_image_path = sys.argv[4] + create_validation_image(page_number, fields_json_path, input_image_path, output_image_path) diff --git a/skills/pdf/scripts/extract_form_field_info.py b/skills/pdf/scripts/extract_form_field_info.py new file mode 100644 index 000000000..f42a2df84 --- /dev/null +++ b/skills/pdf/scripts/extract_form_field_info.py @@ -0,0 +1,152 @@ +import json +import sys + +from pypdf import PdfReader + + +# Extracts data for the fillable form fields in a PDF and outputs JSON that +# Claude uses to fill the fields. See forms.md. + + +# This matches the format used by PdfReader `get_fields` and `update_page_form_field_values` methods. +def get_full_annotation_field_id(annotation): + components = [] + while annotation: + field_name = annotation.get('/T') + if field_name: + components.append(field_name) + annotation = annotation.get('/Parent') + return ".".join(reversed(components)) if components else None + + +def make_field_dict(field, field_id): + field_dict = {"field_id": field_id} + ft = field.get('/FT') + if ft == "/Tx": + field_dict["type"] = "text" + elif ft == "/Btn": + field_dict["type"] = "checkbox" # radio groups handled separately + states = field.get("/_States_", []) + if len(states) == 2: + # "/Off" seems to always be the unchecked value, as suggested by + # https://opensource.adobe.com/dc-acrobat-sdk-docs/standards/pdfstandards/pdf/PDF32000_2008.pdf#page=448 + # It can be either first or second in the "/_States_" list. + if "/Off" in states: + field_dict["checked_value"] = states[0] if states[0] != "/Off" else states[1] + field_dict["unchecked_value"] = "/Off" + else: + print(f"Unexpected state values for checkbox `${field_id}`. Its checked and unchecked values may not be correct; if you're trying to check it, visually verify the results.") + field_dict["checked_value"] = states[0] + field_dict["unchecked_value"] = states[1] + elif ft == "/Ch": + field_dict["type"] = "choice" + states = field.get("/_States_", []) + field_dict["choice_options"] = [{ + "value": state[0], + "text": state[1], + } for state in states] + else: + field_dict["type"] = f"unknown ({ft})" + return field_dict + + +# Returns a list of fillable PDF fields: +# [ +# { +# "field_id": "name", +# "page": 1, +# "type": ("text", "checkbox", "radio_group", or "choice") +# // Per-type additional fields described in forms.md +# }, +# ] +def get_field_info(reader: PdfReader): + fields = reader.get_fields() + + field_info_by_id = {} + possible_radio_names = set() + + for field_id, field in fields.items(): + # Skip if this is a container field with children, except that it might be + # a parent group for radio button options. + if field.get("/Kids"): + if field.get("/FT") == "/Btn": + possible_radio_names.add(field_id) + continue + field_info_by_id[field_id] = make_field_dict(field, field_id) + + # Bounding rects are stored in annotations in page objects. + + # Radio button options have a separate annotation for each choice; + # all choices have the same field name. 
+ # See https://westhealth.github.io/exploring-fillable-forms-with-pdfrw.html + radio_fields_by_id = {} + + for page_index, page in enumerate(reader.pages): + annotations = page.get('/Annots', []) + for ann in annotations: + field_id = get_full_annotation_field_id(ann) + if field_id in field_info_by_id: + field_info_by_id[field_id]["page"] = page_index + 1 + field_info_by_id[field_id]["rect"] = ann.get('/Rect') + elif field_id in possible_radio_names: + try: + # ann['/AP']['/N'] should have two items. One of them is '/Off', + # the other is the active value. + on_values = [v for v in ann["/AP"]["/N"] if v != "/Off"] + except KeyError: + continue + if len(on_values) == 1: + rect = ann.get("/Rect") + if field_id not in radio_fields_by_id: + radio_fields_by_id[field_id] = { + "field_id": field_id, + "type": "radio_group", + "page": page_index + 1, + "radio_options": [], + } + # Note: at least on macOS 15.7, Preview.app doesn't show selected + # radio buttons correctly. (It does if you remove the leading slash + # from the value, but that causes them not to appear correctly in + # Chrome/Firefox/Acrobat/etc). + radio_fields_by_id[field_id]["radio_options"].append({ + "value": on_values[0], + "rect": rect, + }) + + # Some PDFs have form field definitions without corresponding annotations, + # so we can't tell where they are. Ignore these fields for now. + fields_with_location = [] + for field_info in field_info_by_id.values(): + if "page" in field_info: + fields_with_location.append(field_info) + else: + print(f"Unable to determine location for field id: {field_info.get('field_id')}, ignoring") + + # Sort by page number, then Y position (flipped in PDF coordinate system), then X. + def sort_key(f): + if "radio_options" in f: + rect = f["radio_options"][0]["rect"] or [0, 0, 0, 0] + else: + rect = f.get("rect") or [0, 0, 0, 0] + adjusted_position = [-rect[1], rect[0]] + return [f.get("page"), adjusted_position] + + sorted_fields = fields_with_location + list(radio_fields_by_id.values()) + sorted_fields.sort(key=sort_key) + + return sorted_fields + + +def write_field_info(pdf_path: str, json_output_path: str): + reader = PdfReader(pdf_path) + field_info = get_field_info(reader) + with open(json_output_path, "w") as f: + json.dump(field_info, f, indent=2) + print(f"Wrote {len(field_info)} fields to {json_output_path}") + + +if __name__ == "__main__": + if len(sys.argv) != 3: + print("Usage: extract_form_field_info.py [input pdf] [output json]") + sys.exit(1) + write_field_info(sys.argv[1], sys.argv[2]) diff --git a/skills/pdf/scripts/fill_fillable_fields.py b/skills/pdf/scripts/fill_fillable_fields.py new file mode 100644 index 000000000..ac35753c5 --- /dev/null +++ b/skills/pdf/scripts/fill_fillable_fields.py @@ -0,0 +1,114 @@ +import json +import sys + +from pypdf import PdfReader, PdfWriter + +from extract_form_field_info import get_field_info + + +# Fills fillable form fields in a PDF. See forms.md. + + +def fill_pdf_fields(input_pdf_path: str, fields_json_path: str, output_pdf_path: str): + with open(fields_json_path) as f: + fields = json.load(f) + # Group by page number. 
+ fields_by_page = {} + for field in fields: + if "value" in field: + field_id = field["field_id"] + page = field["page"] + if page not in fields_by_page: + fields_by_page[page] = {} + fields_by_page[page][field_id] = field["value"] + + reader = PdfReader(input_pdf_path) + + has_error = False + field_info = get_field_info(reader) + fields_by_ids = {f["field_id"]: f for f in field_info} + for field in fields: + existing_field = fields_by_ids.get(field["field_id"]) + if not existing_field: + has_error = True + print(f"ERROR: `{field['field_id']}` is not a valid field ID") + elif field["page"] != existing_field["page"]: + has_error = True + print(f"ERROR: Incorrect page number for `{field['field_id']}` (got {field['page']}, expected {existing_field['page']})") + else: + if "value" in field: + err = validation_error_for_field_value(existing_field, field["value"]) + if err: + print(err) + has_error = True + if has_error: + sys.exit(1) + + writer = PdfWriter(clone_from=reader) + for page, field_values in fields_by_page.items(): + writer.update_page_form_field_values(writer.pages[page - 1], field_values, auto_regenerate=False) + + # This seems to be necessary for many PDF viewers to format the form values correctly. + # It may cause the viewer to show a "save changes" dialog even if the user doesn't make any changes. + writer.set_need_appearances_writer(True) + + with open(output_pdf_path, "wb") as f: + writer.write(f) + + +def validation_error_for_field_value(field_info, field_value): + field_type = field_info["type"] + field_id = field_info["field_id"] + if field_type == "checkbox": + checked_val = field_info["checked_value"] + unchecked_val = field_info["unchecked_value"] + if field_value != checked_val and field_value != unchecked_val: + return f'ERROR: Invalid value "{field_value}" for checkbox field "{field_id}". The checked value is "{checked_val}" and the unchecked value is "{unchecked_val}"' + elif field_type == "radio_group": + option_values = [opt["value"] for opt in field_info["radio_options"]] + if field_value not in option_values: + return f'ERROR: Invalid value "{field_value}" for radio group field "{field_id}". Valid values are: {option_values}' + elif field_type == "choice": + choice_values = [opt["value"] for opt in field_info["choice_options"]] + if field_value not in choice_values: + return f'ERROR: Invalid value "{field_value}" for choice field "{field_id}". Valid values are: {choice_values}' + return None + + +# pypdf (at least version 5.7.0) has a bug when setting the value for a selection list field. +# In _writer.py around line 966: +# +# if field.get(FA.FT, "/Tx") == "/Ch" and field_flags & FA.FfBits.Combo == 0: +# txt = "\n".join(annotation.get_inherited(FA.Opt, [])) +# +# The problem is that for selection lists, `get_inherited` returns a list of two-element lists like +# [["value1", "Text 1"], ["value2", "Text 2"], ...] +# This causes `join` to throw a TypeError because it expects an iterable of strings. +# The horrible workaround is to patch `get_inherited` to return a list of the value strings. +# We call the original method and adjust the return value only if the argument to `get_inherited` +# is `FA.Opt` and if the return value is a list of two-element lists. 
+def monkeypatch_pydpf_method(): + from pypdf.generic import DictionaryObject + from pypdf.constants import FieldDictionaryAttributes + + original_get_inherited = DictionaryObject.get_inherited + + def patched_get_inherited(self, key: str, default = None): + result = original_get_inherited(self, key, default) + if key == FieldDictionaryAttributes.Opt: + if isinstance(result, list) and all(isinstance(v, list) and len(v) == 2 for v in result): + result = [r[0] for r in result] + return result + + DictionaryObject.get_inherited = patched_get_inherited + + +if __name__ == "__main__": + if len(sys.argv) != 4: + print("Usage: fill_fillable_fields.py [input pdf] [field_values.json] [output pdf]") + sys.exit(1) + monkeypatch_pydpf_method() + input_pdf = sys.argv[1] + fields_json = sys.argv[2] + output_pdf = sys.argv[3] + fill_pdf_fields(input_pdf, fields_json, output_pdf) diff --git a/skills/pdf/scripts/fill_pdf_form_with_annotations.py b/skills/pdf/scripts/fill_pdf_form_with_annotations.py new file mode 100644 index 000000000..f98053135 --- /dev/null +++ b/skills/pdf/scripts/fill_pdf_form_with_annotations.py @@ -0,0 +1,108 @@ +import json +import sys + +from pypdf import PdfReader, PdfWriter +from pypdf.annotations import FreeText + + +# Fills a PDF by adding text annotations defined in `fields.json`. See forms.md. + + +def transform_coordinates(bbox, image_width, image_height, pdf_width, pdf_height): + """Transform bounding box from image coordinates to PDF coordinates""" + # Image coordinates: origin at top-left, y increases downward + # PDF coordinates: origin at bottom-left, y increases upward + x_scale = pdf_width / image_width + y_scale = pdf_height / image_height + + left = bbox[0] * x_scale + right = bbox[2] * x_scale + + # Flip Y coordinates for PDF + top = pdf_height - (bbox[1] * y_scale) + bottom = pdf_height - (bbox[3] * y_scale) + + return left, bottom, right, top + + +def fill_pdf_form(input_pdf_path, fields_json_path, output_pdf_path): + """Fill the PDF form with data from fields.json""" + + # `fields.json` format described in forms.md. + with open(fields_json_path, "r") as f: + fields_data = json.load(f) + + # Open the PDF + reader = PdfReader(input_pdf_path) + writer = PdfWriter() + + # Copy all pages to writer + writer.append(reader) + + # Get PDF dimensions for each page + pdf_dimensions = {} + for i, page in enumerate(reader.pages): + mediabox = page.mediabox + pdf_dimensions[i + 1] = [mediabox.width, mediabox.height] + + # Process each form field + annotations = [] + for field in fields_data["form_fields"]: + page_num = field["page_number"] + + # Get page dimensions and transform coordinates. 
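+        # Each entry in fields_data["pages"] is assumed to look roughly like
+        # {"page_number": 1, "image_width": 1700, "image_height": 2200}
+        # (hypothetical values; the real schema is documented in forms.md).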
+ page_info = next(p for p in fields_data["pages"] if p["page_number"] == page_num) + image_width = page_info["image_width"] + image_height = page_info["image_height"] + pdf_width, pdf_height = pdf_dimensions[page_num] + + transformed_entry_box = transform_coordinates( + field["entry_bounding_box"], + image_width, image_height, + pdf_width, pdf_height + ) + + # Skip empty fields + if "entry_text" not in field or "text" not in field["entry_text"]: + continue + entry_text = field["entry_text"] + text = entry_text["text"] + if not text: + continue + + font_name = entry_text.get("font", "Arial") + font_size = str(entry_text.get("font_size", 14)) + "pt" + font_color = entry_text.get("font_color", "000000") + + # Font size/color seems to not work reliably across viewers: + # https://github.com/py-pdf/pypdf/issues/2084 + annotation = FreeText( + text=text, + rect=transformed_entry_box, + font=font_name, + font_size=font_size, + font_color=font_color, + border_color=None, + background_color=None, + ) + annotations.append(annotation) + # page_number is 0-based for pypdf + writer.add_annotation(page_number=page_num - 1, annotation=annotation) + + # Save the filled PDF + with open(output_pdf_path, "wb") as output: + writer.write(output) + + print(f"Successfully filled PDF form and saved to {output_pdf_path}") + print(f"Added {len(annotations)} text annotations") + + +if __name__ == "__main__": + if len(sys.argv) != 4: + print("Usage: fill_pdf_form_with_annotations.py [input pdf] [fields.json] [output pdf]") + sys.exit(1) + input_pdf = sys.argv[1] + fields_json = sys.argv[2] + output_pdf = sys.argv[3] + + fill_pdf_form(input_pdf, fields_json, output_pdf) \ No newline at end of file diff --git a/skills/playwright-browser-automation/SKILL.md b/skills/playwright-browser-automation/SKILL.md new file mode 100644 index 000000000..c2551e8b7 --- /dev/null +++ b/skills/playwright-browser-automation/SKILL.md @@ -0,0 +1,404 @@ +--- +name: Playwright Browser Automation +description: Complete browser automation with Playwright. Auto-detects dev servers, writes clean test scripts to /tmp. Test pages, fill forms, take screenshots, check responsive design, validate UX, test login flows, check links, automate any browser task. Use when user wants to test websites, automate browser interactions, validate web functionality, or perform any browser-based testing. +license: MIT +--- + +**IMPORTANT - Path Resolution:** +This skill can be installed in different locations (plugin system, manual installation, global, or project-specific). Before executing any commands, determine the skill directory based on where you loaded this SKILL.md file, and use that path in all commands below. Replace `$SKILL_DIR` with the actual discovered path. + +Common installation paths: +- Plugin system: `~/.claude/plugins/marketplaces/playwright-skill/skills/playwright-skill` +- Manual global: `~/.claude/skills/playwright-skill` +- Project-specific: `/.claude/skills/playwright-skill` + +# Playwright Browser Automation + +General-purpose browser automation skill. I'll write custom Playwright code for any automation task you request and execute it via the universal executor. + +**CRITICAL WORKFLOW - Follow these steps in order:** + +1. 
**Auto-detect dev servers** - For localhost testing, ALWAYS run server detection FIRST: + ```bash + cd $SKILL_DIR && node -e "require('./lib/helpers').detectDevServers().then(servers => console.log(JSON.stringify(servers)))" + ``` + - If **1 server found**: Use it automatically, inform user + - If **multiple servers found**: Ask user which one to test + - If **no servers found**: Ask for URL or offer to help start dev server + +2. **Write scripts to /tmp** - NEVER write test files to skill directory; always use `/tmp/playwright-test-*.js` + +3. **Use visible browser by default** - Always use `headless: false` unless user specifically requests headless mode + +4. **Parameterize URLs** - Always make URLs configurable via environment variable or constant at top of script + +## How It Works + +1. You describe what you want to test/automate +2. I auto-detect running dev servers (or ask for URL if testing external site) +3. I write custom Playwright code in `/tmp/playwright-test-*.js` (won't clutter your project) +4. I execute it via: `cd $SKILL_DIR && node run.js /tmp/playwright-test-*.js` +5. Results displayed in real-time, browser window visible for debugging +6. Test files auto-cleaned from /tmp by your OS + +## Setup (First Time) + +```bash +cd $SKILL_DIR +npm run setup +``` + +This installs Playwright and Chromium browser. Only needed once. + +## Execution Pattern + +**Step 1: Detect dev servers (for localhost testing)** + +```bash +cd $SKILL_DIR && node -e "require('./lib/helpers').detectDevServers().then(s => console.log(JSON.stringify(s)))" +``` + +**Step 2: Write test script to /tmp with URL parameter** + +```javascript +// /tmp/playwright-test-page.js +const { chromium } = require('playwright'); + +// Parameterized URL (detected or user-provided) +const TARGET_URL = 'http://localhost:3001'; // <-- Auto-detected or from user + +(async () => { + const browser = await chromium.launch({ headless: false }); + const page = await browser.newPage(); + + await page.goto(TARGET_URL); + console.log('Page loaded:', await page.title()); + + await page.screenshot({ path: '/tmp/screenshot.png', fullPage: true }); + console.log('📸 Screenshot saved to /tmp/screenshot.png'); + + await browser.close(); +})(); +``` + +**Step 3: Execute from skill directory** + +```bash +cd $SKILL_DIR && node run.js /tmp/playwright-test-page.js +``` + +## Common Patterns + +### Test a Page (Multiple Viewports) + +```javascript +// /tmp/playwright-test-responsive.js +const { chromium } = require('playwright'); + +const TARGET_URL = 'http://localhost:3001'; // Auto-detected + +(async () => { + const browser = await chromium.launch({ headless: false, slowMo: 100 }); + const page = await browser.newPage(); + + // Desktop test + await page.setViewportSize({ width: 1920, height: 1080 }); + await page.goto(TARGET_URL); + console.log('Desktop - Title:', await page.title()); + await page.screenshot({ path: '/tmp/desktop.png', fullPage: true }); + + // Mobile test + await page.setViewportSize({ width: 375, height: 667 }); + await page.screenshot({ path: '/tmp/mobile.png', fullPage: true }); + + await browser.close(); +})(); +``` + +### Test Login Flow + +```javascript +// /tmp/playwright-test-login.js +const { chromium } = require('playwright'); + +const TARGET_URL = 'http://localhost:3001'; // Auto-detected + +(async () => { + const browser = await chromium.launch({ headless: false }); + const page = await browser.newPage(); + + await page.goto(`${TARGET_URL}/login`); + + await page.fill('input[name="email"]', 'test@example.com'); 
+ await page.fill('input[name="password"]', 'password123'); + await page.click('button[type="submit"]'); + + // Wait for redirect + await page.waitForURL('**/dashboard'); + console.log('✅ Login successful, redirected to dashboard'); + + await browser.close(); +})(); +``` + +### Fill and Submit Form + +```javascript +// /tmp/playwright-test-form.js +const { chromium } = require('playwright'); + +const TARGET_URL = 'http://localhost:3001'; // Auto-detected + +(async () => { + const browser = await chromium.launch({ headless: false, slowMo: 50 }); + const page = await browser.newPage(); + + await page.goto(`${TARGET_URL}/contact`); + + await page.fill('input[name="name"]', 'John Doe'); + await page.fill('input[name="email"]', 'john@example.com'); + await page.fill('textarea[name="message"]', 'Test message'); + await page.click('button[type="submit"]'); + + // Verify submission + await page.waitForSelector('.success-message'); + console.log('✅ Form submitted successfully'); + + await browser.close(); +})(); +``` + +### Check for Broken Links + +```javascript +const { chromium } = require('playwright'); + +(async () => { + const browser = await chromium.launch({ headless: false }); + const page = await browser.newPage(); + + await page.goto('http://localhost:3000'); + + const links = await page.locator('a[href^="http"]').all(); + const results = { working: 0, broken: [] }; + + for (const link of links) { + const href = await link.getAttribute('href'); + try { + const response = await page.request.head(href); + if (response.ok()) { + results.working++; + } else { + results.broken.push({ url: href, status: response.status() }); + } + } catch (e) { + results.broken.push({ url: href, error: e.message }); + } + } + + console.log(`✅ Working links: ${results.working}`); + console.log(`❌ Broken links:`, results.broken); + + await browser.close(); +})(); +``` + +### Take Screenshot with Error Handling + +```javascript +const { chromium } = require('playwright'); + +(async () => { + const browser = await chromium.launch({ headless: false }); + const page = await browser.newPage(); + + try { + await page.goto('http://localhost:3000', { + waitUntil: 'networkidle', + timeout: 10000 + }); + + await page.screenshot({ + path: '/tmp/screenshot.png', + fullPage: true + }); + + console.log('📸 Screenshot saved to /tmp/screenshot.png'); + } catch (error) { + console.error('❌ Error:', error.message); + } finally { + await browser.close(); + } +})(); +``` + +### Test Responsive Design + +```javascript +// /tmp/playwright-test-responsive-full.js +const { chromium } = require('playwright'); + +const TARGET_URL = 'http://localhost:3001'; // Auto-detected + +(async () => { + const browser = await chromium.launch({ headless: false }); + const page = await browser.newPage(); + + const viewports = [ + { name: 'Desktop', width: 1920, height: 1080 }, + { name: 'Tablet', width: 768, height: 1024 }, + { name: 'Mobile', width: 375, height: 667 } + ]; + + for (const viewport of viewports) { + console.log(`Testing ${viewport.name} (${viewport.width}x${viewport.height})`); + + await page.setViewportSize({ + width: viewport.width, + height: viewport.height + }); + + await page.goto(TARGET_URL); + await page.waitForTimeout(1000); + + await page.screenshot({ + path: `/tmp/${viewport.name.toLowerCase()}.png`, + fullPage: true + }); + } + + console.log('✅ All viewports tested'); + await browser.close(); +})(); +``` + +## Inline Execution (Simple Tasks) + +For quick one-off tasks, you can execute code inline without creating files: + 
+```bash +# Take a quick screenshot +cd $SKILL_DIR && node run.js " +const browser = await chromium.launch({ headless: false }); +const page = await browser.newPage(); +await page.goto('http://localhost:3001'); +await page.screenshot({ path: '/tmp/quick-screenshot.png', fullPage: true }); +console.log('Screenshot saved'); +await browser.close(); +" +``` + +**When to use inline vs files:** +- **Inline**: Quick one-off tasks (screenshot, check if element exists, get page title) +- **Files**: Complex tests, responsive design checks, anything user might want to re-run + +## Available Helpers + +Optional utility functions in `lib/helpers.js`: + +```javascript +const helpers = require('./lib/helpers'); + +// Detect running dev servers (CRITICAL - use this first!) +const servers = await helpers.detectDevServers(); +console.log('Found servers:', servers); + +// Safe click with retry +await helpers.safeClick(page, 'button.submit', { retries: 3 }); + +// Safe type with clear +await helpers.safeType(page, '#username', 'testuser'); + +// Take timestamped screenshot +await helpers.takeScreenshot(page, 'test-result'); + +// Handle cookie banners +await helpers.handleCookieBanner(page); + +// Extract table data +const data = await helpers.extractTableData(page, 'table.results'); +``` + +See `lib/helpers.js` for full list. + +## Advanced Usage + +For comprehensive Playwright API documentation, see [API_REFERENCE.md](API_REFERENCE.md): + +- Selectors & Locators best practices +- Network interception & API mocking +- Authentication & session management +- Visual regression testing +- Mobile device emulation +- Performance testing +- Debugging techniques +- CI/CD integration + +## Tips + +- **CRITICAL: Detect servers FIRST** - Always run `detectDevServers()` before writing test code for localhost testing +- **Use /tmp for test files** - Write to `/tmp/playwright-test-*.js`, never to skill directory or user's project +- **Parameterize URLs** - Put detected/provided URL in a `TARGET_URL` constant at the top of every script +- **DEFAULT: Visible browser** - Always use `headless: false` unless user explicitly asks for headless mode +- **Headless mode** - Only use `headless: true` when user specifically requests "headless" or "background" execution +- **Slow down:** Use `slowMo: 100` to make actions visible and easier to follow +- **Wait strategies:** Use `waitForURL`, `waitForSelector`, `waitForLoadState` instead of fixed timeouts +- **Error handling:** Always use try-catch for robust automation +- **Console output:** Use `console.log()` to track progress and show what's happening + +## Troubleshooting + +**Playwright not installed:** +```bash +cd $SKILL_DIR && npm run setup +``` + +**Module not found:** +Ensure running from skill directory via `run.js` wrapper + +**Browser doesn't open:** +Check `headless: false` and ensure display available + +**Element not found:** +Add wait: `await page.waitForSelector('.element', { timeout: 10000 })` + +## Example Usage + +``` +User: "Test if the marketing page looks good" + +Claude: I'll test the marketing page across multiple viewports. Let me first detect running servers... 
+[Runs: detectDevServers()] +[Output: Found server on port 3001] +I found your dev server running on http://localhost:3001 + +[Writes custom automation script to /tmp/playwright-test-marketing.js with URL parameterized] +[Runs: cd $SKILL_DIR && node run.js /tmp/playwright-test-marketing.js] +[Shows results with screenshots from /tmp/] +``` + +``` +User: "Check if login redirects correctly" + +Claude: I'll test the login flow. First, let me check for running servers... +[Runs: detectDevServers()] +[Output: Found servers on ports 3000 and 3001] +I found 2 dev servers. Which one should I test? +- http://localhost:3000 +- http://localhost:3001 + +User: "Use 3001" + +[Writes login automation to /tmp/playwright-test-login.js] +[Runs: cd $SKILL_DIR && node run.js /tmp/playwright-test-login.js] +[Reports: ✅ Login successful, redirected to /dashboard] +``` + +## Notes + +- Each automation is custom-written for your specific request +- Not limited to pre-built scripts - any browser task possible +- Auto-detects running dev servers to eliminate hardcoded URLs +- Test scripts written to `/tmp` for automatic cleanup (no clutter) +- Code executes reliably with proper module resolution via `run.js` +- Progressive disclosure - API_REFERENCE.md loaded only when advanced features needed diff --git a/skills/playwright-browser-automation/lib/helpers.js b/skills/playwright-browser-automation/lib/helpers.js new file mode 100644 index 000000000..ce1de3826 --- /dev/null +++ b/skills/playwright-browser-automation/lib/helpers.js @@ -0,0 +1,398 @@ +// playwright-helpers.js +// Reusable utility functions for Playwright automation + +const { chromium, firefox, webkit } = require('playwright'); + +/** + * Launch browser with standard configuration + * @param {string} browserType - 'chromium', 'firefox', or 'webkit' + * @param {Object} options - Additional launch options + */ +async function launchBrowser(browserType = 'chromium', options = {}) { + const defaultOptions = { + headless: process.env.HEADLESS !== 'false', + slowMo: process.env.SLOW_MO ? 
parseInt(process.env.SLOW_MO) : 0, + args: ['--no-sandbox', '--disable-setuid-sandbox'] + }; + + const browsers = { chromium, firefox, webkit }; + const browser = browsers[browserType]; + + if (!browser) { + throw new Error(`Invalid browser type: ${browserType}`); + } + + return await browser.launch({ ...defaultOptions, ...options }); +} + +/** + * Create a new page with viewport and user agent + * @param {Object} context - Browser context + * @param {Object} options - Page options + */ +async function createPage(context, options = {}) { + const page = await context.newPage(); + + if (options.viewport) { + await page.setViewportSize(options.viewport); + } + + if (options.userAgent) { + await page.setExtraHTTPHeaders({ + 'User-Agent': options.userAgent + }); + } + + // Set default timeout + page.setDefaultTimeout(options.timeout || 30000); + + return page; +} + +/** + * Smart wait for page to be ready + * @param {Object} page - Playwright page + * @param {Object} options - Wait options + */ +async function waitForPageReady(page, options = {}) { + const waitOptions = { + waitUntil: options.waitUntil || 'networkidle', + timeout: options.timeout || 30000 + }; + + try { + await page.waitForLoadState(waitOptions.waitUntil, { + timeout: waitOptions.timeout + }); + } catch (e) { + console.warn('Page load timeout, continuing...'); + } + + // Additional wait for dynamic content if selector provided + if (options.waitForSelector) { + await page.waitForSelector(options.waitForSelector, { + timeout: options.timeout + }); + } +} + +/** + * Safe click with retry logic + * @param {Object} page - Playwright page + * @param {string} selector - Element selector + * @param {Object} options - Click options + */ +async function safeClick(page, selector, options = {}) { + const maxRetries = options.retries || 3; + const retryDelay = options.retryDelay || 1000; + + for (let i = 0; i < maxRetries; i++) { + try { + await page.waitForSelector(selector, { + state: 'visible', + timeout: options.timeout || 5000 + }); + await page.click(selector, { + force: options.force || false, + timeout: options.timeout || 5000 + }); + return true; + } catch (e) { + if (i === maxRetries - 1) { + console.error(`Failed to click ${selector} after ${maxRetries} attempts`); + throw e; + } + console.log(`Retry ${i + 1}/${maxRetries} for clicking ${selector}`); + await page.waitForTimeout(retryDelay); + } + } +} + +/** + * Safe text input with clear before type + * @param {Object} page - Playwright page + * @param {string} selector - Input selector + * @param {string} text - Text to type + * @param {Object} options - Type options + */ +async function safeType(page, selector, text, options = {}) { + await page.waitForSelector(selector, { + state: 'visible', + timeout: options.timeout || 10000 + }); + + if (options.clear !== false) { + await page.fill(selector, ''); + } + + if (options.slow) { + await page.type(selector, text, { delay: options.delay || 100 }); + } else { + await page.fill(selector, text); + } +} + +/** + * Extract text from multiple elements + * @param {Object} page - Playwright page + * @param {string} selector - Elements selector + */ +async function extractTexts(page, selector) { + await page.waitForSelector(selector, { timeout: 10000 }); + return await page.$$eval(selector, elements => + elements.map(el => el.textContent?.trim()).filter(Boolean) + ); +} + +/** + * Take screenshot with timestamp + * @param {Object} page - Playwright page + * @param {string} name - Screenshot name + * @param {Object} options - Screenshot 
options + */ +async function takeScreenshot(page, name, options = {}) { + const timestamp = new Date().toISOString().replace(/[:.]/g, '-'); + const filename = `${name}-${timestamp}.png`; + + await page.screenshot({ + path: filename, + fullPage: options.fullPage !== false, + ...options + }); + + console.log(`Screenshot saved: ${filename}`); + return filename; +} + +/** + * Handle authentication + * @param {Object} page - Playwright page + * @param {Object} credentials - Username and password + * @param {Object} selectors - Login form selectors + */ +async function authenticate(page, credentials, selectors = {}) { + const defaultSelectors = { + username: 'input[name="username"], input[name="email"], #username, #email', + password: 'input[name="password"], #password', + submit: 'button[type="submit"], input[type="submit"], button:has-text("Login"), button:has-text("Sign in")' + }; + + const finalSelectors = { ...defaultSelectors, ...selectors }; + + await safeType(page, finalSelectors.username, credentials.username); + await safeType(page, finalSelectors.password, credentials.password); + await safeClick(page, finalSelectors.submit); + + // Wait for navigation or success indicator + await Promise.race([ + page.waitForNavigation({ waitUntil: 'networkidle' }), + page.waitForSelector(selectors.successIndicator || '.dashboard, .user-menu, .logout', { timeout: 10000 }) + ]).catch(() => { + console.log('Login might have completed without navigation'); + }); +} + +/** + * Scroll page + * @param {Object} page - Playwright page + * @param {string} direction - 'down', 'up', 'top', 'bottom' + * @param {number} distance - Pixels to scroll (for up/down) + */ +async function scrollPage(page, direction = 'down', distance = 500) { + switch (direction) { + case 'down': + await page.evaluate(d => window.scrollBy(0, d), distance); + break; + case 'up': + await page.evaluate(d => window.scrollBy(0, -d), distance); + break; + case 'top': + await page.evaluate(() => window.scrollTo(0, 0)); + break; + case 'bottom': + await page.evaluate(() => window.scrollTo(0, document.body.scrollHeight)); + break; + } + await page.waitForTimeout(500); // Wait for scroll animation +} + +/** + * Extract table data + * @param {Object} page - Playwright page + * @param {string} tableSelector - Table selector + */ +async function extractTableData(page, tableSelector) { + await page.waitForSelector(tableSelector); + + return await page.evaluate((selector) => { + const table = document.querySelector(selector); + if (!table) return null; + + const headers = Array.from(table.querySelectorAll('thead th')).map(th => + th.textContent?.trim() + ); + + const rows = Array.from(table.querySelectorAll('tbody tr')).map(tr => { + const cells = Array.from(tr.querySelectorAll('td')); + if (headers.length > 0) { + return cells.reduce((obj, cell, index) => { + obj[headers[index] || `column_${index}`] = cell.textContent?.trim(); + return obj; + }, {}); + } else { + return cells.map(cell => cell.textContent?.trim()); + } + }); + + return { headers, rows }; + }, tableSelector); +} + +/** + * Wait for and dismiss cookie banners + * @param {Object} page - Playwright page + * @param {number} timeout - Max time to wait + */ +async function handleCookieBanner(page, timeout = 3000) { + const commonSelectors = [ + 'button:has-text("Accept")', + 'button:has-text("Accept all")', + 'button:has-text("OK")', + 'button:has-text("Got it")', + 'button:has-text("I agree")', + '.cookie-accept', + '#cookie-accept', + '[data-testid="cookie-accept"]' + ]; + + for (const 
selector of commonSelectors) { + try { + const element = await page.waitForSelector(selector, { + timeout: timeout / commonSelectors.length, + state: 'visible' + }); + if (element) { + await element.click(); + console.log('Cookie banner dismissed'); + return true; + } + } catch (e) { + // Continue to next selector + } + } + + return false; +} + +/** + * Retry a function with exponential backoff + * @param {Function} fn - Function to retry + * @param {number} maxRetries - Maximum retry attempts + * @param {number} initialDelay - Initial delay in ms + */ +async function retryWithBackoff(fn, maxRetries = 3, initialDelay = 1000) { + let lastError; + + for (let i = 0; i < maxRetries; i++) { + try { + return await fn(); + } catch (error) { + lastError = error; + const delay = initialDelay * Math.pow(2, i); + console.log(`Attempt ${i + 1} failed, retrying in ${delay}ms...`); + await new Promise(resolve => setTimeout(resolve, delay)); + } + } + + throw lastError; +} + +/** + * Create browser context with common settings + * @param {Object} browser - Browser instance + * @param {Object} options - Context options + */ +async function createContext(browser, options = {}) { + const defaultOptions = { + viewport: { width: 1280, height: 720 }, + userAgent: options.mobile + ? 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.2 Mobile/15E148 Safari/604.1' + : undefined, + permissions: options.permissions || [], + geolocation: options.geolocation, + locale: options.locale || 'en-US', + timezoneId: options.timezoneId || 'America/New_York' + }; + + return await browser.newContext({ ...defaultOptions, ...options }); +} + +/** + * Detect running dev servers on common ports + * @param {Array} customPorts - Additional ports to check + * @returns {Promise} Array of detected server URLs + */ +async function detectDevServers(customPorts = []) { + const http = require('http'); + + // Common dev server ports + const commonPorts = [3000, 3001, 3002, 5173, 8080, 8000, 4200, 5000, 9000, 1234]; + const allPorts = [...new Set([...commonPorts, ...customPorts])]; + + const detectedServers = []; + + console.log('🔍 Checking for running dev servers...'); + + for (const port of allPorts) { + try { + await new Promise((resolve, reject) => { + const req = http.request({ + hostname: 'localhost', + port: port, + path: '/', + method: 'HEAD', + timeout: 500 + }, (res) => { + if (res.statusCode < 500) { + detectedServers.push(`http://localhost:${port}`); + console.log(` ✅ Found server on port ${port}`); + } + resolve(); + }); + + req.on('error', () => resolve()); + req.on('timeout', () => { + req.destroy(); + resolve(); + }); + + req.end(); + }); + } catch (e) { + // Port not available, continue + } + } + + if (detectedServers.length === 0) { + console.log(' ❌ No dev servers detected'); + } + + return detectedServers; +} + +module.exports = { + launchBrowser, + createPage, + waitForPageReady, + safeClick, + safeType, + extractTexts, + takeScreenshot, + authenticate, + scrollPage, + extractTableData, + handleCookieBanner, + retryWithBackoff, + createContext, + detectDevServers +}; diff --git a/skills/playwright-browser-automation/package.json b/skills/playwright-browser-automation/package.json new file mode 100644 index 000000000..42e480469 --- /dev/null +++ b/skills/playwright-browser-automation/package.json @@ -0,0 +1,26 @@ +{ + "name": "playwright-skill", + "version": "4.0.2", + "description": "General-purpose browser automation with Playwright for Claude Code with 
auto-detection and smart test management", + "author": "lackeyjb", + "main": "run.js", + "scripts": { + "setup": "npm install && npx playwright install chromium", + "install-all-browsers": "npx playwright install chromium firefox webkit" + }, + "keywords": [ + "playwright", + "automation", + "browser-testing", + "web-automation", + "claude-skill", + "general-purpose" + ], + "dependencies": { + "playwright": "^1.48.0" + }, + "engines": { + "node": ">=14.0.0" + }, + "license": "MIT" +} diff --git a/skills/playwright-browser-automation/run.js b/skills/playwright-browser-automation/run.js new file mode 100755 index 000000000..b5af75437 --- /dev/null +++ b/skills/playwright-browser-automation/run.js @@ -0,0 +1,208 @@ +#!/usr/bin/env node +/** + * Universal Playwright Executor for Claude Code + * + * Executes Playwright automation code from: + * - File path: node run.js script.js + * - Inline code: node run.js 'await page.goto("...")' + * - Stdin: cat script.js | node run.js + * + * Ensures proper module resolution by running from skill directory. + */ + +const fs = require('fs'); +const path = require('path'); +const { execSync } = require('child_process'); + +// Change to skill directory for proper module resolution +process.chdir(__dirname); + +/** + * Check if Playwright is installed + */ +function checkPlaywrightInstalled() { + try { + require.resolve('playwright'); + return true; + } catch (e) { + return false; + } +} + +/** + * Install Playwright if missing + */ +function installPlaywright() { + console.log('📦 Playwright not found. Installing...'); + try { + execSync('npm install', { stdio: 'inherit', cwd: __dirname }); + execSync('npx playwright install chromium', { stdio: 'inherit', cwd: __dirname }); + console.log('✅ Playwright installed successfully'); + return true; + } catch (e) { + console.error('❌ Failed to install Playwright:', e.message); + console.error('Please run manually: cd', __dirname, '&& npm run setup'); + return false; + } +} + +/** + * Get code to execute from various sources + */ +function getCodeToExecute() { + const args = process.argv.slice(2); + + // Case 1: File path provided + if (args.length > 0 && fs.existsSync(args[0])) { + const filePath = path.resolve(args[0]); + console.log(`📄 Executing file: ${filePath}`); + return fs.readFileSync(filePath, 'utf8'); + } + + // Case 2: Inline code provided as argument + if (args.length > 0) { + console.log('⚡ Executing inline code'); + return args.join(' '); + } + + // Case 3: Code from stdin + if (!process.stdin.isTTY) { + console.log('📥 Reading from stdin'); + return fs.readFileSync(0, 'utf8'); + } + + // No input + console.error('❌ No code to execute'); + console.error('Usage:'); + console.error(' node run.js script.js # Execute file'); + console.error(' node run.js "code here" # Execute inline'); + console.error(' cat script.js | node run.js # Execute from stdin'); + process.exit(1); +} + +/** + * Clean up old temporary execution files from previous runs + */ +function cleanupOldTempFiles() { + try { + const files = fs.readdirSync(__dirname); + const tempFiles = files.filter(f => f.startsWith('.temp-execution-') && f.endsWith('.js')); + + if (tempFiles.length > 0) { + tempFiles.forEach(file => { + const filePath = path.join(__dirname, file); + try { + fs.unlinkSync(filePath); + } catch (e) { + // Ignore errors - file might be in use or already deleted + } + }); + } + } catch (e) { + // Ignore directory read errors + } +} + +/** + * Wrap code in async IIFE if not already wrapped + */ +function wrapCodeIfNeeded(code) { + 
// Check if code already has require() and async structure + const hasRequire = code.includes('require('); + const hasAsyncIIFE = code.includes('(async () => {') || code.includes('(async()=>{'); + + // If it's already a complete script, return as-is + if (hasRequire && hasAsyncIIFE) { + return code; + } + + // If it's just Playwright commands, wrap in full template + if (!hasRequire) { + return ` +const { chromium, firefox, webkit, devices } = require('playwright'); +const helpers = require('./lib/helpers'); + +(async () => { + try { + ${code} + } catch (error) { + console.error('❌ Automation error:', error.message); + if (error.stack) { + console.error(error.stack); + } + process.exit(1); + } +})(); +`; + } + + // If has require but no async wrapper + if (!hasAsyncIIFE) { + return ` +(async () => { + try { + ${code} + } catch (error) { + console.error('❌ Automation error:', error.message); + if (error.stack) { + console.error(error.stack); + } + process.exit(1); + } +})(); +`; + } + + return code; +} + +/** + * Main execution + */ +async function main() { + console.log('🎭 Playwright Skill - Universal Executor\n'); + + // Clean up old temp files from previous runs + cleanupOldTempFiles(); + + // Check Playwright installation + if (!checkPlaywrightInstalled()) { + const installed = installPlaywright(); + if (!installed) { + process.exit(1); + } + } + + // Get code to execute + const rawCode = getCodeToExecute(); + const code = wrapCodeIfNeeded(rawCode); + + // Create temporary file for execution + const tempFile = path.join(__dirname, `.temp-execution-${Date.now()}.js`); + + try { + // Write code to temp file + fs.writeFileSync(tempFile, code, 'utf8'); + + // Execute the code + console.log('🚀 Starting automation...\n'); + require(tempFile); + + // Note: Temp file will be cleaned up on next run + // This allows long-running async operations to complete safely + + } catch (error) { + console.error('❌ Execution failed:', error.message); + if (error.stack) { + console.error('\n📋 Stack trace:'); + console.error(error.stack); + } + process.exit(1); + } +} + +// Run main function +main().catch(error => { + console.error('❌ Fatal error:', error.message); + process.exit(1); +}); diff --git a/skills/pptx/LICENSE.txt b/skills/pptx/LICENSE.txt new file mode 100644 index 000000000..c55ab4222 --- /dev/null +++ b/skills/pptx/LICENSE.txt @@ -0,0 +1,30 @@ +© 2025 Anthropic, PBC. All rights reserved. + +LICENSE: Use of these materials (including all code, prompts, assets, files, +and other components of this Skill) is governed by your agreement with +Anthropic regarding use of Anthropic's services. If no separate agreement +exists, use is governed by Anthropic's Consumer Terms of Service or +Commercial Terms of Service, as applicable: +https://www.anthropic.com/legal/consumer-terms +https://www.anthropic.com/legal/commercial-terms +Your applicable agreement is referred to as the "Agreement." "Services" are +as defined in the Agreement. 
+ +ADDITIONAL RESTRICTIONS: Notwithstanding anything in the Agreement to the +contrary, users may not: + +- Extract these materials from the Services or retain copies of these + materials outside the Services +- Reproduce or copy these materials, except for temporary copies created + automatically during authorized use of the Services +- Create derivative works based on these materials +- Distribute, sublicense, or transfer these materials to any third party +- Make, offer to sell, sell, or import any inventions embodied in these + materials +- Reverse engineer, decompile, or disassemble these materials + +The receipt, viewing, or possession of these materials does not convey or +imply any license or right beyond those expressly granted above. + +Anthropic retains all right, title, and interest in these materials, +including all copyrights, patents, and other intellectual property rights. diff --git a/skills/pptx/SKILL.md b/skills/pptx/SKILL.md new file mode 100644 index 000000000..b93b875fe --- /dev/null +++ b/skills/pptx/SKILL.md @@ -0,0 +1,484 @@ +--- +name: pptx +description: "Presentation creation, editing, and analysis. When Claude needs to work with presentations (.pptx files) for: (1) Creating new presentations, (2) Modifying or editing content, (3) Working with layouts, (4) Adding comments or speaker notes, or any other presentation tasks" +license: Proprietary. LICENSE.txt has complete terms +--- + +# PPTX creation, editing, and analysis + +## Overview + +A user may ask you to create, edit, or analyze the contents of a .pptx file. A .pptx file is essentially a ZIP archive containing XML files and other resources that you can read or edit. You have different tools and workflows available for different tasks. + +## Reading and analyzing content + +### Text extraction +If you just need to read the text contents of a presentation, you should convert the document to markdown: + +```bash +# Convert document to markdown +python -m markitdown path-to-file.pptx +``` + +### Raw XML access +You need raw XML access for: comments, speaker notes, slide layouts, animations, design elements, and complex formatting. For any of these features, you'll need to unpack a presentation and read its raw XML contents. + +#### Unpacking a file +`python ooxml/scripts/unpack.py ` + +**Note**: The unpack.py script is located at `skills/pptx/ooxml/scripts/unpack.py` relative to the project root. If the script doesn't exist at this path, use `find . -name "unpack.py"` to locate it. + +#### Key file structures +* `ppt/presentation.xml` - Main presentation metadata and slide references +* `ppt/slides/slide{N}.xml` - Individual slide contents (slide1.xml, slide2.xml, etc.) +* `ppt/notesSlides/notesSlide{N}.xml` - Speaker notes for each slide +* `ppt/comments/modernComment_*.xml` - Comments for specific slides +* `ppt/slideLayouts/` - Layout templates for slides +* `ppt/slideMasters/` - Master slide templates +* `ppt/theme/` - Theme and styling information +* `ppt/media/` - Images and other media files + +#### Typography and color extraction +**When given an example design to emulate**: Always analyze the presentation's typography and colors first using the methods below: +1. **Read theme file**: Check `ppt/theme/theme1.xml` for colors (``) and fonts (``) +2. **Sample slide content**: Examine `ppt/slides/slide1.xml` for actual font usage (``) and colors +3. 
**Search for patterns**: Use grep to find color (``, ``) and font references across all XML files + +## Creating a new PowerPoint presentation **without a template** + +When creating a new PowerPoint presentation from scratch, use the **html2pptx** workflow to convert HTML slides to PowerPoint with accurate positioning. + +### Design Principles + +**CRITICAL**: Before creating any presentation, analyze the content and choose appropriate design elements: +1. **Consider the subject matter**: What is this presentation about? What tone, industry, or mood does it suggest? +2. **Check for branding**: If the user mentions a company/organization, consider their brand colors and identity +3. **Match palette to content**: Select colors that reflect the subject +4. **State your approach**: Explain your design choices before writing code + +**Requirements**: +- ✅ State your content-informed design approach BEFORE writing code +- ✅ Use web-safe fonts only: Arial, Helvetica, Times New Roman, Georgia, Courier New, Verdana, Tahoma, Trebuchet MS, Impact +- ✅ Create clear visual hierarchy through size, weight, and color +- ✅ Ensure readability: strong contrast, appropriately sized text, clean alignment +- ✅ Be consistent: repeat patterns, spacing, and visual language across slides + +#### Color Palette Selection + +**Choosing colors creatively**: +- **Think beyond defaults**: What colors genuinely match this specific topic? Avoid autopilot choices. +- **Consider multiple angles**: Topic, industry, mood, energy level, target audience, brand identity (if mentioned) +- **Be adventurous**: Try unexpected combinations - a healthcare presentation doesn't have to be green, finance doesn't have to be navy +- **Build your palette**: Pick 3-5 colors that work together (dominant colors + supporting tones + accent) +- **Ensure contrast**: Text must be clearly readable on backgrounds + +**Example color palettes** (use these to spark creativity - choose one, adapt it, or create your own): + +1. **Classic Blue**: Deep navy (#1C2833), slate gray (#2E4053), silver (#AAB7B8), off-white (#F4F6F6) +2. **Teal & Coral**: Teal (#5EA8A7), deep teal (#277884), coral (#FE4447), white (#FFFFFF) +3. **Bold Red**: Red (#C0392B), bright red (#E74C3C), orange (#F39C12), yellow (#F1C40F), green (#2ECC71) +4. **Warm Blush**: Mauve (#A49393), blush (#EED6D3), rose (#E8B4B8), cream (#FAF7F2) +5. **Burgundy Luxury**: Burgundy (#5D1D2E), crimson (#951233), rust (#C15937), gold (#997929) +6. **Deep Purple & Emerald**: Purple (#B165FB), dark blue (#181B24), emerald (#40695B), white (#FFFFFF) +7. **Cream & Forest Green**: Cream (#FFE1C7), forest green (#40695B), white (#FCFCFC) +8. **Pink & Purple**: Pink (#F8275B), coral (#FF574A), rose (#FF737D), purple (#3D2F68) +9. **Lime & Plum**: Lime (#C5DE82), plum (#7C3A5F), coral (#FD8C6E), blue-gray (#98ACB5) +10. **Black & Gold**: Gold (#BF9A4A), black (#000000), cream (#F4F6F6) +11. **Sage & Terracotta**: Sage (#87A96B), terracotta (#E07A5F), cream (#F4F1DE), charcoal (#2C2C2C) +12. **Charcoal & Red**: Charcoal (#292929), red (#E33737), light gray (#CCCBCB) +13. **Vibrant Orange**: Orange (#F96D00), light gray (#F2F2F2), charcoal (#222831) +14. **Forest Green**: Black (#191A19), green (#4E9F3D), dark green (#1E5128), white (#FFFFFF) +15. **Retro Rainbow**: Purple (#722880), pink (#D72D51), orange (#EB5C18), amber (#F08800), gold (#DEB600) +16. **Vintage Earthy**: Mustard (#E3B448), sage (#CBD18F), forest green (#3A6B35), cream (#F4F1DE) +17. 
**Coastal Rose**: Old rose (#AD7670), beaver (#B49886), eggshell (#F3ECDC), ash gray (#BFD5BE) +18. **Orange & Turquoise**: Light orange (#FC993E), grayish turquoise (#667C6F), white (#FCFCFC) + +#### Visual Details Options + +**Geometric Patterns**: +- Diagonal section dividers instead of horizontal +- Asymmetric column widths (30/70, 40/60, 25/75) +- Rotated text headers at 90° or 270° +- Circular/hexagonal frames for images +- Triangular accent shapes in corners +- Overlapping shapes for depth + +**Border & Frame Treatments**: +- Thick single-color borders (10-20pt) on one side only +- Double-line borders with contrasting colors +- Corner brackets instead of full frames +- L-shaped borders (top+left or bottom+right) +- Underline accents beneath headers (3-5pt thick) + +**Typography Treatments**: +- Extreme size contrast (72pt headlines vs 11pt body) +- All-caps headers with wide letter spacing +- Numbered sections in oversized display type +- Monospace (Courier New) for data/stats/technical content +- Condensed fonts (Arial Narrow) for dense information +- Outlined text for emphasis + +**Chart & Data Styling**: +- Monochrome charts with single accent color for key data +- Horizontal bar charts instead of vertical +- Dot plots instead of bar charts +- Minimal gridlines or none at all +- Data labels directly on elements (no legends) +- Oversized numbers for key metrics + +**Layout Innovations**: +- Full-bleed images with text overlays +- Sidebar column (20-30% width) for navigation/context +- Modular grid systems (3×3, 4×4 blocks) +- Z-pattern or F-pattern content flow +- Floating text boxes over colored shapes +- Magazine-style multi-column layouts + +**Background Treatments**: +- Solid color blocks occupying 40-60% of slide +- Gradient fills (vertical or diagonal only) +- Split backgrounds (two colors, diagonal or vertical) +- Edge-to-edge color bands +- Negative space as a design element + +### Layout Tips +**When creating slides with charts or tables:** +- **Two-column layout (PREFERRED)**: Use a header spanning the full width, then two columns below - text/bullets in one column and the featured content in the other. This provides better balance and makes charts/tables more readable. Use flexbox with unequal column widths (e.g., 40%/60% split) to optimize space for each content type. +- **Full-slide layout**: Let the featured content (chart/table) take up the entire slide for maximum impact and readability +- **NEVER vertically stack**: Do not place charts/tables below text in a single column - this causes poor readability and layout issues + +### Workflow +1. **MANDATORY - READ ENTIRE FILE**: Read [`html2pptx.md`](html2pptx.md) completely from start to finish. **NEVER set any range limits when reading this file.** Read the full file content for detailed syntax, critical formatting rules, and best practices before proceeding with presentation creation. +2. Create an HTML file for each slide with proper dimensions (e.g., 720pt × 405pt for 16:9) + - Use `
<p>`, `<h1>`-`<h6>`, `<ul>`, `<table>
      ` for all text content + - Use `class="placeholder"` for areas where charts/tables will be added (render with gray background for visibility) + - **CRITICAL**: Rasterize gradients and icons as PNG images FIRST using Sharp, then reference in HTML + - **LAYOUT**: For slides with charts/tables/images, use either full-slide layout or two-column layout for better readability +3. Create and run a JavaScript file using the [`html2pptx.js`](scripts/html2pptx.js) library to convert HTML slides to PowerPoint and save the presentation + - Use the `html2pptx()` function to process each HTML file + - Add charts and tables to placeholder areas using PptxGenJS API + - Save the presentation using `pptx.writeFile()` +4. **Visual validation**: Generate thumbnails and inspect for layout issues + - Create thumbnail grid: `python scripts/thumbnail.py output.pptx workspace/thumbnails --cols 4` + - Read and carefully examine the thumbnail image for: + - **Text cutoff**: Text being cut off by header bars, shapes, or slide edges + - **Text overlap**: Text overlapping with other text or shapes + - **Positioning issues**: Content too close to slide boundaries or other elements + - **Contrast issues**: Insufficient contrast between text and backgrounds + - If issues found, adjust HTML margins/spacing/colors and regenerate the presentation + - Repeat until all slides are visually correct + +## Editing an existing PowerPoint presentation + +When edit slides in an existing PowerPoint presentation, you need to work with the raw Office Open XML (OOXML) format. This involves unpacking the .pptx file, editing the XML content, and repacking it. + +### Workflow +1. **MANDATORY - READ ENTIRE FILE**: Read [`ooxml.md`](ooxml.md) (~500 lines) completely from start to finish. **NEVER set any range limits when reading this file.** Read the full file content for detailed guidance on OOXML structure and editing workflows before any presentation editing. +2. Unpack the presentation: `python ooxml/scripts/unpack.py ` +3. Edit the XML files (primarily `ppt/slides/slide{N}.xml` and related files) +4. **CRITICAL**: Validate immediately after each edit and fix any validation errors before proceeding: `python ooxml/scripts/validate.py --original ` +5. Pack the final presentation: `python ooxml/scripts/pack.py ` + +## Creating a new PowerPoint presentation **using a template** + +When you need to create a presentation that follows an existing template's design, you'll need to duplicate and re-arrange template slides before then replacing placeholder context. + +### Workflow +1. **Extract template text AND create visual thumbnail grid**: + * Extract text: `python -m markitdown template.pptx > template-content.md` + * Read `template-content.md`: Read the entire file to understand the contents of the template presentation. **NEVER set any range limits when reading this file.** + * Create thumbnail grids: `python scripts/thumbnail.py template.pptx` + * See [Creating Thumbnail Grids](#creating-thumbnail-grids) section for more details + +2. 
**Analyze template and save inventory to a file**: + * **Visual Analysis**: Review thumbnail grid(s) to understand slide layouts, design patterns, and visual structure + * Create and save a template inventory file at `template-inventory.md` containing: + ```markdown + # Template Inventory Analysis + **Total Slides: [count]** + **IMPORTANT: Slides are 0-indexed (first slide = 0, last slide = count-1)** + + ## [Category Name] + - Slide 0: [Layout code if available] - Description/purpose + - Slide 1: [Layout code] - Description/purpose + - Slide 2: [Layout code] - Description/purpose + [... EVERY slide must be listed individually with its index ...] + ``` + * **Using the thumbnail grid**: Reference the visual thumbnails to identify: + - Layout patterns (title slides, content layouts, section dividers) + - Image placeholder locations and counts + - Design consistency across slide groups + - Visual hierarchy and structure + * This inventory file is REQUIRED for selecting appropriate templates in the next step + +3. **Create presentation outline based on template inventory**: + * Review available templates from step 2. + * Choose an intro or title template for the first slide. This should be one of the first templates. + * Choose safe, text-based layouts for the other slides. + * **CRITICAL: Match layout structure to actual content**: + - Single-column layouts: Use for unified narrative or single topic + - Two-column layouts: Use ONLY when you have exactly 2 distinct items/concepts + - Three-column layouts: Use ONLY when you have exactly 3 distinct items/concepts + - Image + text layouts: Use ONLY when you have actual images to insert + - Quote layouts: Use ONLY for actual quotes from people (with attribution), never for emphasis + - Never use layouts with more placeholders than you have content + - If you have 2 items, don't force them into a 3-column layout + - If you have 4+ items, consider breaking into multiple slides or using a list format + * Count your actual content pieces BEFORE selecting the layout + * Verify each placeholder in the chosen layout will be filled with meaningful content + * Select one option representing the **best** layout for each content section. + * Save `outline.md` with content AND template mapping that leverages available designs + * Example template mapping: + ``` + # Template slides to use (0-based indexing) + # WARNING: Verify indices are within range! Template with 73 slides has indices 0-72 + # Mapping: slide numbers from outline -> template slide indices + template_mapping = [ + 0, # Use slide 0 (Title/Cover) + 34, # Use slide 34 (B1: Title and body) + 34, # Use slide 34 again (duplicate for second B1) + 50, # Use slide 50 (E1: Quote) + 54, # Use slide 54 (F2: Closing + Text) + ] + ``` + +4. **Duplicate, reorder, and delete slides using `rearrange.py`**: + * Use the `scripts/rearrange.py` script to create a new presentation with slides in the desired order: + ```bash + python scripts/rearrange.py template.pptx working.pptx 0,34,34,50,52 + ``` + * The script handles duplicating repeated slides, deleting unused slides, and reordering automatically + * Slide indices are 0-based (first slide is 0, second is 1, etc.) + * The same slide index can appear multiple times to duplicate that slide + +5. 
**Extract ALL text using the `inventory.py` script**: + * **Run inventory extraction**: + ```bash + python scripts/inventory.py working.pptx text-inventory.json + ``` + * **Read text-inventory.json**: Read the entire text-inventory.json file to understand all shapes and their properties. **NEVER set any range limits when reading this file.** + + * The inventory JSON structure: + ```json + { + "slide-0": { + "shape-0": { + "placeholder_type": "TITLE", // or null for non-placeholders + "left": 1.5, // position in inches + "top": 2.0, + "width": 7.5, + "height": 1.2, + "paragraphs": [ + { + "text": "Paragraph text", + // Optional properties (only included when non-default): + "bullet": true, // explicit bullet detected + "level": 0, // only included when bullet is true + "alignment": "CENTER", // CENTER, RIGHT (not LEFT) + "space_before": 10.0, // space before paragraph in points + "space_after": 6.0, // space after paragraph in points + "line_spacing": 22.4, // line spacing in points + "font_name": "Arial", // from first run + "font_size": 14.0, // in points + "bold": true, + "italic": false, + "underline": false, + "color": "FF0000" // RGB color + } + ] + } + } + } + ``` + + * Key features: + - **Slides**: Named as "slide-0", "slide-1", etc. + - **Shapes**: Ordered by visual position (top-to-bottom, left-to-right) as "shape-0", "shape-1", etc. + - **Placeholder types**: TITLE, CENTER_TITLE, SUBTITLE, BODY, OBJECT, or null + - **Default font size**: `default_font_size` in points extracted from layout placeholders (when available) + - **Slide numbers are filtered**: Shapes with SLIDE_NUMBER placeholder type are automatically excluded from inventory + - **Bullets**: When `bullet: true`, `level` is always included (even if 0) + - **Spacing**: `space_before`, `space_after`, and `line_spacing` in points (only included when set) + - **Colors**: `color` for RGB (e.g., "FF0000"), `theme_color` for theme colors (e.g., "DARK_1") + - **Properties**: Only non-default values are included in the output + +6. **Generate replacement text and save the data to a JSON file** + Based on the text inventory from the previous step: + - **CRITICAL**: First verify which shapes exist in the inventory - only reference shapes that are actually present + - **VALIDATION**: The replace.py script will validate that all shapes in your replacement JSON exist in the inventory + - If you reference a non-existent shape, you'll get an error showing available shapes + - If you reference a non-existent slide, you'll get an error indicating the slide doesn't exist + - All validation errors are shown at once before the script exits + - **IMPORTANT**: The replace.py script uses inventory.py internally to identify ALL text shapes + - **AUTOMATIC CLEARING**: ALL text shapes from the inventory will be cleared unless you provide "paragraphs" for them + - Add a "paragraphs" field to shapes that need content (not "replacement_paragraphs") + - Shapes without "paragraphs" in the replacement JSON will have their text cleared automatically + - Paragraphs with bullets will be automatically left aligned. 
Don't set the `alignment` property on when `"bullet": true` + - Generate appropriate replacement content for placeholder text + - Use shape size to determine appropriate content length + - **CRITICAL**: Include paragraph properties from the original inventory - don't just provide text + - **IMPORTANT**: When bullet: true, do NOT include bullet symbols (•, -, *) in text - they're added automatically + - **ESSENTIAL FORMATTING RULES**: + - Headers/titles should typically have `"bold": true` + - List items should have `"bullet": true, "level": 0` (level is required when bullet is true) + - Preserve any alignment properties (e.g., `"alignment": "CENTER"` for centered text) + - Include font properties when different from default (e.g., `"font_size": 14.0`, `"font_name": "Lora"`) + - Colors: Use `"color": "FF0000"` for RGB or `"theme_color": "DARK_1"` for theme colors + - The replacement script expects **properly formatted paragraphs**, not just text strings + - **Overlapping shapes**: Prefer shapes with larger default_font_size or more appropriate placeholder_type + - Save the updated inventory with replacements to `replacement-text.json` + - **WARNING**: Different template layouts have different shape counts - always check the actual inventory before creating replacements + + Example paragraphs field showing proper formatting: + ```json + "paragraphs": [ + { + "text": "New presentation title text", + "alignment": "CENTER", + "bold": true + }, + { + "text": "Section Header", + "bold": true + }, + { + "text": "First bullet point without bullet symbol", + "bullet": true, + "level": 0 + }, + { + "text": "Red colored text", + "color": "FF0000" + }, + { + "text": "Theme colored text", + "theme_color": "DARK_1" + }, + { + "text": "Regular paragraph text without special formatting" + } + ] + ``` + + **Shapes not listed in the replacement JSON are automatically cleared**: + ```json + { + "slide-0": { + "shape-0": { + "paragraphs": [...] // This shape gets new text + } + // shape-1 and shape-2 from inventory will be cleared automatically + } + } + ``` + + **Common formatting patterns for presentations**: + - Title slides: Bold text, sometimes centered + - Section headers within slides: Bold text + - Bullet lists: Each item needs `"bullet": true, "level": 0` + - Body text: Usually no special properties needed + - Quotes: May have special alignment or font properties + +7. **Apply replacements using the `replace.py` script** + ```bash + python scripts/replace.py working.pptx replacement-text.json output.pptx + ``` + + The script will: + - First extract the inventory of ALL text shapes using functions from inventory.py + - Validate that all shapes in the replacement JSON exist in the inventory + - Clear text from ALL shapes identified in the inventory + - Apply new text only to shapes with "paragraphs" defined in the replacement JSON + - Preserve formatting by applying paragraph properties from the JSON + - Handle bullets, alignment, font properties, and colors automatically + - Save the updated presentation + + Example validation errors: + ``` + ERROR: Invalid shapes in replacement JSON: + - Shape 'shape-99' not found on 'slide-0'. 
Available shapes: shape-0, shape-1, shape-4 + - Slide 'slide-999' not found in inventory + ``` + + ``` + ERROR: Replacement text made overflow worse in these shapes: + - slide-0/shape-2: overflow worsened by 1.25" (was 0.00", now 1.25") + ``` + +## Creating Thumbnail Grids + +To create visual thumbnail grids of PowerPoint slides for quick analysis and reference: + +```bash +python scripts/thumbnail.py template.pptx [output_prefix] +``` + +**Features**: +- Creates: `thumbnails.jpg` (or `thumbnails-1.jpg`, `thumbnails-2.jpg`, etc. for large decks) +- Default: 5 columns, max 30 slides per grid (5×6) +- Custom prefix: `python scripts/thumbnail.py template.pptx my-grid` + - Note: The output prefix should include the path if you want output in a specific directory (e.g., `workspace/my-grid`) +- Adjust columns: `--cols 4` (range: 3-6, affects slides per grid) +- Grid limits: 3 cols = 12 slides/grid, 4 cols = 20, 5 cols = 30, 6 cols = 42 +- Slides are zero-indexed (Slide 0, Slide 1, etc.) + +**Use cases**: +- Template analysis: Quickly understand slide layouts and design patterns +- Content review: Visual overview of entire presentation +- Navigation reference: Find specific slides by their visual appearance +- Quality check: Verify all slides are properly formatted + +**Examples**: +```bash +# Basic usage +python scripts/thumbnail.py presentation.pptx + +# Combine options: custom name, columns +python scripts/thumbnail.py template.pptx analysis --cols 4 +``` + +## Converting Slides to Images + +To visually analyze PowerPoint slides, convert them to images using a two-step process: + +1. **Convert PPTX to PDF**: + ```bash + soffice --headless --convert-to pdf template.pptx + ``` + +2. **Convert PDF pages to JPEG images**: + ```bash + pdftoppm -jpeg -r 150 template.pdf slide + ``` + This creates files like `slide-1.jpg`, `slide-2.jpg`, etc. 
Options:
- `-r 150`: Sets resolution to 150 DPI (adjust for quality/size balance)
- `-jpeg`: Output JPEG format (use `-png` for PNG if preferred)
- `-f N`: First page to convert (e.g., `-f 2` starts from page 2)
- `-l N`: Last page to convert (e.g., `-l 5` stops at page 5)
- `slide`: Prefix for output files

Example for specific range:
```bash
pdftoppm -jpeg -r 150 -f 2 -l 5 template.pdf slide  # Converts only pages 2-5
```

## Code Style Guidelines

**IMPORTANT**: When generating code for PPTX operations:
- Write concise code
- Avoid verbose variable names and redundant operations
- Avoid unnecessary print statements

## Dependencies

Required dependencies (should already be installed):

- **markitdown**: `pip install "markitdown[pptx]"` (for text extraction from presentations)
- **pptxgenjs**: `npm install -g pptxgenjs` (for creating presentations via html2pptx)
- **playwright**: `npm install -g playwright` (for HTML rendering in html2pptx)
- **react-icons**: `npm install -g react-icons react react-dom` (for icons)
- **sharp**: `npm install -g sharp` (for SVG rasterization and image processing)
- **LibreOffice**: `sudo apt-get install libreoffice` (for PDF conversion)
- **Poppler**: `sudo apt-get install poppler-utils` (for pdftoppm to convert PDF to images)
- **defusedxml**: `pip install defusedxml` (for secure XML parsing)
\ No newline at end of file
diff --git a/skills/pptx/html2pptx.md b/skills/pptx/html2pptx.md
new file mode 100644
index 000000000..106adf72d
--- /dev/null
+++ b/skills/pptx/html2pptx.md
@@ -0,0 +1,625 @@
# HTML to PowerPoint Guide

Convert HTML slides to PowerPoint presentations with accurate positioning using the `html2pptx.js` library.

## Table of Contents

1. [Creating HTML Slides](#creating-html-slides)
2. [Using the html2pptx Library](#using-the-html2pptx-library)
3. [Using PptxGenJS](#using-pptxgenjs)

---

## Creating HTML Slides

Every HTML slide must include proper body dimensions:

### Layout Dimensions

- **16:9** (default): `width: 720pt; height: 405pt`
- **4:3**: `width: 720pt; height: 540pt`
- **16:10**: `width: 720pt; height: 450pt`

### Supported Elements

- `<p>`, `<h1>`-`<h6>` - Text with styling
- `<ul>`, `<ol>` - Lists (never use manual bullets •, -, *)
- `<b>`, `<strong>` - Bold text (inline formatting)
- `<i>`, `<em>` - Italic text (inline formatting)
- `<u>` - Underlined text (inline formatting)
- `<span>` - Inline formatting with CSS styles (bold, italic, underline, color)
- `<br>` - Line breaks
- `<div>` with bg/border - Becomes shape
- `<img>` - Images
- `class="placeholder"` - Reserved space for charts (returns `{ id, x, y, w, h }`)

### Critical Text Rules

**ALL text MUST be inside `<p>`, `<h1>`-`<h6>`, `<ul>`, or `<ol>` tags:**
- ✅ Correct: `<div><p>Text here</p></div>`
- ❌ Wrong: `<div>Text here</div>` - **Text will NOT appear in PowerPoint**
- ❌ Wrong: `<span>Text</span>` - **Text will NOT appear in PowerPoint**
- Text in `<div>` or `<span>` without a text tag will be silently ignored

**NEVER use manual bullet symbols (•, -, *, etc.)** - Use `<ul>` or `<ol>` lists instead

**ONLY use web-safe fonts that are universally available:**
- ✅ Web-safe fonts: `Arial`, `Helvetica`, `Times New Roman`, `Georgia`, `Courier New`, `Verdana`, `Tahoma`, `Trebuchet MS`, `Impact`, `Comic Sans MS`
- ❌ Wrong: `'Segoe UI'`, `'SF Pro'`, `'Roboto'`, custom fonts - **Might cause rendering issues**

### Styling

- Use `display: flex` on body to prevent margin collapse from breaking overflow validation
- Use `margin` for spacing (padding included in size)
- Inline formatting: Use `<b>`, `<i>`, `<u>` tags OR `<span>` with CSS styles
  - `<span>` supports: `font-weight: bold`, `font-style: italic`, `text-decoration: underline`, `color: #rrggbb`
  - `<span>` does NOT support: `margin`, `padding` (not supported in PowerPoint text runs)
  - Example: `<span style="font-weight: bold; color: #0066CC">Bold blue text</span>`
- Flexbox works - positions calculated from rendered layout
- Use hex colors with `#` prefix in CSS
- **Text alignment**: Use CSS `text-align` (`center`, `right`, etc.) when needed as a hint to PptxGenJS for text formatting if text lengths are slightly off

### Shape Styling (DIV elements only)

**IMPORTANT: Backgrounds, borders, and shadows only work on `<div>` elements, NOT on text elements (`<p>`, `<h1>`-`<h6>`, `<ul>`, `<ol>`)**

- **Backgrounds**: CSS `background` or `background-color` on `<div>` elements only
  - Example: `<div style="background-color: #0066CC;">` - Creates a shape with background
- **Borders**: CSS `border` on `<div>` elements converts to PowerPoint shape borders
  - Supports uniform borders: `border: 2px solid #333333`
  - Supports partial borders: `border-left`, `border-right`, `border-top`, `border-bottom` (rendered as line shapes)
  - Example: `<div style="border: 2px solid #333333;">`
- **Border radius**: CSS `border-radius` on `<div>` elements for rounded corners
  - `border-radius: 50%` or higher creates circular shape
  - Percentages <50% calculated relative to shape's smaller dimension
  - Supports px and pt units (e.g., `border-radius: 8pt;`, `border-radius: 12px;`)
  - Example: `<div style="border-radius: 25%;">` on 100x200px box = 25% of 100px = 25px radius
- **Box shadows**: CSS `box-shadow` on `<div>` elements converts to PowerPoint shadows
  - Supports outer shadows only (inset shadows are ignored to prevent corruption)
  - Example: `<div style="box-shadow: 2px 2px 6px rgba(0, 0, 0, 0.3);">`
  - Note: Inset/inner shadows are not supported by PowerPoint and will be skipped

### Icons & Gradients

- **CRITICAL: Never use CSS gradients (`linear-gradient`, `radial-gradient`)** - They don't convert to PowerPoint
- **ALWAYS create gradient/icon PNGs FIRST using Sharp, then reference in HTML**
- For gradients: Rasterize SVG to PNG background images
- For icons: Rasterize react-icons SVG to PNG images
- All visual effects must be pre-rendered as raster images before HTML rendering

**Rasterizing Icons with Sharp:**

```javascript
const React = require('react');
const ReactDOMServer = require('react-dom/server');
const sharp = require('sharp');
const { FaHome } = require('react-icons/fa');

async function rasterizeIconPng(IconComponent, color, size = "256", filename) {
  const svgString = ReactDOMServer.renderToStaticMarkup(
    React.createElement(IconComponent, { color: `#${color}`, size: size })
  );

  // Convert SVG to PNG using Sharp
  await sharp(Buffer.from(svgString))
    .png()
    .toFile(filename);

  return filename;
}

// Usage: Rasterize icon before using in HTML
const iconPath = await rasterizeIconPng(FaHome, "4472c4", "256", "home-icon.png");
// Then reference in HTML: <img src="home-icon.png" style="width: 24pt; height: 24pt">
```

**Rasterizing Gradients with Sharp:**

```javascript
const sharp = require('sharp');

async function createGradientBackground(filename) {
  // Two-stop linear gradient rendered as SVG, then rasterized to PNG
  const svg = `
    <svg width="1440" height="810" xmlns="http://www.w3.org/2000/svg">
      <defs>
        <linearGradient id="grad" x1="0%" y1="0%" x2="100%" y2="100%">
          <stop offset="0%" stop-color="#16A085"/>
          <stop offset="100%" stop-color="#2C3E50"/>
        </linearGradient>
      </defs>
      <rect width="1440" height="810" fill="url(#grad)"/>
    </svg>`;

  await sharp(Buffer.from(svg))
    .png()
    .toFile(filename);

  return filename;
}

// Usage: Create gradient background before HTML
const bgPath = await createGradientBackground("gradient-bg.png");
// Then in HTML: <body style="background-image: url('gradient-bg.png')">
```

### Example

```html
<!DOCTYPE html>
<html>
<head>
  <style>
    body {
      width: 720pt;
      height: 405pt;
      margin: 0;
      display: flex;
      flex-direction: column;
      font-family: Arial, sans-serif;
      background-image: url('gradient-bg.png');
      background-size: cover;
    }
  </style>
</head>
<body>
  <div style="margin: 30pt;">
    <h1>Recipe Title</h1>
    <ul>
      <li><b>Item:</b> Description</li>
    </ul>
    <p>Text with <b>bold</b>, <i>italic</i>, <u>underline</u>.</p>
    <div id="chart-area" class="placeholder" style="width: 320pt; height: 180pt;"></div>
    <div style="background-color: #0066CC; border-radius: 8pt; width: 80pt;">
      <h2>5</h2>
    </div>
  </div>
</body>
</html>
                      + + +``` + +## Using the html2pptx Library + +### Dependencies + +These libraries have been globally installed and are available to use: +- `pptxgenjs` +- `playwright` +- `sharp` + +### Basic Usage + +```javascript +const pptxgen = require('pptxgenjs'); +const html2pptx = require('./html2pptx'); + +const pptx = new pptxgen(); +pptx.layout = 'LAYOUT_16x9'; // Must match HTML body dimensions + +const { slide, placeholders } = await html2pptx('slide1.html', pptx); + +// Add chart to placeholder area +if (placeholders.length > 0) { + slide.addChart(pptx.charts.LINE, chartData, placeholders[0]); +} + +await pptx.writeFile('output.pptx'); +``` + +### API Reference + +#### Function Signature +```javascript +await html2pptx(htmlFile, pres, options) +``` + +#### Parameters +- `htmlFile` (string): Path to HTML file (absolute or relative) +- `pres` (pptxgen): PptxGenJS presentation instance with layout already set +- `options` (object, optional): + - `tmpDir` (string): Temporary directory for generated files (default: `process.env.TMPDIR || '/tmp'`) + - `slide` (object): Existing slide to reuse (default: creates new slide) + +#### Returns +```javascript +{ + slide: pptxgenSlide, // The created/updated slide + placeholders: [ // Array of placeholder positions + { id: string, x: number, y: number, w: number, h: number }, + ... + ] +} +``` + +### Validation + +The library automatically validates and collects all errors before throwing: + +1. **HTML dimensions must match presentation layout** - Reports dimension mismatches +2. **Content must not overflow body** - Reports overflow with exact measurements +3. **CSS gradients** - Reports unsupported gradient usage +4. **Text element styling** - Reports backgrounds/borders/shadows on text elements (only allowed on divs) + +**All validation errors are collected and reported together** in a single error message, allowing you to fix all issues at once instead of one at a time. + +### Working with Placeholders + +```javascript +const { slide, placeholders } = await html2pptx('slide.html', pptx); + +// Use first placeholder +slide.addChart(pptx.charts.BAR, data, placeholders[0]); + +// Find by ID +const chartArea = placeholders.find(p => p.id === 'chart-area'); +slide.addChart(pptx.charts.LINE, data, chartArea); +``` + +### Complete Example + +```javascript +const pptxgen = require('pptxgenjs'); +const html2pptx = require('./html2pptx'); + +async function createPresentation() { + const pptx = new pptxgen(); + pptx.layout = 'LAYOUT_16x9'; + pptx.author = 'Your Name'; + pptx.title = 'My Presentation'; + + // Slide 1: Title + const { slide: slide1 } = await html2pptx('slides/title.html', pptx); + + // Slide 2: Content with chart + const { slide: slide2, placeholders } = await html2pptx('slides/data.html', pptx); + + const chartData = [{ + name: 'Sales', + labels: ['Q1', 'Q2', 'Q3', 'Q4'], + values: [4500, 5500, 6200, 7100] + }]; + + slide2.addChart(pptx.charts.BAR, chartData, { + ...placeholders[0], + showTitle: true, + title: 'Quarterly Sales', + showCatAxisTitle: true, + catAxisTitle: 'Quarter', + showValAxisTitle: true, + valAxisTitle: 'Sales ($000s)' + }); + + // Save + await pptx.writeFile({ fileName: 'presentation.pptx' }); + console.log('Presentation created successfully!'); +} + +createPresentation().catch(console.error); +``` + +## Using PptxGenJS + +After converting HTML to slides with `html2pptx`, you'll use PptxGenJS to add dynamic content like charts, images, and additional elements. 
+ +### ⚠️ Critical Rules + +#### Colors +- **NEVER use `#` prefix** with hex colors in PptxGenJS - causes file corruption +- ✅ Correct: `color: "FF0000"`, `fill: { color: "0066CC" }` +- ❌ Wrong: `color: "#FF0000"` (breaks document) + +### Adding Images + +Always calculate aspect ratios from actual image dimensions: + +```javascript +// Get image dimensions: identify image.png | grep -o '[0-9]* x [0-9]*' +const imgWidth = 1860, imgHeight = 1519; // From actual file +const aspectRatio = imgWidth / imgHeight; + +const h = 3; // Max height +const w = h * aspectRatio; +const x = (10 - w) / 2; // Center on 16:9 slide + +slide.addImage({ path: "chart.png", x, y: 1.5, w, h }); +``` + +### Adding Text + +```javascript +// Rich text with formatting +slide.addText([ + { text: "Bold ", options: { bold: true } }, + { text: "Italic ", options: { italic: true } }, + { text: "Normal" } +], { + x: 1, y: 2, w: 8, h: 1 +}); +``` + +### Adding Shapes + +```javascript +// Rectangle +slide.addShape(pptx.shapes.RECTANGLE, { + x: 1, y: 1, w: 3, h: 2, + fill: { color: "4472C4" }, + line: { color: "000000", width: 2 } +}); + +// Circle +slide.addShape(pptx.shapes.OVAL, { + x: 5, y: 1, w: 2, h: 2, + fill: { color: "ED7D31" } +}); + +// Rounded rectangle +slide.addShape(pptx.shapes.ROUNDED_RECTANGLE, { + x: 1, y: 4, w: 3, h: 1.5, + fill: { color: "70AD47" }, + rectRadius: 0.2 +}); +``` + +### Adding Charts + +**Required for most charts:** Axis labels using `catAxisTitle` (category) and `valAxisTitle` (value). + +**Chart Data Format:** +- Use **single series with all labels** for simple bar/line charts +- Each series creates a separate legend entry +- Labels array defines X-axis values + +**Time Series Data - Choose Correct Granularity:** +- **< 30 days**: Use daily grouping (e.g., "10-01", "10-02") - avoid monthly aggregation that creates single-point charts +- **30-365 days**: Use monthly grouping (e.g., "2024-01", "2024-02") +- **> 365 days**: Use yearly grouping (e.g., "2023", "2024") +- **Validate**: Charts with only 1 data point likely indicate incorrect aggregation for the time period + +```javascript +const { slide, placeholders } = await html2pptx('slide.html', pptx); + +// CORRECT: Single series with all labels +slide.addChart(pptx.charts.BAR, [{ + name: "Sales 2024", + labels: ["Q1", "Q2", "Q3", "Q4"], + values: [4500, 5500, 6200, 7100] +}], { + ...placeholders[0], // Use placeholder position + barDir: 'col', // 'col' = vertical bars, 'bar' = horizontal + showTitle: true, + title: 'Quarterly Sales', + showLegend: false, // No legend needed for single series + // Required axis labels + showCatAxisTitle: true, + catAxisTitle: 'Quarter', + showValAxisTitle: true, + valAxisTitle: 'Sales ($000s)', + // Optional: Control scaling (adjust min based on data range for better visualization) + valAxisMaxVal: 8000, + valAxisMinVal: 0, // Use 0 for counts/amounts; for clustered data (e.g., 4500-7100), consider starting closer to min value + valAxisMajorUnit: 2000, // Control y-axis label spacing to prevent crowding + catAxisLabelRotate: 45, // Rotate labels if crowded + dataLabelPosition: 'outEnd', + dataLabelColor: '000000', + // Use single color for single-series charts + chartColors: ["4472C4"] // All bars same color +}); +``` + +#### Scatter Chart + +**IMPORTANT**: Scatter chart data format is unusual - first series contains X-axis values, subsequent series contain Y-values: + +```javascript +// Prepare data +const data1 = [{ x: 10, y: 20 }, { x: 15, y: 25 }, { x: 20, y: 30 }]; +const data2 = [{ x: 12, y: 18 }, { x: 
18, y: 22 }]; + +const allXValues = [...data1.map(d => d.x), ...data2.map(d => d.x)]; + +slide.addChart(pptx.charts.SCATTER, [ + { name: 'X-Axis', values: allXValues }, // First series = X values + { name: 'Series 1', values: data1.map(d => d.y) }, // Y values only + { name: 'Series 2', values: data2.map(d => d.y) } // Y values only +], { + x: 1, y: 1, w: 8, h: 4, + lineSize: 0, // 0 = no connecting lines + lineDataSymbol: 'circle', + lineDataSymbolSize: 6, + showCatAxisTitle: true, + catAxisTitle: 'X Axis', + showValAxisTitle: true, + valAxisTitle: 'Y Axis', + chartColors: ["4472C4", "ED7D31"] +}); +``` + +#### Line Chart + +```javascript +slide.addChart(pptx.charts.LINE, [{ + name: "Temperature", + labels: ["Jan", "Feb", "Mar", "Apr"], + values: [32, 35, 42, 55] +}], { + x: 1, y: 1, w: 8, h: 4, + lineSize: 4, + lineSmooth: true, + // Required axis labels + showCatAxisTitle: true, + catAxisTitle: 'Month', + showValAxisTitle: true, + valAxisTitle: 'Temperature (°F)', + // Optional: Y-axis range (set min based on data range for better visualization) + valAxisMinVal: 0, // For ranges starting at 0 (counts, percentages, etc.) + valAxisMaxVal: 60, + valAxisMajorUnit: 20, // Control y-axis label spacing to prevent crowding (e.g., 10, 20, 25) + // valAxisMinVal: 30, // PREFERRED: For data clustered in a range (e.g., 32-55 or ratings 3-5), start axis closer to min value to show variation + // Optional: Chart colors + chartColors: ["4472C4", "ED7D31", "A5A5A5"] +}); +``` + +#### Pie Chart (No Axis Labels Required) + +**CRITICAL**: Pie charts require a **single data series** with all categories in the `labels` array and corresponding values in the `values` array. + +```javascript +slide.addChart(pptx.charts.PIE, [{ + name: "Market Share", + labels: ["Product A", "Product B", "Other"], // All categories in one array + values: [35, 45, 20] // All values in one array +}], { + x: 2, y: 1, w: 6, h: 4, + showPercent: true, + showLegend: true, + legendPos: 'r', // right + chartColors: ["4472C4", "ED7D31", "A5A5A5"] +}); +``` + +#### Multiple Data Series + +```javascript +slide.addChart(pptx.charts.LINE, [ + { + name: "Product A", + labels: ["Q1", "Q2", "Q3", "Q4"], + values: [10, 20, 30, 40] + }, + { + name: "Product B", + labels: ["Q1", "Q2", "Q3", "Q4"], + values: [15, 25, 20, 35] + } +], { + x: 1, y: 1, w: 8, h: 4, + showCatAxisTitle: true, + catAxisTitle: 'Quarter', + showValAxisTitle: true, + valAxisTitle: 'Revenue ($M)' +}); +``` + +### Chart Colors + +**CRITICAL**: Use hex colors **without** the `#` prefix - including `#` causes file corruption. + +**Align chart colors with your chosen design palette**, ensuring sufficient contrast and distinctiveness for data visualization. 
Adjust colors for: +- Strong contrast between adjacent series +- Readability against slide backgrounds +- Accessibility (avoid red-green only combinations) + +```javascript +// Example: Ocean palette-inspired chart colors (adjusted for contrast) +const chartColors = ["16A085", "FF6B9D", "2C3E50", "F39C12", "9B59B6"]; + +// Single-series chart: Use one color for all bars/points +slide.addChart(pptx.charts.BAR, [{ + name: "Sales", + labels: ["Q1", "Q2", "Q3", "Q4"], + values: [4500, 5500, 6200, 7100] +}], { + ...placeholders[0], + chartColors: ["16A085"], // All bars same color + showLegend: false +}); + +// Multi-series chart: Each series gets a different color +slide.addChart(pptx.charts.LINE, [ + { name: "Product A", labels: ["Q1", "Q2", "Q3"], values: [10, 20, 30] }, + { name: "Product B", labels: ["Q1", "Q2", "Q3"], values: [15, 25, 20] } +], { + ...placeholders[0], + chartColors: ["16A085", "FF6B9D"] // One color per series +}); +``` + +### Adding Tables + +Tables can be added with basic or advanced formatting: + +#### Basic Table + +```javascript +slide.addTable([ + ["Header 1", "Header 2", "Header 3"], + ["Row 1, Col 1", "Row 1, Col 2", "Row 1, Col 3"], + ["Row 2, Col 1", "Row 2, Col 2", "Row 2, Col 3"] +], { + x: 0.5, + y: 1, + w: 9, + h: 3, + border: { pt: 1, color: "999999" }, + fill: { color: "F1F1F1" } +}); +``` + +#### Table with Custom Formatting + +```javascript +const tableData = [ + // Header row with custom styling + [ + { text: "Product", options: { fill: { color: "4472C4" }, color: "FFFFFF", bold: true } }, + { text: "Revenue", options: { fill: { color: "4472C4" }, color: "FFFFFF", bold: true } }, + { text: "Growth", options: { fill: { color: "4472C4" }, color: "FFFFFF", bold: true } } + ], + // Data rows + ["Product A", "$50M", "+15%"], + ["Product B", "$35M", "+22%"], + ["Product C", "$28M", "+8%"] +]; + +slide.addTable(tableData, { + x: 1, + y: 1.5, + w: 8, + h: 3, + colW: [3, 2.5, 2.5], // Column widths + rowH: [0.5, 0.6, 0.6, 0.6], // Row heights + border: { pt: 1, color: "CCCCCC" }, + align: "center", + valign: "middle", + fontSize: 14 +}); +``` + +#### Table with Merged Cells + +```javascript +const mergedTableData = [ + [ + { text: "Q1 Results", options: { colspan: 3, fill: { color: "4472C4" }, color: "FFFFFF", bold: true } } + ], + ["Product", "Sales", "Market Share"], + ["Product A", "$25M", "35%"], + ["Product B", "$18M", "25%"] +]; + +slide.addTable(mergedTableData, { + x: 1, + y: 1, + w: 8, + h: 2.5, + colW: [3, 2.5, 2.5], + border: { pt: 1, color: "DDDDDD" } +}); +``` + +### Table Options + +Common table options: +- `x, y, w, h` - Position and size +- `colW` - Array of column widths (in inches) +- `rowH` - Array of row heights (in inches) +- `border` - Border style: `{ pt: 1, color: "999999" }` +- `fill` - Background color (no # prefix) +- `align` - Text alignment: "left", "center", "right" +- `valign` - Vertical alignment: "top", "middle", "bottom" +- `fontSize` - Text size +- `autoPage` - Auto-create new slides if content overflows \ No newline at end of file diff --git a/skills/pptx/ooxml.md b/skills/pptx/ooxml.md new file mode 100644 index 000000000..951b3cf65 --- /dev/null +++ b/skills/pptx/ooxml.md @@ -0,0 +1,427 @@ +# Office Open XML Technical Reference for PowerPoint + +**Important: Read this entire document before starting.** Critical XML schema rules and formatting requirements are covered throughout. Incorrect implementation can create invalid PPTX files that PowerPoint cannot open. 
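Editing at this level means working directly on the package parts: a `.pptx` file is an ordinary OPC ZIP archive, so the XML under `ppt/` can be unpacked, edited, and repacked. If your workflow does not already have pack/unpack tooling, a minimal sketch using Python's standard `zipfile` module looks like the following (file names are illustrative placeholders):

```python
import os
import zipfile

def unpack(pptx_path, out_dir):
    # A .pptx is a ZIP (OPC) package; extract all parts for direct XML editing
    with zipfile.ZipFile(pptx_path) as zf:
        zf.extractall(out_dir)

def pack(src_dir, pptx_path):
    # Re-zip every file, storing paths relative to the package root
    with zipfile.ZipFile(pptx_path, "w", zipfile.ZIP_DEFLATED) as zf:
        for root, _dirs, files in os.walk(src_dir):
            for name in files:
                full_path = os.path.join(root, name)
                zf.write(full_path, os.path.relpath(full_path, src_dir))

unpack("template.pptx", "unpacked")   # edit the XML under unpacked/ppt/ ...
pack("unpacked", "output.pptx")       # ...then repack into a new presentation
```

The guidelines below apply to the XML edited between those two steps.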
+ +## Technical Guidelines + +### Schema Compliance +- **Element ordering in ``**: ``, ``, `` +- **Whitespace**: Add `xml:space='preserve'` to `` elements with leading/trailing spaces +- **Unicode**: Escape characters in ASCII content: `"` becomes `“` +- **Images**: Add to `ppt/media/`, reference in slide XML, set dimensions to fit slide bounds +- **Relationships**: Update `ppt/slides/_rels/slideN.xml.rels` for each slide's resources +- **Dirty attribute**: Add `dirty="0"` to `` and `` elements to indicate clean state + +## Presentation Structure + +### Basic Slide Structure +```xml + + + + + ... + ... + + + + +``` + +### Text Box / Shape with Text +```xml + + + + + + + + + + + + + + + + + + + + + + Slide Title + + + + +``` + +### Text Formatting +```xml + + + + Bold Text + + + + + + Italic Text + + + + + + Underlined + + + + + + + + + + Highlighted Text + + + + + + + + + + Colored Arial 24pt + + + + + + + + + + Formatted text + +``` + +### Lists +```xml + + + + + + + First bullet point + + + + + + + + + + First numbered item + + + + + + + + + + Indented bullet + + +``` + +### Shapes +```xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +### Images +```xml + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +### Tables +```xml + + + + + + + + + + + + + + + + + + + + + + + + + + + Cell 1 + + + + + + + + + + + Cell 2 + + + + + + + + + +``` + +### Slide Layouts + +```xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +## File Updates + +When adding content, update these files: + +**`ppt/_rels/presentation.xml.rels`:** +```xml + + +``` + +**`ppt/slides/_rels/slide1.xml.rels`:** +```xml + + +``` + +**`[Content_Types].xml`:** +```xml + + + +``` + +**`ppt/presentation.xml`:** +```xml + + + + +``` + +**`docProps/app.xml`:** Update slide count and statistics +```xml +2 +10 +50 +``` + +## Slide Operations + +### Adding a New Slide +When adding a slide to the end of the presentation: + +1. **Create the slide file** (`ppt/slides/slideN.xml`) +2. **Update `[Content_Types].xml`**: Add Override for the new slide +3. **Update `ppt/_rels/presentation.xml.rels`**: Add relationship for the new slide +4. **Update `ppt/presentation.xml`**: Add slide ID to `` +5. **Create slide relationships** (`ppt/slides/_rels/slideN.xml.rels`) if needed +6. **Update `docProps/app.xml`**: Increment slide count and update statistics (if present) + +### Duplicating a Slide +1. Copy the source slide XML file with a new name +2. Update all IDs in the new slide to be unique +3. Follow the "Adding a New Slide" steps above +4. **CRITICAL**: Remove or update any notes slide references in `_rels` files +5. Remove references to unused media files + +### Reordering Slides +1. **Update `ppt/presentation.xml`**: Reorder `` elements in `` +2. The order of `` elements determines slide order +3. Keep slide IDs and relationship IDs unchanged + +Example: +```xml + + + + + + + + + + + + + +``` + +### Deleting a Slide +1. **Remove from `ppt/presentation.xml`**: Delete the `` entry +2. **Remove from `ppt/_rels/presentation.xml.rels`**: Delete the relationship +3. **Remove from `[Content_Types].xml`**: Delete the Override entry +4. **Delete files**: Remove `ppt/slides/slideN.xml` and `ppt/slides/_rels/slideN.xml.rels` +5. **Update `docProps/app.xml`**: Decrement slide count and update statistics +6. **Clean up unused media**: Remove orphaned images from `ppt/media/` + +Note: Don't renumber remaining slides - keep their original IDs and filenames. 
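For the `ppt/presentation.xml` edit in the deletion steps above, a small, hypothetical sketch using `defusedxml` (already listed in the dependencies) is shown below; the relationship id `rId3` and the `unpacked/` path are placeholders for whatever your package actually uses, and the remaining relationship and content-type cleanup still has to be done separately:

```python
# Minimal sketch: drop one slide's <p:sldId> entry from ppt/presentation.xml.
# Assumes the package is already unpacked; "rId3" is a placeholder relationship id.
from defusedxml import ElementTree as DET
import xml.etree.ElementTree as ET

P = "http://schemas.openxmlformats.org/presentationml/2006/main"
R = "http://schemas.openxmlformats.org/officeDocument/2006/relationships"
ET.register_namespace("p", P)
ET.register_namespace("r", R)

tree = DET.parse("unpacked/ppt/presentation.xml")
sld_id_lst = tree.getroot().find(f"{{{P}}}sldIdLst")

for sld_id in list(sld_id_lst):
    if sld_id.get(f"{{{R}}}id") == "rId3":  # the slide being deleted
        sld_id_lst.remove(sld_id)

tree.write("unpacked/ppt/presentation.xml", xml_declaration=True, encoding="UTF-8")
```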
+ + +## Common Errors to Avoid + +- **Encodings**: Escape unicode characters in ASCII content: `"` becomes `“` +- **Images**: Add to `ppt/media/` and update relationship files +- **Lists**: Omit bullets from list headers +- **IDs**: Use valid hexadecimal values for UUIDs +- **Themes**: Check all themes in `theme` directory for colors + +## Validation Checklist for Template-Based Presentations + +### Before Packing, Always: +- **Clean unused resources**: Remove unreferenced media, fonts, and notes directories +- **Fix Content_Types.xml**: Declare ALL slides, layouts, and themes present in the package +- **Fix relationship IDs**: + - Remove font embed references if not using embedded fonts +- **Remove broken references**: Check all `_rels` files for references to deleted resources + +### Common Template Duplication Pitfalls: +- Multiple slides referencing the same notes slide after duplication +- Image/media references from template slides that no longer exist +- Font embedding references when fonts aren't included +- Missing slideLayout declarations for layouts 12-25 +- docProps directory may not unpack - this is optional \ No newline at end of file diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-chart.xsd b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-chart.xsd new file mode 100644 index 000000000..6454ef9a9 --- /dev/null +++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-chart.xsd @@ -0,0 +1,1499 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd new file mode 100644 index 000000000..afa4f463e --- /dev/null +++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd @@ -0,0 +1,146 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd new file mode 100644 index 000000000..64e66b8ab --- /dev/null +++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd @@ -0,0 +1,1085 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd new file mode 100644 index 000000000..687eea829 --- /dev/null +++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd @@ -0,0 +1,11 @@ + + + + + diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-main.xsd b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-main.xsd new file mode 100644 index 000000000..6ac81b06b --- /dev/null +++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-main.xsd @@ -0,0 +1,3081 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-picture.xsd 
b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-picture.xsd new file mode 100644 index 000000000..1dbf05140 --- /dev/null +++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-picture.xsd @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + + + diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd new file mode 100644 index 000000000..f1af17db4 --- /dev/null +++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd @@ -0,0 +1,185 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd new file mode 100644 index 000000000..0a185ab6e --- /dev/null +++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd @@ -0,0 +1,287 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/pml.xsd b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/pml.xsd new file mode 100644 index 000000000..14ef48886 --- /dev/null +++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/pml.xsd @@ -0,0 +1,1676 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd new file mode 100644 index 000000000..c20f3bf14 --- /dev/null +++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd new file mode 100644 index 000000000..ac6025226 --- /dev/null +++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd @@ -0,0 +1,144 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd new file mode 100644 index 000000000..424b8ba8d --- /dev/null +++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd @@ -0,0 +1,174 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd 
b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd new file mode 100644 index 000000000..2bddce292 --- /dev/null +++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd @@ -0,0 +1,25 @@ + + + + + + + + + + + + + + + + + + + diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd new file mode 100644 index 000000000..8a8c18ba2 --- /dev/null +++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd @@ -0,0 +1,18 @@ + + + + + + + + + + + + + + + diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd new file mode 100644 index 000000000..5c42706a0 --- /dev/null +++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd @@ -0,0 +1,59 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd new file mode 100644 index 000000000..853c341c8 --- /dev/null +++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd @@ -0,0 +1,56 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd new file mode 100644 index 000000000..da835ee82 --- /dev/null +++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd @@ -0,0 +1,195 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-math.xsd b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-math.xsd new file mode 100644 index 000000000..87ad2658f --- /dev/null +++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-math.xsd @@ -0,0 +1,582 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd new file mode 100644 index 000000000..9e86f1b2b --- /dev/null +++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd @@ -0,0 +1,25 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/sml.xsd b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/sml.xsd new file mode 100644 index 000000000..d0be42e75 --- /dev/null +++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/sml.xsd @@ -0,0 +1,4439 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-main.xsd b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-main.xsd
new file mode 100644
index 000000000..8821dd183
--- /dev/null
+++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-main.xsd
@@ -0,0 +1,570 @@
diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd
new file mode 100644
index 000000000..ca2575c75
--- /dev/null
+++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd
@@ -0,0 +1,509 @@
diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd
new file mode 100644
index 000000000..dd079e603
--- /dev/null
+++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd
@@ -0,0 +1,12 @@
diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd
new file mode 100644
index 000000000..3dd6cf625
--- /dev/null
+++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd
@@ -0,0 +1,108 @@
diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd
new file mode 100644
index 000000000..f1041e34e
--- /dev/null
+++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd
@@ -0,0 +1,96 @@
diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/wml.xsd b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/wml.xsd
new file mode 100644
index 000000000..9c5b7a633
--- /dev/null
+++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/wml.xsd
@@ -0,0 +1,3646 @@
diff --git a/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/xml.xsd b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/xml.xsd
new file mode 100644 index 000000000..0f13678d8 --- /dev/null +++ b/skills/pptx/ooxml/schemas/ISO-IEC29500-4_2016/xml.xsd @@ -0,0 +1,116 @@ + + + + + + See http://www.w3.org/XML/1998/namespace.html and + http://www.w3.org/TR/REC-xml for information about this namespace. + + This schema document describes the XML namespace, in a form + suitable for import by other schema documents. + + Note that local names in this namespace are intended to be defined + only by the World Wide Web Consortium or its subgroups. The + following names are currently defined in this namespace and should + not be used with conflicting semantics by any Working Group, + specification, or document instance: + + base (as an attribute name): denotes an attribute whose value + provides a URI to be used as the base for interpreting any + relative URIs in the scope of the element on which it + appears; its value is inherited. This name is reserved + by virtue of its definition in the XML Base specification. + + lang (as an attribute name): denotes an attribute whose value + is a language code for the natural language of the content of + any element; its value is inherited. This name is reserved + by virtue of its definition in the XML specification. + + space (as an attribute name): denotes an attribute whose + value is a keyword indicating what whitespace processing + discipline is intended for the content of the element; its + value is inherited. This name is reserved by virtue of its + definition in the XML specification. + + Father (in any context at all): denotes Jon Bosak, the chair of + the original XML Working Group. This name is reserved by + the following decision of the W3C XML Plenary and + XML Coordination groups: + + In appreciation for his vision, leadership and dedication + the W3C XML Plenary on this 10th day of February, 2000 + reserves for Jon Bosak in perpetuity the XML name + xml:Father + + + + + This schema defines attributes and an attribute group + suitable for use by + schemas wishing to allow xml:base, xml:lang or xml:space attributes + on elements they define. + + To enable this, such a schema must import this schema + for the XML namespace, e.g. as follows: + <schema . . .> + . . . + <import namespace="http://www.w3.org/XML/1998/namespace" + schemaLocation="http://www.w3.org/2001/03/xml.xsd"/> + + Subsequently, qualified reference to any of the attributes + or the group defined below will have the desired effect, e.g. + + <type . . .> + . . . + <attributeGroup ref="xml:specialAttrs"/> + + will define a type which will schema-validate an instance + element with any of those attributes + + + + In keeping with the XML Schema WG's standard versioning + policy, this schema document will persist at + http://www.w3.org/2001/03/xml.xsd. + At the date of issue it can also be found at + http://www.w3.org/2001/xml.xsd. + The schema document at that URI may however change in the future, + in order to remain compatible with the latest version of XML Schema + itself. In other words, if the XML Schema namespace changes, the version + of this document at + http://www.w3.org/2001/xml.xsd will change + accordingly; the version at + http://www.w3.org/2001/03/xml.xsd will not change. + + + + + + In due course, we should install the relevant ISO 2- and 3-letter + codes as the enumerated possible values . . . + + + + + + + + + + + + + + + See http://www.w3.org/TR/xmlbase/ for + information about this attribute. 
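
The xml:space attribute described in the annotation above is the hook the Word validator added later in this diff relies on: DOCXSchemaValidator.validate_whitespace_preservation checks that w:t runs with leading or trailing whitespace carry xml:space="preserve". A minimal illustrative sketch of that kind of check with lxml; the namespace URIs mirror the validator code, but the helper name and return shape here are made up for illustration:

import lxml.etree

W_NS = "http://schemas.openxmlformats.org/wordprocessingml/2006/main"
XML_NS = "http://www.w3.org/XML/1998/namespace"

def runs_missing_space_preserve(document_xml_path):
    """Return (line, text) pairs for w:t runs that should declare xml:space='preserve'."""
    root = lxml.etree.parse(document_xml_path).getroot()
    flagged = []
    for t in root.iter(f"{{{W_NS}}}t"):
        text = t.text or ""
        # Leading/trailing whitespace in w:t is dropped by consumers unless preserved
        if text != text.strip() and t.get(f"{{{XML_NS}}}space") != "preserve":
            flagged.append((t.sourceline, text))
    return flagged
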
diff --git a/skills/pptx/ooxml/schemas/ecma/fouth-edition/opc-contentTypes.xsd b/skills/pptx/ooxml/schemas/ecma/fouth-edition/opc-contentTypes.xsd
new file mode 100644
index 000000000..a6de9d273
--- /dev/null
+++ b/skills/pptx/ooxml/schemas/ecma/fouth-edition/opc-contentTypes.xsd
@@ -0,0 +1,42 @@
diff --git a/skills/pptx/ooxml/schemas/ecma/fouth-edition/opc-coreProperties.xsd b/skills/pptx/ooxml/schemas/ecma/fouth-edition/opc-coreProperties.xsd
new file mode 100644
index 000000000..10e978b66
--- /dev/null
+++ b/skills/pptx/ooxml/schemas/ecma/fouth-edition/opc-coreProperties.xsd
@@ -0,0 +1,50 @@
diff --git a/skills/pptx/ooxml/schemas/ecma/fouth-edition/opc-digSig.xsd b/skills/pptx/ooxml/schemas/ecma/fouth-edition/opc-digSig.xsd
new file mode 100644
index 000000000..4248bf7a3
--- /dev/null
+++ b/skills/pptx/ooxml/schemas/ecma/fouth-edition/opc-digSig.xsd
@@ -0,0 +1,49 @@
diff --git a/skills/pptx/ooxml/schemas/ecma/fouth-edition/opc-relationships.xsd b/skills/pptx/ooxml/schemas/ecma/fouth-edition/opc-relationships.xsd
new file mode 100644
index 000000000..564974671
--- /dev/null
+++ b/skills/pptx/ooxml/schemas/ecma/fouth-edition/opc-relationships.xsd
@@ -0,0 +1,33 @@
diff --git a/skills/pptx/ooxml/schemas/mce/mc.xsd b/skills/pptx/ooxml/schemas/mce/mc.xsd
new file mode 100644
index 000000000..ef725457c
--- /dev/null
+++ b/skills/pptx/ooxml/schemas/mce/mc.xsd
@@ -0,0 +1,75 @@
diff --git a/skills/pptx/ooxml/schemas/microsoft/wml-2010.xsd b/skills/pptx/ooxml/schemas/microsoft/wml-2010.xsd
new file mode 100644
index 000000000..f65f77773
--- /dev/null
+++ b/skills/pptx/ooxml/schemas/microsoft/wml-2010.xsd
@@ -0,0 +1,560 @@
diff --git a/skills/pptx/ooxml/schemas/microsoft/wml-2012.xsd b/skills/pptx/ooxml/schemas/microsoft/wml-2012.xsd
new file mode 100644
index 000000000..6b00755a9
--- /dev/null
+++ b/skills/pptx/ooxml/schemas/microsoft/wml-2012.xsd
@@ -0,0 +1,67 @@
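
These bundled XSDs are what the validation code added later in this diff consumes: validation/base.py maps each unpacked part to a schema (SCHEMA_MAPPINGS) and checks it with lxml.etree.XMLSchema. A minimal sketch of that flow, with the paths and helper name assumed for illustration (the real validator additionally strips ignorable namespaces and ignores errors already present in the original file):

from pathlib import Path
import lxml.etree

SCHEMAS = Path("skills/pptx/ooxml/schemas")

def validate_part(xml_part, xsd_relative="ISO-IEC29500-4_2016/wml.xsd"):
    """Validate one unpacked XML part against a bundled schema; print any errors."""
    schema = lxml.etree.XMLSchema(lxml.etree.parse(str(SCHEMAS / xsd_relative)))
    doc = lxml.etree.parse(str(xml_part))
    if schema.validate(doc):
        return True
    for err in schema.error_log:
        print(f"{xml_part}: line {err.line}: {err.message}")
    return False

# e.g. validate_part("unpacked/word/document.xml")
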
+ + + diff --git a/skills/pptx/ooxml/schemas/microsoft/wml-2018.xsd b/skills/pptx/ooxml/schemas/microsoft/wml-2018.xsd new file mode 100644 index 000000000..f321d333a --- /dev/null +++ b/skills/pptx/ooxml/schemas/microsoft/wml-2018.xsd @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git a/skills/pptx/ooxml/schemas/microsoft/wml-cex-2018.xsd b/skills/pptx/ooxml/schemas/microsoft/wml-cex-2018.xsd new file mode 100644 index 000000000..364c6a9b8 --- /dev/null +++ b/skills/pptx/ooxml/schemas/microsoft/wml-cex-2018.xsd @@ -0,0 +1,20 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/skills/pptx/ooxml/schemas/microsoft/wml-cid-2016.xsd b/skills/pptx/ooxml/schemas/microsoft/wml-cid-2016.xsd new file mode 100644 index 000000000..fed9d15b7 --- /dev/null +++ b/skills/pptx/ooxml/schemas/microsoft/wml-cid-2016.xsd @@ -0,0 +1,13 @@ + + + + + + + + + + + + + diff --git a/skills/pptx/ooxml/schemas/microsoft/wml-sdtdatahash-2020.xsd b/skills/pptx/ooxml/schemas/microsoft/wml-sdtdatahash-2020.xsd new file mode 100644 index 000000000..680cf1540 --- /dev/null +++ b/skills/pptx/ooxml/schemas/microsoft/wml-sdtdatahash-2020.xsd @@ -0,0 +1,4 @@ + + + + diff --git a/skills/pptx/ooxml/schemas/microsoft/wml-symex-2015.xsd b/skills/pptx/ooxml/schemas/microsoft/wml-symex-2015.xsd new file mode 100644 index 000000000..89ada9083 --- /dev/null +++ b/skills/pptx/ooxml/schemas/microsoft/wml-symex-2015.xsd @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/skills/pptx/ooxml/scripts/pack.py b/skills/pptx/ooxml/scripts/pack.py new file mode 100755 index 000000000..68bc0886f --- /dev/null +++ b/skills/pptx/ooxml/scripts/pack.py @@ -0,0 +1,159 @@ +#!/usr/bin/env python3 +""" +Tool to pack a directory into a .docx, .pptx, or .xlsx file with XML formatting undone. + +Example usage: + python pack.py [--force] +""" + +import argparse +import shutil +import subprocess +import sys +import tempfile +import defusedxml.minidom +import zipfile +from pathlib import Path + + +def main(): + parser = argparse.ArgumentParser(description="Pack a directory into an Office file") + parser.add_argument("input_directory", help="Unpacked Office document directory") + parser.add_argument("output_file", help="Output Office file (.docx/.pptx/.xlsx)") + parser.add_argument("--force", action="store_true", help="Skip validation") + args = parser.parse_args() + + try: + success = pack_document( + args.input_directory, args.output_file, validate=not args.force + ) + + # Show warning if validation was skipped + if args.force: + print("Warning: Skipped validation, file may be corrupt", file=sys.stderr) + # Exit with error if validation failed + elif not success: + print("Contents would produce a corrupt file.", file=sys.stderr) + print("Please validate XML before repacking.", file=sys.stderr) + print("Use --force to skip validation and pack anyway.", file=sys.stderr) + sys.exit(1) + + except ValueError as e: + sys.exit(f"Error: {e}") + + +def pack_document(input_dir, output_file, validate=False): + """Pack a directory into an Office file (.docx/.pptx/.xlsx). 
+ + Args: + input_dir: Path to unpacked Office document directory + output_file: Path to output Office file + validate: If True, validates with soffice (default: False) + + Returns: + bool: True if successful, False if validation failed + """ + input_dir = Path(input_dir) + output_file = Path(output_file) + + if not input_dir.is_dir(): + raise ValueError(f"{input_dir} is not a directory") + if output_file.suffix.lower() not in {".docx", ".pptx", ".xlsx"}: + raise ValueError(f"{output_file} must be a .docx, .pptx, or .xlsx file") + + # Work in temporary directory to avoid modifying original + with tempfile.TemporaryDirectory() as temp_dir: + temp_content_dir = Path(temp_dir) / "content" + shutil.copytree(input_dir, temp_content_dir) + + # Process XML files to remove pretty-printing whitespace + for pattern in ["*.xml", "*.rels"]: + for xml_file in temp_content_dir.rglob(pattern): + condense_xml(xml_file) + + # Create final Office file as zip archive + output_file.parent.mkdir(parents=True, exist_ok=True) + with zipfile.ZipFile(output_file, "w", zipfile.ZIP_DEFLATED) as zf: + for f in temp_content_dir.rglob("*"): + if f.is_file(): + zf.write(f, f.relative_to(temp_content_dir)) + + # Validate if requested + if validate: + if not validate_document(output_file): + output_file.unlink() # Delete the corrupt file + return False + + return True + + +def validate_document(doc_path): + """Validate document by converting to HTML with soffice.""" + # Determine the correct filter based on file extension + match doc_path.suffix.lower(): + case ".docx": + filter_name = "html:HTML" + case ".pptx": + filter_name = "html:impress_html_Export" + case ".xlsx": + filter_name = "html:HTML (StarCalc)" + + with tempfile.TemporaryDirectory() as temp_dir: + try: + result = subprocess.run( + [ + "soffice", + "--headless", + "--convert-to", + filter_name, + "--outdir", + temp_dir, + str(doc_path), + ], + capture_output=True, + timeout=10, + text=True, + ) + if not (Path(temp_dir) / f"{doc_path.stem}.html").exists(): + error_msg = result.stderr.strip() or "Document validation failed" + print(f"Validation error: {error_msg}", file=sys.stderr) + return False + return True + except FileNotFoundError: + print("Warning: soffice not found. 
Skipping validation.", file=sys.stderr) + return True + except subprocess.TimeoutExpired: + print("Validation error: Timeout during conversion", file=sys.stderr) + return False + except Exception as e: + print(f"Validation error: {e}", file=sys.stderr) + return False + + +def condense_xml(xml_file): + """Strip unnecessary whitespace and remove comments.""" + with open(xml_file, "r", encoding="utf-8") as f: + dom = defusedxml.minidom.parse(f) + + # Process each element to remove whitespace and comments + for element in dom.getElementsByTagName("*"): + # Skip w:t elements and their processing + if element.tagName.endswith(":t"): + continue + + # Remove whitespace-only text nodes and comment nodes + for child in list(element.childNodes): + if ( + child.nodeType == child.TEXT_NODE + and child.nodeValue + and child.nodeValue.strip() == "" + ) or child.nodeType == child.COMMENT_NODE: + element.removeChild(child) + + # Write back the condensed XML + with open(xml_file, "wb") as f: + f.write(dom.toxml(encoding="UTF-8")) + + +if __name__ == "__main__": + main() diff --git a/skills/pptx/ooxml/scripts/unpack.py b/skills/pptx/ooxml/scripts/unpack.py new file mode 100755 index 000000000..493879881 --- /dev/null +++ b/skills/pptx/ooxml/scripts/unpack.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +"""Unpack and format XML contents of Office files (.docx, .pptx, .xlsx)""" + +import random +import sys +import defusedxml.minidom +import zipfile +from pathlib import Path + +# Get command line arguments +assert len(sys.argv) == 3, "Usage: python unpack.py " +input_file, output_dir = sys.argv[1], sys.argv[2] + +# Extract and format +output_path = Path(output_dir) +output_path.mkdir(parents=True, exist_ok=True) +zipfile.ZipFile(input_file).extractall(output_path) + +# Pretty print all XML files +xml_files = list(output_path.rglob("*.xml")) + list(output_path.rglob("*.rels")) +for xml_file in xml_files: + content = xml_file.read_text(encoding="utf-8") + dom = defusedxml.minidom.parseString(content) + xml_file.write_bytes(dom.toprettyxml(indent=" ", encoding="ascii")) + +# For .docx files, suggest an RSID for tracked changes +if input_file.endswith(".docx"): + suggested_rsid = "".join(random.choices("0123456789ABCDEF", k=8)) + print(f"Suggested RSID for edit session: {suggested_rsid}") diff --git a/skills/pptx/ooxml/scripts/validate.py b/skills/pptx/ooxml/scripts/validate.py new file mode 100755 index 000000000..508c5891f --- /dev/null +++ b/skills/pptx/ooxml/scripts/validate.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python3 +""" +Command line tool to validate Office document XML files against XSD schemas and tracked changes. 
+ +Usage: + python validate.py --original +""" + +import argparse +import sys +from pathlib import Path + +from validation import DOCXSchemaValidator, PPTXSchemaValidator, RedliningValidator + + +def main(): + parser = argparse.ArgumentParser(description="Validate Office document XML files") + parser.add_argument( + "unpacked_dir", + help="Path to unpacked Office document directory", + ) + parser.add_argument( + "--original", + required=True, + help="Path to original file (.docx/.pptx/.xlsx)", + ) + parser.add_argument( + "-v", + "--verbose", + action="store_true", + help="Enable verbose output", + ) + args = parser.parse_args() + + # Validate paths + unpacked_dir = Path(args.unpacked_dir) + original_file = Path(args.original) + file_extension = original_file.suffix.lower() + assert unpacked_dir.is_dir(), f"Error: {unpacked_dir} is not a directory" + assert original_file.is_file(), f"Error: {original_file} is not a file" + assert file_extension in [".docx", ".pptx", ".xlsx"], ( + f"Error: {original_file} must be a .docx, .pptx, or .xlsx file" + ) + + # Run validations + match file_extension: + case ".docx": + validators = [DOCXSchemaValidator, RedliningValidator] + case ".pptx": + validators = [PPTXSchemaValidator] + case _: + print(f"Error: Validation not supported for file type {file_extension}") + sys.exit(1) + + # Run validators + success = True + for V in validators: + validator = V(unpacked_dir, original_file, verbose=args.verbose) + if not validator.validate(): + success = False + + if success: + print("All validations PASSED!") + + sys.exit(0 if success else 1) + + +if __name__ == "__main__": + main() diff --git a/skills/pptx/ooxml/scripts/validation/__init__.py b/skills/pptx/ooxml/scripts/validation/__init__.py new file mode 100644 index 000000000..db092ece7 --- /dev/null +++ b/skills/pptx/ooxml/scripts/validation/__init__.py @@ -0,0 +1,15 @@ +""" +Validation modules for Word document processing. +""" + +from .base import BaseSchemaValidator +from .docx import DOCXSchemaValidator +from .pptx import PPTXSchemaValidator +from .redlining import RedliningValidator + +__all__ = [ + "BaseSchemaValidator", + "DOCXSchemaValidator", + "PPTXSchemaValidator", + "RedliningValidator", +] diff --git a/skills/pptx/ooxml/scripts/validation/base.py b/skills/pptx/ooxml/scripts/validation/base.py new file mode 100644 index 000000000..0681b199c --- /dev/null +++ b/skills/pptx/ooxml/scripts/validation/base.py @@ -0,0 +1,951 @@ +""" +Base validator with common validation logic for document files. 
+""" + +import re +from pathlib import Path + +import lxml.etree + + +class BaseSchemaValidator: + """Base validator with common validation logic for document files.""" + + # Elements whose 'id' attributes must be unique within their file + # Format: element_name -> (attribute_name, scope) + # scope can be 'file' (unique within file) or 'global' (unique across all files) + UNIQUE_ID_REQUIREMENTS = { + # Word elements + "comment": ("id", "file"), # Comment IDs in comments.xml + "commentrangestart": ("id", "file"), # Must match comment IDs + "commentrangeend": ("id", "file"), # Must match comment IDs + "bookmarkstart": ("id", "file"), # Bookmark start IDs + "bookmarkend": ("id", "file"), # Bookmark end IDs + # Note: ins and del (track changes) can share IDs when part of same revision + # PowerPoint elements + "sldid": ("id", "file"), # Slide IDs in presentation.xml + "sldmasterid": ("id", "global"), # Slide master IDs must be globally unique + "sldlayoutid": ("id", "global"), # Slide layout IDs must be globally unique + "cm": ("authorid", "file"), # Comment author IDs + # Excel elements + "sheet": ("sheetid", "file"), # Sheet IDs in workbook.xml + "definedname": ("id", "file"), # Named range IDs + # Drawing/Shape elements (all formats) + "cxnsp": ("id", "file"), # Connection shape IDs + "sp": ("id", "file"), # Shape IDs + "pic": ("id", "file"), # Picture IDs + "grpsp": ("id", "file"), # Group shape IDs + } + + # Mapping of element names to expected relationship types + # Subclasses should override this with format-specific mappings + ELEMENT_RELATIONSHIP_TYPES = {} + + # Unified schema mappings for all Office document types + SCHEMA_MAPPINGS = { + # Document type specific schemas + "word": "ISO-IEC29500-4_2016/wml.xsd", # Word documents + "ppt": "ISO-IEC29500-4_2016/pml.xsd", # PowerPoint presentations + "xl": "ISO-IEC29500-4_2016/sml.xsd", # Excel spreadsheets + # Common file types + "[Content_Types].xml": "ecma/fouth-edition/opc-contentTypes.xsd", + "app.xml": "ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd", + "core.xml": "ecma/fouth-edition/opc-coreProperties.xsd", + "custom.xml": "ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd", + ".rels": "ecma/fouth-edition/opc-relationships.xsd", + # Word-specific files + "people.xml": "microsoft/wml-2012.xsd", + "commentsIds.xml": "microsoft/wml-cid-2016.xsd", + "commentsExtensible.xml": "microsoft/wml-cex-2018.xsd", + "commentsExtended.xml": "microsoft/wml-2012.xsd", + # Chart files (common across document types) + "chart": "ISO-IEC29500-4_2016/dml-chart.xsd", + # Theme files (common across document types) + "theme": "ISO-IEC29500-4_2016/dml-main.xsd", + # Drawing and media files + "drawing": "ISO-IEC29500-4_2016/dml-main.xsd", + } + + # Unified namespace constants + MC_NAMESPACE = "http://schemas.openxmlformats.org/markup-compatibility/2006" + XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace" + + # Common OOXML namespaces used across validators + PACKAGE_RELATIONSHIPS_NAMESPACE = ( + "http://schemas.openxmlformats.org/package/2006/relationships" + ) + OFFICE_RELATIONSHIPS_NAMESPACE = ( + "http://schemas.openxmlformats.org/officeDocument/2006/relationships" + ) + CONTENT_TYPES_NAMESPACE = ( + "http://schemas.openxmlformats.org/package/2006/content-types" + ) + + # Folders where we should clean ignorable namespaces + MAIN_CONTENT_FOLDERS = {"word", "ppt", "xl"} + + # All allowed OOXML namespaces (superset of all document types) + OOXML_NAMESPACES = { + "http://schemas.openxmlformats.org/officeDocument/2006/math", + 
"http://schemas.openxmlformats.org/officeDocument/2006/relationships", + "http://schemas.openxmlformats.org/schemaLibrary/2006/main", + "http://schemas.openxmlformats.org/drawingml/2006/main", + "http://schemas.openxmlformats.org/drawingml/2006/chart", + "http://schemas.openxmlformats.org/drawingml/2006/chartDrawing", + "http://schemas.openxmlformats.org/drawingml/2006/diagram", + "http://schemas.openxmlformats.org/drawingml/2006/picture", + "http://schemas.openxmlformats.org/drawingml/2006/spreadsheetDrawing", + "http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing", + "http://schemas.openxmlformats.org/wordprocessingml/2006/main", + "http://schemas.openxmlformats.org/presentationml/2006/main", + "http://schemas.openxmlformats.org/spreadsheetml/2006/main", + "http://schemas.openxmlformats.org/officeDocument/2006/sharedTypes", + "http://www.w3.org/XML/1998/namespace", + } + + def __init__(self, unpacked_dir, original_file, verbose=False): + self.unpacked_dir = Path(unpacked_dir).resolve() + self.original_file = Path(original_file) + self.verbose = verbose + + # Set schemas directory + self.schemas_dir = Path(__file__).parent.parent.parent / "schemas" + + # Get all XML and .rels files + patterns = ["*.xml", "*.rels"] + self.xml_files = [ + f for pattern in patterns for f in self.unpacked_dir.rglob(pattern) + ] + + if not self.xml_files: + print(f"Warning: No XML files found in {self.unpacked_dir}") + + def validate(self): + """Run all validation checks and return True if all pass.""" + raise NotImplementedError("Subclasses must implement the validate method") + + def validate_xml(self): + """Validate that all XML files are well-formed.""" + errors = [] + + for xml_file in self.xml_files: + try: + # Try to parse the XML file + lxml.etree.parse(str(xml_file)) + except lxml.etree.XMLSyntaxError as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {e.lineno}: {e.msg}" + ) + except Exception as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Unexpected error: {str(e)}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} XML violations:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - All XML files are well-formed") + return True + + def validate_namespaces(self): + """Validate that namespace prefixes in Ignorable attributes are declared.""" + errors = [] + + for xml_file in self.xml_files: + try: + root = lxml.etree.parse(str(xml_file)).getroot() + declared = set(root.nsmap.keys()) - {None} # Exclude default namespace + + for attr_val in [ + v for k, v in root.attrib.items() if k.endswith("Ignorable") + ]: + undeclared = set(attr_val.split()) - declared + errors.extend( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Namespace '{ns}' in Ignorable but not declared" + for ns in undeclared + ) + except lxml.etree.XMLSyntaxError: + continue + + if errors: + print(f"FAILED - {len(errors)} namespace issues:") + for error in errors: + print(error) + return False + if self.verbose: + print("PASSED - All namespace prefixes properly declared") + return True + + def validate_unique_ids(self): + """Validate that specific IDs are unique according to OOXML requirements.""" + errors = [] + global_ids = {} # Track globally unique IDs across all files + + for xml_file in self.xml_files: + try: + root = lxml.etree.parse(str(xml_file)).getroot() + file_ids = {} # Track IDs that must be unique within this file + + # Remove all mc:AlternateContent elements from the tree + 
mc_elements = root.xpath( + ".//mc:AlternateContent", namespaces={"mc": self.MC_NAMESPACE} + ) + for elem in mc_elements: + elem.getparent().remove(elem) + + # Now check IDs in the cleaned tree + for elem in root.iter(): + # Get the element name without namespace + tag = ( + elem.tag.split("}")[-1].lower() + if "}" in elem.tag + else elem.tag.lower() + ) + + # Check if this element type has ID uniqueness requirements + if tag in self.UNIQUE_ID_REQUIREMENTS: + attr_name, scope = self.UNIQUE_ID_REQUIREMENTS[tag] + + # Look for the specified attribute + id_value = None + for attr, value in elem.attrib.items(): + attr_local = ( + attr.split("}")[-1].lower() + if "}" in attr + else attr.lower() + ) + if attr_local == attr_name: + id_value = value + break + + if id_value is not None: + if scope == "global": + # Check global uniqueness + if id_value in global_ids: + prev_file, prev_line, prev_tag = global_ids[ + id_value + ] + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {elem.sourceline}: Global ID '{id_value}' in <{tag}> " + f"already used in {prev_file} at line {prev_line} in <{prev_tag}>" + ) + else: + global_ids[id_value] = ( + xml_file.relative_to(self.unpacked_dir), + elem.sourceline, + tag, + ) + elif scope == "file": + # Check file-level uniqueness + key = (tag, attr_name) + if key not in file_ids: + file_ids[key] = {} + + if id_value in file_ids[key]: + prev_line = file_ids[key][id_value] + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {elem.sourceline}: Duplicate {attr_name}='{id_value}' in <{tag}> " + f"(first occurrence at line {prev_line})" + ) + else: + file_ids[key][id_value] = elem.sourceline + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} ID uniqueness violations:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - All required IDs are unique") + return True + + def validate_file_references(self): + """ + Validate that all .rels files properly reference files and that all files are referenced. 
+ """ + errors = [] + + # Find all .rels files + rels_files = list(self.unpacked_dir.rglob("*.rels")) + + if not rels_files: + if self.verbose: + print("PASSED - No .rels files found") + return True + + # Get all files in the unpacked directory (excluding reference files) + all_files = [] + for file_path in self.unpacked_dir.rglob("*"): + if ( + file_path.is_file() + and file_path.name != "[Content_Types].xml" + and not file_path.name.endswith(".rels") + ): # This file is not referenced by .rels + all_files.append(file_path.resolve()) + + # Track all files that are referenced by any .rels file + all_referenced_files = set() + + if self.verbose: + print( + f"Found {len(rels_files)} .rels files and {len(all_files)} target files" + ) + + # Check each .rels file + for rels_file in rels_files: + try: + # Parse relationships file + rels_root = lxml.etree.parse(str(rels_file)).getroot() + + # Get the directory where this .rels file is located + rels_dir = rels_file.parent + + # Find all relationships and their targets + referenced_files = set() + broken_refs = [] + + for rel in rels_root.findall( + ".//ns:Relationship", + namespaces={"ns": self.PACKAGE_RELATIONSHIPS_NAMESPACE}, + ): + target = rel.get("Target") + if target and not target.startswith( + ("http", "mailto:") + ): # Skip external URLs + # Resolve the target path relative to the .rels file location + if rels_file.name == ".rels": + # Root .rels file - targets are relative to unpacked_dir + target_path = self.unpacked_dir / target + else: + # Other .rels files - targets are relative to their parent's parent + # e.g., word/_rels/document.xml.rels -> targets relative to word/ + base_dir = rels_dir.parent + target_path = base_dir / target + + # Normalize the path and check if it exists + try: + target_path = target_path.resolve() + if target_path.exists() and target_path.is_file(): + referenced_files.add(target_path) + all_referenced_files.add(target_path) + else: + broken_refs.append((target, rel.sourceline)) + except (OSError, ValueError): + broken_refs.append((target, rel.sourceline)) + + # Report broken references + if broken_refs: + rel_path = rels_file.relative_to(self.unpacked_dir) + for broken_ref, line_num in broken_refs: + errors.append( + f" {rel_path}: Line {line_num}: Broken reference to {broken_ref}" + ) + + except Exception as e: + rel_path = rels_file.relative_to(self.unpacked_dir) + errors.append(f" Error parsing {rel_path}: {e}") + + # Check for unreferenced files (files that exist but are not referenced anywhere) + unreferenced_files = set(all_files) - all_referenced_files + + if unreferenced_files: + for unref_file in sorted(unreferenced_files): + unref_rel_path = unref_file.relative_to(self.unpacked_dir) + errors.append(f" Unreferenced file: {unref_rel_path}") + + if errors: + print(f"FAILED - Found {len(errors)} relationship validation errors:") + for error in errors: + print(error) + print( + "CRITICAL: These errors will cause the document to appear corrupt. " + + "Broken references MUST be fixed, " + + "and unreferenced files MUST be referenced or removed." + ) + return False + else: + if self.verbose: + print( + "PASSED - All references are valid and all files are properly referenced" + ) + return True + + def validate_all_relationship_ids(self): + """ + Validate that all r:id attributes in XML files reference existing IDs + in their corresponding .rels files, and optionally validate relationship types. 
+ """ + import lxml.etree + + errors = [] + + # Process each XML file that might contain r:id references + for xml_file in self.xml_files: + # Skip .rels files themselves + if xml_file.suffix == ".rels": + continue + + # Determine the corresponding .rels file + # For dir/file.xml, it's dir/_rels/file.xml.rels + rels_dir = xml_file.parent / "_rels" + rels_file = rels_dir / f"{xml_file.name}.rels" + + # Skip if there's no corresponding .rels file (that's okay) + if not rels_file.exists(): + continue + + try: + # Parse the .rels file to get valid relationship IDs and their types + rels_root = lxml.etree.parse(str(rels_file)).getroot() + rid_to_type = {} + + for rel in rels_root.findall( + f".//{{{self.PACKAGE_RELATIONSHIPS_NAMESPACE}}}Relationship" + ): + rid = rel.get("Id") + rel_type = rel.get("Type", "") + if rid: + # Check for duplicate rIds + if rid in rid_to_type: + rels_rel_path = rels_file.relative_to(self.unpacked_dir) + errors.append( + f" {rels_rel_path}: Line {rel.sourceline}: " + f"Duplicate relationship ID '{rid}' (IDs must be unique)" + ) + # Extract just the type name from the full URL + type_name = ( + rel_type.split("/")[-1] if "/" in rel_type else rel_type + ) + rid_to_type[rid] = type_name + + # Parse the XML file to find all r:id references + xml_root = lxml.etree.parse(str(xml_file)).getroot() + + # Find all elements with r:id attributes + for elem in xml_root.iter(): + # Check for r:id attribute (relationship ID) + rid_attr = elem.get(f"{{{self.OFFICE_RELATIONSHIPS_NAMESPACE}}}id") + if rid_attr: + xml_rel_path = xml_file.relative_to(self.unpacked_dir) + elem_name = ( + elem.tag.split("}")[-1] if "}" in elem.tag else elem.tag + ) + + # Check if the ID exists + if rid_attr not in rid_to_type: + errors.append( + f" {xml_rel_path}: Line {elem.sourceline}: " + f"<{elem_name}> references non-existent relationship '{rid_attr}' " + f"(valid IDs: {', '.join(sorted(rid_to_type.keys())[:5])}{'...' if len(rid_to_type) > 5 else ''})" + ) + # Check if we have type expectations for this element + elif self.ELEMENT_RELATIONSHIP_TYPES: + expected_type = self._get_expected_relationship_type( + elem_name + ) + if expected_type: + actual_type = rid_to_type[rid_attr] + # Check if the actual type matches or contains the expected type + if expected_type not in actual_type.lower(): + errors.append( + f" {xml_rel_path}: Line {elem.sourceline}: " + f"<{elem_name}> references '{rid_attr}' which points to '{actual_type}' " + f"but should point to a '{expected_type}' relationship" + ) + + except Exception as e: + xml_rel_path = xml_file.relative_to(self.unpacked_dir) + errors.append(f" Error processing {xml_rel_path}: {e}") + + if errors: + print(f"FAILED - Found {len(errors)} relationship ID reference errors:") + for error in errors: + print(error) + print("\nThese ID mismatches will cause the document to appear corrupt!") + return False + else: + if self.verbose: + print("PASSED - All relationship ID references are valid") + return True + + def _get_expected_relationship_type(self, element_name): + """ + Get the expected relationship type for an element. + First checks the explicit mapping, then tries pattern detection. 
+ """ + # Normalize element name to lowercase + elem_lower = element_name.lower() + + # Check explicit mapping first + if elem_lower in self.ELEMENT_RELATIONSHIP_TYPES: + return self.ELEMENT_RELATIONSHIP_TYPES[elem_lower] + + # Try pattern detection for common patterns + # Pattern 1: Elements ending in "Id" often expect a relationship of the prefix type + if elem_lower.endswith("id") and len(elem_lower) > 2: + # e.g., "sldId" -> "sld", "sldMasterId" -> "sldMaster" + prefix = elem_lower[:-2] # Remove "id" + # Check if this might be a compound like "sldMasterId" + if prefix.endswith("master"): + return prefix.lower() + elif prefix.endswith("layout"): + return prefix.lower() + else: + # Simple case like "sldId" -> "slide" + # Common transformations + if prefix == "sld": + return "slide" + return prefix.lower() + + # Pattern 2: Elements ending in "Reference" expect a relationship of the prefix type + if elem_lower.endswith("reference") and len(elem_lower) > 9: + prefix = elem_lower[:-9] # Remove "reference" + return prefix.lower() + + return None + + def validate_content_types(self): + """Validate that all content files are properly declared in [Content_Types].xml.""" + errors = [] + + # Find [Content_Types].xml file + content_types_file = self.unpacked_dir / "[Content_Types].xml" + if not content_types_file.exists(): + print("FAILED - [Content_Types].xml file not found") + return False + + try: + # Parse and get all declared parts and extensions + root = lxml.etree.parse(str(content_types_file)).getroot() + declared_parts = set() + declared_extensions = set() + + # Get Override declarations (specific files) + for override in root.findall( + f".//{{{self.CONTENT_TYPES_NAMESPACE}}}Override" + ): + part_name = override.get("PartName") + if part_name is not None: + declared_parts.add(part_name.lstrip("/")) + + # Get Default declarations (by extension) + for default in root.findall( + f".//{{{self.CONTENT_TYPES_NAMESPACE}}}Default" + ): + extension = default.get("Extension") + if extension is not None: + declared_extensions.add(extension.lower()) + + # Root elements that require content type declaration + declarable_roots = { + "sld", + "sldLayout", + "sldMaster", + "presentation", # PowerPoint + "document", # Word + "workbook", + "worksheet", # Excel + "theme", # Common + } + + # Common media file extensions that should be declared + media_extensions = { + "png": "image/png", + "jpg": "image/jpeg", + "jpeg": "image/jpeg", + "gif": "image/gif", + "bmp": "image/bmp", + "tiff": "image/tiff", + "wmf": "image/x-wmf", + "emf": "image/x-emf", + } + + # Get all files in the unpacked directory + all_files = list(self.unpacked_dir.rglob("*")) + all_files = [f for f in all_files if f.is_file()] + + # Check all XML files for Override declarations + for xml_file in self.xml_files: + path_str = str(xml_file.relative_to(self.unpacked_dir)).replace( + "\\", "/" + ) + + # Skip non-content files + if any( + skip in path_str + for skip in [".rels", "[Content_Types]", "docProps/", "_rels/"] + ): + continue + + try: + root_tag = lxml.etree.parse(str(xml_file)).getroot().tag + root_name = root_tag.split("}")[-1] if "}" in root_tag else root_tag + + if root_name in declarable_roots and path_str not in declared_parts: + errors.append( + f" {path_str}: File with <{root_name}> root not declared in [Content_Types].xml" + ) + + except Exception: + continue # Skip unparseable files + + # Check all non-XML files for Default extension declarations + for file_path in all_files: + # Skip XML files and metadata files (already 
checked above) + if file_path.suffix.lower() in {".xml", ".rels"}: + continue + if file_path.name == "[Content_Types].xml": + continue + if "_rels" in file_path.parts or "docProps" in file_path.parts: + continue + + extension = file_path.suffix.lstrip(".").lower() + if extension and extension not in declared_extensions: + # Check if it's a known media extension that should be declared + if extension in media_extensions: + relative_path = file_path.relative_to(self.unpacked_dir) + errors.append( + f' {relative_path}: File with extension \'{extension}\' not declared in [Content_Types].xml - should add: ' + ) + + except Exception as e: + errors.append(f" Error parsing [Content_Types].xml: {e}") + + if errors: + print(f"FAILED - Found {len(errors)} content type declaration errors:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print( + "PASSED - All content files are properly declared in [Content_Types].xml" + ) + return True + + def validate_file_against_xsd(self, xml_file, verbose=False): + """Validate a single XML file against XSD schema, comparing with original. + + Args: + xml_file: Path to XML file to validate + verbose: Enable verbose output + + Returns: + tuple: (is_valid, new_errors_set) where is_valid is True/False/None (skipped) + """ + # Resolve both paths to handle symlinks + xml_file = Path(xml_file).resolve() + unpacked_dir = self.unpacked_dir.resolve() + + # Validate current file + is_valid, current_errors = self._validate_single_file_xsd( + xml_file, unpacked_dir + ) + + if is_valid is None: + return None, set() # Skipped + elif is_valid: + return True, set() # Valid, no errors + + # Get errors from original file for this specific file + original_errors = self._get_original_file_errors(xml_file) + + # Compare with original (both are guaranteed to be sets here) + assert current_errors is not None + new_errors = current_errors - original_errors + + if new_errors: + if verbose: + relative_path = xml_file.relative_to(unpacked_dir) + print(f"FAILED - {relative_path}: {len(new_errors)} new error(s)") + for error in list(new_errors)[:3]: + truncated = error[:250] + "..." if len(error) > 250 else error + print(f" - {truncated}") + return False, new_errors + else: + # All errors existed in original + if verbose: + print( + f"PASSED - No new errors (original had {len(current_errors)} errors)" + ) + return True, set() + + def validate_against_xsd(self): + """Validate XML files against XSD schemas, showing only new errors compared to original.""" + new_errors = [] + original_error_count = 0 + valid_count = 0 + skipped_count = 0 + + for xml_file in self.xml_files: + relative_path = str(xml_file.relative_to(self.unpacked_dir)) + is_valid, new_file_errors = self.validate_file_against_xsd( + xml_file, verbose=False + ) + + if is_valid is None: + skipped_count += 1 + continue + elif is_valid and not new_file_errors: + valid_count += 1 + continue + elif is_valid: + # Had errors but all existed in original + original_error_count += 1 + valid_count += 1 + continue + + # Has new errors + new_errors.append(f" {relative_path}: {len(new_file_errors)} new error(s)") + for error in list(new_file_errors)[:3]: # Show first 3 errors + new_errors.append( + f" - {error[:250]}..." 
if len(error) > 250 else f" - {error}" + ) + + # Print summary + if self.verbose: + print(f"Validated {len(self.xml_files)} files:") + print(f" - Valid: {valid_count}") + print(f" - Skipped (no schema): {skipped_count}") + if original_error_count: + print(f" - With original errors (ignored): {original_error_count}") + print( + f" - With NEW errors: {len(new_errors) > 0 and len([e for e in new_errors if not e.startswith(' ')]) or 0}" + ) + + if new_errors: + print("\nFAILED - Found NEW validation errors:") + for error in new_errors: + print(error) + return False + else: + if self.verbose: + print("\nPASSED - No new XSD validation errors introduced") + return True + + def _get_schema_path(self, xml_file): + """Determine the appropriate schema path for an XML file.""" + # Check exact filename match + if xml_file.name in self.SCHEMA_MAPPINGS: + return self.schemas_dir / self.SCHEMA_MAPPINGS[xml_file.name] + + # Check .rels files + if xml_file.suffix == ".rels": + return self.schemas_dir / self.SCHEMA_MAPPINGS[".rels"] + + # Check chart files + if "charts/" in str(xml_file) and xml_file.name.startswith("chart"): + return self.schemas_dir / self.SCHEMA_MAPPINGS["chart"] + + # Check theme files + if "theme/" in str(xml_file) and xml_file.name.startswith("theme"): + return self.schemas_dir / self.SCHEMA_MAPPINGS["theme"] + + # Check if file is in a main content folder and use appropriate schema + if xml_file.parent.name in self.MAIN_CONTENT_FOLDERS: + return self.schemas_dir / self.SCHEMA_MAPPINGS[xml_file.parent.name] + + return None + + def _clean_ignorable_namespaces(self, xml_doc): + """Remove attributes and elements not in allowed namespaces.""" + # Create a clean copy + xml_string = lxml.etree.tostring(xml_doc, encoding="unicode") + xml_copy = lxml.etree.fromstring(xml_string) + + # Remove attributes not in allowed namespaces + for elem in xml_copy.iter(): + attrs_to_remove = [] + + for attr in elem.attrib: + # Check if attribute is from a namespace other than allowed ones + if "{" in attr: + ns = attr.split("}")[0][1:] + if ns not in self.OOXML_NAMESPACES: + attrs_to_remove.append(attr) + + # Remove collected attributes + for attr in attrs_to_remove: + del elem.attrib[attr] + + # Remove elements not in allowed namespaces + self._remove_ignorable_elements(xml_copy) + + return lxml.etree.ElementTree(xml_copy) + + def _remove_ignorable_elements(self, root): + """Recursively remove all elements not in allowed namespaces.""" + elements_to_remove = [] + + # Find elements to remove + for elem in list(root): + # Skip non-element nodes (comments, processing instructions, etc.) + if not hasattr(elem, "tag") or callable(elem.tag): + continue + + tag_str = str(elem.tag) + if tag_str.startswith("{"): + ns = tag_str.split("}")[0][1:] + if ns not in self.OOXML_NAMESPACES: + elements_to_remove.append(elem) + continue + + # Recursively clean child elements + self._remove_ignorable_elements(elem) + + # Remove collected elements + for elem in elements_to_remove: + root.remove(elem) + + def _preprocess_for_mc_ignorable(self, xml_doc): + """Preprocess XML to handle mc:Ignorable attribute properly.""" + # Remove mc:Ignorable attributes before validation + root = xml_doc.getroot() + + # Remove mc:Ignorable attribute from root + if f"{{{self.MC_NAMESPACE}}}Ignorable" in root.attrib: + del root.attrib[f"{{{self.MC_NAMESPACE}}}Ignorable"] + + return xml_doc + + def _validate_single_file_xsd(self, xml_file, base_path): + """Validate a single XML file against XSD schema. 
Returns (is_valid, errors_set).""" + schema_path = self._get_schema_path(xml_file) + if not schema_path: + return None, None # Skip file + + try: + # Load schema + with open(schema_path, "rb") as xsd_file: + parser = lxml.etree.XMLParser() + xsd_doc = lxml.etree.parse( + xsd_file, parser=parser, base_url=str(schema_path) + ) + schema = lxml.etree.XMLSchema(xsd_doc) + + # Load and preprocess XML + with open(xml_file, "r") as f: + xml_doc = lxml.etree.parse(f) + + xml_doc, _ = self._remove_template_tags_from_text_nodes(xml_doc) + xml_doc = self._preprocess_for_mc_ignorable(xml_doc) + + # Clean ignorable namespaces if needed + relative_path = xml_file.relative_to(base_path) + if ( + relative_path.parts + and relative_path.parts[0] in self.MAIN_CONTENT_FOLDERS + ): + xml_doc = self._clean_ignorable_namespaces(xml_doc) + + # Validate + if schema.validate(xml_doc): + return True, set() + else: + errors = set() + for error in schema.error_log: + # Store normalized error message (without line numbers for comparison) + errors.add(error.message) + return False, errors + + except Exception as e: + return False, {str(e)} + + def _get_original_file_errors(self, xml_file): + """Get XSD validation errors from a single file in the original document. + + Args: + xml_file: Path to the XML file in unpacked_dir to check + + Returns: + set: Set of error messages from the original file + """ + import tempfile + import zipfile + + # Resolve both paths to handle symlinks (e.g., /var vs /private/var on macOS) + xml_file = Path(xml_file).resolve() + unpacked_dir = self.unpacked_dir.resolve() + relative_path = xml_file.relative_to(unpacked_dir) + + with tempfile.TemporaryDirectory() as temp_dir: + temp_path = Path(temp_dir) + + # Extract original file + with zipfile.ZipFile(self.original_file, "r") as zip_ref: + zip_ref.extractall(temp_path) + + # Find corresponding file in original + original_xml_file = temp_path / relative_path + + if not original_xml_file.exists(): + # File didn't exist in original, so no original errors + return set() + + # Validate the specific file in original + is_valid, errors = self._validate_single_file_xsd( + original_xml_file, temp_path + ) + return errors if errors else set() + + def _remove_template_tags_from_text_nodes(self, xml_doc): + """Remove template tags from XML text nodes and collect warnings. + + Template tags follow the pattern {{ ... }} and are used as placeholders + for content replacement. They should be removed from text content before + XSD validation while preserving XML structure. 
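+ 
+ For example (the placeholder name is illustrative), a text node containing
+ "Dear {{client_name}}, welcome" becomes "Dear , welcome", and a warning is
+ recorded for the removed "{{client_name}}" tag.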
+ + Returns: + tuple: (cleaned_xml_doc, warnings_list) + """ + warnings = [] + template_pattern = re.compile(r"\{\{[^}]*\}\}") + + # Create a copy of the document to avoid modifying the original + xml_string = lxml.etree.tostring(xml_doc, encoding="unicode") + xml_copy = lxml.etree.fromstring(xml_string) + + def process_text_content(text, content_type): + if not text: + return text + matches = list(template_pattern.finditer(text)) + if matches: + for match in matches: + warnings.append( + f"Found template tag in {content_type}: {match.group()}" + ) + return template_pattern.sub("", text) + return text + + # Process all text nodes in the document + for elem in xml_copy.iter(): + # Skip processing if this is a w:t element + if not hasattr(elem, "tag") or callable(elem.tag): + continue + tag_str = str(elem.tag) + if tag_str.endswith("}t") or tag_str == "t": + continue + + elem.text = process_text_content(elem.text, "text content") + elem.tail = process_text_content(elem.tail, "tail content") + + return lxml.etree.ElementTree(xml_copy), warnings + + +if __name__ == "__main__": + raise RuntimeError("This module should not be run directly.") diff --git a/skills/pptx/ooxml/scripts/validation/docx.py b/skills/pptx/ooxml/scripts/validation/docx.py new file mode 100644 index 000000000..602c47087 --- /dev/null +++ b/skills/pptx/ooxml/scripts/validation/docx.py @@ -0,0 +1,274 @@ +""" +Validator for Word document XML files against XSD schemas. +""" + +import re +import tempfile +import zipfile + +import lxml.etree + +from .base import BaseSchemaValidator + + +class DOCXSchemaValidator(BaseSchemaValidator): + """Validator for Word document XML files against XSD schemas.""" + + # Word-specific namespace + WORD_2006_NAMESPACE = "http://schemas.openxmlformats.org/wordprocessingml/2006/main" + + # Word-specific element to relationship type mappings + # Start with empty mapping - add specific cases as we discover them + ELEMENT_RELATIONSHIP_TYPES = {} + + def validate(self): + """Run all validation checks and return True if all pass.""" + # Test 0: XML well-formedness + if not self.validate_xml(): + return False + + # Test 1: Namespace declarations + all_valid = True + if not self.validate_namespaces(): + all_valid = False + + # Test 2: Unique IDs + if not self.validate_unique_ids(): + all_valid = False + + # Test 3: Relationship and file reference validation + if not self.validate_file_references(): + all_valid = False + + # Test 4: Content type declarations + if not self.validate_content_types(): + all_valid = False + + # Test 5: XSD schema validation + if not self.validate_against_xsd(): + all_valid = False + + # Test 6: Whitespace preservation + if not self.validate_whitespace_preservation(): + all_valid = False + + # Test 7: Deletion validation + if not self.validate_deletions(): + all_valid = False + + # Test 8: Insertion validation + if not self.validate_insertions(): + all_valid = False + + # Test 9: Relationship ID reference validation + if not self.validate_all_relationship_ids(): + all_valid = False + + # Count and compare paragraphs + self.compare_paragraph_counts() + + return all_valid + + def validate_whitespace_preservation(self): + """ + Validate that w:t elements with whitespace have xml:space='preserve'. 
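+ 
+ For example, <w:t xml:space="preserve"> hello </w:t> passes this check, while
+ <w:t> hello </w:t> (leading/trailing whitespace without xml:space) is flagged.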
+ """ + errors = [] + + for xml_file in self.xml_files: + # Only check document.xml files + if xml_file.name != "document.xml": + continue + + try: + root = lxml.etree.parse(str(xml_file)).getroot() + + # Find all w:t elements + for elem in root.iter(f"{{{self.WORD_2006_NAMESPACE}}}t"): + if elem.text: + text = elem.text + # Check if text starts or ends with whitespace + if re.match(r"^\s.*", text) or re.match(r".*\s$", text): + # Check if xml:space="preserve" attribute exists + xml_space_attr = f"{{{self.XML_NAMESPACE}}}space" + if ( + xml_space_attr not in elem.attrib + or elem.attrib[xml_space_attr] != "preserve" + ): + # Show a preview of the text + text_preview = ( + repr(text)[:50] + "..." + if len(repr(text)) > 50 + else repr(text) + ) + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {elem.sourceline}: w:t element with whitespace missing xml:space='preserve': {text_preview}" + ) + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} whitespace preservation violations:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - All whitespace is properly preserved") + return True + + def validate_deletions(self): + """ + Validate that w:t elements are not within w:del elements. + For some reason, XSD validation does not catch this, so we do it manually. + """ + errors = [] + + for xml_file in self.xml_files: + # Only check document.xml files + if xml_file.name != "document.xml": + continue + + try: + root = lxml.etree.parse(str(xml_file)).getroot() + + # Find all w:t elements that are descendants of w:del elements + namespaces = {"w": self.WORD_2006_NAMESPACE} + xpath_expression = ".//w:del//w:t" + problematic_t_elements = root.xpath( + xpath_expression, namespaces=namespaces + ) + for t_elem in problematic_t_elements: + if t_elem.text: + # Show a preview of the text + text_preview = ( + repr(t_elem.text)[:50] + "..." 
+ if len(repr(t_elem.text)) > 50 + else repr(t_elem.text) + ) + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {t_elem.sourceline}: found within : {text_preview}" + ) + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} deletion validation violations:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - No w:t elements found within w:del elements") + return True + + def count_paragraphs_in_unpacked(self): + """Count the number of paragraphs in the unpacked document.""" + count = 0 + + for xml_file in self.xml_files: + # Only check document.xml files + if xml_file.name != "document.xml": + continue + + try: + root = lxml.etree.parse(str(xml_file)).getroot() + # Count all w:p elements + paragraphs = root.findall(f".//{{{self.WORD_2006_NAMESPACE}}}p") + count = len(paragraphs) + except Exception as e: + print(f"Error counting paragraphs in unpacked document: {e}") + + return count + + def count_paragraphs_in_original(self): + """Count the number of paragraphs in the original docx file.""" + count = 0 + + try: + # Create temporary directory to unpack original + with tempfile.TemporaryDirectory() as temp_dir: + # Unpack original docx + with zipfile.ZipFile(self.original_file, "r") as zip_ref: + zip_ref.extractall(temp_dir) + + # Parse document.xml + doc_xml_path = temp_dir + "/word/document.xml" + root = lxml.etree.parse(doc_xml_path).getroot() + + # Count all w:p elements + paragraphs = root.findall(f".//{{{self.WORD_2006_NAMESPACE}}}p") + count = len(paragraphs) + + except Exception as e: + print(f"Error counting paragraphs in original document: {e}") + + return count + + def validate_insertions(self): + """ + Validate that w:delText elements are not within w:ins elements. + w:delText is only allowed in w:ins if nested within a w:del. + """ + errors = [] + + for xml_file in self.xml_files: + if xml_file.name != "document.xml": + continue + + try: + root = lxml.etree.parse(str(xml_file)).getroot() + namespaces = {"w": self.WORD_2006_NAMESPACE} + + # Find w:delText in w:ins that are NOT within w:del + invalid_elements = root.xpath( + ".//w:ins//w:delText[not(ancestor::w:del)]", + namespaces=namespaces + ) + + for elem in invalid_elements: + text_preview = ( + repr(elem.text or "")[:50] + "..." 
+ if len(repr(elem.text or "")) > 50 + else repr(elem.text or "") + ) + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {elem.sourceline}: within : {text_preview}" + ) + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} insertion validation violations:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - No w:delText elements within w:ins elements") + return True + + def compare_paragraph_counts(self): + """Compare paragraph counts between original and new document.""" + original_count = self.count_paragraphs_in_original() + new_count = self.count_paragraphs_in_unpacked() + + diff = new_count - original_count + diff_str = f"+{diff}" if diff > 0 else str(diff) + print(f"\nParagraphs: {original_count} → {new_count} ({diff_str})") + + +if __name__ == "__main__": + raise RuntimeError("This module should not be run directly.") diff --git a/skills/pptx/ooxml/scripts/validation/pptx.py b/skills/pptx/ooxml/scripts/validation/pptx.py new file mode 100644 index 000000000..66d5b1e2d --- /dev/null +++ b/skills/pptx/ooxml/scripts/validation/pptx.py @@ -0,0 +1,315 @@ +""" +Validator for PowerPoint presentation XML files against XSD schemas. +""" + +import re + +from .base import BaseSchemaValidator + + +class PPTXSchemaValidator(BaseSchemaValidator): + """Validator for PowerPoint presentation XML files against XSD schemas.""" + + # PowerPoint presentation namespace + PRESENTATIONML_NAMESPACE = ( + "http://schemas.openxmlformats.org/presentationml/2006/main" + ) + + # PowerPoint-specific element to relationship type mappings + ELEMENT_RELATIONSHIP_TYPES = { + "sldid": "slide", + "sldmasterid": "slidemaster", + "notesmasterid": "notesmaster", + "sldlayoutid": "slidelayout", + "themeid": "theme", + "tablestyleid": "tablestyles", + } + + def validate(self): + """Run all validation checks and return True if all pass.""" + # Test 0: XML well-formedness + if not self.validate_xml(): + return False + + # Test 1: Namespace declarations + all_valid = True + if not self.validate_namespaces(): + all_valid = False + + # Test 2: Unique IDs + if not self.validate_unique_ids(): + all_valid = False + + # Test 3: UUID ID validation + if not self.validate_uuid_ids(): + all_valid = False + + # Test 4: Relationship and file reference validation + if not self.validate_file_references(): + all_valid = False + + # Test 5: Slide layout ID validation + if not self.validate_slide_layout_ids(): + all_valid = False + + # Test 6: Content type declarations + if not self.validate_content_types(): + all_valid = False + + # Test 7: XSD schema validation + if not self.validate_against_xsd(): + all_valid = False + + # Test 8: Notes slide reference validation + if not self.validate_notes_slide_references(): + all_valid = False + + # Test 9: Relationship ID reference validation + if not self.validate_all_relationship_ids(): + all_valid = False + + # Test 10: Duplicate slide layout references validation + if not self.validate_no_duplicate_slide_layouts(): + all_valid = False + + return all_valid + + def validate_uuid_ids(self): + """Validate that ID attributes that look like UUIDs contain only hex values.""" + import lxml.etree + + errors = [] + # UUID pattern: 8-4-4-4-12 hex digits with optional braces/hyphens + uuid_pattern = re.compile( + 
r"^[\{\(]?[0-9A-Fa-f]{8}-?[0-9A-Fa-f]{4}-?[0-9A-Fa-f]{4}-?[0-9A-Fa-f]{4}-?[0-9A-Fa-f]{12}[\}\)]?$" + ) + + for xml_file in self.xml_files: + try: + root = lxml.etree.parse(str(xml_file)).getroot() + + # Check all elements for ID attributes + for elem in root.iter(): + for attr, value in elem.attrib.items(): + # Check if this is an ID attribute + attr_name = attr.split("}")[-1].lower() + if attr_name == "id" or attr_name.endswith("id"): + # Check if value looks like a UUID (has the right length and pattern structure) + if self._looks_like_uuid(value): + # Validate that it contains only hex characters in the right positions + if not uuid_pattern.match(value): + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: " + f"Line {elem.sourceline}: ID '{value}' appears to be a UUID but contains invalid hex characters" + ) + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} UUID ID validation errors:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - All UUID-like IDs contain valid hex values") + return True + + def _looks_like_uuid(self, value): + """Check if a value has the general structure of a UUID.""" + # Remove common UUID delimiters + clean_value = value.strip("{}()").replace("-", "") + # Check if it's 32 hex-like characters (could include invalid hex chars) + return len(clean_value) == 32 and all(c.isalnum() for c in clean_value) + + def validate_slide_layout_ids(self): + """Validate that sldLayoutId elements in slide masters reference valid slide layouts.""" + import lxml.etree + + errors = [] + + # Find all slide master files + slide_masters = list(self.unpacked_dir.glob("ppt/slideMasters/*.xml")) + + if not slide_masters: + if self.verbose: + print("PASSED - No slide masters found") + return True + + for slide_master in slide_masters: + try: + # Parse the slide master file + root = lxml.etree.parse(str(slide_master)).getroot() + + # Find the corresponding _rels file for this slide master + rels_file = slide_master.parent / "_rels" / f"{slide_master.name}.rels" + + if not rels_file.exists(): + errors.append( + f" {slide_master.relative_to(self.unpacked_dir)}: " + f"Missing relationships file: {rels_file.relative_to(self.unpacked_dir)}" + ) + continue + + # Parse the relationships file + rels_root = lxml.etree.parse(str(rels_file)).getroot() + + # Build a set of valid relationship IDs that point to slide layouts + valid_layout_rids = set() + for rel in rels_root.findall( + f".//{{{self.PACKAGE_RELATIONSHIPS_NAMESPACE}}}Relationship" + ): + rel_type = rel.get("Type", "") + if "slideLayout" in rel_type: + valid_layout_rids.add(rel.get("Id")) + + # Find all sldLayoutId elements in the slide master + for sld_layout_id in root.findall( + f".//{{{self.PRESENTATIONML_NAMESPACE}}}sldLayoutId" + ): + r_id = sld_layout_id.get( + f"{{{self.OFFICE_RELATIONSHIPS_NAMESPACE}}}id" + ) + layout_id = sld_layout_id.get("id") + + if r_id and r_id not in valid_layout_rids: + errors.append( + f" {slide_master.relative_to(self.unpacked_dir)}: " + f"Line {sld_layout_id.sourceline}: sldLayoutId with id='{layout_id}' " + f"references r:id='{r_id}' which is not found in slide layout relationships" + ) + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {slide_master.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print(f"FAILED - Found {len(errors)} slide layout ID validation 
errors:") + for error in errors: + print(error) + print( + "Remove invalid references or add missing slide layouts to the relationships file." + ) + return False + else: + if self.verbose: + print("PASSED - All slide layout IDs reference valid slide layouts") + return True + + def validate_no_duplicate_slide_layouts(self): + """Validate that each slide has exactly one slideLayout reference.""" + import lxml.etree + + errors = [] + slide_rels_files = list(self.unpacked_dir.glob("ppt/slides/_rels/*.xml.rels")) + + for rels_file in slide_rels_files: + try: + root = lxml.etree.parse(str(rels_file)).getroot() + + # Find all slideLayout relationships + layout_rels = [ + rel + for rel in root.findall( + f".//{{{self.PACKAGE_RELATIONSHIPS_NAMESPACE}}}Relationship" + ) + if "slideLayout" in rel.get("Type", "") + ] + + if len(layout_rels) > 1: + errors.append( + f" {rels_file.relative_to(self.unpacked_dir)}: has {len(layout_rels)} slideLayout references" + ) + + except Exception as e: + errors.append( + f" {rels_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + if errors: + print("FAILED - Found slides with duplicate slideLayout references:") + for error in errors: + print(error) + return False + else: + if self.verbose: + print("PASSED - All slides have exactly one slideLayout reference") + return True + + def validate_notes_slide_references(self): + """Validate that each notesSlide file is referenced by only one slide.""" + import lxml.etree + + errors = [] + notes_slide_references = {} # Track which slides reference each notesSlide + + # Find all slide relationship files + slide_rels_files = list(self.unpacked_dir.glob("ppt/slides/_rels/*.xml.rels")) + + if not slide_rels_files: + if self.verbose: + print("PASSED - No slide relationship files found") + return True + + for rels_file in slide_rels_files: + try: + # Parse the relationships file + root = lxml.etree.parse(str(rels_file)).getroot() + + # Find all notesSlide relationships + for rel in root.findall( + f".//{{{self.PACKAGE_RELATIONSHIPS_NAMESPACE}}}Relationship" + ): + rel_type = rel.get("Type", "") + if "notesSlide" in rel_type: + target = rel.get("Target", "") + if target: + # Normalize the target path to handle relative paths + normalized_target = target.replace("../", "") + + # Track which slide references this notesSlide + slide_name = rels_file.stem.replace( + ".xml", "" + ) # e.g., "slide1" + + if normalized_target not in notes_slide_references: + notes_slide_references[normalized_target] = [] + notes_slide_references[normalized_target].append( + (slide_name, rels_file) + ) + + except (lxml.etree.XMLSyntaxError, Exception) as e: + errors.append( + f" {rels_file.relative_to(self.unpacked_dir)}: Error: {e}" + ) + + # Check for duplicate references + for target, references in notes_slide_references.items(): + if len(references) > 1: + slide_names = [ref[0] for ref in references] + errors.append( + f" Notes slide '{target}' is referenced by multiple slides: {', '.join(slide_names)}" + ) + for slide_name, rels_file in references: + errors.append(f" - {rels_file.relative_to(self.unpacked_dir)}") + + if errors: + print( + f"FAILED - Found {len([e for e in errors if not e.startswith(' ')])} notes slide reference validation errors:" + ) + for error in errors: + print(error) + print("Each slide may optionally have its own slide file.") + return False + else: + if self.verbose: + print("PASSED - All notes slide references are unique") + return True + + +if __name__ == "__main__": + raise RuntimeError("This module should not be run 
directly.") diff --git a/skills/pptx/ooxml/scripts/validation/redlining.py b/skills/pptx/ooxml/scripts/validation/redlining.py new file mode 100644 index 000000000..7ed425edf --- /dev/null +++ b/skills/pptx/ooxml/scripts/validation/redlining.py @@ -0,0 +1,279 @@ +""" +Validator for tracked changes in Word documents. +""" + +import subprocess +import tempfile +import zipfile +from pathlib import Path + + +class RedliningValidator: + """Validator for tracked changes in Word documents.""" + + def __init__(self, unpacked_dir, original_docx, verbose=False): + self.unpacked_dir = Path(unpacked_dir) + self.original_docx = Path(original_docx) + self.verbose = verbose + self.namespaces = { + "w": "http://schemas.openxmlformats.org/wordprocessingml/2006/main" + } + + def validate(self): + """Main validation method that returns True if valid, False otherwise.""" + # Verify unpacked directory exists and has correct structure + modified_file = self.unpacked_dir / "word" / "document.xml" + if not modified_file.exists(): + print(f"FAILED - Modified document.xml not found at {modified_file}") + return False + + # First, check if there are any tracked changes by Claude to validate + try: + import xml.etree.ElementTree as ET + + tree = ET.parse(modified_file) + root = tree.getroot() + + # Check for w:del or w:ins tags authored by Claude + del_elements = root.findall(".//w:del", self.namespaces) + ins_elements = root.findall(".//w:ins", self.namespaces) + + # Filter to only include changes by Claude + claude_del_elements = [ + elem + for elem in del_elements + if elem.get(f"{{{self.namespaces['w']}}}author") == "Claude" + ] + claude_ins_elements = [ + elem + for elem in ins_elements + if elem.get(f"{{{self.namespaces['w']}}}author") == "Claude" + ] + + # Redlining validation is only needed if tracked changes by Claude have been used. 
+ if not claude_del_elements and not claude_ins_elements: + if self.verbose: + print("PASSED - No tracked changes by Claude found.") + return True + + except Exception: + # If we can't parse the XML, continue with full validation + pass + + # Create temporary directory for unpacking original docx + with tempfile.TemporaryDirectory() as temp_dir: + temp_path = Path(temp_dir) + + # Unpack original docx + try: + with zipfile.ZipFile(self.original_docx, "r") as zip_ref: + zip_ref.extractall(temp_path) + except Exception as e: + print(f"FAILED - Error unpacking original docx: {e}") + return False + + original_file = temp_path / "word" / "document.xml" + if not original_file.exists(): + print( + f"FAILED - Original document.xml not found in {self.original_docx}" + ) + return False + + # Parse both XML files using xml.etree.ElementTree for redlining validation + try: + import xml.etree.ElementTree as ET + + modified_tree = ET.parse(modified_file) + modified_root = modified_tree.getroot() + original_tree = ET.parse(original_file) + original_root = original_tree.getroot() + except ET.ParseError as e: + print(f"FAILED - Error parsing XML files: {e}") + return False + + # Remove Claude's tracked changes from both documents + self._remove_claude_tracked_changes(original_root) + self._remove_claude_tracked_changes(modified_root) + + # Extract and compare text content + modified_text = self._extract_text_content(modified_root) + original_text = self._extract_text_content(original_root) + + if modified_text != original_text: + # Show detailed character-level differences for each paragraph + error_message = self._generate_detailed_diff( + original_text, modified_text + ) + print(error_message) + return False + + if self.verbose: + print("PASSED - All changes by Claude are properly tracked") + return True + + def _generate_detailed_diff(self, original_text, modified_text): + """Generate detailed word-level differences using git word diff.""" + error_parts = [ + "FAILED - Document text doesn't match after removing Claude's tracked changes", + "", + "Likely causes:", + " 1. Modified text inside another author's or tags", + " 2. Made edits without proper tracked changes", + " 3. 
Didn't nest inside when deleting another's insertion", + "", + "For pre-redlined documents, use correct patterns:", + " - To reject another's INSERTION: Nest inside their ", + " - To restore another's DELETION: Add new AFTER their ", + "", + ] + + # Show git word diff + git_diff = self._get_git_word_diff(original_text, modified_text) + if git_diff: + error_parts.extend(["Differences:", "============", git_diff]) + else: + error_parts.append("Unable to generate word diff (git not available)") + + return "\n".join(error_parts) + + def _get_git_word_diff(self, original_text, modified_text): + """Generate word diff using git with character-level precision.""" + try: + with tempfile.TemporaryDirectory() as temp_dir: + temp_path = Path(temp_dir) + + # Create two files + original_file = temp_path / "original.txt" + modified_file = temp_path / "modified.txt" + + original_file.write_text(original_text, encoding="utf-8") + modified_file.write_text(modified_text, encoding="utf-8") + + # Try character-level diff first for precise differences + result = subprocess.run( + [ + "git", + "diff", + "--word-diff=plain", + "--word-diff-regex=.", # Character-by-character diff + "-U0", # Zero lines of context - show only changed lines + "--no-index", + str(original_file), + str(modified_file), + ], + capture_output=True, + text=True, + ) + + if result.stdout.strip(): + # Clean up the output - remove git diff header lines + lines = result.stdout.split("\n") + # Skip the header lines (diff --git, index, +++, ---, @@) + content_lines = [] + in_content = False + for line in lines: + if line.startswith("@@"): + in_content = True + continue + if in_content and line.strip(): + content_lines.append(line) + + if content_lines: + return "\n".join(content_lines) + + # Fallback to word-level diff if character-level is too verbose + result = subprocess.run( + [ + "git", + "diff", + "--word-diff=plain", + "-U0", # Zero lines of context + "--no-index", + str(original_file), + str(modified_file), + ], + capture_output=True, + text=True, + ) + + if result.stdout.strip(): + lines = result.stdout.split("\n") + content_lines = [] + in_content = False + for line in lines: + if line.startswith("@@"): + in_content = True + continue + if in_content and line.strip(): + content_lines.append(line) + return "\n".join(content_lines) + + except (subprocess.CalledProcessError, FileNotFoundError, Exception): + # Git not available or other error, return None to use fallback + pass + + return None + + def _remove_claude_tracked_changes(self, root): + """Remove tracked changes authored by Claude from the XML root.""" + ins_tag = f"{{{self.namespaces['w']}}}ins" + del_tag = f"{{{self.namespaces['w']}}}del" + author_attr = f"{{{self.namespaces['w']}}}author" + + # Remove w:ins elements + for parent in root.iter(): + to_remove = [] + for child in parent: + if child.tag == ins_tag and child.get(author_attr) == "Claude": + to_remove.append(child) + for elem in to_remove: + parent.remove(elem) + + # Unwrap content in w:del elements where author is "Claude" + deltext_tag = f"{{{self.namespaces['w']}}}delText" + t_tag = f"{{{self.namespaces['w']}}}t" + + for parent in root.iter(): + to_process = [] + for child in parent: + if child.tag == del_tag and child.get(author_attr) == "Claude": + to_process.append((child, list(parent).index(child))) + + # Process in reverse order to maintain indices + for del_elem, del_index in reversed(to_process): + # Convert w:delText to w:t before moving + for elem in del_elem.iter(): + if elem.tag == deltext_tag: + elem.tag 
= t_tag + + # Move all children of w:del to its parent before removing w:del + for child in reversed(list(del_elem)): + parent.insert(del_index, child) + parent.remove(del_elem) + + def _extract_text_content(self, root): + """Extract text content from Word XML, preserving paragraph structure. + + Empty paragraphs are skipped to avoid false positives when tracked + insertions add only structural elements without text content. + """ + p_tag = f"{{{self.namespaces['w']}}}p" + t_tag = f"{{{self.namespaces['w']}}}t" + + paragraphs = [] + for p_elem in root.findall(f".//{p_tag}"): + # Get all text elements within this paragraph + text_parts = [] + for t_elem in p_elem.findall(f".//{t_tag}"): + if t_elem.text: + text_parts.append(t_elem.text) + paragraph_text = "".join(text_parts) + # Skip empty paragraphs - they don't affect content validation + if paragraph_text: + paragraphs.append(paragraph_text) + + return "\n".join(paragraphs) + + +if __name__ == "__main__": + raise RuntimeError("This module should not be run directly.") diff --git a/skills/pptx/scripts/html2pptx.js b/skills/pptx/scripts/html2pptx.js new file mode 100755 index 000000000..437bf7c53 --- /dev/null +++ b/skills/pptx/scripts/html2pptx.js @@ -0,0 +1,979 @@ +/** + * html2pptx - Convert HTML slide to pptxgenjs slide with positioned elements + * + * USAGE: + * const pptx = new pptxgen(); + * pptx.layout = 'LAYOUT_16x9'; // Must match HTML body dimensions + * + * const { slide, placeholders } = await html2pptx('slide.html', pptx); + * slide.addChart(pptx.charts.LINE, data, placeholders[0]); + * + * await pptx.writeFile('output.pptx'); + * + * FEATURES: + * - Converts HTML to PowerPoint with accurate positioning + * - Supports text, images, shapes, and bullet lists + * - Extracts placeholder elements (class="placeholder") with positions + * - Handles CSS gradients, borders, and margins + * + * VALIDATION: + * - Uses body width/height from HTML for viewport sizing + * - Throws error if HTML dimensions don't match presentation layout + * - Throws error if content overflows body (with overflow details) + * + * RETURNS: + * { slide, placeholders } where placeholders is an array of { id, x, y, w, h } + */ + +const { chromium } = require('playwright'); +const path = require('path'); +const sharp = require('sharp'); + +const PT_PER_PX = 0.75; +const PX_PER_IN = 96; +const EMU_PER_IN = 914400; + +// Helper: Get body dimensions and check for overflow +async function getBodyDimensions(page) { + const bodyDimensions = await page.evaluate(() => { + const body = document.body; + const style = window.getComputedStyle(body); + + return { + width: parseFloat(style.width), + height: parseFloat(style.height), + scrollWidth: body.scrollWidth, + scrollHeight: body.scrollHeight + }; + }); + + const errors = []; + const widthOverflowPx = Math.max(0, bodyDimensions.scrollWidth - bodyDimensions.width - 1); + const heightOverflowPx = Math.max(0, bodyDimensions.scrollHeight - bodyDimensions.height - 1); + + const widthOverflowPt = widthOverflowPx * PT_PER_PX; + const heightOverflowPt = heightOverflowPx * PT_PER_PX; + + if (widthOverflowPt > 0 || heightOverflowPt > 0) { + const directions = []; + if (widthOverflowPt > 0) directions.push(`${widthOverflowPt.toFixed(1)}pt horizontally`); + if (heightOverflowPt > 0) directions.push(`${heightOverflowPt.toFixed(1)}pt vertically`); + const reminder = heightOverflowPt > 0 ? 
' (Remember: leave 0.5" margin at bottom of slide)' : ''; + errors.push(`HTML content overflows body by ${directions.join(' and ')}${reminder}`); + } + + return { ...bodyDimensions, errors }; +} + +// Helper: Validate dimensions match presentation layout +function validateDimensions(bodyDimensions, pres) { + const errors = []; + const widthInches = bodyDimensions.width / PX_PER_IN; + const heightInches = bodyDimensions.height / PX_PER_IN; + + if (pres.presLayout) { + const layoutWidth = pres.presLayout.width / EMU_PER_IN; + const layoutHeight = pres.presLayout.height / EMU_PER_IN; + + if (Math.abs(layoutWidth - widthInches) > 0.1 || Math.abs(layoutHeight - heightInches) > 0.1) { + errors.push( + `HTML dimensions (${widthInches.toFixed(1)}" × ${heightInches.toFixed(1)}") ` + + `don't match presentation layout (${layoutWidth.toFixed(1)}" × ${layoutHeight.toFixed(1)}")` + ); + } + } + return errors; +} + +function validateTextBoxPosition(slideData, bodyDimensions) { + const errors = []; + const slideHeightInches = bodyDimensions.height / PX_PER_IN; + const minBottomMargin = 0.5; // 0.5 inches from bottom + + for (const el of slideData.elements) { + // Check text elements (p, h1-h6, list) + if (['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'list'].includes(el.type)) { + const fontSize = el.style?.fontSize || 0; + const bottomEdge = el.position.y + el.position.h; + const distanceFromBottom = slideHeightInches - bottomEdge; + + if (fontSize > 12 && distanceFromBottom < minBottomMargin) { + const getText = () => { + if (typeof el.text === 'string') return el.text; + if (Array.isArray(el.text)) return el.text.find(t => t.text)?.text || ''; + if (Array.isArray(el.items)) return el.items.find(item => item.text)?.text || ''; + return ''; + }; + const textPrefix = getText().substring(0, 50) + (getText().length > 50 ? '...' : ''); + + errors.push( + `Text box "${textPrefix}" ends too close to bottom edge ` + + `(${distanceFromBottom.toFixed(2)}" from bottom, minimum ${minBottomMargin}" required)` + ); + } + } + } + + return errors; +} + +// Helper: Add background to slide +async function addBackground(slideData, targetSlide, tmpDir) { + if (slideData.background.type === 'image' && slideData.background.path) { + let imagePath = slideData.background.path.startsWith('file://') + ? slideData.background.path.replace('file://', '') + : slideData.background.path; + targetSlide.background = { path: imagePath }; + } else if (slideData.background.type === 'color' && slideData.background.value) { + targetSlide.background = { color: slideData.background.value }; + } +} + +// Helper: Add elements to slide +function addElements(slideData, targetSlide, pres) { + for (const el of slideData.elements) { + if (el.type === 'image') { + let imagePath = el.src.startsWith('file://') ? el.src.replace('file://', '') : el.src; + targetSlide.addImage({ + path: imagePath, + x: el.position.x, + y: el.position.y, + w: el.position.w, + h: el.position.h + }); + } else if (el.type === 'line') { + targetSlide.addShape(pres.ShapeType.line, { + x: el.x1, + y: el.y1, + w: el.x2 - el.x1, + h: el.y2 - el.y1, + line: { color: el.color, width: el.width } + }); + } else if (el.type === 'shape') { + const shapeOptions = { + x: el.position.x, + y: el.position.y, + w: el.position.w, + h: el.position.h, + shape: el.shape.rectRadius > 0 ? 
pres.ShapeType.roundRect : pres.ShapeType.rect + }; + + if (el.shape.fill) { + shapeOptions.fill = { color: el.shape.fill }; + if (el.shape.transparency != null) shapeOptions.fill.transparency = el.shape.transparency; + } + if (el.shape.line) shapeOptions.line = el.shape.line; + if (el.shape.rectRadius > 0) shapeOptions.rectRadius = el.shape.rectRadius; + if (el.shape.shadow) shapeOptions.shadow = el.shape.shadow; + + targetSlide.addText(el.text || '', shapeOptions); + } else if (el.type === 'list') { + const listOptions = { + x: el.position.x, + y: el.position.y, + w: el.position.w, + h: el.position.h, + fontSize: el.style.fontSize, + fontFace: el.style.fontFace, + color: el.style.color, + align: el.style.align, + valign: 'top', + lineSpacing: el.style.lineSpacing, + paraSpaceBefore: el.style.paraSpaceBefore, + paraSpaceAfter: el.style.paraSpaceAfter, + margin: el.style.margin + }; + if (el.style.margin) listOptions.margin = el.style.margin; + targetSlide.addText(el.items, listOptions); + } else { + // Check if text is single-line (height suggests one line) + const lineHeight = el.style.lineSpacing || el.style.fontSize * 1.2; + const isSingleLine = el.position.h <= lineHeight * 1.5; + + let adjustedX = el.position.x; + let adjustedW = el.position.w; + + // Make single-line text 2% wider to account for underestimate + if (isSingleLine) { + const widthIncrease = el.position.w * 0.02; + const align = el.style.align; + + if (align === 'center') { + // Center: expand both sides + adjustedX = el.position.x - (widthIncrease / 2); + adjustedW = el.position.w + widthIncrease; + } else if (align === 'right') { + // Right: expand to the left + adjustedX = el.position.x - widthIncrease; + adjustedW = el.position.w + widthIncrease; + } else { + // Left (default): expand to the right + adjustedW = el.position.w + widthIncrease; + } + } + + const textOptions = { + x: adjustedX, + y: el.position.y, + w: adjustedW, + h: el.position.h, + fontSize: el.style.fontSize, + fontFace: el.style.fontFace, + color: el.style.color, + bold: el.style.bold, + italic: el.style.italic, + underline: el.style.underline, + valign: 'top', + lineSpacing: el.style.lineSpacing, + paraSpaceBefore: el.style.paraSpaceBefore, + paraSpaceAfter: el.style.paraSpaceAfter, + inset: 0 // Remove default PowerPoint internal padding + }; + + if (el.style.align) textOptions.align = el.style.align; + if (el.style.margin) textOptions.margin = el.style.margin; + if (el.style.rotate !== undefined) textOptions.rotate = el.style.rotate; + if (el.style.transparency !== null && el.style.transparency !== undefined) textOptions.transparency = el.style.transparency; + + targetSlide.addText(el.text, textOptions); + } + } +} + +// Helper: Extract slide data from HTML page +async function extractSlideData(page) { + return await page.evaluate(() => { + const PT_PER_PX = 0.75; + const PX_PER_IN = 96; + + // Fonts that are single-weight and should not have bold applied + // (applying bold causes PowerPoint to use faux bold which makes text wider) + const SINGLE_WEIGHT_FONTS = ['impact']; + + // Helper: Check if a font should skip bold formatting + const shouldSkipBold = (fontFamily) => { + if (!fontFamily) return false; + const normalizedFont = fontFamily.toLowerCase().replace(/['"]/g, '').split(',')[0].trim(); + return SINGLE_WEIGHT_FONTS.includes(normalizedFont); + }; + + // Unit conversion helpers + const pxToInch = (px) => px / PX_PER_IN; + const pxToPoints = (pxStr) => parseFloat(pxStr) * PT_PER_PX; + const rgbToHex = (rgbStr) => { + // Handle 
transparent backgrounds by defaulting to white + if (rgbStr === 'rgba(0, 0, 0, 0)' || rgbStr === 'transparent') return 'FFFFFF'; + + const match = rgbStr.match(/rgba?\((\d+),\s*(\d+),\s*(\d+)/); + if (!match) return 'FFFFFF'; + return match.slice(1).map(n => parseInt(n).toString(16).padStart(2, '0')).join(''); + }; + + const extractAlpha = (rgbStr) => { + const match = rgbStr.match(/rgba\((\d+),\s*(\d+),\s*(\d+),\s*([\d.]+)\)/); + if (!match || !match[4]) return null; + const alpha = parseFloat(match[4]); + return Math.round((1 - alpha) * 100); + }; + + const applyTextTransform = (text, textTransform) => { + if (textTransform === 'uppercase') return text.toUpperCase(); + if (textTransform === 'lowercase') return text.toLowerCase(); + if (textTransform === 'capitalize') { + return text.replace(/\b\w/g, c => c.toUpperCase()); + } + return text; + }; + + // Extract rotation angle from CSS transform and writing-mode + const getRotation = (transform, writingMode) => { + let angle = 0; + + // Handle writing-mode first + // PowerPoint: 90° = text rotated 90° clockwise (reads top to bottom, letters upright) + // PowerPoint: 270° = text rotated 270° clockwise (reads bottom to top, letters upright) + if (writingMode === 'vertical-rl') { + // vertical-rl alone = text reads top to bottom = 90° in PowerPoint + angle = 90; + } else if (writingMode === 'vertical-lr') { + // vertical-lr alone = text reads bottom to top = 270° in PowerPoint + angle = 270; + } + + // Then add any transform rotation + if (transform && transform !== 'none') { + // Try to match rotate() function + const rotateMatch = transform.match(/rotate\((-?\d+(?:\.\d+)?)deg\)/); + if (rotateMatch) { + angle += parseFloat(rotateMatch[1]); + } else { + // Browser may compute as matrix - extract rotation from matrix + const matrixMatch = transform.match(/matrix\(([^)]+)\)/); + if (matrixMatch) { + const values = matrixMatch[1].split(',').map(parseFloat); + // matrix(a, b, c, d, e, f) where rotation = atan2(b, a) + const matrixAngle = Math.atan2(values[1], values[0]) * (180 / Math.PI); + angle += Math.round(matrixAngle); + } + } + } + + // Normalize to 0-359 range + angle = angle % 360; + if (angle < 0) angle += 360; + + return angle === 0 ? 
null : angle; + }; + + // Get position/dimensions accounting for rotation + const getPositionAndSize = (el, rect, rotation) => { + if (rotation === null) { + return { x: rect.left, y: rect.top, w: rect.width, h: rect.height }; + } + + // For 90° or 270° rotations, swap width and height + // because PowerPoint applies rotation to the original (unrotated) box + const isVertical = rotation === 90 || rotation === 270; + + if (isVertical) { + // The browser shows us the rotated dimensions (tall box for vertical text) + // But PowerPoint needs the pre-rotation dimensions (wide box that will be rotated) + // So we swap: browser's height becomes PPT's width, browser's width becomes PPT's height + const centerX = rect.left + rect.width / 2; + const centerY = rect.top + rect.height / 2; + + return { + x: centerX - rect.height / 2, + y: centerY - rect.width / 2, + w: rect.height, + h: rect.width + }; + } + + // For other rotations, use element's offset dimensions + const centerX = rect.left + rect.width / 2; + const centerY = rect.top + rect.height / 2; + return { + x: centerX - el.offsetWidth / 2, + y: centerY - el.offsetHeight / 2, + w: el.offsetWidth, + h: el.offsetHeight + }; + }; + + // Parse CSS box-shadow into PptxGenJS shadow properties + const parseBoxShadow = (boxShadow) => { + if (!boxShadow || boxShadow === 'none') return null; + + // Browser computed style format: "rgba(0, 0, 0, 0.3) 2px 2px 8px 0px [inset]" + // CSS format: "[inset] 2px 2px 8px 0px rgba(0, 0, 0, 0.3)" + + const insetMatch = boxShadow.match(/inset/); + + // IMPORTANT: PptxGenJS/PowerPoint doesn't properly support inset shadows + // Only process outer shadows to avoid file corruption + if (insetMatch) return null; + + // Extract color first (rgba or rgb at start) + const colorMatch = boxShadow.match(/rgba?\([^)]+\)/); + + // Extract numeric values (handles both px and pt units) + const parts = boxShadow.match(/([-\d.]+)(px|pt)/g); + + if (!parts || parts.length < 2) return null; + + const offsetX = parseFloat(parts[0]); + const offsetY = parseFloat(parts[1]); + const blur = parts.length > 2 ? parseFloat(parts[2]) : 0; + + // Calculate angle from offsets (in degrees, 0 = right, 90 = down) + let angle = 0; + if (offsetX !== 0 || offsetY !== 0) { + angle = Math.atan2(offsetY, offsetX) * (180 / Math.PI); + if (angle < 0) angle += 360; + } + + // Calculate offset distance (hypotenuse) + const offset = Math.sqrt(offsetX * offsetX + offsetY * offsetY) * PT_PER_PX; + + // Extract opacity from rgba + let opacity = 0.5; + if (colorMatch) { + const opacityMatch = colorMatch[0].match(/[\d.]+\)$/); + if (opacityMatch) { + opacity = parseFloat(opacityMatch[0].replace(')', '')); + } + } + + return { + type: 'outer', + angle: Math.round(angle), + blur: blur * 0.75, // Convert to points + color: colorMatch ? rgbToHex(colorMatch[0]) : '000000', + offset: offset, + opacity + }; + }; + + // Parse inline formatting tags (, , , , , ) into text runs + const parseInlineFormatting = (element, baseOptions = {}, runs = [], baseTextTransform = (x) => x) => { + let prevNodeIsText = false; + + element.childNodes.forEach((node) => { + let textTransform = baseTextTransform; + + const isText = node.nodeType === Node.TEXT_NODE || node.tagName === 'BR'; + if (isText) { + const text = node.tagName === 'BR' ? 
'\n' : textTransform(node.textContent.replace(/\s+/g, ' ')); + const prevRun = runs[runs.length - 1]; + if (prevNodeIsText && prevRun) { + prevRun.text += text; + } else { + runs.push({ text, options: { ...baseOptions } }); + } + + } else if (node.nodeType === Node.ELEMENT_NODE && node.textContent.trim()) { + const options = { ...baseOptions }; + const computed = window.getComputedStyle(node); + + // Handle inline elements with computed styles + if (node.tagName === 'SPAN' || node.tagName === 'B' || node.tagName === 'STRONG' || node.tagName === 'I' || node.tagName === 'EM' || node.tagName === 'U') { + const isBold = computed.fontWeight === 'bold' || parseInt(computed.fontWeight) >= 600; + if (isBold && !shouldSkipBold(computed.fontFamily)) options.bold = true; + if (computed.fontStyle === 'italic') options.italic = true; + if (computed.textDecoration && computed.textDecoration.includes('underline')) options.underline = true; + if (computed.color && computed.color !== 'rgb(0, 0, 0)') { + options.color = rgbToHex(computed.color); + const transparency = extractAlpha(computed.color); + if (transparency !== null) options.transparency = transparency; + } + if (computed.fontSize) options.fontSize = pxToPoints(computed.fontSize); + + // Apply text-transform on the span element itself + if (computed.textTransform && computed.textTransform !== 'none') { + const transformStr = computed.textTransform; + textTransform = (text) => applyTextTransform(text, transformStr); + } + + // Validate: Check for margins on inline elements + if (computed.marginLeft && parseFloat(computed.marginLeft) > 0) { + errors.push(`Inline element <${node.tagName.toLowerCase()}> has margin-left which is not supported in PowerPoint. Remove margin from inline elements.`); + } + if (computed.marginRight && parseFloat(computed.marginRight) > 0) { + errors.push(`Inline element <${node.tagName.toLowerCase()}> has margin-right which is not supported in PowerPoint. Remove margin from inline elements.`); + } + if (computed.marginTop && parseFloat(computed.marginTop) > 0) { + errors.push(`Inline element <${node.tagName.toLowerCase()}> has margin-top which is not supported in PowerPoint. Remove margin from inline elements.`); + } + if (computed.marginBottom && parseFloat(computed.marginBottom) > 0) { + errors.push(`Inline element <${node.tagName.toLowerCase()}> has margin-bottom which is not supported in PowerPoint. Remove margin from inline elements.`); + } + + // Recursively process the child node. This will flatten nested spans into multiple runs. + parseInlineFormatting(node, options, runs, textTransform); + } + } + + prevNodeIsText = isText; + }); + + // Trim leading space from first run and trailing space from last run + if (runs.length > 0) { + runs[0].text = runs[0].text.replace(/^\s+/, ''); + runs[runs.length - 1].text = runs[runs.length - 1].text.replace(/\s+$/, ''); + } + + return runs.filter(r => r.text.length > 0); + }; + + // Extract background from body (image or color) + const body = document.body; + const bodyStyle = window.getComputedStyle(body); + const bgImage = bodyStyle.backgroundImage; + const bgColor = bodyStyle.backgroundColor; + + // Collect validation errors + const errors = []; + + // Validate: Check for CSS gradients + if (bgImage && (bgImage.includes('linear-gradient') || bgImage.includes('radial-gradient'))) { + errors.push( + 'CSS gradients are not supported. 
Use Sharp to rasterize gradients as PNG images first, ' + + 'then reference with background-image: url(\'gradient.png\')' + ); + } + + let background; + if (bgImage && bgImage !== 'none') { + // Extract URL from url("...") or url(...) + const urlMatch = bgImage.match(/url\(["']?([^"')]+)["']?\)/); + if (urlMatch) { + background = { + type: 'image', + path: urlMatch[1] + }; + } else { + background = { + type: 'color', + value: rgbToHex(bgColor) + }; + } + } else { + background = { + type: 'color', + value: rgbToHex(bgColor) + }; + } + + // Process all elements + const elements = []; + const placeholders = []; + const textTags = ['P', 'H1', 'H2', 'H3', 'H4', 'H5', 'H6', 'UL', 'OL', 'LI']; + const processed = new Set(); + + document.querySelectorAll('*').forEach((el) => { + if (processed.has(el)) return; + + // Validate text elements don't have backgrounds, borders, or shadows + if (textTags.includes(el.tagName)) { + const computed = window.getComputedStyle(el); + const hasBg = computed.backgroundColor && computed.backgroundColor !== 'rgba(0, 0, 0, 0)'; + const hasBorder = (computed.borderWidth && parseFloat(computed.borderWidth) > 0) || + (computed.borderTopWidth && parseFloat(computed.borderTopWidth) > 0) || + (computed.borderRightWidth && parseFloat(computed.borderRightWidth) > 0) || + (computed.borderBottomWidth && parseFloat(computed.borderBottomWidth) > 0) || + (computed.borderLeftWidth && parseFloat(computed.borderLeftWidth) > 0); + const hasShadow = computed.boxShadow && computed.boxShadow !== 'none'; + + if (hasBg || hasBorder || hasShadow) { + errors.push( + `Text element <${el.tagName.toLowerCase()}> has ${hasBg ? 'background' : hasBorder ? 'border' : 'shadow'}. ` + + 'Backgrounds, borders, and shadows are only supported on
                      elements, not text elements.' + ); + return; + } + } + + // Extract placeholder elements (for charts, etc.) + if (el.className && el.className.includes('placeholder')) { + const rect = el.getBoundingClientRect(); + if (rect.width === 0 || rect.height === 0) { + errors.push( + `Placeholder "${el.id || 'unnamed'}" has ${rect.width === 0 ? 'width: 0' : 'height: 0'}. Check the layout CSS.` + ); + } else { + placeholders.push({ + id: el.id || `placeholder-${placeholders.length}`, + x: pxToInch(rect.left), + y: pxToInch(rect.top), + w: pxToInch(rect.width), + h: pxToInch(rect.height) + }); + } + processed.add(el); + return; + } + + // Extract images + if (el.tagName === 'IMG') { + const rect = el.getBoundingClientRect(); + if (rect.width > 0 && rect.height > 0) { + elements.push({ + type: 'image', + src: el.src, + position: { + x: pxToInch(rect.left), + y: pxToInch(rect.top), + w: pxToInch(rect.width), + h: pxToInch(rect.height) + } + }); + processed.add(el); + return; + } + } + + // Extract DIVs with backgrounds/borders as shapes + const isContainer = el.tagName === 'DIV' && !textTags.includes(el.tagName); + if (isContainer) { + const computed = window.getComputedStyle(el); + const hasBg = computed.backgroundColor && computed.backgroundColor !== 'rgba(0, 0, 0, 0)'; + + // Validate: Check for unwrapped text content in DIV + for (const node of el.childNodes) { + if (node.nodeType === Node.TEXT_NODE) { + const text = node.textContent.trim(); + if (text) { + errors.push( + `DIV element contains unwrapped text "${text.substring(0, 50)}${text.length > 50 ? '...' : ''}". ` + + 'All text must be wrapped in

<p>, <h1>-<h6>, <ul>, or <li>
                          tags to appear in PowerPoint.' + ); + } + } + } + + // Check for background images on shapes + const bgImage = computed.backgroundImage; + if (bgImage && bgImage !== 'none') { + errors.push( + 'Background images on DIV elements are not supported. ' + + 'Use solid colors or borders for shapes, or use slide.addImage() in PptxGenJS to layer images.' + ); + return; + } + + // Check for borders - both uniform and partial + const borderTop = computed.borderTopWidth; + const borderRight = computed.borderRightWidth; + const borderBottom = computed.borderBottomWidth; + const borderLeft = computed.borderLeftWidth; + const borders = [borderTop, borderRight, borderBottom, borderLeft].map(b => parseFloat(b) || 0); + const hasBorder = borders.some(b => b > 0); + const hasUniformBorder = hasBorder && borders.every(b => b === borders[0]); + const borderLines = []; + + if (hasBorder && !hasUniformBorder) { + const rect = el.getBoundingClientRect(); + const x = pxToInch(rect.left); + const y = pxToInch(rect.top); + const w = pxToInch(rect.width); + const h = pxToInch(rect.height); + + // Collect lines to add after shape (inset by half the line width to center on edge) + if (parseFloat(borderTop) > 0) { + const widthPt = pxToPoints(borderTop); + const inset = (widthPt / 72) / 2; // Convert points to inches, then half + borderLines.push({ + type: 'line', + x1: x, y1: y + inset, x2: x + w, y2: y + inset, + width: widthPt, + color: rgbToHex(computed.borderTopColor) + }); + } + if (parseFloat(borderRight) > 0) { + const widthPt = pxToPoints(borderRight); + const inset = (widthPt / 72) / 2; + borderLines.push({ + type: 'line', + x1: x + w - inset, y1: y, x2: x + w - inset, y2: y + h, + width: widthPt, + color: rgbToHex(computed.borderRightColor) + }); + } + if (parseFloat(borderBottom) > 0) { + const widthPt = pxToPoints(borderBottom); + const inset = (widthPt / 72) / 2; + borderLines.push({ + type: 'line', + x1: x, y1: y + h - inset, x2: x + w, y2: y + h - inset, + width: widthPt, + color: rgbToHex(computed.borderBottomColor) + }); + } + if (parseFloat(borderLeft) > 0) { + const widthPt = pxToPoints(borderLeft); + const inset = (widthPt / 72) / 2; + borderLines.push({ + type: 'line', + x1: x + inset, y1: y, x2: x + inset, y2: y + h, + width: widthPt, + color: rgbToHex(computed.borderLeftColor) + }); + } + } + + if (hasBg || hasBorder) { + const rect = el.getBoundingClientRect(); + if (rect.width > 0 && rect.height > 0) { + const shadow = parseBoxShadow(computed.boxShadow); + + // Only add shape if there's background or uniform border + if (hasBg || hasUniformBorder) { + elements.push({ + type: 'shape', + text: '', // Shape only - child text elements render on top + position: { + x: pxToInch(rect.left), + y: pxToInch(rect.top), + w: pxToInch(rect.width), + h: pxToInch(rect.height) + }, + shape: { + fill: hasBg ? rgbToHex(computed.backgroundColor) : null, + transparency: hasBg ? extractAlpha(computed.backgroundColor) : null, + line: hasUniformBorder ? 
{ + color: rgbToHex(computed.borderColor), + width: pxToPoints(computed.borderWidth) + } : null, + // Convert border-radius to rectRadius (in inches) + // % values: 50%+ = circle (1), <50% = percentage of min dimension + // pt values: divide by 72 (72pt = 1 inch) + // px values: divide by 96 (96px = 1 inch) + rectRadius: (() => { + const radius = computed.borderRadius; + const radiusValue = parseFloat(radius); + if (radiusValue === 0) return 0; + + if (radius.includes('%')) { + if (radiusValue >= 50) return 1; + // Calculate percentage of smaller dimension + const minDim = Math.min(rect.width, rect.height); + return (radiusValue / 100) * pxToInch(minDim); + } + + if (radius.includes('pt')) return radiusValue / 72; + return radiusValue / PX_PER_IN; + })(), + shadow: shadow + } + }); + } + + // Add partial border lines + elements.push(...borderLines); + + processed.add(el); + return; + } + } + } + + // Extract bullet lists as single text block + if (el.tagName === 'UL' || el.tagName === 'OL') { + const rect = el.getBoundingClientRect(); + if (rect.width === 0 || rect.height === 0) return; + + const liElements = Array.from(el.querySelectorAll('li')); + const items = []; + const ulComputed = window.getComputedStyle(el); + const ulPaddingLeftPt = pxToPoints(ulComputed.paddingLeft); + + // Split: margin-left for bullet position, indent for text position + // margin-left + indent = ul padding-left + const marginLeft = ulPaddingLeftPt * 0.5; + const textIndent = ulPaddingLeftPt * 0.5; + + liElements.forEach((li, idx) => { + const isLast = idx === liElements.length - 1; + const runs = parseInlineFormatting(li, { breakLine: false }); + // Clean manual bullets from first run + if (runs.length > 0) { + runs[0].text = runs[0].text.replace(/^[•\-\*▪▸]\s*/, ''); + runs[0].options.bullet = { indent: textIndent }; + } + // Set breakLine on last run + if (runs.length > 0 && !isLast) { + runs[runs.length - 1].options.breakLine = true; + } + items.push(...runs); + }); + + const computed = window.getComputedStyle(liElements[0] || el); + + elements.push({ + type: 'list', + items: items, + position: { + x: pxToInch(rect.left), + y: pxToInch(rect.top), + w: pxToInch(rect.width), + h: pxToInch(rect.height) + }, + style: { + fontSize: pxToPoints(computed.fontSize), + fontFace: computed.fontFamily.split(',')[0].replace(/['"]/g, '').trim(), + color: rgbToHex(computed.color), + transparency: extractAlpha(computed.color), + align: computed.textAlign === 'start' ? 'left' : computed.textAlign, + lineSpacing: computed.lineHeight && computed.lineHeight !== 'normal' ? pxToPoints(computed.lineHeight) : null, + paraSpaceBefore: 0, + paraSpaceAfter: pxToPoints(computed.marginBottom), + // PptxGenJS margin array is [left, right, bottom, top] + margin: [marginLeft, 0, 0, 0] + } + }); + + liElements.forEach(li => processed.add(li)); + processed.add(el); + return; + } + + // Extract text elements (P, H1, H2, etc.) + if (!textTags.includes(el.tagName)) return; + + const rect = el.getBoundingClientRect(); + const text = el.textContent.trim(); + if (rect.width === 0 || rect.height === 0 || !text) return; + + // Validate: Check for manual bullet symbols in text elements (not in lists) + if (el.tagName !== 'LI' && /^[•\-\*▪▸○●◆◇■□]\s/.test(text.trimStart())) { + errors.push( + `Text element <${el.tagName.toLowerCase()}> starts with bullet symbol "${text.substring(0, 20)}...". ` + + 'Use
<ul> or <ol>
                              lists instead of manual bullet symbols.' + ); + return; + } + + const computed = window.getComputedStyle(el); + const rotation = getRotation(computed.transform, computed.writingMode); + const { x, y, w, h } = getPositionAndSize(el, rect, rotation); + + const baseStyle = { + fontSize: pxToPoints(computed.fontSize), + fontFace: computed.fontFamily.split(',')[0].replace(/['"]/g, '').trim(), + color: rgbToHex(computed.color), + align: computed.textAlign === 'start' ? 'left' : computed.textAlign, + lineSpacing: pxToPoints(computed.lineHeight), + paraSpaceBefore: pxToPoints(computed.marginTop), + paraSpaceAfter: pxToPoints(computed.marginBottom), + // PptxGenJS margin array is [left, right, bottom, top] (not [top, right, bottom, left] as documented) + margin: [ + pxToPoints(computed.paddingLeft), + pxToPoints(computed.paddingRight), + pxToPoints(computed.paddingBottom), + pxToPoints(computed.paddingTop) + ] + }; + + const transparency = extractAlpha(computed.color); + if (transparency !== null) baseStyle.transparency = transparency; + + if (rotation !== null) baseStyle.rotate = rotation; + + const hasFormatting = el.querySelector('b, i, u, strong, em, span, br'); + + if (hasFormatting) { + // Text with inline formatting + const transformStr = computed.textTransform; + const runs = parseInlineFormatting(el, {}, [], (str) => applyTextTransform(str, transformStr)); + + // Adjust lineSpacing based on largest fontSize in runs + const adjustedStyle = { ...baseStyle }; + if (adjustedStyle.lineSpacing) { + const maxFontSize = Math.max( + adjustedStyle.fontSize, + ...runs.map(r => r.options?.fontSize || 0) + ); + if (maxFontSize > adjustedStyle.fontSize) { + const lineHeightMultiplier = adjustedStyle.lineSpacing / adjustedStyle.fontSize; + adjustedStyle.lineSpacing = maxFontSize * lineHeightMultiplier; + } + } + + elements.push({ + type: el.tagName.toLowerCase(), + text: runs, + position: { x: pxToInch(x), y: pxToInch(y), w: pxToInch(w), h: pxToInch(h) }, + style: adjustedStyle + }); + } else { + // Plain text - inherit CSS formatting + const textTransform = computed.textTransform; + const transformedText = applyTextTransform(text, textTransform); + + const isBold = computed.fontWeight === 'bold' || parseInt(computed.fontWeight) >= 600; + + elements.push({ + type: el.tagName.toLowerCase(), + text: transformedText, + position: { x: pxToInch(x), y: pxToInch(y), w: pxToInch(w), h: pxToInch(h) }, + style: { + ...baseStyle, + bold: isBold && !shouldSkipBold(computed.fontFamily), + italic: computed.fontStyle === 'italic', + underline: computed.textDecoration.includes('underline') + } + }); + } + + processed.add(el); + }); + + return { background, elements, placeholders, errors }; + }); +} + +async function html2pptx(htmlFile, pres, options = {}) { + const { + tmpDir = process.env.TMPDIR || '/tmp', + slide = null + } = options; + + try { + // Use Chrome on macOS, default Chromium on Unix + const launchOptions = { env: { TMPDIR: tmpDir } }; + if (process.platform === 'darwin') { + launchOptions.channel = 'chrome'; + } + + const browser = await chromium.launch(launchOptions); + + let bodyDimensions; + let slideData; + + const filePath = path.isAbsolute(htmlFile) ? 
htmlFile : path.join(process.cwd(), htmlFile); + const validationErrors = []; + + try { + const page = await browser.newPage(); + page.on('console', (msg) => { + // Log the message text to your test runner's console + console.log(`Browser console: ${msg.text()}`); + }); + + await page.goto(`file://${filePath}`); + + bodyDimensions = await getBodyDimensions(page); + + await page.setViewportSize({ + width: Math.round(bodyDimensions.width), + height: Math.round(bodyDimensions.height) + }); + + slideData = await extractSlideData(page); + } finally { + await browser.close(); + } + + // Collect all validation errors + if (bodyDimensions.errors && bodyDimensions.errors.length > 0) { + validationErrors.push(...bodyDimensions.errors); + } + + const dimensionErrors = validateDimensions(bodyDimensions, pres); + if (dimensionErrors.length > 0) { + validationErrors.push(...dimensionErrors); + } + + const textBoxPositionErrors = validateTextBoxPosition(slideData, bodyDimensions); + if (textBoxPositionErrors.length > 0) { + validationErrors.push(...textBoxPositionErrors); + } + + if (slideData.errors && slideData.errors.length > 0) { + validationErrors.push(...slideData.errors); + } + + // Throw all errors at once if any exist + if (validationErrors.length > 0) { + const errorMessage = validationErrors.length === 1 + ? validationErrors[0] + : `Multiple validation errors found:\n${validationErrors.map((e, i) => ` ${i + 1}. ${e}`).join('\n')}`; + throw new Error(errorMessage); + } + + const targetSlide = slide || pres.addSlide(); + + await addBackground(slideData, targetSlide, tmpDir); + addElements(slideData, targetSlide, pres); + + return { slide: targetSlide, placeholders: slideData.placeholders }; + } catch (error) { + if (!error.message.startsWith(htmlFile)) { + throw new Error(`${htmlFile}: ${error.message}`); + } + throw error; + } +} + +module.exports = html2pptx; \ No newline at end of file diff --git a/skills/pptx/scripts/inventory.py b/skills/pptx/scripts/inventory.py new file mode 100755 index 000000000..edda390e7 --- /dev/null +++ b/skills/pptx/scripts/inventory.py @@ -0,0 +1,1020 @@ +#!/usr/bin/env python3 +""" +Extract structured text content from PowerPoint presentations. 
+ +This module provides functionality to: +- Extract all text content from PowerPoint shapes +- Preserve paragraph formatting (alignment, bullets, fonts, spacing) +- Handle nested GroupShapes recursively with correct absolute positions +- Sort shapes by visual position on slides +- Filter out slide numbers and non-content placeholders +- Export to JSON with clean, structured data + +Classes: + ParagraphData: Represents a text paragraph with formatting + ShapeData: Represents a shape with position and text content + +Main Functions: + extract_text_inventory: Extract all text from a presentation + save_inventory: Save extracted data to JSON + +Usage: + python inventory.py input.pptx output.json +""" + +import argparse +import json +import platform +import sys +from dataclasses import dataclass +from pathlib import Path +from typing import Any, Dict, List, Optional, Tuple, Union + +from PIL import Image, ImageDraw, ImageFont +from pptx import Presentation +from pptx.enum.text import PP_ALIGN +from pptx.shapes.base import BaseShape + +# Type aliases for cleaner signatures +JsonValue = Union[str, int, float, bool, None] +ParagraphDict = Dict[str, JsonValue] +ShapeDict = Dict[ + str, Union[str, float, bool, List[ParagraphDict], List[str], Dict[str, Any], None] +] +InventoryData = Dict[ + str, Dict[str, "ShapeData"] +] # Dict of slide_id -> {shape_id -> ShapeData} +InventoryDict = Dict[str, Dict[str, ShapeDict]] # JSON-serializable inventory + + +def main(): + """Main entry point for command-line usage.""" + parser = argparse.ArgumentParser( + description="Extract text inventory from PowerPoint with proper GroupShape support.", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + python inventory.py presentation.pptx inventory.json + Extracts text inventory with correct absolute positions for grouped shapes + + python inventory.py presentation.pptx inventory.json --issues-only + Extracts only text shapes that have overflow or overlap issues + +The output JSON includes: + - All text content organized by slide and shape + - Correct absolute positions for shapes in groups + - Visual position and size in inches + - Paragraph properties and formatting + - Issue detection: text overflow and shape overlaps + """, + ) + + parser.add_argument("input", help="Input PowerPoint file (.pptx)") + parser.add_argument("output", help="Output JSON file for inventory") + parser.add_argument( + "--issues-only", + action="store_true", + help="Include only text shapes that have overflow or overlap issues", + ) + + args = parser.parse_args() + + input_path = Path(args.input) + if not input_path.exists(): + print(f"Error: Input file not found: {args.input}") + sys.exit(1) + + if not input_path.suffix.lower() == ".pptx": + print("Error: Input must be a PowerPoint file (.pptx)") + sys.exit(1) + + try: + print(f"Extracting text inventory from: {args.input}") + if args.issues_only: + print( + "Filtering to include only text shapes with issues (overflow/overlap)" + ) + inventory = extract_text_inventory(input_path, issues_only=args.issues_only) + + output_path = Path(args.output) + output_path.parent.mkdir(parents=True, exist_ok=True) + save_inventory(inventory, output_path) + + print(f"Output saved to: {args.output}") + + # Report statistics + total_slides = len(inventory) + total_shapes = sum(len(shapes) for shapes in inventory.values()) + if args.issues_only: + if total_shapes > 0: + print( + f"Found {total_shapes} text elements with issues in {total_slides} slides" + ) + else: + print("No 
issues discovered") + else: + print( + f"Found text in {total_slides} slides with {total_shapes} text elements" + ) + + except Exception as e: + print(f"Error processing presentation: {e}") + import traceback + + traceback.print_exc() + sys.exit(1) + + +@dataclass +class ShapeWithPosition: + """A shape with its absolute position on the slide.""" + + shape: BaseShape + absolute_left: int # in EMUs + absolute_top: int # in EMUs + + +class ParagraphData: + """Data structure for paragraph properties extracted from a PowerPoint paragraph.""" + + def __init__(self, paragraph: Any): + """Initialize from a PowerPoint paragraph object. + + Args: + paragraph: The PowerPoint paragraph object + """ + self.text: str = paragraph.text.strip() + self.bullet: bool = False + self.level: Optional[int] = None + self.alignment: Optional[str] = None + self.space_before: Optional[float] = None + self.space_after: Optional[float] = None + self.font_name: Optional[str] = None + self.font_size: Optional[float] = None + self.bold: Optional[bool] = None + self.italic: Optional[bool] = None + self.underline: Optional[bool] = None + self.color: Optional[str] = None + self.theme_color: Optional[str] = None + self.line_spacing: Optional[float] = None + + # Check for bullet formatting + if ( + hasattr(paragraph, "_p") + and paragraph._p is not None + and paragraph._p.pPr is not None + ): + pPr = paragraph._p.pPr + ns = "{http://schemas.openxmlformats.org/drawingml/2006/main}" + if ( + pPr.find(f"{ns}buChar") is not None + or pPr.find(f"{ns}buAutoNum") is not None + ): + self.bullet = True + if hasattr(paragraph, "level"): + self.level = paragraph.level + + # Add alignment if not LEFT (default) + if hasattr(paragraph, "alignment") and paragraph.alignment is not None: + alignment_map = { + PP_ALIGN.CENTER: "CENTER", + PP_ALIGN.RIGHT: "RIGHT", + PP_ALIGN.JUSTIFY: "JUSTIFY", + } + if paragraph.alignment in alignment_map: + self.alignment = alignment_map[paragraph.alignment] + + # Add spacing properties if set + if hasattr(paragraph, "space_before") and paragraph.space_before: + self.space_before = paragraph.space_before.pt + if hasattr(paragraph, "space_after") and paragraph.space_after: + self.space_after = paragraph.space_after.pt + + # Extract font properties from first run + if paragraph.runs: + first_run = paragraph.runs[0] + if hasattr(first_run, "font"): + font = first_run.font + if font.name: + self.font_name = font.name + if font.size: + self.font_size = font.size.pt + if font.bold is not None: + self.bold = font.bold + if font.italic is not None: + self.italic = font.italic + if font.underline is not None: + self.underline = font.underline + + # Handle color - both RGB and theme colors + try: + # Try RGB color first + if font.color.rgb: + self.color = str(font.color.rgb) + except (AttributeError, TypeError): + # Fall back to theme color + try: + if font.color.theme_color: + self.theme_color = font.color.theme_color.name + except (AttributeError, TypeError): + pass + + # Add line spacing if set + if hasattr(paragraph, "line_spacing") and paragraph.line_spacing is not None: + if hasattr(paragraph.line_spacing, "pt"): + self.line_spacing = round(paragraph.line_spacing.pt, 2) + else: + # Multiplier - convert to points + font_size = self.font_size if self.font_size else 12.0 + self.line_spacing = round(paragraph.line_spacing * font_size, 2) + + def to_dict(self) -> ParagraphDict: + """Convert to dictionary for JSON serialization, excluding None values.""" + result: ParagraphDict = {"text": self.text} + + # Add optional 
fields only if they have values + if self.bullet: + result["bullet"] = self.bullet + if self.level is not None: + result["level"] = self.level + if self.alignment: + result["alignment"] = self.alignment + if self.space_before is not None: + result["space_before"] = self.space_before + if self.space_after is not None: + result["space_after"] = self.space_after + if self.font_name: + result["font_name"] = self.font_name + if self.font_size is not None: + result["font_size"] = self.font_size + if self.bold is not None: + result["bold"] = self.bold + if self.italic is not None: + result["italic"] = self.italic + if self.underline is not None: + result["underline"] = self.underline + if self.color: + result["color"] = self.color + if self.theme_color: + result["theme_color"] = self.theme_color + if self.line_spacing is not None: + result["line_spacing"] = self.line_spacing + + return result + + +class ShapeData: + """Data structure for shape properties extracted from a PowerPoint shape.""" + + @staticmethod + def emu_to_inches(emu: int) -> float: + """Convert EMUs (English Metric Units) to inches.""" + return emu / 914400.0 + + @staticmethod + def inches_to_pixels(inches: float, dpi: int = 96) -> int: + """Convert inches to pixels at given DPI.""" + return int(inches * dpi) + + @staticmethod + def get_font_path(font_name: str) -> Optional[str]: + """Get the font file path for a given font name. + + Args: + font_name: Name of the font (e.g., 'Arial', 'Calibri') + + Returns: + Path to the font file, or None if not found + """ + system = platform.system() + + # Common font file variations to try + font_variations = [ + font_name, + font_name.lower(), + font_name.replace(" ", ""), + font_name.replace(" ", "-"), + ] + + # Define font directories and extensions by platform + if system == "Darwin": # macOS + font_dirs = [ + "/System/Library/Fonts/", + "/Library/Fonts/", + "~/Library/Fonts/", + ] + extensions = [".ttf", ".otf", ".ttc", ".dfont"] + else: # Linux + font_dirs = [ + "/usr/share/fonts/truetype/", + "/usr/local/share/fonts/", + "~/.fonts/", + ] + extensions = [".ttf", ".otf"] + + # Try to find the font file + from pathlib import Path + + for font_dir in font_dirs: + font_dir_path = Path(font_dir).expanduser() + if not font_dir_path.exists(): + continue + + # First try exact matches + for variant in font_variations: + for ext in extensions: + font_path = font_dir_path / f"{variant}{ext}" + if font_path.exists(): + return str(font_path) + + # Then try fuzzy matching - find files containing the font name + try: + for file_path in font_dir_path.iterdir(): + if file_path.is_file(): + file_name_lower = file_path.name.lower() + font_name_lower = font_name.lower().replace(" ", "") + if font_name_lower in file_name_lower and any( + file_name_lower.endswith(ext) for ext in extensions + ): + return str(file_path) + except (OSError, PermissionError): + continue + + return None + + @staticmethod + def get_slide_dimensions(slide: Any) -> tuple[Optional[int], Optional[int]]: + """Get slide dimensions from slide object. + + Args: + slide: Slide object + + Returns: + Tuple of (width_emu, height_emu) or (None, None) if not found + """ + try: + prs = slide.part.package.presentation_part.presentation + return prs.slide_width, prs.slide_height + except (AttributeError, TypeError): + return None, None + + @staticmethod + def get_default_font_size(shape: BaseShape, slide_layout: Any) -> Optional[float]: + """Extract default font size from slide layout for a placeholder shape. 
+ + Args: + shape: Placeholder shape + slide_layout: Slide layout containing the placeholder definition + + Returns: + Default font size in points, or None if not found + """ + try: + if not hasattr(shape, "placeholder_format"): + return None + + shape_type = shape.placeholder_format.type # type: ignore + for layout_placeholder in slide_layout.placeholders: + if layout_placeholder.placeholder_format.type == shape_type: + # Find first defRPr element with sz (size) attribute + for elem in layout_placeholder.element.iter(): + if "defRPr" in elem.tag and (sz := elem.get("sz")): + return float(sz) / 100.0 # Convert EMUs to points + break + except Exception: + pass + return None + + def __init__( + self, + shape: BaseShape, + absolute_left: Optional[int] = None, + absolute_top: Optional[int] = None, + slide: Optional[Any] = None, + ): + """Initialize from a PowerPoint shape object. + + Args: + shape: The PowerPoint shape object (should be pre-validated) + absolute_left: Absolute left position in EMUs (for shapes in groups) + absolute_top: Absolute top position in EMUs (for shapes in groups) + slide: Optional slide object to get dimensions and layout information + """ + self.shape = shape # Store reference to original shape + self.shape_id: str = "" # Will be set after sorting + + # Get slide dimensions from slide object + self.slide_width_emu, self.slide_height_emu = ( + self.get_slide_dimensions(slide) if slide else (None, None) + ) + + # Get placeholder type if applicable + self.placeholder_type: Optional[str] = None + self.default_font_size: Optional[float] = None + if hasattr(shape, "is_placeholder") and shape.is_placeholder: # type: ignore + if shape.placeholder_format and shape.placeholder_format.type: # type: ignore + self.placeholder_type = ( + str(shape.placeholder_format.type).split(".")[-1].split(" ")[0] # type: ignore + ) + + # Get default font size from layout + if slide and hasattr(slide, "slide_layout"): + self.default_font_size = self.get_default_font_size( + shape, slide.slide_layout + ) + + # Get position information + # Use absolute positions if provided (for shapes in groups), otherwise use shape's position + left_emu = ( + absolute_left + if absolute_left is not None + else (shape.left if hasattr(shape, "left") else 0) + ) + top_emu = ( + absolute_top + if absolute_top is not None + else (shape.top if hasattr(shape, "top") else 0) + ) + + self.left: float = round(self.emu_to_inches(left_emu), 2) # type: ignore + self.top: float = round(self.emu_to_inches(top_emu), 2) # type: ignore + self.width: float = round( + self.emu_to_inches(shape.width if hasattr(shape, "width") else 0), + 2, # type: ignore + ) + self.height: float = round( + self.emu_to_inches(shape.height if hasattr(shape, "height") else 0), + 2, # type: ignore + ) + + # Store EMU positions for overflow calculations + self.left_emu = left_emu + self.top_emu = top_emu + self.width_emu = shape.width if hasattr(shape, "width") else 0 + self.height_emu = shape.height if hasattr(shape, "height") else 0 + + # Calculate overflow status + self.frame_overflow_bottom: Optional[float] = None + self.slide_overflow_right: Optional[float] = None + self.slide_overflow_bottom: Optional[float] = None + self.overlapping_shapes: Dict[ + str, float + ] = {} # Dict of shape_id -> overlap area in sq inches + self.warnings: List[str] = [] + self._estimate_frame_overflow() + self._calculate_slide_overflow() + self._detect_bullet_issues() + + @property + def paragraphs(self) -> List[ParagraphData]: + """Calculate paragraphs from the shape's 
text frame.""" + if not self.shape or not hasattr(self.shape, "text_frame"): + return [] + + paragraphs = [] + for paragraph in self.shape.text_frame.paragraphs: # type: ignore + if paragraph.text.strip(): + paragraphs.append(ParagraphData(paragraph)) + return paragraphs + + def _get_default_font_size(self) -> int: + """Get default font size from theme text styles or use conservative default.""" + try: + if not ( + hasattr(self.shape, "part") and hasattr(self.shape.part, "slide_layout") + ): + return 14 + + slide_master = self.shape.part.slide_layout.slide_master # type: ignore + if not hasattr(slide_master, "element"): + return 14 + + # Determine theme style based on placeholder type + style_name = "bodyStyle" # Default + if self.placeholder_type and "TITLE" in self.placeholder_type: + style_name = "titleStyle" + + # Find font size in theme styles + for child in slide_master.element.iter(): + tag = child.tag.split("}")[-1] if "}" in child.tag else child.tag + if tag == style_name: + for elem in child.iter(): + if "sz" in elem.attrib: + return int(elem.attrib["sz"]) // 100 + except Exception: + pass + + return 14 # Conservative default for body text + + def _get_usable_dimensions(self, text_frame) -> Tuple[int, int]: + """Get usable width and height in pixels after accounting for margins.""" + # Default PowerPoint margins in inches + margins = {"top": 0.05, "bottom": 0.05, "left": 0.1, "right": 0.1} + + # Override with actual margins if set + if hasattr(text_frame, "margin_top") and text_frame.margin_top: + margins["top"] = self.emu_to_inches(text_frame.margin_top) + if hasattr(text_frame, "margin_bottom") and text_frame.margin_bottom: + margins["bottom"] = self.emu_to_inches(text_frame.margin_bottom) + if hasattr(text_frame, "margin_left") and text_frame.margin_left: + margins["left"] = self.emu_to_inches(text_frame.margin_left) + if hasattr(text_frame, "margin_right") and text_frame.margin_right: + margins["right"] = self.emu_to_inches(text_frame.margin_right) + + # Calculate usable area + usable_width = self.width - margins["left"] - margins["right"] + usable_height = self.height - margins["top"] - margins["bottom"] + + # Convert to pixels + return ( + self.inches_to_pixels(usable_width), + self.inches_to_pixels(usable_height), + ) + + def _wrap_text_line(self, line: str, max_width_px: int, draw, font) -> List[str]: + """Wrap a single line of text to fit within max_width_px.""" + if not line: + return [""] + + # Use textlength for efficient width calculation + if draw.textlength(line, font=font) <= max_width_px: + return [line] + + # Need to wrap - split into words + wrapped = [] + words = line.split(" ") + current_line = "" + + for word in words: + test_line = current_line + (" " if current_line else "") + word + if draw.textlength(test_line, font=font) <= max_width_px: + current_line = test_line + else: + if current_line: + wrapped.append(current_line) + current_line = word + + if current_line: + wrapped.append(current_line) + + return wrapped + + def _estimate_frame_overflow(self) -> None: + """Estimate if text overflows the shape bounds using PIL text measurement.""" + if not self.shape or not hasattr(self.shape, "text_frame"): + return + + text_frame = self.shape.text_frame # type: ignore + if not text_frame or not text_frame.paragraphs: + return + + # Get usable dimensions after accounting for margins + usable_width_px, usable_height_px = self._get_usable_dimensions(text_frame) + if usable_width_px <= 0 or usable_height_px <= 0: + return + + # Set up PIL for text measurement + 
dummy_img = Image.new("RGB", (1, 1)) + draw = ImageDraw.Draw(dummy_img) + + # Get default font size from placeholder or use conservative estimate + default_font_size = self._get_default_font_size() + + # Calculate total height of all paragraphs + total_height_px = 0 + + for para_idx, paragraph in enumerate(text_frame.paragraphs): + if not paragraph.text.strip(): + continue + + para_data = ParagraphData(paragraph) + + # Load font for this paragraph + font_name = para_data.font_name or "Arial" + font_size = int(para_data.font_size or default_font_size) + + font = None + font_path = self.get_font_path(font_name) + if font_path: + try: + font = ImageFont.truetype(font_path, size=font_size) + except Exception: + font = ImageFont.load_default() + else: + font = ImageFont.load_default() + + # Wrap all lines in this paragraph + all_wrapped_lines = [] + for line in paragraph.text.split("\n"): + wrapped = self._wrap_text_line(line, usable_width_px, draw, font) + all_wrapped_lines.extend(wrapped) + + if all_wrapped_lines: + # Calculate line height + if para_data.line_spacing: + # Custom line spacing explicitly set + line_height_px = para_data.line_spacing * 96 / 72 + else: + # PowerPoint default single spacing (1.0x font size) + line_height_px = font_size * 96 / 72 + + # Add space_before (except first paragraph) + if para_idx > 0 and para_data.space_before: + total_height_px += para_data.space_before * 96 / 72 + + # Add paragraph text height + total_height_px += len(all_wrapped_lines) * line_height_px + + # Add space_after + if para_data.space_after: + total_height_px += para_data.space_after * 96 / 72 + + # Check for overflow (ignore negligible overflows <= 0.05") + if total_height_px > usable_height_px: + overflow_px = total_height_px - usable_height_px + overflow_inches = round(overflow_px / 96.0, 2) + if overflow_inches > 0.05: # Only report significant overflows + self.frame_overflow_bottom = overflow_inches + + def _calculate_slide_overflow(self) -> None: + """Calculate if shape overflows the slide boundaries.""" + if self.slide_width_emu is None or self.slide_height_emu is None: + return + + # Check right overflow (ignore negligible overflows <= 0.01") + right_edge_emu = self.left_emu + self.width_emu + if right_edge_emu > self.slide_width_emu: + overflow_emu = right_edge_emu - self.slide_width_emu + overflow_inches = round(self.emu_to_inches(overflow_emu), 2) + if overflow_inches > 0.01: # Only report significant overflows + self.slide_overflow_right = overflow_inches + + # Check bottom overflow (ignore negligible overflows <= 0.01") + bottom_edge_emu = self.top_emu + self.height_emu + if bottom_edge_emu > self.slide_height_emu: + overflow_emu = bottom_edge_emu - self.slide_height_emu + overflow_inches = round(self.emu_to_inches(overflow_emu), 2) + if overflow_inches > 0.01: # Only report significant overflows + self.slide_overflow_bottom = overflow_inches + + def _detect_bullet_issues(self) -> None: + """Detect bullet point formatting issues in paragraphs.""" + if not self.shape or not hasattr(self.shape, "text_frame"): + return + + text_frame = self.shape.text_frame # type: ignore + if not text_frame or not text_frame.paragraphs: + return + + # Common bullet symbols that indicate manual bullets + bullet_symbols = ["•", "●", "○"] + + for paragraph in text_frame.paragraphs: + text = paragraph.text.strip() + # Check for manual bullet symbols + if text and any(text.startswith(symbol + " ") for symbol in bullet_symbols): + self.warnings.append( + "manual_bullet_symbol: use proper bullet 
formatting" + ) + break + + @property + def has_any_issues(self) -> bool: + """Check if shape has any issues (overflow, overlap, or warnings).""" + return ( + self.frame_overflow_bottom is not None + or self.slide_overflow_right is not None + or self.slide_overflow_bottom is not None + or len(self.overlapping_shapes) > 0 + or len(self.warnings) > 0 + ) + + def to_dict(self) -> ShapeDict: + """Convert to dictionary for JSON serialization.""" + result: ShapeDict = { + "left": self.left, + "top": self.top, + "width": self.width, + "height": self.height, + } + + # Add optional fields if present + if self.placeholder_type: + result["placeholder_type"] = self.placeholder_type + + if self.default_font_size: + result["default_font_size"] = self.default_font_size + + # Add overflow information only if there is overflow + overflow_data = {} + + # Add frame overflow if present + if self.frame_overflow_bottom is not None: + overflow_data["frame"] = {"overflow_bottom": self.frame_overflow_bottom} + + # Add slide overflow if present + slide_overflow = {} + if self.slide_overflow_right is not None: + slide_overflow["overflow_right"] = self.slide_overflow_right + if self.slide_overflow_bottom is not None: + slide_overflow["overflow_bottom"] = self.slide_overflow_bottom + if slide_overflow: + overflow_data["slide"] = slide_overflow + + # Only add overflow field if there is overflow + if overflow_data: + result["overflow"] = overflow_data + + # Add overlap field if there are overlapping shapes + if self.overlapping_shapes: + result["overlap"] = {"overlapping_shapes": self.overlapping_shapes} + + # Add warnings field if there are warnings + if self.warnings: + result["warnings"] = self.warnings + + # Add paragraphs after placeholder_type + result["paragraphs"] = [para.to_dict() for para in self.paragraphs] + + return result + + +def is_valid_shape(shape: BaseShape) -> bool: + """Check if a shape contains meaningful text content.""" + # Must have a text frame with content + if not hasattr(shape, "text_frame") or not shape.text_frame: # type: ignore + return False + + text = shape.text_frame.text.strip() # type: ignore + if not text: + return False + + # Skip slide numbers and numeric footers + if hasattr(shape, "is_placeholder") and shape.is_placeholder: # type: ignore + if shape.placeholder_format and shape.placeholder_format.type: # type: ignore + placeholder_type = ( + str(shape.placeholder_format.type).split(".")[-1].split(" ")[0] # type: ignore + ) + if placeholder_type == "SLIDE_NUMBER": + return False + if placeholder_type == "FOOTER" and text.isdigit(): + return False + + return True + + +def collect_shapes_with_absolute_positions( + shape: BaseShape, parent_left: int = 0, parent_top: int = 0 +) -> List[ShapeWithPosition]: + """Recursively collect all shapes with valid text, calculating absolute positions. + + For shapes within groups, their positions are relative to the group. + This function calculates the absolute position on the slide by accumulating + parent group offsets. 
+ + Args: + shape: The shape to process + parent_left: Accumulated left offset from parent groups (in EMUs) + parent_top: Accumulated top offset from parent groups (in EMUs) + + Returns: + List of ShapeWithPosition objects with absolute positions + """ + if hasattr(shape, "shapes"): # GroupShape + result = [] + # Get this group's position + group_left = shape.left if hasattr(shape, "left") else 0 + group_top = shape.top if hasattr(shape, "top") else 0 + + # Calculate absolute position for this group + abs_group_left = parent_left + group_left + abs_group_top = parent_top + group_top + + # Process children with accumulated offsets + for child in shape.shapes: # type: ignore + result.extend( + collect_shapes_with_absolute_positions( + child, abs_group_left, abs_group_top + ) + ) + return result + + # Regular shape - check if it has valid text + if is_valid_shape(shape): + # Calculate absolute position + shape_left = shape.left if hasattr(shape, "left") else 0 + shape_top = shape.top if hasattr(shape, "top") else 0 + + return [ + ShapeWithPosition( + shape=shape, + absolute_left=parent_left + shape_left, + absolute_top=parent_top + shape_top, + ) + ] + + return [] + + +def sort_shapes_by_position(shapes: List[ShapeData]) -> List[ShapeData]: + """Sort shapes by visual position (top-to-bottom, left-to-right). + + Shapes within 0.5 inches vertically are considered on the same row. + """ + if not shapes: + return shapes + + # Sort by top position first + shapes = sorted(shapes, key=lambda s: (s.top, s.left)) + + # Group shapes by row (within 0.5 inches vertically) + result = [] + row = [shapes[0]] + row_top = shapes[0].top + + for shape in shapes[1:]: + if abs(shape.top - row_top) <= 0.5: + row.append(shape) + else: + # Sort current row by left position and add to result + result.extend(sorted(row, key=lambda s: s.left)) + row = [shape] + row_top = shape.top + + # Don't forget the last row + result.extend(sorted(row, key=lambda s: s.left)) + return result + + +def calculate_overlap( + rect1: Tuple[float, float, float, float], + rect2: Tuple[float, float, float, float], + tolerance: float = 0.05, +) -> Tuple[bool, float]: + """Calculate if and how much two rectangles overlap. + + Args: + rect1: (left, top, width, height) of first rectangle in inches + rect2: (left, top, width, height) of second rectangle in inches + tolerance: Minimum overlap in inches to consider as overlapping (default: 0.05") + + Returns: + Tuple of (overlaps, overlap_area) where: + - overlaps: True if rectangles overlap by more than tolerance + - overlap_area: Area of overlap in square inches + """ + left1, top1, w1, h1 = rect1 + left2, top2, w2, h2 = rect2 + + # Calculate overlap dimensions + overlap_width = min(left1 + w1, left2 + w2) - max(left1, left2) + overlap_height = min(top1 + h1, top2 + h2) - max(top1, top2) + + # Check if there's meaningful overlap (more than tolerance) + if overlap_width > tolerance and overlap_height > tolerance: + # Calculate overlap area in square inches + overlap_area = overlap_width * overlap_height + return True, round(overlap_area, 2) + + return False, 0 + + +def detect_overlaps(shapes: List[ShapeData]) -> None: + """Detect overlapping shapes and update their overlapping_shapes dictionaries. + + This function requires each ShapeData to have its shape_id already set. + It modifies the shapes in-place, adding shape IDs with overlap areas in square inches. 
+ + Args: + shapes: List of ShapeData objects with shape_id attributes set + """ + n = len(shapes) + + # Compare each pair of shapes + for i in range(n): + for j in range(i + 1, n): + shape1 = shapes[i] + shape2 = shapes[j] + + # Ensure shape IDs are set + assert shape1.shape_id, f"Shape at index {i} has no shape_id" + assert shape2.shape_id, f"Shape at index {j} has no shape_id" + + rect1 = (shape1.left, shape1.top, shape1.width, shape1.height) + rect2 = (shape2.left, shape2.top, shape2.width, shape2.height) + + overlaps, overlap_area = calculate_overlap(rect1, rect2) + + if overlaps: + # Add shape IDs with overlap area in square inches + shape1.overlapping_shapes[shape2.shape_id] = overlap_area + shape2.overlapping_shapes[shape1.shape_id] = overlap_area + + +def extract_text_inventory( + pptx_path: Path, prs: Optional[Any] = None, issues_only: bool = False +) -> InventoryData: + """Extract text content from all slides in a PowerPoint presentation. + + Args: + pptx_path: Path to the PowerPoint file + prs: Optional Presentation object to use. If not provided, will load from pptx_path. + issues_only: If True, only include shapes that have overflow or overlap issues + + Returns a nested dictionary: {slide-N: {shape-N: ShapeData}} + Shapes are sorted by visual position (top-to-bottom, left-to-right). + The ShapeData objects contain the full shape information and can be + converted to dictionaries for JSON serialization using to_dict(). + """ + if prs is None: + prs = Presentation(str(pptx_path)) + inventory: InventoryData = {} + + for slide_idx, slide in enumerate(prs.slides): + # Collect all valid shapes from this slide with absolute positions + shapes_with_positions = [] + for shape in slide.shapes: # type: ignore + shapes_with_positions.extend(collect_shapes_with_absolute_positions(shape)) + + if not shapes_with_positions: + continue + + # Convert to ShapeData with absolute positions and slide reference + shape_data_list = [ + ShapeData( + swp.shape, + swp.absolute_left, + swp.absolute_top, + slide, + ) + for swp in shapes_with_positions + ] + + # Sort by visual position and assign stable IDs in one step + sorted_shapes = sort_shapes_by_position(shape_data_list) + for idx, shape_data in enumerate(sorted_shapes): + shape_data.shape_id = f"shape-{idx}" + + # Detect overlaps using the stable shape IDs + if len(sorted_shapes) > 1: + detect_overlaps(sorted_shapes) + + # Filter for issues only if requested (after overlap detection) + if issues_only: + sorted_shapes = [sd for sd in sorted_shapes if sd.has_any_issues] + + if not sorted_shapes: + continue + + # Create slide inventory using the stable shape IDs + inventory[f"slide-{slide_idx}"] = { + shape_data.shape_id: shape_data for shape_data in sorted_shapes + } + + return inventory + + +def get_inventory_as_dict(pptx_path: Path, issues_only: bool = False) -> InventoryDict: + """Extract text inventory and return as JSON-serializable dictionaries. + + This is a convenience wrapper around extract_text_inventory that returns + dictionaries instead of ShapeData objects, useful for testing and direct + JSON serialization. 
+ + Args: + pptx_path: Path to the PowerPoint file + issues_only: If True, only include shapes that have overflow or overlap issues + + Returns: + Nested dictionary with all data serialized for JSON + """ + inventory = extract_text_inventory(pptx_path, issues_only=issues_only) + + # Convert ShapeData objects to dictionaries + dict_inventory: InventoryDict = {} + for slide_key, shapes in inventory.items(): + dict_inventory[slide_key] = { + shape_key: shape_data.to_dict() for shape_key, shape_data in shapes.items() + } + + return dict_inventory + + +def save_inventory(inventory: InventoryData, output_path: Path) -> None: + """Save inventory to JSON file with proper formatting. + + Converts ShapeData objects to dictionaries for JSON serialization. + """ + # Convert ShapeData objects to dictionaries + json_inventory: InventoryDict = {} + for slide_key, shapes in inventory.items(): + json_inventory[slide_key] = { + shape_key: shape_data.to_dict() for shape_key, shape_data in shapes.items() + } + + with open(output_path, "w", encoding="utf-8") as f: + json.dump(json_inventory, f, indent=2, ensure_ascii=False) + + +if __name__ == "__main__": + main() diff --git a/skills/pptx/scripts/rearrange.py b/skills/pptx/scripts/rearrange.py new file mode 100755 index 000000000..2519911f1 --- /dev/null +++ b/skills/pptx/scripts/rearrange.py @@ -0,0 +1,231 @@ +#!/usr/bin/env python3 +""" +Rearrange PowerPoint slides based on a sequence of indices. + +Usage: + python rearrange.py template.pptx output.pptx 0,34,34,50,52 + +This will create output.pptx using slides from template.pptx in the specified order. +Slides can be repeated (e.g., 34 appears twice). +""" + +import argparse +import shutil +import sys +from copy import deepcopy +from pathlib import Path + +import six +from pptx import Presentation + + +def main(): + parser = argparse.ArgumentParser( + description="Rearrange PowerPoint slides based on a sequence of indices.", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + python rearrange.py template.pptx output.pptx 0,34,34,50,52 + Creates output.pptx using slides 0, 34 (twice), 50, and 52 from template.pptx + + python rearrange.py template.pptx output.pptx 5,3,1,2,4 + Creates output.pptx with slides reordered as specified + +Note: Slide indices are 0-based (first slide is 0, second is 1, etc.) + """, + ) + + parser.add_argument("template", help="Path to template PPTX file") + parser.add_argument("output", help="Path for output PPTX file") + parser.add_argument( + "sequence", help="Comma-separated sequence of slide indices (0-based)" + ) + + args = parser.parse_args() + + # Parse the slide sequence + try: + slide_sequence = [int(x.strip()) for x in args.sequence.split(",")] + except ValueError: + print( + "Error: Invalid sequence format. 
Use comma-separated integers (e.g., 0,34,34,50,52)" + ) + sys.exit(1) + + # Check template exists + template_path = Path(args.template) + if not template_path.exists(): + print(f"Error: Template file not found: {args.template}") + sys.exit(1) + + # Create output directory if needed + output_path = Path(args.output) + output_path.parent.mkdir(parents=True, exist_ok=True) + + try: + rearrange_presentation(template_path, output_path, slide_sequence) + except ValueError as e: + print(f"Error: {e}") + sys.exit(1) + except Exception as e: + print(f"Error processing presentation: {e}") + sys.exit(1) + + +def duplicate_slide(pres, index): + """Duplicate a slide in the presentation.""" + source = pres.slides[index] + + # Use source's layout to preserve formatting + new_slide = pres.slides.add_slide(source.slide_layout) + + # Collect all image and media relationships from the source slide + image_rels = {} + for rel_id, rel in six.iteritems(source.part.rels): + if "image" in rel.reltype or "media" in rel.reltype: + image_rels[rel_id] = rel + + # CRITICAL: Clear placeholder shapes to avoid duplicates + for shape in new_slide.shapes: + sp = shape.element + sp.getparent().remove(sp) + + # Copy all shapes from source + for shape in source.shapes: + el = shape.element + new_el = deepcopy(el) + new_slide.shapes._spTree.insert_element_before(new_el, "p:extLst") + + # Handle picture shapes - need to update the blip reference + # Look for all blip elements (they can be in pic or other contexts) + # Using the element's own xpath method without namespaces argument + blips = new_el.xpath(".//a:blip[@r:embed]") + for blip in blips: + old_rId = blip.get( + "{http://schemas.openxmlformats.org/officeDocument/2006/relationships}embed" + ) + if old_rId in image_rels: + # Create a new relationship in the destination slide for this image + old_rel = image_rels[old_rId] + # get_or_add returns the rId directly, or adds and returns new rId + new_rId = new_slide.part.rels.get_or_add( + old_rel.reltype, old_rel._target + ) + # Update the blip's embed reference to use the new relationship ID + blip.set( + "{http://schemas.openxmlformats.org/officeDocument/2006/relationships}embed", + new_rId, + ) + + # Copy any additional image/media relationships that might be referenced elsewhere + for rel_id, rel in image_rels.items(): + try: + new_slide.part.rels.get_or_add(rel.reltype, rel._target) + except Exception: + pass # Relationship might already exist + + return new_slide + + +def delete_slide(pres, index): + """Delete a slide from the presentation.""" + rId = pres.slides._sldIdLst[index].rId + pres.part.drop_rel(rId) + del pres.slides._sldIdLst[index] + + +def reorder_slides(pres, slide_index, target_index): + """Move a slide from one position to another.""" + slides = pres.slides._sldIdLst + + # Remove slide element from current position + slide_element = slides[slide_index] + slides.remove(slide_element) + + # Insert at target position + slides.insert(target_index, slide_element) + + +def rearrange_presentation(template_path, output_path, slide_sequence): + """ + Create a new presentation with slides from template in specified order. 
+ + Args: + template_path: Path to template PPTX file + output_path: Path for output PPTX file + slide_sequence: List of slide indices (0-based) to include + """ + # Copy template to preserve dimensions and theme + if template_path != output_path: + shutil.copy2(template_path, output_path) + prs = Presentation(output_path) + else: + prs = Presentation(template_path) + + total_slides = len(prs.slides) + + # Validate indices + for idx in slide_sequence: + if idx < 0 or idx >= total_slides: + raise ValueError(f"Slide index {idx} out of range (0-{total_slides - 1})") + + # Track original slides and their duplicates + slide_map = [] # List of actual slide indices for final presentation + duplicated = {} # Track duplicates: original_idx -> [duplicate_indices] + + # Step 1: DUPLICATE repeated slides + print(f"Processing {len(slide_sequence)} slides from template...") + for i, template_idx in enumerate(slide_sequence): + if template_idx in duplicated and duplicated[template_idx]: + # Already duplicated this slide, use the duplicate + slide_map.append(duplicated[template_idx].pop(0)) + print(f" [{i}] Using duplicate of slide {template_idx}") + elif slide_sequence.count(template_idx) > 1 and template_idx not in duplicated: + # First occurrence of a repeated slide - create duplicates + slide_map.append(template_idx) + duplicates = [] + count = slide_sequence.count(template_idx) - 1 + print( + f" [{i}] Using original slide {template_idx}, creating {count} duplicate(s)" + ) + for _ in range(count): + duplicate_slide(prs, template_idx) + duplicates.append(len(prs.slides) - 1) + duplicated[template_idx] = duplicates + else: + # Unique slide or first occurrence already handled, use original + slide_map.append(template_idx) + print(f" [{i}] Using original slide {template_idx}") + + # Step 2: DELETE unwanted slides (work backwards) + slides_to_keep = set(slide_map) + print(f"\nDeleting {len(prs.slides) - len(slides_to_keep)} unused slides...") + for i in range(len(prs.slides) - 1, -1, -1): + if i not in slides_to_keep: + delete_slide(prs, i) + # Update slide_map indices after deletion + slide_map = [idx - 1 if idx > i else idx for idx in slide_map] + + # Step 3: REORDER to final sequence + print(f"Reordering {len(slide_map)} slides to final sequence...") + for target_pos in range(len(slide_map)): + # Find which slide should be at target_pos + current_pos = slide_map[target_pos] + if current_pos != target_pos: + reorder_slides(prs, current_pos, target_pos) + # Update slide_map: the move shifts other slides + for i in range(len(slide_map)): + if slide_map[i] > current_pos and slide_map[i] <= target_pos: + slide_map[i] -= 1 + elif slide_map[i] < current_pos and slide_map[i] >= target_pos: + slide_map[i] += 1 + slide_map[target_pos] = target_pos + + # Save the presentation + prs.save(output_path) + print(f"\nSaved rearranged presentation to: {output_path}") + print(f"Final presentation has {len(prs.slides)} slides") + + +if __name__ == "__main__": + main() diff --git a/skills/pptx/scripts/replace.py b/skills/pptx/scripts/replace.py new file mode 100755 index 000000000..8f7a8b1ba --- /dev/null +++ b/skills/pptx/scripts/replace.py @@ -0,0 +1,385 @@ +#!/usr/bin/env python3 +"""Apply text replacements to PowerPoint presentation. + +Usage: + python replace.py + +The replacements JSON should have the structure output by inventory.py. +ALL text shapes identified by inventory.py will have their text cleared +unless "paragraphs" is specified in the replacements for that shape. 
+""" + +import json +import sys +from pathlib import Path +from typing import Any, Dict, List + +from inventory import InventoryData, extract_text_inventory +from pptx import Presentation +from pptx.dml.color import RGBColor +from pptx.enum.dml import MSO_THEME_COLOR +from pptx.enum.text import PP_ALIGN +from pptx.oxml.xmlchemy import OxmlElement +from pptx.util import Pt + + +def clear_paragraph_bullets(paragraph): + """Clear bullet formatting from a paragraph.""" + pPr = paragraph._element.get_or_add_pPr() + + # Remove existing bullet elements + for child in list(pPr): + if ( + child.tag.endswith("buChar") + or child.tag.endswith("buNone") + or child.tag.endswith("buAutoNum") + or child.tag.endswith("buFont") + ): + pPr.remove(child) + + return pPr + + +def apply_paragraph_properties(paragraph, para_data: Dict[str, Any]): + """Apply formatting properties to a paragraph.""" + # Get the text but don't set it on paragraph directly yet + text = para_data.get("text", "") + + # Get or create paragraph properties + pPr = clear_paragraph_bullets(paragraph) + + # Handle bullet formatting + if para_data.get("bullet", False): + level = para_data.get("level", 0) + paragraph.level = level + + # Calculate font-proportional indentation + font_size = para_data.get("font_size", 18.0) + level_indent_emu = int((font_size * (1.6 + level * 1.6)) * 12700) + hanging_indent_emu = int(-font_size * 0.8 * 12700) + + # Set indentation + pPr.attrib["marL"] = str(level_indent_emu) + pPr.attrib["indent"] = str(hanging_indent_emu) + + # Add bullet character + buChar = OxmlElement("a:buChar") + buChar.set("char", "•") + pPr.append(buChar) + + # Default to left alignment for bullets if not specified + if "alignment" not in para_data: + paragraph.alignment = PP_ALIGN.LEFT + else: + # Remove indentation for non-bullet text + pPr.attrib["marL"] = "0" + pPr.attrib["indent"] = "0" + + # Add buNone element + buNone = OxmlElement("a:buNone") + pPr.insert(0, buNone) + + # Apply alignment + if "alignment" in para_data: + alignment_map = { + "LEFT": PP_ALIGN.LEFT, + "CENTER": PP_ALIGN.CENTER, + "RIGHT": PP_ALIGN.RIGHT, + "JUSTIFY": PP_ALIGN.JUSTIFY, + } + if para_data["alignment"] in alignment_map: + paragraph.alignment = alignment_map[para_data["alignment"]] + + # Apply spacing + if "space_before" in para_data: + paragraph.space_before = Pt(para_data["space_before"]) + if "space_after" in para_data: + paragraph.space_after = Pt(para_data["space_after"]) + if "line_spacing" in para_data: + paragraph.line_spacing = Pt(para_data["line_spacing"]) + + # Apply run-level formatting + if not paragraph.runs: + run = paragraph.add_run() + run.text = text + else: + run = paragraph.runs[0] + run.text = text + + # Apply font properties + apply_font_properties(run, para_data) + + +def apply_font_properties(run, para_data: Dict[str, Any]): + """Apply font properties to a text run.""" + if "bold" in para_data: + run.font.bold = para_data["bold"] + if "italic" in para_data: + run.font.italic = para_data["italic"] + if "underline" in para_data: + run.font.underline = para_data["underline"] + if "font_size" in para_data: + run.font.size = Pt(para_data["font_size"]) + if "font_name" in para_data: + run.font.name = para_data["font_name"] + + # Apply color - prefer RGB, fall back to theme_color + if "color" in para_data: + color_hex = para_data["color"].lstrip("#") + if len(color_hex) == 6: + r = int(color_hex[0:2], 16) + g = int(color_hex[2:4], 16) + b = int(color_hex[4:6], 16) + run.font.color.rgb = RGBColor(r, g, b) + elif "theme_color" in 
para_data: + # Get theme color by name (e.g., "DARK_1", "ACCENT_1") + theme_name = para_data["theme_color"] + try: + run.font.color.theme_color = getattr(MSO_THEME_COLOR, theme_name) + except AttributeError: + print(f" WARNING: Unknown theme color name '{theme_name}'") + + +def detect_frame_overflow(inventory: InventoryData) -> Dict[str, Dict[str, float]]: + """Detect text overflow in shapes (text exceeding shape bounds). + + Returns dict of slide_key -> shape_key -> overflow_inches. + Only includes shapes that have text overflow. + """ + overflow_map = {} + + for slide_key, shapes_dict in inventory.items(): + for shape_key, shape_data in shapes_dict.items(): + # Check for frame overflow (text exceeding shape bounds) + if shape_data.frame_overflow_bottom is not None: + if slide_key not in overflow_map: + overflow_map[slide_key] = {} + overflow_map[slide_key][shape_key] = shape_data.frame_overflow_bottom + + return overflow_map + + +def validate_replacements(inventory: InventoryData, replacements: Dict) -> List[str]: + """Validate that all shapes in replacements exist in inventory. + + Returns list of error messages. + """ + errors = [] + + for slide_key, shapes_data in replacements.items(): + if not slide_key.startswith("slide-"): + continue + + # Check if slide exists + if slide_key not in inventory: + errors.append(f"Slide '{slide_key}' not found in inventory") + continue + + # Check each shape + for shape_key in shapes_data.keys(): + if shape_key not in inventory[slide_key]: + # Find shapes without replacements defined and show their content + unused_with_content = [] + for k in inventory[slide_key].keys(): + if k not in shapes_data: + shape_data = inventory[slide_key][k] + # Get text from paragraphs as preview + paragraphs = shape_data.paragraphs + if paragraphs and paragraphs[0].text: + first_text = paragraphs[0].text[:50] + if len(paragraphs[0].text) > 50: + first_text += "..." + unused_with_content.append(f"{k} ('{first_text}')") + else: + unused_with_content.append(k) + + errors.append( + f"Shape '{shape_key}' not found on '{slide_key}'. 
" + f"Shapes without replacements: {', '.join(sorted(unused_with_content)) if unused_with_content else 'none'}" + ) + + return errors + + +def check_duplicate_keys(pairs): + """Check for duplicate keys when loading JSON.""" + result = {} + for key, value in pairs: + if key in result: + raise ValueError(f"Duplicate key found in JSON: '{key}'") + result[key] = value + return result + + +def apply_replacements(pptx_file: str, json_file: str, output_file: str): + """Apply text replacements from JSON to PowerPoint presentation.""" + + # Load presentation + prs = Presentation(pptx_file) + + # Get inventory of all text shapes (returns ShapeData objects) + # Pass prs to use same Presentation instance + inventory = extract_text_inventory(Path(pptx_file), prs) + + # Detect text overflow in original presentation + original_overflow = detect_frame_overflow(inventory) + + # Load replacement data with duplicate key detection + with open(json_file, "r") as f: + replacements = json.load(f, object_pairs_hook=check_duplicate_keys) + + # Validate replacements + errors = validate_replacements(inventory, replacements) + if errors: + print("ERROR: Invalid shapes in replacement JSON:") + for error in errors: + print(f" - {error}") + print("\nPlease check the inventory and update your replacement JSON.") + print( + "You can regenerate the inventory with: python inventory.py " + ) + raise ValueError(f"Found {len(errors)} validation error(s)") + + # Track statistics + shapes_processed = 0 + shapes_cleared = 0 + shapes_replaced = 0 + + # Process each slide from inventory + for slide_key, shapes_dict in inventory.items(): + if not slide_key.startswith("slide-"): + continue + + slide_index = int(slide_key.split("-")[1]) + + if slide_index >= len(prs.slides): + print(f"Warning: Slide {slide_index} not found") + continue + + # Process each shape from inventory + for shape_key, shape_data in shapes_dict.items(): + shapes_processed += 1 + + # Get the shape directly from ShapeData + shape = shape_data.shape + if not shape: + print(f"Warning: {shape_key} has no shape reference") + continue + + # ShapeData already validates text_frame in __init__ + text_frame = shape.text_frame # type: ignore + + text_frame.clear() # type: ignore + shapes_cleared += 1 + + # Check for replacement paragraphs + replacement_shape_data = replacements.get(slide_key, {}).get(shape_key, {}) + if "paragraphs" not in replacement_shape_data: + continue + + shapes_replaced += 1 + + # Add replacement paragraphs + for i, para_data in enumerate(replacement_shape_data["paragraphs"]): + if i == 0: + p = text_frame.paragraphs[0] # type: ignore + else: + p = text_frame.add_paragraph() # type: ignore + + apply_paragraph_properties(p, para_data) + + # Check for issues after replacements + # Save to a temporary file and reload to avoid modifying the presentation during inventory + # (extract_text_inventory accesses font.color which adds empty elements) + import tempfile + + with tempfile.NamedTemporaryFile(suffix=".pptx", delete=False) as tmp: + tmp_path = Path(tmp.name) + prs.save(str(tmp_path)) + + try: + updated_inventory = extract_text_inventory(tmp_path) + updated_overflow = detect_frame_overflow(updated_inventory) + finally: + tmp_path.unlink() # Clean up temp file + + # Check if any text overflow got worse + overflow_errors = [] + for slide_key, shape_overflows in updated_overflow.items(): + for shape_key, new_overflow in shape_overflows.items(): + # Get original overflow (0 if there was no overflow before) + original = original_overflow.get(slide_key, 
{}).get(shape_key, 0.0) + + # Error if overflow increased + if new_overflow > original + 0.01: # Small tolerance for rounding + increase = new_overflow - original + overflow_errors.append( + f'{slide_key}/{shape_key}: overflow worsened by {increase:.2f}" ' + f'(was {original:.2f}", now {new_overflow:.2f}")' + ) + + # Collect warnings from updated shapes + warnings = [] + for slide_key, shapes_dict in updated_inventory.items(): + for shape_key, shape_data in shapes_dict.items(): + if shape_data.warnings: + for warning in shape_data.warnings: + warnings.append(f"{slide_key}/{shape_key}: {warning}") + + # Fail if there are any issues + if overflow_errors or warnings: + print("\nERROR: Issues detected in replacement output:") + if overflow_errors: + print("\nText overflow worsened:") + for error in overflow_errors: + print(f" - {error}") + if warnings: + print("\nFormatting warnings:") + for warning in warnings: + print(f" - {warning}") + print("\nPlease fix these issues before saving.") + raise ValueError( + f"Found {len(overflow_errors)} overflow error(s) and {len(warnings)} warning(s)" + ) + + # Save the presentation + prs.save(output_file) + + # Report results + print(f"Saved updated presentation to: {output_file}") + print(f"Processed {len(prs.slides)} slides") + print(f" - Shapes processed: {shapes_processed}") + print(f" - Shapes cleared: {shapes_cleared}") + print(f" - Shapes replaced: {shapes_replaced}") + + +def main(): + """Main entry point for command-line usage.""" + if len(sys.argv) != 4: + print(__doc__) + sys.exit(1) + + input_pptx = Path(sys.argv[1]) + replacements_json = Path(sys.argv[2]) + output_pptx = Path(sys.argv[3]) + + if not input_pptx.exists(): + print(f"Error: Input file '{input_pptx}' not found") + sys.exit(1) + + if not replacements_json.exists(): + print(f"Error: Replacements JSON file '{replacements_json}' not found") + sys.exit(1) + + try: + apply_replacements(str(input_pptx), str(replacements_json), str(output_pptx)) + except Exception as e: + print(f"Error applying replacements: {e}") + import traceback + + traceback.print_exc() + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/skills/pptx/scripts/thumbnail.py b/skills/pptx/scripts/thumbnail.py new file mode 100755 index 000000000..5c7fdf197 --- /dev/null +++ b/skills/pptx/scripts/thumbnail.py @@ -0,0 +1,450 @@ +#!/usr/bin/env python3 +""" +Create thumbnail grids from PowerPoint presentation slides. + +Creates a grid layout of slide thumbnails with configurable columns (max 6). +Each grid contains up to cols×(cols+1) images. For presentations with more +slides, multiple numbered grid files are created automatically. + +The program outputs the names of all files created. + +Output: +- Single grid: {prefix}.jpg (if slides fit in one grid) +- Multiple grids: {prefix}-1.jpg, {prefix}-2.jpg, etc. 
+ +Grid limits by column count: +- 3 cols: max 12 slides per grid (3×4) +- 4 cols: max 20 slides per grid (4×5) +- 5 cols: max 30 slides per grid (5×6) [default] +- 6 cols: max 42 slides per grid (6×7) + +Usage: + python thumbnail.py input.pptx [output_prefix] [--cols N] [--outline-placeholders] + +Examples: + python thumbnail.py presentation.pptx + # Creates: thumbnails.jpg (using default prefix) + # Outputs: + # Created 1 grid(s): + # - thumbnails.jpg + + python thumbnail.py large-deck.pptx grid --cols 4 + # Creates: grid-1.jpg, grid-2.jpg, grid-3.jpg + # Outputs: + # Created 3 grid(s): + # - grid-1.jpg + # - grid-2.jpg + # - grid-3.jpg + + python thumbnail.py template.pptx analysis --outline-placeholders + # Creates thumbnail grids with red outlines around text placeholders +""" + +import argparse +import subprocess +import sys +import tempfile +from pathlib import Path + +from inventory import extract_text_inventory +from PIL import Image, ImageDraw, ImageFont +from pptx import Presentation + +# Constants +THUMBNAIL_WIDTH = 300 # Fixed thumbnail width in pixels +CONVERSION_DPI = 100 # DPI for PDF to image conversion +MAX_COLS = 6 # Maximum number of columns +DEFAULT_COLS = 5 # Default number of columns +JPEG_QUALITY = 95 # JPEG compression quality + +# Grid layout constants +GRID_PADDING = 20 # Padding between thumbnails +BORDER_WIDTH = 2 # Border width around thumbnails +FONT_SIZE_RATIO = 0.12 # Font size as fraction of thumbnail width +LABEL_PADDING_RATIO = 0.4 # Label padding as fraction of font size + + +def main(): + parser = argparse.ArgumentParser( + description="Create thumbnail grids from PowerPoint slides." + ) + parser.add_argument("input", help="Input PowerPoint file (.pptx)") + parser.add_argument( + "output_prefix", + nargs="?", + default="thumbnails", + help="Output prefix for image files (default: thumbnails, will create prefix.jpg or prefix-N.jpg)", + ) + parser.add_argument( + "--cols", + type=int, + default=DEFAULT_COLS, + help=f"Number of columns (default: {DEFAULT_COLS}, max: {MAX_COLS})", + ) + parser.add_argument( + "--outline-placeholders", + action="store_true", + help="Outline text placeholders with a colored border", + ) + + args = parser.parse_args() + + # Validate columns + cols = min(args.cols, MAX_COLS) + if args.cols > MAX_COLS: + print(f"Warning: Columns limited to {MAX_COLS} (requested {args.cols})") + + # Validate input + input_path = Path(args.input) + if not input_path.exists() or input_path.suffix.lower() != ".pptx": + print(f"Error: Invalid PowerPoint file: {args.input}") + sys.exit(1) + + # Construct output path (always JPG) + output_path = Path(f"{args.output_prefix}.jpg") + + print(f"Processing: {args.input}") + + try: + with tempfile.TemporaryDirectory() as temp_dir: + # Get placeholder regions if outlining is enabled + placeholder_regions = None + slide_dimensions = None + if args.outline_placeholders: + print("Extracting placeholder regions...") + placeholder_regions, slide_dimensions = get_placeholder_regions( + input_path + ) + if placeholder_regions: + print(f"Found placeholders on {len(placeholder_regions)} slides") + + # Convert slides to images + slide_images = convert_to_images(input_path, Path(temp_dir), CONVERSION_DPI) + if not slide_images: + print("Error: No slides found") + sys.exit(1) + + print(f"Found {len(slide_images)} slides") + + # Create grids (max cols×(cols+1) images per grid) + grid_files = create_grids( + slide_images, + cols, + THUMBNAIL_WIDTH, + output_path, + placeholder_regions, + slide_dimensions, + ) + + # Print 
saved files + print(f"Created {len(grid_files)} grid(s):") + for grid_file in grid_files: + print(f" - {grid_file}") + + except Exception as e: + print(f"Error: {e}") + sys.exit(1) + + +def create_hidden_slide_placeholder(size): + """Create placeholder image for hidden slides.""" + img = Image.new("RGB", size, color="#F0F0F0") + draw = ImageDraw.Draw(img) + line_width = max(5, min(size) // 100) + draw.line([(0, 0), size], fill="#CCCCCC", width=line_width) + draw.line([(size[0], 0), (0, size[1])], fill="#CCCCCC", width=line_width) + return img + + +def get_placeholder_regions(pptx_path): + """Extract ALL text regions from the presentation. + + Returns a tuple of (placeholder_regions, slide_dimensions). + text_regions is a dict mapping slide indices to lists of text regions. + Each region is a dict with 'left', 'top', 'width', 'height' in inches. + slide_dimensions is a tuple of (width_inches, height_inches). + """ + prs = Presentation(str(pptx_path)) + inventory = extract_text_inventory(pptx_path, prs) + placeholder_regions = {} + + # Get actual slide dimensions in inches (EMU to inches conversion) + slide_width_inches = (prs.slide_width or 9144000) / 914400.0 + slide_height_inches = (prs.slide_height or 5143500) / 914400.0 + + for slide_key, shapes in inventory.items(): + # Extract slide index from "slide-N" format + slide_idx = int(slide_key.split("-")[1]) + regions = [] + + for shape_key, shape_data in shapes.items(): + # The inventory only contains shapes with text, so all shapes should be highlighted + regions.append( + { + "left": shape_data.left, + "top": shape_data.top, + "width": shape_data.width, + "height": shape_data.height, + } + ) + + if regions: + placeholder_regions[slide_idx] = regions + + return placeholder_regions, (slide_width_inches, slide_height_inches) + + +def convert_to_images(pptx_path, temp_dir, dpi): + """Convert PowerPoint to images via PDF, handling hidden slides.""" + # Detect hidden slides + print("Analyzing presentation...") + prs = Presentation(str(pptx_path)) + total_slides = len(prs.slides) + + # Find hidden slides (1-based indexing for display) + hidden_slides = { + idx + 1 + for idx, slide in enumerate(prs.slides) + if slide.element.get("show") == "0" + } + + print(f"Total slides: {total_slides}") + if hidden_slides: + print(f"Hidden slides: {sorted(hidden_slides)}") + + pdf_path = temp_dir / f"{pptx_path.stem}.pdf" + + # Convert to PDF + print("Converting to PDF...") + result = subprocess.run( + [ + "soffice", + "--headless", + "--convert-to", + "pdf", + "--outdir", + str(temp_dir), + str(pptx_path), + ], + capture_output=True, + text=True, + ) + if result.returncode != 0 or not pdf_path.exists(): + raise RuntimeError("PDF conversion failed") + + # Convert PDF to images + print(f"Converting to images at {dpi} DPI...") + result = subprocess.run( + ["pdftoppm", "-jpeg", "-r", str(dpi), str(pdf_path), str(temp_dir / "slide")], + capture_output=True, + text=True, + ) + if result.returncode != 0: + raise RuntimeError("Image conversion failed") + + visible_images = sorted(temp_dir.glob("slide-*.jpg")) + + # Create full list with placeholders for hidden slides + all_images = [] + visible_idx = 0 + + # Get placeholder dimensions from first visible slide + if visible_images: + with Image.open(visible_images[0]) as img: + placeholder_size = img.size + else: + placeholder_size = (1920, 1080) + + for slide_num in range(1, total_slides + 1): + if slide_num in hidden_slides: + # Create placeholder image for hidden slide + placeholder_path = temp_dir / 
f"hidden-{slide_num:03d}.jpg" + placeholder_img = create_hidden_slide_placeholder(placeholder_size) + placeholder_img.save(placeholder_path, "JPEG") + all_images.append(placeholder_path) + else: + # Use the actual visible slide image + if visible_idx < len(visible_images): + all_images.append(visible_images[visible_idx]) + visible_idx += 1 + + return all_images + + +def create_grids( + image_paths, + cols, + width, + output_path, + placeholder_regions=None, + slide_dimensions=None, +): + """Create multiple thumbnail grids from slide images, max cols×(cols+1) images per grid.""" + # Maximum images per grid is cols × (cols + 1) for better proportions + max_images_per_grid = cols * (cols + 1) + grid_files = [] + + print( + f"Creating grids with {cols} columns (max {max_images_per_grid} images per grid)" + ) + + # Split images into chunks + for chunk_idx, start_idx in enumerate( + range(0, len(image_paths), max_images_per_grid) + ): + end_idx = min(start_idx + max_images_per_grid, len(image_paths)) + chunk_images = image_paths[start_idx:end_idx] + + # Create grid for this chunk + grid = create_grid( + chunk_images, cols, width, start_idx, placeholder_regions, slide_dimensions + ) + + # Generate output filename + if len(image_paths) <= max_images_per_grid: + # Single grid - use base filename without suffix + grid_filename = output_path + else: + # Multiple grids - insert index before extension with dash + stem = output_path.stem + suffix = output_path.suffix + grid_filename = output_path.parent / f"{stem}-{chunk_idx + 1}{suffix}" + + # Save grid + grid_filename.parent.mkdir(parents=True, exist_ok=True) + grid.save(str(grid_filename), quality=JPEG_QUALITY) + grid_files.append(str(grid_filename)) + + return grid_files + + +def create_grid( + image_paths, + cols, + width, + start_slide_num=0, + placeholder_regions=None, + slide_dimensions=None, +): + """Create thumbnail grid from slide images with optional placeholder outlining.""" + font_size = int(width * FONT_SIZE_RATIO) + label_padding = int(font_size * LABEL_PADDING_RATIO) + + # Get dimensions + with Image.open(image_paths[0]) as img: + aspect = img.height / img.width + height = int(width * aspect) + + # Calculate grid size + rows = (len(image_paths) + cols - 1) // cols + grid_w = cols * width + (cols + 1) * GRID_PADDING + grid_h = rows * (height + font_size + label_padding * 2) + (rows + 1) * GRID_PADDING + + # Create grid + grid = Image.new("RGB", (grid_w, grid_h), "white") + draw = ImageDraw.Draw(grid) + + # Load font with size based on thumbnail width + try: + # Use Pillow's default font with size + font = ImageFont.load_default(size=font_size) + except Exception: + # Fall back to basic default font if size parameter not supported + font = ImageFont.load_default() + + # Place thumbnails + for i, img_path in enumerate(image_paths): + row, col = i // cols, i % cols + x = col * width + (col + 1) * GRID_PADDING + y_base = ( + row * (height + font_size + label_padding * 2) + (row + 1) * GRID_PADDING + ) + + # Add label with actual slide number + label = f"{start_slide_num + i}" + bbox = draw.textbbox((0, 0), label, font=font) + text_w = bbox[2] - bbox[0] + draw.text( + (x + (width - text_w) // 2, y_base + label_padding), + label, + fill="black", + font=font, + ) + + # Add thumbnail below label with proportional spacing + y_thumbnail = y_base + label_padding + font_size + label_padding + + with Image.open(img_path) as img: + # Get original dimensions before thumbnail + orig_w, orig_h = img.size + + # Apply placeholder outlines if enabled + if 
placeholder_regions and (start_slide_num + i) in placeholder_regions: + # Convert to RGBA for transparency support + if img.mode != "RGBA": + img = img.convert("RGBA") + + # Get the regions for this slide + regions = placeholder_regions[start_slide_num + i] + + # Calculate scale factors using actual slide dimensions + if slide_dimensions: + slide_width_inches, slide_height_inches = slide_dimensions + else: + # Fallback: estimate from image size at CONVERSION_DPI + slide_width_inches = orig_w / CONVERSION_DPI + slide_height_inches = orig_h / CONVERSION_DPI + + x_scale = orig_w / slide_width_inches + y_scale = orig_h / slide_height_inches + + # Create a highlight overlay + overlay = Image.new("RGBA", img.size, (255, 255, 255, 0)) + overlay_draw = ImageDraw.Draw(overlay) + + # Highlight each placeholder region + for region in regions: + # Convert from inches to pixels in the original image + px_left = int(region["left"] * x_scale) + px_top = int(region["top"] * y_scale) + px_width = int(region["width"] * x_scale) + px_height = int(region["height"] * y_scale) + + # Draw highlight outline with red color and thick stroke + # Using a bright red outline instead of fill + stroke_width = max( + 5, min(orig_w, orig_h) // 150 + ) # Thicker proportional stroke width + overlay_draw.rectangle( + [(px_left, px_top), (px_left + px_width, px_top + px_height)], + outline=(255, 0, 0, 255), # Bright red, fully opaque + width=stroke_width, + ) + + # Composite the overlay onto the image using alpha blending + img = Image.alpha_composite(img, overlay) + # Convert back to RGB for JPEG saving + img = img.convert("RGB") + + img.thumbnail((width, height), Image.Resampling.LANCZOS) + w, h = img.size + tx = x + (width - w) // 2 + ty = y_thumbnail + (height - h) // 2 + grid.paste(img, (tx, ty)) + + # Add border + if BORDER_WIDTH > 0: + draw.rectangle( + [ + (tx - BORDER_WIDTH, ty - BORDER_WIDTH), + (tx + w + BORDER_WIDTH - 1, ty + h + BORDER_WIDTH - 1), + ], + outline="gray", + width=BORDER_WIDTH, + ) + + return grid + + +if __name__ == "__main__": + main() diff --git a/skills/prompt-engineer/SKILL.md b/skills/prompt-engineer/SKILL.md new file mode 100644 index 000000000..8db72e642 --- /dev/null +++ b/skills/prompt-engineer/SKILL.md @@ -0,0 +1,370 @@ +--- +name: prompt-engineer +description: Use when creating prompts for AI platforms (Veo3, Midjourney, DALL-E, Flux, Stable Diffusion, Claude, ChatGPT, Gemini) or improving existing prompts for better results - provides platform-specific techniques, parameters, and best practices for video generation, image creation, and text-based AI tasks +--- + +# Prompt Engineer + +## Overview + +Expert prompt engineering knowledge for major AI platforms. Apply proven techniques and platform-specific best practices to create high-quality prompts for video generation (Veo3), image creation (Midjourney, DALL-E, Flux, Stable Diffusion), and conversational AI (Claude, ChatGPT, Gemini). 
+ +## When to Use + +**Use this skill when:** +- Creating prompts for video generation (Veo3) +- Writing image generation prompts (Midjourney, DALL-E, Flux, Stable Diffusion) +- Optimizing prompts for Claude, ChatGPT, or Gemini +- Results are generic, unclear, or missing key elements +- Need platform-specific parameters or syntax +- Improving existing prompts for better output + +**Don't use for:** +- Basic text generation without specific platform requirements +- Simple questions that don't need optimization + +## Video Generation: Veo3 (Google) + +### Optimal Prompt Structure +``` +[Shot Composition] + [Subject] + [Action] + [Setting] + [Aesthetics] +``` + +**Components:** +1. **Shot Composition**: Camera angle, position, movement, transitions + - "Dutch angle tracking shot", "Crane shot descending", "Handheld POV" + +2. **Subject**: Detailed character description + - Physical appearance, clothing, distinctive features + +3. **Action**: Subject-object interaction, physics, gestures + - Use "emotion chaining" for expressions: "smiling, then frowning, then laughing" + - Choreograph gestures: "waving hand, then pointing forward" + +4. **Setting**: Environmental backdrop, atmosphere + - Time of day, weather, location specifics + +5. **Aesthetics**: Visual style, lighting, mood + - Film stock: "shot on 35mm Kodak Vision3 film", "grainy 16mm look" + - Lighting: "golden hour", "harsh shadows", "soft diffused light" + +### Best Practices +- Be direct and specific for better results +- Chain emotions/gestures with "then" for sequences +- Allow emotional interpretation for natural performances (vague can work for emotion) +- Veo3 generates synchronized dialogue, sound effects, and music in one pass + +**Example:** +``` +Low-angle tracking shot of a confident woman in a red coat walking through +a bustling Tokyo street at night, neon signs reflecting in puddles. She +glances back with concern, then continues forward. Shot on 35mm Kodak +Vision3, cinematic lighting with bokeh background, golden hour glow. 
+``` + +## Image Generation + +### Midjourney (V7 - 2025) + +**Core Structure:** +``` +[Main Subject] + [Artistic Style] + [Context: Lighting/Colors/Background] + [Parameters] +``` + +**Essential Parameters:** +- `--ar :` - Aspect ratio (default 1:1) + - Common: `--ar 16:9`, `--ar 9:16`, `--ar 3:2` +- `--s <0-1000>` - Stylization (default 100) + - Lower = prompt-accurate, Higher = artistic +- `--chaos <0-100>` - Diversity of outputs + - Low = consistent, High = unpredictable/unique +- `--seed ` - Consistent style across generations + +**Advanced Techniques:** +- **Multi-prompts with weights**: `red car::2 blue sky::1` (emphasizes red car 2x) +- **Image references**: Upload image URL to influence style/composition +- **Commands**: + - `/blend` - Merge 2-5 images without text + - `/describe` - Reverse-engineer prompts from images + +**Best Practices:** +- Short, simple prompts work best +- Describe what you WANT (not what you don't) +- Use `--no ` to exclude elements + +**Example:** +``` +Majestic lion with flowing golden mane in savanna grassland, dramatic sunset +lighting, hyper-realistic, 8K detail, warm amber tones --ar 16:9 --s 150 --chaos 20 +``` + +### DALL-E 3 (OpenAI) + +**Core Principles:** +- **Be specific and detailed**: Include setting, objects, colors, mood, elements +- **Use positive prompts**: Focus on what you want (not what to exclude) +- **Balance specificity with creativity**: Too vague = generic, too detailed = constrained + +**Key Techniques:** +- **Describe mood**: Include actions, expressions, environments +- **Describe aesthetic**: Watercolor, sculpture, digital art, impressionism, photorealistic +- **Describe framing**: Dramatic, wide-angle, close-up, bird's-eye view +- **Prompt stacking**: Multiple requirements simultaneously + - "Create an image that is: 1) watercolor style, 2) forest scene, 3) mystical mood" + +**Best Practices:** +- Iterative refinement - first prompt rarely perfect +- ChatGPT integration helps refine prompts naturally +- Context is critical - more detail = better output + +**Example:** +``` +Close-up portrait of an elderly craftsman working on intricate wood carving, +dust particles floating in warm workshop lighting, weathered hands with +visible tool marks, shelves of finished work in soft-focus background, +photorealistic style with emphasis on texture and detail, golden afternoon light +``` + +### Stable Diffusion & Flux.1 + +**Stable Diffusion Structure:** +``` +[Subject] + [Descriptive Keywords] + [Modifiers] + [Negative Prompt] +``` + +**Keyword Weighting:** +- Syntax: `(keyword: 1.5)` = 150% weight +- Range: 0.4 to 1.6 optimal +- Example: `(detailed face: 1.4), (golden hair: 1.2), eyes` + +**Modifiers:** +- Quality: "detailed", "intricate", "8k", "hyperrealistic", "masterpiece" +- Lighting: "cinematic lighting", "volumetric", "rim lighting", "god rays" +- Style: "oil painting", "concept art", "photography", "digital art" + +**Negative Prompts (SD):** +``` +Negative: blurry, distorted, asymmetrical, low quality, artifacts, watermark +``` + +**Flux.1 Differences (2025):** +- **Natural language**: Speak conversationally, more descriptive +- **No negative prompts needed**: Describe what you want instead +- **No weight syntax support**: Use "with emphasis on X" or "with focus on Y" +- **Longer prompts supported**: Up to ~500 tokens +- **Left-to-right priority**: Keywords at start have higher priority + +**Stable Diffusion Example:** +``` +Portrait of cyberpunk hacker in neon-lit room, (detailed face: 1.4), +(holographic screens: 1.2), purple 
and blue lighting, intricate tech implants, +8k, hyperrealistic, cinematic composition +Negative: blurry, distorted, low quality, extra fingers, bad anatomy +``` + +**Flux.1 Example:** +``` +A detailed portrait of a cyberpunk hacker sitting in a dimly lit room filled +with glowing holographic screens displaying code, with emphasis on the intricate +facial cybernetic implants reflecting neon purple and blue light, ultra-detailed +realistic rendering with cinematic composition and atmospheric volumetric lighting +``` + +## Text-Based AI + +### Claude (Anthropic 4.x - 2025) + +**Core Techniques:** + +1. **XML Tags for Structure** (Claude-specific strength) +```xml + +Analyze the following code for security vulnerabilities + + + +[Your code here] + + + +- Focus on SQL injection risks +- Check authentication logic +- Identify data exposure issues + +``` + +2. **Clear Role Setting** + - Use system parameter for role definition + - Task-specific instructions in user turn + +3. **Chain of Thought** + - Add "Think step by step" to complex queries + - Enable extended thinking for multi-step reasoning + +4. **Provide Examples** + - Show input-output pairs for nuanced tasks + - Demonstrates pattern better than description + +5. **Be Specific and Explicit** + - Clear, unambiguous instructions + - State desired output format + +**Best Practices:** +- Test and iterate prompts +- Use Anthropic's prompt generator for production templates +- Leverage context awareness (Claude 4.5 tracks remaining context) + +**Example:** +```xml +You are an expert code reviewer specializing in Python security + + +Review the following authentication function for vulnerabilities. +Think step by step through potential security issues. + + + +def login(username, password): + query = f"SELECT * FROM users WHERE name='{username}' AND pass='{password}'" + return db.execute(query) + + + +1. List each vulnerability found +2. Explain the risk +3. Provide secure code alternative + +``` + +### ChatGPT / GPT-4 (OpenAI - 2025) + +**Core Techniques:** + +1. **Use Delimiters** + - Triple quotes, triple backticks, XML tags, angle brackets + - Prevents prompt injection, clarifies structure + +2. **Few-Shot Learning** + - Provide 2-5 examples of desired output + - Model adapts to pattern + +3. **Provide Grounding Data** + - Include context/data for reliable answers + - Critical for factual, up-to-date information + +4. **Break Down Complex Tasks** + - Split into manageable subtasks + - Sequential processing improves accuracy + +5. **Memory Features** + - GPT-4o maintains persistent memory + - Best for onboarding custom GPTs + +**Model-Specific:** +- GPT-4o: Short structured prompts with hashtags, numbered lists +- Use consistent delimiters throughout + +**Best Practices:** +- Clear structure beats clever wording +- Most failures come from ambiguity, not model limits +- Be clear, specific, provide context + +**Example:** +``` +Task: Extract key information from customer support transcripts + +Instructions: +1. Read the transcript below +2. Identify: customer issue, resolution status, sentiment +3. Format as JSON with keys: issue, status, sentiment, priority + +###TRANSCRIPT### +[Transcript content here] +###END### + +Expected output format: +{ + "issue": "brief description", + "status": "resolved|pending|escalated", + "sentiment": "positive|neutral|negative", + "priority": "low|medium|high" +} +``` + +### Google Gemini (2025) + +**PTCF Framework** (Best Practice): +``` +Persona + Task + Context + Format +``` + +**Components:** +1. 
**Persona**: Define Gemini's role + - "You are a financial analyst evaluating quarterly earnings" + +2. **Task**: What needs to be done + - "Summarize", "analyze", "create", "compare" + +3. **Context**: Background information + - Relevant data, constraints, goals + +4. **Format**: Desired output structure + - Table, bullet points, JSON, paragraph + +**2025 Features:** +- **Long context**: 1M token window (99% retrieval accuracy on structured data) +- **URL context**: Reads external web sources during API calls +- **Structured outputs**: `responseSchema` for strict JSON formatting + +**Best Practices:** +- Natural conversational language (speak to a person) +- Be specific yet concise +- Iterate with follow-up prompts +- Average 21 words for simple prompts (varies by complexity) +- Break complex problems into multiple requests + +**Example:** +``` +Persona: You are an experienced technical writer creating API documentation. + +Task: Write a comprehensive overview section for a REST API that manages +user authentication. + +Context: The API supports OAuth 2.0, JWT tokens, and has endpoints for +login, logout, token refresh, and password reset. Target audience is +backend developers with 2-5 years experience. + +Format: Start with a brief 2-3 sentence summary, followed by a bulleted +list of key features, then a "Quick Start" section with a code example +in Python. +``` + +## Common Mistakes Across Platforms + +| Mistake | Fix | +|---------|-----| +| Too vague: "make an image of a cat" | Be specific: "tabby cat sitting on windowsill, morning sunlight, watercolor style" | +| Negative descriptions (what NOT to include) | Positive descriptions (what to include) OR use platform-specific negative prompt syntax | +| Over-complicating with too many details | Balance: enough detail to guide, not constrain creativity | +| Ignoring platform-specific parameters | Learn and use parameters (--ar, --s, weights, XML tags) | +| Not iterating | First prompt rarely perfect - refine based on output | +| Forgetting context/grounding data | Provide relevant background for text AI tasks | +| Using same technique across platforms | Each platform has unique strengths - adapt accordingly | + +## Quick Reference + +### Choose Your Platform +| Use Case | Recommended Platform | Key Strength | +|----------|---------------------|--------------| +| Video with synchronized audio | Veo3 | Dialogue + sound + music in one pass | +| Artistic image, iterate style | Midjourney V7 | /describe and /blend commands | +| Natural language image prompts | DALL-E 3 | ChatGPT integration, iterative refinement | +| Technical control, specific weights | Stable Diffusion | Precise keyword weighting | +| Detailed scenes, natural prompts | Flux.1 | Long prompts, no negative needed | +| Structured text outputs, XML | Claude 4.x | XML tags, long context, thinking | +| Factual with grounding data | ChatGPT/GPT-4 | Few-shot learning, delimiters | +| Long-context document analysis | Gemini 2.5 Pro | 1M token window, 99% retrieval | + +## Real-World Impact + +Well-engineered prompts reduce iterations by 60-80%, saving time and API costs. Platform-specific techniques (XML for Claude, weights for SD, PTCF for Gemini) consistently outperform generic prompts. The difference between "create a video" and a properly structured Veo3 prompt with all 5 components can be 10+ iterations versus getting it right on attempt 1-2. 
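As a concrete illustration of the PTCF framework described above, here is a minimal sketch in plain Python (no API calls; the helper name and sample wording are placeholders, not part of any platform's SDK) that assembles the four components into a single prompt string:

```python
def build_ptcf_prompt(persona: str, task: str, context: str, output_format: str) -> str:
    """Assemble a Gemini-style PTCF prompt from its four components."""
    return (
        f"Persona: {persona}\n\n"
        f"Task: {task}\n\n"
        f"Context: {context}\n\n"
        f"Format: {output_format}"
    )

prompt = build_ptcf_prompt(
    persona="You are an experienced technical writer creating API documentation.",
    task="Write an overview section for a REST API that manages user authentication.",
    context="The API supports OAuth 2.0 and JWT tokens; the audience is backend developers.",
    output_format="A 2-3 sentence summary, a bulleted feature list, then a Python quick-start example.",
)
```

Keeping the components separate like this makes it easy to iterate on one piece (say, the Format) without rewriting the whole prompt.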
diff --git a/skills/slack-gif-creator/LICENSE.txt b/skills/slack-gif-creator/LICENSE.txt new file mode 100644 index 000000000..7a4a3ea24 --- /dev/null +++ b/skills/slack-gif-creator/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/skills/slack-gif-creator/SKILL.md b/skills/slack-gif-creator/SKILL.md new file mode 100644 index 000000000..5b1dcc44b --- /dev/null +++ b/skills/slack-gif-creator/SKILL.md @@ -0,0 +1,646 @@ +--- +name: slack-gif-creator +description: Toolkit for creating animated GIFs optimized for Slack, with validators for size constraints and composable animation primitives. This skill applies when users request animated GIFs or emoji animations for Slack from descriptions like "make me a GIF for Slack of X doing Y". +license: Complete terms in LICENSE.txt +--- + +# Slack GIF Creator - Flexible Toolkit + +A toolkit for creating animated GIFs optimized for Slack. Provides validators for Slack's constraints, composable animation primitives, and optional helper utilities. **Apply these tools however needed to achieve the creative vision.** + +## Slack's Requirements + +Slack has specific requirements for GIFs based on their use: + +**Message GIFs:** +- Max size: ~2MB +- Optimal dimensions: 480x480 +- Typical FPS: 15-20 +- Color limit: 128-256 +- Duration: 2-5s + +**Emoji GIFs:** +- Max size: 64KB (strict limit) +- Optimal dimensions: 128x128 +- Typical FPS: 10-12 +- Color limit: 32-48 +- Duration: 1-2s + +**Emoji GIFs are challenging** - the 64KB limit is strict. Strategies that help: +- Limit to 10-15 frames total +- Use 32-48 colors maximum +- Keep designs simple +- Avoid gradients +- Validate file size frequently + +## Toolkit Structure + +This skill provides three types of tools: + +1. **Validators** - Check if a GIF meets Slack's requirements +2. **Animation Primitives** - Composable building blocks for motion (shake, bounce, move, kaleidoscope) +3. **Helper Utilities** - Optional functions for common needs (text, colors, effects) + +**Complete creative freedom is available in how these tools are applied.** + +## Core Validators + +To ensure a GIF meets Slack's constraints, use these validators: + +```python +from core.gif_builder import GIFBuilder + +# After creating your GIF, check if it meets requirements +builder = GIFBuilder(width=128, height=128, fps=10) +# ... add your frames however you want ... 
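# For instance, a minimal placeholder (solid-color frames stand in for real artwork;
# GIFBuilder.add_frame accepts PIL Images, as in the composition examples below):
from PIL import Image
for _ in range(12):
    builder.add_frame(Image.new('RGB', (128, 128), (240, 248, 255)))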
+ +# Save and check size +info = builder.save('emoji.gif', num_colors=48, optimize_for_emoji=True) + +# The save method automatically warns if file exceeds limits +# info dict contains: size_kb, size_mb, frame_count, duration_seconds +``` + +**File size validator**: +```python +from core.validators import check_slack_size + +# Check if GIF meets size limits +passes, info = check_slack_size('emoji.gif', is_emoji=True) +# Returns: (True/False, dict with size details) +``` + +**Dimension validator**: +```python +from core.validators import validate_dimensions + +# Check dimensions +passes, info = validate_dimensions(128, 128, is_emoji=True) +# Returns: (True/False, dict with dimension details) +``` + +**Complete validation**: +```python +from core.validators import validate_gif, is_slack_ready + +# Run all validations +all_pass, results = validate_gif('emoji.gif', is_emoji=True) + +# Or quick check +if is_slack_ready('emoji.gif', is_emoji=True): + print("Ready to upload!") +``` + +## Animation Primitives + +These are composable building blocks for motion. Apply these to any object in any combination: + +### Shake +```python +from templates.shake import create_shake_animation + +# Shake an emoji +frames = create_shake_animation( + object_type='emoji', + object_data={'emoji': '😱', 'size': 80}, + num_frames=20, + shake_intensity=15, + direction='both' # or 'horizontal', 'vertical' +) +``` + +### Bounce +```python +from templates.bounce import create_bounce_animation + +# Bounce a circle +frames = create_bounce_animation( + object_type='circle', + object_data={'radius': 40, 'color': (255, 100, 100)}, + num_frames=30, + bounce_height=150 +) +``` + +### Spin / Rotate +```python +from templates.spin import create_spin_animation, create_loading_spinner + +# Clockwise spin +frames = create_spin_animation( + object_type='emoji', + object_data={'emoji': '🔄', 'size': 100}, + rotation_type='clockwise', + full_rotations=2 +) + +# Wobble rotation +frames = create_spin_animation(rotation_type='wobble', full_rotations=3) + +# Loading spinner +frames = create_loading_spinner(spinner_type='dots') +``` + +### Pulse / Heartbeat +```python +from templates.pulse import create_pulse_animation, create_attention_pulse + +# Smooth pulse +frames = create_pulse_animation( + object_data={'emoji': '❤️', 'size': 100}, + pulse_type='smooth', + scale_range=(0.8, 1.2) +) + +# Heartbeat (double-pump) +frames = create_pulse_animation(pulse_type='heartbeat') + +# Attention pulse for emoji GIFs +frames = create_attention_pulse(emoji='⚠️', num_frames=20) +``` + +### Fade +```python +from templates.fade import create_fade_animation, create_crossfade + +# Fade in +frames = create_fade_animation(fade_type='in') + +# Fade out +frames = create_fade_animation(fade_type='out') + +# Crossfade between two emojis +frames = create_crossfade( + object1_data={'emoji': '😊', 'size': 100}, + object2_data={'emoji': '😂', 'size': 100} +) +``` + +### Zoom +```python +from templates.zoom import create_zoom_animation, create_explosion_zoom + +# Zoom in dramatically +frames = create_zoom_animation( + zoom_type='in', + scale_range=(0.1, 2.0), + add_motion_blur=True +) + +# Zoom out +frames = create_zoom_animation(zoom_type='out') + +# Explosion zoom +frames = create_explosion_zoom(emoji='💥') +``` + +### Explode / Shatter +```python +from templates.explode import create_explode_animation, create_particle_burst + +# Burst explosion +frames = create_explode_animation( + explode_type='burst', + num_pieces=25 +) + +# Shatter effect +frames = 
create_explode_animation(explode_type='shatter') + +# Dissolve into particles +frames = create_explode_animation(explode_type='dissolve') + +# Particle burst +frames = create_particle_burst(particle_count=30) +``` + +### Wiggle / Jiggle +```python +from templates.wiggle import create_wiggle_animation, create_excited_wiggle + +# Jello wobble +frames = create_wiggle_animation( + wiggle_type='jello', + intensity=1.0, + cycles=2 +) + +# Wave motion +frames = create_wiggle_animation(wiggle_type='wave') + +# Excited wiggle for emoji GIFs +frames = create_excited_wiggle(emoji='🎉') +``` + +### Slide +```python +from templates.slide import create_slide_animation, create_multi_slide + +# Slide in from left with overshoot +frames = create_slide_animation( + direction='left', + slide_type='in', + overshoot=True +) + +# Slide across +frames = create_slide_animation(direction='left', slide_type='across') + +# Multiple objects sliding in sequence +objects = [ + {'data': {'emoji': '🎯', 'size': 60}, 'direction': 'left', 'final_pos': (120, 240)}, + {'data': {'emoji': '🎪', 'size': 60}, 'direction': 'right', 'final_pos': (240, 240)} +] +frames = create_multi_slide(objects, stagger_delay=5) +``` + +### Flip +```python +from templates.flip import create_flip_animation, create_quick_flip + +# Horizontal flip between two emojis +frames = create_flip_animation( + object1_data={'emoji': '😊', 'size': 120}, + object2_data={'emoji': '😂', 'size': 120}, + flip_axis='horizontal' +) + +# Vertical flip +frames = create_flip_animation(flip_axis='vertical') + +# Quick flip for emoji GIFs +frames = create_quick_flip('👍', '👎') +``` + +### Morph / Transform +```python +from templates.morph import create_morph_animation, create_reaction_morph + +# Crossfade morph +frames = create_morph_animation( + object1_data={'emoji': '😊', 'size': 100}, + object2_data={'emoji': '😂', 'size': 100}, + morph_type='crossfade' +) + +# Scale morph (shrink while other grows) +frames = create_morph_animation(morph_type='scale') + +# Spin morph (3D flip-like) +frames = create_morph_animation(morph_type='spin_morph') +``` + +### Move Effect +```python +from templates.move import create_move_animation + +# Linear movement +frames = create_move_animation( + object_type='emoji', + object_data={'emoji': '🚀', 'size': 60}, + start_pos=(50, 240), + end_pos=(430, 240), + motion_type='linear', + easing='ease_out' +) + +# Arc movement (parabolic trajectory) +frames = create_move_animation( + object_type='emoji', + object_data={'emoji': '⚽', 'size': 60}, + start_pos=(50, 350), + end_pos=(430, 350), + motion_type='arc', + motion_params={'arc_height': 150} +) + +# Circular movement +frames = create_move_animation( + object_type='emoji', + object_data={'emoji': '🌍', 'size': 50}, + motion_type='circle', + motion_params={ + 'center': (240, 240), + 'radius': 120, + 'angle_range': 360 # full circle + } +) + +# Wave movement +frames = create_move_animation( + motion_type='wave', + motion_params={ + 'wave_amplitude': 50, + 'wave_frequency': 2 + } +) + +# Or use low-level easing functions +from core.easing import interpolate, calculate_arc_motion + +for i in range(num_frames): + t = i / (num_frames - 1) + x = interpolate(start_x, end_x, t, easing='ease_out') + # Or: x, y = calculate_arc_motion(start, end, height, t) +``` + +### Kaleidoscope Effect +```python +from templates.kaleidoscope import apply_kaleidoscope, create_kaleidoscope_animation + +# Apply to a single frame +kaleido_frame = apply_kaleidoscope(frame, segments=8) + +# Or create animated kaleidoscope +frames = 
create_kaleidoscope_animation( + base_frame=my_frame, # or None for demo pattern + num_frames=30, + segments=8, + rotation_speed=1.0 +) + +# Simple mirror effects (faster) +from templates.kaleidoscope import apply_simple_mirror + +mirrored = apply_simple_mirror(frame, mode='quad') # 4-way mirror +# modes: 'horizontal', 'vertical', 'quad', 'radial' +``` + +**To compose primitives freely, follow these patterns:** +```python +# Example: Bounce + shake for impact +for i in range(num_frames): + frame = create_blank_frame(480, 480, bg_color) + + # Bounce motion + t_bounce = i / (num_frames - 1) + y = interpolate(start_y, ground_y, t_bounce, 'bounce_out') + + # Add shake on impact (when y reaches ground) + if y >= ground_y - 5: + shake_x = math.sin(i * 2) * 10 + x = center_x + shake_x + else: + x = center_x + + draw_emoji(frame, '⚽', (x, y), size=60) + builder.add_frame(frame) +``` + +## Helper Utilities + +These are optional helpers for common needs. **Use, modify, or replace these with custom implementations as needed.** + +### GIF Builder (Assembly & Optimization) + +```python +from core.gif_builder import GIFBuilder + +# Create builder with your chosen settings +builder = GIFBuilder(width=480, height=480, fps=20) + +# Add frames (however you created them) +for frame in my_frames: + builder.add_frame(frame) + +# Save with optimization +builder.save('output.gif', + num_colors=128, + optimize_for_emoji=False) +``` + +Key features: +- Automatic color quantization +- Duplicate frame removal +- Size warnings for Slack limits +- Emoji mode (aggressive optimization) + +### Text Rendering + +For small GIFs like emojis, text readability is challenging. A common solution involves adding outlines: + +```python +from core.typography import draw_text_with_outline, TYPOGRAPHY_SCALE + +# Text with outline (helps readability) +draw_text_with_outline( + frame, "BONK!", + position=(240, 100), + font_size=TYPOGRAPHY_SCALE['h1'], # 60px + text_color=(255, 68, 68), + outline_color=(0, 0, 0), + outline_width=4, + centered=True +) +``` + +To implement custom text rendering, use PIL's `ImageDraw.text()` which works fine for larger GIFs. + +### Color Management + +Professional-looking GIFs often use cohesive color palettes: + +```python +from core.color_palettes import get_palette + +# Get a pre-made palette +palette = get_palette('vibrant') # or 'pastel', 'dark', 'neon', 'professional' + +bg_color = palette['background'] +text_color = palette['primary'] +accent_color = palette['accent'] +``` + +To work with colors directly, use RGB tuples - whatever works for the use case. 
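When a palette alone is not enough, `core/color_palettes.py` also includes small helpers for contrast and blending. A brief sketch of how they combine (the palette name and specific colors here are arbitrary):

```python
from core.color_palettes import (
    get_palette, get_text_color_for_background, lighten_color, blend_colors
)

palette = get_palette('warm')
bg_color = lighten_color(palette['background'], amount=0.2)      # soften the backdrop
text_color = get_text_color_for_background(bg_color)             # black or white, whichever contrasts better
accent_color = blend_colors(palette['primary'], palette['accent'], ratio=0.5)  # midpoint of two palette colors
```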
+ +### Visual Effects + +Optional effects for impact moments: + +```python +from core.visual_effects import ParticleSystem, create_impact_flash, create_shockwave_rings + +# Particle system +particles = ParticleSystem() +particles.emit_sparkles(x=240, y=200, count=15) +particles.emit_confetti(x=240, y=200, count=20) + +# Update and render each frame +particles.update() +particles.render(frame) + +# Flash effect +frame = create_impact_flash(frame, position=(240, 200), radius=100) + +# Shockwave rings +frame = create_shockwave_rings(frame, position=(240, 200), radii=[30, 60, 90]) +``` + +### Easing Functions + +Smooth motion uses easing instead of linear interpolation: + +```python +from core.easing import interpolate + +# Object falling (accelerates) +y = interpolate(start=0, end=400, t=progress, easing='ease_in') + +# Object landing (decelerates) +y = interpolate(start=0, end=400, t=progress, easing='ease_out') + +# Bouncing +y = interpolate(start=0, end=400, t=progress, easing='bounce_out') + +# Overshoot (elastic) +scale = interpolate(start=0.5, end=1.0, t=progress, easing='elastic_out') +``` + +Available easings: `linear`, `ease_in`, `ease_out`, `ease_in_out`, `bounce_out`, `elastic_out`, `back_out` (overshoot), and more in `core/easing.py`. + +### Frame Composition + +Basic drawing utilities if you need them: + +```python +from core.frame_composer import ( + create_gradient_background, # Gradient backgrounds + draw_emoji_enhanced, # Emoji with optional shadow + draw_circle_with_shadow, # Shapes with depth + draw_star # 5-pointed stars +) + +# Gradient background +frame = create_gradient_background(480, 480, top_color, bottom_color) + +# Emoji with shadow +draw_emoji_enhanced(frame, '🎉', position=(200, 200), size=80, shadow=True) +``` + +## Optimization Strategies + +When your GIF is too large: + +**For Message GIFs (>2MB):** +1. Reduce frames (lower FPS or shorter duration) +2. Reduce colors (128 → 64 colors) +3. Reduce dimensions (480x480 → 320x320) +4. Enable duplicate frame removal + +**For Emoji GIFs (>64KB) - be aggressive:** +1. Limit to 10-12 frames total +2. Use 32-40 colors maximum +3. Avoid gradients (solid colors compress better) +4. Simplify design (fewer elements) +5. 
Use `optimize_for_emoji=True` in save method + +## Example Composition Patterns + +### Simple Reaction (Pulsing) +```python +builder = GIFBuilder(128, 128, 10) + +for i in range(12): + frame = Image.new('RGB', (128, 128), (240, 248, 255)) + + # Pulsing scale + scale = 1.0 + math.sin(i * 0.5) * 0.15 + size = int(60 * scale) + + draw_emoji_enhanced(frame, '😱', position=(64-size//2, 64-size//2), + size=size, shadow=False) + builder.add_frame(frame) + +builder.save('reaction.gif', num_colors=40, optimize_for_emoji=True) + +# Validate +from core.validators import check_slack_size +check_slack_size('reaction.gif', is_emoji=True) +``` + +### Action with Impact (Bounce + Flash) +```python +builder = GIFBuilder(480, 480, 20) + +# Phase 1: Object falls +for i in range(15): + frame = create_gradient_background(480, 480, (240, 248, 255), (200, 230, 255)) + t = i / 14 + y = interpolate(0, 350, t, 'ease_in') + draw_emoji_enhanced(frame, '⚽', position=(220, int(y)), size=80) + builder.add_frame(frame) + +# Phase 2: Impact + flash +for i in range(8): + frame = create_gradient_background(480, 480, (240, 248, 255), (200, 230, 255)) + + # Flash on first frames + if i < 3: + frame = create_impact_flash(frame, (240, 350), radius=120, intensity=0.6) + + draw_emoji_enhanced(frame, '⚽', position=(220, 350), size=80) + + # Text appears + if i > 2: + draw_text_with_outline(frame, "GOAL!", position=(240, 150), + font_size=60, text_color=(255, 68, 68), + outline_color=(0, 0, 0), outline_width=4, centered=True) + + builder.add_frame(frame) + +builder.save('goal.gif', num_colors=128) +``` + +### Combining Primitives (Move + Shake) +```python +from templates.shake import create_shake_animation + +# Create shake animation +shake_frames = create_shake_animation( + object_type='emoji', + object_data={'emoji': '😰', 'size': 70}, + num_frames=20, + shake_intensity=12 +) + +# Create moving element that triggers the shake +builder = GIFBuilder(480, 480, 20) +for i in range(40): + t = i / 39 + + if i < 20: + # Before trigger - use blank frame with moving object + frame = create_blank_frame(480, 480, (255, 255, 255)) + x = interpolate(50, 300, t * 2, 'linear') + draw_emoji_enhanced(frame, '🚗', position=(int(x), 300), size=60) + draw_emoji_enhanced(frame, '😰', position=(350, 200), size=70) + else: + # After trigger - use shake frame + frame = shake_frames[i - 20] + # Add the car in final position + draw_emoji_enhanced(frame, '🚗', position=(300, 300), size=60) + + builder.add_frame(frame) + +builder.save('scare.gif') +``` + +## Philosophy + +This toolkit provides building blocks, not rigid recipes. To work with a GIF request: + +1. **Understand the creative vision** - What should happen? What's the mood? +2. **Design the animation** - Break it into phases (anticipation, action, reaction) +3. **Apply primitives as needed** - Shake, bounce, move, effects - mix freely +4. **Validate constraints** - Check file size, especially for emoji GIFs +5. 
**Iterate if needed** - Reduce frames/colors if over size limits + +**The goal is creative freedom within Slack's technical constraints.** + +## Dependencies + +To use this toolkit, install these dependencies only if they aren't already present: + +```bash +pip install pillow imageio numpy +``` diff --git a/skills/slack-gif-creator/core/color_palettes.py b/skills/slack-gif-creator/core/color_palettes.py new file mode 100755 index 000000000..8593ad722 --- /dev/null +++ b/skills/slack-gif-creator/core/color_palettes.py @@ -0,0 +1,302 @@ +#!/usr/bin/env python3 +""" +Color Palettes - Professional, harmonious color schemes for GIFs. + +Using consistent, well-designed color palettes makes GIFs look professional +and polished instead of random and amateurish. +""" + +from typing import Optional +import colorsys + + +# Professional color palettes - hand-picked for GIF compression and visual appeal + +VIBRANT = { + 'primary': (255, 68, 68), # Bright red + 'secondary': (255, 168, 0), # Bright orange + 'accent': (0, 168, 255), # Bright blue + 'success': (68, 255, 68), # Bright green + 'background': (240, 248, 255), # Alice blue + 'text': (30, 30, 30), # Almost black + 'text_light': (255, 255, 255), # White +} + +PASTEL = { + 'primary': (255, 179, 186), # Pastel pink + 'secondary': (255, 223, 186), # Pastel peach + 'accent': (186, 225, 255), # Pastel blue + 'success': (186, 255, 201), # Pastel green + 'background': (255, 250, 240), # Floral white + 'text': (80, 80, 80), # Dark gray + 'text_light': (255, 255, 255), # White +} + +DARK = { + 'primary': (255, 100, 100), # Muted red + 'secondary': (100, 200, 255), # Muted blue + 'accent': (255, 200, 100), # Muted gold + 'success': (100, 255, 150), # Muted green + 'background': (30, 30, 35), # Almost black + 'text': (220, 220, 220), # Light gray + 'text_light': (255, 255, 255), # White +} + +NEON = { + 'primary': (255, 16, 240), # Neon pink + 'secondary': (0, 255, 255), # Cyan + 'accent': (255, 255, 0), # Yellow + 'success': (57, 255, 20), # Neon green + 'background': (20, 20, 30), # Dark blue-black + 'text': (255, 255, 255), # White + 'text_light': (255, 255, 255), # White +} + +PROFESSIONAL = { + 'primary': (0, 122, 255), # System blue + 'secondary': (88, 86, 214), # System purple + 'accent': (255, 149, 0), # System orange + 'success': (52, 199, 89), # System green + 'background': (255, 255, 255), # White + 'text': (0, 0, 0), # Black + 'text_light': (255, 255, 255), # White +} + +WARM = { + 'primary': (255, 107, 107), # Coral red + 'secondary': (255, 159, 64), # Orange + 'accent': (255, 218, 121), # Yellow + 'success': (106, 176, 76), # Olive green + 'background': (255, 246, 229), # Warm white + 'text': (51, 51, 51), # Charcoal + 'text_light': (255, 255, 255), # White +} + +COOL = { + 'primary': (107, 185, 240), # Sky blue + 'secondary': (130, 202, 157), # Mint + 'accent': (162, 155, 254), # Lavender + 'success': (86, 217, 150), # Aqua green + 'background': (240, 248, 255), # Alice blue + 'text': (45, 55, 72), # Dark slate + 'text_light': (255, 255, 255), # White +} + +MONOCHROME = { + 'primary': (80, 80, 80), # Dark gray + 'secondary': (130, 130, 130), # Medium gray + 'accent': (180, 180, 180), # Light gray + 'success': (100, 100, 100), # Gray + 'background': (245, 245, 245), # Off-white + 'text': (30, 30, 30), # Almost black + 'text_light': (255, 255, 255), # White +} + +# Map of palette names +PALETTES = { + 'vibrant': VIBRANT, + 'pastel': PASTEL, + 'dark': DARK, + 'neon': NEON, + 'professional': PROFESSIONAL, + 'warm': WARM, + 'cool': COOL, + 
'monochrome': MONOCHROME, +} + + +def get_palette(name: str = 'vibrant') -> dict: + """ + Get a color palette by name. + + Args: + name: Palette name (vibrant, pastel, dark, neon, professional, warm, cool, monochrome) + + Returns: + Dictionary of color roles to RGB tuples + """ + return PALETTES.get(name.lower(), VIBRANT) + + +def get_text_color_for_background(bg_color: tuple[int, int, int]) -> tuple[int, int, int]: + """ + Get the best text color (black or white) for a given background. + + Uses luminance calculation to ensure readability. + + Args: + bg_color: Background RGB color + + Returns: + Text color (black or white) that contrasts well + """ + # Calculate relative luminance + r, g, b = bg_color + luminance = (0.299 * r + 0.587 * g + 0.114 * b) / 255 + + # Return black for light backgrounds, white for dark + return (0, 0, 0) if luminance > 0.5 else (255, 255, 255) + + +def get_complementary_color(color: tuple[int, int, int]) -> tuple[int, int, int]: + """ + Get the complementary (opposite) color on the color wheel. + + Args: + color: RGB color tuple + + Returns: + Complementary RGB color + """ + # Convert to HSV + r, g, b = [x / 255.0 for x in color] + h, s, v = colorsys.rgb_to_hsv(r, g, b) + + # Rotate hue by 180 degrees (0.5 in 0-1 scale) + h_comp = (h + 0.5) % 1.0 + + # Convert back to RGB + r_comp, g_comp, b_comp = colorsys.hsv_to_rgb(h_comp, s, v) + return (int(r_comp * 255), int(g_comp * 255), int(b_comp * 255)) + + +def lighten_color(color: tuple[int, int, int], amount: float = 0.3) -> tuple[int, int, int]: + """ + Lighten a color by a given amount. + + Args: + color: RGB color tuple + amount: Amount to lighten (0.0-1.0) + + Returns: + Lightened RGB color + """ + r, g, b = color + r = min(255, int(r + (255 - r) * amount)) + g = min(255, int(g + (255 - g) * amount)) + b = min(255, int(b + (255 - b) * amount)) + return (r, g, b) + + +def darken_color(color: tuple[int, int, int], amount: float = 0.3) -> tuple[int, int, int]: + """ + Darken a color by a given amount. + + Args: + color: RGB color tuple + amount: Amount to darken (0.0-1.0) + + Returns: + Darkened RGB color + """ + r, g, b = color + r = max(0, int(r * (1 - amount))) + g = max(0, int(g * (1 - amount))) + b = max(0, int(b * (1 - amount))) + return (r, g, b) + + +def blend_colors(color1: tuple[int, int, int], color2: tuple[int, int, int], + ratio: float = 0.5) -> tuple[int, int, int]: + """ + Blend two colors together. + + Args: + color1: First RGB color + color2: Second RGB color + ratio: Blend ratio (0.0 = all color1, 1.0 = all color2) + + Returns: + Blended RGB color + """ + r1, g1, b1 = color1 + r2, g2, b2 = color2 + + r = int(r1 * (1 - ratio) + r2 * ratio) + g = int(g1 * (1 - ratio) + g2 * ratio) + b = int(b1 * (1 - ratio) + b2 * ratio) + + return (r, g, b) + + +def create_gradient_colors(start_color: tuple[int, int, int], + end_color: tuple[int, int, int], + steps: int) -> list[tuple[int, int, int]]: + """ + Create a gradient of colors between two colors. 
+ + Args: + start_color: Starting RGB color + end_color: Ending RGB color + steps: Number of gradient steps + + Returns: + List of RGB colors forming gradient + """ + colors = [] + for i in range(steps): + ratio = i / (steps - 1) if steps > 1 else 0 + colors.append(blend_colors(start_color, end_color, ratio)) + return colors + + +# Impact/emphasis colors that work well across palettes +IMPACT_COLORS = { + 'flash': (255, 255, 240), # Bright flash (cream) + 'explosion': (255, 150, 0), # Orange explosion + 'electricity': (100, 200, 255), # Electric blue + 'fire': (255, 100, 0), # Fire orange-red + 'success': (50, 255, 100), # Success green + 'error': (255, 50, 50), # Error red + 'warning': (255, 200, 0), # Warning yellow + 'magic': (200, 100, 255), # Magic purple +} + + +def get_impact_color(effect_type: str = 'flash') -> tuple[int, int, int]: + """ + Get a color for impact/emphasis effects. + + Args: + effect_type: Type of effect (flash, explosion, electricity, etc.) + + Returns: + RGB color for effect + """ + return IMPACT_COLORS.get(effect_type, IMPACT_COLORS['flash']) + + +# Emoji-safe palettes (work well at 128x128 with 32-64 colors) +EMOJI_PALETTES = { + 'simple': [ + (255, 255, 255), # White + (0, 0, 0), # Black + (255, 100, 100), # Red + (100, 255, 100), # Green + (100, 100, 255), # Blue + (255, 255, 100), # Yellow + ], + 'vibrant_emoji': [ + (255, 255, 255), # White + (30, 30, 30), # Black + (255, 68, 68), # Red + (68, 255, 68), # Green + (68, 68, 255), # Blue + (255, 200, 68), # Gold + (255, 68, 200), # Pink + (68, 255, 200), # Cyan + ] +} + + +def get_emoji_palette(name: str = 'simple') -> list[tuple[int, int, int]]: + """ + Get a limited color palette optimized for emoji GIFs (<64KB). + + Args: + name: Palette name (simple, vibrant_emoji) + + Returns: + List of RGB colors (6-8 colors) + """ + return EMOJI_PALETTES.get(name, EMOJI_PALETTES['simple']) \ No newline at end of file diff --git a/skills/slack-gif-creator/core/easing.py b/skills/slack-gif-creator/core/easing.py new file mode 100755 index 000000000..53ef88849 --- /dev/null +++ b/skills/slack-gif-creator/core/easing.py @@ -0,0 +1,230 @@ +#!/usr/bin/env python3 +""" +Easing Functions - Timing functions for smooth animations. + +Provides various easing functions for natural motion and timing. +All functions take a value t (0.0 to 1.0) and return eased value (0.0 to 1.0). 
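# --- Usage sketch (illustrative, not part of the module) ---
# A minimal example of how the palette helpers above are meant to compose,
# assuming the module is importable as core.color_palettes:
#
#   from core.color_palettes import (
#       get_palette, get_text_color_for_background,
#       blend_colors, create_gradient_colors, get_impact_color,
#   )
#
#   palette = get_palette('warm')                          # dict of role -> RGB
#   bg = palette['background']
#   text = get_text_color_for_background(bg)               # black or white, by luminance
#   flash = get_impact_color('explosion')                  # (255, 150, 0)
#   mid = blend_colors(palette['primary'], palette['secondary'], 0.5)
#   ramp = create_gradient_colors(palette['primary'],
#                                 palette['accent'], 10)   # 10 blended RGB tuples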
+""" + +import math + + +def linear(t: float) -> float: + """Linear interpolation (no easing).""" + return t + + +def ease_in_quad(t: float) -> float: + """Quadratic ease-in (slow start, accelerating).""" + return t * t + + +def ease_out_quad(t: float) -> float: + """Quadratic ease-out (fast start, decelerating).""" + return t * (2 - t) + + +def ease_in_out_quad(t: float) -> float: + """Quadratic ease-in-out (slow start and end).""" + if t < 0.5: + return 2 * t * t + return -1 + (4 - 2 * t) * t + + +def ease_in_cubic(t: float) -> float: + """Cubic ease-in (slow start).""" + return t * t * t + + +def ease_out_cubic(t: float) -> float: + """Cubic ease-out (fast start).""" + return (t - 1) * (t - 1) * (t - 1) + 1 + + +def ease_in_out_cubic(t: float) -> float: + """Cubic ease-in-out.""" + if t < 0.5: + return 4 * t * t * t + return (t - 1) * (2 * t - 2) * (2 * t - 2) + 1 + + +def ease_in_bounce(t: float) -> float: + """Bounce ease-in (bouncy start).""" + return 1 - ease_out_bounce(1 - t) + + +def ease_out_bounce(t: float) -> float: + """Bounce ease-out (bouncy end).""" + if t < 1 / 2.75: + return 7.5625 * t * t + elif t < 2 / 2.75: + t -= 1.5 / 2.75 + return 7.5625 * t * t + 0.75 + elif t < 2.5 / 2.75: + t -= 2.25 / 2.75 + return 7.5625 * t * t + 0.9375 + else: + t -= 2.625 / 2.75 + return 7.5625 * t * t + 0.984375 + + +def ease_in_out_bounce(t: float) -> float: + """Bounce ease-in-out.""" + if t < 0.5: + return ease_in_bounce(t * 2) * 0.5 + return ease_out_bounce(t * 2 - 1) * 0.5 + 0.5 + + +def ease_in_elastic(t: float) -> float: + """Elastic ease-in (spring effect).""" + if t == 0 or t == 1: + return t + return -math.pow(2, 10 * (t - 1)) * math.sin((t - 1.1) * 5 * math.pi) + + +def ease_out_elastic(t: float) -> float: + """Elastic ease-out (spring effect).""" + if t == 0 or t == 1: + return t + return math.pow(2, -10 * t) * math.sin((t - 0.1) * 5 * math.pi) + 1 + + +def ease_in_out_elastic(t: float) -> float: + """Elastic ease-in-out.""" + if t == 0 or t == 1: + return t + t = t * 2 - 1 + if t < 0: + return -0.5 * math.pow(2, 10 * t) * math.sin((t - 0.1) * 5 * math.pi) + return math.pow(2, -10 * t) * math.sin((t - 0.1) * 5 * math.pi) * 0.5 + 1 + + +# Convenience mapping +EASING_FUNCTIONS = { + 'linear': linear, + 'ease_in': ease_in_quad, + 'ease_out': ease_out_quad, + 'ease_in_out': ease_in_out_quad, + 'bounce_in': ease_in_bounce, + 'bounce_out': ease_out_bounce, + 'bounce': ease_in_out_bounce, + 'elastic_in': ease_in_elastic, + 'elastic_out': ease_out_elastic, + 'elastic': ease_in_out_elastic, +} + + +def get_easing(name: str = 'linear'): + """Get easing function by name.""" + return EASING_FUNCTIONS.get(name, linear) + + +def interpolate(start: float, end: float, t: float, easing: str = 'linear') -> float: + """ + Interpolate between two values with easing. 
+ + Args: + start: Start value + end: End value + t: Progress from 0.0 to 1.0 + easing: Name of easing function + + Returns: + Interpolated value + """ + ease_func = get_easing(easing) + eased_t = ease_func(t) + return start + (end - start) * eased_t + + +def ease_back_in(t: float) -> float: + """Back ease-in (slight overshoot backward before forward motion).""" + c1 = 1.70158 + c3 = c1 + 1 + return c3 * t * t * t - c1 * t * t + + +def ease_back_out(t: float) -> float: + """Back ease-out (overshoot forward then settle back).""" + c1 = 1.70158 + c3 = c1 + 1 + return 1 + c3 * pow(t - 1, 3) + c1 * pow(t - 1, 2) + + +def ease_back_in_out(t: float) -> float: + """Back ease-in-out (overshoot at both ends).""" + c1 = 1.70158 + c2 = c1 * 1.525 + if t < 0.5: + return (pow(2 * t, 2) * ((c2 + 1) * 2 * t - c2)) / 2 + return (pow(2 * t - 2, 2) * ((c2 + 1) * (t * 2 - 2) + c2) + 2) / 2 + + +def apply_squash_stretch(base_scale: tuple[float, float], intensity: float, + direction: str = 'vertical') -> tuple[float, float]: + """ + Calculate squash and stretch scales for more dynamic animation. + + Args: + base_scale: (width_scale, height_scale) base scales + intensity: Squash/stretch intensity (0.0-1.0) + direction: 'vertical', 'horizontal', or 'both' + + Returns: + (width_scale, height_scale) with squash/stretch applied + """ + width_scale, height_scale = base_scale + + if direction == 'vertical': + # Compress vertically, expand horizontally (preserve volume) + height_scale *= (1 - intensity * 0.5) + width_scale *= (1 + intensity * 0.5) + elif direction == 'horizontal': + # Compress horizontally, expand vertically + width_scale *= (1 - intensity * 0.5) + height_scale *= (1 + intensity * 0.5) + elif direction == 'both': + # General squash (both dimensions) + width_scale *= (1 - intensity * 0.3) + height_scale *= (1 - intensity * 0.3) + + return (width_scale, height_scale) + + +def calculate_arc_motion(start: tuple[float, float], end: tuple[float, float], + height: float, t: float) -> tuple[float, float]: + """ + Calculate position along a parabolic arc (natural motion path). + + Args: + start: (x, y) starting position + end: (x, y) ending position + height: Arc height at midpoint (positive = upward) + t: Progress (0.0-1.0) + + Returns: + (x, y) position along arc + """ + x1, y1 = start + x2, y2 = end + + # Linear interpolation for x + x = x1 + (x2 - x1) * t + + # Parabolic interpolation for y + # y = start + progress * (end - start) + arc_offset + # Arc offset peaks at t=0.5 + arc_offset = 4 * height * t * (1 - t) + y = y1 + (y2 - y1) * t - arc_offset + + return (x, y) + + +# Add new easing functions to the convenience mapping +EASING_FUNCTIONS.update({ + 'back_in': ease_back_in, + 'back_out': ease_back_out, + 'back_in_out': ease_back_in_out, + 'anticipate': ease_back_in, # Alias + 'overshoot': ease_back_out, # Alias +}) \ No newline at end of file diff --git a/skills/slack-gif-creator/core/frame_composer.py b/skills/slack-gif-creator/core/frame_composer.py new file mode 100755 index 000000000..aed3c5069 --- /dev/null +++ b/skills/slack-gif-creator/core/frame_composer.py @@ -0,0 +1,469 @@ +#!/usr/bin/env python3 +""" +Frame Composer - Utilities for composing visual elements into frames. + +Provides functions for drawing shapes, text, emojis, and compositing elements +together to create animation frames. 
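# --- Usage sketch (illustrative, not part of the module) ---
# How the easing helpers are typically combined when computing per-frame
# positions, assuming the module is importable as core.easing:
#
#   from core.easing import interpolate, calculate_arc_motion, apply_squash_stretch
#
#   num_frames = 24
#   for i in range(num_frames):
#       t = i / (num_frames - 1)
#       # Eased vertical drop from y=50 to y=350
#       y = interpolate(50, 350, t, easing='bounce_out')
#       # Parabolic path from left to right, peaking 120px above the straight line
#       x, arc_y = calculate_arc_motion((60, 300), (420, 300), height=120, t=t)
#       # Squash the object progressively toward the end of the motion
#       w_scale, h_scale = apply_squash_stretch((1.0, 1.0), intensity=t * 0.4,
#                                               direction='vertical')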
+""" + +from PIL import Image, ImageDraw, ImageFont +import numpy as np +from typing import Optional + + +def create_blank_frame(width: int, height: int, color: tuple[int, int, int] = (255, 255, 255)) -> Image.Image: + """ + Create a blank frame with solid color background. + + Args: + width: Frame width + height: Frame height + color: RGB color tuple (default: white) + + Returns: + PIL Image + """ + return Image.new('RGB', (width, height), color) + + +def draw_circle(frame: Image.Image, center: tuple[int, int], radius: int, + fill_color: Optional[tuple[int, int, int]] = None, + outline_color: Optional[tuple[int, int, int]] = None, + outline_width: int = 1) -> Image.Image: + """ + Draw a circle on a frame. + + Args: + frame: PIL Image to draw on + center: (x, y) center position + radius: Circle radius + fill_color: RGB fill color (None for no fill) + outline_color: RGB outline color (None for no outline) + outline_width: Outline width in pixels + + Returns: + Modified frame + """ + draw = ImageDraw.Draw(frame) + x, y = center + bbox = [x - radius, y - radius, x + radius, y + radius] + draw.ellipse(bbox, fill=fill_color, outline=outline_color, width=outline_width) + return frame + + +def draw_rectangle(frame: Image.Image, top_left: tuple[int, int], bottom_right: tuple[int, int], + fill_color: Optional[tuple[int, int, int]] = None, + outline_color: Optional[tuple[int, int, int]] = None, + outline_width: int = 1) -> Image.Image: + """ + Draw a rectangle on a frame. + + Args: + frame: PIL Image to draw on + top_left: (x, y) top-left corner + bottom_right: (x, y) bottom-right corner + fill_color: RGB fill color (None for no fill) + outline_color: RGB outline color (None for no outline) + outline_width: Outline width in pixels + + Returns: + Modified frame + """ + draw = ImageDraw.Draw(frame) + draw.rectangle([top_left, bottom_right], fill=fill_color, outline=outline_color, width=outline_width) + return frame + + +def draw_line(frame: Image.Image, start: tuple[int, int], end: tuple[int, int], + color: tuple[int, int, int] = (0, 0, 0), width: int = 2) -> Image.Image: + """ + Draw a line on a frame. + + Args: + frame: PIL Image to draw on + start: (x, y) start position + end: (x, y) end position + color: RGB line color + width: Line width in pixels + + Returns: + Modified frame + """ + draw = ImageDraw.Draw(frame) + draw.line([start, end], fill=color, width=width) + return frame + + +def draw_text(frame: Image.Image, text: str, position: tuple[int, int], + font_size: int = 40, color: tuple[int, int, int] = (0, 0, 0), + centered: bool = False) -> Image.Image: + """ + Draw text on a frame. 
+ + Args: + frame: PIL Image to draw on + text: Text to draw + position: (x, y) position (top-left unless centered=True) + font_size: Font size in pixels + color: RGB text color + centered: If True, center text at position + + Returns: + Modified frame + """ + draw = ImageDraw.Draw(frame) + + # Try to use default font, fall back to basic if not available + try: + font = ImageFont.truetype("/System/Library/Fonts/Helvetica.ttc", font_size) + except: + font = ImageFont.load_default() + + if centered: + bbox = draw.textbbox((0, 0), text, font=font) + text_width = bbox[2] - bbox[0] + text_height = bbox[3] - bbox[1] + x = position[0] - text_width // 2 + y = position[1] - text_height // 2 + position = (x, y) + + draw.text(position, text, fill=color, font=font) + return frame + + +def draw_emoji(frame: Image.Image, emoji: str, position: tuple[int, int], size: int = 60) -> Image.Image: + """ + Draw emoji text on a frame (requires system emoji support). + + Args: + frame: PIL Image to draw on + emoji: Emoji character(s) + position: (x, y) position + size: Emoji size in pixels + + Returns: + Modified frame + """ + draw = ImageDraw.Draw(frame) + + # Use Apple Color Emoji font on macOS + try: + font = ImageFont.truetype("/System/Library/Fonts/Apple Color Emoji.ttc", size) + except: + # Fallback to text-based emoji + font = ImageFont.truetype("/System/Library/Fonts/Helvetica.ttc", size) + + draw.text(position, emoji, font=font, embedded_color=True) + return frame + + +def composite_layers(base: Image.Image, overlay: Image.Image, + position: tuple[int, int] = (0, 0), alpha: float = 1.0) -> Image.Image: + """ + Composite one image on top of another. + + Args: + base: Base image + overlay: Image to overlay on top + position: (x, y) position to place overlay + alpha: Opacity of overlay (0.0 = transparent, 1.0 = opaque) + + Returns: + Composite image + """ + # Convert to RGBA for transparency support + base_rgba = base.convert('RGBA') + overlay_rgba = overlay.convert('RGBA') + + # Apply alpha + if alpha < 1.0: + overlay_rgba = overlay_rgba.copy() + overlay_rgba.putalpha(int(255 * alpha)) + + # Paste overlay onto base + base_rgba.paste(overlay_rgba, position, overlay_rgba) + + # Convert back to RGB + return base_rgba.convert('RGB') + + +def draw_stick_figure(frame: Image.Image, position: tuple[int, int], scale: float = 1.0, + color: tuple[int, int, int] = (0, 0, 0), line_width: int = 3) -> Image.Image: + """ + Draw a simple stick figure. 
+ + Args: + frame: PIL Image to draw on + position: (x, y) center position of head + scale: Size multiplier + color: RGB line color + line_width: Line width in pixels + + Returns: + Modified frame + """ + draw = ImageDraw.Draw(frame) + x, y = position + + # Scale dimensions + head_radius = int(15 * scale) + body_length = int(40 * scale) + arm_length = int(25 * scale) + leg_length = int(35 * scale) + leg_spread = int(15 * scale) + + # Head + draw.ellipse([x - head_radius, y - head_radius, x + head_radius, y + head_radius], + outline=color, width=line_width) + + # Body + body_start = y + head_radius + body_end = body_start + body_length + draw.line([(x, body_start), (x, body_end)], fill=color, width=line_width) + + # Arms + arm_y = body_start + int(body_length * 0.3) + draw.line([(x - arm_length, arm_y), (x + arm_length, arm_y)], fill=color, width=line_width) + + # Legs + draw.line([(x, body_end), (x - leg_spread, body_end + leg_length)], fill=color, width=line_width) + draw.line([(x, body_end), (x + leg_spread, body_end + leg_length)], fill=color, width=line_width) + + return frame + + +def create_gradient_background(width: int, height: int, + top_color: tuple[int, int, int], + bottom_color: tuple[int, int, int]) -> Image.Image: + """ + Create a vertical gradient background. + + Args: + width: Frame width + height: Frame height + top_color: RGB color at top + bottom_color: RGB color at bottom + + Returns: + PIL Image with gradient + """ + frame = Image.new('RGB', (width, height)) + draw = ImageDraw.Draw(frame) + + # Calculate color step for each row + r1, g1, b1 = top_color + r2, g2, b2 = bottom_color + + for y in range(height): + # Interpolate color + ratio = y / height + r = int(r1 * (1 - ratio) + r2 * ratio) + g = int(g1 * (1 - ratio) + g2 * ratio) + b = int(b1 * (1 - ratio) + b2 * ratio) + + # Draw horizontal line + draw.line([(0, y), (width, y)], fill=(r, g, b)) + + return frame + + +def draw_emoji_enhanced(frame: Image.Image, emoji: str, position: tuple[int, int], + size: int = 60, shadow: bool = True, + shadow_offset: tuple[int, int] = (2, 2)) -> Image.Image: + """ + Draw emoji with optional shadow for better visual quality. 
+ + Args: + frame: PIL Image to draw on + emoji: Emoji character(s) + position: (x, y) position + size: Emoji size in pixels (minimum 12) + shadow: Whether to add drop shadow + shadow_offset: Shadow offset + + Returns: + Modified frame + """ + draw = ImageDraw.Draw(frame) + + # Ensure minimum size to avoid font rendering errors + size = max(12, size) + + # Use Apple Color Emoji font on macOS + try: + font = ImageFont.truetype("/System/Library/Fonts/Apple Color Emoji.ttc", size) + except: + # Fallback to text-based emoji + try: + font = ImageFont.truetype("/System/Library/Fonts/Helvetica.ttc", size) + except: + font = ImageFont.load_default() + + # Draw shadow first if enabled + if shadow and size >= 20: # Only draw shadow for larger emojis + shadow_pos = (position[0] + shadow_offset[0], position[1] + shadow_offset[1]) + # Draw semi-transparent shadow (simulated by drawing multiple times) + for offset in range(1, 3): + try: + draw.text((shadow_pos[0] + offset, shadow_pos[1] + offset), + emoji, font=font, embedded_color=True, fill=(0, 0, 0, 100)) + except: + pass # Skip shadow if it fails + + # Draw main emoji + try: + draw.text(position, emoji, font=font, embedded_color=True) + except: + # Fallback to basic drawing if embedded color fails + draw.text(position, emoji, font=font, fill=(0, 0, 0)) + + return frame + + +def draw_circle_with_shadow(frame: Image.Image, center: tuple[int, int], radius: int, + fill_color: tuple[int, int, int], + shadow_offset: tuple[int, int] = (3, 3), + shadow_color: tuple[int, int, int] = (0, 0, 0)) -> Image.Image: + """ + Draw a circle with drop shadow. + + Args: + frame: PIL Image to draw on + center: (x, y) center position + radius: Circle radius + fill_color: RGB fill color + shadow_offset: (x, y) shadow offset + shadow_color: RGB shadow color + + Returns: + Modified frame + """ + draw = ImageDraw.Draw(frame) + x, y = center + + # Draw shadow + shadow_center = (x + shadow_offset[0], y + shadow_offset[1]) + shadow_bbox = [ + shadow_center[0] - radius, + shadow_center[1] - radius, + shadow_center[0] + radius, + shadow_center[1] + radius + ] + draw.ellipse(shadow_bbox, fill=shadow_color) + + # Draw main circle + bbox = [x - radius, y - radius, x + radius, y + radius] + draw.ellipse(bbox, fill=fill_color) + + return frame + + +def draw_rounded_rectangle(frame: Image.Image, top_left: tuple[int, int], + bottom_right: tuple[int, int], radius: int, + fill_color: Optional[tuple[int, int, int]] = None, + outline_color: Optional[tuple[int, int, int]] = None, + outline_width: int = 1) -> Image.Image: + """ + Draw a rectangle with rounded corners. + + Args: + frame: PIL Image to draw on + top_left: (x, y) top-left corner + bottom_right: (x, y) bottom-right corner + radius: Corner radius + fill_color: RGB fill color (None for no fill) + outline_color: RGB outline color (None for no outline) + outline_width: Outline width + + Returns: + Modified frame + """ + draw = ImageDraw.Draw(frame) + x1, y1 = top_left + x2, y2 = bottom_right + + # Draw rounded rectangle using PIL's built-in method + draw.rounded_rectangle([x1, y1, x2, y2], radius=radius, + fill=fill_color, outline=outline_color, width=outline_width) + + return frame + + +def add_vignette(frame: Image.Image, strength: float = 0.5) -> Image.Image: + """ + Add a vignette effect (darkened edges) to frame. 
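# --- Usage sketch (illustrative, not part of the module) ---
# Composing one static frame from the drawing helpers above, assuming the
# module is importable as core.frame_composer:
#
#   from core.frame_composer import (
#       create_gradient_background, draw_circle_with_shadow,
#       draw_rounded_rectangle, draw_emoji_enhanced, draw_stick_figure,
#   )
#
#   frame = create_gradient_background(480, 480, (240, 248, 255), (200, 230, 255))
#   frame = draw_rounded_rectangle(frame, (40, 40), (440, 440), radius=20,
#                                  outline_color=(0, 122, 255), outline_width=4)
#   frame = draw_circle_with_shadow(frame, (160, 300), 50, fill_color=(255, 107, 107))
#   frame = draw_stick_figure(frame, (320, 200), scale=1.2)
#   frame = draw_emoji_enhanced(frame, '🎉', position=(210, 90), size=72)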
+ + Args: + frame: PIL Image + strength: Vignette strength (0.0-1.0) + + Returns: + Frame with vignette + """ + width, height = frame.size + + # Create radial gradient mask + center_x, center_y = width // 2, height // 2 + max_dist = ((width / 2) ** 2 + (height / 2) ** 2) ** 0.5 + + # Create overlay + overlay = Image.new('RGB', (width, height), (0, 0, 0)) + pixels = overlay.load() + + for y in range(height): + for x in range(width): + # Calculate distance from center + dx = x - center_x + dy = y - center_y + dist = (dx ** 2 + dy ** 2) ** 0.5 + + # Calculate vignette value + vignette = min(1, (dist / max_dist) * strength) + value = int(255 * (1 - vignette)) + pixels[x, y] = (value, value, value) + + # Blend with original using multiply + frame_array = np.array(frame, dtype=np.float32) / 255 + overlay_array = np.array(overlay, dtype=np.float32) / 255 + + result = frame_array * overlay_array + result = (result * 255).astype(np.uint8) + + return Image.fromarray(result) + + +def draw_star(frame: Image.Image, center: tuple[int, int], size: int, + fill_color: tuple[int, int, int], + outline_color: Optional[tuple[int, int, int]] = None, + outline_width: int = 1) -> Image.Image: + """ + Draw a 5-pointed star. + + Args: + frame: PIL Image to draw on + center: (x, y) center position + size: Star size (outer radius) + fill_color: RGB fill color + outline_color: RGB outline color (None for no outline) + outline_width: Outline width + + Returns: + Modified frame + """ + import math + draw = ImageDraw.Draw(frame) + x, y = center + + # Calculate star points + points = [] + for i in range(10): + angle = (i * 36 - 90) * math.pi / 180 # 36 degrees per point, start at top + radius = size if i % 2 == 0 else size * 0.4 # Alternate between outer and inner + px = x + radius * math.cos(angle) + py = y + radius * math.sin(angle) + points.append((px, py)) + + # Draw star + draw.polygon(points, fill=fill_color, outline=outline_color, width=outline_width) + + return frame \ No newline at end of file diff --git a/skills/slack-gif-creator/core/gif_builder.py b/skills/slack-gif-creator/core/gif_builder.py new file mode 100755 index 000000000..fae52b2a2 --- /dev/null +++ b/skills/slack-gif-creator/core/gif_builder.py @@ -0,0 +1,246 @@ +#!/usr/bin/env python3 +""" +GIF Builder - Core module for assembling frames into GIFs optimized for Slack. + +This module provides the main interface for creating GIFs from programmatically +generated frames, with automatic optimization for Slack's requirements. +""" + +from pathlib import Path +from typing import Optional +import imageio.v3 as imageio +from PIL import Image +import numpy as np + + +class GIFBuilder: + """Builder for creating optimized GIFs from frames.""" + + def __init__(self, width: int = 480, height: int = 480, fps: int = 15): + """ + Initialize GIF builder. + + Args: + width: Frame width in pixels + height: Frame height in pixels + fps: Frames per second + """ + self.width = width + self.height = height + self.fps = fps + self.frames: list[np.ndarray] = [] + + def add_frame(self, frame: np.ndarray | Image.Image): + """ + Add a frame to the GIF. 
+ + Args: + frame: Frame as numpy array or PIL Image (will be converted to RGB) + """ + if isinstance(frame, Image.Image): + frame = np.array(frame.convert('RGB')) + + # Ensure frame is correct size + if frame.shape[:2] != (self.height, self.width): + pil_frame = Image.fromarray(frame) + pil_frame = pil_frame.resize((self.width, self.height), Image.Resampling.LANCZOS) + frame = np.array(pil_frame) + + self.frames.append(frame) + + def add_frames(self, frames: list[np.ndarray | Image.Image]): + """Add multiple frames at once.""" + for frame in frames: + self.add_frame(frame) + + def optimize_colors(self, num_colors: int = 128, use_global_palette: bool = True) -> list[np.ndarray]: + """ + Reduce colors in all frames using quantization. + + Args: + num_colors: Target number of colors (8-256) + use_global_palette: Use a single palette for all frames (better compression) + + Returns: + List of color-optimized frames + """ + optimized = [] + + if use_global_palette and len(self.frames) > 1: + # Create a global palette from all frames + # Sample frames to build palette + sample_size = min(5, len(self.frames)) + sample_indices = [int(i * len(self.frames) / sample_size) for i in range(sample_size)] + sample_frames = [self.frames[i] for i in sample_indices] + + # Combine sample frames into a single image for palette generation + # Flatten each frame to get all pixels, then stack them + all_pixels = np.vstack([f.reshape(-1, 3) for f in sample_frames]) # (total_pixels, 3) + + # Create a properly-shaped RGB image from the pixel data + # We'll make a roughly square image from all the pixels + total_pixels = len(all_pixels) + width = min(512, int(np.sqrt(total_pixels))) # Reasonable width, max 512 + height = (total_pixels + width - 1) // width # Ceiling division + + # Pad if necessary to fill the rectangle + pixels_needed = width * height + if pixels_needed > total_pixels: + padding = np.zeros((pixels_needed - total_pixels, 3), dtype=np.uint8) + all_pixels = np.vstack([all_pixels, padding]) + + # Reshape to proper RGB image format (H, W, 3) + img_array = all_pixels[:pixels_needed].reshape(height, width, 3).astype(np.uint8) + combined_img = Image.fromarray(img_array, mode='RGB') + + # Generate global palette + global_palette = combined_img.quantize(colors=num_colors, method=2) + + # Apply global palette to all frames + for frame in self.frames: + pil_frame = Image.fromarray(frame) + quantized = pil_frame.quantize(palette=global_palette, dither=1) + optimized.append(np.array(quantized.convert('RGB'))) + else: + # Use per-frame quantization + for frame in self.frames: + pil_frame = Image.fromarray(frame) + quantized = pil_frame.quantize(colors=num_colors, method=2, dither=1) + optimized.append(np.array(quantized.convert('RGB'))) + + return optimized + + def deduplicate_frames(self, threshold: float = 0.995) -> int: + """ + Remove duplicate or near-duplicate consecutive frames. + + Args: + threshold: Similarity threshold (0.0-1.0). Higher = more strict (0.995 = very similar). 
+ + Returns: + Number of frames removed + """ + if len(self.frames) < 2: + return 0 + + deduplicated = [self.frames[0]] + removed_count = 0 + + for i in range(1, len(self.frames)): + # Compare with previous frame + prev_frame = np.array(deduplicated[-1], dtype=np.float32) + curr_frame = np.array(self.frames[i], dtype=np.float32) + + # Calculate similarity (normalized) + diff = np.abs(prev_frame - curr_frame) + similarity = 1.0 - (np.mean(diff) / 255.0) + + # Keep frame if sufficiently different + # High threshold (0.995) means only remove truly identical frames + if similarity < threshold: + deduplicated.append(self.frames[i]) + else: + removed_count += 1 + + self.frames = deduplicated + return removed_count + + def save(self, output_path: str | Path, num_colors: int = 128, + optimize_for_emoji: bool = False, remove_duplicates: bool = True) -> dict: + """ + Save frames as optimized GIF for Slack. + + Args: + output_path: Where to save the GIF + num_colors: Number of colors to use (fewer = smaller file) + optimize_for_emoji: If True, optimize for <64KB emoji size + remove_duplicates: Remove duplicate consecutive frames + + Returns: + Dictionary with file info (path, size, dimensions, frame_count) + """ + if not self.frames: + raise ValueError("No frames to save. Add frames with add_frame() first.") + + output_path = Path(output_path) + original_frame_count = len(self.frames) + + # Remove duplicate frames to reduce file size + if remove_duplicates: + removed = self.deduplicate_frames(threshold=0.98) + if removed > 0: + print(f" Removed {removed} duplicate frames") + + # Optimize for emoji if requested + if optimize_for_emoji: + if self.width > 128 or self.height > 128: + print(f" Resizing from {self.width}x{self.height} to 128x128 for emoji") + self.width = 128 + self.height = 128 + # Resize all frames + resized_frames = [] + for frame in self.frames: + pil_frame = Image.fromarray(frame) + pil_frame = pil_frame.resize((128, 128), Image.Resampling.LANCZOS) + resized_frames.append(np.array(pil_frame)) + self.frames = resized_frames + num_colors = min(num_colors, 48) # More aggressive color limit for emoji + + # More aggressive FPS reduction for emoji + if len(self.frames) > 12: + print(f" Reducing frames from {len(self.frames)} to ~12 for emoji size") + # Keep every nth frame to get close to 12 frames + keep_every = max(1, len(self.frames) // 12) + self.frames = [self.frames[i] for i in range(0, len(self.frames), keep_every)] + + # Optimize colors with global palette + optimized_frames = self.optimize_colors(num_colors, use_global_palette=True) + + # Calculate frame duration in milliseconds + frame_duration = 1000 / self.fps + + # Save GIF + imageio.imwrite( + output_path, + optimized_frames, + duration=frame_duration, + loop=0 # Infinite loop + ) + + # Get file info + file_size_kb = output_path.stat().st_size / 1024 + file_size_mb = file_size_kb / 1024 + + info = { + 'path': str(output_path), + 'size_kb': file_size_kb, + 'size_mb': file_size_mb, + 'dimensions': f'{self.width}x{self.height}', + 'frame_count': len(optimized_frames), + 'fps': self.fps, + 'duration_seconds': len(optimized_frames) / self.fps, + 'colors': num_colors + } + + # Print info + print(f"\n✓ GIF created successfully!") + print(f" Path: {output_path}") + print(f" Size: {file_size_kb:.1f} KB ({file_size_mb:.2f} MB)") + print(f" Dimensions: {self.width}x{self.height}") + print(f" Frames: {len(optimized_frames)} @ {self.fps} fps") + print(f" Duration: {info['duration_seconds']:.1f}s") + print(f" Colors: {num_colors}") + + # 
Warnings + if optimize_for_emoji and file_size_kb > 64: + print(f"\n⚠️ WARNING: Emoji file size ({file_size_kb:.1f} KB) exceeds 64 KB limit") + print(" Try: fewer frames, fewer colors, or simpler design") + elif not optimize_for_emoji and file_size_kb > 2048: + print(f"\n⚠️ WARNING: File size ({file_size_kb:.1f} KB) is large for Slack") + print(" Try: fewer frames, smaller dimensions, or fewer colors") + + return info + + def clear(self): + """Clear all frames (useful for creating multiple GIFs).""" + self.frames = [] \ No newline at end of file diff --git a/skills/slack-gif-creator/core/typography.py b/skills/slack-gif-creator/core/typography.py new file mode 100755 index 000000000..6ba35fc52 --- /dev/null +++ b/skills/slack-gif-creator/core/typography.py @@ -0,0 +1,357 @@ +#!/usr/bin/env python3 +""" +Typography System - Professional text rendering with outlines, shadows, and effects. + +This module provides high-quality text rendering that looks crisp and professional +in GIFs, with outlines for readability and effects for visual impact. +""" + +from PIL import Image, ImageDraw, ImageFont +from typing import Optional + + +# Typography scale - proportional sizing system +TYPOGRAPHY_SCALE = { + 'h1': 60, # Large headers + 'h2': 48, # Medium headers + 'h3': 36, # Small headers + 'title': 50, # Title text + 'body': 28, # Body text + 'small': 20, # Small text + 'tiny': 16, # Tiny text +} + + +def get_font(size: int, bold: bool = False) -> ImageFont.FreeTypeFont: + """ + Get a font with fallback support. + + Args: + size: Font size in pixels + bold: Use bold variant if available + + Returns: + ImageFont object + """ + # Try multiple font paths for cross-platform support + font_paths = [ + # macOS fonts + "/System/Library/Fonts/Helvetica.ttc", + "/System/Library/Fonts/SF-Pro.ttf", + "/Library/Fonts/Arial Bold.ttf" if bold else "/Library/Fonts/Arial.ttf", + # Linux fonts + "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf" if bold else "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", + # Windows fonts + "C:\\Windows\\Fonts\\arialbd.ttf" if bold else "C:\\Windows\\Fonts\\arial.ttf", + ] + + for font_path in font_paths: + try: + return ImageFont.truetype(font_path, size) + except: + continue + + # Ultimate fallback + return ImageFont.load_default() + + +def draw_text_with_outline( + frame: Image.Image, + text: str, + position: tuple[int, int], + font_size: int = 40, + text_color: tuple[int, int, int] = (255, 255, 255), + outline_color: tuple[int, int, int] = (0, 0, 0), + outline_width: int = 3, + centered: bool = False, + bold: bool = True +) -> Image.Image: + """ + Draw text with outline for maximum readability. + + This is THE most important function for professional-looking text in GIFs. + The outline ensures text is readable on any background. 
+ + Args: + frame: PIL Image to draw on + text: Text to draw + position: (x, y) position + font_size: Font size in pixels + text_color: RGB color for text fill + outline_color: RGB color for outline + outline_width: Width of outline in pixels (2-4 recommended) + centered: If True, center text at position + bold: Use bold font variant + + Returns: + Modified frame + """ + draw = ImageDraw.Draw(frame) + font = get_font(font_size, bold=bold) + + # Calculate position for centering + if centered: + bbox = draw.textbbox((0, 0), text, font=font) + text_width = bbox[2] - bbox[0] + text_height = bbox[3] - bbox[1] + x = position[0] - text_width // 2 + y = position[1] - text_height // 2 + position = (x, y) + + # Draw outline by drawing text multiple times offset in all directions + x, y = position + for offset_x in range(-outline_width, outline_width + 1): + for offset_y in range(-outline_width, outline_width + 1): + if offset_x != 0 or offset_y != 0: + draw.text((x + offset_x, y + offset_y), text, fill=outline_color, font=font) + + # Draw main text on top + draw.text(position, text, fill=text_color, font=font) + + return frame + + +def draw_text_with_shadow( + frame: Image.Image, + text: str, + position: tuple[int, int], + font_size: int = 40, + text_color: tuple[int, int, int] = (255, 255, 255), + shadow_color: tuple[int, int, int] = (0, 0, 0), + shadow_offset: tuple[int, int] = (3, 3), + centered: bool = False, + bold: bool = True +) -> Image.Image: + """ + Draw text with drop shadow for depth. + + Args: + frame: PIL Image to draw on + text: Text to draw + position: (x, y) position + font_size: Font size in pixels + text_color: RGB color for text + shadow_color: RGB color for shadow + shadow_offset: (x, y) offset for shadow + centered: If True, center text at position + bold: Use bold font variant + + Returns: + Modified frame + """ + draw = ImageDraw.Draw(frame) + font = get_font(font_size, bold=bold) + + # Calculate position for centering + if centered: + bbox = draw.textbbox((0, 0), text, font=font) + text_width = bbox[2] - bbox[0] + text_height = bbox[3] - bbox[1] + x = position[0] - text_width // 2 + y = position[1] - text_height // 2 + position = (x, y) + + # Draw shadow + shadow_pos = (position[0] + shadow_offset[0], position[1] + shadow_offset[1]) + draw.text(shadow_pos, text, fill=shadow_color, font=font) + + # Draw main text + draw.text(position, text, fill=text_color, font=font) + + return frame + + +def draw_text_with_glow( + frame: Image.Image, + text: str, + position: tuple[int, int], + font_size: int = 40, + text_color: tuple[int, int, int] = (255, 255, 255), + glow_color: tuple[int, int, int] = (255, 200, 0), + glow_radius: int = 5, + centered: bool = False, + bold: bool = True +) -> Image.Image: + """ + Draw text with glow effect for emphasis. 
+ + Args: + frame: PIL Image to draw on + text: Text to draw + position: (x, y) position + font_size: Font size in pixels + text_color: RGB color for text + glow_color: RGB color for glow + glow_radius: Radius of glow effect + centered: If True, center text at position + bold: Use bold font variant + + Returns: + Modified frame + """ + draw = ImageDraw.Draw(frame) + font = get_font(font_size, bold=bold) + + # Calculate position for centering + if centered: + bbox = draw.textbbox((0, 0), text, font=font) + text_width = bbox[2] - bbox[0] + text_height = bbox[3] - bbox[1] + x = position[0] - text_width // 2 + y = position[1] - text_height // 2 + position = (x, y) + + # Draw glow layers with decreasing opacity (simulated with same color at different offsets) + x, y = position + for radius in range(glow_radius, 0, -1): + for offset_x in range(-radius, radius + 1): + for offset_y in range(-radius, radius + 1): + if offset_x != 0 or offset_y != 0: + draw.text((x + offset_x, y + offset_y), text, fill=glow_color, font=font) + + # Draw main text + draw.text(position, text, fill=text_color, font=font) + + return frame + + +def draw_text_in_box( + frame: Image.Image, + text: str, + position: tuple[int, int], + font_size: int = 40, + text_color: tuple[int, int, int] = (255, 255, 255), + box_color: tuple[int, int, int] = (0, 0, 0), + box_alpha: float = 0.7, + padding: int = 10, + centered: bool = True, + bold: bool = True +) -> Image.Image: + """ + Draw text in a semi-transparent box for guaranteed readability. + + Args: + frame: PIL Image to draw on + text: Text to draw + position: (x, y) position + font_size: Font size in pixels + text_color: RGB color for text + box_color: RGB color for background box + box_alpha: Opacity of box (0.0-1.0) + padding: Padding around text in pixels + centered: If True, center at position + bold: Use bold font variant + + Returns: + Modified frame + """ + # Create a separate layer for the box with alpha + overlay = Image.new('RGBA', frame.size, (0, 0, 0, 0)) + draw_overlay = ImageDraw.Draw(overlay) + draw = ImageDraw.Draw(frame) + + font = get_font(font_size, bold=bold) + + # Get text dimensions + bbox = draw.textbbox((0, 0), text, font=font) + text_width = bbox[2] - bbox[0] + text_height = bbox[3] - bbox[1] + + # Calculate box position + if centered: + box_x = position[0] - (text_width + padding * 2) // 2 + box_y = position[1] - (text_height + padding * 2) // 2 + text_x = position[0] - text_width // 2 + text_y = position[1] - text_height // 2 + else: + box_x = position[0] - padding + box_y = position[1] - padding + text_x = position[0] + text_y = position[1] + + # Draw semi-transparent box + box_coords = [ + box_x, + box_y, + box_x + text_width + padding * 2, + box_y + text_height + padding * 2 + ] + alpha_value = int(255 * box_alpha) + draw_overlay.rectangle(box_coords, fill=(*box_color, alpha_value)) + + # Composite overlay onto frame + frame_rgba = frame.convert('RGBA') + frame_rgba = Image.alpha_composite(frame_rgba, overlay) + frame = frame_rgba.convert('RGB') + + # Draw text on top + draw = ImageDraw.Draw(frame) + draw.text((text_x, text_y), text, fill=text_color, font=font) + + return frame + + +def get_text_size(text: str, font_size: int, bold: bool = True) -> tuple[int, int]: + """ + Get the dimensions of text without drawing it. 
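# --- Usage sketch (illustrative, not part of the module) ---
# Combining the typography helpers on an existing frame; the sizes come from
# TYPOGRAPHY_SCALE and the module is assumed importable as core.typography:
#
#   from core.typography import TYPOGRAPHY_SCALE, draw_text_with_outline, draw_text_in_box
#
#   frame = draw_text_with_outline(frame, "NICE!", position=(240, 120),
#                                  font_size=TYPOGRAPHY_SCALE['h1'],
#                                  text_color=(255, 255, 255),
#                                  outline_color=(0, 0, 0),
#                                  outline_width=3, centered=True)
#   frame = draw_text_in_box(frame, "caption text", position=(240, 380),
#                            font_size=TYPOGRAPHY_SCALE['body'],
#                            box_color=(0, 0, 0), box_alpha=0.6, centered=True)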
+ + Args: + text: Text to measure + font_size: Font size in pixels + bold: Use bold font variant + + Returns: + (width, height) tuple + """ + font = get_font(font_size, bold=bold) + # Create temporary image to measure + temp_img = Image.new('RGB', (1, 1)) + draw = ImageDraw.Draw(temp_img) + bbox = draw.textbbox((0, 0), text, font=font) + width = bbox[2] - bbox[0] + height = bbox[3] - bbox[1] + return (width, height) + + +def get_optimal_font_size(text: str, max_width: int, max_height: int, + start_size: int = 60) -> int: + """ + Find the largest font size that fits within given dimensions. + + Args: + text: Text to size + max_width: Maximum width in pixels + max_height: Maximum height in pixels + start_size: Starting font size to try + + Returns: + Optimal font size + """ + font_size = start_size + while font_size > 10: + width, height = get_text_size(text, font_size) + if width <= max_width and height <= max_height: + return font_size + font_size -= 2 + return 10 # Minimum font size + + +def scale_font_for_frame(base_size: int, frame_width: int, frame_height: int) -> int: + """ + Scale font size proportionally to frame dimensions. + + Useful for maintaining relative text size across different GIF dimensions. + + Args: + base_size: Base font size for 480x480 frame + frame_width: Actual frame width + frame_height: Actual frame height + + Returns: + Scaled font size + """ + # Use average dimension for scaling + avg_dimension = (frame_width + frame_height) / 2 + base_dimension = 480 # Reference dimension + scale_factor = avg_dimension / base_dimension + return max(10, int(base_size * scale_factor)) \ No newline at end of file diff --git a/skills/slack-gif-creator/core/validators.py b/skills/slack-gif-creator/core/validators.py new file mode 100755 index 000000000..7622d4b26 --- /dev/null +++ b/skills/slack-gif-creator/core/validators.py @@ -0,0 +1,264 @@ +#!/usr/bin/env python3 +""" +Validators - Check if GIFs meet Slack's requirements. + +These validators help ensure your GIFs meet Slack's size and dimension constraints. +""" + +from pathlib import Path + + +def check_slack_size(gif_path: str | Path, is_emoji: bool = True) -> tuple[bool, dict]: + """ + Check if GIF meets Slack size limits. + + Args: + gif_path: Path to GIF file + is_emoji: True for emoji GIF (64KB limit), False for message GIF (2MB limit) + + Returns: + Tuple of (passes: bool, info: dict with details) + """ + gif_path = Path(gif_path) + + if not gif_path.exists(): + return False, {'error': f'File not found: {gif_path}'} + + size_bytes = gif_path.stat().st_size + size_kb = size_bytes / 1024 + size_mb = size_kb / 1024 + + limit_kb = 64 if is_emoji else 2048 + limit_mb = limit_kb / 1024 + + passes = size_kb <= limit_kb + + info = { + 'size_bytes': size_bytes, + 'size_kb': size_kb, + 'size_mb': size_mb, + 'limit_kb': limit_kb, + 'limit_mb': limit_mb, + 'passes': passes, + 'type': 'emoji' if is_emoji else 'message' + } + + # Print feedback + if passes: + print(f"✓ {size_kb:.1f} KB - within {limit_kb} KB limit") + else: + print(f"✗ {size_kb:.1f} KB - exceeds {limit_kb} KB limit") + overage_kb = size_kb - limit_kb + overage_percent = (overage_kb / limit_kb) * 100 + print(f" Over by: {overage_kb:.1f} KB ({overage_percent:.1f}%)") + print(f" Try: fewer frames, fewer colors, or simpler design") + + return passes, info + + +def validate_dimensions(width: int, height: int, is_emoji: bool = True) -> tuple[bool, dict]: + """ + Check if dimensions are suitable for Slack. 
+ + Args: + width: Frame width in pixels + height: Frame height in pixels + is_emoji: True for emoji GIF, False for message GIF + + Returns: + Tuple of (passes: bool, info: dict with details) + """ + info = { + 'width': width, + 'height': height, + 'is_square': width == height, + 'type': 'emoji' if is_emoji else 'message' + } + + if is_emoji: + # Emoji GIFs should be 128x128 + optimal = width == height == 128 + acceptable = width == height and 64 <= width <= 128 + + info['optimal'] = optimal + info['acceptable'] = acceptable + + if optimal: + print(f"✓ {width}x{height} - optimal for emoji") + passes = True + elif acceptable: + print(f"⚠ {width}x{height} - acceptable but 128x128 is optimal") + passes = True + else: + print(f"✗ {width}x{height} - emoji should be square, 128x128 recommended") + passes = False + else: + # Message GIFs should be square-ish and reasonable size + aspect_ratio = max(width, height) / min(width, height) if min(width, height) > 0 else float('inf') + reasonable_size = 320 <= min(width, height) <= 640 + + info['aspect_ratio'] = aspect_ratio + info['reasonable_size'] = reasonable_size + + # Check if roughly square (within 2:1 ratio) + is_square_ish = aspect_ratio <= 2.0 + + if is_square_ish and reasonable_size: + print(f"✓ {width}x{height} - good for message GIF") + passes = True + elif is_square_ish: + print(f"⚠ {width}x{height} - square-ish but unusual size") + passes = True + elif reasonable_size: + print(f"⚠ {width}x{height} - good size but not square-ish") + passes = True + else: + print(f"✗ {width}x{height} - unusual dimensions for Slack") + passes = False + + return passes, info + + +def validate_gif(gif_path: str | Path, is_emoji: bool = True) -> tuple[bool, dict]: + """ + Run all validations on a GIF file. + + Args: + gif_path: Path to GIF file + is_emoji: True for emoji GIF, False for message GIF + + Returns: + Tuple of (all_pass: bool, results: dict) + """ + from PIL import Image + + gif_path = Path(gif_path) + + if not gif_path.exists(): + return False, {'error': f'File not found: {gif_path}'} + + print(f"\nValidating {gif_path.name} as {'emoji' if is_emoji else 'message'} GIF:") + print("=" * 60) + + # Check file size + size_pass, size_info = check_slack_size(gif_path, is_emoji) + + # Check dimensions + try: + with Image.open(gif_path) as img: + width, height = img.size + dim_pass, dim_info = validate_dimensions(width, height, is_emoji) + + # Count frames + frame_count = 0 + try: + while True: + img.seek(frame_count) + frame_count += 1 + except EOFError: + pass + + # Get duration if available + try: + duration_ms = img.info.get('duration', 100) + total_duration = (duration_ms * frame_count) / 1000 + fps = frame_count / total_duration if total_duration > 0 else 0 + except: + duration_ms = None + total_duration = None + fps = None + + except Exception as e: + return False, {'error': f'Failed to read GIF: {e}'} + + print(f"\nFrames: {frame_count}") + if total_duration: + print(f"Duration: {total_duration:.1f}s @ {fps:.1f} fps") + + all_pass = size_pass and dim_pass + + results = { + 'file': str(gif_path), + 'passes': all_pass, + 'size': size_info, + 'dimensions': dim_info, + 'frame_count': frame_count, + 'duration_seconds': total_duration, + 'fps': fps + } + + print("=" * 60) + if all_pass: + print("✓ All validations passed!") + else: + print("✗ Some validations failed") + print() + + return all_pass, results + + +def get_optimization_suggestions(results: dict) -> list[str]: + """ + Get suggestions for optimizing a GIF based on validation results. 
+ + Args: + results: Results dict from validate_gif() + + Returns: + List of suggestion strings + """ + suggestions = [] + + if not results.get('passes', False): + size_info = results.get('size', {}) + dim_info = results.get('dimensions', {}) + + # Size suggestions + if not size_info.get('passes', True): + overage = size_info['size_kb'] - size_info['limit_kb'] + if size_info['type'] == 'emoji': + suggestions.append(f"Reduce file size by {overage:.1f} KB:") + suggestions.append(" - Limit to 10-12 frames") + suggestions.append(" - Use 32-40 colors maximum") + suggestions.append(" - Remove gradients (solid colors compress better)") + suggestions.append(" - Simplify design") + else: + suggestions.append(f"Reduce file size by {overage:.1f} KB:") + suggestions.append(" - Reduce frame count or FPS") + suggestions.append(" - Use fewer colors (128 → 64)") + suggestions.append(" - Reduce dimensions") + + # Dimension suggestions + if not dim_info.get('optimal', True) and dim_info.get('type') == 'emoji': + suggestions.append("For optimal emoji GIF:") + suggestions.append(" - Use 128x128 dimensions") + suggestions.append(" - Ensure square aspect ratio") + + return suggestions + + +# Convenience function for quick checks +def is_slack_ready(gif_path: str | Path, is_emoji: bool = True, verbose: bool = True) -> bool: + """ + Quick check if GIF is ready for Slack. + + Args: + gif_path: Path to GIF file + is_emoji: True for emoji GIF, False for message GIF + verbose: Print detailed feedback + + Returns: + True if ready, False otherwise + """ + if verbose: + passes, results = validate_gif(gif_path, is_emoji) + if not passes: + suggestions = get_optimization_suggestions(results) + if suggestions: + print("\nSuggestions:") + for suggestion in suggestions: + print(suggestion) + return passes + else: + size_pass, _ = check_slack_size(gif_path, is_emoji) + return size_pass diff --git a/skills/slack-gif-creator/core/visual_effects.py b/skills/slack-gif-creator/core/visual_effects.py new file mode 100755 index 000000000..2ecbbbda0 --- /dev/null +++ b/skills/slack-gif-creator/core/visual_effects.py @@ -0,0 +1,494 @@ +#!/usr/bin/env python3 +""" +Visual Effects - Particles, motion blur, impacts, and other effects for GIFs. + +This module provides high-impact visual effects that make animations feel +professional and dynamic while keeping file sizes reasonable. +""" + +from PIL import Image, ImageDraw, ImageFilter +import numpy as np +import math +import random +from typing import Optional + + +class Particle: + """A single particle in a particle system.""" + + def __init__(self, x: float, y: float, vx: float, vy: float, + lifetime: float, color: tuple[int, int, int], + size: int = 3, shape: str = 'circle'): + """ + Initialize a particle. 
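# --- Usage sketch (illustrative, not part of the module) ---
# A typical validation flow after saving a GIF, assuming the module is
# importable as core.validators:
#
#   from core.validators import validate_gif, get_optimization_suggestions, is_slack_ready
#
#   ok, results = validate_gif('reaction.gif', is_emoji=True)
#   if not ok:
#       for tip in get_optimization_suggestions(results):
#           print(tip)
#
#   # Or the one-line check for a message-sized GIF:
#   ready = is_slack_ready('party.gif', is_emoji=False)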
+ + Args: + x, y: Starting position + vx, vy: Velocity + lifetime: How long particle lives (in frames) + color: RGB color + size: Particle size in pixels + shape: 'circle', 'square', or 'star' + """ + self.x = x + self.y = y + self.vx = vx + self.vy = vy + self.lifetime = lifetime + self.max_lifetime = lifetime + self.color = color + self.size = size + self.shape = shape + self.gravity = 0.5 # Pixels per frame squared + self.drag = 0.98 # Velocity multiplier per frame + + def update(self): + """Update particle position and lifetime.""" + # Apply physics + self.vy += self.gravity + self.vx *= self.drag + self.vy *= self.drag + + # Update position + self.x += self.vx + self.y += self.vy + + # Decrease lifetime + self.lifetime -= 1 + + def is_alive(self) -> bool: + """Check if particle is still alive.""" + return self.lifetime > 0 + + def get_alpha(self) -> float: + """Get particle opacity based on lifetime.""" + return max(0, min(1, self.lifetime / self.max_lifetime)) + + def render(self, frame: Image.Image): + """ + Render particle to frame. + + Args: + frame: PIL Image to draw on + """ + if not self.is_alive(): + return + + draw = ImageDraw.Draw(frame) + alpha = self.get_alpha() + + # Calculate faded color + color = tuple(int(c * alpha) for c in self.color) + + # Draw based on shape + x, y = int(self.x), int(self.y) + size = max(1, int(self.size * alpha)) + + if self.shape == 'circle': + bbox = [x - size, y - size, x + size, y + size] + draw.ellipse(bbox, fill=color) + elif self.shape == 'square': + bbox = [x - size, y - size, x + size, y + size] + draw.rectangle(bbox, fill=color) + elif self.shape == 'star': + # Simple 4-point star + points = [ + (x, y - size), + (x - size // 2, y), + (x, y), + (x, y + size), + (x, y), + (x + size // 2, y), + ] + draw.line(points, fill=color, width=2) + + +class ParticleSystem: + """Manages a collection of particles.""" + + def __init__(self): + """Initialize particle system.""" + self.particles: list[Particle] = [] + + def emit(self, x: int, y: int, count: int = 10, + spread: float = 2.0, speed: float = 5.0, + color: tuple[int, int, int] = (255, 200, 0), + lifetime: float = 20.0, size: int = 3, shape: str = 'circle'): + """ + Emit a burst of particles. + + Args: + x, y: Emission position + count: Number of particles to emit + spread: Angle spread (radians) + speed: Initial speed + color: Particle color + lifetime: Particle lifetime in frames + size: Particle size + shape: Particle shape + """ + for _ in range(count): + # Random angle and speed + angle = random.uniform(0, 2 * math.pi) + vel_mag = random.uniform(speed * 0.5, speed * 1.5) + vx = math.cos(angle) * vel_mag + vy = math.sin(angle) * vel_mag + + # Random lifetime variation + life = random.uniform(lifetime * 0.7, lifetime * 1.3) + + particle = Particle(x, y, vx, vy, life, color, size, shape) + self.particles.append(particle) + + def emit_confetti(self, x: int, y: int, count: int = 20, + colors: Optional[list[tuple[int, int, int]]] = None): + """ + Emit confetti particles (colorful, falling). 
+ + Args: + x, y: Emission position + count: Number of confetti pieces + colors: List of colors (random if None) + """ + if colors is None: + colors = [ + (255, 107, 107), (255, 159, 64), (255, 218, 121), + (107, 185, 240), (162, 155, 254), (255, 182, 193) + ] + + for _ in range(count): + color = random.choice(colors) + vx = random.uniform(-3, 3) + vy = random.uniform(-8, -2) + shape = random.choice(['square', 'circle']) + size = random.randint(2, 4) + lifetime = random.uniform(40, 60) + + particle = Particle(x, y, vx, vy, lifetime, color, size, shape) + particle.gravity = 0.3 # Lighter gravity for confetti + self.particles.append(particle) + + def emit_sparkles(self, x: int, y: int, count: int = 15): + """ + Emit sparkle particles (twinkling stars). + + Args: + x, y: Emission position + count: Number of sparkles + """ + colors = [(255, 255, 200), (255, 255, 255), (255, 255, 150)] + + for _ in range(count): + color = random.choice(colors) + angle = random.uniform(0, 2 * math.pi) + speed = random.uniform(1, 3) + vx = math.cos(angle) * speed + vy = math.sin(angle) * speed + lifetime = random.uniform(15, 30) + + particle = Particle(x, y, vx, vy, lifetime, color, 2, 'star') + particle.gravity = 0 + particle.drag = 0.95 + self.particles.append(particle) + + def update(self): + """Update all particles.""" + # Update alive particles + for particle in self.particles: + particle.update() + + # Remove dead particles + self.particles = [p for p in self.particles if p.is_alive()] + + def render(self, frame: Image.Image): + """Render all particles to frame.""" + for particle in self.particles: + particle.render(frame) + + def get_particle_count(self) -> int: + """Get number of active particles.""" + return len(self.particles) + + +def add_motion_blur(frame: Image.Image, prev_frame: Optional[Image.Image], + blur_amount: float = 0.5) -> Image.Image: + """ + Add motion blur by blending with previous frame. + + Args: + frame: Current frame + prev_frame: Previous frame (None for first frame) + blur_amount: Amount of blur (0.0-1.0) + + Returns: + Frame with motion blur applied + """ + if prev_frame is None: + return frame + + # Blend current frame with previous frame + frame_array = np.array(frame, dtype=np.float32) + prev_array = np.array(prev_frame, dtype=np.float32) + + blended = frame_array * (1 - blur_amount) + prev_array * blur_amount + blended = np.clip(blended, 0, 255).astype(np.uint8) + + return Image.fromarray(blended) + + +def create_impact_flash(frame: Image.Image, position: tuple[int, int], + radius: int = 100, intensity: float = 0.7) -> Image.Image: + """ + Create a bright flash effect at impact point. 
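# --- Usage sketch (illustrative, not part of the module) ---
# Driving a ParticleSystem across frames, assuming this module is importable
# as core.visual_effects; make_background_frame() is a hypothetical helper and
# builder is a GIFBuilder created elsewhere:
#
#   from core.visual_effects import ParticleSystem, add_motion_blur
#
#   particles = ParticleSystem()
#   particles.emit_confetti(240, 100, count=30)
#   prev = None
#   for i in range(30):
#       frame = make_background_frame()        # hypothetical background helper
#       particles.update()
#       particles.render(frame)
#       frame = add_motion_blur(frame, prev, blur_amount=0.3)
#       prev = frame
#       builder.add_frame(frame)               # GIFBuilder assumed from core.gif_builder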
+ + Args: + frame: PIL Image to draw on + position: Center of flash + radius: Flash radius + intensity: Flash intensity (0.0-1.0) + + Returns: + Modified frame + """ + # Create overlay + overlay = Image.new('RGBA', frame.size, (0, 0, 0, 0)) + draw = ImageDraw.Draw(overlay) + + x, y = position + + # Draw concentric circles with decreasing opacity + num_circles = 5 + for i in range(num_circles): + alpha = int(255 * intensity * (1 - i / num_circles)) + r = radius * (1 - i / num_circles) + color = (255, 255, 240, alpha) # Warm white + + bbox = [x - r, y - r, x + r, y + r] + draw.ellipse(bbox, fill=color) + + # Composite onto frame + frame_rgba = frame.convert('RGBA') + frame_rgba = Image.alpha_composite(frame_rgba, overlay) + return frame_rgba.convert('RGB') + + +def create_shockwave_rings(frame: Image.Image, position: tuple[int, int], + radii: list[int], color: tuple[int, int, int] = (255, 200, 0), + width: int = 3) -> Image.Image: + """ + Create expanding ring effects. + + Args: + frame: PIL Image to draw on + position: Center of rings + radii: List of ring radii + color: Ring color + width: Ring width + + Returns: + Modified frame + """ + draw = ImageDraw.Draw(frame) + x, y = position + + for radius in radii: + bbox = [x - radius, y - radius, x + radius, y + radius] + draw.ellipse(bbox, outline=color, width=width) + + return frame + + +def create_explosion_effect(frame: Image.Image, position: tuple[int, int], + radius: int, progress: float, + color: tuple[int, int, int] = (255, 150, 0)) -> Image.Image: + """ + Create an explosion effect that expands and fades. + + Args: + frame: PIL Image to draw on + position: Explosion center + radius: Maximum radius + progress: Animation progress (0.0-1.0) + color: Explosion color + + Returns: + Modified frame + """ + current_radius = int(radius * progress) + fade = 1 - progress + + # Create overlay + overlay = Image.new('RGBA', frame.size, (0, 0, 0, 0)) + draw = ImageDraw.Draw(overlay) + + x, y = position + + # Draw expanding circle with fade + alpha = int(255 * fade) + r, g, b = color + circle_color = (r, g, b, alpha) + + bbox = [x - current_radius, y - current_radius, x + current_radius, y + current_radius] + draw.ellipse(bbox, fill=circle_color) + + # Composite + frame_rgba = frame.convert('RGBA') + frame_rgba = Image.alpha_composite(frame_rgba, overlay) + return frame_rgba.convert('RGB') + + +def add_glow_effect(frame: Image.Image, mask_color: tuple[int, int, int], + glow_color: tuple[int, int, int], + blur_radius: int = 10) -> Image.Image: + """ + Add a glow effect to areas of a specific color. + + Args: + frame: PIL Image + mask_color: Color to create glow around + glow_color: Color of glow + blur_radius: Blur amount + + Returns: + Frame with glow + """ + # Create mask of target color + frame_array = np.array(frame) + mask = np.all(frame_array == mask_color, axis=-1) + + # Create glow layer + glow = Image.new('RGB', frame.size, (0, 0, 0)) + glow_array = np.array(glow) + glow_array[mask] = glow_color + glow = Image.fromarray(glow_array) + + # Blur the glow + glow = glow.filter(ImageFilter.GaussianBlur(blur_radius)) + + # Blend with original + blended = Image.blend(frame, glow, 0.5) + return blended + + +def add_drop_shadow(frame: Image.Image, object_bounds: tuple[int, int, int, int], + shadow_offset: tuple[int, int] = (5, 5), + shadow_color: tuple[int, int, int] = (0, 0, 0), + blur: int = 5) -> Image.Image: + """ + Add drop shadow to an object. 
+ + Args: + frame: PIL Image + object_bounds: (x1, y1, x2, y2) bounds of object + shadow_offset: (x, y) offset of shadow + shadow_color: Shadow color + blur: Shadow blur amount + + Returns: + Frame with shadow + """ + # Extract object + x1, y1, x2, y2 = object_bounds + obj = frame.crop((x1, y1, x2, y2)) + + # Create shadow + shadow = Image.new('RGBA', obj.size, (*shadow_color, 180)) + + # Create frame with alpha + frame_rgba = frame.convert('RGBA') + + # Paste shadow + shadow_pos = (x1 + shadow_offset[0], y1 + shadow_offset[1]) + frame_rgba.paste(shadow, shadow_pos, shadow) + + # Paste object on top + frame_rgba.paste(obj, (x1, y1)) + + return frame_rgba.convert('RGB') + + +def create_speed_lines(frame: Image.Image, position: tuple[int, int], + direction: float, length: int = 50, + count: int = 5, color: tuple[int, int, int] = (200, 200, 200)) -> Image.Image: + """ + Create speed lines for motion effect. + + Args: + frame: PIL Image to draw on + position: Center position + direction: Angle in radians (0 = right, pi/2 = down) + length: Line length + count: Number of lines + color: Line color + + Returns: + Modified frame + """ + draw = ImageDraw.Draw(frame) + x, y = position + + # Opposite direction (lines trail behind) + trail_angle = direction + math.pi + + for i in range(count): + # Offset from center + offset_angle = trail_angle + random.uniform(-0.3, 0.3) + offset_dist = random.uniform(10, 30) + start_x = x + math.cos(offset_angle) * offset_dist + start_y = y + math.sin(offset_angle) * offset_dist + + # End point + line_length = random.uniform(length * 0.7, length * 1.3) + end_x = start_x + math.cos(trail_angle) * line_length + end_y = start_y + math.sin(trail_angle) * line_length + + # Draw line with varying opacity + alpha = random.randint(100, 200) + width = random.randint(1, 3) + + # Simple line (full opacity simulation) + draw.line([(start_x, start_y), (end_x, end_y)], fill=color, width=width) + + return frame + + +def create_screen_shake_offset(intensity: int, frame_index: int) -> tuple[int, int]: + """ + Calculate screen shake offset for a frame. + + Args: + intensity: Shake intensity in pixels + frame_index: Current frame number + + Returns: + (x, y) offset tuple + """ + # Use frame index for deterministic but random-looking shake + random.seed(frame_index) + offset_x = random.randint(-intensity, intensity) + offset_y = random.randint(-intensity, intensity) + random.seed() # Reset seed + return (offset_x, offset_y) + + +def apply_screen_shake(frame: Image.Image, intensity: int, frame_index: int) -> Image.Image: + """ + Apply screen shake effect to entire frame. 
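+
+    Typically applied per frame in a render loop; a minimal sketch:
+
+        shaken = [apply_screen_shake(f, intensity=8, frame_index=i)
+                  for i, f in enumerate(frames)]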
+ + Args: + frame: PIL Image + intensity: Shake intensity + frame_index: Current frame number + + Returns: + Shaken frame + """ + offset_x, offset_y = create_screen_shake_offset(intensity, frame_index) + + # Create new frame with background + shaken = Image.new('RGB', frame.size, (0, 0, 0)) + + # Paste original frame with offset + shaken.paste(frame, (offset_x, offset_y)) + + return shaken \ No newline at end of file diff --git a/skills/slack-gif-creator/requirements.txt b/skills/slack-gif-creator/requirements.txt new file mode 100644 index 000000000..8bc4493e9 --- /dev/null +++ b/skills/slack-gif-creator/requirements.txt @@ -0,0 +1,4 @@ +pillow>=10.0.0 +imageio>=2.31.0 +imageio-ffmpeg>=0.4.9 +numpy>=1.24.0 \ No newline at end of file diff --git a/skills/slack-gif-creator/templates/bounce.py b/skills/slack-gif-creator/templates/bounce.py new file mode 100755 index 000000000..effbb8276 --- /dev/null +++ b/skills/slack-gif-creator/templates/bounce.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python3 +""" +Bounce Animation Template - Creates bouncing motion for objects. + +Use this to make objects bounce up and down or horizontally with realistic physics. +""" + +import sys +from pathlib import Path + +# Add parent directory to path +sys.path.append(str(Path(__file__).parent.parent)) + +from core.gif_builder import GIFBuilder +from core.frame_composer import create_blank_frame, draw_circle, draw_emoji +from core.easing import ease_out_bounce, interpolate + + +def create_bounce_animation( + object_type: str = 'circle', + object_data: dict = None, + num_frames: int = 30, + bounce_height: int = 150, + ground_y: int = 350, + start_x: int = 240, + frame_width: int = 480, + frame_height: int = 480, + bg_color: tuple[int, int, int] = (255, 255, 255) +) -> list: + """ + Create frames for a bouncing animation. 
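+
+    A quick sketch of an emoji variant (the default object is a circle; the emoji
+    and sizes here are illustrative):
+
+        frames = create_bounce_animation(object_type='emoji',
+                                         object_data={'emoji': '🏀', 'size': 60},
+                                         num_frames=30, bounce_height=180)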
+ + Args: + object_type: 'circle', 'emoji', or 'custom' + object_data: Data for the object (e.g., {'radius': 30, 'color': (255, 0, 0)}) + num_frames: Number of frames in the animation + bounce_height: Maximum height of bounce + ground_y: Y position of ground + start_x: X position (or starting X if moving horizontally) + frame_width: Frame width + frame_height: Frame height + bg_color: Background color + + Returns: + List of frames + """ + frames = [] + + # Default object data + if object_data is None: + if object_type == 'circle': + object_data = {'radius': 30, 'color': (255, 100, 100)} + elif object_type == 'emoji': + object_data = {'emoji': '⚽', 'size': 60} + + for i in range(num_frames): + # Create blank frame + frame = create_blank_frame(frame_width, frame_height, bg_color) + + # Calculate progress (0.0 to 1.0) + t = i / (num_frames - 1) if num_frames > 1 else 0 + + # Calculate Y position using bounce easing + y = ground_y - int(ease_out_bounce(t) * bounce_height) + + # Draw object + if object_type == 'circle': + draw_circle( + frame, + center=(start_x, y), + radius=object_data['radius'], + fill_color=object_data['color'] + ) + elif object_type == 'emoji': + draw_emoji( + frame, + emoji=object_data['emoji'], + position=(start_x - object_data['size'] // 2, y - object_data['size'] // 2), + size=object_data['size'] + ) + + frames.append(frame) + + return frames + + +# Example usage +if __name__ == '__main__': + print("Creating bouncing ball GIF...") + + # Create GIF builder + builder = GIFBuilder(width=480, height=480, fps=20) + + # Generate bounce animation + frames = create_bounce_animation( + object_type='circle', + object_data={'radius': 40, 'color': (255, 100, 100)}, + num_frames=40, + bounce_height=200 + ) + + # Add frames to builder + builder.add_frames(frames) + + # Save GIF + builder.save('bounce_test.gif', num_colors=64) \ No newline at end of file diff --git a/skills/slack-gif-creator/templates/explode.py b/skills/slack-gif-creator/templates/explode.py new file mode 100755 index 000000000..b03a1bd6c --- /dev/null +++ b/skills/slack-gif-creator/templates/explode.py @@ -0,0 +1,331 @@ +#!/usr/bin/env python3 +""" +Explode Animation - Break objects into pieces that fly outward. + +Creates explosion, shatter, and particle burst effects. +""" + +import sys +from pathlib import Path +import math +import random + +sys.path.append(str(Path(__file__).parent.parent)) + +from PIL import Image, ImageDraw +import numpy as np +from core.gif_builder import GIFBuilder +from core.frame_composer import create_blank_frame, draw_emoji_enhanced +from core.visual_effects import ParticleSystem +from core.easing import interpolate + + +def create_explode_animation( + object_type: str = 'emoji', + object_data: dict | None = None, + num_frames: int = 30, + explode_type: str = 'burst', # 'burst', 'shatter', 'dissolve', 'implode' + num_pieces: int = 20, + explosion_speed: float = 5.0, + center_pos: tuple[int, int] = (240, 240), + frame_width: int = 480, + frame_height: int = 480, + bg_color: tuple[int, int, int] = (255, 255, 255) +) -> list[Image.Image]: + """ + Create explosion animation. 
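+
+    Sketch of the 'dissolve' variant (see the __main__ block below for 'burst'
+    and 'shatter' examples; values are illustrative):
+
+        frames = create_explode_animation(object_type='emoji',
+                                          object_data={'emoji': '✨', 'size': 100},
+                                          num_frames=30, explode_type='dissolve')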
+ + Args: + object_type: 'emoji', 'circle', 'text' + object_data: Object configuration + num_frames: Number of frames + explode_type: Type of explosion + num_pieces: Number of pieces/particles + explosion_speed: Speed of explosion + center_pos: Center position + frame_width: Frame width + frame_height: Frame height + bg_color: Background color + + Returns: + List of frames + """ + frames = [] + + # Default object data + if object_data is None: + if object_type == 'emoji': + object_data = {'emoji': '💣', 'size': 100} + + # Generate pieces/particles + pieces = [] + for _ in range(num_pieces): + angle = random.uniform(0, 2 * math.pi) + speed = random.uniform(explosion_speed * 0.5, explosion_speed * 1.5) + vx = math.cos(angle) * speed + vy = math.sin(angle) * speed + size = random.randint(3, 12) + color = ( + random.randint(100, 255), + random.randint(100, 255), + random.randint(100, 255) + ) + rotation_speed = random.uniform(-20, 20) + + pieces.append({ + 'vx': vx, + 'vy': vy, + 'size': size, + 'color': color, + 'rotation': 0, + 'rotation_speed': rotation_speed + }) + + for i in range(num_frames): + t = i / (num_frames - 1) if num_frames > 1 else 0 + frame = create_blank_frame(frame_width, frame_height, bg_color) + draw = ImageDraw.Draw(frame) + + if explode_type == 'burst': + # Show object at start, then explode + if t < 0.2: + # Object still intact + scale = interpolate(1.0, 1.2, t / 0.2, 'ease_out') + if object_type == 'emoji': + size = int(object_data['size'] * scale) + draw_emoji_enhanced( + frame, + emoji=object_data['emoji'], + position=(center_pos[0] - size // 2, center_pos[1] - size // 2), + size=size, + shadow=False + ) + else: + # Exploded - draw pieces + explosion_t = (t - 0.2) / 0.8 + for piece in pieces: + # Update position + x = center_pos[0] + piece['vx'] * explosion_t * 50 + y = center_pos[1] + piece['vy'] * explosion_t * 50 + 0.5 * 300 * explosion_t ** 2 # Gravity + + # Fade out + alpha = 1.0 - explosion_t + if alpha > 0: + color = tuple(int(c * alpha) for c in piece['color']) + size = int(piece['size'] * (1 - explosion_t * 0.5)) + + draw.ellipse( + [x - size, y - size, x + size, y + size], + fill=color + ) + + elif explode_type == 'shatter': + # Break into geometric pieces + if t < 0.15: + # Object intact + if object_type == 'emoji': + draw_emoji_enhanced( + frame, + emoji=object_data['emoji'], + position=(center_pos[0] - object_data['size'] // 2, + center_pos[1] - object_data['size'] // 2), + size=object_data['size'], + shadow=False + ) + else: + # Shattered + shatter_t = (t - 0.15) / 0.85 + + # Draw triangular shards + for piece in pieces[:min(10, len(pieces))]: + x = center_pos[0] + piece['vx'] * shatter_t * 30 + y = center_pos[1] + piece['vy'] * shatter_t * 30 + 0.5 * 200 * shatter_t ** 2 + + # Update rotation + rotation = piece['rotation_speed'] * shatter_t * 100 + + # Draw triangle shard + shard_size = piece['size'] * 2 + points = [] + for j in range(3): + angle = (rotation + j * 120) * math.pi / 180 + px = x + shard_size * math.cos(angle) + py = y + shard_size * math.sin(angle) + points.append((px, py)) + + alpha = 1.0 - shatter_t + if alpha > 0: + color = tuple(int(c * alpha) for c in piece['color']) + draw.polygon(points, fill=color) + + elif explode_type == 'dissolve': + # Dissolve into particles + dissolve_scale = interpolate(1.0, 0.0, t, 'ease_in') + + if dissolve_scale > 0.1: + # Draw fading object + if object_type == 'emoji': + size = int(object_data['size'] * dissolve_scale) + size = max(12, size) + + emoji_canvas = Image.new('RGBA', (frame_width, 
frame_height), (0, 0, 0, 0)) + draw_emoji_enhanced( + emoji_canvas, + emoji=object_data['emoji'], + position=(center_pos[0] - size // 2, center_pos[1] - size // 2), + size=size, + shadow=False + ) + + # Apply opacity + from templates.fade import apply_opacity + emoji_canvas = apply_opacity(emoji_canvas, dissolve_scale) + + frame_rgba = frame.convert('RGBA') + frame = Image.alpha_composite(frame_rgba, emoji_canvas) + frame = frame.convert('RGB') + draw = ImageDraw.Draw(frame) + + # Draw outward-moving particles + for piece in pieces: + x = center_pos[0] + piece['vx'] * t * 40 + y = center_pos[1] + piece['vy'] * t * 40 + + alpha = 1.0 - t + if alpha > 0: + color = tuple(int(c * alpha) for c in piece['color']) + size = int(piece['size'] * (1 - t * 0.5)) + draw.ellipse( + [x - size, y - size, x + size, y + size], + fill=color + ) + + elif explode_type == 'implode': + # Reverse explosion - pieces fly inward + if t < 0.7: + # Pieces converging + implode_t = 1.0 - (t / 0.7) + for piece in pieces: + x = center_pos[0] + piece['vx'] * implode_t * 50 + y = center_pos[1] + piece['vy'] * implode_t * 50 + + alpha = 1.0 - (1.0 - implode_t) * 0.5 + color = tuple(int(c * alpha) for c in piece['color']) + size = int(piece['size'] * alpha) + + draw.ellipse( + [x - size, y - size, x + size, y + size], + fill=color + ) + else: + # Object reforms + reform_t = (t - 0.7) / 0.3 + scale = interpolate(0.5, 1.0, reform_t, 'elastic_out') + + if object_type == 'emoji': + size = int(object_data['size'] * scale) + draw_emoji_enhanced( + frame, + emoji=object_data['emoji'], + position=(center_pos[0] - size // 2, center_pos[1] - size // 2), + size=size, + shadow=False + ) + + frames.append(frame) + + return frames + + +def create_particle_burst( + num_frames: int = 25, + particle_count: int = 30, + center_pos: tuple[int, int] = (240, 240), + colors: list[tuple[int, int, int]] | None = None, + frame_width: int = 480, + frame_height: int = 480, + bg_color: tuple[int, int, int] = (255, 255, 255) +) -> list[Image.Image]: + """ + Create simple particle burst effect. 
+ + Args: + num_frames: Number of frames + particle_count: Number of particles + center_pos: Burst center + colors: Particle colors (None for random) + frame_width: Frame width + frame_height: Frame height + bg_color: Background color + + Returns: + List of frames + """ + particles = ParticleSystem() + + # Emit particles + if colors is None: + from core.color_palettes import get_palette + palette = get_palette('vibrant') + colors = [palette['primary'], palette['secondary'], palette['accent']] + + for _ in range(particle_count): + color = random.choice(colors) + particles.emit( + center_pos[0], center_pos[1], + count=1, + speed=random.uniform(3, 8), + color=color, + lifetime=random.uniform(20, 30), + size=random.randint(3, 8), + shape='star' + ) + + frames = [] + for _ in range(num_frames): + frame = create_blank_frame(frame_width, frame_height, bg_color) + + particles.update() + particles.render(frame) + + frames.append(frame) + + return frames + + +# Example usage +if __name__ == '__main__': + print("Creating explode animations...") + + builder = GIFBuilder(width=480, height=480, fps=20) + + # Example 1: Burst + frames = create_explode_animation( + object_type='emoji', + object_data={'emoji': '💣', 'size': 100}, + num_frames=30, + explode_type='burst', + num_pieces=25 + ) + builder.add_frames(frames) + builder.save('explode_burst.gif', num_colors=128) + + # Example 2: Shatter + builder.clear() + frames = create_explode_animation( + object_type='emoji', + object_data={'emoji': '🪟', 'size': 100}, + num_frames=30, + explode_type='shatter', + num_pieces=12 + ) + builder.add_frames(frames) + builder.save('explode_shatter.gif', num_colors=128) + + # Example 3: Particle burst + builder.clear() + frames = create_particle_burst(num_frames=25, particle_count=40) + builder.add_frames(frames) + builder.save('explode_particles.gif', num_colors=128) + + print("Created explode animations!") diff --git a/skills/slack-gif-creator/templates/fade.py b/skills/slack-gif-creator/templates/fade.py new file mode 100755 index 000000000..fc7d0e303 --- /dev/null +++ b/skills/slack-gif-creator/templates/fade.py @@ -0,0 +1,329 @@ +#!/usr/bin/env python3 +""" +Fade Animation - Fade in, fade out, and crossfade effects. + +Creates smooth opacity transitions for appearing, disappearing, and transitioning. +""" + +import sys +from pathlib import Path + +sys.path.append(str(Path(__file__).parent.parent)) + +from PIL import Image, ImageDraw +import numpy as np +from core.gif_builder import GIFBuilder +from core.frame_composer import create_blank_frame, draw_emoji_enhanced +from core.easing import interpolate + + +def create_fade_animation( + object_type: str = 'emoji', + object_data: dict | None = None, + num_frames: int = 30, + fade_type: str = 'in', # 'in', 'out', 'in_out', 'blink' + easing: str = 'ease_in_out', + center_pos: tuple[int, int] = (240, 240), + frame_width: int = 480, + frame_height: int = 480, + bg_color: tuple[int, int, int] = (255, 255, 255) +) -> list[Image.Image]: + """ + Create fade animation. 
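+
+    Sketch of a text fade-out (the text keys are optional; defaults are applied
+    via .get):
+
+        frames = create_fade_animation(object_type='text',
+                                       object_data={'text': 'BYE', 'font_size': 60},
+                                       num_frames=30, fade_type='out')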
+ + Args: + object_type: 'emoji', 'text', 'image' + object_data: Object configuration + num_frames: Number of frames + fade_type: Type of fade effect + easing: Easing function + center_pos: Center position + frame_width: Frame width + frame_height: Frame height + bg_color: Background color + + Returns: + List of frames + """ + frames = [] + + # Default object data + if object_data is None: + if object_type == 'emoji': + object_data = {'emoji': '✨', 'size': 100} + + for i in range(num_frames): + t = i / (num_frames - 1) if num_frames > 1 else 0 + + # Calculate opacity based on fade type + if fade_type == 'in': + opacity = interpolate(0, 1, t, easing) + elif fade_type == 'out': + opacity = interpolate(1, 0, t, easing) + elif fade_type == 'in_out': + if t < 0.5: + opacity = interpolate(0, 1, t * 2, easing) + else: + opacity = interpolate(1, 0, (t - 0.5) * 2, easing) + elif fade_type == 'blink': + # Quick fade out and back in + if t < 0.2: + opacity = interpolate(1, 0, t / 0.2, 'ease_in') + elif t < 0.4: + opacity = interpolate(0, 1, (t - 0.2) / 0.2, 'ease_out') + else: + opacity = 1.0 + else: + opacity = interpolate(0, 1, t, easing) + + # Create background + frame_bg = create_blank_frame(frame_width, frame_height, bg_color) + + # Create object layer with transparency + if object_type == 'emoji': + # Create RGBA canvas for emoji + emoji_canvas = Image.new('RGBA', (frame_width, frame_height), (0, 0, 0, 0)) + emoji_size = object_data['size'] + draw_emoji_enhanced( + emoji_canvas, + emoji=object_data['emoji'], + position=(center_pos[0] - emoji_size // 2, center_pos[1] - emoji_size // 2), + size=emoji_size, + shadow=object_data.get('shadow', False) + ) + + # Apply opacity + emoji_canvas = apply_opacity(emoji_canvas, opacity) + + # Composite onto background + frame_bg_rgba = frame_bg.convert('RGBA') + frame = Image.alpha_composite(frame_bg_rgba, emoji_canvas) + frame = frame.convert('RGB') + + elif object_type == 'text': + from core.typography import draw_text_with_outline + + # Create text on separate layer + text_canvas = Image.new('RGBA', (frame_width, frame_height), (0, 0, 0, 0)) + text_canvas_rgb = text_canvas.convert('RGB') + text_canvas_rgb.paste(bg_color, (0, 0, frame_width, frame_height)) + + draw_text_with_outline( + text_canvas_rgb, + text=object_data.get('text', 'FADE'), + position=center_pos, + font_size=object_data.get('font_size', 60), + text_color=object_data.get('text_color', (0, 0, 0)), + outline_color=object_data.get('outline_color', (255, 255, 255)), + outline_width=3, + centered=True + ) + + # Convert to RGBA and make background transparent + text_canvas = text_canvas_rgb.convert('RGBA') + data = text_canvas.getdata() + new_data = [] + for item in data: + if item[:3] == bg_color: + new_data.append((255, 255, 255, 0)) + else: + new_data.append(item) + text_canvas.putdata(new_data) + + # Apply opacity + text_canvas = apply_opacity(text_canvas, opacity) + + # Composite + frame_bg_rgba = frame_bg.convert('RGBA') + frame = Image.alpha_composite(frame_bg_rgba, text_canvas) + frame = frame.convert('RGB') + + else: + frame = frame_bg + + frames.append(frame) + + return frames + + +def apply_opacity(image: Image.Image, opacity: float) -> Image.Image: + """ + Apply opacity to an RGBA image. 
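+
+    Sketch: dim an RGBA layer to 40% before compositing it over a background.
+
+        layer = apply_opacity(layer, 0.4)
+        frame = Image.alpha_composite(background.convert('RGBA'), layer).convert('RGB')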
+ + Args: + image: RGBA image + opacity: Opacity value (0.0 to 1.0) + + Returns: + Image with adjusted opacity + """ + if image.mode != 'RGBA': + image = image.convert('RGBA') + + # Get alpha channel + r, g, b, a = image.split() + + # Multiply alpha by opacity + a_array = np.array(a, dtype=np.float32) + a_array = a_array * opacity + a = Image.fromarray(a_array.astype(np.uint8)) + + # Merge back + return Image.merge('RGBA', (r, g, b, a)) + + +def create_crossfade( + object1_data: dict, + object2_data: dict, + num_frames: int = 30, + easing: str = 'ease_in_out', + object_type: str = 'emoji', + center_pos: tuple[int, int] = (240, 240), + frame_width: int = 480, + frame_height: int = 480, + bg_color: tuple[int, int, int] = (255, 255, 255) +) -> list[Image.Image]: + """ + Crossfade between two objects. + + Args: + object1_data: First object configuration + object2_data: Second object configuration + num_frames: Number of frames + easing: Easing function + object_type: Type of objects + center_pos: Center position + frame_width: Frame width + frame_height: Frame height + bg_color: Background color + + Returns: + List of frames + """ + frames = [] + + for i in range(num_frames): + t = i / (num_frames - 1) if num_frames > 1 else 0 + + # Calculate opacities + opacity1 = interpolate(1, 0, t, easing) + opacity2 = interpolate(0, 1, t, easing) + + # Create background + frame = create_blank_frame(frame_width, frame_height, bg_color) + + if object_type == 'emoji': + # Create first emoji + emoji1_canvas = Image.new('RGBA', (frame_width, frame_height), (0, 0, 0, 0)) + size1 = object1_data['size'] + draw_emoji_enhanced( + emoji1_canvas, + emoji=object1_data['emoji'], + position=(center_pos[0] - size1 // 2, center_pos[1] - size1 // 2), + size=size1, + shadow=False + ) + emoji1_canvas = apply_opacity(emoji1_canvas, opacity1) + + # Create second emoji + emoji2_canvas = Image.new('RGBA', (frame_width, frame_height), (0, 0, 0, 0)) + size2 = object2_data['size'] + draw_emoji_enhanced( + emoji2_canvas, + emoji=object2_data['emoji'], + position=(center_pos[0] - size2 // 2, center_pos[1] - size2 // 2), + size=size2, + shadow=False + ) + emoji2_canvas = apply_opacity(emoji2_canvas, opacity2) + + # Composite both + frame_rgba = frame.convert('RGBA') + frame_rgba = Image.alpha_composite(frame_rgba, emoji1_canvas) + frame_rgba = Image.alpha_composite(frame_rgba, emoji2_canvas) + frame = frame_rgba.convert('RGB') + + frames.append(frame) + + return frames + + +def create_fade_to_color( + start_color: tuple[int, int, int], + end_color: tuple[int, int, int], + num_frames: int = 20, + easing: str = 'linear', + frame_width: int = 480, + frame_height: int = 480 +) -> list[Image.Image]: + """ + Fade from one solid color to another. 
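+
+    Sketch: a 20-frame fade from white to black.
+
+        frames = create_fade_to_color((255, 255, 255), (0, 0, 0), num_frames=20)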
+ + Args: + start_color: Starting RGB color + end_color: Ending RGB color + num_frames: Number of frames + easing: Easing function + frame_width: Frame width + frame_height: Frame height + + Returns: + List of frames + """ + frames = [] + + for i in range(num_frames): + t = i / (num_frames - 1) if num_frames > 1 else 0 + + # Interpolate each color channel + r = int(interpolate(start_color[0], end_color[0], t, easing)) + g = int(interpolate(start_color[1], end_color[1], t, easing)) + b = int(interpolate(start_color[2], end_color[2], t, easing)) + + color = (r, g, b) + frame = create_blank_frame(frame_width, frame_height, color) + frames.append(frame) + + return frames + + +# Example usage +if __name__ == '__main__': + print("Creating fade animations...") + + builder = GIFBuilder(width=480, height=480, fps=20) + + # Example 1: Fade in + frames = create_fade_animation( + object_type='emoji', + object_data={'emoji': '✨', 'size': 120}, + num_frames=30, + fade_type='in', + easing='ease_out' + ) + builder.add_frames(frames) + builder.save('fade_in.gif', num_colors=128) + + # Example 2: Crossfade + builder.clear() + frames = create_crossfade( + object1_data={'emoji': '😊', 'size': 100}, + object2_data={'emoji': '😂', 'size': 100}, + num_frames=30, + object_type='emoji' + ) + builder.add_frames(frames) + builder.save('fade_crossfade.gif', num_colors=128) + + # Example 3: Blink + builder.clear() + frames = create_fade_animation( + object_type='emoji', + object_data={'emoji': '👀', 'size': 100}, + num_frames=20, + fade_type='blink' + ) + builder.add_frames(frames) + builder.save('fade_blink.gif', num_colors=128) + + print("Created fade animations!") diff --git a/skills/slack-gif-creator/templates/flip.py b/skills/slack-gif-creator/templates/flip.py new file mode 100755 index 000000000..371d393a1 --- /dev/null +++ b/skills/slack-gif-creator/templates/flip.py @@ -0,0 +1,291 @@ +#!/usr/bin/env python3 +""" +Flip Animation - 3D-style card flip and rotation effects. + +Creates horizontal and vertical flips with perspective. +""" + +import sys +from pathlib import Path +import math + +sys.path.append(str(Path(__file__).parent.parent)) + +from PIL import Image +from core.gif_builder import GIFBuilder +from core.frame_composer import create_blank_frame, draw_emoji_enhanced +from core.easing import interpolate + + +def create_flip_animation( + object1_data: dict, + object2_data: dict | None = None, + num_frames: int = 30, + flip_axis: str = 'horizontal', # 'horizontal', 'vertical' + easing: str = 'ease_in_out', + object_type: str = 'emoji', + center_pos: tuple[int, int] = (240, 240), + frame_width: int = 480, + frame_height: int = 480, + bg_color: tuple[int, int, int] = (255, 255, 255) +) -> list[Image.Image]: + """ + Create 3D-style flip animation. 
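+
+    Sketch of a vertical coin-style flip between two emoji (illustrative values;
+    more variants are in the __main__ block below):
+
+        frames = create_flip_animation({'emoji': '🙂', 'size': 120},
+                                       {'emoji': '🙃', 'size': 120},
+                                       num_frames=30, flip_axis='vertical')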
+ + Args: + object1_data: First object (front side) + object2_data: Second object (back side, None = same as front) + num_frames: Number of frames + flip_axis: Axis to flip around + easing: Easing function + object_type: Type of objects + center_pos: Center position + frame_width: Frame width + frame_height: Frame height + bg_color: Background color + + Returns: + List of frames + """ + frames = [] + + if object2_data is None: + object2_data = object1_data + + for i in range(num_frames): + t = i / (num_frames - 1) if num_frames > 1 else 0 + frame = create_blank_frame(frame_width, frame_height, bg_color) + + # Calculate rotation angle (0 to 180 degrees) + angle = interpolate(0, 180, t, easing) + + # Determine which side is visible and calculate scale + if angle < 90: + # Front side visible + current_object = object1_data + scale_factor = math.cos(math.radians(angle)) + else: + # Back side visible + current_object = object2_data + scale_factor = abs(math.cos(math.radians(angle))) + + # Don't draw when edge-on (very thin) + if scale_factor < 0.05: + frames.append(frame) + continue + + if object_type == 'emoji': + size = current_object['size'] + + # Create emoji on canvas + canvas_size = size * 2 + emoji_canvas = Image.new('RGBA', (canvas_size, canvas_size), (0, 0, 0, 0)) + + draw_emoji_enhanced( + emoji_canvas, + emoji=current_object['emoji'], + position=(canvas_size // 2 - size // 2, canvas_size // 2 - size // 2), + size=size, + shadow=False + ) + + # Apply flip scaling + if flip_axis == 'horizontal': + # Scale horizontally for horizontal flip + new_width = max(1, int(canvas_size * scale_factor)) + new_height = canvas_size + else: + # Scale vertically for vertical flip + new_width = canvas_size + new_height = max(1, int(canvas_size * scale_factor)) + + # Resize to simulate 3D rotation + emoji_scaled = emoji_canvas.resize((new_width, new_height), Image.LANCZOS) + + # Position centered + paste_x = center_pos[0] - new_width // 2 + paste_y = center_pos[1] - new_height // 2 + + # Composite onto frame + frame_rgba = frame.convert('RGBA') + frame_rgba.paste(emoji_scaled, (paste_x, paste_y), emoji_scaled) + frame = frame_rgba.convert('RGB') + + elif object_type == 'text': + from core.typography import draw_text_with_outline + + # Create text on canvas + text = current_object.get('text', 'FLIP') + font_size = current_object.get('font_size', 50) + + canvas_size = max(frame_width, frame_height) + text_canvas = Image.new('RGBA', (canvas_size, canvas_size), (0, 0, 0, 0)) + + # Draw on RGB for text rendering + text_canvas_rgb = text_canvas.convert('RGB') + text_canvas_rgb.paste(bg_color, (0, 0, canvas_size, canvas_size)) + + draw_text_with_outline( + text_canvas_rgb, + text=text, + position=(canvas_size // 2, canvas_size // 2), + font_size=font_size, + text_color=current_object.get('text_color', (0, 0, 0)), + outline_color=current_object.get('outline_color', (255, 255, 255)), + outline_width=3, + centered=True + ) + + # Make background transparent + text_canvas = text_canvas_rgb.convert('RGBA') + data = text_canvas.getdata() + new_data = [] + for item in data: + if item[:3] == bg_color: + new_data.append((255, 255, 255, 0)) + else: + new_data.append(item) + text_canvas.putdata(new_data) + + # Apply flip scaling + if flip_axis == 'horizontal': + new_width = max(1, int(canvas_size * scale_factor)) + new_height = canvas_size + else: + new_width = canvas_size + new_height = max(1, int(canvas_size * scale_factor)) + + text_scaled = text_canvas.resize((new_width, new_height), Image.LANCZOS) + + # Center and 
crop + if flip_axis == 'horizontal': + left = (new_width - frame_width) // 2 if new_width > frame_width else 0 + top = (canvas_size - frame_height) // 2 + paste_x = center_pos[0] - min(new_width, frame_width) // 2 + paste_y = 0 + + text_cropped = text_scaled.crop(( + left, + top, + left + min(new_width, frame_width), + top + frame_height + )) + else: + left = (canvas_size - frame_width) // 2 + top = (new_height - frame_height) // 2 if new_height > frame_height else 0 + paste_x = 0 + paste_y = center_pos[1] - min(new_height, frame_height) // 2 + + text_cropped = text_scaled.crop(( + left, + top, + left + frame_width, + top + min(new_height, frame_height) + )) + + frame_rgba = frame.convert('RGBA') + frame_rgba.paste(text_cropped, (paste_x, paste_y), text_cropped) + frame = frame_rgba.convert('RGB') + + frames.append(frame) + + return frames + + +def create_quick_flip( + emoji_front: str, + emoji_back: str, + num_frames: int = 20, + frame_size: int = 128 +) -> list[Image.Image]: + """ + Create quick flip for emoji GIFs. + + Args: + emoji_front: Front emoji + emoji_back: Back emoji + num_frames: Number of frames + frame_size: Frame size (square) + + Returns: + List of frames + """ + return create_flip_animation( + object1_data={'emoji': emoji_front, 'size': 80}, + object2_data={'emoji': emoji_back, 'size': 80}, + num_frames=num_frames, + flip_axis='horizontal', + easing='ease_in_out', + object_type='emoji', + center_pos=(frame_size // 2, frame_size // 2), + frame_width=frame_size, + frame_height=frame_size, + bg_color=(255, 255, 255) + ) + + +def create_nope_flip( + num_frames: int = 25, + frame_width: int = 480, + frame_height: int = 480 +) -> list[Image.Image]: + """ + Create "nope" reaction flip (like flipping table). + + Args: + num_frames: Number of frames + frame_width: Frame width + frame_height: Frame height + + Returns: + List of frames + """ + return create_flip_animation( + object1_data={'text': 'NOPE', 'font_size': 80, 'text_color': (255, 50, 50)}, + object2_data={'text': 'NOPE', 'font_size': 80, 'text_color': (255, 50, 50)}, + num_frames=num_frames, + flip_axis='horizontal', + easing='ease_out', + object_type='text', + frame_width=frame_width, + frame_height=frame_height, + bg_color=(255, 255, 255) + ) + + +# Example usage +if __name__ == '__main__': + print("Creating flip animations...") + + builder = GIFBuilder(width=480, height=480, fps=20) + + # Example 1: Emoji flip + frames = create_flip_animation( + object1_data={'emoji': '😊', 'size': 120}, + object2_data={'emoji': '😂', 'size': 120}, + num_frames=30, + flip_axis='horizontal', + object_type='emoji' + ) + builder.add_frames(frames) + builder.save('flip_emoji.gif', num_colors=128) + + # Example 2: Text flip + builder.clear() + frames = create_flip_animation( + object1_data={'text': 'YES', 'font_size': 80, 'text_color': (100, 200, 100)}, + object2_data={'text': 'NO', 'font_size': 80, 'text_color': (200, 100, 100)}, + num_frames=30, + flip_axis='vertical', + object_type='text' + ) + builder.add_frames(frames) + builder.save('flip_text.gif', num_colors=128) + + # Example 3: Quick flip (emoji size) + builder = GIFBuilder(width=128, height=128, fps=15) + frames = create_quick_flip('👍', '👎', num_frames=20) + builder.add_frames(frames) + builder.save('flip_quick.gif', num_colors=48, optimize_for_emoji=True) + + print("Created flip animations!") diff --git a/skills/slack-gif-creator/templates/kaleidoscope.py b/skills/slack-gif-creator/templates/kaleidoscope.py new file mode 100755 index 000000000..4cdcdf075 --- /dev/null +++ 
b/skills/slack-gif-creator/templates/kaleidoscope.py @@ -0,0 +1,211 @@ +#!/usr/bin/env python3 +""" +Kaleidoscope Effect - Create mirror/rotation effects. + +Apply kaleidoscope effects to frames or objects for psychedelic visuals. +""" + +import sys +from pathlib import Path +import math + +sys.path.append(str(Path(__file__).parent.parent)) + +from PIL import Image, ImageOps, ImageDraw +import numpy as np + + +def apply_kaleidoscope(frame: Image.Image, segments: int = 8, + center: tuple[int, int] | None = None) -> Image.Image: + """ + Apply kaleidoscope effect by mirroring/rotating frame sections. + + Args: + frame: Input frame + segments: Number of mirror segments (4, 6, 8, 12 work well) + center: Center point for effect (None = frame center) + + Returns: + Frame with kaleidoscope effect + """ + width, height = frame.size + + if center is None: + center = (width // 2, height // 2) + + # Create output frame + output = Image.new('RGB', (width, height)) + + # Calculate angle per segment + angle_per_segment = 360 / segments + + # For simplicity, we'll create a radial mirror effect + # A full implementation would rotate and mirror properly + # This is a simplified version that creates interesting patterns + + # Convert to numpy for easier manipulation + frame_array = np.array(frame) + output_array = np.zeros_like(frame_array) + + center_x, center_y = center + + # Create wedge mask and mirror it + for y in range(height): + for x in range(width): + # Calculate angle from center + dx = x - center_x + dy = y - center_y + + angle = (math.degrees(math.atan2(dy, dx)) + 180) % 360 + distance = math.sqrt(dx * dx + dy * dy) + + # Which segment does this pixel belong to? + segment = int(angle / angle_per_segment) + + # Mirror angle within segment + segment_angle = angle % angle_per_segment + if segment % 2 == 1: # Mirror every other segment + segment_angle = angle_per_segment - segment_angle + + # Calculate source position + source_angle = segment_angle + (segment // 2) * angle_per_segment * 2 + source_angle_rad = math.radians(source_angle - 180) + + source_x = int(center_x + distance * math.cos(source_angle_rad)) + source_y = int(center_y + distance * math.sin(source_angle_rad)) + + # Bounds check + if 0 <= source_x < width and 0 <= source_y < height: + output_array[y, x] = frame_array[source_y, source_x] + else: + output_array[y, x] = frame_array[y, x] + + return Image.fromarray(output_array) + + +def apply_simple_mirror(frame: Image.Image, mode: str = 'quad') -> Image.Image: + """ + Apply simple mirror effect (faster than full kaleidoscope). 
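+
+    Sketch: four-way mirror of an existing frame.
+
+        mirrored = apply_simple_mirror(frame, mode='quad')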
+ + Args: + frame: Input frame + mode: 'horizontal', 'vertical', 'quad' (4-way), 'radial' + + Returns: + Mirrored frame + """ + width, height = frame.size + center_x, center_y = width // 2, height // 2 + + if mode == 'horizontal': + # Mirror left half to right + left_half = frame.crop((0, 0, center_x, height)) + left_flipped = ImageOps.mirror(left_half) + result = frame.copy() + result.paste(left_flipped, (center_x, 0)) + return result + + elif mode == 'vertical': + # Mirror top half to bottom + top_half = frame.crop((0, 0, width, center_y)) + top_flipped = ImageOps.flip(top_half) + result = frame.copy() + result.paste(top_flipped, (0, center_y)) + return result + + elif mode == 'quad': + # 4-way mirror (top-left quadrant mirrored to all) + quad = frame.crop((0, 0, center_x, center_y)) + + result = Image.new('RGB', (width, height)) + + # Top-left (original) + result.paste(quad, (0, 0)) + + # Top-right (horizontal mirror) + result.paste(ImageOps.mirror(quad), (center_x, 0)) + + # Bottom-left (vertical mirror) + result.paste(ImageOps.flip(quad), (0, center_y)) + + # Bottom-right (both mirrors) + result.paste(ImageOps.flip(ImageOps.mirror(quad)), (center_x, center_y)) + + return result + + else: + return frame + + +def create_kaleidoscope_animation( + base_frame: Image.Image | None = None, + num_frames: int = 30, + segments: int = 8, + rotation_speed: float = 1.0, + width: int = 480, + height: int = 480 +) -> list[Image.Image]: + """ + Create animated kaleidoscope effect. + + Args: + base_frame: Frame to apply effect to (or None for demo pattern) + num_frames: Number of frames + segments: Kaleidoscope segments + rotation_speed: How fast pattern rotates (0.5-2.0) + width: Frame width if generating demo + height: Frame height if generating demo + + Returns: + List of frames with kaleidoscope effect + """ + frames = [] + + # Create demo pattern if no base frame + if base_frame is None: + base_frame = Image.new('RGB', (width, height), (255, 255, 255)) + draw = ImageDraw.Draw(base_frame) + + # Draw some colored shapes + from core.color_palettes import get_palette + palette = get_palette('vibrant') + + colors = [palette['primary'], palette['secondary'], palette['accent']] + + for i, color in enumerate(colors): + x = width // 2 + int(100 * math.cos(i * 2 * math.pi / 3)) + y = height // 2 + int(100 * math.sin(i * 2 * math.pi / 3)) + draw.ellipse([x - 40, y - 40, x + 40, y + 40], fill=color) + + # Rotate base frame and apply kaleidoscope + for i in range(num_frames): + angle = (i / num_frames) * 360 * rotation_speed + + # Rotate base frame + rotated = base_frame.rotate(angle, resample=Image.BICUBIC) + + # Apply kaleidoscope + kaleido_frame = apply_kaleidoscope(rotated, segments=segments) + + frames.append(kaleido_frame) + + return frames + + +# Example usage +if __name__ == '__main__': + from core.gif_builder import GIFBuilder + + print("Creating kaleidoscope GIF...") + + builder = GIFBuilder(width=480, height=480, fps=20) + + # Create kaleidoscope animation + frames = create_kaleidoscope_animation( + num_frames=40, + segments=8, + rotation_speed=0.5 + ) + + builder.add_frames(frames) + builder.save('kaleidoscope_test.gif', num_colors=128) diff --git a/skills/slack-gif-creator/templates/morph.py b/skills/slack-gif-creator/templates/morph.py new file mode 100755 index 000000000..3c8c46e8f --- /dev/null +++ b/skills/slack-gif-creator/templates/morph.py @@ -0,0 +1,329 @@ +#!/usr/bin/env python3 +""" +Morph Animation - Transform between different emojis or shapes. 
+ +Creates smooth transitions and transformations. +""" + +import sys +from pathlib import Path + +sys.path.append(str(Path(__file__).parent.parent)) + +from PIL import Image +import numpy as np +from core.gif_builder import GIFBuilder +from core.frame_composer import create_blank_frame, draw_emoji_enhanced, draw_circle +from core.easing import interpolate + + +def create_morph_animation( + object1_data: dict, + object2_data: dict, + num_frames: int = 30, + morph_type: str = 'crossfade', # 'crossfade', 'scale', 'spin_morph' + easing: str = 'ease_in_out', + object_type: str = 'emoji', + center_pos: tuple[int, int] = (240, 240), + frame_width: int = 480, + frame_height: int = 480, + bg_color: tuple[int, int, int] = (255, 255, 255) +) -> list[Image.Image]: + """ + Create morphing animation between two objects. + + Args: + object1_data: First object configuration + object2_data: Second object configuration + num_frames: Number of frames + morph_type: Type of morph effect + easing: Easing function + object_type: Type of objects + center_pos: Center position + frame_width: Frame width + frame_height: Frame height + bg_color: Background color + + Returns: + List of frames + """ + frames = [] + + for i in range(num_frames): + t = i / (num_frames - 1) if num_frames > 1 else 0 + frame = create_blank_frame(frame_width, frame_height, bg_color) + + if morph_type == 'crossfade': + # Simple crossfade between two objects + opacity1 = interpolate(1, 0, t, easing) + opacity2 = interpolate(0, 1, t, easing) + + if object_type == 'emoji': + # Create first emoji + emoji1_canvas = Image.new('RGBA', (frame_width, frame_height), (0, 0, 0, 0)) + size1 = object1_data['size'] + draw_emoji_enhanced( + emoji1_canvas, + emoji=object1_data['emoji'], + position=(center_pos[0] - size1 // 2, center_pos[1] - size1 // 2), + size=size1, + shadow=False + ) + + # Apply opacity + from templates.fade import apply_opacity + emoji1_canvas = apply_opacity(emoji1_canvas, opacity1) + + # Create second emoji + emoji2_canvas = Image.new('RGBA', (frame_width, frame_height), (0, 0, 0, 0)) + size2 = object2_data['size'] + draw_emoji_enhanced( + emoji2_canvas, + emoji=object2_data['emoji'], + position=(center_pos[0] - size2 // 2, center_pos[1] - size2 // 2), + size=size2, + shadow=False + ) + + emoji2_canvas = apply_opacity(emoji2_canvas, opacity2) + + # Composite both + frame_rgba = frame.convert('RGBA') + frame_rgba = Image.alpha_composite(frame_rgba, emoji1_canvas) + frame_rgba = Image.alpha_composite(frame_rgba, emoji2_canvas) + frame = frame_rgba.convert('RGB') + + elif object_type == 'circle': + # Morph between two circles + radius1 = object1_data['radius'] + radius2 = object2_data['radius'] + color1 = object1_data['color'] + color2 = object2_data['color'] + + # Interpolate properties + current_radius = int(interpolate(radius1, radius2, t, easing)) + current_color = tuple( + int(interpolate(color1[i], color2[i], t, easing)) + for i in range(3) + ) + + draw_circle(frame, center_pos, current_radius, fill_color=current_color) + + elif morph_type == 'scale': + # First object scales down as second scales up + if object_type == 'emoji': + scale1 = interpolate(1.0, 0.0, t, easing) + scale2 = interpolate(0.0, 1.0, t, easing) + + # Draw first emoji (shrinking) + if scale1 > 0.05: + size1 = int(object1_data['size'] * scale1) + size1 = max(12, size1) + emoji1_canvas = Image.new('RGBA', (frame_width, frame_height), (0, 0, 0, 0)) + draw_emoji_enhanced( + emoji1_canvas, + emoji=object1_data['emoji'], + position=(center_pos[0] - size1 // 2, 
center_pos[1] - size1 // 2), + size=size1, + shadow=False + ) + + frame_rgba = frame.convert('RGBA') + frame = Image.alpha_composite(frame_rgba, emoji1_canvas) + frame = frame.convert('RGB') + + # Draw second emoji (growing) + if scale2 > 0.05: + size2 = int(object2_data['size'] * scale2) + size2 = max(12, size2) + emoji2_canvas = Image.new('RGBA', (frame_width, frame_height), (0, 0, 0, 0)) + draw_emoji_enhanced( + emoji2_canvas, + emoji=object2_data['emoji'], + position=(center_pos[0] - size2 // 2, center_pos[1] - size2 // 2), + size=size2, + shadow=False + ) + + frame_rgba = frame.convert('RGBA') + frame = Image.alpha_composite(frame_rgba, emoji2_canvas) + frame = frame.convert('RGB') + + elif morph_type == 'spin_morph': + # Spin while morphing (flip-like) + import math + + # Calculate rotation (0 to 180 degrees) + angle = interpolate(0, 180, t, easing) + scale_factor = abs(math.cos(math.radians(angle))) + + # Determine which object to show + if angle < 90: + current_object = object1_data + else: + current_object = object2_data + + # Skip when edge-on + if scale_factor < 0.05: + frames.append(frame) + continue + + if object_type == 'emoji': + size = current_object['size'] + canvas_size = size * 2 + emoji_canvas = Image.new('RGBA', (canvas_size, canvas_size), (0, 0, 0, 0)) + + draw_emoji_enhanced( + emoji_canvas, + emoji=current_object['emoji'], + position=(canvas_size // 2 - size // 2, canvas_size // 2 - size // 2), + size=size, + shadow=False + ) + + # Scale horizontally for spin effect + new_width = max(1, int(canvas_size * scale_factor)) + emoji_scaled = emoji_canvas.resize((new_width, canvas_size), Image.LANCZOS) + + paste_x = center_pos[0] - new_width // 2 + paste_y = center_pos[1] - canvas_size // 2 + + frame_rgba = frame.convert('RGBA') + frame_rgba.paste(emoji_scaled, (paste_x, paste_y), emoji_scaled) + frame = frame_rgba.convert('RGB') + + frames.append(frame) + + return frames + + +def create_reaction_morph( + emoji_start: str, + emoji_end: str, + num_frames: int = 20, + frame_size: int = 128 +) -> list[Image.Image]: + """ + Create quick emoji reaction morph (for emoji GIFs). + + Args: + emoji_start: Starting emoji + emoji_end: Ending emoji + num_frames: Number of frames + frame_size: Frame size (square) + + Returns: + List of frames + """ + return create_morph_animation( + object1_data={'emoji': emoji_start, 'size': 80}, + object2_data={'emoji': emoji_end, 'size': 80}, + num_frames=num_frames, + morph_type='crossfade', + easing='ease_in_out', + object_type='emoji', + center_pos=(frame_size // 2, frame_size // 2), + frame_width=frame_size, + frame_height=frame_size, + bg_color=(255, 255, 255) + ) + + +def create_shape_morph( + shapes: list[dict], + num_frames: int = 60, + frames_per_shape: int = 20, + frame_width: int = 480, + frame_height: int = 480, + bg_color: tuple[int, int, int] = (255, 255, 255) +) -> list[Image.Image]: + """ + Morph through a sequence of shapes. 
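+
+    Sketch (each dict needs 'radius' and 'color'; the colors here are illustrative):
+
+        shapes = [{'radius': 60, 'color': (255, 99, 71)},
+                  {'radius': 80, 'color': (65, 105, 225)}]
+        frames = create_shape_morph(shapes, num_frames=40, frames_per_shape=20)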
+ + Args: + shapes: List of shape dicts with 'radius' and 'color' + num_frames: Total number of frames + frames_per_shape: Frames to spend on each morph + frame_width: Frame width + frame_height: Frame height + bg_color: Background color + + Returns: + List of frames + """ + frames = [] + center = (frame_width // 2, frame_height // 2) + + for i in range(num_frames): + # Determine which shapes we're morphing between + cycle_progress = (i % (frames_per_shape * len(shapes))) / frames_per_shape + shape_idx = int(cycle_progress) % len(shapes) + next_shape_idx = (shape_idx + 1) % len(shapes) + + # Progress between these two shapes + t = cycle_progress - shape_idx + + shape1 = shapes[shape_idx] + shape2 = shapes[next_shape_idx] + + # Interpolate properties + radius = int(interpolate(shape1['radius'], shape2['radius'], t, 'ease_in_out')) + color = tuple( + int(interpolate(shape1['color'][j], shape2['color'][j], t, 'ease_in_out')) + for j in range(3) + ) + + # Draw frame + frame = create_blank_frame(frame_width, frame_height, bg_color) + draw_circle(frame, center, radius, fill_color=color) + + frames.append(frame) + + return frames + + +# Example usage +if __name__ == '__main__': + print("Creating morph animations...") + + builder = GIFBuilder(width=480, height=480, fps=20) + + # Example 1: Crossfade morph + frames = create_morph_animation( + object1_data={'emoji': '😊', 'size': 100}, + object2_data={'emoji': '😂', 'size': 100}, + num_frames=30, + morph_type='crossfade', + object_type='emoji' + ) + builder.add_frames(frames) + builder.save('morph_crossfade.gif', num_colors=128) + + # Example 2: Scale morph + builder.clear() + frames = create_morph_animation( + object1_data={'emoji': '🌙', 'size': 100}, + object2_data={'emoji': '☀️', 'size': 100}, + num_frames=40, + morph_type='scale', + object_type='emoji' + ) + builder.add_frames(frames) + builder.save('morph_scale.gif', num_colors=128) + + # Example 3: Shape morph cycle + builder.clear() + from core.color_palettes import get_palette + palette = get_palette('vibrant') + + shapes = [ + {'radius': 60, 'color': palette['primary']}, + {'radius': 80, 'color': palette['secondary']}, + {'radius': 50, 'color': palette['accent']}, + {'radius': 70, 'color': palette['success']} + ] + frames = create_shape_morph(shapes, num_frames=80, frames_per_shape=20) + builder.add_frames(frames) + builder.save('morph_shapes.gif', num_colors=64) + + print("Created morph animations!") diff --git a/skills/slack-gif-creator/templates/move.py b/skills/slack-gif-creator/templates/move.py new file mode 100755 index 000000000..e569cc7c9 --- /dev/null +++ b/skills/slack-gif-creator/templates/move.py @@ -0,0 +1,293 @@ +#!/usr/bin/env python3 +""" +Move Animation - Move objects along paths with various motion types. + +Provides flexible movement primitives for objects along linear, arc, or custom paths. 
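+
+A minimal sketch of wave motion (keyword names as documented on
+create_move_animation below; the emoji and amplitudes are illustrative):
+
+    frames = create_move_animation(object_type='emoji',
+                                   object_data={'emoji': '🐝', 'size': 50},
+                                   motion_type='wave',
+                                   motion_params={'wave_amplitude': 60,
+                                                  'wave_frequency': 3})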
+""" + +import sys +from pathlib import Path +import math + +sys.path.append(str(Path(__file__).parent.parent)) + +from core.gif_builder import GIFBuilder +from core.frame_composer import create_blank_frame, draw_circle, draw_emoji_enhanced +from core.easing import interpolate, calculate_arc_motion + + +def create_move_animation( + object_type: str = 'emoji', + object_data: dict | None = None, + start_pos: tuple[int, int] = (50, 240), + end_pos: tuple[int, int] = (430, 240), + num_frames: int = 30, + motion_type: str = 'linear', # 'linear', 'arc', 'bezier', 'circle', 'wave' + easing: str = 'ease_out', + motion_params: dict | None = None, + frame_width: int = 480, + frame_height: int = 480, + bg_color: tuple[int, int, int] = (255, 255, 255) +) -> list: + """ + Create frames showing object moving along a path. + + Args: + object_type: 'circle', 'emoji', or 'custom' + object_data: Data for the object + start_pos: Starting (x, y) position + end_pos: Ending (x, y) position + num_frames: Number of frames + motion_type: Type of motion path + easing: Easing function name + motion_params: Additional parameters for motion (e.g., {'arc_height': 100}) + frame_width: Frame width + frame_height: Frame height + bg_color: Background color + + Returns: + List of frames + """ + frames = [] + + # Default object data + if object_data is None: + if object_type == 'circle': + object_data = {'radius': 30, 'color': (100, 150, 255)} + elif object_type == 'emoji': + object_data = {'emoji': '🚀', 'size': 60} + + # Default motion params + if motion_params is None: + motion_params = {} + + for i in range(num_frames): + frame = create_blank_frame(frame_width, frame_height, bg_color) + + t = i / (num_frames - 1) if num_frames > 1 else 0 + + # Calculate position based on motion type + if motion_type == 'linear': + # Straight line with easing + x = interpolate(start_pos[0], end_pos[0], t, easing) + y = interpolate(start_pos[1], end_pos[1], t, easing) + + elif motion_type == 'arc': + # Parabolic arc + arc_height = motion_params.get('arc_height', 100) + x, y = calculate_arc_motion(start_pos, end_pos, arc_height, t) + + elif motion_type == 'circle': + # Circular motion around a center + center = motion_params.get('center', (frame_width // 2, frame_height // 2)) + radius = motion_params.get('radius', 150) + start_angle = motion_params.get('start_angle', 0) + angle_range = motion_params.get('angle_range', 360) # Full circle + + angle = start_angle + (angle_range * t) + angle_rad = math.radians(angle) + + x = center[0] + radius * math.cos(angle_rad) + y = center[1] + radius * math.sin(angle_rad) + + elif motion_type == 'wave': + # Move in straight line but add wave motion + wave_amplitude = motion_params.get('wave_amplitude', 50) + wave_frequency = motion_params.get('wave_frequency', 2) + + # Base linear motion + base_x = interpolate(start_pos[0], end_pos[0], t, easing) + base_y = interpolate(start_pos[1], end_pos[1], t, easing) + + # Add wave offset perpendicular to motion direction + dx = end_pos[0] - start_pos[0] + dy = end_pos[1] - start_pos[1] + length = math.sqrt(dx * dx + dy * dy) + + if length > 0: + # Perpendicular direction + perp_x = -dy / length + perp_y = dx / length + + # Wave offset + wave_offset = math.sin(t * wave_frequency * 2 * math.pi) * wave_amplitude + + x = base_x + perp_x * wave_offset + y = base_y + perp_y * wave_offset + else: + x, y = base_x, base_y + + elif motion_type == 'bezier': + # Quadratic bezier curve + control_point = motion_params.get('control_point', ( + (start_pos[0] + end_pos[0]) // 2, + 
(start_pos[1] + end_pos[1]) // 2 - 100 + )) + + # Quadratic Bezier formula: B(t) = (1-t)²P0 + 2(1-t)tP1 + t²P2 + x = (1 - t) ** 2 * start_pos[0] + 2 * (1 - t) * t * control_point[0] + t ** 2 * end_pos[0] + y = (1 - t) ** 2 * start_pos[1] + 2 * (1 - t) * t * control_point[1] + t ** 2 * end_pos[1] + + else: + # Default to linear + x = interpolate(start_pos[0], end_pos[0], t, easing) + y = interpolate(start_pos[1], end_pos[1], t, easing) + + # Draw object at calculated position + x, y = int(x), int(y) + + if object_type == 'circle': + draw_circle( + frame, + center=(x, y), + radius=object_data['radius'], + fill_color=object_data['color'] + ) + elif object_type == 'emoji': + draw_emoji_enhanced( + frame, + emoji=object_data['emoji'], + position=(x - object_data['size'] // 2, y - object_data['size'] // 2), + size=object_data['size'], + shadow=object_data.get('shadow', True) + ) + + frames.append(frame) + + return frames + + +def create_path_from_points(points: list[tuple[int, int]], + num_frames: int = 60, + easing: str = 'ease_in_out') -> list[tuple[int, int]]: + """ + Create a smooth path through multiple points. + + Args: + points: List of (x, y) waypoints + num_frames: Total number of frames + easing: Easing between points + + Returns: + List of (x, y) positions for each frame + """ + if len(points) < 2: + return points * num_frames + + path = [] + frames_per_segment = num_frames // (len(points) - 1) + + for i in range(len(points) - 1): + start = points[i] + end = points[i + 1] + + # Last segment gets remaining frames + if i == len(points) - 2: + segment_frames = num_frames - len(path) + else: + segment_frames = frames_per_segment + + for j in range(segment_frames): + t = j / segment_frames if segment_frames > 0 else 0 + x = interpolate(start[0], end[0], t, easing) + y = interpolate(start[1], end[1], t, easing) + path.append((int(x), int(y))) + + return path + + +def apply_trail_effect(frames: list, trail_length: int = 5, + fade_alpha: float = 0.3) -> list: + """ + Add motion trail effect to moving object. 
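+
+    Sketch: add a short motion trail to frames from create_move_animation.
+
+        frames = apply_trail_effect(frames, trail_length=4, fade_alpha=0.35)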
+ + Args: + frames: List of frames with moving object + trail_length: Number of previous frames to blend + fade_alpha: Opacity of trail frames + + Returns: + List of frames with trail effect + """ + from PIL import Image, ImageChops + import numpy as np + + trailed_frames = [] + + for i, frame in enumerate(frames): + # Start with current frame + result = frame.copy() + + # Blend previous frames + for j in range(1, min(trail_length + 1, i + 1)): + prev_frame = frames[i - j] + + # Calculate fade + alpha = fade_alpha ** j + + # Blend + result_array = np.array(result, dtype=np.float32) + prev_array = np.array(prev_frame, dtype=np.float32) + + blended = result_array * (1 - alpha) + prev_array * alpha + result = Image.fromarray(blended.astype(np.uint8)) + + trailed_frames.append(result) + + return trailed_frames + + +# Example usage +if __name__ == '__main__': + print("Creating movement examples...") + + # Example 1: Linear movement + builder = GIFBuilder(width=480, height=480, fps=20) + frames = create_move_animation( + object_type='emoji', + object_data={'emoji': '🚀', 'size': 60}, + start_pos=(50, 240), + end_pos=(430, 240), + num_frames=30, + motion_type='linear', + easing='ease_out' + ) + builder.add_frames(frames) + builder.save('move_linear.gif', num_colors=128) + + # Example 2: Arc movement + builder.clear() + frames = create_move_animation( + object_type='emoji', + object_data={'emoji': '⚽', 'size': 60}, + start_pos=(50, 350), + end_pos=(430, 350), + num_frames=30, + motion_type='arc', + motion_params={'arc_height': 150}, + easing='linear' + ) + builder.add_frames(frames) + builder.save('move_arc.gif', num_colors=128) + + # Example 3: Circular movement + builder.clear() + frames = create_move_animation( + object_type='emoji', + object_data={'emoji': '🌍', 'size': 50}, + start_pos=(0, 0), # Ignored for circle + end_pos=(0, 0), # Ignored for circle + num_frames=40, + motion_type='circle', + motion_params={ + 'center': (240, 240), + 'radius': 120, + 'start_angle': 0, + 'angle_range': 360 + }, + easing='linear' + ) + builder.add_frames(frames) + builder.save('move_circle.gif', num_colors=128) + + print("Created movement examples!") diff --git a/skills/slack-gif-creator/templates/pulse.py b/skills/slack-gif-creator/templates/pulse.py new file mode 100755 index 000000000..084ea0152 --- /dev/null +++ b/skills/slack-gif-creator/templates/pulse.py @@ -0,0 +1,268 @@ +#!/usr/bin/env python3 +""" +Pulse Animation - Scale objects rhythmically for emphasis. + +Creates pulsing, heartbeat, and throbbing effects. +""" + +import sys +from pathlib import Path +import math + +sys.path.append(str(Path(__file__).parent.parent)) + +from PIL import Image +from core.gif_builder import GIFBuilder +from core.frame_composer import create_blank_frame, draw_emoji_enhanced, draw_circle +from core.easing import interpolate + + +def create_pulse_animation( + object_type: str = 'emoji', + object_data: dict | None = None, + num_frames: int = 30, + pulse_type: str = 'smooth', # 'smooth', 'heartbeat', 'throb', 'pop' + scale_range: tuple[float, float] = (0.8, 1.2), + pulses: float = 2.0, + center_pos: tuple[int, int] = (240, 240), + frame_width: int = 480, + frame_height: int = 480, + bg_color: tuple[int, int, int] = (255, 255, 255) +) -> list[Image.Image]: + """ + Create pulsing/scaling animation. 
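+
+    Sketch of a heartbeat pulse (see the __main__ block below for more variants;
+    values are illustrative):
+
+        frames = create_pulse_animation(object_type='emoji',
+                                        object_data={'emoji': '💗', 'size': 100},
+                                        pulse_type='heartbeat', pulses=3)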
+ + Args: + object_type: 'emoji', 'circle', 'text' + object_data: Object configuration + num_frames: Number of frames + pulse_type: Type of pulsing motion + scale_range: (min_scale, max_scale) tuple + pulses: Number of pulses in animation + center_pos: Center position + frame_width: Frame width + frame_height: Frame height + bg_color: Background color + + Returns: + List of frames + """ + frames = [] + + # Default object data + if object_data is None: + if object_type == 'emoji': + object_data = {'emoji': '❤️', 'size': 100} + elif object_type == 'circle': + object_data = {'radius': 50, 'color': (255, 100, 100)} + + min_scale, max_scale = scale_range + + for i in range(num_frames): + frame = create_blank_frame(frame_width, frame_height, bg_color) + t = i / (num_frames - 1) if num_frames > 1 else 0 + + # Calculate scale based on pulse type + if pulse_type == 'smooth': + # Simple sinusoidal pulse + scale = min_scale + (max_scale - min_scale) * ( + 0.5 + 0.5 * math.sin(t * pulses * 2 * math.pi - math.pi / 2) + ) + + elif pulse_type == 'heartbeat': + # Double pump like a heartbeat + phase = (t * pulses) % 1.0 + if phase < 0.15: + # First pump + scale = interpolate(min_scale, max_scale, phase / 0.15, 'ease_out') + elif phase < 0.25: + # First release + scale = interpolate(max_scale, min_scale, (phase - 0.15) / 0.10, 'ease_in') + elif phase < 0.35: + # Second pump (smaller) + scale = interpolate(min_scale, (min_scale + max_scale) / 2, (phase - 0.25) / 0.10, 'ease_out') + elif phase < 0.45: + # Second release + scale = interpolate((min_scale + max_scale) / 2, min_scale, (phase - 0.35) / 0.10, 'ease_in') + else: + # Rest period + scale = min_scale + + elif pulse_type == 'throb': + # Sharp pulse with quick return + phase = (t * pulses) % 1.0 + if phase < 0.2: + scale = interpolate(min_scale, max_scale, phase / 0.2, 'ease_out') + else: + scale = interpolate(max_scale, min_scale, (phase - 0.2) / 0.8, 'ease_in') + + elif pulse_type == 'pop': + # Pop out and back with overshoot + phase = (t * pulses) % 1.0 + if phase < 0.3: + # Pop out with overshoot + scale = interpolate(min_scale, max_scale * 1.1, phase / 0.3, 'elastic_out') + else: + # Settle back + scale = interpolate(max_scale * 1.1, min_scale, (phase - 0.3) / 0.7, 'ease_out') + + else: + scale = min_scale + (max_scale - min_scale) * ( + 0.5 + 0.5 * math.sin(t * pulses * 2 * math.pi) + ) + + # Draw object at calculated scale + if object_type == 'emoji': + base_size = object_data['size'] + current_size = int(base_size * scale) + draw_emoji_enhanced( + frame, + emoji=object_data['emoji'], + position=(center_pos[0] - current_size // 2, center_pos[1] - current_size // 2), + size=current_size, + shadow=object_data.get('shadow', True) + ) + + elif object_type == 'circle': + base_radius = object_data['radius'] + current_radius = int(base_radius * scale) + draw_circle( + frame, + center=center_pos, + radius=current_radius, + fill_color=object_data['color'] + ) + + elif object_type == 'text': + from core.typography import draw_text_with_outline + base_size = object_data.get('font_size', 50) + current_size = int(base_size * scale) + draw_text_with_outline( + frame, + text=object_data.get('text', 'PULSE'), + position=center_pos, + font_size=current_size, + text_color=object_data.get('text_color', (255, 100, 100)), + outline_color=object_data.get('outline_color', (0, 0, 0)), + outline_width=3, + centered=True + ) + + frames.append(frame) + + return frames + + +def create_attention_pulse( + emoji: str = '⚠️', + num_frames: int = 20, + frame_size: int = 128, + 
bg_color: tuple[int, int, int] = (255, 255, 255) +) -> list[Image.Image]: + """ + Create attention-grabbing pulse (good for emoji GIFs). + + Args: + emoji: Emoji to pulse + num_frames: Number of frames + frame_size: Frame size (square) + bg_color: Background color + + Returns: + List of frames optimized for emoji size + """ + return create_pulse_animation( + object_type='emoji', + object_data={'emoji': emoji, 'size': 80, 'shadow': False}, + num_frames=num_frames, + pulse_type='throb', + scale_range=(0.85, 1.15), + pulses=2, + center_pos=(frame_size // 2, frame_size // 2), + frame_width=frame_size, + frame_height=frame_size, + bg_color=bg_color + ) + + +def create_breathing_animation( + object_type: str = 'emoji', + object_data: dict | None = None, + num_frames: int = 60, + breaths: float = 2.0, + scale_range: tuple[float, float] = (0.9, 1.1), + frame_width: int = 480, + frame_height: int = 480, + bg_color: tuple[int, int, int] = (240, 248, 255) +) -> list[Image.Image]: + """ + Create slow, calming breathing animation (in and out). + + Args: + object_type: Type of object + object_data: Object configuration + num_frames: Number of frames + breaths: Number of breathing cycles + scale_range: Min/max scale + frame_width: Frame width + frame_height: Frame height + bg_color: Background color + + Returns: + List of frames + """ + if object_data is None: + object_data = {'emoji': '😌', 'size': 100} + + return create_pulse_animation( + object_type=object_type, + object_data=object_data, + num_frames=num_frames, + pulse_type='smooth', + scale_range=scale_range, + pulses=breaths, + center_pos=(frame_width // 2, frame_height // 2), + frame_width=frame_width, + frame_height=frame_height, + bg_color=bg_color + ) + + +# Example usage +if __name__ == '__main__': + print("Creating pulse animations...") + + builder = GIFBuilder(width=480, height=480, fps=20) + + # Example 1: Smooth pulse + frames = create_pulse_animation( + object_type='emoji', + object_data={'emoji': '❤️', 'size': 100}, + num_frames=40, + pulse_type='smooth', + scale_range=(0.8, 1.2), + pulses=2 + ) + builder.add_frames(frames) + builder.save('pulse_smooth.gif', num_colors=128) + + # Example 2: Heartbeat + builder.clear() + frames = create_pulse_animation( + object_type='emoji', + object_data={'emoji': '💓', 'size': 100}, + num_frames=60, + pulse_type='heartbeat', + scale_range=(0.85, 1.2), + pulses=3 + ) + builder.add_frames(frames) + builder.save('pulse_heartbeat.gif', num_colors=128) + + # Example 3: Attention pulse (emoji size) + builder = GIFBuilder(width=128, height=128, fps=15) + frames = create_attention_pulse(emoji='⚠️', num_frames=20) + builder.add_frames(frames) + builder.save('pulse_attention.gif', num_colors=48, optimize_for_emoji=True) + + print("Created pulse animations!") diff --git a/skills/slack-gif-creator/templates/shake.py b/skills/slack-gif-creator/templates/shake.py new file mode 100755 index 000000000..5c6873802 --- /dev/null +++ b/skills/slack-gif-creator/templates/shake.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python3 +""" +Shake Animation Template - Creates shaking/vibrating motion. + +Use this for impact effects, emphasis, or nervous/excited reactions. 
+""" + +import sys +import math +from pathlib import Path + +sys.path.append(str(Path(__file__).parent.parent)) + +from core.gif_builder import GIFBuilder +from core.frame_composer import create_blank_frame, draw_circle, draw_emoji, draw_text +from core.easing import ease_out_quad + + +def create_shake_animation( + object_type: str = 'emoji', + object_data: dict = None, + num_frames: int = 20, + shake_intensity: int = 15, + center_x: int = 240, + center_y: int = 240, + direction: str = 'horizontal', # 'horizontal', 'vertical', or 'both' + frame_width: int = 480, + frame_height: int = 480, + bg_color: tuple[int, int, int] = (255, 255, 255) +) -> list: + """ + Create frames for a shaking animation. + + Args: + object_type: 'circle', 'emoji', 'text', or 'custom' + object_data: Data for the object + num_frames: Number of frames + shake_intensity: Maximum shake displacement in pixels + center_x: Center X position + center_y: Center Y position + direction: 'horizontal', 'vertical', or 'both' + frame_width: Frame width + frame_height: Frame height + bg_color: Background color + + Returns: + List of frames + """ + frames = [] + + # Default object data + if object_data is None: + if object_type == 'emoji': + object_data = {'emoji': '😱', 'size': 80} + elif object_type == 'text': + object_data = {'text': 'SHAKE!', 'font_size': 50, 'color': (255, 0, 0)} + + for i in range(num_frames): + frame = create_blank_frame(frame_width, frame_height, bg_color) + + # Calculate progress + t = i / (num_frames - 1) if num_frames > 1 else 0 + + # Decay shake intensity over time + intensity = shake_intensity * (1 - ease_out_quad(t)) + + # Calculate shake offset using sine wave for smooth oscillation + freq = 3 # Oscillation frequency + offset_x = 0 + offset_y = 0 + + if direction in ['horizontal', 'both']: + offset_x = int(math.sin(t * freq * 2 * math.pi) * intensity) + + if direction in ['vertical', 'both']: + offset_y = int(math.cos(t * freq * 2 * math.pi) * intensity) + + # Apply offset + x = center_x + offset_x + y = center_y + offset_y + + # Draw object + if object_type == 'emoji': + draw_emoji( + frame, + emoji=object_data['emoji'], + position=(x - object_data['size'] // 2, y - object_data['size'] // 2), + size=object_data['size'] + ) + elif object_type == 'text': + draw_text( + frame, + text=object_data['text'], + position=(x, y), + font_size=object_data['font_size'], + color=object_data['color'], + centered=True + ) + elif object_type == 'circle': + draw_circle( + frame, + center=(x, y), + radius=object_data.get('radius', 30), + fill_color=object_data.get('color', (100, 100, 255)) + ) + + frames.append(frame) + + return frames + + +# Example usage +if __name__ == '__main__': + print("Creating shake GIF...") + + builder = GIFBuilder(width=480, height=480, fps=24) + + frames = create_shake_animation( + object_type='emoji', + object_data={'emoji': '😱', 'size': 100}, + num_frames=30, + shake_intensity=20, + direction='both' + ) + + builder.add_frames(frames) + builder.save('shake_test.gif', num_colors=128) \ No newline at end of file diff --git a/skills/slack-gif-creator/templates/slide.py b/skills/slack-gif-creator/templates/slide.py new file mode 100755 index 000000000..0f441cdc8 --- /dev/null +++ b/skills/slack-gif-creator/templates/slide.py @@ -0,0 +1,291 @@ +#!/usr/bin/env python3 +""" +Slide Animation - Slide elements in from edges with overshoot/bounce. + +Creates smooth entrance and exit animations. 
+""" + +import sys +from pathlib import Path + +sys.path.append(str(Path(__file__).parent.parent)) + +from PIL import Image +from core.gif_builder import GIFBuilder +from core.frame_composer import create_blank_frame, draw_emoji_enhanced +from core.easing import interpolate + + +def create_slide_animation( + object_type: str = 'emoji', + object_data: dict | None = None, + num_frames: int = 30, + direction: str = 'left', # 'left', 'right', 'top', 'bottom' + slide_type: str = 'in', # 'in', 'out', 'across' + easing: str = 'ease_out', + overshoot: bool = False, + final_pos: tuple[int, int] | None = None, + frame_width: int = 480, + frame_height: int = 480, + bg_color: tuple[int, int, int] = (255, 255, 255) +) -> list[Image.Image]: + """ + Create slide animation. + + Args: + object_type: 'emoji', 'text' + object_data: Object configuration + num_frames: Number of frames + direction: Direction of slide + slide_type: Type of slide (in/out/across) + easing: Easing function + overshoot: Add overshoot/bounce at end + final_pos: Final position (None = center) + frame_width: Frame width + frame_height: Frame height + bg_color: Background color + + Returns: + List of frames + """ + frames = [] + + # Default object data + if object_data is None: + if object_type == 'emoji': + object_data = {'emoji': '➡️', 'size': 100} + + if final_pos is None: + final_pos = (frame_width // 2, frame_height // 2) + + # Calculate start and end positions based on direction + size = object_data.get('size', 100) if object_type == 'emoji' else 100 + margin = size + + if direction == 'left': + start_pos = (-margin, final_pos[1]) + end_pos = final_pos if slide_type == 'in' else (frame_width + margin, final_pos[1]) + elif direction == 'right': + start_pos = (frame_width + margin, final_pos[1]) + end_pos = final_pos if slide_type == 'in' else (-margin, final_pos[1]) + elif direction == 'top': + start_pos = (final_pos[0], -margin) + end_pos = final_pos if slide_type == 'in' else (final_pos[0], frame_height + margin) + elif direction == 'bottom': + start_pos = (final_pos[0], frame_height + margin) + end_pos = final_pos if slide_type == 'in' else (final_pos[0], -margin) + else: + start_pos = (-margin, final_pos[1]) + end_pos = final_pos + + # For 'out' type, swap start and end + if slide_type == 'out': + start_pos, end_pos = final_pos, end_pos + elif slide_type == 'across': + # Slide all the way across + if direction == 'left': + start_pos = (-margin, final_pos[1]) + end_pos = (frame_width + margin, final_pos[1]) + elif direction == 'right': + start_pos = (frame_width + margin, final_pos[1]) + end_pos = (-margin, final_pos[1]) + elif direction == 'top': + start_pos = (final_pos[0], -margin) + end_pos = (final_pos[0], frame_height + margin) + elif direction == 'bottom': + start_pos = (final_pos[0], frame_height + margin) + end_pos = (final_pos[0], -margin) + + # Use overshoot easing if requested + if overshoot and slide_type == 'in': + easing = 'back_out' + + for i in range(num_frames): + t = i / (num_frames - 1) if num_frames > 1 else 0 + frame = create_blank_frame(frame_width, frame_height, bg_color) + + # Calculate current position + x = int(interpolate(start_pos[0], end_pos[0], t, easing)) + y = int(interpolate(start_pos[1], end_pos[1], t, easing)) + + # Draw object + if object_type == 'emoji': + size = object_data['size'] + draw_emoji_enhanced( + frame, + emoji=object_data['emoji'], + position=(x - size // 2, y - size // 2), + size=size, + shadow=object_data.get('shadow', True) + ) + + elif object_type == 'text': + from 
core.typography import draw_text_with_outline + draw_text_with_outline( + frame, + text=object_data.get('text', 'SLIDE'), + position=(x, y), + font_size=object_data.get('font_size', 50), + text_color=object_data.get('text_color', (0, 0, 0)), + outline_color=object_data.get('outline_color', (255, 255, 255)), + outline_width=3, + centered=True + ) + + frames.append(frame) + + return frames + + +def create_multi_slide( + objects: list[dict], + num_frames: int = 30, + stagger_delay: int = 3, + frame_width: int = 480, + frame_height: int = 480, + bg_color: tuple[int, int, int] = (255, 255, 255) +) -> list[Image.Image]: + """ + Create animation with multiple objects sliding in sequence. + + Args: + objects: List of object configs with 'type', 'data', 'direction', 'final_pos' + num_frames: Number of frames + stagger_delay: Frames between each object starting + frame_width: Frame width + frame_height: Frame height + bg_color: Background color + + Returns: + List of frames + """ + frames = [] + + for i in range(num_frames): + frame = create_blank_frame(frame_width, frame_height, bg_color) + + for idx, obj in enumerate(objects): + # Calculate when this object starts moving + start_frame = idx * stagger_delay + if i < start_frame: + continue # Object hasn't started yet + + # Calculate progress for this object + obj_frame = i - start_frame + obj_duration = num_frames - start_frame + if obj_duration <= 0: + continue + + t = obj_frame / obj_duration + + # Get object properties + obj_type = obj.get('type', 'emoji') + obj_data = obj.get('data', {'emoji': '➡️', 'size': 80}) + direction = obj.get('direction', 'left') + final_pos = obj.get('final_pos', (frame_width // 2, frame_height // 2)) + easing = obj.get('easing', 'back_out') + + # Calculate position + size = obj_data.get('size', 80) + margin = size + + if direction == 'left': + start_x = -margin + end_x = final_pos[0] + y = final_pos[1] + elif direction == 'right': + start_x = frame_width + margin + end_x = final_pos[0] + y = final_pos[1] + elif direction == 'top': + x = final_pos[0] + start_y = -margin + end_y = final_pos[1] + elif direction == 'bottom': + x = final_pos[0] + start_y = frame_height + margin + end_y = final_pos[1] + else: + start_x = -margin + end_x = final_pos[0] + y = final_pos[1] + + # Interpolate position + if direction in ['left', 'right']: + x = int(interpolate(start_x, end_x, t, easing)) + else: + y = int(interpolate(start_y, end_y, t, easing)) + + # Draw object + if obj_type == 'emoji': + draw_emoji_enhanced( + frame, + emoji=obj_data['emoji'], + position=(x - size // 2, y - size // 2), + size=size, + shadow=False + ) + + frames.append(frame) + + return frames + + +# Example usage +if __name__ == '__main__': + print("Creating slide animations...") + + builder = GIFBuilder(width=480, height=480, fps=20) + + # Example 1: Slide in from left with overshoot + frames = create_slide_animation( + object_type='emoji', + object_data={'emoji': '➡️', 'size': 100}, + num_frames=30, + direction='left', + slide_type='in', + overshoot=True + ) + builder.add_frames(frames) + builder.save('slide_in_left.gif', num_colors=128) + + # Example 2: Slide across + builder.clear() + frames = create_slide_animation( + object_type='emoji', + object_data={'emoji': '🚀', 'size': 80}, + num_frames=40, + direction='left', + slide_type='across', + easing='ease_in_out' + ) + builder.add_frames(frames) + builder.save('slide_across.gif', num_colors=128) + + # Example 3: Multiple objects sliding in + builder.clear() + objects = [ + { + 'type': 'emoji', + 'data': 
{'emoji': '🎯', 'size': 60}, + 'direction': 'left', + 'final_pos': (120, 240) + }, + { + 'type': 'emoji', + 'data': {'emoji': '🎪', 'size': 60}, + 'direction': 'right', + 'final_pos': (240, 240) + }, + { + 'type': 'emoji', + 'data': {'emoji': '🎨', 'size': 60}, + 'direction': 'top', + 'final_pos': (360, 240) + } + ] + frames = create_multi_slide(objects, num_frames=50, stagger_delay=5) + builder.add_frames(frames) + builder.save('slide_multi.gif', num_colors=128) + + print("Created slide animations!") diff --git a/skills/slack-gif-creator/templates/spin.py b/skills/slack-gif-creator/templates/spin.py new file mode 100755 index 000000000..804bf4b12 --- /dev/null +++ b/skills/slack-gif-creator/templates/spin.py @@ -0,0 +1,269 @@ +#!/usr/bin/env python3 +""" +Spin Animation - Rotate objects continuously or with variation. + +Creates spinning, rotating, and wobbling effects. +""" + +import sys +from pathlib import Path +import math + +sys.path.append(str(Path(__file__).parent.parent)) + +from PIL import Image +from core.gif_builder import GIFBuilder +from core.frame_composer import create_blank_frame, draw_emoji_enhanced, draw_circle +from core.easing import interpolate + + +def create_spin_animation( + object_type: str = 'emoji', + object_data: dict | None = None, + num_frames: int = 30, + rotation_type: str = 'clockwise', # 'clockwise', 'counterclockwise', 'wobble', 'pendulum' + full_rotations: float = 1.0, + easing: str = 'linear', + center_pos: tuple[int, int] = (240, 240), + frame_width: int = 480, + frame_height: int = 480, + bg_color: tuple[int, int, int] = (255, 255, 255) +) -> list[Image.Image]: + """ + Create spinning/rotating animation. + + Args: + object_type: 'emoji', 'image', 'text' + object_data: Object configuration + num_frames: Number of frames + rotation_type: Type of rotation + full_rotations: Number of complete 360° rotations + easing: Easing function for rotation speed + center_pos: Center position for rotation + frame_width: Frame width + frame_height: Frame height + bg_color: Background color + + Returns: + List of frames + """ + frames = [] + + # Default object data + if object_data is None: + if object_type == 'emoji': + object_data = {'emoji': '🔄', 'size': 100} + + for i in range(num_frames): + frame = create_blank_frame(frame_width, frame_height, bg_color) + t = i / (num_frames - 1) if num_frames > 1 else 0 + + # Calculate rotation angle + if rotation_type == 'clockwise': + angle = interpolate(0, 360 * full_rotations, t, easing) + elif rotation_type == 'counterclockwise': + angle = interpolate(0, -360 * full_rotations, t, easing) + elif rotation_type == 'wobble': + # Back and forth rotation + angle = math.sin(t * full_rotations * 2 * math.pi) * 45 + elif rotation_type == 'pendulum': + # Smooth pendulum swing + angle = math.sin(t * full_rotations * 2 * math.pi) * 90 + else: + angle = interpolate(0, 360 * full_rotations, t, easing) + + # Create object on transparent background to rotate + if object_type == 'emoji': + # For emoji, we need to create a larger canvas to avoid clipping during rotation + emoji_size = object_data['size'] + canvas_size = int(emoji_size * 1.5) + emoji_canvas = Image.new('RGBA', (canvas_size, canvas_size), (0, 0, 0, 0)) + + # Draw emoji in center of canvas + from core.frame_composer import draw_emoji_enhanced + draw_emoji_enhanced( + emoji_canvas, + emoji=object_data['emoji'], + position=(canvas_size // 2 - emoji_size // 2, canvas_size // 2 - emoji_size // 2), + size=emoji_size, + shadow=False + ) + + # Rotate the canvas + rotated = 
emoji_canvas.rotate(angle, resample=Image.BICUBIC, expand=False) + + # Paste onto frame + paste_x = center_pos[0] - canvas_size // 2 + paste_y = center_pos[1] - canvas_size // 2 + frame.paste(rotated, (paste_x, paste_y), rotated) + + elif object_type == 'text': + from core.typography import draw_text_with_outline + # Similar approach - create canvas, draw text, rotate + text = object_data.get('text', 'SPIN!') + font_size = object_data.get('font_size', 50) + + canvas_size = max(frame_width, frame_height) + text_canvas = Image.new('RGBA', (canvas_size, canvas_size), (0, 0, 0, 0)) + + # Draw text + text_canvas_rgb = text_canvas.convert('RGB') + text_canvas_rgb.paste(bg_color, (0, 0, canvas_size, canvas_size)) + draw_text_with_outline( + text_canvas_rgb, + text, + position=(canvas_size // 2, canvas_size // 2), + font_size=font_size, + text_color=object_data.get('text_color', (0, 0, 0)), + outline_color=object_data.get('outline_color', (255, 255, 255)), + outline_width=3, + centered=True + ) + + # Convert back to RGBA for rotation + text_canvas = text_canvas_rgb.convert('RGBA') + + # Make background transparent + data = text_canvas.getdata() + new_data = [] + for item in data: + if item[:3] == bg_color: + new_data.append((255, 255, 255, 0)) + else: + new_data.append(item) + text_canvas.putdata(new_data) + + # Rotate + rotated = text_canvas.rotate(angle, resample=Image.BICUBIC, expand=False) + + # Composite onto frame + frame_rgba = frame.convert('RGBA') + frame_rgba = Image.alpha_composite(frame_rgba, rotated) + frame = frame_rgba.convert('RGB') + + frames.append(frame) + + return frames + + +def create_loading_spinner( + num_frames: int = 20, + spinner_type: str = 'dots', # 'dots', 'arc', 'emoji' + size: int = 100, + color: tuple[int, int, int] = (100, 150, 255), + frame_width: int = 128, + frame_height: int = 128, + bg_color: tuple[int, int, int] = (255, 255, 255) +) -> list[Image.Image]: + """ + Create a loading spinner animation. 
+ + Args: + num_frames: Number of frames + spinner_type: Type of spinner + size: Spinner size + color: Spinner color + frame_width: Frame width + frame_height: Frame height + bg_color: Background color + + Returns: + List of frames + """ + from PIL import ImageDraw + frames = [] + center = (frame_width // 2, frame_height // 2) + + for i in range(num_frames): + frame = create_blank_frame(frame_width, frame_height, bg_color) + draw = ImageDraw.Draw(frame) + + angle_offset = (i / num_frames) * 360 + + if spinner_type == 'dots': + # Circular dots + num_dots = 8 + for j in range(num_dots): + angle = (j / num_dots * 360 + angle_offset) * math.pi / 180 + x = center[0] + size * 0.4 * math.cos(angle) + y = center[1] + size * 0.4 * math.sin(angle) + + # Fade based on position + alpha = 1.0 - (j / num_dots) + dot_color = tuple(int(c * alpha) for c in color) + dot_radius = int(size * 0.1) + + draw.ellipse( + [x - dot_radius, y - dot_radius, x + dot_radius, y + dot_radius], + fill=dot_color + ) + + elif spinner_type == 'arc': + # Rotating arc + start_angle = angle_offset + end_angle = angle_offset + 270 + arc_width = int(size * 0.15) + + bbox = [ + center[0] - size // 2, + center[1] - size // 2, + center[0] + size // 2, + center[1] + size // 2 + ] + draw.arc(bbox, start_angle, end_angle, fill=color, width=arc_width) + + elif spinner_type == 'emoji': + # Rotating emoji spinner + angle = angle_offset + emoji_canvas = Image.new('RGBA', (frame_width, frame_height), (0, 0, 0, 0)) + draw_emoji_enhanced( + emoji_canvas, + emoji='⏳', + position=(center[0] - size // 2, center[1] - size // 2), + size=size, + shadow=False + ) + rotated = emoji_canvas.rotate(angle, center=center, resample=Image.BICUBIC) + frame.paste(rotated, (0, 0), rotated) + + frames.append(frame) + + return frames + + +# Example usage +if __name__ == '__main__': + print("Creating spin animations...") + + builder = GIFBuilder(width=480, height=480, fps=20) + + # Example 1: Clockwise spin + frames = create_spin_animation( + object_type='emoji', + object_data={'emoji': '🔄', 'size': 100}, + num_frames=30, + rotation_type='clockwise', + full_rotations=2 + ) + builder.add_frames(frames) + builder.save('spin_clockwise.gif', num_colors=128) + + # Example 2: Wobble + builder.clear() + frames = create_spin_animation( + object_type='emoji', + object_data={'emoji': '🎯', 'size': 100}, + num_frames=30, + rotation_type='wobble', + full_rotations=3 + ) + builder.add_frames(frames) + builder.save('spin_wobble.gif', num_colors=128) + + # Example 3: Loading spinner + builder = GIFBuilder(width=128, height=128, fps=15) + frames = create_loading_spinner(num_frames=20, spinner_type='dots') + builder.add_frames(frames) + builder.save('loading_spinner.gif', num_colors=64, optimize_for_emoji=True) + + print("Created spin animations!") diff --git a/skills/slack-gif-creator/templates/wiggle.py b/skills/slack-gif-creator/templates/wiggle.py new file mode 100755 index 000000000..9cff5af5f --- /dev/null +++ b/skills/slack-gif-creator/templates/wiggle.py @@ -0,0 +1,300 @@ +#!/usr/bin/env python3 +""" +Wiggle Animation - Smooth, organic wobbling and jiggling motions. + +Creates playful, elastic movements that are smoother than shake. 
+""" + +import sys +from pathlib import Path +import math + +sys.path.append(str(Path(__file__).parent.parent)) + +from PIL import Image +from core.gif_builder import GIFBuilder +from core.frame_composer import create_blank_frame, draw_emoji_enhanced +from core.easing import interpolate + + +def create_wiggle_animation( + object_type: str = 'emoji', + object_data: dict | None = None, + num_frames: int = 30, + wiggle_type: str = 'jello', # 'jello', 'wave', 'bounce', 'sway' + intensity: float = 1.0, + cycles: float = 2.0, + center_pos: tuple[int, int] = (240, 240), + frame_width: int = 480, + frame_height: int = 480, + bg_color: tuple[int, int, int] = (255, 255, 255) +) -> list[Image.Image]: + """ + Create wiggle/wobble animation. + + Args: + object_type: 'emoji', 'text' + object_data: Object configuration + num_frames: Number of frames + wiggle_type: Type of wiggle motion + intensity: Wiggle intensity multiplier + cycles: Number of wiggle cycles + center_pos: Center position + frame_width: Frame width + frame_height: Frame height + bg_color: Background color + + Returns: + List of frames + """ + frames = [] + + # Default object data + if object_data is None: + if object_type == 'emoji': + object_data = {'emoji': '🎈', 'size': 100} + + for i in range(num_frames): + t = i / (num_frames - 1) if num_frames > 1 else 0 + frame = create_blank_frame(frame_width, frame_height, bg_color) + + # Calculate wiggle transformations + offset_x = 0 + offset_y = 0 + rotation = 0 + scale_x = 1.0 + scale_y = 1.0 + + if wiggle_type == 'jello': + # Jello wobble - multiple frequencies + freq1 = cycles * 2 * math.pi + freq2 = cycles * 3 * math.pi + freq3 = cycles * 5 * math.pi + + decay = 1.0 - t if cycles < 1.5 else 1.0 # Decay for single wiggles + + offset_x = ( + math.sin(freq1 * t) * 15 + + math.sin(freq2 * t) * 8 + + math.sin(freq3 * t) * 3 + ) * intensity * decay + + rotation = ( + math.sin(freq1 * t) * 10 + + math.cos(freq2 * t) * 5 + ) * intensity * decay + + # Squash and stretch + scale_y = 1.0 + math.sin(freq1 * t) * 0.1 * intensity * decay + scale_x = 1.0 / scale_y # Preserve volume + + elif wiggle_type == 'wave': + # Wave motion + freq = cycles * 2 * math.pi + offset_y = math.sin(freq * t) * 20 * intensity + rotation = math.sin(freq * t + math.pi / 4) * 8 * intensity + + elif wiggle_type == 'bounce': + # Bouncy wiggle + freq = cycles * 2 * math.pi + bounce = abs(math.sin(freq * t)) + + scale_y = 1.0 + bounce * 0.2 * intensity + scale_x = 1.0 - bounce * 0.1 * intensity + offset_y = -bounce * 10 * intensity + + elif wiggle_type == 'sway': + # Gentle sway back and forth + freq = cycles * 2 * math.pi + offset_x = math.sin(freq * t) * 25 * intensity + rotation = math.sin(freq * t) * 12 * intensity + + # Subtle scale change + scale = 1.0 + math.sin(freq * t) * 0.05 * intensity + scale_x = scale + scale_y = scale + + elif wiggle_type == 'tail_wag': + # Like a wagging tail - base stays, tip moves + freq = cycles * 2 * math.pi + wag = math.sin(freq * t) * intensity + + # Rotation focused at one end + rotation = wag * 20 + offset_x = wag * 15 + + # Apply transformations + if object_type == 'emoji': + size = object_data['size'] + size_x = int(size * scale_x) + size_y = int(size * scale_y) + + # For non-uniform scaling or rotation, we need to use PIL transforms + if abs(scale_x - scale_y) > 0.01 or abs(rotation) > 0.1: + # Create emoji on transparent canvas + canvas_size = int(size * 2) + emoji_canvas = Image.new('RGBA', (canvas_size, canvas_size), (0, 0, 0, 0)) + + # Draw emoji + draw_emoji_enhanced( + 
emoji_canvas, + emoji=object_data['emoji'], + position=(canvas_size // 2 - size // 2, canvas_size // 2 - size // 2), + size=size, + shadow=False + ) + + # Scale + if abs(scale_x - scale_y) > 0.01: + new_size = (int(canvas_size * scale_x), int(canvas_size * scale_y)) + emoji_canvas = emoji_canvas.resize(new_size, Image.LANCZOS) + canvas_size_x, canvas_size_y = new_size + else: + canvas_size_x = canvas_size_y = canvas_size + + # Rotate + if abs(rotation) > 0.1: + emoji_canvas = emoji_canvas.rotate( + rotation, + resample=Image.BICUBIC, + expand=False + ) + + # Position with offset + paste_x = int(center_pos[0] - canvas_size_x // 2 + offset_x) + paste_y = int(center_pos[1] - canvas_size_y // 2 + offset_y) + + frame_rgba = frame.convert('RGBA') + frame_rgba.paste(emoji_canvas, (paste_x, paste_y), emoji_canvas) + frame = frame_rgba.convert('RGB') + else: + # Simple case - just offset + pos_x = int(center_pos[0] - size // 2 + offset_x) + pos_y = int(center_pos[1] - size // 2 + offset_y) + draw_emoji_enhanced( + frame, + emoji=object_data['emoji'], + position=(pos_x, pos_y), + size=size, + shadow=object_data.get('shadow', True) + ) + + elif object_type == 'text': + from core.typography import draw_text_with_outline + + # Create text on canvas for transformation + canvas_size = max(frame_width, frame_height) + text_canvas = Image.new('RGBA', (canvas_size, canvas_size), (0, 0, 0, 0)) + + # Convert to RGB for drawing + text_canvas_rgb = text_canvas.convert('RGB') + text_canvas_rgb.paste(bg_color, (0, 0, canvas_size, canvas_size)) + + draw_text_with_outline( + text_canvas_rgb, + text=object_data.get('text', 'WIGGLE'), + position=(canvas_size // 2, canvas_size // 2), + font_size=object_data.get('font_size', 50), + text_color=object_data.get('text_color', (0, 0, 0)), + outline_color=object_data.get('outline_color', (255, 255, 255)), + outline_width=3, + centered=True + ) + + # Make transparent + text_canvas = text_canvas_rgb.convert('RGBA') + data = text_canvas.getdata() + new_data = [] + for item in data: + if item[:3] == bg_color: + new_data.append((255, 255, 255, 0)) + else: + new_data.append(item) + text_canvas.putdata(new_data) + + # Apply rotation + if abs(rotation) > 0.1: + text_canvas = text_canvas.rotate(rotation, center=(canvas_size // 2, canvas_size // 2), resample=Image.BICUBIC) + + # Crop to frame with offset + left = (canvas_size - frame_width) // 2 - int(offset_x) + top = (canvas_size - frame_height) // 2 - int(offset_y) + text_cropped = text_canvas.crop((left, top, left + frame_width, top + frame_height)) + + frame_rgba = frame.convert('RGBA') + frame = Image.alpha_composite(frame_rgba, text_cropped) + frame = frame.convert('RGB') + + frames.append(frame) + + return frames + + +def create_excited_wiggle( + emoji: str = '🎉', + num_frames: int = 20, + frame_size: int = 128 +) -> list[Image.Image]: + """ + Create excited wiggle for emoji GIFs. 
+ + Args: + emoji: Emoji to wiggle + num_frames: Number of frames + frame_size: Frame size (square) + + Returns: + List of frames + """ + return create_wiggle_animation( + object_type='emoji', + object_data={'emoji': emoji, 'size': 80, 'shadow': False}, + num_frames=num_frames, + wiggle_type='jello', + intensity=0.8, + cycles=2, + center_pos=(frame_size // 2, frame_size // 2), + frame_width=frame_size, + frame_height=frame_size, + bg_color=(255, 255, 255) + ) + + +# Example usage +if __name__ == '__main__': + print("Creating wiggle animations...") + + builder = GIFBuilder(width=480, height=480, fps=20) + + # Example 1: Jello wiggle + frames = create_wiggle_animation( + object_type='emoji', + object_data={'emoji': '🎈', 'size': 100}, + num_frames=40, + wiggle_type='jello', + intensity=1.0, + cycles=2 + ) + builder.add_frames(frames) + builder.save('wiggle_jello.gif', num_colors=128) + + # Example 2: Wave + builder.clear() + frames = create_wiggle_animation( + object_type='emoji', + object_data={'emoji': '🌊', 'size': 100}, + num_frames=30, + wiggle_type='wave', + intensity=1.2, + cycles=3 + ) + builder.add_frames(frames) + builder.save('wiggle_wave.gif', num_colors=128) + + # Example 3: Excited wiggle (emoji size) + builder = GIFBuilder(width=128, height=128, fps=15) + frames = create_excited_wiggle(emoji='🎉', num_frames=20) + builder.add_frames(frames) + builder.save('wiggle_excited.gif', num_colors=48, optimize_for_emoji=True) + + print("Created wiggle animations!") diff --git a/skills/slack-gif-creator/templates/zoom.py b/skills/slack-gif-creator/templates/zoom.py new file mode 100755 index 000000000..e45176a80 --- /dev/null +++ b/skills/slack-gif-creator/templates/zoom.py @@ -0,0 +1,312 @@ +#!/usr/bin/env python3 +""" +Zoom Animation - Scale objects dramatically for emphasis. + +Creates zoom in, zoom out, and dramatic scaling effects. +""" + +import sys +from pathlib import Path +import math + +sys.path.append(str(Path(__file__).parent.parent)) + +from PIL import Image, ImageFilter +from core.gif_builder import GIFBuilder +from core.frame_composer import create_blank_frame, draw_emoji_enhanced +from core.easing import interpolate + + +def create_zoom_animation( + object_type: str = 'emoji', + object_data: dict | None = None, + num_frames: int = 30, + zoom_type: str = 'in', # 'in', 'out', 'in_out', 'punch' + scale_range: tuple[float, float] = (0.1, 2.0), + easing: str = 'ease_out', + add_motion_blur: bool = False, + center_pos: tuple[int, int] = (240, 240), + frame_width: int = 480, + frame_height: int = 480, + bg_color: tuple[int, int, int] = (255, 255, 255) +) -> list[Image.Image]: + """ + Create zoom animation. 
+ + Args: + object_type: 'emoji', 'text', 'image' + object_data: Object configuration + num_frames: Number of frames + zoom_type: Type of zoom effect + scale_range: (start_scale, end_scale) tuple + easing: Easing function + add_motion_blur: Add blur for speed effect + center_pos: Center position + frame_width: Frame width + frame_height: Frame height + bg_color: Background color + + Returns: + List of frames + """ + frames = [] + + # Default object data + if object_data is None: + if object_type == 'emoji': + object_data = {'emoji': '🔍', 'size': 100} + + base_size = object_data.get('size', 100) if object_type == 'emoji' else object_data.get('font_size', 60) + start_scale, end_scale = scale_range + + for i in range(num_frames): + t = i / (num_frames - 1) if num_frames > 1 else 0 + + # Calculate scale based on zoom type + if zoom_type == 'in': + scale = interpolate(start_scale, end_scale, t, easing) + elif zoom_type == 'out': + scale = interpolate(end_scale, start_scale, t, easing) + elif zoom_type == 'in_out': + if t < 0.5: + scale = interpolate(start_scale, end_scale, t * 2, easing) + else: + scale = interpolate(end_scale, start_scale, (t - 0.5) * 2, easing) + elif zoom_type == 'punch': + # Quick zoom in with overshoot then settle + if t < 0.3: + scale = interpolate(start_scale, end_scale * 1.2, t / 0.3, 'ease_out') + else: + scale = interpolate(end_scale * 1.2, end_scale, (t - 0.3) / 0.7, 'elastic_out') + else: + scale = interpolate(start_scale, end_scale, t, easing) + + # Create frame + frame = create_blank_frame(frame_width, frame_height, bg_color) + + if object_type == 'emoji': + current_size = int(base_size * scale) + + # Clamp size to reasonable bounds + current_size = max(12, min(current_size, frame_width * 2)) + + # Create emoji on transparent background + canvas_size = max(frame_width, frame_height, current_size) * 2 + emoji_canvas = Image.new('RGBA', (canvas_size, canvas_size), (0, 0, 0, 0)) + + draw_emoji_enhanced( + emoji_canvas, + emoji=object_data['emoji'], + position=(canvas_size // 2 - current_size // 2, canvas_size // 2 - current_size // 2), + size=current_size, + shadow=False + ) + + # Optional motion blur for fast zooms + if add_motion_blur and abs(scale - 1.0) > 0.5: + blur_amount = min(5, int(abs(scale - 1.0) * 3)) + emoji_canvas = emoji_canvas.filter(ImageFilter.GaussianBlur(blur_amount)) + + # Crop to frame size centered + left = (canvas_size - frame_width) // 2 + top = (canvas_size - frame_height) // 2 + emoji_cropped = emoji_canvas.crop((left, top, left + frame_width, top + frame_height)) + + # Composite + frame_rgba = frame.convert('RGBA') + frame = Image.alpha_composite(frame_rgba, emoji_cropped) + frame = frame.convert('RGB') + + elif object_type == 'text': + from core.typography import draw_text_with_outline + + current_size = int(base_size * scale) + current_size = max(10, min(current_size, 500)) + + # Create oversized canvas for large text + canvas_size = max(frame_width, frame_height, current_size * 10) + text_canvas = Image.new('RGB', (canvas_size, canvas_size), bg_color) + + draw_text_with_outline( + text_canvas, + text=object_data.get('text', 'ZOOM'), + position=(canvas_size // 2, canvas_size // 2), + font_size=current_size, + text_color=object_data.get('text_color', (0, 0, 0)), + outline_color=object_data.get('outline_color', (255, 255, 255)), + outline_width=max(2, int(current_size * 0.05)), + centered=True + ) + + # Crop to frame + left = (canvas_size - frame_width) // 2 + top = (canvas_size - frame_height) // 2 + frame = text_canvas.crop((left, top, 
left + frame_width, top + frame_height)) + + frames.append(frame) + + return frames + + +def create_explosion_zoom( + emoji: str = '💥', + num_frames: int = 20, + frame_width: int = 480, + frame_height: int = 480, + bg_color: tuple[int, int, int] = (255, 255, 255) +) -> list[Image.Image]: + """ + Create dramatic explosion zoom effect. + + Args: + emoji: Emoji to explode + num_frames: Number of frames + frame_width: Frame width + frame_height: Frame height + bg_color: Background color + + Returns: + List of frames + """ + frames = [] + + for i in range(num_frames): + t = i / (num_frames - 1) if num_frames > 1 else 0 + + # Exponential zoom + scale = 0.1 * math.exp(t * 5) + + # Add rotation for drama + angle = t * 360 * 2 + + frame = create_blank_frame(frame_width, frame_height, bg_color) + + current_size = int(100 * scale) + current_size = max(12, min(current_size, frame_width * 3)) + + # Create emoji + canvas_size = max(frame_width, frame_height, current_size) * 2 + emoji_canvas = Image.new('RGBA', (canvas_size, canvas_size), (0, 0, 0, 0)) + + draw_emoji_enhanced( + emoji_canvas, + emoji=emoji, + position=(canvas_size // 2 - current_size // 2, canvas_size // 2 - current_size // 2), + size=current_size, + shadow=False + ) + + # Rotate + emoji_canvas = emoji_canvas.rotate(angle, center=(canvas_size // 2, canvas_size // 2), resample=Image.BICUBIC) + + # Add motion blur for later frames + if t > 0.5: + blur_amount = int((t - 0.5) * 10) + emoji_canvas = emoji_canvas.filter(ImageFilter.GaussianBlur(blur_amount)) + + # Crop and composite + left = (canvas_size - frame_width) // 2 + top = (canvas_size - frame_height) // 2 + emoji_cropped = emoji_canvas.crop((left, top, left + frame_width, top + frame_height)) + + frame_rgba = frame.convert('RGBA') + frame = Image.alpha_composite(frame_rgba, emoji_cropped) + frame = frame.convert('RGB') + + frames.append(frame) + + return frames + + +def create_mind_blown_zoom( + emoji: str = '🤯', + num_frames: int = 30, + frame_width: int = 480, + frame_height: int = 480, + bg_color: tuple[int, int, int] = (255, 255, 255) +) -> list[Image.Image]: + """ + Create "mind blown" dramatic zoom with shake. 
+ + Args: + emoji: Emoji to use + num_frames: Number of frames + frame_width: Frame width + frame_height: Frame height + bg_color: Background color + + Returns: + List of frames + """ + frames = [] + + for i in range(num_frames): + t = i / (num_frames - 1) if num_frames > 1 else 0 + + # Zoom in then shake + if t < 0.5: + scale = interpolate(0.3, 1.2, t * 2, 'ease_out') + shake_x = 0 + shake_y = 0 + else: + scale = 1.2 + # Shake intensifies + shake_intensity = (t - 0.5) * 40 + shake_x = int(math.sin(t * 50) * shake_intensity) + shake_y = int(math.cos(t * 45) * shake_intensity) + + frame = create_blank_frame(frame_width, frame_height, bg_color) + + current_size = int(100 * scale) + center_x = frame_width // 2 + shake_x + center_y = frame_height // 2 + shake_y + + emoji_canvas = Image.new('RGBA', (frame_width, frame_height), (0, 0, 0, 0)) + draw_emoji_enhanced( + emoji_canvas, + emoji=emoji, + position=(center_x - current_size // 2, center_y - current_size // 2), + size=current_size, + shadow=False + ) + + frame_rgba = frame.convert('RGBA') + frame = Image.alpha_composite(frame_rgba, emoji_canvas) + frame = frame.convert('RGB') + + frames.append(frame) + + return frames + + +# Example usage +if __name__ == '__main__': + print("Creating zoom animations...") + + builder = GIFBuilder(width=480, height=480, fps=20) + + # Example 1: Zoom in + frames = create_zoom_animation( + object_type='emoji', + object_data={'emoji': '🔍', 'size': 100}, + num_frames=30, + zoom_type='in', + scale_range=(0.1, 1.5), + easing='ease_out' + ) + builder.add_frames(frames) + builder.save('zoom_in.gif', num_colors=128) + + # Example 2: Explosion zoom + builder.clear() + frames = create_explosion_zoom(emoji='💥', num_frames=20) + builder.add_frames(frames) + builder.save('zoom_explosion.gif', num_colors=128) + + # Example 3: Mind blown + builder.clear() + frames = create_mind_blown_zoom(emoji='🤯', num_frames=30) + builder.add_frames(frames) + builder.save('zoom_mind_blown.gif', num_colors=128) + + print("Created zoom animations!") diff --git a/skills/theme-factory/LICENSE.txt b/skills/theme-factory/LICENSE.txt new file mode 100644 index 000000000..7a4a3ea24 --- /dev/null +++ b/skills/theme-factory/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/skills/theme-factory/SKILL.md b/skills/theme-factory/SKILL.md
new file mode 100644
index 000000000..90dfceaf2
--- /dev/null
+++ b/skills/theme-factory/SKILL.md
@@ -0,0 +1,59 @@
+---
+name: theme-factory
+description: Toolkit for styling artifacts with a theme. These artifacts can be slides, docs, reports, HTML landing pages, etc. There are 10 pre-set themes with colors/fonts that you can apply to any artifact that has been created, or you can generate a new theme on-the-fly.
+license: Complete terms in LICENSE.txt
+---
+
+
+# Theme Factory Skill
+
+This skill provides a curated collection of professional font and color themes, each with carefully selected color palettes and font pairings. Once a theme is chosen, it can be applied to any artifact.
+
+## Purpose
+
+Use this skill to apply consistent, professional styling to presentation slide decks and other artifacts.
Each theme includes: +- A cohesive color palette with hex codes +- Complementary font pairings for headers and body text +- A distinct visual identity suitable for different contexts and audiences + +## Usage Instructions + +To apply styling to a slide deck or other artifact: + +1. **Show the theme showcase**: Display the `theme-showcase.pdf` file to allow users to see all available themes visually. Do not make any modifications to it; simply show the file for viewing. +2. **Ask for their choice**: Ask which theme to apply to the deck +3. **Wait for selection**: Get explicit confirmation about the chosen theme +4. **Apply the theme**: Once a theme has been chosen, apply the selected theme's colors and fonts to the deck/artifact + +## Themes Available + +The following 10 themes are available, each showcased in `theme-showcase.pdf`: + +1. **Ocean Depths** - Professional and calming maritime theme +2. **Sunset Boulevard** - Warm and vibrant sunset colors +3. **Forest Canopy** - Natural and grounded earth tones +4. **Modern Minimalist** - Clean and contemporary grayscale +5. **Golden Hour** - Rich and warm autumnal palette +6. **Arctic Frost** - Cool and crisp winter-inspired theme +7. **Desert Rose** - Soft and sophisticated dusty tones +8. **Tech Innovation** - Bold and modern tech aesthetic +9. **Botanical Garden** - Fresh and organic garden colors +10. **Midnight Galaxy** - Dramatic and cosmic deep tones + +## Theme Details + +Each theme is defined in the `themes/` directory with complete specifications including: +- Cohesive color palette with hex codes +- Complementary font pairings for headers and body text +- Distinct visual identity suitable for different contexts and audiences + +## Application Process + +After a preferred theme is selected: +1. Read the corresponding theme file from the `themes/` directory +2. Apply the specified colors and fonts consistently throughout the deck +3. Ensure proper contrast and readability +4. Maintain the theme's visual identity across all slides + +## Create your Own Theme +To handle cases where none of the existing themes work for an artifact, create a custom theme. Based on provided inputs, generate a new theme similar to the ones above. Give the theme a similar name describing what the font/color combinations represent. Use any basic description provided to choose appropriate colors/fonts. After generating the theme, show it for review and verification. Following that, apply the theme as described above. diff --git a/skills/theme-factory/theme-showcase.pdf b/skills/theme-factory/theme-showcase.pdf new file mode 100644 index 000000000..24495d145 Binary files /dev/null and b/skills/theme-factory/theme-showcase.pdf differ diff --git a/skills/theme-factory/themes/arctic-frost.md b/skills/theme-factory/themes/arctic-frost.md new file mode 100644 index 000000000..e9f1eb057 --- /dev/null +++ b/skills/theme-factory/themes/arctic-frost.md @@ -0,0 +1,19 @@ +# Arctic Frost + +A cool and crisp winter-inspired theme that conveys clarity, precision, and professionalism. + +## Color Palette + +- **Ice Blue**: `#d4e4f7` - Light backgrounds and highlights +- **Steel Blue**: `#4a6fa5` - Primary accent color +- **Silver**: `#c0c0c0` - Metallic accent elements +- **Crisp White**: `#fafafa` - Clean backgrounds and text + +## Typography + +- **Headers**: DejaVu Sans Bold +- **Body Text**: DejaVu Sans + +## Best Used For + +Healthcare presentations, technology solutions, winter sports, clean tech, pharmaceutical content. 
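The application process in SKILL.md above says to read the chosen theme file and apply its colors and fonts consistently across the deck, but leaves the mechanics to the assistant. As a minimal sketch of what that could look like for the Arctic Frost theme just defined — assuming python-pptx is available and using a hypothetical `deck.pptx` input (the library choice, file names, and the charcoal body-text color are all assumptions, not part of the skill):

```python
# Illustrative sketch only -- the theme-factory skill does not ship this code.
# Assumes python-pptx is installed; "deck.pptx" is a hypothetical input file.
from pptx import Presentation
from pptx.dml.color import RGBColor

# Arctic Frost values copied from themes/arctic-frost.md
STEEL_BLUE = RGBColor(0x4A, 0x6F, 0xA5)   # primary accent, used here for titles
CHARCOAL   = RGBColor(0x33, 0x33, 0x33)   # assumed dark body-text color for contrast

prs = Presentation("deck.pptx")
for slide in prs.slides:
    title = slide.shapes.title                      # None when the layout has no title placeholder
    title_id = title.shape_id if title is not None else None

    for shape in slide.shapes:
        if not shape.has_text_frame:
            continue
        is_title = shape.shape_id == title_id
        for paragraph in shape.text_frame.paragraphs:
            for run in paragraph.runs:
                run.font.name = "DejaVu Sans"               # theme's font family
                run.font.bold = is_title                    # "DejaVu Sans Bold" for headers
                run.font.color.rgb = STEEL_BLUE if is_title else CHARCOAL

prs.save("deck-arctic-frost.pptx")
```

The same pattern generalizes to any of the ten themes: swap in that theme's hex codes and font names, and keep Ice Blue/Crisp White-style light tones for backgrounds so contrast and readability are preserved, as the SKILL.md requires.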
diff --git a/skills/theme-factory/themes/botanical-garden.md b/skills/theme-factory/themes/botanical-garden.md new file mode 100644 index 000000000..0c95bf734 --- /dev/null +++ b/skills/theme-factory/themes/botanical-garden.md @@ -0,0 +1,19 @@ +# Botanical Garden + +A fresh and organic theme featuring vibrant garden-inspired colors for lively presentations. + +## Color Palette + +- **Fern Green**: `#4a7c59` - Rich natural green +- **Marigold**: `#f9a620` - Bright floral accent +- **Terracotta**: `#b7472a` - Earthy warm tone +- **Cream**: `#f5f3ed` - Soft neutral backgrounds + +## Typography + +- **Headers**: DejaVu Serif Bold +- **Body Text**: DejaVu Sans + +## Best Used For + +Garden centers, food presentations, farm-to-table content, botanical brands, natural products. diff --git a/skills/theme-factory/themes/desert-rose.md b/skills/theme-factory/themes/desert-rose.md new file mode 100644 index 000000000..ea7c74eb8 --- /dev/null +++ b/skills/theme-factory/themes/desert-rose.md @@ -0,0 +1,19 @@ +# Desert Rose + +A soft and sophisticated theme with dusty, muted tones perfect for elegant presentations. + +## Color Palette + +- **Dusty Rose**: `#d4a5a5` - Soft primary color +- **Clay**: `#b87d6d` - Earthy accent +- **Sand**: `#e8d5c4` - Warm neutral backgrounds +- **Deep Burgundy**: `#5d2e46` - Rich dark contrast + +## Typography + +- **Headers**: FreeSans Bold +- **Body Text**: FreeSans + +## Best Used For + +Fashion presentations, beauty brands, wedding planning, interior design, boutique businesses. diff --git a/skills/theme-factory/themes/forest-canopy.md b/skills/theme-factory/themes/forest-canopy.md new file mode 100644 index 000000000..90c2b2651 --- /dev/null +++ b/skills/theme-factory/themes/forest-canopy.md @@ -0,0 +1,19 @@ +# Forest Canopy + +A natural and grounded theme featuring earth tones inspired by dense forest environments. + +## Color Palette + +- **Forest Green**: `#2d4a2b` - Primary dark green +- **Sage**: `#7d8471` - Muted green accent +- **Olive**: `#a4ac86` - Light accent color +- **Ivory**: `#faf9f6` - Backgrounds and text + +## Typography + +- **Headers**: FreeSerif Bold +- **Body Text**: FreeSans + +## Best Used For + +Environmental presentations, sustainability reports, outdoor brands, wellness content, organic products. diff --git a/skills/theme-factory/themes/golden-hour.md b/skills/theme-factory/themes/golden-hour.md new file mode 100644 index 000000000..ed8fc256f --- /dev/null +++ b/skills/theme-factory/themes/golden-hour.md @@ -0,0 +1,19 @@ +# Golden Hour + +A rich and warm autumnal palette that creates an inviting and sophisticated atmosphere. + +## Color Palette + +- **Mustard Yellow**: `#f4a900` - Bold primary accent +- **Terracotta**: `#c1666b` - Warm secondary color +- **Warm Beige**: `#d4b896` - Neutral backgrounds +- **Chocolate Brown**: `#4a403a` - Dark text and anchors + +## Typography + +- **Headers**: FreeSans Bold +- **Body Text**: FreeSans + +## Best Used For + +Restaurant presentations, hospitality brands, fall campaigns, cozy lifestyle content, artisan products. diff --git a/skills/theme-factory/themes/midnight-galaxy.md b/skills/theme-factory/themes/midnight-galaxy.md new file mode 100644 index 000000000..97e1c5f38 --- /dev/null +++ b/skills/theme-factory/themes/midnight-galaxy.md @@ -0,0 +1,19 @@ +# Midnight Galaxy + +A dramatic and cosmic theme with deep purples and mystical tones for impactful presentations. 
+ +## Color Palette + +- **Deep Purple**: `#2b1e3e` - Rich dark base +- **Cosmic Blue**: `#4a4e8f` - Mystical mid-tone +- **Lavender**: `#a490c2` - Soft accent color +- **Silver**: `#e6e6fa` - Light highlights and text + +## Typography + +- **Headers**: FreeSans Bold +- **Body Text**: FreeSans + +## Best Used For + +Entertainment industry, gaming presentations, nightlife venues, luxury brands, creative agencies. diff --git a/skills/theme-factory/themes/modern-minimalist.md b/skills/theme-factory/themes/modern-minimalist.md new file mode 100644 index 000000000..6bd26a29b --- /dev/null +++ b/skills/theme-factory/themes/modern-minimalist.md @@ -0,0 +1,19 @@ +# Modern Minimalist + +A clean and contemporary theme with a sophisticated grayscale palette for maximum versatility. + +## Color Palette + +- **Charcoal**: `#36454f` - Primary dark color +- **Slate Gray**: `#708090` - Medium gray for accents +- **Light Gray**: `#d3d3d3` - Backgrounds and dividers +- **White**: `#ffffff` - Text and clean backgrounds + +## Typography + +- **Headers**: DejaVu Sans Bold +- **Body Text**: DejaVu Sans + +## Best Used For + +Tech presentations, architecture portfolios, design showcases, modern business proposals, data visualization. diff --git a/skills/theme-factory/themes/ocean-depths.md b/skills/theme-factory/themes/ocean-depths.md new file mode 100644 index 000000000..b675126f5 --- /dev/null +++ b/skills/theme-factory/themes/ocean-depths.md @@ -0,0 +1,19 @@ +# Ocean Depths + +A professional and calming maritime theme that evokes the serenity of deep ocean waters. + +## Color Palette + +- **Deep Navy**: `#1a2332` - Primary background color +- **Teal**: `#2d8b8b` - Accent color for highlights and emphasis +- **Seafoam**: `#a8dadc` - Secondary accent for lighter elements +- **Cream**: `#f1faee` - Text and light backgrounds + +## Typography + +- **Headers**: DejaVu Sans Bold +- **Body Text**: DejaVu Sans + +## Best Used For + +Corporate presentations, financial reports, professional consulting decks, trust-building content. diff --git a/skills/theme-factory/themes/sunset-boulevard.md b/skills/theme-factory/themes/sunset-boulevard.md new file mode 100644 index 000000000..df799a0cc --- /dev/null +++ b/skills/theme-factory/themes/sunset-boulevard.md @@ -0,0 +1,19 @@ +# Sunset Boulevard + +A warm and vibrant theme inspired by golden hour sunsets, perfect for energetic and creative presentations. + +## Color Palette + +- **Burnt Orange**: `#e76f51` - Primary accent color +- **Coral**: `#f4a261` - Secondary warm accent +- **Warm Sand**: `#e9c46a` - Highlighting and backgrounds +- **Deep Purple**: `#264653` - Dark contrast and text + +## Typography + +- **Headers**: DejaVu Serif Bold +- **Body Text**: DejaVu Sans + +## Best Used For + +Creative pitches, marketing presentations, lifestyle brands, event promotions, inspirational content. diff --git a/skills/theme-factory/themes/tech-innovation.md b/skills/theme-factory/themes/tech-innovation.md new file mode 100644 index 000000000..e029a435f --- /dev/null +++ b/skills/theme-factory/themes/tech-innovation.md @@ -0,0 +1,19 @@ +# Tech Innovation + +A bold and modern theme with high-contrast colors perfect for cutting-edge technology presentations. 
+ +## Color Palette + +- **Electric Blue**: `#0066ff` - Vibrant primary accent +- **Neon Cyan**: `#00ffff` - Bright highlight color +- **Dark Gray**: `#1e1e1e` - Deep backgrounds +- **White**: `#ffffff` - Clean text and contrast + +## Typography + +- **Headers**: DejaVu Sans Bold +- **Body Text**: DejaVu Sans + +## Best Used For + +Tech startups, software launches, innovation showcases, AI/ML presentations, digital transformation content. diff --git a/skills/video-downloader/SKILL.md b/skills/video-downloader/SKILL.md new file mode 100644 index 000000000..5db1cdf10 --- /dev/null +++ b/skills/video-downloader/SKILL.md @@ -0,0 +1,106 @@ +--- +name: video-downloader +description: Downloads videos from YouTube and other platforms for offline viewing, editing, or archival. Handles various formats and quality options. +--- + +# Video Downloader + +This skill downloads videos from YouTube and other platforms directly to your computer. + +## When to Use This Skill + +- Downloading YouTube videos for offline viewing +- Saving educational content for reference +- Archiving important videos +- Getting video files for editing or repurposing +- Downloading your own content from platforms +- Saving conference talks or webinars + +## What This Skill Does + +1. **Downloads Videos**: Fetches videos from YouTube and other platforms +2. **Quality Selection**: Lets you choose resolution (480p, 720p, 1080p, 4K) +3. **Format Options**: Downloads in various formats (MP4, WebM, audio-only) +4. **Batch Downloads**: Can download multiple videos or playlists +5. **Metadata Preservation**: Saves title, description, and thumbnail + +## How to Use + +### Basic Download + +``` +Download this YouTube video: https://youtube.com/watch?v=... +``` + +``` +Download this video in 1080p quality +``` + +### Audio Only + +``` +Download the audio from this YouTube video as MP3 +``` + +### Playlist Download + +``` +Download all videos from this YouTube playlist: [URL] +``` + +### Batch Download + +``` +Download these 5 YouTube videos: +1. [URL] +2. [URL] +... +``` + +## Example + +**User**: "Download this YouTube video: https://youtube.com/watch?v=abc123" + +**Output**: +``` +Downloading from YouTube... 
+ +Video: "How to Build Products Users Love" +Channel: Lenny's Podcast +Duration: 45:32 +Quality: 1080p + +Progress: ████████████████████ 100% + +✓ Downloaded: how-to-build-products-users-love.mp4 +✓ Saved thumbnail: how-to-build-products-users-love.jpg +✓ Size: 342 MB + +Saved to: ~/Downloads/ +``` + +**Inspired by:** Lenny's workflow from his newsletter + +## Important Notes + +⚠️ **Copyright & Fair Use** +- Only download videos you have permission to download +- Respect copyright laws and platform terms of service +- Use for personal, educational, or fair use purposes +- Don't redistribute copyrighted content + +## Tips + +- Specify quality if you need lower file size (720p vs 1080p) +- Use audio-only for podcasts or music to save space +- Download to a dedicated folder to stay organized +- Check file size before downloading on slow connections + +## Common Use Cases + +- **Education**: Save tutorials and courses for offline learning +- **Research**: Archive videos for reference +- **Content Creation**: Download your own content from platforms +- **Backup**: Save important videos before they're removed +- **Offline Viewing**: Watch videos without internet access + diff --git a/skills/xlsx/LICENSE.txt b/skills/xlsx/LICENSE.txt new file mode 100644 index 000000000..c55ab4222 --- /dev/null +++ b/skills/xlsx/LICENSE.txt @@ -0,0 +1,30 @@ +© 2025 Anthropic, PBC. All rights reserved. + +LICENSE: Use of these materials (including all code, prompts, assets, files, +and other components of this Skill) is governed by your agreement with +Anthropic regarding use of Anthropic's services. If no separate agreement +exists, use is governed by Anthropic's Consumer Terms of Service or +Commercial Terms of Service, as applicable: +https://www.anthropic.com/legal/consumer-terms +https://www.anthropic.com/legal/commercial-terms +Your applicable agreement is referred to as the "Agreement." "Services" are +as defined in the Agreement. + +ADDITIONAL RESTRICTIONS: Notwithstanding anything in the Agreement to the +contrary, users may not: + +- Extract these materials from the Services or retain copies of these + materials outside the Services +- Reproduce or copy these materials, except for temporary copies created + automatically during authorized use of the Services +- Create derivative works based on these materials +- Distribute, sublicense, or transfer these materials to any third party +- Make, offer to sell, sell, or import any inventions embodied in these + materials +- Reverse engineer, decompile, or disassemble these materials + +The receipt, viewing, or possession of these materials does not convey or +imply any license or right beyond those expressly granted above. + +Anthropic retains all right, title, and interest in these materials, +including all copyrights, patents, and other intellectual property rights. diff --git a/skills/xlsx/SKILL.md b/skills/xlsx/SKILL.md new file mode 100644 index 000000000..22db189c8 --- /dev/null +++ b/skills/xlsx/SKILL.md @@ -0,0 +1,289 @@ +--- +name: xlsx +description: "Comprehensive spreadsheet creation, editing, and analysis with support for formulas, formatting, data analysis, and visualization. When Claude needs to work with spreadsheets (.xlsx, .xlsm, .csv, .tsv, etc) for: (1) Creating new spreadsheets with formulas and formatting, (2) Reading or analyzing data, (3) Modify existing spreadsheets while preserving formulas, (4) Data analysis and visualization in spreadsheets, or (5) Recalculating formulas" +license: Proprietary. 
LICENSE.txt has complete terms +--- + +# Requirements for Outputs + +## All Excel files + +### Zero Formula Errors +- Every Excel model MUST be delivered with ZERO formula errors (#REF!, #DIV/0!, #VALUE!, #N/A, #NAME?) + +### Preserve Existing Templates (when updating templates) +- Study and EXACTLY match existing format, style, and conventions when modifying files +- Never impose standardized formatting on files with established patterns +- Existing template conventions ALWAYS override these guidelines + +## Financial models + +### Color Coding Standards +Unless otherwise stated by the user or an existing template: + +#### Industry-Standard Color Conventions +- **Blue text (RGB: 0,0,255)**: Hardcoded inputs, and numbers users will change for scenarios +- **Black text (RGB: 0,0,0)**: ALL formulas and calculations +- **Green text (RGB: 0,128,0)**: Links pulling from other worksheets within same workbook +- **Red text (RGB: 255,0,0)**: External links to other files +- **Yellow background (RGB: 255,255,0)**: Key assumptions needing attention or cells that need to be updated + +### Number Formatting Standards + +#### Required Format Rules +- **Years**: Format as text strings (e.g., "2024" not "2,024") +- **Currency**: Use $#,##0 format; ALWAYS specify units in headers ("Revenue ($mm)") +- **Zeros**: Use number formatting to make all zeros "-", including percentages (e.g., "$#,##0;($#,##0);-") +- **Percentages**: Default to 0.0% format (one decimal) +- **Multiples**: Format as 0.0x for valuation multiples (EV/EBITDA, P/E) +- **Negative numbers**: Use parentheses (123), not a minus sign (-123) + +### Formula Construction Rules + +#### Assumptions Placement +- Place ALL assumptions (growth rates, margins, multiples, etc.) in separate assumption cells +- Use cell references instead of hardcoded values in formulas +- Example: Use =B5*(1+$B$6) instead of =B5*1.05 + +#### Formula Error Prevention +- Verify all cell references are correct +- Check for off-by-one errors in ranges +- Ensure consistent formulas across all projection periods +- Test with edge cases (zero values, negative numbers) +- Verify no unintended circular references + +#### Documentation Requirements for Hardcodes +- Document the source in a cell comment, or in an adjacent cell if the hardcode sits at the end of a table. Format: "Source: [System/Document], [Date], [Specific Reference], [URL if applicable]" +- Examples: + - "Source: Company 10-K, FY2024, Page 45, Revenue Note, [SEC EDGAR URL]" + - "Source: Company 10-Q, Q2 2025, Exhibit 99.1, [SEC EDGAR URL]" + - "Source: Bloomberg Terminal, 8/15/2025, AAPL US Equity" + - "Source: FactSet, 8/20/2025, Consensus Estimates Screen" + +# XLSX creation, editing, and analysis + +## Overview + +A user may ask you to create, edit, or analyze the contents of an .xlsx file. You have different tools and workflows available for different tasks. + +## Important Requirements + +**LibreOffice Required for Formula Recalculation**: You can assume LibreOffice is installed for recalculating formula values using the `recalc.py` script.
The script automatically configures LibreOffice on first run + +## Reading and analyzing data + +### Data analysis with pandas +For data analysis, visualization, and basic operations, use **pandas** which provides powerful data manipulation capabilities: + +```python +import pandas as pd + +# Read Excel +df = pd.read_excel('file.xlsx') # Default: first sheet +all_sheets = pd.read_excel('file.xlsx', sheet_name=None) # All sheets as dict + +# Analyze +df.head() # Preview data +df.info() # Column info +df.describe() # Statistics + +# Write Excel +df.to_excel('output.xlsx', index=False) +``` + +## Excel File Workflows + +## CRITICAL: Use Formulas, Not Hardcoded Values + +**Always use Excel formulas instead of calculating values in Python and hardcoding them.** This ensures the spreadsheet remains dynamic and updateable. + +### ❌ WRONG - Hardcoding Calculated Values +```python +# Bad: Calculating in Python and hardcoding result +total = df['Sales'].sum() +sheet['B10'] = total # Hardcodes 5000 + +# Bad: Computing growth rate in Python +growth = (df.iloc[-1]['Revenue'] - df.iloc[0]['Revenue']) / df.iloc[0]['Revenue'] +sheet['C5'] = growth # Hardcodes 0.15 + +# Bad: Python calculation for average +avg = sum(values) / len(values) +sheet['D20'] = avg # Hardcodes 42.5 +``` + +### ✅ CORRECT - Using Excel Formulas +```python +# Good: Let Excel calculate the sum +sheet['B10'] = '=SUM(B2:B9)' + +# Good: Growth rate as Excel formula +sheet['C5'] = '=(C4-C2)/C2' + +# Good: Average using Excel function +sheet['D20'] = '=AVERAGE(D2:D19)' +``` + +This applies to ALL calculations - totals, percentages, ratios, differences, etc. The spreadsheet should be able to recalculate when source data changes. + +## Common Workflow +1. **Choose tool**: pandas for data, openpyxl for formulas/formatting +2. **Create/Load**: Create new workbook or load existing file +3. **Modify**: Add/edit data, formulas, and formatting +4. **Save**: Write to file +5. **Recalculate formulas (MANDATORY IF USING FORMULAS)**: Use the recalc.py script + ```bash + python recalc.py output.xlsx + ``` +6. 
**Verify and fix any errors**: + - The script returns JSON with error details + - If `status` is `errors_found`, check `error_summary` for specific error types and locations + - Fix the identified errors and recalculate again + - Common errors to fix: + - `#REF!`: Invalid cell references + - `#DIV/0!`: Division by zero + - `#VALUE!`: Wrong data type in formula + - `#NAME?`: Unrecognized formula name + +### Creating new Excel files + +```python +# Using openpyxl for formulas and formatting +from openpyxl import Workbook +from openpyxl.styles import Font, PatternFill, Alignment + +wb = Workbook() +sheet = wb.active + +# Add data +sheet['A1'] = 'Hello' +sheet['B1'] = 'World' +sheet.append(['Row', 'of', 'data']) + +# Add formula +sheet['B2'] = '=SUM(A1:A10)' + +# Formatting +sheet['A1'].font = Font(bold=True, color='FF0000') +sheet['A1'].fill = PatternFill('solid', start_color='FFFF00') +sheet['A1'].alignment = Alignment(horizontal='center') + +# Column width +sheet.column_dimensions['A'].width = 20 + +wb.save('output.xlsx') +``` + +### Editing existing Excel files + +```python +# Using openpyxl to preserve formulas and formatting +from openpyxl import load_workbook + +# Load existing file +wb = load_workbook('existing.xlsx') +sheet = wb.active # or wb['SheetName'] for specific sheet + +# Working with multiple sheets +for sheet_name in wb.sheetnames: + sheet = wb[sheet_name] + print(f"Sheet: {sheet_name}") + +# Modify cells +sheet['A1'] = 'New Value' +sheet.insert_rows(2) # Insert row at position 2 +sheet.delete_cols(3) # Delete column 3 + +# Add new sheet +new_sheet = wb.create_sheet('NewSheet') +new_sheet['A1'] = 'Data' + +wb.save('modified.xlsx') +``` + +## Recalculating formulas + +Excel files created or modified by openpyxl contain formulas as strings but not calculated values. Use the provided `recalc.py` script to recalculate formulas: + +```bash +python recalc.py <file.xlsx> [timeout_seconds] +``` + +Example: +```bash +python recalc.py output.xlsx 30 +``` + +The script: +- Automatically sets up LibreOffice macro on first run +- Recalculates all formulas in all sheets +- Scans ALL cells for Excel errors (#REF!, #DIV/0!, etc.) +- Returns JSON with detailed error locations and counts +- Works on both Linux and macOS + +## Formula Verification Checklist + +Quick checks to ensure formulas work correctly: + +### Essential Verification +- [ ] **Test 2-3 sample references**: Verify they pull correct values before building full model +- [ ] **Column mapping**: Confirm Excel columns match (e.g., column 64 = BL, not BK) +- [ ] **Row offset**: Remember Excel rows are 1-indexed (DataFrame row 5 = Excel row 6) + +### Common Pitfalls +- [ ] **NaN handling**: Check for null values with `pd.notna()` +- [ ] **Far-right columns**: FY data often in columns 50+ +- [ ] **Multiple matches**: Search all occurrences, not just first +- [ ] **Division by zero**: Check denominators before using `/` in formulas (#DIV/0!) +- [ ] **Wrong references**: Verify all cell references point to intended cells (#REF!)
+- [ ] **Cross-sheet references**: Use correct format (Sheet1!A1) for linking sheets + +### Formula Testing Strategy +- [ ] **Start small**: Test formulas on 2-3 cells before applying broadly +- [ ] **Verify dependencies**: Check all cells referenced in formulas exist +- [ ] **Test edge cases**: Include zero, negative, and very large values + +### Interpreting recalc.py Output +The script returns JSON with error details: +```json +{ + "status": "success", // or "errors_found" + "total_errors": 0, // Total error count + "total_formulas": 42, // Number of formulas in file + "error_summary": { // Only present if errors found + "#REF!": { + "count": 2, + "locations": ["Sheet1!B5", "Sheet1!C10"] + } + } +} +``` + +## Best Practices + +### Library Selection +- **pandas**: Best for data analysis, bulk operations, and simple data export +- **openpyxl**: Best for complex formatting, formulas, and Excel-specific features + +### Working with openpyxl +- Cell indices are 1-based (row=1, column=1 refers to cell A1) +- Use `data_only=True` to read calculated values: `load_workbook('file.xlsx', data_only=True)` +- **Warning**: If opened with `data_only=True` and saved, formulas are replaced with values and permanently lost +- For large files: Use `read_only=True` for reading or `write_only=True` for writing +- Formulas are preserved but not evaluated - use recalc.py to update values + +### Working with pandas +- Specify data types to avoid inference issues: `pd.read_excel('file.xlsx', dtype={'id': str})` +- For large files, read specific columns: `pd.read_excel('file.xlsx', usecols=['A', 'C', 'E'])` +- Handle dates properly: `pd.read_excel('file.xlsx', parse_dates=['date_column'])` + +## Code Style Guidelines +**IMPORTANT**: When generating Python code for Excel operations: +- Write minimal, concise Python code without unnecessary comments +- Avoid verbose variable names and redundant operations +- Avoid unnecessary print statements + +**For Excel files themselves**: +- Add comments to cells with complex formulas or important assumptions +- Document data sources for hardcoded values +- Include notes for key calculations and model sections \ No newline at end of file diff --git a/skills/xlsx/recalc.py b/skills/xlsx/recalc.py new file mode 100644 index 000000000..102e157b0 --- /dev/null +++ b/skills/xlsx/recalc.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 +""" +Excel Formula Recalculation Script +Recalculates all formulas in an Excel file using LibreOffice +""" + +import json +import sys +import subprocess +import os +import platform +from pathlib import Path +from openpyxl import load_workbook + + +def setup_libreoffice_macro(): + """Setup LibreOffice macro for recalculation if not already configured""" + if platform.system() == 'Darwin': + macro_dir = os.path.expanduser('~/Library/Application Support/LibreOffice/4/user/basic/Standard') + else: + macro_dir = os.path.expanduser('~/.config/libreoffice/4/user/basic/Standard') + + macro_file = os.path.join(macro_dir, 'Module1.xba') + + if os.path.exists(macro_file): + with open(macro_file, 'r') as f: + if 'RecalculateAndSave' in f.read(): + return True + + if not os.path.exists(macro_dir): + subprocess.run(['soffice', '--headless', '--terminate_after_init'], + capture_output=True, timeout=10) + os.makedirs(macro_dir, exist_ok=True) + + # Standard Basic module (Module1.xba) XML envelope wrapping the recalculation macro + macro_content = '''<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE script:module PUBLIC "-//OpenOffice.org//DTD OfficeDocument 1.0//EN" "module.dtd"> +<script:module xmlns:script="http://openoffice.org/2000/script" script:name="Module1" script:language="StarBasic" script:moduleType="normal"> +Sub RecalculateAndSave() + ThisComponent.calculateAll() + ThisComponent.store() + ThisComponent.close(True) +End Sub +</script:module>''' + + try: + with open(macro_file, 'w') as f: + f.write(macro_content) + return
True + except Exception: + return False + + +def recalc(filename, timeout=30): + """ + Recalculate formulas in Excel file and report any errors + + Args: + filename: Path to Excel file + timeout: Maximum time to wait for recalculation (seconds) + + Returns: + dict with error locations and counts + """ + if not Path(filename).exists(): + return {'error': f'File {filename} does not exist'} + + abs_path = str(Path(filename).absolute()) + + if not setup_libreoffice_macro(): + return {'error': 'Failed to setup LibreOffice macro'} + + cmd = [ + 'soffice', '--headless', '--norestore', + 'vnd.sun.star.script:Standard.Module1.RecalculateAndSave?language=Basic&location=application', + abs_path + ] + + # Handle timeout command differences between Linux and macOS + if platform.system() != 'Windows': + timeout_cmd = 'timeout' if platform.system() == 'Linux' else None + if platform.system() == 'Darwin': + # Check if gtimeout is available on macOS + try: + subprocess.run(['gtimeout', '--version'], capture_output=True, timeout=1, check=False) + timeout_cmd = 'gtimeout' + except (FileNotFoundError, subprocess.TimeoutExpired): + pass + + if timeout_cmd: + cmd = [timeout_cmd, str(timeout)] + cmd + + result = subprocess.run(cmd, capture_output=True, text=True) + + if result.returncode != 0 and result.returncode != 124: # 124 is timeout exit code + error_msg = result.stderr or 'Unknown error during recalculation' + if 'Module1' in error_msg or 'RecalculateAndSave' not in error_msg: + return {'error': 'LibreOffice macro not configured properly'} + else: + return {'error': error_msg} + + # Check for Excel errors in the recalculated file - scan ALL cells + try: + wb = load_workbook(filename, data_only=True) + + excel_errors = ['#VALUE!', '#DIV/0!', '#REF!', '#NAME?', '#NULL!', '#NUM!', '#N/A'] + error_details = {err: [] for err in excel_errors} + total_errors = 0 + + for sheet_name in wb.sheetnames: + ws = wb[sheet_name] + # Check ALL rows and columns - no limits + for row in ws.iter_rows(): + for cell in row: + if cell.value is not None and isinstance(cell.value, str): + for err in excel_errors: + if err in cell.value: + location = f"{sheet_name}!{cell.coordinate}" + error_details[err].append(location) + total_errors += 1 + break + + wb.close() + + # Build result summary + result = { + 'status': 'success' if total_errors == 0 else 'errors_found', + 'total_errors': total_errors, + 'error_summary': {} + } + + # Add non-empty error categories + for err_type, locations in error_details.items(): + if locations: + result['error_summary'][err_type] = { + 'count': len(locations), + 'locations': locations[:20] # Show up to 20 locations + } + + # Add formula count for context - also check ALL cells + wb_formulas = load_workbook(filename, data_only=False) + formula_count = 0 + for sheet_name in wb_formulas.sheetnames: + ws = wb_formulas[sheet_name] + for row in ws.iter_rows(): + for cell in row: + if cell.value and isinstance(cell.value, str) and cell.value.startswith('='): + formula_count += 1 + wb_formulas.close() + + result['total_formulas'] = formula_count + + return result + + except Exception as e: + return {'error': str(e)} + + +def main(): + if len(sys.argv) < 2: + print("Usage: python recalc.py [timeout_seconds]") + print("\nRecalculates all formulas in an Excel file using LibreOffice") + print("\nReturns JSON with error details:") + print(" - status: 'success' or 'errors_found'") + print(" - total_errors: Total number of Excel errors found") + print(" - total_formulas: Number of formulas in the file") + print(" - 
error_summary: Breakdown by error type with locations") + print(" - #VALUE!, #DIV/0!, #REF!, #NAME?, #NULL!, #NUM!, #N/A") + sys.exit(1) + + filename = sys.argv[1] + timeout = int(sys.argv[2]) if len(sys.argv) > 2 else 30 + + result = recalc(filename, timeout) + print(json.dumps(result, indent=2)) + + +if __name__ == '__main__': + main() \ No newline at end of file
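As a closing illustration (not part of this patch), the xlsx workflow above can be exercised end to end: write real Excel formulas with openpyxl, run `recalc.py`, and branch on the JSON report it prints. The sketch below assumes `recalc.py` sits in the working directory; `report.xlsx` and the sample figures are arbitrary examples.

```python
# Illustrative end-to-end sketch for the xlsx skill workflow:
# build a sheet whose totals are Excel formulas, then recalculate
# with recalc.py and inspect its JSON report.
import json
import subprocess
from openpyxl import Workbook

wb = Workbook()
ws = wb.active
ws.append(["Month", "Revenue"])
for month, revenue in [("Jan", 1200), ("Feb", 1350), ("Mar", 1500)]:
    ws.append([month, revenue])
ws["A5"] = "Total"
ws["B5"] = "=SUM(B2:B4)"    # Excel formula, not a value computed in Python
ws["A6"] = "Growth"
ws["B6"] = "=(B4-B2)/B2"    # growth rate stays dynamic in the sheet
wb.save("report.xlsx")

# Mandatory recalculation step: run recalc.py and check its JSON output
proc = subprocess.run(
    ["python", "recalc.py", "report.xlsx", "30"],
    capture_output=True, text=True, check=True,
)
report = json.loads(proc.stdout)
if "error" in report:
    print("Recalculation failed:", report["error"])
elif report["status"] == "errors_found":
    print("Fix these formula errors:", json.dumps(report["error_summary"], indent=2))
else:
    print(f"OK: {report['total_formulas']} formulas recalculated with no errors")
```

Because the totals live in the sheet as formulas rather than values computed in Python, rerunning `recalc.py` after the revenue rows change refreshes them without regenerating the workbook.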