Commit

init

TheExplainthis committed Feb 19, 2023
0 parents commit 50de707
Showing 20 changed files with 597 additions and 0 deletions.
92 changes: 92 additions & 0 deletions .dockerignore
@@ -0,0 +1,92 @@
*.log
config.dev.json
config.json
# Git
.git
.gitignore
.gitattributes


# CI
.codeclimate.yml
.travis.yml
.taskcluster.yml

# Docker
docker-compose.yml
Dockerfile
.docker
.dockerignore

# Byte-compiled / optimized / DLL files
**/__pycache__/
**/*.py[cod]

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other info into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.cache
nosetests.xml
coverage.xml

# Translations
*.mo
*.pot

# Django stuff:
*.log

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Virtual environment
.env
.venv/
venv/

# PyCharm
.idea

# Python mode for VIM
.ropeproject
**/.ropeproject

# Vim swap files
**/*.swp

# VS Code
.vscode/
4 changes: 4 additions & 0 deletions .env.example
@@ -0,0 +1,4 @@
OPENAI_API =
OPENAI_MODEL_ENGINE =
OPENAI_MAX_TOKENS = 128
DISCORD_TOKEN =
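Copied to .env, these variables configure the bot. The values below are illustrative placeholders, not real credentials; text-davinci-003 is an assumed engine name, consistent with the completion-style API used by the pinned openai==0.26.5:

OPENAI_API = sk-your-openai-api-key
OPENAI_MODEL_ENGINE = text-davinci-003
OPENAI_MAX_TOKENS = 128
DISCORD_TOKEN = your-discord-bot-token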
121 changes: 121 additions & 0 deletions .gitignore
@@ -0,0 +1,121 @@
# Logs
.DS_Store
*/.DS_Store
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*

# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json

# Runtime data
pids
*.pid
*.seed
*.pid.lock

# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov

# Coverage directory used by tools like istanbul
coverage
*.lcov

# nyc test coverage
.nyc_output

# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt

# Bower dependency directory (https://bower.io/)
bower_components

# node-waf configuration
.lock-wscript

# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release

# Dependency directories
node_modules/
jspm_packages/

# Snowpack dependency directory (https://snowpack.dev/)
web_modules/

# TypeScript cache
*.tsbuildinfo

# Optional npm cache directory
.npm

# Optional eslint cache
.eslintcache

# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/

# Optional REPL history
.node_repl_history

# Output of 'npm pack'
*.tgz

# Yarn Integrity file
.yarn-integrity

# dotenv environment variables file
.env
.env.test

# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache

# Next.js build output
.next
out

# Nuxt.js build / generate output
.nuxt
dist

# Gatsby files
.cache/
# Comment in the public line if your project uses Gatsby and not Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public

# vuepress build output
.vuepress/dist

# Serverless directories
.serverless/

# FuseBox cache
.fusebox/

# DynamoDB Local files
.dynamodb/

# TernJS port file
.tern-port

# Stores VSCode versions used for testing VSCode extensions
.vscode-test

# yarn v2
.yarn/cache
.yarn/unplugged
.yarn/build-state.yml
.yarn/install-state.gz
.pnp.*



9 changes: 9 additions & 0 deletions Dockerfile
@@ -0,0 +1,9 @@
FROM python:3.9-alpine


COPY ./ /DiscordBot
WORKDIR /DiscordBot

RUN pip3 install -r requirements.txt

CMD ["python3", "main.py"]
21 changes: 21 additions & 0 deletions LICENSE
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2023 ExplainThis

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
1 change: 1 addition & 0 deletions Procfile
@@ -0,0 +1 @@
web: python main.py
9 changes: 9 additions & 0 deletions docker-compose.yaml
@@ -0,0 +1,9 @@
version: "3"

services:
  app:
    container_name: discord-chatgpt-ai-assistant
    build: .
    restart: always
    ports:
      # APP_PORT is not defined in .env.example; it must be set in the
      # environment (or a .env file next to this compose file) before
      # `docker compose up`.
      - "${APP_PORT}:${APP_PORT}"
60 changes: 60 additions & 0 deletions main.py
@@ -0,0 +1,60 @@

import os

from dotenv import load_dotenv
import discord

from src.discordBot import DiscordClient, Sender
from src.logger import logger
from src.chatgpt import ChatGPT, DALLE
from src.models import OpenAIModel
from src.memory import Memory
from src.server import keep_alive

load_dotenv()

models = OpenAIModel(api_key=os.getenv('OPENAI_API'), model_engine=os.getenv('OPENAI_MODEL_ENGINE'), max_tokens=int(os.getenv('OPENAI_MAX_TOKENS')))

memory = Memory()
chatgpt = ChatGPT(models, memory)
dalle = DALLE(models)


def run():
    client = DiscordClient()
    sender = Sender()

    @client.tree.command(name="chat", description="Have a chat with ChatGPT")
    async def chat(interaction: discord.Interaction, *, message: str):
        if interaction.user == client.user:
            return
        await interaction.response.defer()
        # Key memory by the stable user id so /reset (which uses user_id)
        # clears the same entry this command writes.
        receive = chatgpt.get_response(interaction.user.id, message)
        await sender.send_message(interaction, message, receive)

    @client.tree.command(name="imagine", description="Generate image from text")
    async def imagine(interaction: discord.Interaction, *, prompt: str):
        if interaction.user == client.user:
            return
        await interaction.response.defer()
        image_url = dalle.generate(prompt)
        await sender.send_image(interaction, prompt, image_url)

    @client.tree.command(name="reset", description="Reset ChatGPT conversation history")
    async def reset(interaction: discord.Interaction):
        user_id = interaction.user.id
        logger.info(f"resetting memory from {user_id}")
        # Defer before the try block so the error followup can still be sent
        # if clean_history raises.
        await interaction.response.defer(ephemeral=True)
        try:
            chatgpt.clean_history(user_id)
            await interaction.followup.send(f'> Reset ChatGPT conversation history < - <@{user_id}>')
        except Exception as e:
            logger.error(f"Error resetting memory: {e}")
            await interaction.followup.send('> Oops! Something went wrong. <')

    client.run(os.getenv('DISCORD_TOKEN'))


if __name__ == '__main__':
    keep_alive()
    run()
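keep_alive comes from src.server, which is not visible in this truncated view. Given that Flask is pinned in requirements.txt and the Procfile runs main.py directly, a common pattern (for example on Replit) is a tiny web server on a background thread; a hypothetical sketch of what such a helper might look like, with the route and port as illustrative assumptions:

# Hypothetical sketch of src/server.py; the actual file is not shown in this diff.
import threading

from flask import Flask

app = Flask(__name__)


@app.route('/')
def home():
    return 'Bot is alive.'


def keep_alive():
    # Serve on a daemon thread so client.run() can keep the main thread.
    server = threading.Thread(
        target=lambda: app.run(host='0.0.0.0', port=8080), daemon=True)
    server.start()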
5 changes: 5 additions & 0 deletions requirements.txt
@@ -0,0 +1,5 @@
openai==0.26.5
requests==2.28.2
discord.py==2.1.1
python-dotenv==0.21.1
Flask==2.2.3
1 change: 1 addition & 0 deletions runtime.txt
@@ -0,0 +1 @@
python-3.9.16
Empty file added src/__init__.py
Empty file.
26 changes: 26 additions & 0 deletions src/chatgpt.py
@@ -0,0 +1,26 @@
from src.models import ModelInterface
from src.memory import MemoryInterface


class ChatGPT:
    def __init__(self, model: ModelInterface, memory: MemoryInterface = None):
        self.model = model
        self.memory = memory

    def get_response(self, user_id: str, text: str) -> str:
        # Prepend the user's stored history (if any) to the new message.
        prompt = text if self.memory is None else f'{self.memory.get(user_id)}\n\n{text}'
        response = self.model.text_completion(f'{prompt} <|endoftext|>')
        if self.memory is not None:
            self.memory.append(user_id, [prompt, response])
        return response

    def clean_history(self, user_id: str) -> None:
        # Guard against the memory-less configuration the constructor allows.
        if self.memory is not None:
            self.memory.remove(user_id)


class DALLE:
    def __init__(self, model: ModelInterface):
        self.model = model

    def generate(self, text: str) -> str:
        return self.model.image_generation(text)
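The diff view is truncated before src/models.py and src/memory.py, so ModelInterface and MemoryInterface are not shown. From the calls made here and in main.py (text_completion, image_generation, get, append, remove), the interfaces would look roughly like this hypothetical sketch, not the author's code:

from typing import Any, List


class ModelInterface:
    def text_completion(self, prompt: str) -> str:
        raise NotImplementedError

    def image_generation(self, prompt: str) -> str:
        raise NotImplementedError


class MemoryInterface:
    def get(self, user_id: str) -> str:
        raise NotImplementedError

    def append(self, user_id: str, entry: List[Any]) -> None:
        raise NotImplementedError

    def remove(self, user_id: str) -> None:
        raise NotImplementedError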
(Diff truncated: the remaining changed files are not shown in this view.)