|
1 | 1 | from rest_framework_dataclasses.serializers import DataclassSerializer
|
| 2 | +import openai |
2 | 3 | from typing import Literal, Optional, List, Dict
|
| 4 | +from core.api.db_agent import DBChatAgent |
| 5 | +from django.conf import settings |
3 | 6 | from datetime import datetime
|
4 | 7 | from drf_spectacular.utils import extend_schema
|
5 | 8 | from dataclasses import dataclass
|
|
13 | 16 | from asgiref.sync import sync_to_async, async_to_sync
|
14 | 17 | from core.models import Project, ChatMessage, translate_to_all_langs_in_list, get_langs_in_project
|
15 | 18 | from core.api.user_data import serialize_message
|
| 19 | +from core.mdconverter import convert_markdown |
| 20 | +from uuid import uuid4 |
| 21 | +import base64 |
| 22 | + |
| 23 | + |
def pdf_to_base64(file_path):
    """Return the contents of the PDF at *file_path* as a base64 string.

    The file is read as raw bytes and encoded with standard base64; the
    result is decoded to a plain ``str`` (no data-URI prefix is added).
    """
    with open(file_path, "rb") as pdf_handle:
        encoded = base64.b64encode(pdf_handle.read())
    return encoded.decode("utf-8")
16 | 33 |
|
17 | 34 |
|
18 | 35 | @dataclass
|
@@ -42,30 +59,165 @@ def request_report(request):
|
42 | 59 | project_group_slug = f"project-{data.project_hash}"
|
43 | 60 |
|
44 | 61 | project = Project.objects.get(hash=data.project_hash)
|
45 |
| - msg = "Ok starting on that report for you give me a moment" |
| 62 | + msg = "Ok starting on that report for you give me a moment\n" |
| 63 | + #msg += "I'll perform the following steps to generate your report:" |
| 64 | + ai_user_for_project = project.ai_user |
46 | 65 |
|
47 | 66 | project_langs = get_langs_in_project(project)
|
48 | 67 | message = ChatMessage.objects.create(
|
49 | 68 | project=project,
|
50 |
| - original_message=data.text, |
51 |
| - sender=request.user, |
| 69 | + original_message=msg, |
| 70 | + sender=ai_user_for_project, |
52 | 71 | data=translate_to_all_langs_in_list(
|
53 | 72 | msg, project_langs, str("english")),
|
54 | 73 | )
|
55 | 74 |
|
56 |
| - ai_user_for_project = project.ai_user |
57 |
| - |
58 | 75 | # first let the ai say that it's working on the report now
|
59 |
| - async_to_sync(channel_layer.group_send)(project_group_slug, { |
60 |
| - "type": "broadcast_message", |
61 |
| - "data": { |
62 |
| - "event": data.type, |
63 |
| - **serialize_message(message), |
64 |
| - "user": { |
65 |
| - "hash": str(ai_user_for_project.hash), |
66 |
| - "name": ai_user_for_project.first_name |
| 76 | + def send_message(_message): |
| 77 | + async_to_sync(channel_layer.group_send)(project_group_slug, { |
| 78 | + "type": "broadcast_message", |
| 79 | + "data": { |
| 80 | + "event": "new_message", |
| 81 | + **serialize_message(_message), |
| 82 | + "user": { |
| 83 | + "hash": str(ai_user_for_project.hash), |
| 84 | + "name": ai_user_for_project.first_name |
| 85 | + } |
67 | 86 | }
|
| 87 | + }) |
| 88 | + |
| 89 | + send_message(message) |
| 90 | + |
| 91 | + # THis agent was to overpowerd and needed more tools to use, it would always ask to many questions so we just use the api for now |
| 92 | + # agent = DBChatAgent( |
| 93 | + # ai_user=ai_user_for_project, project=project, |
| 94 | + # send_message_func=send_message, |
| 95 | + # user_lang=str(request.user.profile.language), |
| 96 | + # memory_state=message_state, |
| 97 | + # model="gpt-4", |
| 98 | + # open_ai_api_key=settings.OPENAI_KEY, |
| 99 | + # buffer_memory_token_limit=500, |
| 100 | + # verbose=True, |
| 101 | + # lang_list=project_langs, |
| 102 | + # tools=[], |
| 103 | + # ) |
| 104 | + #out, after_state, token_usage = agent(prompt) |
| 105 | + |
| 106 | + prompts = [ |
| 107 | + "based on the whole conversation determine the topic of the project and generate a short description", |
| 108 | + "Based on the whole conversation plase generate an outline of which events took place and return it as a markdown list" |
| 109 | + ] |
| 110 | + |
| 111 | + parts = [ |
| 112 | + "description", |
| 113 | + "outline", |
| 114 | + "timeline", |
| 115 | + "shedule", |
| 116 | + "technicans", |
| 117 | + "expenses" |
| 118 | + ] |
| 119 | + |
| 120 | + titles = [ |
| 121 | + "## Description", |
| 122 | + "## Overview", |
| 123 | + "## Timeline", |
| 124 | + "## Shedule Performance", |
| 125 | + "## Technicians", |
| 126 | + "## Expenses", |
| 127 | + "## Problems and difficulties" |
| 128 | + ] |
| 129 | + |
| 130 | + def as_message(message, user="user"): |
| 131 | + return { |
| 132 | + 'role': user, |
| 133 | + "content": message |
68 | 134 | }
|
69 |
| - }) |
| 135 | + |
| 136 | + datas = {} |
| 137 | + |
| 138 | + i = 0 |
| 139 | + for pompt in prompts: |
| 140 | + past_messages = ChatMessage.objects.filter( |
| 141 | + project=project).order_by("-time") |
| 142 | + |
| 143 | + # setup a db agent: |
| 144 | + # It gets the full message history |
| 145 | + message_state = [] |
| 146 | + |
| 147 | + for message in past_messages: |
| 148 | + if message.sender == ai_user_for_project: |
| 149 | + message_state.append(as_message( |
| 150 | + message.original_message, "assistant")) |
| 151 | + else: |
| 152 | + message_state.append(as_message( |
| 153 | + message.original_message, "user")) |
| 154 | + |
| 155 | + prompt = "Based on the whole conversation plase generate an outline of which events took place and return it as a markdown list" |
| 156 | + |
| 157 | + message_state.append(as_message(prompt)) |
| 158 | + |
| 159 | + openai.api_key = settings.OPENAI_KEY |
| 160 | + |
| 161 | + res = openai.ChatCompletion.create( |
| 162 | + model="gpt-3.5-turbo", |
| 163 | + messages=message_state, |
| 164 | + stream=False |
| 165 | + ) |
| 166 | + |
| 167 | + completion = res["choices"][0]["message"]["content"] |
| 168 | + |
| 169 | + msg = ChatMessage.objects.create( |
| 170 | + project=project, |
| 171 | + original_message=completion, |
| 172 | + sender=ai_user_for_project, |
| 173 | + data=translate_to_all_langs_in_list( |
| 174 | + completion, project_langs, str(project.base_language)), |
| 175 | + ) |
| 176 | + |
| 177 | + datas[parts[i]] = completion |
| 178 | + |
| 179 | + msg = f"Finished generating {titles[i]}" |
| 180 | + new_message = ChatMessage.objects.create( |
| 181 | + project=project, |
| 182 | + original_message=msg, |
| 183 | + sender=ai_user_for_project, |
| 184 | + data=translate_to_all_langs_in_list( |
| 185 | + msg, project_langs, str("english")), |
| 186 | + ) |
| 187 | + send_message(new_message) |
| 188 | + i += 1 |
| 189 | + |
| 190 | + out = f"# Project Report: {project.name} \n\n\n" |
| 191 | + i = 0 |
| 192 | + for prompt in prompts: |
| 193 | + out += f"{titles[i]}\n\n" |
| 194 | + key = parts[i] |
| 195 | + out += datas[key] |
| 196 | + i += 1 |
| 197 | + |
| 198 | + msg = "Heres your report" |
| 199 | + |
| 200 | + temp_file = f"{uuid4()}" |
| 201 | + convert_markdown(out, output_folder_path="/tmp", |
| 202 | + output_format="pdf", output_file_name=temp_file) |
| 203 | + temp_file = "/tmp/" + temp_file + ".pdf" |
| 204 | + |
| 205 | + base64 = pdf_to_base64(temp_file) |
| 206 | + |
| 207 | + file_meta, file_content = base64.split(',') |
| 208 | + |
| 209 | + new_message = ChatMessage.objects.create( |
| 210 | + project=project, |
| 211 | + original_message=msg, |
| 212 | + sender=ai_user_for_project, |
| 213 | + file_attachment=file_content, |
| 214 | + file_meta=file_meta, |
| 215 | + data=translate_to_all_langs_in_list( |
| 216 | + msg, project_langs, str("english")), |
| 217 | + ) |
| 218 | + send_message(new_message) |
| 219 | + |
| 220 | + print(base64) |
| 221 | + # Report |
70 | 222 |
|
71 | 223 | return Response(status=status.HTTP_200_OK)
|
0 commit comments