Update presentation_assistant/presentation_assistant.py
presentation_assistant/presentation_assistant.py
CHANGED
@@ -5,7 +5,17 @@ import openai
 import subprocess
 from io import BytesIO
 import sys
+import requests
 
+hf_token = os.environ['MY_HF_TOKEN']
+
+API_URL = "https://api-inference.huggingface.co/models/tiiuae/falcon-7b-instruct"
+headers = {"Authorization": "Bearer "+hf_token}
+
+def query(payload):
+    response = requests.post(API_URL, headers=headers, json=payload)
+    return response.json()
+
 sys.path.append("/home/user/app")
 
 # Function to generate text2ppt input prompt
@@ -63,24 +73,9 @@ def generate_text2ppt_input_prompt(input_type, input_value, input_pages):
 
 # Function to execute text2ppt
 def text2ppt(token_key, input_prompt, input_theme):
-
-
-    messages = [
-        {"role": "system", "content": "You are a kind helpful PPT designer."},
-    ]
-
-    message = input_prompt
-
-    if message:
-        messages.append(
-            {"role": "user", "content": message},
-        )
-        chat = openai.ChatCompletion.create(
-            model="gpt-3.5-turbo-0301", messages=messages
-        )
+    reply = query({"inputs": "You are a kind helpful PPT designer.", input_prompt})
 
     reply = chat.choices[0].message.content
-    messages.append({"role": "assistant", "content": reply})
 
     md_text = reply[4:] if reply[:3] == "---" else reply
     md_text_list = md_text.split('\n')
@@ -139,21 +134,8 @@ def ppt2script(token_key, input_file, input_type):
 
     input_prompt = header + text
 
-    messages = [
-        {"role": "system", "content": "You are a kind helpful PPT Assistant."},
-    ]
-
-    message = input_prompt
-
-    if message:
-        messages.append(
-            {"role": "user", "content": message},
-        )
-        chat = openai.ChatCompletion.create(
-            model="gpt-3.5-turbo-0301", messages=messages
-        )
+    reply = query({"inputs": "You are a kind helpful PPT Assistant.", input_prompt})
 
     reply = chat.choices[0].message.content
-    messages.append({"role": "assistant", "content": reply})
 
     return reply
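
A note on the added lines: query({"inputs": "You are a kind helpful PPT designer.", input_prompt}) mixes a key/value pair with a bare expression inside one set of braces, which Python rejects as a syntax error, and the unchanged reply = chat.choices[0].message.content lines still read from the chat object that this commit removes. The sketch below shows one plausible way the new Inference API path could hang together; the build_reply helper and the [{"generated_text": ...}] response handling are illustrative assumptions, not part of the commit.

    import os
    import requests

    # Same endpoint and token setup as the commit introduces.
    API_URL = "https://api-inference.huggingface.co/models/tiiuae/falcon-7b-instruct"
    headers = {"Authorization": "Bearer " + os.environ["MY_HF_TOKEN"]}

    def query(payload):
        # POST the payload to the serverless Inference API and return the parsed JSON.
        response = requests.post(API_URL, headers=headers, json=payload)
        return response.json()

    def build_reply(system_prompt, input_prompt):
        # Hypothetical helper: the Inference API takes a single "inputs" string, so the
        # system instruction and the user prompt are concatenated instead of being sent
        # as separate chat messages.
        data = query({"inputs": system_prompt + "\n\n" + input_prompt})
        # Assumption: text-generation models return a list like [{"generated_text": "..."}].
        return data[0]["generated_text"]

    # Example call that replaces both the added query(...) line and the stale
    # chat.choices[0].message.content read.
    reply = build_reply("You are a kind helpful PPT designer.",
                        "Write a 3-page PPT outline about penguins in Markdown.")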