Update app.py
app.py
CHANGED
@@ -12,8 +12,6 @@ from transformers import pipeline
 st.title("Hi, I am Chatbot Philio :mermaid:")
 st.write("I am your hotel booking assistant for today.")
 
-# tokenizer = AutoTokenizer.from_pretrained("KvrParaskevi/Hotel-Assistant-Attempt4-Llama-2-7b")
-
 tokenizer, model = demo_chat.load_model()
 
 model_identifier = "KvrParaskevi/Hotel-Assistant-Attempt4-Llama-2-7b"
@@ -22,6 +20,18 @@ task = "text-generation" # Change this to your model's task
 # Load the model using the pipeline
 model_pipeline = pipeline(task, model=model, tokenizer=tokenizer)
 
+scrollable_div_style = """
+<style>
+.scrollable-div {
+    height: 200px; /* Adjust the height as needed */
+    overflow-y: auto; /* Enable vertical scrolling */
+    padding: 5px;
+    border: 1px solid #ccc; /* Optional: adds a border around the div */
+    border-radius: 5px; /* Optional: rounds the corners of the border */
+}
+</style>
+"""
+
 def render_chat_history(chat_history):
     #renders chat history
     for message in chat_history:
@@ -30,47 +40,47 @@ def render_chat_history(chat_history):
         st.markdown(message["content"])
 
 #Application
-
-
-
-if 'memory' not in st.session_state:
-    st.session_state.memory = demo_chat.demo_miny_memory(model)
+#Langchain memory in session cache
+if 'memory' not in st.session_state:
+    st.session_state.memory = demo_chat.demo_miny_memory(model)
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+system_content = ("You are a friendly chatbot who always helps the user book a hotel room based on his/her needs. "
+                  "Before you confirm a booking/reservation you should ask the user for personal information: first and last name, email and phone number. "
+                  "Based on the current social norms you wait for the user's response to your proposals.")
+#Check if chat history exists in this session
+if 'chat_history' not in st.session_state:
+    st.session_state.chat_history = [
+        {
+            "role": "system",
+            "content": system_content,
+        },
+        {"role": "assistant", "content": "Hello, how can I help you today?"},
+    ] #Initialize chat history
+
+if 'model' not in st.session_state:
+    st.session_state.model = model
+
+st.markdown('<div class="scrollable-div">', unsafe_allow_html=True)
+render_chat_history(st.session_state.chat_history)
+
+#Set up input text field
+#input_text = st.chat_input(placeholder="Here you can chat with our hotel booking model.")
 
-
+if input_text := st.chat_input(placeholder="Here you can chat with our hotel booking model."):
 
-
-
+    with st.chat_message("user"):
+        st.markdown(input_text)
+        st.session_state.chat_history.append({"role" : "user", "content" : input_text}) #append message to chat history
 
-
+    with st.spinner("Generating response..."):
+        #chat_response = demo_chat.demo_chain(input_text=input_text, memory=st.session_state.memory, model=chat_model)
+        #first_answer = chat_response.split("Human")[0] #predict() prints the whole conversation; keep only the first answer
+        tokenized_chat = tokenizer.apply_chat_template(st.session_state.chat_history, tokenize=True, add_generation_prompt=True, return_tensors="pt")
+        #st.write(tokenizer.decode(tokenized_chat[0]))
+        outputs = model.generate(tokenized_chat, max_new_tokens=128)
+        first_answer = tokenizer.decode(outputs[0][tokenized_chat.shape[1]:], skip_special_tokens=True)
 
-with st.chat_message("user"):
-    st.markdown(input_text)
-    st.session_state.chat_history.append({"role" : "user", "content" : input_text})
-
-with st.spinner("Generating response..."):
-    #chat_response = demo_chat.demo_chain(input_text=input_text, memory=st.session_state.memory, model= chat_model)
-    #first_answer = chat_response.split("Human")[0] #Because of Predict it prints the whole conversation.Here we seperate the first answer only.
-    tokenized_chat = tokenizer.apply_chat_template(st.session_state.chat_history, tokenize=True, add_generation_prompt=True, return_tensors="pt")
-    #st.write(tokenizer.decode(tokenized_chat[0]))
-    outputs = model.generate(tokenized_chat, max_new_tokens=128)
-    first_answer = tokenizer.decode(outputs[0][tokenized_chat.shape[1]:],skip_special_tokens=True)
-
-with st.chat_message("assistant"):
-    st.markdown(first_answer)
-st.session_state.chat_history.append({"role": "assistant", "content": first_answer})
-st.markdown('</div>', unsafe_allow_html=True)
+    with st.chat_message("assistant"):
+        st.markdown(first_answer)
+        st.session_state.chat_history.append({"role": "assistant", "content": first_answer})
+    st.markdown('</div>', unsafe_allow_html=True)
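A note on the scrollable-div styling added in this commit: Streamlit only renders raw HTML when unsafe_allow_html=True, and within this diff scrollable_div_style is defined but never itself passed to st.markdown, so the CSS is not actually injected yet. Below is a minimal standalone sketch of the complete pattern; the class name and height mirror the style block above, everything else is illustrative.

import streamlit as st

# Inject the CSS once; unsafe_allow_html is required for raw HTML/CSS.
scrollable_div_style = """
<style>
.scrollable-div {
    height: 200px;    /* fixed height for the chat area */
    overflow-y: auto; /* scroll vertically instead of growing the page */
}
</style>
"""
st.markdown(scrollable_div_style, unsafe_allow_html=True)

# The class applies to HTML rendered explicitly with it:
st.markdown('<div class="scrollable-div">chat history renders here</div>',
            unsafe_allow_html=True)

One caveat: each st.markdown call is rendered as its own element, so an opening div tag in one call does not reliably wrap widgets created by later calls; st.container is the supported way to group elements.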
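The if 'memory' not in st.session_state and if 'chat_history' not in st.session_state guards follow the standard Streamlit idiom: the script re-runs from the top on every widget interaction, and only st.session_state survives re-runs, so state is initialized exactly once. A minimal sketch of that idiom, with illustrative key names:

import streamlit as st

# Streamlit re-executes this whole script on every interaction;
# st.session_state is the only object that survives the re-runs.
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []  # created once, on the first run

# chat_input returns None until the user submits, so the walrus pattern
# only appends when a new message actually arrives.
if user_text := st.chat_input("Type a message"):
    st.session_state.chat_history.append({"role": "user", "content": user_text})

st.write(f"{len(st.session_state.chat_history)} messages stored")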
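Finally, the generation path this commit introduces (apply_chat_template, then generate, then decode only the new tokens) can be exercised outside Streamlit. A minimal sketch, assuming the model ships a chat template; the model id is the one referenced in app.py, max_new_tokens mirrors the value above, and the example history is illustrative:

from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "KvrParaskevi/Hotel-Assistant-Attempt4-Llama-2-7b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

chat_history = [
    {"role": "system", "content": "You are a friendly hotel booking assistant."},
    {"role": "user", "content": "I need a double room for two nights."},
]

# Folds the whole history into a single prompt tensor and appends the
# assistant header so the model continues as the bot.
tokenized_chat = tokenizer.apply_chat_template(
    chat_history, tokenize=True, add_generation_prompt=True, return_tensors="pt"
)
outputs = model.generate(tokenized_chat, max_new_tokens=128)

# generate() returns prompt + completion; slicing off the prompt tokens is
# what keeps the app from echoing the entire conversation back to the user.
first_answer = tokenizer.decode(outputs[0][tokenized_chat.shape[1]:],
                                skip_special_tokens=True)
print(first_answer)

Calling model.generate directly, rather than the unused model_pipeline also constructed in app.py, keeps explicit control over this prompt/completion split.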