# maargdarshak/app_response.py

import streamlit as st
import requests
# Streamlit App
def chatbot():
    st.title("Chatbot powered by Ollama")

    # Initialize session state for storing the chat history
    if "chat_history" not in st.session_state:
        st.session_state["chat_history"] = []

    # User input section
    user_input = st.text_input("You: ", "")

    # Streamlit reruns the whole script on every interaction, so only query
    # Ollama when the input is new; otherwise the same prompt would be
    # re-sent and appended to the history again on every rerun.
    if user_input and user_input != st.session_state.get("last_input"):
        st.session_state["last_input"] = user_input

        # Get Ollama response
        response = query_ollama(user_input)

        # Append the conversation to the chat history
        st.session_state["chat_history"].append({"role": "user", "text": user_input})
        st.session_state["chat_history"].append({"role": "bot", "text": response})

    # Display chat history
    for chat in st.session_state["chat_history"]:
        if chat["role"] == "user":
            st.write(f"**You:** {chat['text']}")
        else:
            st.write(f"**Bot:** {chat['text']}")
# Function to query Ollama locally
def query_ollama(prompt):
    # Assuming the Ollama API is running locally on its default port 11434
    url = "http://localhost:11434/api/generate"
    payload = {
        "model": "llama2",  # Change model name if necessary
        "prompt": prompt,
        "stream": False,  # Return one JSON object instead of a stream of NDJSON chunks
    }
    headers = {
        "Content-Type": "application/json"
    }
    try:
        response = requests.post(url, json=payload, headers=headers, timeout=120)

        # Print the status code and response text for debugging
        print(f"Status Code: {response.status_code}")
        print(f"Response Text: {response.text}")

        # Check if the response is successful
        if response.status_code == 200:
            try:
                response_json = response.json()
                # Ollama returns the generated text under the "response" key
                return response_json.get("response", "Sorry, I couldn't generate a response.")
            except ValueError:
                st.error("The server returned an invalid response. Please check the API.")
                return "Invalid response format"
        else:
            st.error(f"Error from API: {response.status_code}")
            return f"Error: {response.status_code}"
    except requests.exceptions.RequestException as e:
        st.error(f"Request failed: {e}")
        return "Failed to communicate with the server."
if __name__ == "__main__":
    chatbot()
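
# Usage (assuming Ollama is installed and the model has been pulled):
#   ollama pull llama2
#   ollama serve        # if the Ollama server is not already running
#   streamlit run app_response.py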