Add deepseek local model deepseek-r1:1.5b

This commit is contained in:
BuffTechTalk
2025-01-27 13:53:41 -06:00
parent 340c66dfac
commit 6ed8d0dda9
18 changed files with 51 additions and 33 deletions

View File

@@ -23,7 +23,7 @@ def buffbot():
st.code(f.read(), language="python")
st.divider()
# Select AI model for chatbot
model_options = ["llama3.2:1b", "deepseek-chat", ]
model_options = ["deepseek-r1:1.5b", "llama3.2:1b", "deepseek-chat", ]
# on_change callback to clear chat history when model is changed
selected_model = st.selectbox("**👉Please select a model to start**", model_options, on_change=clear_chat)
@@ -36,26 +36,35 @@ def buffbot():
# Load API credentials from config.json
# the config file contains the API key and base URL for the selected model
"""
{
"deepseek":{
"api_url": "https://api.deepseek.com",
"api_key": "YOUR_API_KEY"
},
"ollama":{
"api_url": "http://localhost:11434/v1",
"api_key": "ollama"
}
}
"""
"""
{
"deepseek-chat":{
"api_url": "https://api.deepseek.com",
"api_key": "YOUR_API_KEY",
"model":"deepseek-chat"
},
"llama3.2:1b":{
"api_url": "http://localhost:11434/v1",
"api_key": "ollama",
"model":"llama3.2:1b"
},
"deepseek-r1:1.5b":{
"api_url": "http://localhost:11434/v1",
"api_key": "ollama",
"model":"deepseek-r1:1.5b"
}
}
"""
# The API key and base URL are loaded based on the selected model
with open('app_config.json') as config_file:
config = json.load(config_file)
# deepseek-chat model, online API
if selected_model == "deepseek-chat":
api_base_url = config["deepseek"]["api_url"]
api_key = config["deepseek"]["api_key"]
api_base_url = config[selected_model]["api_url"]
api_key = config[selected_model]["api_key"]
model = config[selected_model]["model"]
st.info("Powered by the online [DeepSeek](https://www.deepseek.com/) API!\
Just a heads up, you have 10 messages to use.")
# Set the maximum number of user messages
@@ -63,11 +72,20 @@ def buffbot():
# llama3.2:1b model, local API
if selected_model == "llama3.2:1b":
api_base_url = config["ollama"]["api_url"]
api_key = config["ollama"]["api_key"]
api_base_url = config[selected_model]["api_url"]
api_key = config[selected_model]["api_key"]
model = config[selected_model]["model"]
st.info("Powered by local llama3.2:1b model via [Ollama](https://ollama.com/library/llama3.2:1b)!\
Just a heads up, you have 100 messages to use.")
MAX_USER_MESSAGES = 100
MAX_USER_MESSAGES = 100
if selected_model == "deepseek-r1:1.5b":
api_base_url = config[selected_model]["api_url"]
api_key = config[selected_model]["api_key"]
model = config[selected_model]["model"]
st.info("Powered by local deepseek-r1:1.5b model via [Ollama](https://ollama.com/library/deepseek-r1:1.5b)!\
Just a heads up, you have 100 messages to use.")
MAX_USER_MESSAGES = 100
# Initialize OpenAI client to connect with the selected model API
client = OpenAI(api_key=api_key, base_url=api_base_url)