remove local llm model, update AI Image Editor
@@ -23,7 +23,7 @@ def buffbot():
         st.code(f.read(), language="python")
     st.divider()
     # Select AI model for chatbot
-    model_options = ["gemini-2.0-flash", "deepseek-r1:1.5b", "llama3.2:1b", "deepseek-chat", ]
+    model_options = ["gemini-2.5-flash-lite", "deepseek-chat", ]
     # on_change callback to clear chat history when model is changed
     selected_model = st.selectbox("**👉Please select a model to start**", model_options, on_change=clear_chat)
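The selectbox registers `clear_chat` as its `on_change` callback, but the callback body sits outside this hunk. A minimal sketch of what it likely does, assuming the chat history lives under a `messages` key in `st.session_state` (the key name is an assumption, not confirmed by this diff):

    import streamlit as st

    def clear_chat():
        # Drop the stored conversation when the user switches models, so
        # replies from the previous model don't bleed into the new session.
        st.session_state.messages = []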
||||
@@ -44,32 +44,18 @@ def buffbot():
             "model":"deepseek-chat"
         },
-        "ollama3.2:1b":{
-            "api_url": "http://localhost:11434/v1",
-            "api_key": "ollama",
-            "model":"llama3.2:1b"
-        },
-        "deepseek-r1:1.5b":{
-            "api_url": "http://localhost:11434/v1",
-            "api_key": "ollama",
-            "model":"deepseek-r1:1.5b"
-        },
         "gemini-2.0-flash":{
             "api_key": "YOUR_API_KEY",
             "model": "gemini-2.0-flash"
         },
     }
     """
     # The API key and base URL are loaded based on the selected model
     with open('app_config.json') as config_file:
         config = json.load(config_file)

     if selected_model == "grok-3":
         api_base_url = config[selected_model]["api_url"]
         api_key = config[selected_model]["api_key"]
         st.info("Powered by the online [Grok](https://www.grok.com/) API!\
             Just a heads up, you have 20 messages to use.")
         # Set the maximum number of user messages
         MAX_USER_MESSAGES = 20

-    if selected_model == "gemini-2.0-flash":
+    if selected_model == "gemini-2.5-flash-lite":
         api_base_url = config[selected_model]["api_url"]
         api_key = config[selected_model]["api_key"]
         st.info("Powered by the online [Gemini](https://deepmind.google/technologies/gemini/) API!\
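Each branch reads `api_url` and `api_key` for the selected model from `app_config.json`; the removed Ollama entries applied the same pattern to Ollama's local OpenAI-compatible endpoint (`http://localhost:11434/v1`). A sketch of how that config plausibly drives a chat client, assuming the app talks to these OpenAI-compatible endpoints via the `openai` SDK (the client library and the hard-coded `selected_model` below are assumptions for illustration):

    import json
    from openai import OpenAI

    selected_model = "deepseek-chat"  # placeholder; the app gets this from st.selectbox

    # Each config entry carries the endpoint, key, and model name for one backend.
    with open("app_config.json") as config_file:
        config = json.load(config_file)

    entry = config[selected_model]
    client = OpenAI(base_url=entry["api_url"], api_key=entry["api_key"])

    response = client.chat.completions.create(
        model=entry["model"],
        messages=[{"role": "user", "content": "Hello!"}],
    )
    print(response.choices[0].message.content)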
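The Grok branch caps the session at `MAX_USER_MESSAGES = 20`, but the enforcement code is outside this hunk. A minimal sketch of one way it could work, reusing the assumed `st.session_state.messages` history from the earlier sketch:

    import streamlit as st

    MAX_USER_MESSAGES = 20

    # Count only user turns, then refuse further input once the cap is hit.
    user_turns = sum(1 for m in st.session_state.messages if m.get("role") == "user")
    if user_turns >= MAX_USER_MESSAGES:
        st.warning("You've reached the 20-message limit for this session.")
        st.stop()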
||||