# Switch AI — Gradio front-end that routes user queries to one of many
# OpenRouter-hosted chat models.
from models import *
import gradio as gr
# OpenRouter model identifiers (free tiers where available).
# NOTE(review): GPT_4 actually points at a DeepSeek chat model, not an
# OpenAI one — the name is kept for backward compatibility with callers;
# consider renaming in a coordinated change.
GPT_4 = "deepseek/deepseek-chat-v3-0324:free"
PHI_4 = "microsoft/phi-4"
PHI_3 = "microsoft/phi-3-medium-128k-instruct:free"
GEMMA_3_27B = "google/gemma-3-27b-it:free"
GEMIN_FLASH_THINK = "google/gemini-2.0-flash-thinking-exp:free"
GEMIN_FLASH = "google/gemini-flash-1.5-8b-exp"
QWEN_32B = "qwen/qwq-32b:free"
# NOTE(review): original comment flagged this model id with "#error" —
# verify it still resolves on OpenRouter.
QWEN_25 = "qwen/qwen2.5-vl-72b-instruct:free"
DEEPSEEK_R1 = "deepseek/deepseek-r1:free"
DEEPSEEK_R1_ZERO = "deepseek/deepseek-r1-zero:free"
META_LLAMA_MODEL = "meta-llama/Llama-3.3-70B-Instruct:free"
MISTRAL_SMALL_MODEL = "mistralai/mistral-small-3.1-24b-instruct:free"
MISTRAL_NEMO = "mistralai/mistral-nemo:free"
ZEPHYR = "huggingfaceh4/zephyr-7b-beta:free"
OLYMPIC_CODER = "open-r1/olympiccoder-32b:free"
LEARN = "google/learnlm-1.5-pro-experimental:free"
REKA_FLASH = "rekaai/reka-flash-3:free"
OPEN_CHAT = "openchat/openchat-7b:free"
TOPPY = "undi95/toppy-m-7b:free"
MOONLIGHT = "moonshotai/moonlight-16b-a3b-instruct:free"
# Suffix appended to every system prompt to keep answers short.
CONCISE_ENGLISH_PROMPT = "Answer in short and precise English sentences."
def get_model(title, dev, model, name, user_input, system_prompt):
    """Validate the form input and dispatch the query to the chosen model.

    Args:
        title: unused; receives the "# Switch AI" Markdown component value
            because gr.Interface passes every input component to fn.
        dev: unused; receives the "### by Kalash" Markdown component value.
        model: radio-button label of the selected model.
        name: the user's name; required.
        user_input: the query text. The literal strings "data" and "text"
            (case-insensitive) are debug commands that dump stored state.
        system_prompt: optional custom system prompt; the concise-English
            suffix is always appended.

    Returns:
        The model's response, a validation message, or a debug dump.
    """
    # Debug shortcuts: dump stored data/text instead of querying a model.
    if user_input.lower() == "data":
        return get_data()
    if user_input.lower() == "text":
        return get_text()
    # Guard clauses for required form fields ("" or None both fail).
    if not name:
        return "Enter Your Name !"
    if not model:
        return "Select AI Model !"
    chain = ModelChain()
    prompt = system_prompt + " " + CONCISE_ENGLISH_PROMPT
    # UI label -> OpenRouter model id. A dict lookup replaces the 20-arm
    # if/elif chain and fixes the bug where "DeepSeek-R1-Zero" silently
    # dispatched to the plain DEEPSEEK_R1 model.
    model_map = {
        "ChatGPT": GPT_4,
        "Phi-4": PHI_4,
        "Phi-3": PHI_3,
        "Gemma-3": GEMMA_3_27B,
        "Gemini-2-Flash-Think": GEMIN_FLASH_THINK,
        "Gemini-Flash": GEMIN_FLASH,
        "QwQ-32B": QWEN_32B,
        "Qwen2.5": QWEN_25,
        "DeepSeek-R1": DEEPSEEK_R1,
        "DeepSeek-R1-Zero": DEEPSEEK_R1_ZERO,
        "Llama-3.3": META_LLAMA_MODEL,
        "Mistral-Small": MISTRAL_SMALL_MODEL,
        "Mistral-Nemo": MISTRAL_NEMO,
        "Zephyr": ZEPHYR,
        "Olympic-Coder": OLYMPIC_CODER,
        "LearnLM": LEARN,
        "Reka-Flash": REKA_FLASH,
        "OpenChat": OPEN_CHAT,
        "Toppy": TOPPY,
        "MoonLight": MOONLIGHT,
    }
    try:
        model_id = model_map[model]
    except KeyError:
        return "Invalid Model Name : " + model
    return chain.generate_response(model_id, name, user_input, prompt)
def main():
    """Build and launch the Switch AI Gradio interface."""
    view = gr.Interface(
        fn=get_model,
        inputs=[
            # The two Markdown components render headings in the UI and
            # feed the unused `title`/`dev` parameters of get_model.
            gr.Markdown("# Switch AI"),
            gr.Markdown("### by Kalash"),
            # Labels here must match the keys get_model dispatches on.
            gr.Radio(
                [
                    "ChatGPT",
                    "Phi-4",
                    "Phi-3",
                    "Gemma-3",
                    "Gemini-2-Flash-Think",
                    "Gemini-Flash",
                    "QwQ-32B",
                    "Qwen2.5",
                    "DeepSeek-R1",
                    "DeepSeek-R1-Zero",
                    "Llama-3.3",
                    "Mistral-Small",
                    "Mistral-Nemo",
                    "Zephyr",
                    "Olympic-Coder",
                    "LearnLM",
                    "Reka-Flash",
                    "OpenChat",
                    "Toppy",
                    "MoonLight",
                ],
                label="Choose AI Model",
                value="ChatGPT",
            ),
            gr.Textbox(label="Your Name", placeholder="Enter Your Name"),
            gr.Textbox(label="Your Query", placeholder="Enter Your Question"),
            # Fixed typo in the user-facing placeholder ("Propmt" -> "Prompt").
            gr.Textbox(label="System Prompt",
                       placeholder="Enter Custom System Prompt (Optional)"),
        ],
        outputs=[gr.Textbox(label="AI Response", lines=25)],
        flagging_mode="never",
    )
    # share=True exposes a temporary public tunnel URL; use
    # launch(share=False, server_port=...) to serve locally instead.
    view.launch(share=True)


if __name__ == '__main__':
    main()