File size: 5,500 Bytes
784dbce
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
from models import *
import gradio as gr

# OpenRouter model identifiers for every model the UI can dispatch to.
# The trailing numeric comments are the author's original catalogue indices.
# NOTE(review): despite its name, GPT_4 holds a DeepSeek chat model ID —
# presumably the "ChatGPT" radio option was re-pointed at a free model; verify.
GPT_4 = "deepseek/deepseek-chat-v3-0324:free" #12
PHI_4 = "microsoft/phi-4" #2
PHI_3 = "microsoft/phi-3-medium-128k-instruct:free" #3
GEMMA_3_27B = "google/gemma-3-27b-it:free" #4
GEMIN_FLASH_THINK = "google/gemini-2.0-flash-thinking-exp:free" #6
GEMIN_FLASH = "google/gemini-flash-1.5-8b-exp" #7
QWEN_32B = "qwen/qwq-32b:free" #8
QWEN_25 = "qwen/qwen2.5-vl-72b-instruct:free" #10 #error
DEEPSEEK_R1 = "deepseek/deepseek-r1:free" #11
DEEPSEEK_R1_ZERO = "deepseek/deepseek-r1-zero:free" #13
META_LLAMA_MODEL = "meta-llama/Llama-3.3-70B-Instruct:free" #14
MISTRAL_SMALL_MODEL = "mistralai/mistral-small-3.1-24b-instruct:free" #15
MISTRAL_NEMO = "mistralai/mistral-nemo:free" #16
ZEPHYR = "huggingfaceh4/zephyr-7b-beta:free" #17
OLYMPIC_CODER = "open-r1/olympiccoder-32b:free" #19
LEARN = "google/learnlm-1.5-pro-experimental:free" #20
REKA_FLASH = "rekaai/reka-flash-3:free" #21
OPEN_CHAT = "openchat/openchat-7b:free" #21
TOPPY = "undi95/toppy-m-7b:free" #23
MOONLIGHT = "moonshotai/moonlight-16b-a3b-instruct:free"

# Appended to every system prompt so answers stay short and in English.
CONCISE_ENGLISH_PROMPT = "Answer in short and precise English sentences."

def get_model(title, dev, model, name, user_input, system_prompt):
    """Dispatch a user query to the selected AI model and return its response.

    Args:
        title: Unused; placeholder slot for the first Markdown UI component.
        dev: Unused; placeholder slot for the second Markdown UI component.
        model: Display name chosen in the Radio widget (e.g. "ChatGPT").
        name: The user's name; required before any model call is made.
        user_input: The user's query. The special values "data" and "text"
            (case-insensitive) short-circuit to get_data()/get_text().
        system_prompt: Optional custom system prompt; the concise-English
            instruction is always appended to it.

    Returns:
        The model's response text, a DataFrame/text for the special commands,
        or a validation message when name/model is missing or unknown.
    """
    # Special commands bypass the model entirely.
    if user_input.lower() == "data":
        return get_data()
    if user_input.lower() == "text":
        return get_text()

    # Validate required fields before doing any work.
    if not name:
        return "Enter Your Name !"
    if not model:
        return "Select AI Model !"

    chain = ModelChain()
    prompt = system_prompt + " " + CONCISE_ENGLISH_PROMPT

    # Map UI display names to OpenRouter model identifiers. A single table
    # replaces the former if/elif chain, which had a copy-paste bug that
    # routed "DeepSeek-R1-Zero" to DEEPSEEK_R1 (fixed here).
    model_map = {
        "ChatGPT": GPT_4,                          #1
        "Phi-4": PHI_4,                            #2
        "Phi-3": PHI_3,                            #3
        "Gemma-3": GEMMA_3_27B,                    #4
        "Gemini-2-Flash-Think": GEMIN_FLASH_THINK, #6
        "Gemini-Flash": GEMIN_FLASH,               #7
        "QwQ-32B": QWEN_32B,                       #8
        "Qwen2.5": QWEN_25,                        #10
        "DeepSeek-R1": DEEPSEEK_R1,                #11
        "DeepSeek-R1-Zero": DEEPSEEK_R1_ZERO,      #13 (bug fix: was DEEPSEEK_R1)
        "Llama-3.3": META_LLAMA_MODEL,             #14
        "Mistral-Small": MISTRAL_SMALL_MODEL,      #15
        "Mistral-Nemo": MISTRAL_NEMO,              #16
        "Zephyr": ZEPHYR,                          #17
        "Olympic-Coder": OLYMPIC_CODER,            #19
        "LearnLM": LEARN,                          #20
        "Reka-Flash": REKA_FLASH,                  #21
        "OpenChat": OPEN_CHAT,                     #21
        "Toppy": TOPPY,                            #21
        "MoonLight": MOONLIGHT,                    #21
    }

    model_id = model_map.get(model)
    if model_id is None:
        return "Invalid Model Name : " + model
    return chain.generate_response(model_id, name, user_input, prompt)

def main():
    """Build the Gradio interface for Switch AI and launch it publicly.

    The two Markdown components are display-only; Gradio still passes them as
    the first two arguments to get_model (absorbed by its unused title/dev
    parameters). Launches with share=True, so a public URL is created.
    """
    gr.Interface(
        fn=get_model,
        inputs=[
            gr.Markdown("# Switch AI"),
            gr.Markdown("### by Kalash"),
            gr.Radio(
                [
                    "ChatGPT",              #1
                    "Phi-4",                #2
                    "Phi-3",                #3
                    "Gemma-3",              #4
                    "Gemini-2-Flash-Think", #6
                    "Gemini-Flash",         #7
                    "QwQ-32B",              #9
                    "Qwen2.5",              #11
                    "DeepSeek-R1",          #12
                    "DeepSeek-R1-Zero",     #12
                    "Llama-3.3",            #15
                    "Mistral-Small",        #16
                    "Mistral-Nemo",         #17
                    "Zephyr",               #18
                    "Olympic-Coder",        #20
                    "LearnLM",              #8
                    "Reka-Flash",           #21
                    "OpenChat",             #22
                    "Toppy",                #22
                    "MoonLight",            #22
                ],
                label="Choose AI Model", value="ChatGPT"),
            gr.Textbox(label="Your Name", placeholder="Enter Your Name"),
            gr.Textbox(label="Your Query", placeholder="Enter Your Question"),
            # Typo fix: placeholder previously read "Propmt".
            gr.Textbox(label="System Prompt", placeholder="Enter Custom System Prompt (Optional)"),
        ],
        outputs=[gr.Textbox(label="AI Response", lines=25)],
        flagging_mode="never",
    ).launch(share=True)
    # Local-only alternative: .launch(share=False, server_port=54321)

if __name__ == '__main__':
    main()