File size: 7,489 Bytes
775c806
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d5aef00
775c806
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bea6f91
775c806
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155

import gradio as gr
import os

import verify2


# adapted from https://github.com/hwchase17/langchain/issues/2428#issuecomment-1512280045

from datasets import load_dataset


# Root directory holding the generated output files served by the browser tab.
BASE_DIR = 'outs'


def list_files(directory):
    """Return the file names under BASE_DIR/<directory>, or [] when the path is missing."""
    target = os.path.join(BASE_DIR, directory)
    return os.listdir(target) if os.path.exists(target) else []

def file_content(directory, file_name):
    """Return the full text of BASE_DIR/<directory>/<file_name>.

    Decodes explicitly as UTF-8: the original relied on the platform's
    default encoding (``locale.getpreferredencoding``), which corrupts
    non-ASCII characters in generated .ecore files on e.g. Windows/cp1252.

    Raises FileNotFoundError if the file does not exist.
    """
    file_path = os.path.join(BASE_DIR, directory, file_name)
    with open(file_path, 'r', encoding='utf-8') as file:
        content = file.read()
    return content

def download_file(directory, file_name):
    """Resolve the on-disk path of *file_name* for Gradio's download widget."""
    return os.path.join(BASE_DIR, directory, file_name)

# Seed prompts shown in the Gradio Examples widget: three textual metamodel
# descriptions (SimplePDL process language, a finite-state machine, and a
# French description of a Website metamodel).
examples = [
    '''SimplePDL is an experimental language for specifying processes. The SPEM standard (Software Process Engineering Metamodel) proposed by the OMG inspired our work, but we also took ideas from the UMA metamodel (Unified Method Architecture) used in the EPF Eclipse plug-in (Eclipse Process Framework), dedicated to process modeling. SimplePDL is simplified to keep the presentation simple.
Its metamodel is given in the figure 1. It defines the process concept (Process) composed of a set of work definitions (WorkDefinition) representing the activities to be performed during the development. One workdefinition may depend upon another (WorkSequence). In such a case, an ordering constraint (linkType) on the second workdefinition is specified, using the enumeration WorkSequenceType. For example, linking two workdefinitions wd1 and wd2 by a precedence relation of kind finishToStart means that wd2 can be started only if wd1 is finished (and respectively for startToStart, startToFinish and finishToFinish). SimplePDL does also allow to explicitly represent resources (Resource) that are needed in order to perform one workdefinition (designer, computer, server...) and also time constraints (min_time and max_time on WorkDefinition and Process) to specify the minimum (resp. maximum) time allowed to perform the workdefinition or the whole process.''',
    " A FSM is conceived as an abstract machine that can be in one of a finite number of states. The machine is in only one state at a time; the state it is in at any given time is called the current state. It can change from one state to another when initiated by a triggering event or condition; this is called a transition. A particular FSM is defined by a list of its states, and the triggering condition for each transition.",
    "Un Website est l'élément racine. Il est décrit par deux attributs (copyright et isMobileFriendly) et par une composition d'une ou plusieurs pages. Une page est décrite par deux attributs (son nom et son titre), ainsi que par des références à d'autres pages."
]

def trigger_example(example):
    """Run one canned example prompt through the chatbot pipeline.

    Fix: the original called ``generate_response(example)`` with a single
    argument, but ``generate_response(user_message, history)`` takes two
    positional parameters, so every invocation raised TypeError. A fresh
    empty history is supplied (generate_response ignores it anyway).
    """
    chat, updated_history = generate_response(example, [])
    return chat, updated_history

def generate_response(user_message, history):
    """Forward *user_message* to verify2's iterative prompting loop.

    Returns a pair: an empty string (clears the input textbox) and the
    chat transcript built by verify2. The incoming *history* argument is
    accepted for the Gradio wiring but not used — verify2 rebuilds the
    transcript itself from the message and its stored description/model.
    """
    transcript, _errors = verify2.iterative_prompting(
        user_message, verify2.description, model=verify2.model
    )
    return "", transcript

def apply_gpt_settings_button(prompt, model_name):
    """Switch the backing OpenAI model and reset the UI.

    *prompt* is accepted because the settings button wires the textbox as
    an input, but it is not used. Returns ("", []) to clear the textbox
    and the chatbot display.
    """
    verify2.model = model_name
    return ("", [])

def clear_chat():
    """Reset both the chatbot display and the stored history state."""
    return ([], [])
# Inline CSS injected into the Gradio app: remove the border around the logo
# image and give the chat message area a readable minimum size.
custom_css = """
#logo-img {
    border: none !important;
}
#chat-message {
    font-size: 14px;
    min-height: 300px;
}
"""
# Model identifiers offered in the Settings dropdown; the first entry is the
# default selection.
GPT_MODELS_NAMES = ["gpt-4-turbo", "gpt-4o", "gpt-3.5-turbo"]
# --- Gradio UI -------------------------------------------------------------
# Tab 1 ("OPENAI API") wires the chat interface to verify2's prompting loop;
# Tab 2 ("File Browser") is a read-only browser over files under BASE_DIR.
with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:

    
    with gr.Tab("OPENAI API"):
        with gr.Row():
            with gr.Column(scale=1):
                gr.Image("images/logo.png", elem_id="logo-img", show_label=False, show_share_button=False, show_download_button=False)
            with gr.Column(scale=3):
                gr.Markdown("""This Chatbot has been made to showcase our work on generating meta-model from textual descriptions.
                <br/><br/>
                The output of this conversation is going to be an ecore file that is validated by PyEcore [Pyecore (https://github.com/pyecore/pyecore)]
                <br/>
                Available Models : <br>
                - GPT3-Turbo<br>
                - GPT4-Turbo<br>
                - GPT4-Omni                
                
            """
                )
            
        with gr.Row():
            chatbot1 = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True)
        
        with gr.Row():
            user_message = gr.Textbox(lines=1, placeholder="Ask anything ...", label="Input", show_label=False)

      
        with gr.Row():
            submit_button = gr.Button("Submit")
            clear_button = gr.Button("Clear chat")

            

                        
        # NOTE(review): this State is only written by the clear_chat handler
        # below; the submit handlers pass chatbot1 (not this State) as the
        # history argument — confirm whether the State is still needed.
        history = gr.State([])
        
        # Enter-in-textbox and the Submit button both route through
        # generate_response: chatbot1's value is forwarded as the (unused)
        # history parameter, and the "" return value clears the textbox.
        user_message.submit(fn=generate_response, inputs=[user_message, chatbot1], outputs=[user_message, chatbot1], concurrency_limit=32)
        submit_button.click(fn=generate_response, inputs=[user_message, chatbot1], outputs=[user_message, chatbot1], concurrency_limit=32)
        
        clear_button.click(fn=clear_chat, inputs=None, outputs=[chatbot1, history], concurrency_limit=32)
        

        
        with gr.Accordion("Settings", open=False):
                            model_name = gr.Dropdown(
                                choices=GPT_MODELS_NAMES, value=GPT_MODELS_NAMES[0], label="model"
                            )
                            settings_button = gr.Button("Apply")
                            settings_button.click(
                                apply_gpt_settings_button,
                                [user_message,model_name],
                                [user_message, chatbot1],
                            )


        with gr.Row():
            # NOTE(review): with cache_examples=False, clicking an example
            # presumably only fills the textbox and fn/outputs are not run;
            # trigger_example returns two values while outputs lists one
            # component — confirm before enabling example caching.
            gr.Examples(
                examples=examples,
                inputs=user_message,
                cache_examples=False,
                fn=trigger_example,
                outputs=[chatbot1],
                examples_per_page=100
            )
        #user_message.submit(lambda x: gr.update(value=""), None, [user_message], queue=False)
        #submit_button.click(lambda x: gr.update(value=""), None, [user_message], queue=False)
        #clear_button.click(lambda x: gr.update(value=""), None, [user_message], queue=False)
    with gr.Tab("File Browser"):
        
        directory_dropdown = gr.Dropdown(choices=["OAI"], label="Select Directory")
        file_dropdown = gr.Dropdown(choices=[], label="Files")
        file_content_display = gr.Textbox(label="File Content", lines=10, interactive=False)
        download_button = gr.File(label="Download File")

        def update_file_list(directory):
            # Refresh the file dropdown with the contents of the chosen directory.
            files = list_files(directory)
            return gr.Dropdown(choices=files)

        def update_file_content_and_path(directory, file_name):
            # Show the selected file's text and expose the same path for download.
            content = file_content(directory, file_name)
            file_path = download_file(directory, file_name)
            return content, file_path

        directory_dropdown.change(update_file_list, inputs=directory_dropdown, outputs=file_dropdown)
        file_dropdown.change(update_file_content_and_path, inputs=[directory_dropdown, file_dropdown], outputs=[file_content_display, download_button])

if __name__ == "__main__":
    # Queue requests (at most 40 pending) and start the server with the
    # public API disabled. A broad except at this top-level boundary keeps
    # launch failures from producing a raw traceback; they are printed.
    try:
        app = demo.queue(api_open=False, max_size=40)
        app.launch(show_api=False)
    except Exception as e:
        print(f"Error: {e}")