Altayebhssab committed on
Commit
81c8402
·
1 Parent(s): d3543df

new update

Browse files
Files changed (1) hide show
  1. app.py +75 -54
app.py CHANGED
@@ -1,109 +1,130 @@
1
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import time
import torch
import requests  # used for the Sidecar integration


# Load the model and tokenizer once (cached to improve performance).
@st.cache_resource
def load_model():
    """Return the (tokenizer, model) pair for Salesforce/codet5-base."""
    tokenizer = AutoTokenizer.from_pretrained("Salesforce/codet5-base")
    model = AutoModelForSeq2SeqLM.from_pretrained("Salesforce/codet5-base")
    return tokenizer, model


tokenizer, model = load_model()

# Sidecar settings
SIDECAR_URL = "http://127.0.0.1:42424"

# Main user interface
st.title("AI Code Assistant")
st.sidebar.title("Options")

# Section selector driving the page body below.
section = st.sidebar.radio(
    "Choose a Section",
    ("Generate Code", "Train Model", "Prompt Engineer", "Optimize Model", "Sidecar Integration")
)

# 1. Generate code from a text description.
if section == "Generate Code":
    st.header("Generate Code from Description")
    prompt = st.text_area("Enter your description:", "Write a Python function to reverse a string.")

    if st.button("Generate Code"):
        with st.spinner("Generating code..."):
            try:
                # Prefer the Sidecar service when it answers.
                response = requests.post(f"{SIDECAR_URL}/generate", json={"prompt": prompt})
                if response.status_code == 200:
                    code = response.json().get("code", "No response from Sidecar.")
                else:
                    # Sidecar returned an error status: use the local model.
                    inputs = tokenizer(prompt, return_tensors="pt")
                    outputs = model.generate(inputs["input_ids"], max_length=100)
                    code = tokenizer.decode(outputs[0], skip_special_tokens=True)
                st.code(code, language="python")
            except Exception as e:
                st.error(f"Error generating code: {e}")

# 2. Train the model (simulated).
elif section == "Train Model":
    st.header("Train the Model")
    st.write("Upload your dataset to fine-tune the model.")

    uploaded_file = st.file_uploader("Upload Dataset (JSON/CSV):")
    if uploaded_file is not None:
        st.write("Dataset uploaded successfully!")
        # Dataset analysis / sample preview could be added here.

    if st.button("Start Training"):
        with st.spinner("Training the model..."):
            time.sleep(5)  # simulate training time
            st.success("Model training completed!")

# 3. Prompt engineering playground.
elif section == "Prompt Engineer":
    st.header("Prompt Engineering")
    st.write("Experiment with different prompts to get the best results.")

    prompt_input = st.text_area("Enter a prompt:", "Explain this code: def add(a, b): return a + b")
    if st.button("Test Prompt"):
        with st.spinner("Testing the prompt..."):
            try:
                inputs = tokenizer(prompt_input, return_tensors="pt")
                outputs = model.generate(inputs["input_ids"], max_length=100)
                response = tokenizer.decode(outputs[0], skip_special_tokens=True)
                st.write("Model Response:")
                st.code(response)
            except Exception as e:
                st.error(f"Error testing prompt: {e}")

# 4. Model optimization settings (values are only echoed back).
elif section == "Optimize Model":
    st.header("Optimize Model Performance")
    st.write("Adjust model parameters to improve performance.")

    learning_rate = st.slider("Learning Rate:", 1e-5, 1e-3, 1e-4, step=1e-5)
    batch_size = st.slider("Batch Size:", 1, 64, 8, step=1)
    epochs = st.slider("Number of Epochs:", 1, 10, 3)

    if st.button("Apply Settings"):
        st.write(f"Settings Applied:\n- Learning Rate: {learning_rate}\n- Batch Size: {batch_size}\n- Epochs: {epochs}")
        st.success("Optimization settings saved!")

# 5. Sidecar integration check.
elif section == "Sidecar Integration":
    st.header("Sidecar Integration")
    st.write("Test the Sidecar server and ensure it is running.")

    # Ping the Sidecar service.
    if st.button("Ping Sidecar"):
        try:
            response = requests.get(f"{SIDECAR_URL}/ping")
            if response.status_code == 200:
                st.success("Sidecar is running!")
            else:
                st.error("Sidecar is not responding.")
        except Exception as e:
            st.error(f"Error connecting to Sidecar: {e}")
 
1
  import streamlit as st
2
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
3
+ import requests
 
 
4
 
5
# Load the model and tokenizer once per server process.
@st.cache_resource
def load_model():
    """Return the (tokenizer, model) pair for Salesforce/codet5-base.

    Cached by Streamlit so the weights are loaded only once, not on
    every script rerun.
    """
    checkpoint = "Salesforce/codet5-base"
    return (
        AutoTokenizer.from_pretrained(checkpoint),
        AutoModelForSeq2SeqLM.from_pretrained(checkpoint),
    )
11
 
12
# Initialize model and tokenizer at import time so every section
# below can use them directly.
tokenizer, model = load_model()

# Base URL of the local Sidecar helper service.
SIDECAR_URL = "http://127.0.0.1:42424"

# Global page chrome: browser title, favicon, wide layout, open sidebar.
st.set_page_config(
    page_title="AI Code Assistant",
    page_icon="🤖",
    layout="wide",
    initial_sidebar_state="expanded",
)
25
+
26
# Apply custom CSS for modern design.
def local_css(file_name):
    """Inject the contents of *file_name* into the page as a <style> block.

    A missing stylesheet is a soft failure: the app keeps running with
    Streamlit's default styling. (The original raised FileNotFoundError
    at startup when 'styles.css' was not shipped alongside app.py.)
    """
    try:
        with open(file_name) as f:
            styles = f.read()
    except FileNotFoundError:
        return  # no stylesheet present — keep default styling
    st.markdown(f"<style>{styles}</style>", unsafe_allow_html=True)

# Load custom CSS file (add your own CSS styling in 'styles.css')
local_css("styles.css")
33
 
34
# Header Section — raw HTML banner so we can control colors/rounding.
_HEADER_HTML = """
    <div style="text-align: center; padding: 20px; background-color: #1E88E5; color: white; border-radius: 8px;">
        <h1>🤖 AI Code Assistant</h1>
        <p>Your assistant for generating and optimizing code with AI.</p>
    </div>
    """
st.markdown(_HEADER_HTML, unsafe_allow_html=True)
44
+
45
# Sidebar Section — heading plus the selector that drives the page body.
_SIDEBAR_HTML = """
    <div style="text-align: center; margin-bottom: 20px;">
        <h2>⚙️ Options</h2>
    </div>
    """
st.sidebar.markdown(_SIDEBAR_HTML, unsafe_allow_html=True)

_SECTIONS = ("Generate Code", "Train Model", "Prompt Engineer", "Optimize Model", "Sidecar Integration")
section = st.sidebar.radio("Choose a Section", _SECTIONS)
58
 
59
# Main Content Section
if section == "Generate Code":
    st.markdown("<h2 style='text-align: center;'>📝 Generate Code from Description</h2>", unsafe_allow_html=True)
    st.write("Provide a description, and the AI will generate the corresponding Python code.")

    prompt = st.text_area(
        "Enter your description:",
        "Write a Python function to reverse a string.",
        placeholder="Enter a detailed code description...",
        height=150
    )
    if st.button("🚀 Generate Code"):
        with st.spinner("Generating code..."):
            try:
                code = None
                try:
                    # Prefer the Sidecar service when it is reachable.
                    # The timeout keeps the UI from hanging forever if
                    # the server accepts the connection but never replies.
                    response = requests.post(
                        f"{SIDECAR_URL}/generate",
                        json={"prompt": prompt},
                        timeout=10,
                    )
                    if response.status_code == 200:
                        code = response.json().get("code", "No response from Sidecar.")
                except requests.RequestException:
                    # Sidecar down/unreachable: fall through to the local
                    # model. (Previously a connection error skipped the
                    # fallback entirely and was shown as an error.)
                    code = None
                if code is None:
                    # Local CodeT5 fallback.
                    inputs = tokenizer(prompt, return_tensors="pt")
                    outputs = model.generate(inputs["input_ids"], max_length=100)
                    code = tokenizer.decode(outputs[0], skip_special_tokens=True)
                st.code(code, language="python")
            except Exception as e:
                st.error(f"Error: {e}")
83
 
 
84
  elif section == "Train Model":
85
+ st.markdown("<h2 style='text-align: center;'>📚 Train the Model</h2>", unsafe_allow_html=True)
86
+ st.write("Upload your dataset to fine-tune the AI model.")
87
 
88
  uploaded_file = st.file_uploader("Upload Dataset (JSON/CSV):")
89
+ if uploaded_file:
90
+ st.success("Dataset uploaded successfully!")
 
 
91
  if st.button("Start Training"):
92
+ with st.spinner("Training in progress..."):
93
+ st.success("Model training completed successfully!")
 
94
 
 
95
  elif section == "Prompt Engineer":
96
+ st.markdown("<h2 style='text-align: center;'>⚙️ Prompt Engineering</h2>", unsafe_allow_html=True)
97
+ st.write("Experiment with different prompts to improve code generation.")
98
+ prompt = st.text_area("Enter your prompt:", "Explain the following code: def add(a, b): return a + b")
 
99
  if st.button("Test Prompt"):
100
+ with st.spinner("Testing prompt..."):
101
  try:
102
+ inputs = tokenizer(prompt, return_tensors="pt")
103
  outputs = model.generate(inputs["input_ids"], max_length=100)
104
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
105
+ st.write("**Model Output:**")
106
+ st.code(response, language="text")
107
  except Exception as e:
108
+ st.error(f"Error: {e}")
109
 
 
110
  elif section == "Optimize Model":
111
+ st.markdown("<h2 style='text-align: center;'>🚀 Optimize Model Performance</h2>", unsafe_allow_html=True)
112
+ st.write("Adjust model parameters for improved performance.")
113
+ lr = st.slider("Learning Rate:", 1e-5, 1e-3, value=1e-4, step=1e-5)
114
+ batch_size = st.slider("Batch Size:", 1, 64, value=16)
115
+ epochs = st.slider("Number of Epochs:", 1, 10, value=3)
116
+ if st.button("Apply Optimization Settings"):
117
+ st.success(f"Settings applied: LR={lr}, Batch Size={batch_size}, Epochs={epochs}")
118
 
 
 
 
 
 
 
 
 
 
119
  elif section == "Sidecar Integration":
120
+ st.markdown("<h2 style='text-align: center;'>🔗 Sidecar Integration</h2>", unsafe_allow_html=True)
121
+ st.write("Test the Sidecar server connection.")
 
 
122
  if st.button("Ping Sidecar"):
123
  try:
124
  response = requests.get(f"{SIDECAR_URL}/ping")
125
  if response.status_code == 200:
126
+ st.success("Sidecar server is running!")
127
  else:
128
  st.error("Sidecar is not responding.")
129
  except Exception as e:
130
+ st.error(f"Error: {e}")