```python
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import time
import torch

# Set up the model and tokenizer
@st.cache_resource
def load_model():
    tokenizer = AutoTokenizer.from_pretrained("Salesforce/codet5-base")
    model = AutoModelForSeq2SeqLM.from_pretrained("Salesforce/codet5-base")
    return tokenizer, model

tokenizer, model = load_model()

# Main user interface
st.title("AI Code Assistant")
st.sidebar.title("Options")

# Main sections
section = st.sidebar.radio(
    "Choose a Section",
    ("Generate Code", "Train Model", "Prompt Engineer", "Optimize Model")
)

# 1. Generate code from a text description
if section == "Generate Code":
    st.header("Generate Code from Description")
    prompt = st.text_area("Enter your description:", "Write a Python function to reverse a string.")
    if st.button("Generate Code"):
        with st.spinner("Generating code..."):
            inputs = tokenizer(prompt, return_tensors="pt")
            outputs = model.generate(**inputs, max_length=100)
            code = tokenizer.decode(outputs[0], skip_special_tokens=True)
            st.code(code, language="python")

# 2. Train the model
elif section == "Train Model":
    st.header("Train the Model")
    st.write("Upload your dataset to fine-tune the model.")
    uploaded_file = st.file_uploader("Upload Dataset (JSON/CSV):")
    if uploaded_file is not None:
        st.write("Dataset uploaded successfully!")
        # You could parse the data or display a few samples here
        # (see the preview sketch after Step Three below).
    if st.button("Start Training"):
        with st.spinner("Training the model..."):
            time.sleep(5)  # Simulate training time
        st.success("Model training completed!")

# 3. Prompt engineering
elif section == "Prompt Engineer":
    st.header("Prompt Engineering")
    st.write("Experiment with different prompts to get the best results.")
    prompt_input = st.text_area("Enter a prompt:", "Explain this code: def add(a, b): return a + b")
    if st.button("Test Prompt"):
        with st.spinner("Testing the prompt..."):
            inputs = tokenizer(prompt_input, return_tensors="pt")
            outputs = model.generate(**inputs, max_length=100)
            response = tokenizer.decode(outputs[0], skip_special_tokens=True)
            st.write("Model Response:")
            st.code(response)

# 4. Optimize model performance
elif section == "Optimize Model":
    st.header("Optimize Model Performance")
    st.write("Adjust model parameters to improve performance.")
    learning_rate = st.slider("Learning Rate:", 1e-5, 1e-3, 1e-4, step=1e-5, format="%.5f")
    batch_size = st.slider("Batch Size:", 1, 64, 8, step=1)
    epochs = st.slider("Number of Epochs:", 1, 10, 3)
    if st.button("Apply Settings"):
        st.write(
            f"Settings Applied:\n"
            f"- Learning Rate: {learning_rate}\n"
            f"- Batch Size: {batch_size}\n"
            f"- Epochs: {epochs}"
        )
        st.success("Optimization settings saved!")
```

---

### **Step Three: Run the Application**

- Run the application with the following command:

```bash
streamlit run app.py
```
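
The "Train Model" section above only confirms that a file was uploaded. As a minimal sketch of how the placeholder comment could be filled in, assuming the uploaded file is either a CSV file or a JSON list of records (the preview logic below is illustrative, not part of the original app):

```python
import json

import pandas as pd
import streamlit as st

uploaded_file = st.file_uploader("Upload Dataset (JSON/CSV):")
if uploaded_file is not None:
    if uploaded_file.name.endswith(".csv"):
        df = pd.read_csv(uploaded_file)              # parse CSV into a DataFrame
    else:
        df = pd.DataFrame(json.load(uploaded_file))  # assume a JSON list of records
    st.write(f"Dataset uploaded successfully! ({len(df)} rows)")
    st.dataframe(df.head())                          # show the first few samples
```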
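
Likewise, the "Optimize Model" section collects a learning rate, batch size, and epoch count but only echoes them back, and "Start Training" simulates training with `time.sleep(5)`. The following is a hedged sketch of how those slider values could drive a real fine-tuning run with Hugging Face `Seq2SeqTrainer`; the toy inline dataset, column names, and output directory are assumptions for illustration, not the tutorial's actual training pipeline:

```python
from datasets import Dataset
from transformers import (
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    DataCollatorForSeq2Seq,
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
)

tokenizer = AutoTokenizer.from_pretrained("Salesforce/codet5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("Salesforce/codet5-base")

# Values that would come from the sidebar sliders in the app (hard-coded here).
learning_rate, batch_size, epochs = 1e-4, 8, 3

# Toy dataset for illustration only; a real run would use the uploaded file.
examples = [
    {"prompt": "Write a Python function to reverse a string.",
     "code": "def reverse(s):\n    return s[::-1]"},
]

def tokenize(batch):
    # Tokenize the natural-language prompt as input and the code as labels.
    model_inputs = tokenizer(batch["prompt"], truncation=True, max_length=128)
    labels = tokenizer(batch["code"], truncation=True, max_length=128)
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs

train_dataset = Dataset.from_list(examples).map(
    tokenize, batched=True, remove_columns=["prompt", "code"]
)

args = Seq2SeqTrainingArguments(
    output_dir="./codet5-finetuned",   # assumed output path
    learning_rate=learning_rate,
    per_device_train_batch_size=batch_size,
    num_train_epochs=epochs,
    report_to="none",                  # keep the demo free of external loggers
)

trainer = Seq2SeqTrainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    data_collator=DataCollatorForSeq2Seq(tokenizer, model=model),
)
trainer.train()
```

In the app itself, this training loop would replace the `time.sleep(5)` call inside the "Start Training" spinner, with the slider values read from the "Optimize Model" section instead of being hard-coded.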