acecalisto3 committed
Commit d021e06 · verified · 1 Parent(s): 9a306a6

Update app.py

Files changed (1)
  1. app.py +133 -96
app.py CHANGED
@@ -1,100 +1,137 @@
  import gradio as gr
- import requests
  from huggingface_hub import InferenceClient
-
- # Initialize Hugging Face client
- HF_MODEL = "mistralai/Mixtral-8x7B-Instruct-v0.1"
- HF_TOKEN = "your_hugging_face_api_token"  # Replace with your token
- client = InferenceClient(model=HF_MODEL, token=HF_TOKEN)
-
- # Persistent bot knowledge state
- bot_knowledge = {"dataset": None}
-
- def load_model(hf_token):
-     """Loads the model and tokenizer using the provided Hugging Face token."""
-     global model, tokenizer
-
-     # Set the token for Hugging Face authentication
-     HfFolder.save_token(hf_token, 'huggingface')
-
-     # Load the model
      try:
-         with gr.Progress() as progress:
-             progress(0, "Loading model...")
-             model = AutoModelForCausalLM.from_pretrained(DEFAULT_MODEL)
-             tokenizer = AutoTokenizer.from_pretrained(DEFAULT_MODEL)
-             progress(100, "Model loaded successfully!")
-             return "Model loaded successfully!"
      except Exception as e:
-         return f"Error loading model: {str(e)}"
-
- # Train chatbot by setting the dataset
- def train_chatbot(dataset):
-     bot_knowledge["dataset"] = dataset
-     return "Chatbot trained successfully!"
-
-
- # Chat function to process user input and generate bot responses
- def chat_with_bot(history, user_input):
-     if not bot_knowledge["dataset"]:
-         return history + [{"role": "bot", "content": "No dataset loaded. Please train the bot first."}]
-
-     # Append user input to the chat history
-     history.append({"role": "user", "content": user_input})
-
-     # Generate bot response
-     prompt = f"{bot_knowledge['dataset']} {user_input}"
-     try:
-         response = client.text_generation(prompt=prompt, max_new_tokens=128)
-         bot_response = response.get("generated_text", "Sorry, I couldn't generate a response.")
-     except Exception as e:
-         bot_response = f"Error generating response: {e}"
-
-     # Append bot response to the history
-     history.append({"role": "bot", "content": bot_response})
-     return history
-
-
- # Gradio Interface
- with gr.Blocks(theme="default") as app:
-     gr.Markdown("# **Intelligent Chatbot with Knowledge Training**")
-     gr.Markdown(
-         """
-         Train a chatbot with custom datasets and interact with it dynamically.
-         The bot will persist knowledge from the dataset and answer questions accordingly.
-         """
-     )
-
-     # Train chatbot section
-     with gr.Row():
-         chat_dataset = gr.Textbox(
-             label="Dataset for Training",
-             placeholder="Paste a dataset here to train the chatbot.",
-             lines=5,
-         )
-         train_button = gr.Button("Train Chatbot")
-
-     train_status = gr.Textbox(label="Training Status", interactive=False)
-
-     # Chat section
-     with gr.Row():
-         chatbot = gr.Chatbot(
-             label="Chat with Trained Bot",
-             type="messages",
-
-         )
-         user_input = gr.Textbox(
-             label="Your Message",
-             placeholder="Type your message and press Enter...",
-             lines=1,
-         )
-
-     # Train chatbot logic
-     train_button.click(train_chatbot, inputs=[chat_dataset], outputs=[train_status])
-
-     # Chat interaction logic
-     user_input.submit(chat_with_bot, inputs=[chatbot, user_input], outputs=chatbot)
-
- # Launch app
- if __name__ == "__main__":
-     app.launch(server_name="0.0.0.0", server_port=7860)
  import gradio as gr
  from huggingface_hub import InferenceClient
+ import os
+ import asyncio
+ import aiohttp
+ import json
+ import logging
+ import hashlib
+ from typing import List, Dict, Tuple
+ from transformers import pipeline
+ from sentence_transformers import SentenceTransformer, util
+ from pydantic import BaseModel, SecretStr
+
+ # Enable detailed logging
+ logging.basicConfig(level=logging.INFO)
+
+ # Hugging Face Inference Client
+ client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+
+ # Load a pre-trained model for sentence similarity
+ similarity_model = SentenceTransformer('all-mpnet-base-v2')
+
+ class GitHubConfig(BaseModel):
+     username: str
+     repository: str
+     api_token: SecretStr
+
+ class GitHubIntegration:
+     def __init__(self, config: GitHubConfig):
+         self.config = config
+         self.headers = {
+             "Authorization": f"Bearer {self.config.api_token.get_secret_value()}",
+             "Accept": "application/vnd.github.v3+json"
+         }
+         self.url = "https://api.github.com"
+
+     async def fetch_issues(self) -> List[Dict]:
+         cache_key = hashlib.md5(f"{self.config.username}/{self.config.repository}".encode()).hexdigest()
+         cached_data = await self._load_cache(cache_key)
+         if cached_data:
+             return cached_data
+
+         url = f"{self.url}/repos/{self.config.username}/{self.config.repository}/issues"
+         try:
+             async with aiohttp.ClientSession() as session:
+                 async with session.get(url, headers=self.headers) as response:
+                     response.raise_for_status()
+                     issues = await response.json()
+                     await self._save_cache(cache_key, issues)
+                     return issues
+         except Exception as e:
+             logging.error(f"GitHub API error: {str(e)}")
+             return []
+
+     async def _load_cache(self, key: str) -> List[Dict] | None:
+         cache_file = f"cache_{key}.json"
+         if os.path.exists(cache_file):
+             # aiohttp cannot fetch file:// URLs; read the cached JSON directly from disk
+             with open(cache_file) as f:
+                 return json.load(f)
+         return None
+
+     async def _save_cache(self, key: str, data: List[Dict]):
+         cache_file = f"cache_{key}.json"
+         with open(cache_file, "w") as f:
+             json.dump(data, f)
+
+ async def analyze_issues(issue_text: str, model_name: str) -> str:
+     """
+     Analyze issues and provide solutions.
+     """
+     logging.info(f"Analyzing issue with model: {model_name}")
+     prompt = f"""
+     Issue: {issue_text}
+     Please provide a comprehensive resolution in the following format:
+     ## Problem Summary:
+     ## Root Cause Analysis:
+     ## Solution Options:
+     ## Recommended Solution:
+     ## Implementation Steps:
+     ## Verification Steps:
+     """
      try:
+         nlp = pipeline("text-generation", model=model_name, max_length=1000)
+         result = nlp(prompt)
+         return result[0]['generated_text']
      except Exception as e:
+         logging.error(f"Error analyzing issue: {e}")
+         return "Error analyzing issue."
+
+ async def find_related_issues(issue_text: str, issues: list) -> list:
+     """
+     Find related issues using similarity model.
+     """
+     issue_embedding = similarity_model.encode(issue_text)
+     related_issues = [
+         (issue, util.cos_sim(issue_embedding, similarity_model.encode(issue['title']))[0][0])
+         for issue in issues
+     ]
+     return sorted(related_issues, key=lambda x: x[1], reverse=True)[:3]
+
+ def respond(command: str, github_api_token: str, github_username: str, github_repository: str, selected_model: str) -> str:
+     """
+     Handle chat responses.
+     """
+     if command.startswith("/github"):
+         # Expected form: /github <username> <repo> <token>
+         parts = command.split(maxsplit=3)
+         if len(parts) < 4:
+             return "❌ Format: /github <username> <repo> <token>"
+         github_client = GitHubIntegration(GitHubConfig(username=parts[1], repository=parts[2], api_token=SecretStr(parts[3])))
+         issues = asyncio.run(github_client.fetch_issues())
+         return "✅ GitHub configured successfully."
+     elif command.startswith("/analyze"):
+         issue_text = command.replace("/analyze", "").strip()
+         return asyncio.run(analyze_issues(issue_text, selected_model))
+     elif command.startswith("/list_issues"):
+         github_client = GitHubIntegration(GitHubConfig(username=github_username, repository=github_repository, api_token=SecretStr(github_api_token)))
+         issues = asyncio.run(github_client.fetch_issues())
+         return "\n".join([f"- {issue['title']} (Issue #{issue['number']})" for issue in issues])
+     else:
+         return "Unknown command. Use /help for instructions."
+
+ iface = gr.Interface(
+     fn=respond,
+     inputs=[
+         gr.Textbox(label="Command"),
+         gr.Textbox(label="GitHub API Token", placeholder="Enter your GitHub API token"),
+         gr.Textbox(label="GitHub Username", placeholder="Enter your GitHub username"),
+         gr.Textbox(label="GitHub Repository", placeholder="Enter your GitHub repository"),
+         # Choices must be text-generation checkpoints on the Hugging Face Hub so pipeline() can load them
+         gr.Dropdown(label="Model", choices=["gpt2", "distilgpt2"], value="gpt2"),
+     ],
+     outputs=gr.Textbox(label="Response"),
+     title="AI GitHub Assistant",
+     description="Interact with GitHub and analyze issues with AI.",
+ )
+
+ iface.launch(share=True)
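
For reference, the new command handler can be exercised directly from a Python session where the functions above are in scope. This is a minimal sketch, not part of the commit: the GITHUB_TOKEN environment variable, the octocat/Hello-World repository, and the gpt2 model name are illustrative stand-ins.

# Illustrative smoke test only; assumes respond() and its dependencies are already defined in the session.
token = os.environ.get("GITHUB_TOKEN", "")  # assumed env var holding a GitHub personal access token
print(respond("/list_issues", token, "octocat", "Hello-World", "gpt2"))
print(respond("/analyze The login page intermittently returns a 500 error", token, "octocat", "Hello-World", "gpt2"))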