MingZ6 committed
Commit f57b851 · 1 Parent(s): b52dd21

Add Dockerfile, update agents and app for environment variable support, and enhance requirements

Files changed (5)
  1. Dockerfile +32 -0
  2. agents.py +41 -17
  3. app.py +4 -1
  4. requirements.txt +5 -0
  5. space.yml +7 -0
Dockerfile ADDED
@@ -0,0 +1,32 @@
+ FROM python:3.11-slim
+
+ WORKDIR /app
+
+ # Install system dependencies
+ RUN apt-get update && apt-get install -y \
+     build-essential \
+     curl \
+     software-properties-common \
+     git \
+     && rm -rf /var/lib/apt/lists/*
+
+ # Set environment variables
+ ENV PYTHONDONTWRITEBYTECODE=1
+ ENV PYTHONUNBUFFERED=1
+ ENV PORT=7860
+
+ # Copy requirements and install Python dependencies
+ COPY requirements.txt .
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # Copy the rest of the application
+ COPY . .
+
+ # Set default API key (this will be overridden by Hugging Face secrets)
+ ENV TOGETHER_API_KEY=""
+
+ # Expose the port the app runs on
+ EXPOSE 7860
+
+ # Command to run the application
+ CMD ["python", "app.py"]
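The image bakes in an empty TOGETHER_API_KEY only as a placeholder; a Hugging Face Space secret of the same name (or an -e flag on docker run) overrides it at container start. A minimal Python sketch of what the application sees inside this container, assuming only the variables set above (the printed message is illustrative, not repository code):

import os

# PORT and TOGETHER_API_KEY are the variables exported by the Dockerfile above.
port = int(os.environ.get("PORT", 7860))
api_key = os.environ.get("TOGETHER_API_KEY", "")
if not api_key:
    # With no secret configured, the agents fall back to config.json (see agents.py below).
    print(f"No Together API key in the environment; serving on port {port} anyway.")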
agents.py CHANGED
@@ -5,21 +5,42 @@ import json
  warnings.filterwarnings("ignore")
  from together import Together

- # Load configuration from config.json
+ # Load configuration from config.json or environment variables
  def load_config():
-     config_path = os.path.join(os.path.dirname(__file__), "config.json")
-     with open(config_path, "r") as f:
-         return json.load(f)
+     # First try to get API key from environment variable (for Hugging Face deployment)
+     together_api_key = os.environ.get("TOGETHER_API_KEY", "")
+
+     # If not found in environment, try to load from config.json (for local development)
+     if not together_api_key:
+         try:
+             config_path = os.path.join(os.path.dirname(__file__), "config.json")
+             if os.path.exists(config_path):
+                 with open(config_path, "r") as f:
+                     config = json.load(f)
+                     together_api_key = config.get("together_ai_token", "")
+                     model_name = config.get("model", "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8")
+                     return {"together_ai_token": together_api_key, "model": model_name}
+         except Exception as e:
+             print(f"Error loading config.json: {str(e)}")
+
+     # Return a config dictionary with the API key from environment variable
+     return {
+         "together_ai_token": together_api_key,
+         "model": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"  # Default model
+     }

  # Get API key and model from config
  config = load_config()
  your_api_key = config["together_ai_token"]
  model = config.get("model", "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8")  # Use default if not in config
- client = Together(api_key=your_api_key)

+ # Initialize client only if we have an API key
+ client = Together(api_key=your_api_key) if your_api_key else None

  def prompt_llm(prompt, show_cost=False):
      # This function allows us to prompt an LLM via the Together API
+     if not client:
+         return "Error: Together API client not initialized. Please check your API key."

      # Calculate the number of tokens
      tokens = len(prompt.split())
@@ -31,16 +52,19 @@ def prompt_llm(prompt, show_cost=False):
      print(f"Estimated cost for {model}: ${cost:.10f}\n")

      # Make the API call
-     response = client.chat.completions.create(
-         model=model,
-         messages=[{"role": "user", "content": prompt}],
-     )
-     return response.choices[0].message.content
+     try:
+         response = client.chat.completions.create(
+             model=model,
+             messages=[{"role": "user", "content": prompt}],
+         )
+         return response.choices[0].message.content
+     except Exception as e:
+         return f"Error calling Together API: {str(e)}"


  class SummarizerAgent:
      def __init__(self):
-         self.client = Together(api_key=your_api_key)
+         self.client = client

      def process(self, content):
          prompt = """SYSTEM: You are an expert code summarizer.
@@ -64,7 +88,7 @@ class SummarizerAgent:

  class InsightAgent:
      def __init__(self):
-         self.client = Together(api_key=your_api_key)
+         self.client = client

      def process_text(self, summaries):
          # Process a list of summary texts directly
@@ -94,7 +118,7 @@ class InsightAgent:

  class RecommenderAgent:
      def __init__(self):
-         self.client = Together(api_key=your_api_key)
+         self.client = client

      def process(self, insights, summaries, user_goal, persona=""):
          prompt = """SYSTEM: You are an expert code consultant who provides actionable recommendations.
@@ -160,7 +184,7 @@ class RecommenderAgent:

  class QuestionGeneratorAgent:
      def __init__(self):
-         self.client = Together(api_key=your_api_key)
+         self.client = client

      def generate_questions(self, content, category, source):
          prompt_template_path = os.path.join(os.path.dirname(__file__), "PromptTemplate.json")
@@ -180,7 +204,7 @@ class QuestionGeneratorAgent:

  class CLISetupAgent:
      def __init__(self):
-         self.client = Together(api_key=your_api_key)
+         self.client = client

      def generate_setup_instructions(self, repo_content, repo_metadata):
          """Generate step-by-step CLI instructions to set up the environment for a repository."""
@@ -262,7 +286,7 @@ class ChatbotAgent:
      """Agent for answering questions about GitHub repositories."""

      def __init__(self):
-         self.client = Together(api_key=your_api_key)
+         self.client = client

      def answer_question(self, question, repo_content, repo_metadata, summaries=None, insights=None):
          """
@@ -400,7 +424,7 @@ class PRReviewAgent:
      """Agent for reviewing GitHub Pull Requests and providing professional code feedback."""

      def __init__(self):
-         self.client = Together(api_key=your_api_key)
+         self.client = client

      def review_pr(self, pr_details, target_branch_code):
          """
app.py CHANGED
@@ -316,4 +316,7 @@ def verify_credentials():
      return jsonify({"valid": False, "error": "Invalid GitHub credentials"}), 401

  if __name__ == "__main__":
-     app.run(debug=True, port=5001, host="0.0.0.0")
+     # Use environment variables for port if available (needed for Hugging Face)
+     port = int(os.environ.get('PORT', 5001))
+     # Bind to 0.0.0.0 instead of 127.0.0.1 to be accessible from outside the container
+     app.run(debug=False, host='0.0.0.0', port=port)
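The entry point now derives its port from the environment, which lines up with ENV PORT=7860 in the Dockerfile and the app_port declared in space.yml below. A quick standalone check of the fallback behaviour (sketch only, not repository code):

import os

# Without PORT set, the app keeps its local default of 5001.
os.environ.pop("PORT", None)
assert int(os.environ.get("PORT", 5001)) == 5001

# Inside the container the Dockerfile exports PORT=7860, so Flask binds there instead.
os.environ["PORT"] = "7860"
assert int(os.environ.get("PORT", 5001)) == 7860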
requirements.txt CHANGED
@@ -1,3 +1,8 @@
  flask==2.3.3
  requests==2.31.0
+ beautifulsoup4==4.12.2
+ together>=0.1.5
+ bs4==0.0.1
+ lxml>=5.3.0
+ gunicorn==20.1.0
  duckduckgo-search==3.0.2
space.yml ADDED
@@ -0,0 +1,7 @@
+ title: GitHub Helper
+ emoji: 🛠️
+ colorFrom: blue
+ colorTo: purple
+ sdk: docker
+ pinned: false
+ app_port: 7860