Daemontatox commited on
Commit
e677444
·
verified ·
1 Parent(s): 40ce582

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +153 -96
app.py CHANGED
@@ -1,33 +1,125 @@
1
  import gradio as gr
2
- from threading import Thread
3
- import requests
4
- from typing import List, Tuple
5
- import json
6
  import os
7
- from queue import Queue
8
-
9
- # Replace with your Hugging Face API token
10
- HF_API_TOKEN = os.getenv("HF_API_TOKEN")
11
- API_URL = f"https://api-inference.huggingface.co/models/FuseAI/FuseO1-DeepSeekR1-QwQ-SkyT1-32B-Preview"
12
-
13
- DEFAULT_SYSTEM_PROMPT = """
14
- You are an Advanced AI Coding Assistant, designed to solve complex challenges and deliver efficient, dependable solutions. Follow this structured workflow for every task:
15
-
16
- 1. Understand: Analyze the problem thoroughly. Identify core objectives, resolve ambiguities, and ask clarifying questions if needed to ensure a complete understanding.
17
 
18
- 2. Plan: Outline a clear, step-by-step approach, detailing the tools, frameworks, and algorithms required to achieve the solution effectively.
 
 
 
 
19
 
20
- 3. Implement: Execute the plan with well-structured, efficient, and well-commented code. Provide a clear explanation of your thought process and the rationale behind key decisions as you proceed.
21
 
22
- 4. Validate: Test the solution rigorously to ensure accuracy, efficiency, and alignment with best practices. Debug and optimize where necessary.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
 
24
- 5. Conclude: Summarize the solution with a clear conclusion, highlighting its effectiveness. Suggest improvements, optimizations, or alternative approaches if applicable.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
- Guiding Principles:
27
- Use code as a tool for reasoning, with clear and educational explanations.
28
- Prioritize code readability, scalability, and maintainability.
29
- Adapt explanations to the user's skill level to maximize learning value.
30
- Refine solutions iteratively, incorporating feedback or evolving requirements.
31
  """
32
 
33
  CSS = """
@@ -44,89 +136,54 @@ def format_response(text: str) -> str:
44
  .replace("[Reason]", '\n<strong class="special-tag">[Reason]</strong>\n') \
45
  .replace("[Verify]", '\n<strong class="special-tag">[Verify]</strong>\n')
46
 
47
- def query_huggingface(
48
- payload: dict,
49
- api_url: str = API_URL,
50
- headers: dict = {"Authorization": f"Bearer {HF_API_TOKEN}"}
51
- ) -> requests.Response:
52
- """Send a request to the Hugging Face API endpoint."""
53
- return requests.post(api_url, headers=headers, json=payload)
54
-
55
- def stream_response(response_queue: Queue, chat_history: List[Tuple[str, str]], message: str):
56
- """Stream the response from the API and update the chat history."""
57
- partial_message = ""
58
- new_history = chat_history + [(message, "")]
59
-
60
- while True:
61
- try:
62
- token = response_queue.get()
63
- if token is None: # End of stream
64
- break
65
-
66
- partial_message += token
67
- formatted = format_response(partial_message)
68
- new_history[-1] = (message, formatted + "▌")
69
- yield new_history
70
-
71
- except Exception as e:
72
- print(f"Error in streaming: {e}")
73
- break
74
-
75
- # Final update without cursor
76
- new_history[-1] = (message, format_response(partial_message))
77
- yield new_history
78
-
79
  def generate_response(
80
  message: str,
81
  chat_history: List[Tuple[str, str]],
82
  system_prompt: str,
83
  temperature: float,
84
  max_tokens: int
85
- ) -> List[Tuple[str, str]]:
86
- """Generate a response using the Hugging Face API."""
87
  # Create conversation history
88
- conversation = [{"role": "system", "content": system_prompt}]
89
  for user_msg, bot_msg in chat_history:
90
- conversation.extend([
91
  {"role": "user", "content": user_msg},
92
  {"role": "assistant", "content": bot_msg}
93
  ])
94
- conversation.append({"role": "user", "content": message})
95
-
96
- # Prepare the payload
97
- payload = {
98
- "inputs": conversation,
99
- "parameters": {
100
- "max_new_tokens": max_tokens,
101
- "temperature": temperature,
102
- "stream": True
103
- }
104
- }
105
-
106
- # Create a queue for streaming tokens
107
- response_queue = Queue()
108
-
109
- def process_stream():
110
- try:
111
- response = query_huggingface(payload)
112
-
113
- # Parse streaming response
114
- for line in response.iter_lines():
115
- if line:
116
- json_response = json.loads(line)
117
- if "token" in json_response:
118
- response_queue.put(json_response["token"]["text"])
119
-
120
- response_queue.put(None) # Signal end of stream
121
- except Exception as e:
122
- print(f"Error in API call: {e}")
123
- response_queue.put(None)
124
-
125
- # Start processing thread
126
- Thread(target=process_stream).start()
127
-
128
- # Return generator for streaming
129
- return stream_response(response_queue, chat_history, message)
130
 
131
  # Create Gradio interface
132
  with gr.Blocks(css=CSS, theme=gr.themes.Soft()) as demo:
@@ -154,7 +211,7 @@ with gr.Blocks(css=CSS, theme=gr.themes.Soft()) as demo:
154
  clear.click(lambda: None, None, chatbot, queue=False)
155
 
156
  if __name__ == "__main__":
157
- if not HF_API_TOKEN:
158
  print("Please set your Hugging Face API token as environment variable HF_API_TOKEN")
159
  else:
160
  demo.queue().launch()
 
1
  import gradio as gr
2
+ from openai import OpenAI
 
 
 
3
  import os
4
+ from typing import List, Tuple
5
+ import time
 
 
 
 
 
 
 
 
6
 
7
# OpenAI-compatible client pointed at the Hugging Face serverless
# Inference API; authentication comes from the HF_API_TOKEN env var.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1/",
    api_key=os.getenv("HF_API_TOKEN"),
)

# Model served through the endpoint above.
MODEL_ID = "Qwen/Qwen2.5-Coder-32B-Instruct"
14
 
15
+ DEFAULT_SYSTEM_PROMPT = """
16
+ You are an expert software testing agent specializing in designing comprehensive test strategies and writing high-quality automated test scripts. Your role is to assist developers, product managers, and quality assurance teams by analyzing features, branch names, or explanations to produce detailed, effective test cases. You excel in identifying edge cases, ensuring robust test coverage, and delivering Playwright test scripts in JavaScript.
17
+
18
+ Capabilities:
19
+ Feature Understanding:
20
+
21
+ Analyze the feature description, branch name, or user explanation to extract its purpose, expected behavior, and key functionality.
22
+ Infer implicit requirements and edge cases that might not be explicitly mentioned.
23
+ Test Case Generation:
24
+
25
+ Design manual test cases for functional, non-functional, and exploratory testing. These should include:
26
+ Positive test cases (expected behavior).
27
+ Negative test cases (handling invalid inputs or unexpected conditions).
28
+ Edge cases (extreme or boundary conditions).
29
+ Performance and security-related scenarios, if applicable.
30
+ Write automated test cases in Playwright using JavaScript that adhere to modern testing standards.
31
+ Playwright Expertise:
32
+
33
+ Generate Playwright test scripts with modular, reusable code that follows best practices for maintainability and readability.
34
+ Use robust selectors (data attributes preferred) and implement techniques like handling asynchronous operations, mocking API responses, and parameterized testing where applicable.
35
+ Write test scripts with proper comments, error handling, and clear structure.
36
+ Coverage Prioritization:
37
+
38
+ Focus on high-priority areas like critical user flows, core functionality, and areas prone to failure.
39
+ Ensure comprehensive coverage for edge cases to make the system resilient.
40
+ Response Guidelines:
41
+ Context Analysis:
42
+
43
+ If the user provides a branch name, infer the feature or functionality it relates to and proceed to generate test cases.
44
+ If the user provides a feature explanation, ensure your test cases align with the described functionality and its goals.
45
+ Ask clarifying questions if necessary to improve your understanding before generating test cases.
46
+ Structured Output:
47
+
48
+ Start with a brief summary of the feature or inferred functionality based on the input.
49
+ Present manual test cases first, with a clear numbering format and detailed steps for testers to follow.
50
+ Follow with automated Playwright test scripts, formatted with proper indentation and ready for execution.
51
+ Test Cases Format:
52
+
53
+ Manual Test Cases:
54
+ ID: Test case identifier (e.g., TC001).
55
+ Title: Clear and descriptive title.
56
+ Precondition(s): Any setup required before execution.
57
+ Steps: Step-by-step instructions for execution.
58
+ Expected Result: The expected outcome of the test.
59
+ Playwright Automated Test Cases:
60
+ Include setup (browser context and page), reusable utility functions, and parameterized test cases where applicable.
61
+ Ensure clear commenting for each section of the script.
62
+ Best Practices:
63
+
64
+ Recommend improvements to testability if the input feature is unclear or incomplete.
65
+ Provide tips for maintaining the test suite, such as organizing tests by feature or tagging tests for easy execution.
66
+ Sample Output Template:
67
+ Feature Summary:
68
+
69
+ A concise summary of the feature or inferred functionality based on the user input.
70
+ Manual Test Cases:
71
+
72
+ vbnet
73
+ Copy
74
+ Edit
75
+ TC001: Verify successful login with valid credentials
76
+ Precondition(s): The user must have a valid account.
77
+ Steps:
78
+ 1. Navigate to the login page.
79
+ 2. Enter valid username and password.
80
+ 3. Click on the "Login" button.
81
+ Expected Result: The user is redirected to the dashboard.
82
+ Automated Playwright Test Case (JavaScript):
83
+
84
+ javascript
85
+ Copy
86
+ Edit
87
+ const { test, expect } = require('@playwright/test');
88
+
89
+ test.describe('Login Feature Tests', () => {
90
+ test('Verify successful login with valid credentials', async ({ page }) => {
91
+ // Navigate to the login page
92
+ await page.goto('https://example.com/login');
93
+
94
+ // Enter credentials
95
+ await page.fill('#username', 'testuser');
96
+ await page.fill('#password', 'password123');
97
+
98
+ // Click the login button
99
+ await page.click('button#login');
100
+
101
+ // Assert redirection to dashboard
102
+ await expect(page).toHaveURL('https://example.com/dashboard');
103
+ });
104
 
105
+ test('Verify login fails with invalid credentials', async ({ page }) => {
106
+ // Navigate to the login page
107
+ await page.goto('https://example.com/login');
108
+
109
+ // Enter invalid credentials
110
+ await page.fill('#username', 'invaliduser');
111
+ await page.fill('#password', 'wrongpassword');
112
+
113
+ // Click the login button
114
+ await page.click('button#login');
115
+
116
+ // Assert error message is displayed
117
+ const errorMessage = await page.locator('.error-message');
118
+ await expect(errorMessage).toHaveText('Invalid username or password.');
119
+ });
120
+ });
121
+ With this structure, you’ll provide detailed, high-quality test plans that are both actionable and easy to implement. Let me know if you'd like additional examples or refinements!
122
 
 
 
 
 
 
123
  """
124
 
125
  CSS = """
 
136
  .replace("[Reason]", '\n<strong class="special-tag">[Reason]</strong>\n') \
137
  .replace("[Verify]", '\n<strong class="special-tag">[Verify]</strong>\n')
138
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
139
  def generate_response(
140
  message: str,
141
  chat_history: List[Tuple[str, str]],
142
  system_prompt: str,
143
  temperature: float,
144
  max_tokens: int
145
+ ):
146
+ """Generate a response using the OpenAI-compatible Hugging Face API."""
147
  # Create conversation history
148
+ messages = [{"role": "system", "content": system_prompt}]
149
  for user_msg, bot_msg in chat_history:
150
+ messages.extend([
151
  {"role": "user", "content": user_msg},
152
  {"role": "assistant", "content": bot_msg}
153
  ])
154
+ messages.append({"role": "user", "content": message})
155
+
156
+ # Create new chat history with empty response
157
+ new_history = chat_history + [(message, "")]
158
+ partial_message = ""
159
+
160
+ try:
161
+ # Create streaming completion
162
+ stream = client.chat.completions.create(
163
+ model=MODEL_ID,
164
+ messages=messages,
165
+ temperature=temperature,
166
+ max_tokens=max_tokens,
167
+ stream=True
168
+ )
169
+
170
+ # Process the stream
171
+ for chunk in stream:
172
+ if chunk.choices[0].delta.content is not None:
173
+ partial_message += chunk.choices[0].delta.content
174
+ formatted = format_response(partial_message)
175
+ new_history[-1] = (message, formatted + "▌")
176
+ yield new_history
177
+ time.sleep(0.01) # Small delay to prevent UI lag
178
+
179
+ # Final update without cursor
180
+ new_history[-1] = (message, format_response(partial_message))
181
+ yield new_history
182
+
183
+ except Exception as e:
184
+ error_message = f"Error: {str(e)}"
185
+ new_history[-1] = (message, error_message)
186
+ yield new_history
 
 
 
187
 
188
  # Create Gradio interface
189
  with gr.Blocks(css=CSS, theme=gr.themes.Soft()) as demo:
 
211
  clear.click(lambda: None, None, chatbot, queue=False)
212
 
213
if __name__ == "__main__":
    # Launch only when an API token is configured (truthy, i.e. set and
    # non-empty); otherwise tell the operator what is missing.
    if os.getenv("HF_API_TOKEN"):
        demo.queue().launch()
    else:
        print("Please set your Hugging Face API token as environment variable HF_API_TOKEN")