ginipick committed on
Commit
9857813
·
verified ·
1 Parent(s): 40b95f1

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +397 -0
app.py ADDED
@@ -0,0 +1,397 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from huggingface_hub import InferenceClient, HfApi
3
+ import os
4
+ import requests
5
+ from typing import List, Dict, Union, Tuple
6
+ import traceback
7
+ from PIL import Image
8
+ from io import BytesIO
9
+ import asyncio
10
+ from gradio_client import Client
11
+ import time
12
+ import threading
13
+ import json
14
+ import re
15
+ import asyncio
16
+
17
+
18
+ HF_TOKEN = os.getenv("HF_TOKEN")
19
+ hf_client = InferenceClient("CohereForAI/c4ai-command-r-plus-08-2024", token=HF_TOKEN)
20
+ hf_api = HfApi(token=HF_TOKEN)
21
+
22
def get_headers():
    """Build the Authorization header for Hugging Face API requests.

    Returns:
        dict: a single-entry mapping with the Bearer token.

    Raises:
        ValueError: when the HF_TOKEN environment variable was not set.
    """
    if HF_TOKEN:
        return {"Authorization": f"Bearer {HF_TOKEN}"}
    raise ValueError("Hugging Face token not found in environment variables")
26
+
27
def get_file_content(space_id: str, file_path: str) -> str:
    """Fetch the raw text of one file from a Hugging Face Space repo.

    Parameters:
        space_id: "owner/space-name" identifier of the Space.
        file_path: path of the file inside the Space repo (e.g. "app.py").

    Returns:
        The file content on HTTP 200; otherwise a human-readable error
        string (callers render the result directly, so errors are returned
        rather than raised).
    """
    file_url = f"https://huggingface.co/spaces/{space_id}/raw/main/{file_path}"
    try:
        # FIX: a timeout is mandatory here — without one a stalled
        # connection blocks the whole analysis pipeline indefinitely.
        response = requests.get(file_url, headers=get_headers(), timeout=30)
        if response.status_code == 200:
            return response.text
        return f"File not found or inaccessible: {file_path}"
    except requests.RequestException:
        return f"Error fetching content for file: {file_path}"
37
+
38
def get_space_structure(space_id: str) -> Dict:
    """Return the Space's file listing as a nested directory tree.

    Each node is a dict with keys "type" ("directory"/"file"), "path",
    "name" and, for directories, "children". On failure a dict of the
    form {"error": "..."} is returned instead of raising.
    """
    try:
        paths = hf_api.list_repo_files(repo_id=space_id, repo_type="space")

        root = {"type": "directory", "path": "", "name": space_id, "children": []}
        for path in paths:
            parts = path.split('/')
            node = root
            # Walk (creating as needed) every intermediate directory,
            # then attach the file itself as a leaf on the last part.
            for depth, part in enumerate(parts):
                if depth == len(parts) - 1:
                    node["children"].append({"type": "file", "path": path, "name": part})
                    continue
                existing = None
                for child in node["children"]:
                    if child["type"] == "directory" and child["name"] == part:
                        existing = child
                        break
                if existing is None:
                    existing = {
                        "type": "directory",
                        "path": '/'.join(parts[:depth + 1]),
                        "name": part,
                        "children": [],
                    }
                    node["children"].append(existing)
                node = existing

        return root
    except Exception as e:
        print(f"Error in get_space_structure: {str(e)}")
        return {"error": f"API request error: {str(e)}"}
65
+
66
def format_tree_structure(tree_data: Dict, indent: str = "") -> str:
    """Render the tree from get_space_structure as an indented text listing.

    Directories are listed before files and each kind is alphabetical;
    an {"error": ...} node short-circuits to its message.
    """
    if "error" in tree_data:
        return tree_data["error"]

    is_dir = tree_data.get('type') == 'directory'
    icon = '๐Ÿ“' if is_dir else '๐Ÿ“„'
    rows = [f"{indent}{icon} {tree_data.get('name', 'Unknown')}\n"]
    if is_dir:
        # sort key: directories first (False < True), then by name
        ordered = sorted(
            tree_data.get("children", []),
            key=lambda c: (c.get("type", "") != "directory", c.get("name", "")),
        )
        for child in ordered:
            rows.append(format_tree_structure(child, indent + "  "))
    return "".join(rows)
75
+
76
def summarize_code(app_content: str):
    """Ask the LLM for a summary of the given code (at most three lines).

    Returns the model's reply text, or an error string on failure.
    """
    messages = [
        {"role": "system", "content": "๋‹น์‹ ์€ Python ์ฝ”๋“œ๋ฅผ ๋ถ„์„ํ•˜๊ณ  ์š”์•ฝํ•˜๋Š” AI ์กฐ์ˆ˜์ž…๋‹ˆ๋‹ค. ์ฃผ์–ด์ง„ ์ฝ”๋“œ๋ฅผ 3์ค„ ์ด๋‚ด๋กœ ๊ฐ„๊ฒฐํ•˜๊ฒŒ ์š”์•ฝํ•ด์ฃผ์„ธ์š”."},
        {"role": "user", "content": f"๋‹ค์Œ Python ์ฝ”๋“œ๋ฅผ 3์ค„ ์ด๋‚ด๋กœ ์š”์•ฝํ•ด์ฃผ์„ธ์š”:\n\n{app_content}"},
    ]

    try:
        result = hf_client.chat_completion(messages, max_tokens=200, temperature=0.7)
        return result.choices[0].message.content
    except Exception as e:
        return f"์š”์•ฝ ์ƒ์„ฑ ์ค‘ ์˜ค๋ฅ˜ ๋ฐœ์ƒ: {str(e)}"
90
+
91
def analyze_code(app_content: str):
    """Ask the LLM for a structured analysis (items A-E) of the given code.

    Returns Markdown-formatted analysis text, or an error string on failure.
    """
    system_message = """๋‹น์‹ ์€ Python ์ฝ”๋“œ๋ฅผ ๋ถ„์„ํ•˜๋Š” AI ์กฐ์ˆ˜์ž…๋‹ˆ๋‹ค. ์ฃผ์–ด์ง„ ์ฝ”๋“œ๋ฅผ ๋ถ„์„ํ•˜์—ฌ ๋‹ค์Œ ํ•ญ๋ชฉ์— ๋Œ€ํ•ด ์„ค๋ช…ํ•ด์ฃผ์„ธ์š”:
A. ๋ฐฐ๊ฒฝ ๋ฐ ํ•„์š”์„ฑ
B. ๊ธฐ๋Šฅ์  ํšจ์šฉ์„ฑ ๋ฐ ๊ฐ€์น˜
C. ํŠน์žฅ์ 
D. ์ ์šฉ ๋Œ€์ƒ ๋ฐ ํƒ€๊ฒŸ
E. ๊ธฐ๋Œ€ํšจ๊ณผ
๊ธฐ์กด ๋ฐ ์œ ์‚ฌ ํ”„๋กœ์ ํŠธ์™€ ๋น„๊ตํ•˜์—ฌ ๋ถ„์„ํ•ด์ฃผ์„ธ์š”. Markdown ํ˜•์‹์œผ๋กœ ์ถœ๋ ฅํ•˜์„ธ์š”."""

    try:
        result = hf_client.chat_completion(
            [
                {"role": "system", "content": system_message},
                {"role": "user", "content": f"๋‹ค์Œ Python ์ฝ”๋“œ๋ฅผ ๋ถ„์„ํ•ด์ฃผ์„ธ์š”:\n\n{app_content}"},
            ],
            max_tokens=1000,
            temperature=0.7,
        )
        return result.choices[0].message.content
    except Exception as e:
        return f"๋ถ„์„ ์ƒ์„ฑ ์ค‘ ์˜ค๋ฅ˜ ๋ฐœ์ƒ: {str(e)}"
111
+
112
def explain_usage(app_content: str):
    """Ask the LLM for a screen-by-screen usage guide for the given code.

    Returns Markdown-formatted usage text, or an error string on failure.
    """
    conversation = [
        {"role": "system", "content": "๋‹น์‹ ์€ Python ์ฝ”๋“œ๋ฅผ ๋ถ„์„ํ•˜์—ฌ ์‚ฌ์šฉ๋ฒ•์„ ์„ค๋ช…ํ•˜๋Š” AI ์กฐ์ˆ˜์ž…๋‹ˆ๋‹ค. ์ฃผ์–ด์ง„ ์ฝ”๋“œ๋ฅผ ๋ฐ”ํƒ•์œผ๋กœ ๋งˆ์น˜ ํ™”๋ฉด์„ ๋ณด๋Š” ๊ฒƒ์ฒ˜๋Ÿผ ์‚ฌ์šฉ๋ฒ•์„ ์ƒ์„ธํžˆ ์„ค๋ช…ํ•ด์ฃผ์„ธ์š”. Markdown ํ˜•์‹์œผ๋กœ ์ถœ๋ ฅํ•˜์„ธ์š”."},
        {"role": "user", "content": f"๋‹ค์Œ Python ์ฝ”๋“œ์˜ ์‚ฌ์šฉ๋ฒ•์„ ์„ค๋ช…ํ•ด์ฃผ์„ธ์š”:\n\n{app_content}"},
    ]

    try:
        result = hf_client.chat_completion(conversation, max_tokens=800, temperature=0.7)
        return result.choices[0].message.content
    except Exception as e:
        return f"์‚ฌ์šฉ๋ฒ• ์„ค๋ช… ์ƒ์„ฑ ์ค‘ ์˜ค๋ฅ˜ ๋ฐœ์ƒ: {str(e)}"
126
+
127
def adjust_lines_for_code(code_content: str, min_lines: int = 10, max_lines: int = 100) -> int:
    """Pick a display line count for a code viewer, clamped to a range.

    Parameters:
        code_content: the code text whose line count drives the result.
        min_lines: smallest value that may be returned.
        max_lines: largest value that may be returned.

    Returns:
        The number of lines in code_content, clamped to [min_lines, max_lines].
    """
    # '\n' count + 1 equals len(code_content.split('\n')) for every string,
    # including the empty one (both give 1).
    line_count = code_content.count('\n') + 1
    if line_count < min_lines:
        return min_lines
    if line_count > max_lines:
        return max_lines
    return line_count
143
+
144
def analyze_space(url: str, progress=gr.Progress()):
    """Run the full analysis pipeline for a Hugging Face Space URL.

    Fetches the file tree and app.py, then generates a summary, an
    analysis and a usage guide via the LLM, reporting progress throughout.

    Returns a tuple of
        (app.py content, tree view text, tree dict, space_id,
         summary, analysis, usage, display line count).
    On failure the first element is an error message and the rest are
    blank placeholders so the Gradio outputs still bind.
    """
    try:
        # FIX: normalize the URL before validating — drop trailing slashes
        # and any extra path segments so inputs like
        # ".../spaces/owner/name/" or ".../spaces/owner/name/tree/main"
        # also resolve to "owner/name" instead of failing the regex.
        tail = url.rstrip('/').split('spaces/')[-1]
        space_id = '/'.join(tail.split('/')[:2])

        if not re.match(r'^[\w.-]+/[\w.-]+$', space_id):
            raise ValueError(f"Invalid Space ID format: {space_id}")

        progress(0.1, desc="ํŒŒ์ผ ๊ตฌ์กฐ ๋ถ„์„ ์ค‘...")
        tree_structure = get_space_structure(space_id)
        if "error" in tree_structure:
            raise ValueError(tree_structure["error"])
        tree_view = format_tree_structure(tree_structure)

        progress(0.3, desc="app.py ๋‚ด์šฉ ๊ฐ€์ ธ์˜ค๋Š” ์ค‘...")
        app_content = get_file_content(space_id, "app.py")

        progress(0.5, desc="์ฝ”๋“œ ์š”์•ฝ ์ค‘...")
        summary = summarize_code(app_content)

        progress(0.7, desc="์ฝ”๋“œ ๋ถ„์„ ์ค‘...")
        analysis = analyze_code(app_content)

        progress(0.9, desc="์‚ฌ์šฉ๋ฒ• ์„ค๋ช… ์ƒ์„ฑ ์ค‘...")
        usage = explain_usage(app_content)

        # size the code viewer to the fetched file's length
        app_py_lines = adjust_lines_for_code(app_content)

        progress(1.0, desc="์™„๋ฃŒ")
        return app_content, tree_view, tree_structure, space_id, summary, analysis, usage, app_py_lines
    except Exception as e:
        print(f"Error in analyze_space: {str(e)}")
        print(traceback.format_exc())
        return f"์˜ค๋ฅ˜๊ฐ€ ๋ฐœ์ƒํ–ˆ์Šต๋‹ˆ๋‹ค: {str(e)}", "", None, "", "", "", "", 10
179
+
180
+
181
+
182
async def respond_stream(message: str, chat_history: List[Dict[str, str]], max_tokens: int, temperature: float, top_p: float):
    """Stream an assistant reply for the AI-coding chat tab.

    Parameters:
        message: the user's new message.
        chat_history: prior turns as {"role", "content"} dicts.
        max_tokens / temperature / top_p: generation settings.

    Yields:
        The accumulated reply text after each received chunk; on failure
        a single error message is yielded instead.
    """
    system_message = """๋‹น์‹ ์€ ํ—ˆ๊น…ํŽ˜์ด์Šค์— ํŠนํ™”๋œ AI ์ฝ”๋”ฉ ์ „๋ฌธ๊ฐ€์ž…๋‹ˆ๋‹ค. ์‚ฌ์šฉ์ž์˜ ์งˆ๋ฌธ์— ์นœ์ ˆํ•˜๊ณ  ์ƒ์„ธํ•˜๊ฒŒ ๋‹ต๋ณ€ํ•ด์ฃผ์„ธ์š”.
Gradio ํŠน์„ฑ์„ ์ •ํ™•ํžˆ ์ธ์‹ํ•˜๊ณ  Requirements.txt ๋ˆ„๋ฝ์—†์ด ์ฝ”๋”ฉ๊ณผ ์˜ค๋ฅ˜๋ฅผ ํ•ด๊ฒฐํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค.
ํ•ญ์ƒ ์ •ํ™•ํ•˜๊ณ  ์œ ์šฉํ•œ ์ •๋ณด๋ฅผ ์ œ๊ณตํ•˜๋„๋ก ๋…ธ๋ ฅํ•˜์„ธ์š”."""

    messages = [{"role": "system", "content": system_message}]
    messages.extend(chat_history)
    messages.append({"role": "user", "content": message})

    try:
        # FIX: the original called hf_client.text_generation() with the
        # model id as the *prompt* and the message list as a positional
        # argument — that API takes a plain prompt string, so the call
        # failed at runtime. chat_completion(..., stream=True) is the
        # correct streaming chat API and matches the rest of this file
        # (the client is already bound to the model).
        stream = hf_client.chat_completion(
            messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=True,
        )

        full_response = ""
        for chunk in stream:
            # each streamed chunk carries an incremental delta of content
            delta = chunk.choices[0].delta.content or ""
            full_response += delta
            yield full_response
    except Exception as e:
        yield f"์‘๋‹ต ์ƒ์„ฑ ์ค‘ ์˜ค๋ฅ˜ ๋ฐœ์ƒ: {str(e)}"
209
+
210
+
211
def create_ui():
    """Build and return the Gradio Blocks application.

    Two tabs: a Space analyzer (URL in; summary, analysis, usage guide,
    file tree and code viewers out) and an AI-coding chat backed by
    respond_stream.
    """
    try:
        # Custom CSS: hides the Gradio footer, frames the output groups,
        # constrains the scroll areas, and themes the tab navigation.
        css = """
        footer {visibility: hidden;}
        .output-group {
            border: 1px solid #ddd;
            border-radius: 5px;
            padding: 10px;
            margin-bottom: 20px;
        }
        .scroll-lock {
            overflow-y: auto !important;
            max-height: calc((100vh - 200px) / 5) !important;
        }
        .tree-view-scroll {
            overflow-y: auto !important;
            max-height: calc((100vh - 200px) / 2) !important;
        }
        .full-height {
            height: calc(200em * 1.2) !important;
            overflow-y: auto !important;
        }
        .code-box {
            overflow-x: auto !important;
            overflow-y: auto !important;
            white-space: pre !important;
            word-wrap: normal !important;
            height: 100% !important;
        }
        .code-box > div {
            min-width: 100% !important;
        }
        .code-box > div > textarea {
            word-break: normal !important;
            overflow-wrap: normal !important;
        }
        .tab-nav {
            background-color: #2c3e50;
            border-radius: 5px 5px 0 0;
            overflow: hidden;
        }
        .tab-nav button {
            color: #ecf0f1 !important;
            background-color: #34495e;
            border: none;
            padding: 10px 20px;
            margin: 0;
            transition: background-color 0.3s;
            font-size: 16px;
            font-weight: bold;
        }
        .tab-nav button:hover {
            background-color: #2980b9;
        }
        .tab-nav button.selected {
            color: #2c3e50 !important;
            background-color: #ecf0f1;
        }
        input[type="text"], textarea {
            color: #2c3e50 !important;
            background-color: #ecf0f1 !important;
        }
        """

        with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
            gr.Markdown("# Mouse: HuggingFace")

            with gr.Tabs() as tabs:
                with gr.TabItem("๋ถ„์„"):
                    with gr.Row():
                        with gr.Column(scale=6):  # left column, 60% of the row
                            url_input = gr.Textbox(label="HuggingFace Space URL")
                            analyze_button = gr.Button("๋ถ„์„")

                            with gr.Group(elem_classes="output-group scroll-lock"):
                                summary_output = gr.Markdown(label="์š”์•ฝ (3์ค„ ์ด๋‚ด)")

                            with gr.Group(elem_classes="output-group scroll-lock"):
                                analysis_output = gr.Markdown(label="๋ถ„์„")

                            with gr.Group(elem_classes="output-group scroll-lock"):
                                usage_output = gr.Markdown(label="์‚ฌ์šฉ๋ฒ•")

                            with gr.Group(elem_classes="output-group tree-view-scroll"):  # scrollable tree view
                                tree_view_output = gr.Textbox(label="ํŒŒ์ผ ๊ตฌ์กฐ (Tree View)", lines=30)

                        with gr.Column(scale=4):  # right column, 40% of the row
                            with gr.Group(elem_classes="output-group full-height"):
                                code_tabs = gr.Tabs()
                                with code_tabs:
                                    app_py_tab = gr.TabItem("app.py")
                                    with app_py_tab:
                                        app_py_content = gr.Code(
                                            language="python",
                                            label="app.py",
                                            lines=200,
                                            elem_classes="full-height code-box"
                                        )
                                    requirements_tab = gr.TabItem("requirements.txt")
                                    with requirements_tab:
                                        requirements_content = gr.Textbox(
                                            label="requirements.txt",
                                            lines=200,
                                            elem_classes="full-height code-box"
                                        )

                with gr.TabItem("AI ์ฝ”๋”ฉ"):
                    chatbot = gr.Chatbot(label="๋Œ€ํ™”", type='messages')
                    msg = gr.Textbox(label="๋ฉ”์‹œ์ง€")

                    # generation knobs exist as components (so handlers can
                    # read them) but are hidden from the user
                    max_tokens = gr.Slider(minimum=1, maximum=8000, value=4000, label="Max Tokens", visible=False)
                    temperature = gr.Slider(minimum=0, maximum=1, value=0.7, label="Temperature", visible=False)
                    top_p = gr.Slider(minimum=0, maximum=1, value=0.9, label="Top P", visible=False)

                    # canned prompts shown below the message box
                    examples = [
                        ["์ƒ์„ธํ•œ ์‚ฌ์šฉ ๋ฐฉ๋ฒ•์„ ๋งˆ์น˜ ํ™”๋ฉด์„ ๋ณด๋ฉด์„œ ์„ค๋ช…ํ•˜๋“ฏ์ด 4000 ํ† ํฐ ์ด์ƒ ์ž์„ธํžˆ ์„ค๋ช…ํ•˜๋ผ"],
                        ["FAQ 20๊ฑด์„ ์ƒ์„ธํ•˜๊ฒŒ ์ž‘์„ฑํ•˜๋ผ. 4000ํ† ํฐ ์ด์ƒ ์‚ฌ์šฉํ•˜๋ผ."],
                        ["์‚ฌ์šฉ ๋ฐฉ๋ฒ•๊ณผ ์ฐจ๋ณ„์ , ํŠน์ง•, ๊ฐ•์ ์„ ์ค‘์‹ฌ์œผ๋กœ 4000 ํ† ํฐ ์ด์ƒ ์œ ํŠœ๋ธŒ ์˜์ƒ ์Šคํฌ๋ฆฝํŠธ ํ˜•ํƒœ๋กœ ์ž‘์„ฑํ•˜๋ผ"],
                        ["๋ณธ ์„œ๋น„์Šค๋ฅผ SEO ์ตœ์ ํ™”ํ•˜์—ฌ ๋ธ”๋กœ๊ทธ ํฌ์ŠคํŠธ(๋ฐฐ๊ฒฝ ๋ฐ ํ•„์š”์„ฑ, ๊ธฐ์กด ์œ ์‚ฌ ์„œ๋น„์Šค์™€ ๋น„๊ตํ•˜์—ฌ ํŠน์žฅ์ , ํ™œ์šฉ์ฒ˜, ๊ฐ€์น˜, ๊ธฐ๋Œ€ํšจ๊ณผ, ๊ฒฐ๋ก ์„ ํฌํ•จ)๋กœ 4000 ํ† ํฐ ์ด์ƒ ์ž‘์„ฑํ•˜๋ผ"],
                        ["ํŠนํ—ˆ ์ถœ์›์— ํ™œ์šฉํ•  ๊ธฐ์ˆ  ๋ฐ ๋น„์ฆˆ๋‹ˆ์Šค๋ชจ๋ธ ์ธก๋ฉด์„ ํฌํ•จํ•˜์—ฌ ํŠนํ—ˆ ์ถœ์›์„œ ๊ตฌ์„ฑ์— ๋งž๊ฒŒ ํ˜์‹ ์ ์ธ ์ฐฝ์˜ ๋ฐœ๋ช… ๋‚ด์šฉ์„ ์ค‘์‹ฌ์œผ๋กœ 4000ํ† ํฐ ์ด์ƒ ์ž‘์„ฑํ•˜๋ผ."],
                        ["๊ณ„์† ์ด์–ด์„œ ๋‹ต๋ณ€ํ•˜๋ผ"],
                    ]

                    gr.Examples(examples, inputs=msg)

                    def respond_wrapper(message, chat_history, max_tokens, temperature, top_p):
                        # NOTE(review): __anext__() consumes only the FIRST chunk
                        # of the async generator, so the chat displays a truncated
                        # (first-token) reply rather than the full stream — full
                        # streaming would need to iterate the generator; confirm
                        # whether this is intended.
                        bot_message = asyncio.run(respond_stream(message, chat_history, max_tokens, temperature, top_p).__anext__())
                        chat_history.append({"role": "user", "content": message})
                        chat_history.append({"role": "assistant", "content": bot_message})
                        return "", chat_history

                    msg.submit(respond_wrapper, [msg, chatbot, max_tokens, temperature, top_p], [msg, chatbot])

            # hidden state shared between the event handlers below
            space_id_state = gr.State()
            tree_structure_state = gr.State()
            app_py_content_lines = gr.State()

            analyze_button.click(
                analyze_space,
                inputs=[url_input],
                outputs=[app_py_content, tree_view_output, tree_structure_state, space_id_state, summary_output, analysis_output, usage_output, app_py_content_lines]
            ).then(
                # after the analysis completes, fetch requirements.txt into the second code tab
                lambda space_id: get_file_content(space_id, "requirements.txt"),
                inputs=[space_id_state],
                outputs=[requirements_content]
            )

            # dynamically resize the app.py viewer to the fetched file's line count
            app_py_content.change(lambda lines: gr.update(lines=lines), inputs=[app_py_content_lines], outputs=[app_py_content])

        return demo

    except Exception as e:
        print(f"Error in create_ui: {str(e)}")
        print(traceback.format_exc())
        raise
373
+
374
if __name__ == "__main__":
    try:
        # Build the UI, enable request queuing (needed for progress bars
        # and streaming handlers), then serve the app.
        print("Starting HuggingFace Space Analyzer...")
        demo = create_ui()
        print("UI created successfully.")

        print("Configuring Gradio queue...")
        demo.queue()
        print("Gradio queue configured.")

        print("Launching Gradio app...")
        demo.launch(
            server_name="0.0.0.0",  # listen on all interfaces (required inside Spaces/containers)
            server_port=7860,       # the standard Hugging Face Spaces port
            share=False,
            debug=True,
            show_api=False
        )
        print("Gradio app launched successfully.")
    except Exception as e:
        # surface the full traceback in the container logs before re-raising
        print(f"Error in main: {str(e)}")
        print("Detailed error information:")
        print(traceback.format_exc())
        raise