Maouu commited on
Commit
def63ae
·
verified ·
1 Parent(s): a57c7fe

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +101 -178
app.py CHANGED
@@ -1,182 +1,105 @@
1
- from fastapi import FastAPI, Request
2
- from fastapi.responses import StreamingResponse
3
- from fastapi.middleware.cors import CORSMiddleware
4
- from typing import List, Dict, Any, Optional
5
- from pydantic import BaseModel
6
- import asyncio
7
- import httpx
8
-
9
- from config import cookies, headers
10
- from prompts import ChiplingPrompts
11
-
12
- app = FastAPI()
13
-
14
- # Add CORS middleware
15
- app.add_middleware(
16
- CORSMiddleware,
17
- allow_origins=["*"],
18
- allow_credentials=True,
19
- allow_methods=["*"],
20
- allow_headers=["*"],
21
- )
22
-
23
- # Define request model
24
- class ChatRequest(BaseModel):
25
- message: str
26
- messages: List[Dict[Any, Any]]
27
- model: Optional[str] = "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"
28
-
29
- async def generate(json_data: Dict[str, Any]):
30
- max_retries = 5
31
- for attempt in range(max_retries):
32
- async with httpx.AsyncClient(timeout=None) as client:
33
- try:
34
- request_ctx = client.stream(
35
- "POST",
36
- "https://api.together.ai/inference",
37
- cookies=cookies,
38
- headers=headers,
39
- json=json_data
40
- )
41
-
42
- async with request_ctx as response:
43
- if response.status_code == 200:
44
- async for line in response.aiter_lines():
45
- if line:
46
- yield f"{line}\n"
47
- return
48
- elif response.status_code == 429:
49
- if attempt < max_retries - 1:
50
- await asyncio.sleep(0.5)
51
- continue
52
- yield "data: [Rate limited, max retries]\n\n"
53
- return
54
- else:
55
- yield f"data: [Unexpected status code: {response.status_code}]\n\n"
56
- return
57
- except Exception as e:
58
- yield f"data: [Connection error: {str(e)}]\n\n"
59
- return
60
-
61
- yield "data: [Max retries reached]\n\n"
62
-
63
- @app.get("/")
64
- async def index():
65
- return {"status": "ok"}
66
-
67
- @app.post("/chat")
68
- async def chat(request: ChatRequest):
69
- current_messages = request.messages.copy()
70
 
71
- # Handle both single text or list content
72
- if request.messages and isinstance(request.messages[-1].get('content'), list):
73
- current_messages = request.messages
74
- else:
75
- current_messages.append({
76
- 'content': [{
77
- 'type': 'text',
78
- 'text': request.message
79
- }],
80
- 'role': 'user'
81
- })
82
-
83
- json_data = {
84
- 'model': request.model,
85
- 'max_tokens': None,
86
- 'temperature': 0.7,
87
- 'top_p': 0.7,
88
- 'top_k': 50,
89
- 'repetition_penalty': 1,
90
- 'stream_tokens': True,
91
- 'stop': ['<|eot_id|>', '<|eom_id|>'],
92
- 'messages': current_messages,
93
- 'stream': True,
94
- }
95
-
96
- return StreamingResponse(generate(json_data), media_type='text/event-stream')
97
-
98
-
99
- @app.post("/generate-modules")
100
- async def generate_modules(request: Request):
101
- data = await request.json()
102
- search_query = data.get("searchQuery")
103
-
104
- if not search_query:
105
- return {"error": "searchQuery is required"}
106
-
107
- system_prompt = ChiplingPrompts.generateModules(search_query)
108
-
109
- current_messages = [
110
- {
111
- 'role': 'system',
112
- 'content': [{
113
- 'type': 'text',
114
- 'text': system_prompt
115
- }]
116
- },
117
- {
118
- 'role': 'user',
119
- 'content': [{
120
- 'type': 'text',
121
- 'text': search_query
122
- }]
123
- }
124
- ]
125
-
126
- json_data = {
127
- 'model': "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
128
- 'max_tokens': None,
129
- 'temperature': 0.7,
130
- 'top_p': 0.7,
131
- 'top_k': 50,
132
- 'repetition_penalty': 1,
133
- 'stream_tokens': True,
134
- 'stop': ['<|eot_id|>', '<|eom_id|>'],
135
- 'messages': current_messages,
136
- 'stream': True,
137
  }
138
 
139
- return StreamingResponse(generate(json_data), media_type='text/event-stream')
140
-
141
-
142
- @app.post("/generate-topics")
143
- async def generate_topics(request: Request):
144
- data = await request.json()
145
- search_query = data.get("searchQuery")
146
-
147
- if not search_query:
148
- return {"error": "searchQuery is required"}
149
-
150
- system_prompt = ChiplingPrompts.generateTopics(search_query)
151
-
152
- current_messages = [
153
- {
154
- 'role': 'system',
155
- 'content': [{
156
- 'type': 'text',
157
- 'text': system_prompt
158
- }]
159
- },
160
- {
161
- 'role': 'user',
162
- 'content': [{
163
- 'type': 'text',
164
- 'text': search_query
165
- }]
166
  }
167
- ]
168
-
169
- json_data = {
170
- 'model': "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
171
- 'max_tokens': None,
172
- 'temperature': 0.7,
173
- 'top_p': 0.7,
174
- 'top_k': 50,
175
- 'repetition_penalty': 1,
176
- 'stream_tokens': True,
177
- 'stop': ['<|eot_id|>', '<|eom_id|>'],
178
- 'messages': current_messages,
179
- 'stream': True,
180
- }
181
-
182
- return StreamingResponse(generate(json_data), media_type='text/event-stream')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from flask import Flask, jsonify
from flask_cors import CORS
from googleapiclient.discovery import build
import os
import random

app = Flask(__name__)
# Public read-only API: allow cross-origin requests from any origin.
CORS(app)

# SECURITY: the API key was previously hard-coded and committed to the
# repository -- it should be rotated. Prefer the environment variable; the
# literal fallback keeps existing deployments working unchanged.
API_KEY = os.environ.get('YOUTUBE_API_KEY', 'AIzaSyDO_RMlMevaTD5KHAeY415sKJLqWqrf0mQ')

# One shared YouTube Data API v3 client, reused by every route below.
youtube = build('youtube', 'v3', developerKey=API_KEY)
11
+
12
def get_video_info(video_id):
    """Look up one video by id; return its metadata dict, or None if unknown."""
    response = youtube.videos().list(
        part='snippet,statistics',
        id=video_id,
    ).execute()

    items = response['items']
    if not items:
        # The API returns an empty items list for invalid/deleted ids.
        return None

    video = items[0]
    snippet = video['snippet']
    stats = video['statistics']
    return {
        'title': snippet['title'],
        'views': stats.get('viewCount', 0),
        'likes': stats.get('likeCount', 0),
        'channel': snippet['channelTitle'],
        'video_id': video['id'],
        'thumbnail': snippet['thumbnails']['high']['url'],
    }
31
 
32
def get_trending_videos():
    """Return the current US most-popular chart (up to 50 videos) as dicts."""
    response = youtube.videos().list(
        part='snippet,statistics',
        chart='mostPopular',
        regionCode='US',
        maxResults=50,
    ).execute()

    # Same fields as get_video_info() so routes can mix the two helpers.
    return [
        {
            'title': item['snippet']['title'],
            'video_id': item['id'],
            'views': item['statistics'].get('viewCount', 0),
            'likes': item['statistics'].get('likeCount', 0),
            'channel': item['snippet']['channelTitle'],
            'thumbnail': item['snippet']['thumbnails']['high']['url'],
        }
        for item in response['items']
    ]
54
+
55
+
56
@app.route('/random')
def random_videos():
    """Return up to 5 randomly chosen videos from the US trending chart."""
    try:
        trending = get_trending_videos()
        sample_size = min(5, len(trending))
        return jsonify(random.sample(trending, sample_size))
    except Exception as e:
        # Surface API/client failures as a JSON 500, not an HTML error page.
        return jsonify({'error': str(e)}), 500
64
+
65
@app.route('/trending')
def trending_videos():
    """Return the full US trending chart as JSON."""
    try:
        return jsonify(get_trending_videos())
    except Exception as e:
        # Surface API/client failures as a JSON 500, not an HTML error page.
        return jsonify({'error': str(e)}), 500
72
+
73
@app.route('/info/<video_id>')
def video_info(video_id):
    """Return metadata for a single video; 404 when the id is unknown."""
    try:
        details = get_video_info(video_id)
        if details is None:
            return jsonify({'error': 'Video not found'}), 404
        return jsonify(details)
    except Exception as e:
        # Surface API/client failures as a JSON 500, not an HTML error page.
        return jsonify({'error': str(e)}), 500
82
+
83
@app.route('/search/<query>')
def search_videos(query):
    """Search YouTube for *query* and return details for the top 10 video hits.

    Fix: the previous version called get_video_info() once per search hit --
    an N+1 pattern costing 11 API requests (and quota) per search. All result
    ids are now batched into a single videos().list call; the response shape
    is identical to get_video_info()'s. Also stops shadowing the sibling
    video_info() route with a local variable of the same name.
    """
    try:
        search_response = youtube.search().list(
            part='snippet',
            q=query,
            type='video',
            maxResults=10,
        ).execute()

        video_ids = [item['id']['videoId'] for item in search_response['items']]
        if not video_ids:
            # No hits: empty JSON array, same as the old loop produced.
            return jsonify([])

        # One batched lookup: videos.list accepts a comma-separated id list.
        details_response = youtube.videos().list(
            part='snippet,statistics',
            id=','.join(video_ids),
        ).execute()

        videos = [
            {
                'title': item['snippet']['title'],
                'views': item['statistics'].get('viewCount', 0),
                'likes': item['statistics'].get('likeCount', 0),
                'channel': item['snippet']['channelTitle'],
                'video_id': item['id'],
                'thumbnail': item['snippet']['thumbnails']['high']['url'],
            }
            for item in details_response['items']
        ]
        return jsonify(videos)
    except Exception as e:
        # Surface API/client failures as a JSON 500, not an HTML error page.
        return jsonify({'error': str(e)}), 500
103
+
104
# Dev entry point only.
# NOTE(review): debug=True enables the Werkzeug debugger/auto-reloader --
# do not ship this to production; run under a WSGI server instead.
if __name__ == '__main__':
    app.run(debug=True)