Spaces:
Running
Running
Update main.py
Browse files
main.py
CHANGED
@@ -30,7 +30,7 @@ def format_prompt(message):
|
|
30 |
return f"<s>[INST] {user_prompt} [/INST] {bot_response}</s> [INST] {message} [/INST]"
|
31 |
|
32 |
|
33 |
-
@app.route('/ai_mentor', methods=['
|
34 |
def ai_mentor():
|
35 |
data = request.get_json()
|
36 |
message = data.get('message')
|
@@ -68,7 +68,7 @@ def ai_mentor():
|
|
68 |
return jsonify({"message": f"Failed to process request: {str(e)}"}), 500
|
69 |
|
70 |
|
71 |
-
@app.route('/get_course', methods=['
|
72 |
def get_course():
|
73 |
temperature = 0.9
|
74 |
max_new_tokens = 256
|
@@ -104,7 +104,7 @@ def get_course():
|
|
104 |
return jsonify({"ans": stream})
|
105 |
|
106 |
|
107 |
-
@app.route('/get_mentor', methods=['
|
108 |
def get_mentor():
|
109 |
temperature = 0.9
|
110 |
max_new_tokens = 256
|
@@ -159,6 +159,40 @@ def get_mentor():
|
|
159 |
stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, details=False, return_full_text=False)
|
160 |
return jsonify({"ans": stream})
|
161 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
162 |
|
|
|
|
|
|
|
|
|
163 |
if __name__ == '__main__':
|
164 |
app.run(debug=True)
|
|
|
30 |
return f"<s>[INST] {user_prompt} [/INST] {bot_response}</s> [INST] {message} [/INST]"
|
31 |
|
32 |
|
33 |
+
@app.route('/ai_mentor', methods=['GET'])
|
34 |
def ai_mentor():
|
35 |
data = request.get_json()
|
36 |
message = data.get('message')
|
|
|
68 |
return jsonify({"message": f"Failed to process request: {str(e)}"}), 500
|
69 |
|
70 |
|
71 |
+
@app.route('/get_course', methods=['GET'])
|
72 |
def get_course():
|
73 |
temperature = 0.9
|
74 |
max_new_tokens = 256
|
|
|
104 |
return jsonify({"ans": stream})
|
105 |
|
106 |
|
107 |
+
@app.route('/get_mentor', methods=['GET'])
|
108 |
def get_mentor():
|
109 |
temperature = 0.9
|
110 |
max_new_tokens = 256
|
|
|
159 |
stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, details=False, return_full_text=False)
|
160 |
return jsonify({"ans": stream})
|
161 |
|
162 |
+
@app.route('/get_streams', methods=['GET'])
def get_streams():
    """List 40+ streams/branches for the degree supplied by the caller.

    Expects a JSON payload containing a 'degree' key and returns
    ``{"ans": <model text>}``.  On any generation failure it returns a
    JSON error message with HTTP 500, matching the /ai_mentor contract.

    NOTE(review): the route is registered for GET but reads a JSON body
    via ``request.json`` — callers must send a body with a GET request;
    confirm this is intentional before changing the method list.
    """
    # Generation hyperparameters — same fixed defaults as the other routes.
    temperature = 0.9
    max_new_tokens = 256
    top_p = 0.95
    repetition_penalty = 1.0

    content = request.json
    user_degree = content.get('degree')
    #user_stream = content.get('stream')      # read but never used in the prompt
    #user_semester = content.get('semester')

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,  # fixed seed so repeated requests are reproducible
    )
    prompt = f""" prompt:
    You need to act like as recommendation engine.
    List all 40+ streams/branches in below degree
    Degree: {user_degree}
    Note: Output should be list in below format:
    [course1, course2, course3,...]
    Return only answer not prompt and unnecessary stuff, also dont add any special characters or punctuation marks
    """
    formatted_prompt = format_prompt(prompt)

    try:
        # stream=False returns the full completion string, not a generator,
        # despite the variable name used throughout this file.
        stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, details=False, return_full_text=False)
        return jsonify({"ans": stream})
    except Exception as e:
        # Mirror the error contract used by the /ai_mentor route.
        return jsonify({"message": f"Failed to process request: {str(e)}"}), 500
|
195 |
+
|
196 |
+
|
197 |
# Start the Flask development server only when this file is executed
# directly (debug=True is for local development use).
if __name__ == '__main__':
    app.run(debug=True)
|