# course_recommend/main.py
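"""Flask API for course, mentor, stream, certification, and competition recommendations.

Text generation is handled by Mixtral-8x7B-Instruct through the Hugging Face Inference
API; mentor data is read from a Neon Postgres database via SQLAlchemy.
"""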
from huggingface_hub import InferenceClient
import os
import json
from flask import Flask, request, jsonify
from flask_cors import CORS
from sqlalchemy import create_engine, Column, Integer, String, Boolean, ForeignKey, LargeBinary
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
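# Hugging Face token (from the environment) and inference client; Mixtral-8x7B-Instruct powers all endpoints below.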
HF_TOKEN = os.getenv("HF_TOKEN")
client = InferenceClient(model="mistralai/Mixtral-8x7B-Instruct-v0.1", token=HF_TOKEN)
connection_string = "postgresql://neondb_owner:[email protected]/neondb?sslmode=require"
Base = declarative_base()
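# ORM models mapping the existing 'streams' and 'mentors' tables.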
class Stream(Base):
__tablename__ = 'streams'
name = Column(String, primary_key=True, nullable=False)
class Mentor(Base):
__tablename__ = 'mentors'
id = Column(Integer, primary_key=True)
mentor_name = Column(String)
username = Column(String, unique=True)
profile_photo = Column(LargeBinary)
description = Column(String)
highest_degree = Column(String)
expertise = Column(String)
recent_project = Column(String)
meeting_time = Column(String)
fees = Column(String)
stream_name = Column(String, ForeignKey('streams.name'))
country = Column(String)
verified = Column(Boolean, default=False)
    stream = relationship("Stream", backref="mentors")
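# Database engine and session factory for the Postgres connection above.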
engine = create_engine(connection_string)
Session = sessionmaker(bind=engine)
app = Flask(__name__)
CORS(app)
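# Landing route: a simple welcome message that doubles as a health check.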
@app.route('/')
def home():
return jsonify({"message": "Welcome to the Recommendation API!"})
def format_prompt(message):
    # Wrap the message in Mixtral's [INST]...[/INST] chat format, primed with a fixed placeholder exchange
    user_prompt = "UserPrompt"
    bot_response = "BotResponse"
return f"<s>[INST] {user_prompt} [/INST] {bot_response}</s> [INST] {message} [/INST]"
@app.route('/ai_mentor', methods=['POST'])
def ai_mentor():
data = request.get_json()
message = data.get('message')
if not message:
return jsonify({"message": "Missing message"}), 400
temperature = 0.9
max_new_tokens = 256
top_p = 0.95
repetition_penalty = 1.0
generate_kwargs = dict(
temperature=temperature,
max_new_tokens=max_new_tokens,
top_p=top_p,
repetition_penalty=repetition_penalty,
do_sample=True,
seed=42,
)
# Define prompt for the conversation
prompt = f""" prompt:
You are an AI mentor providing concise and complete responses. Answer the user's question clearly and in a few sentences.
User: {message}"""
formatted_prompt = format_prompt(prompt)
try:
# Generate response from the Language Model
response = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, details=False, return_full_text=False)
return jsonify({"response": response}), 200
except Exception as e:
return jsonify({"message": f"Failed to process request: {str(e)}"}), 500
@app.route('/get_course', methods=['POST'])
def get_course():
temperature = 0.9
max_new_tokens = 256
top_p = 0.95
repetition_penalty = 1.0
content = request.json
    # user_degree = content.get('degree')  # Uncomment to also use the student's degree
user_stream = content.get('stream')
generate_kwargs = dict(
temperature=temperature,
max_new_tokens=max_new_tokens,
top_p=top_p,
repetition_penalty=repetition_penalty,
do_sample=True,
seed=42,
)
prompt = f""" prompt:
    You need to act as a recommendation engine for degree recommendations for a student. Below are the current details.
    Stream: {user_stream}
    Based on these details, recommend degrees for higher education.
    Note: The output should be a list in the format below:
    [course1, course2, course3,...]
    Return only the answer, not the prompt or other unnecessary text, and don't add any special characters or punctuation marks.
"""
formatted_prompt = format_prompt(prompt)
stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, details=False, return_full_text=False)
return jsonify({"ans": stream})
@app.route('/get_mentor', methods=['POST'])
def get_mentor():
temperature = 0.9
max_new_tokens = 256
top_p = 0.95
repetition_penalty = 1.0
content = request.json
user_stream = content.get('stream')
session = Session()
# Query verified mentors
verified_mentors = session.query(Mentor).filter_by(verified=True).all()
mentor_list = []
for mentor in verified_mentors:
mentor_info = {
"id": mentor.id,
"mentor_name": mentor.mentor_name,
"profile_photo": mentor.profile_photo.decode('utf-8'), # Decode binary photo to string
"description": mentor.description,
"highest_degree": mentor.highest_degree,
"expertise": mentor.expertise,
"recent_project": mentor.recent_project,
"meeting_time": mentor.meeting_time,
"fees": mentor.fees,
"stream": mentor.stream,
"country": mentor.country,
"verified": mentor.verified
}
mentor_list.append(mentor_info)
session.close()
    mentors_data = mentor_list
temperature = float(temperature)
if temperature < 1e-2:
temperature = 1e-2
top_p = float(top_p)
generate_kwargs = dict(
temperature=temperature,
max_new_tokens=max_new_tokens,
top_p=top_p,
repetition_penalty=repetition_penalty,
do_sample=True,
seed=42,
)
prompt = f""" prompt:
You need to act as a recommendation engine for mentor recommendations based on the student's stream and a list of available mentors.
Stream: {user_stream}
Mentor list: {mentors_data}
    Based on the provided details, recommend mentors who relate to the student's stream. Don't choose mentors outside the mentor list.
    Note: The output should be a valid list containing only mentor names from the attached mentor list. Don't give unnecessary explanations or additional details.
"""
formatted_prompt = format_prompt(prompt)
stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, details=False, return_full_text=False)
return jsonify({"ans": stream})
@app.route('/get_streams', methods=['GET'])
def get_streams():
temperature = 0.9
max_new_tokens = 256
top_p = 0.95
repetition_penalty = 1.0
generate_kwargs = dict(
temperature=temperature,
max_new_tokens=max_new_tokens,
top_p=top_p,
repetition_penalty=repetition_penalty,
do_sample=True,
seed=42,
)
prompt = """
You are a recommendation engine.
List at least 40 branches of study (e.g., Computer Science, Chemical Engineering, Aerospace).
**Output should be a valid JSON array with double quotes, like this:**
["Computer Science", "Chemical Engineering", "Aerospace", ...]
Do not add extra text, explanations, or newlines.
"""
formatted_prompt = format_prompt(prompt)
stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, details=False, return_full_text=False)
try:
# Ensure the model's response is a valid JSON array
cleaned_data = stream.strip()
# Fix incomplete or malformed JSON
if not cleaned_data.startswith("[") or not cleaned_data.endswith("]"):
cleaned_data = cleaned_data.split("[", 1)[-1] # Keep text after first [
cleaned_data = "[" + cleaned_data # Add missing opening bracket
cleaned_data = cleaned_data.rsplit("]", 1)[0] # Keep text before last ]
cleaned_data = cleaned_data + "]" # Add missing closing bracket
# Parse JSON safely
parsed_data = json.loads(cleaned_data)
if not isinstance(parsed_data, list): # Ensure it's a list
raise ValueError("Response is not a valid list")
return jsonify({"ans": parsed_data}) # Return clean JSON list
except Exception as e:
return jsonify({"error": "Invalid response format", "details": str(e)})
@app.route('/get_education_profiles', methods=['GET'])
def get_education_profiles():
temperature = 0.9
max_new_tokens = 256
top_p = 0.95
repetition_penalty = 1.0
generate_kwargs = dict(
temperature=temperature,
max_new_tokens=max_new_tokens,
top_p=top_p,
repetition_penalty=repetition_penalty,
do_sample=True,
seed=42,
)
sectors = ["engineering", "medical", "arts", "commerce", "science", "management"] # Example sectors
prompt = f"""prompt:
    You need to act as a recommendation engine.
    List all education-related profiles in sectors like {', '.join(sectors)}.
    Note: The output should be a list in the format below:
    [profile1, profile2, profile3,...]
    Return only the answer, not the prompt or other unnecessary text, and don't add any special characters or punctuation marks.
"""
formatted_prompt = format_prompt(prompt)
education_profiles = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, details=False, return_full_text=False)
return jsonify({"ans": education_profiles})
@app.route('/get_certificate', methods=['POST'])
def get_certificate():
temperature = 0.9
max_new_tokens = 256
top_p = 0.95
repetition_penalty = 1.0
content = request.json
    # user_degree = content.get('degree')  # Uncomment to also use the student's degree
user_stream = content.get('stream')
generate_kwargs = dict(
temperature=temperature,
max_new_tokens=max_new_tokens,
top_p=top_p,
repetition_penalty=repetition_penalty,
do_sample=True,
seed=42,
)
prompt = f""" prompt:
    You need to act as a recommendation engine for certification recommendations for a student. Below are the current details.
    Stream: {user_stream}
    Based on these details, recommend certifications.
    Note: The output should be a list in the format below:
    [certification1, certification2, certification3,...]
    Return only the answer, not the prompt or other unnecessary text, and don't add any special characters or punctuation marks.
"""
formatted_prompt = format_prompt(prompt)
stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, details=False, return_full_text=False)
return jsonify({"ans": stream})
@app.route('/get_three_streams', methods=['POST'])
def get_three_streams():
temperature = 0.9
max_new_tokens = 256
top_p = 0.95
repetition_penalty = 1.0
content = request.json
    user_degree = content.get('degree')
generate_kwargs = dict(
temperature=temperature,
max_new_tokens=max_new_tokens,
top_p=top_p,
repetition_penalty=repetition_penalty,
do_sample=True,
seed=42,
)
prompt = f""" prompt:
    You need to act as a recommendation engine for stream recommendations for a student, based on the user's degree. Below are the details.
    Degree: {user_degree}
    Based on the degree above, recommend only 3 streams.
    Note: The output should be a list in the format below:
    [stream1, stream2, stream3]
    Return only the answer, not the prompt or other unnecessary text, and don't add any special characters or punctuation marks.
"""
formatted_prompt = format_prompt(prompt)
stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, details=False, return_full_text=False)
return jsonify({"ans": stream})
@app.route('/get_competition', methods=['POST'])
def get_competition():
temperature = 0.9
max_new_tokens = 256
top_p = 0.95
repetition_penalty = 1.0
content = request.json
    # user_degree = content.get('degree')  # Uncomment to also use the student's degree
user_stream = content.get('stream')
generate_kwargs = dict(
temperature=temperature,
max_new_tokens=max_new_tokens,
top_p=top_p,
repetition_penalty=repetition_penalty,
do_sample=True,
seed=42,
)
prompt = f""" prompt:
    You need to act as a recommendation engine for competition recommendations for a student. Below are the current details.
    Stream: {user_stream}
    Based on these details, recommend competitions.
    Note: The output should be a list in the format below:
    [competition1, competition2, competition3,...]
    Return only the answer, not the prompt or other unnecessary text, and don't add any special characters or punctuation marks.
"""
formatted_prompt = format_prompt(prompt)
stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, details=False, return_full_text=False)
return jsonify({"ans": stream})
def validate_field(field_name, field_value):
prompt = f"Check if the following {field_name} is valid: {field_value}. Return 'true' if valid, else 'false'."
response = client.text_generation(prompt)
return "true" in response.lower()
@app.route("/validate", methods=["POST"])
def validate():
data = request.json
if not data:
return jsonify({"error": "No data provided"}), 400
validation_results = {}
for field, value in data.items():
validation_results[field] = validate_field(field, value)
return jsonify(validation_results)
if __name__ == '__main__':
app.run(debug=True)
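
# Example client call (a minimal sketch, assuming the app runs on the default Flask dev
# server at http://127.0.0.1:5000; the payload keys match the routes defined above):
#
#   import requests
#   resp = requests.post("http://127.0.0.1:5000/get_course", json={"stream": "Computer Science"})
#   print(resp.json())  # e.g. {"ans": "..."}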