Spaces:
Running
Running
File size: 2,800 Bytes
cc8a450 ae51d62 84f0cff 279839c b4f3263 d5114e6 ae51d62 782aa38 279839c ae51d62 d5114e6 cc8a450 ae51d62 d5114e6 ae51d62 d5114e6 782aa38 279839c ae51d62 1c1651d ae51d62 1c1651d b4f3263 1c1651d b4f3263 1c1651d b4f3263 1c1651d 84f0cff 279839c b4f3263 1c1651d 279839c b4f3263 279839c 782aa38 279839c b4f3263 782aa38 279839c 782aa38 279839c 782aa38 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 |
from fastapi import FastAPI, Response, status
from pydantic import BaseModel
from hypothesis import BaseModelHypothesis
from secondary_model_dependencies import SecondaryModelDependencies
from secondary_model import SecondaryModel
from main_model import PredictMainModel
import numpy as np
from typing import List
app = FastAPI()
class PredictRequest(BaseModel):
    """Schema for a single prediction instance sent to /predict.

    Carries the user's typed answer plus behavioral signals (keystrokes,
    timing) and two LLM reference answers used by the second-stage model.
    """
    question: str  # the prompt the user answered
    answer: str  # the user's typed answer; input to both models
    backspace_count: int  # number of backspaces while typing
    typing_duration: int  # presumably milliseconds or seconds — TODO confirm unit with client
    letter_click_counts: dict[str, int]  # per-letter keystroke counts
    gpt35_answer: str  # GPT-3.5 reference answer for the same question
    gpt4o_answer: str  # GPT-4o reference answer for the same question
class RequestModel(BaseModel):
    """Request body for /predict: a batch of instances scored independently."""
    instances: List[PredictRequest]  # one prediction is returned per instance, in order
@app.get("/health")
async def is_alive():
    """Liveness probe: reply 200 OK with an empty body."""
    ok = Response(status_code=status.HTTP_200_OK)
    return ok
@app.post("/predict")
async def predict(request: RequestModel):
    """Score every instance in the batch and return the results in order."""
    results = []
    for instance in request.instances:
        results.append(process_instance(instance))
    return {"predictions": results}
def process_instance(data: PredictRequest):
    """Run the two-stage AI/HUMAN classifier on one instance.

    Stage 1 scores the raw answer text; stage 2 combines that score with
    the behavioral signals and the LLM reference answers. Returns a dict
    with the predicted class, both probabilities, and a confidence label.
    """
    # Stage 1: text-only features -> main model probability.
    feature_builder = BaseModelHypothesis()
    text_features = feature_builder.calculate_features_dataframe(data.answer)
    first_stage = PredictMainModel()
    first_probability = first_stage.predict(data.answer, text_features)

    # Stage 2: fuse stage-1 output with behavioral + reference-answer signals.
    dependency_builder = SecondaryModelDependencies()
    fused_features = dependency_builder.calculate_features(
        data.answer, first_probability, data.backspace_count,
        data.typing_duration, data.letter_click_counts,
        data.gpt35_answer, data.gpt4o_answer)
    second_probability = SecondaryModel().predict(fused_features)

    # NOTE(review): main probability is serialized as a string while the
    # secondary one is numeric — preserved as-is for client compatibility.
    return {
        "predicted_class": "AI" if second_probability > 0.57 else "HUMAN",
        "main_model_probability": str(first_probability),
        "secondary_model_probability": second_probability,
        "confidence": get_confidence(first_probability, second_probability),
    }
def get_confidence(main_model_output: float, secondary_model_output: float) -> str:
    """Label how strongly the two model probabilities agree.

    Fix: ``secondary_model_output`` was annotated ``int`` but every caller
    passes a float probability compared against 0.57 — an int could only
    ever be 0 or 1, so the annotation was wrong.

    Args:
        main_model_output: first-stage model probability, expected in [0, 1].
        secondary_model_output: second-stage model probability, expected in [0, 1].

    Returns:
        'High Confidence' when both models are decisively on the same side,
        'Partially Confident' when the main model leans the same way the
        secondary model decided, 'Low Confidence' otherwise.
    """
    threshold = 0.57  # secondary model's decision boundary (matches process_instance)
    # Both models strongly agree: main is extreme AND secondary is past
    # its boundary on the same side (<= 1 - threshold mirrors the AI side).
    if (main_model_output >= 0.8 and secondary_model_output >= threshold) or \
            (main_model_output <= 0.2 and secondary_model_output <= 1 - threshold):
        return 'High Confidence'
    # Main model leans (but not strongly) the same way secondary decided.
    if (0.5 < main_model_output < 0.8 and secondary_model_output >= threshold) or \
            (0.2 < main_model_output <= 0.5 and secondary_model_output < threshold):
        return 'Partially Confident'
    # Disagreement or borderline values (e.g. main exactly 0.5 with a
    # positive secondary) fall through here.
    return 'Low Confidence'
|