from typing import List

import numpy as np
from fastapi import FastAPI, Response, status
from pydantic import BaseModel

from hypothesis import BaseModelHypothesis
from main_model import PredictMainModel
from secondary_model import SecondaryModel
from secondary_model_dependencies import SecondaryModelDependencies

app = FastAPI()
class PredictRequest(BaseModel):
    answer: str
    backspace_count: int
    letter_click_counts: dict[str, int]
    gpt4o_answer: str


class RequestModel(BaseModel):
    instances: List[PredictRequest]
@app.get("/health")
async def is_alive():
return Response(status_code=status.HTTP_200_OK)
@app.post("/predict")
async def predict(request: RequestModel):
responses = [process_instance(data) for data in request.instances]
return {"predictions": responses}
def process_instance(data: PredictRequest):
    answer = data.answer
    backspace_count = data.backspace_count
    letter_click_counts = data.letter_click_counts
    gpt4o_answer = data.gpt4o_answer

    # Data preparation for 1st model
    hypothesis = BaseModelHypothesis()
    additional_features = hypothesis.calculate_features_dataframe(answer)

    # 1st model prediction
    main_model = PredictMainModel()
    main_model_probability = main_model.predict(answer, additional_features)

    # Data preparation for 2nd model
    secondary_model_dependencies = SecondaryModelDependencies()
    secondary_model_features = secondary_model_dependencies.calculate_features(
        answer, main_model_probability, backspace_count,
        letter_click_counts, gpt4o_answer)

    # 2nd model prediction
    secondary_model = SecondaryModel()
    secondary_model_probability = secondary_model.predict(secondary_model_features)

    second_model_threshold = 0.54
    return {
        "predicted_class": "AI" if secondary_model_probability > second_model_threshold else "HUMAN",
        "main_model_probability": str(main_model_probability),
        "secondary_model_probability": str(secondary_model_probability),
        "confidence": get_confidence(
            main_model_probability, secondary_model_probability, second_model_threshold),
    }
def get_confidence(main_model_output: float, secondary_model_output: float, threshold: float) -> str:
    if (main_model_output >= 0.8 and secondary_model_output >= threshold) or (main_model_output <= 0.2 and secondary_model_output <= 1 - threshold):
        return 'High Confidence'
    elif (0.5 < main_model_output < 0.8 and secondary_model_output >= threshold) or (0.2 < main_model_output <= 0.5 and secondary_model_output < threshold):
        return 'Partially Confident'
    else:
        return 'Low Confidence'
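# Minimal local-run sketch (an assumption, not part of the original deployment;
# it presumes uvicorn is installed alongside FastAPI). Once running, the API can
# be exercised with e.g.:
#   curl -X POST http://127.0.0.1:8000/predict \
#        -H "Content-Type: application/json" \
#        -d '{"instances": [{"answer": "...", "backspace_count": 3, "letter_click_counts": {"a": 1}, "gpt4o_answer": "..."}]}'
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)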