ONDC_test / main.py
Witcape's picture
Upload 2 files
e62366f verified
# -*- coding: utf-8 -*-
"""Main.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/github/payal15604/ONDC-Test/blob/main/Main.ipynb
"""
# Commented out IPython magic to ensure Python compatibility.
!pip install git+https://github.com/PrithivirajDamodaran/ZSIC.git
!pip install transformers -U
!pip install pyDecision
!git clone https://github.com/payal15604/ONDC-Test
# %cd ONDC-Test
from zero_shot_text import zero_shot_text as ztext
from zero_shot_image import zero_shot_image as zimg
from text_summarizer import text_summarizer as t_sum
from similarity_scoring import calculate_similarity as c_sim
import numpy as np
"""JSON FILE WILL COME"""
import json
import requests
# URL of the local API endpoint that serves the product data to be scored.
url = "http://localhost:4500/api/datasender"
def parse_json(json_data):
    """Parse a JSON string into a Python object.

    Parameters
    ----------
    json_data : str
        Raw JSON text (e.g. an HTTP response body).

    Returns
    -------
    dict | list | None
        The decoded JSON value, or None when the text is not valid JSON.
    """
    try:
        # BUG FIX: the original rewrote "true"/"false" to "True"/"False"
        # before parsing.  json.loads only accepts lowercase JSON boolean
        # literals, so that substitution made any payload containing a
        # boolean fail to parse (and corrupted string values that happened
        # to contain "true"/"false").  json.loads already maps the JSON
        # literals to Python True/False on its own.
        return json.loads(json_data)
    except json.JSONDecodeError:
        print("Error: Invalid JSON data")
        return None
# Safe defaults so the scoring code further down does not hit a NameError
# when the request fails, the payload is invalid, or no items are returned.
name = ""
short_disc = ""
long_disc = ""
image = []
symbol = ""

try:
    # Send a POST request to the API endpoint
    response = requests.post(url)
    # Check if the request was successful (status code 200)
    if response.status_code == 200:
        # Parse the JSON body of the response
        parsed_data = parse_json(response.text)
        if parsed_data:
            # Process each received item.  NOTE(review): the loop rebinds
            # the module-level fields each iteration, so only the LAST
            # item's fields survive for the scoring section below.
            for item in parsed_data.get("Received data", []):
                name = item.get("name", "")
                short_disc = item.get("short_desc", "")
                long_disc = item.get("long_desc", "")
                image = item.get("images", [])
                symbol = item.get("symbol", "")
                # Example: Print the fields of each item
                print("Name:", name)
                print("Short Description:", short_disc)
                print("Long Description:", long_disc)
                print("Images:", image)
                print("Symbol:", symbol)
                # TODO: Process the fields in your ML model
        else:
            print("Error: Failed to parse JSON data")
    else:
        # Handle the case where the request was not successful
        print("Error: Failed to fetch data from the API. Status code:", response.status_code)
except Exception as e:
    # Handle any exceptions that occur during the request
    print("Error:", e)
# Candidate product categories for zero-shot classification.
labels = ['coffee','tea','shampoo','face serum','bread','honey','soap','biscuit','milk','chocolate','juice']
# NOTE(review): removed `result = ztext(name, labels)` — its value was never
# read (the scoring helpers recompute it), it duplicated an expensive model
# call, and it crashed when the API fetch above produced no `name`.
def zero_shot_text_formatted(text, labels):
    """Run zero-shot classification on *text* and return the top prediction
    formatted as ``"<label> = <score>"`` (also printed as a side effect)."""
    prediction = ztext(text, labels)
    top_label = prediction['labels'][0]
    top_score = prediction['scores'][0]
    summary = f"{top_label} = {top_score}"
    print(summary)
    return summary
def zero_shot_image_formatted(img, labels):
    """Run zero-shot classification on image *img* and return the top
    prediction formatted as ``"<label> = <score>"`` (also printed)."""
    prediction = zimg(img, labels)
    best = f"{prediction['labels'][0]} = {prediction['scores'][0]}"
    print(best)
    return best
# temp_name_score = zero_shot_text_formatted(name, labels)
# temp_sdisc_score = zero_shot_text_formatted(short_disc, labels)
# temp_ldisc_score = zero_shot_text_formatted(long_disc, labels)
# name_sdisc_score = c_sim(temp_name_score, temp_sdisc_score, model="en_core_web_sm")
# name_ldisc_score = c_sim(temp_name_score, temp_ldisc_score, model="en_core_web_sm")
# name_sldisc_score = c_sim(temp_sdisc_score, temp_ldisc_score, model="en_core_web_sm")
# print(name_sdisc_score)
# print(name_ldisc_score)
# print(name_sldisc_score)
# temp_summary = t_sum(long_disc)
# print(temp_summary)
# print(temp_name_score)
# print(temp_sdisc_score)
# print(temp_ldisc_score)
# print(temp_summary)
"""## **SCORING**"""
def name_disc_score(name, short_disc, long_disc, labels):
    """Return the pairwise similarity of the zero-shot predictions for the
    product name, short description and long description.

    Returns a 3-tuple: (name vs short, name vs long, short vs long).
    """
    n_pred, sd_pred, ld_pred = (
        zero_shot_text_formatted(field, labels)
        for field in (name, short_disc, long_disc)
    )
    return (
        c_sim(n_pred, sd_pred, model="en_core_web_sm"),
        c_sim(n_pred, ld_pred, model="en_core_web_sm"),
        c_sim(sd_pred, ld_pred, model="en_core_web_sm"),
    )
def name_symbol_score(name, symbol, labels):
    """Similarity between the zero-shot prediction for the product name and
    the zero-shot prediction for its symbol image."""
    text_pred = zero_shot_text_formatted(name, labels)
    symbol_pred = zero_shot_image_formatted(symbol, labels)
    return c_sim(text_pred, symbol_pred, model="en_core_web_sm")
def name_image_score(name, image_list, labels):
    """Average similarity between the product-name prediction and the
    prediction for each product image.

    Parameters
    ----------
    name : str
        Product name to classify.
    image_list : list
        Product images (whatever ``zimg`` accepts — paths or URLs; confirm
        against the caller).
    labels : list[str]
        Candidate categories for zero-shot classification.

    Returns
    -------
    float
        Mean similarity score, or 0.0 for an empty image list (the
        original divided by zero here).
    """
    # BUG FIX: guard against an empty list — `sum(...) / len(image_list)`
    # raised ZeroDivisionError when the API returned no images.
    if not image_list:
        return 0.0
    n_compute = zero_shot_text_formatted(name, labels)
    scores = []
    for img in image_list:
        i_compute = zero_shot_image_formatted(img, labels)
        # CONSISTENCY: pin the spaCy model explicitly, matching
        # name_disc_score and name_symbol_score (the original omitted it
        # only here).
        scores.append(c_sim(n_compute, i_compute, model="en_core_web_sm"))
    return sum(scores) / len(scores)  # average over all images
# Text-based pairwise scores and their mean.
N_Sd_score, N_Ld_score, Sd_Ld_score = name_disc_score(name, short_disc, long_disc, labels)
print((N_Sd_score + N_Ld_score + Sd_Ld_score) / 3)

N_S_score = name_symbol_score(name, symbol, labels)
# BUG FIX: the image score previously rebound N_S_score, silently discarding
# the symbol score computed on the line above; give it its own name and
# print both.
N_I_score = name_image_score(name, image, labels)
print(N_S_score)
print(N_I_score)
# Required Libraries
import numpy as np
from pyDecision.algorithm import topsis_method
# TOPSIS
def topsis(name, long_disc, short_disc, image, symbol, labels):
    """Rank this product (row 0) against two hard-coded demo rows using the
    TOPSIS multi-criteria decision method.

    Criteria columns: symbol, image, name/short-desc, name/long-desc,
    short/long-desc similarity scores — all benefit-type ('max').
    Returns the relative-closeness array from pyDecision's topsis_method.
    """
    # Equal manual weights for the five criteria
    # (earlier experiment used [0.3, 0.25, 0.2, 0.1, 0.15]).
    weights = [0.2, 0.2, 0.2, 0.2, 0.2]
    # Higher is better for every criterion.
    criterion_type = ['max'] * 5

    n_i_score = name_image_score(name, image, labels)
    n_sd_score, n_ld_score, sd_ld_score = name_disc_score(name, short_disc, long_disc, labels)
    n_s_score = name_symbol_score(name, symbol, labels)

    dataset = np.array([
        [n_s_score, n_i_score, n_sd_score, n_ld_score, sd_ld_score],  # this product
        [0.87, 0.9, 0.47, 0.46, 0.5],                                 # demo data
        [1, 0.67, 0.57, 0.56, 0.8],                                   # demo data
    ])
    return topsis_method(dataset, weights, criterion_type, graph=False, verbose=True)
# Score the fetched product with TOPSIS and report the relative closeness
# of each dataset row to the ideal solution.
relative_closeness=topsis(name, long_disc, short_disc, image, symbol,labels)
print(relative_closeness)
## Call TOPSIS
#relative_closeness = topsis_method(dataset, weights, criterion_type, graph = False, verbose = True)