|
|
|
"""Main.ipynb |
|
|
|
Automatically generated by Colaboratory. |
|
|
|
Original file is located at |
|
https://colab.research.google.com/github/payal15604/ONDC-Test/blob/main/Main.ipynb |
|
""" |
|
|
|
|
|
!pip install git+https://github.com/PrithivirajDamodaran/ZSIC.git |
|
!pip install transformers -U |
|
!pip install pyDecision |
|
!git clone https://github.com/payal15604/ONDC-Test |
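"""The zero-shot and similarity helpers imported below are assumed to live at the
root of the cloned ONDC-Test repository; if so, the clone directory has to be on
the import path (the original notebook may have relied on the working directory
instead)."""

import sys

# Assumption: zero_shot_text.py, zero_shot_image.py, text_summarizer.py and
# similarity_scoring.py sit directly inside the cloned folder.
sys.path.append("ONDC-Test")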
|
|
|
|
|
from zero_shot_text import zero_shot_text as ztext |
|
from zero_shot_image import zero_shot_image as zimg |
|
|
|
from text_summarizer import text_summarizer as t_sum  # imported but not used further in this notebook
|
from similarity_scoring import calculate_similarity as c_sim |
|
|
|
import numpy as np |
|
|
|
"""JSON FILE WILL COME""" |
|
|
|
import json |
|
import requests |
|
|
|
|
|
url = "http://localhost:4500/api/datasender" |
|
|
|
def parse_json(json_data):
    try:
        # Normalise Python-style booleans to valid JSON; json.loads only accepts
        # lowercase true/false, so the replacement must go in this direction.
        json_data = json_data.replace("True", "true").replace("False", "false")

        parsed_json = json.loads(json_data)
        return parsed_json
    except json.JSONDecodeError:
        print("Error: Invalid JSON data")
        return None
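"""A minimal sketch of the payload shape `parse_json` is expected to handle,
inferred from the keys read in the loop below. Every value here is a placeholder,
not real output from the /api/datasender endpoint."""

_example_payload = '''
{
  "Received data": [
    {
      "name": "Wildflower Honey 500g",
      "short_desc": "Raw, unfiltered honey",
      "long_desc": "Single-origin wildflower honey, cold extracted and unpasteurised.",
      "images": ["https://example.com/honey_front.jpg"],
      "symbol": "https://example.com/brand_logo.png"
    }
  ]
}
'''
# parse_json(_example_payload) would return the corresponding dict, or None on malformed JSON.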
|
|
|
try:
    # The endpoint responds to a body-less POST with the product records to score.
    response = requests.post(url)

    if response.status_code == 200:
        received_data = response.text
        parsed_data = parse_json(received_data)

        if parsed_data:
            # NOTE: these variables are overwritten on every iteration, so the
            # scoring cells further down operate on the last item in the payload.
            for item in parsed_data.get("Received data", []):
                name = item.get("name", "")
                short_disc = item.get("short_desc", "")
                long_disc = item.get("long_desc", "")
                image = item.get("images", [])
                symbol = item.get("symbol", "")

                print("Name:", name)
                print("Short Description:", short_disc)
                print("Long Description:", long_disc)
                print("Images:", image)
                print("Symbol:", symbol)
        else:
            print("Error: Failed to parse JSON data")

    else:
        print("Error: Failed to fetch data from the API. Status code:", response.status_code)

except Exception as e:
    print("Error:", e)
|
labels = ['coffee','tea','shampoo','face serum','bread','honey','soap','biscuit','milk','chocolate','juice'] |
|
|
|
# Quick sanity check of the zero-shot text classifier on the last item's name;
# this result is not reused below.
result = ztext(name, labels)
|
|
|
def zero_shot_text_formatted(text, labels): |
|
result = ztext(text, labels) |
|
|
|
temp_text_sequence = result['labels'] |
|
temp_text_scores = result['scores'] |
|
formatted_output = f"{temp_text_sequence[0]} = {temp_text_scores[0]}" |
|
|
|
print(formatted_output) |
|
return formatted_output |
|
|
|
def zero_shot_image_formatted(img, labels): |
|
result = zimg(img, labels) |
|
|
|
result_score = result['scores'][0] |
|
result_label = result['labels'][0] |
|
img_res = f"{result_label} = {result_score}" |
|
|
|
print(img_res) |
|
return img_res |
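"""Both helpers above reduce a zero-shot result to a single "top_label = score"
string, which is what the similarity scoring below compares. Illustrative calls,
assuming `ztext`/`zimg` return Hugging Face-style {'labels': [...], 'scores': [...]}
dicts; the example text, image path and scores are placeholders, not real model output."""

# zero_shot_text_formatted("instant filter coffee", labels)    # -> e.g. "coffee = 0.91"
# zero_shot_image_formatted("ONDC-Test/sample.jpg", labels)    # -> e.g. "coffee = 0.87"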
|
|
|
"""## **SCORING**""" |
|
|
|
def name_disc_score(name, short_disc, long_disc, labels): |
|
n_compute = zero_shot_text_formatted(name, labels) |
|
sd_compute = zero_shot_text_formatted(short_disc, labels) |
|
ld_compute = zero_shot_text_formatted(long_disc, labels) |
|
|
|
n_sd_score = c_sim(n_compute, sd_compute, model="en_core_web_sm") |
|
n_ld_score = c_sim(n_compute, ld_compute, model="en_core_web_sm") |
|
sd_ld_score = c_sim(sd_compute, ld_compute, model="en_core_web_sm") |
|
|
|
return n_sd_score, n_ld_score, sd_ld_score |
|
|
|
def name_symbol_score(name, symbol, labels): |
|
n_compute = zero_shot_text_formatted(name, labels) |
|
s_compute = zero_shot_image_formatted(symbol, labels) |
|
|
|
n_s_score = c_sim(n_compute, s_compute, model="en_core_web_sm") |
|
|
|
return n_s_score |
|
|
|
def name_image_score(name, image_list, labels):
    n_compute = zero_shot_text_formatted(name, labels)
    n_i_scores = []

    # Score each product image against the name and average the results.
    for img in image_list:
        i_compute = zero_shot_image_formatted(img, labels)
        n_i_score = c_sim(n_compute, i_compute, model="en_core_web_sm")
        n_i_scores.append(n_i_score)

    # Guard against products with no images.
    if not n_i_scores:
        return 0.0
    return sum(n_i_scores) / len(n_i_scores)
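"""The three scoring helpers above compare the formatted "label = score" strings
with `calculate_similarity` from the cloned ONDC-Test repository. The sketch below
is only an assumption of what a spaCy-based helper of that shape could look like;
the real implementation may differ."""

import spacy

def _calculate_similarity_sketch(text_a, text_b, model="en_core_web_sm"):
    # Load the small English pipeline and compare the two strings via
    # vector-based document similarity (a float, roughly in [0, 1]).
    nlp = spacy.load(model)
    return nlp(text_a).similarity(nlp(text_b))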
|
|
|
N_Sd_score, N_Ld_score, Sd_Ld_score = name_disc_score(name, short_disc, long_disc, labels) |
|
|
|
print((N_Sd_score + N_Ld_score + Sd_Ld_score)/3) |
|
|
|
N_S_score = name_symbol_score(name, symbol, labels)
print(N_S_score)

N_I_score = name_image_score(name, image, labels)
print(N_I_score)
|
|
|
|
|
import numpy as np |
|
|
|
from pyDecision.algorithm import topsis_method |
|
|
|
|
|
|
|
|
|
def topsis(name, long_disc, short_disc, image, symbol, labels):
    # Equal weights for the five pairwise consistency scores.
    weights = [0.2, 0.2, 0.2, 0.2, 0.2]

    N_i_score = name_image_score(name, image, labels)
    N_Sd_score, N_Ld_score, Sd_Ld_score = name_disc_score(name, short_disc, long_disc, labels)
    N_S_score = name_symbol_score(name, symbol, labels)

    # All criteria are benefit criteria: higher consistency is better.
    criterion_type = ['max', 'max', 'max', 'max', 'max']

    # Row 0 holds the product under evaluation; the two hard-coded rows serve as
    # fixed comparison alternatives for the TOPSIS ranking.
    dataset = np.array([
        [N_S_score, N_i_score, N_Sd_score, N_Ld_score, Sd_Ld_score],
        [0.87, 0.90, 0.47, 0.46, 0.50],
        [1.00, 0.67, 0.57, 0.56, 0.80]
    ])

    relative_closeness = topsis_method(dataset, weights, criterion_type, graph=False, verbose=True)
    return relative_closeness
|
|
|
relative_closeness = topsis(name, long_disc, short_disc, image, symbol, labels)
|
print(relative_closeness) |
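"""`topsis_method` returns one relative-closeness value per row of `dataset`;
values closer to 1 indicate an alternative nearer the ideal solution. Reading out
index 0 as the evaluated product's score is an assumption based on the row order
built inside `topsis` above."""

product_closeness = relative_closeness[0]
print("Relative closeness of the evaluated product:", product_closeness)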
|
|
|
|
|
|
|
|
|
|