File size: 6,544 Bytes
e62366f |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 |
# -*- coding: utf-8 -*-
"""Main.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/github/payal15604/ONDC-Test/blob/main/Main.ipynb
"""
# Commented out IPython magic to ensure Python compatibility.
!pip install git+https://github.com/PrithivirajDamodaran/ZSIC.git
!pip install transformers -U
!pip install pyDecision
!git clone https://github.com/payal15604/ONDC-Test
# %cd ONDC-Test
from zero_shot_text import zero_shot_text as ztext
from zero_shot_image import zero_shot_image as zimg
from text_summarizer import text_summarizer as t_sum
from similarity_scoring import calculate_similarity as c_sim
import numpy as np
"""JSON FILE WILL COME"""
import json
import requests
# Define the URL of your API endpoint
url = "http://localhost:4500/api/datasender"
def parse_json(json_data):
    """Parse a JSON string into a Python object.

    Parameters
    ----------
    json_data : str
        Raw JSON text (as returned by the API endpoint).

    Returns
    -------
    dict | list | None
        The parsed structure, or None when the text is not valid JSON
        (an error message is printed in that case).
    """
    try:
        # json.loads already converts JSON true/false/null to Python
        # True/False/None.  The previous replace("true", "True") /
        # replace("false", "False") step actually BROKE parsing: "True"
        # is not valid JSON, so any payload containing a boolean raised
        # JSONDecodeError, and string values merely containing the
        # substring "true" were corrupted as well.
        return json.loads(json_data)
    except json.JSONDecodeError:
        print("Error: Invalid JSON data")
        return None
# Fetch product data from the local API and unpack the fields that the
# scoring pipeline below consumes.
# NOTE(review): this runs at import time, and the variables bound inside the
# loop (name, short_disc, long_disc, image, symbol) leak to module scope —
# all later code operates on the LAST item processed; if the request fails
# or returns no items, those names stay undefined and the code below raises
# NameError.  TODO confirm this single-item flow is intentional.
try:
    # Send a POST request to the API endpoint.
    # NOTE(review): no body/payload is sent — presumably the endpoint
    # ignores it; verify against the server implementation.
    response = requests.post(url)
    # Check if the request was successful (status code 200)
    if response.status_code == 200:
        # Extract the raw JSON text from the response
        received_data = response.text
        # Parse the JSON data
        parsed_data = parse_json(received_data)
        if parsed_data:
            # Process each received item under the "Received data" key
            # (missing key -> empty list, loop simply does not run).
            for item in parsed_data.get("Received data", []):
                # Accessing fields from the item; .get() defaults keep
                # missing keys from raising KeyError.
                name = item.get("name", "")
                short_disc = item.get("short_desc", "")
                long_disc = item.get("long_desc", "")
                image = item.get("images", [])
                symbol = item.get("symbol", "")
                # Example: Print the fields of each item
                print("Name:", name)
                print("Short Description:", short_disc)
                print("Long Description:", long_disc)
                print("Images:", image)
                print("Symbol:", symbol)
                # TODO: Process the fields in your ML model
        else:
            print("Error: Failed to parse JSON data")
    else:
        # Handle the case where the request was not successful
        print("Error: Failed to fetch data from the API. Status code:", response.status_code)
except Exception as e:
    # Handle any exceptions that occur during the request
    print("Error:", e)
# Candidate product categories for zero-shot classification.
labels = ['coffee','tea','shampoo','face serum','bread','honey','soap','biscuit','milk','chocolate','juice']
# NOTE(review): `result` is never read again — the helper functions below
# re-run ztext themselves — and `name` comes from the request loop above
# (undefined if the request failed).  Looks like leftover notebook
# experimentation; confirm before removing.
result = ztext(name, labels)
def zero_shot_text_formatted(text, labels):
    """Run zero-shot text classification on *text* over *labels*.

    Prints and returns the top prediction as a "label = score" string.
    """
    prediction = ztext(text, labels)
    top_label = prediction['labels'][0]
    top_score = prediction['scores'][0]
    summary = f"{top_label} = {top_score}"
    print(summary)
    return summary
def zero_shot_image_formatted(img, labels):
    """Run zero-shot image classification on *img* over *labels*.

    Prints and returns the top prediction as a "label = score" string.
    """
    prediction = zimg(img, labels)
    best_label, best_score = prediction['labels'][0], prediction['scores'][0]
    formatted = f"{best_label} = {best_score}"
    print(formatted)
    return formatted
# temp_name_score = zero_shot_text_formatted(name, labels)
# temp_sdisc_score = zero_shot_text_formatted(short_disc, labels)
# temp_ldisc_score = zero_shot_text_formatted(long_disc, labels)
# name_sdisc_score = c_sim(temp_name_score, temp_sdisc_score, model="en_core_web_sm")
# name_ldisc_score = c_sim(temp_name_score, temp_ldisc_score, model="en_core_web_sm")
# name_sldisc_score = c_sim(temp_sdisc_score, temp_ldisc_score, model="en_core_web_sm")
# print(name_sdisc_score)
# print(name_ldisc_score)
# print(name_sldisc_score)
# temp_summary = t_sum(long_disc)
# print(temp_summary)
# print(temp_name_score)
# print(temp_sdisc_score)
# print(temp_ldisc_score)
# print(temp_summary)
"""## **SCORING**"""
def name_disc_score(name, short_disc, long_disc, labels):
    """Pairwise coherence of a product's name and its two descriptions.

    Each text field is first reduced to its top zero-shot "label = score"
    string, then the three strings are compared pairwise with spaCy
    similarity (en_core_web_sm).

    Returns a tuple: (name-vs-short, name-vs-long, short-vs-long).
    """
    tags = [zero_shot_text_formatted(field, labels)
            for field in (name, short_disc, long_disc)]
    pair_indices = ((0, 1), (0, 2), (1, 2))
    return tuple(c_sim(tags[a], tags[b], model="en_core_web_sm")
                 for a, b in pair_indices)
def name_symbol_score(name, symbol, labels):
    """Similarity between the name's top text label and the symbol image's
    top label, via spaCy similarity (en_core_web_sm)."""
    name_tag = zero_shot_text_formatted(name, labels)
    symbol_tag = zero_shot_image_formatted(symbol, labels)
    return c_sim(name_tag, symbol_tag, model="en_core_web_sm")
def name_image_score(name, image_list, labels):
    """Average similarity between the name's top label and each image's
    top label.

    Parameters
    ----------
    name : str
        Product name.
    image_list : list
        Image inputs accepted by the zero-shot image classifier.
    labels : list[str]
        Candidate categories.

    Returns
    -------
    float
        Mean name-vs-image similarity, or 0.0 for an empty image list
        (the original divided by len(image_list) and raised
        ZeroDivisionError for products without images).
    """
    if not image_list:
        return 0.0
    name_tag = zero_shot_text_formatted(name, labels)
    # Pass the same spaCy model as every other c_sim call in this file;
    # the original omitted it only here, falling back to c_sim's default.
    scores = [
        c_sim(name_tag, zero_shot_image_formatted(img, labels),
              model="en_core_web_sm")
        for img in image_list
    ]
    return sum(scores) / len(scores)  # average over images
# Description coherence: mean of the three pairwise similarity scores.
N_Sd_score, N_Ld_score, Sd_Ld_score = name_disc_score(name, short_disc, long_disc, labels)
print((N_Sd_score + N_Ld_score + Sd_Ld_score)/3)
N_S_score = name_symbol_score(name, symbol, labels)
# BUG FIX: the original reassigned N_S_score on the next line, silently
# discarding the symbol score computed just above.  Bind the image score
# to its own name instead (printed value is unchanged).
N_I_score = name_image_score(name, image, labels)
print(N_I_score)
# Required Libraries
import numpy as np
from pyDecision.algorithm import topsis_method
# TOPSIS
def topsis(name, long_disc, short_disc, image, symbol, labels):
    """Rank the fetched product against two hard-coded demo alternatives
    using the TOPSIS multi-criteria decision method.

    Criteria (all benefit-type): name-symbol, name-image, name-short-desc,
    name-long-desc and short-long-desc similarity scores.

    Returns the relative-closeness vector from pyDecision's topsis_method.
    """
    # Equal manual weights (an earlier draft used [0.3, 0.25, 0.2, 0.1, 0.15]).
    weights = [0.2, 0.2, 0.2, 0.2, 0.2]
    image_score = name_image_score(name, image, labels)
    nsd_score, nld_score, sdld_score = name_disc_score(name, short_disc, long_disc, labels)
    symbol_score = name_symbol_score(name, symbol, labels)
    # Every criterion is to be maximized.
    criterion_type = ['max', 'max', 'max', 'max', 'max']
    # Row 0 is this product; rows 1-2 are demo reference alternatives.
    dataset = np.array([
        [symbol_score, image_score, nsd_score, nld_score, sdld_score],
        [0.87, 0.9, 0.47, 0.46, 0.5],
        [1, 0.67, 0.57, 0.56, 0.8],  # demo data
    ])
    return topsis_method(dataset, weights, criterion_type, graph=False, verbose=True)
# Run the full scoring pipeline on the fetched product and print the TOPSIS
# relative closeness of each alternative (row 0 = this product, rows 1-2 =
# demo references inside topsis()).
relative_closeness=topsis(name, long_disc, short_disc, image, symbol,labels)
print(relative_closeness)
## Call TOPSIS
#relative_closeness = topsis_method(dataset, weights, criterion_type, graph = False, verbose = True)
|