key added
- app.py +19 -5
- requirements.txt +2 -1

app.py CHANGED
@@ -19,6 +19,7 @@ from langchain.prompts import PromptTemplate
 from torchvision.models import resnet50
 import nest_asyncio
 from sentence_transformers import SentenceTransformer
+from huggingface_hub import hf_hub_download
 
 # model = SentenceTransformer("Alibaba-NLP/gte-Qwen2-1.5B-instruct", trust_remote_code=True)
 
@@ -118,7 +119,11 @@ rag_chain = RetrievalQA.from_chain_type(
 Load My models
 '''
 
-def load_model(
+def load_model(repo_id, filename):
+    model_path = hf_hub_download(
+        repo_id=repo_id,
+        filename=filename,
+    )
     model = EvoViTModel(img_size=224, patch_size=16, in_channels=3, embed_dim=768, num_classes=2, hidden_dim=512)
     model.classifier = nn.Linear(512, 1)
     state_dict = torch.load(model_path, map_location=device)
@@ -168,9 +173,11 @@ def load_binary_models():
         "Vasculitis Photos": 'keerthi/Vasculitis/best_global_model_10fold.pth',
         "Warts Molluscum and other Viral Infections": 'santhosh/10fold_model_warts.pth'
     }
-
-
-
+    repo_id = "KeerthiVM/SkinCancerDiagnosis"  # Your Hugging Face repo
+
+    for class_name, filename in class_models_mapping.items():
+        # model_path = os.path.join("best_models_overall", rel_path)
+        model = load_model(repo_id, filename)
         base_models.append(model)
     return base_models
 
@@ -255,8 +262,15 @@ class SkinDiseaseClassifier:
         self.resnet_feature_extractor.eval()
 
         # Load meta model
-        meta_model_path = 'best_meta_model_two_layer_version4.pth'
+        # meta_model_path = 'best_meta_model_two_layer_version4.pth'
+        # checkpoint = torch.load(meta_model_path, map_location=self.device)
+
+        meta_model_path = hf_hub_download(
+            repo_id="KeerthiVM/SkinCancerDiagnosis",
+            filename="best_meta_model_two_layer_version4.pth"
+        )
         checkpoint = torch.load(meta_model_path, map_location=self.device)
+
         correct_input_size = checkpoint['state_dict']['fc.0.weight'].shape[1]
         input_size = 23 + 4 + 7168  # Adjust based on your actual feature size
         fc_layers = [1024, 512, 256]  # Use whatever was in your best model
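For context, the change boils down to fetching each checkpoint from the Hub instead of a hard-coded local path. A minimal, runnable sketch of that pattern (the repo id and filename are the ones used in this commit; that the repo is publicly readable is an assumption):

import torch
from huggingface_hub import hf_hub_download

# hf_hub_download fetches the file on first use, caches it locally,
# and returns the path of the cached copy; torch.load then reads it as usual.
model_path = hf_hub_download(
    repo_id="KeerthiVM/SkinCancerDiagnosis",
    filename="best_meta_model_two_layer_version4.pth",
)
checkpoint = torch.load(model_path, map_location="cpu")
print(model_path)

Because the download is cached, repeated Space restarts reuse the local copy rather than re-downloading each checkpoint.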
requirements.txt CHANGED
@@ -15,4 +15,5 @@ scikit-learn
 langchain_openai
 nest_asyncio
 sentence_transformers
-langchain-qdrant
+langchain-qdrant
+huggingface_hub
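The new huggingface_hub entry provides the hf_hub_download calls added above. If the checkpoint repo is (or later becomes) private, the same call also accepts a token; a sketch, assuming the token is exposed to the Space as an HF_TOKEN environment variable (that secret name is an assumption, not something configured in this commit):

import os
from huggingface_hub import hf_hub_download

# token is only needed for private repos; None falls back to anonymous access.
path = hf_hub_download(
    repo_id="KeerthiVM/SkinCancerDiagnosis",
    filename="best_meta_model_two_layer_version4.pth",
    token=os.environ.get("HF_TOKEN"),  # assumed secret name, not part of this commit
)
print(path)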