davanstrien HF Staff committed on
Commit
9f4bace
·
1 Parent(s): 0b8ef86
Files changed (1) hide show
  1. app.py +26 -12
app.py CHANGED
@@ -61,7 +61,7 @@ def _try_load_model_card(hub_id):
61
 
62
  def _try_parse_card_data(hub_id):
63
  data = {}
64
- keys = ["license", "language", "datasets"]
65
  for key in keys:
66
  try:
67
  value = model_info(hub_id, token=token).cardData[key]
@@ -95,10 +95,10 @@ class ModelMetadata:
95
  library_name = model.library_name
96
  except AttributeError:
97
  library_name = None
98
- try:
99
- tags = model.tags
100
- except AttributeError:
101
- tags = None
102
  try:
103
  pipeline_tag = model.pipeline_tag
104
  except AttributeError:
@@ -106,7 +106,7 @@ class ModelMetadata:
106
  return ModelMetadata(
107
  hub_id=hub_id,
108
  languages=data["language"],
109
- tags=tags,
110
  license=data["license"],
111
  library_name=library_name,
112
  datasets=data["datasets"],
@@ -138,6 +138,15 @@ COMMON_SCORES = {
138
  "score": 3,
139
  "missing_recommendation": """You haven't created a model card for your model. It is strongly recommended to have a model card for your model. \nYou can create for your model by clicking [here](https://huggingface.co/HUB_ID/edit/main/README.md)""",
140
  },
 
 
 
 
 
 
 
 
 
141
  }
142
 
143
 
@@ -250,6 +259,7 @@ def generate_task_scores_dict():
250
 
251
  SCORES = generate_task_scores_dict()
252
 
 
253
  @lru_cache(maxsize=None)
254
  def _basic_check(hub_id):
255
  try:
@@ -263,22 +273,25 @@ def _basic_check(hub_id):
263
  if k.startswith("_"):
264
  continue
265
  if data_dict[k] is None:
266
- to_fix[k] = task_scores[k]["missing_recommendation"]
 
 
267
  if data_dict[k] is not None:
268
  score += v["score"]
269
  max_score = task_scores["_max_score"]
270
  score = score / max_score
271
  score_summary = (
272
- f"Your model's metadata score is {round(score*100)}% based on suggested metadata for {task}"
 
273
  )
274
- recommendations = []
275
  if to_fix:
276
  recommendations = (
277
  "Here are some suggestions to improve your model's metadata for"
278
- f" {task}."
279
  )
280
  for v in to_fix.values():
281
- recommendations.append(v)
282
  return score_summary + recommendations if recommendations else score_summary
283
  except Exception as e:
284
  print(e)
@@ -288,6 +301,7 @@ def _basic_check(hub_id):
288
  def basic_check(hub_id):
289
  return _basic_check(hub_id)
290
 
 
291
  # print("caching models...")
292
  # print("getting top 5,000 models")
293
  # models = list_models(sort="downloads", direction=-1, limit=5_000)
@@ -296,4 +310,4 @@ def basic_check(hub_id):
296
  # thread_map(basic_check, model_ids)
297
 
298
 
299
- gr.Interface(fn=basic_check, inputs="text", outputs="text").launch()
 
61
 
62
  def _try_parse_card_data(hub_id):
63
  data = {}
64
+ keys = ["license", "language", "datasets", "tags"]
65
  for key in keys:
66
  try:
67
  value = model_info(hub_id, token=token).cardData[key]
 
95
  library_name = model.library_name
96
  except AttributeError:
97
  library_name = None
98
+ # try:
99
+ # tags = model.tags
100
+ # except AttributeError:
101
+ # tags = None
102
  try:
103
  pipeline_tag = model.pipeline_tag
104
  except AttributeError:
 
106
  return ModelMetadata(
107
  hub_id=hub_id,
108
  languages=data["language"],
109
+ tags=data["tags"],
110
  license=data["license"],
111
  library_name=library_name,
112
  datasets=data["datasets"],
 
138
  "score": 3,
139
  "missing_recommendation": """You haven't created a model card for your model. It is strongly recommended to have a model card for your model. \nYou can create for your model by clicking [here](https://huggingface.co/HUB_ID/edit/main/README.md)""",
140
  },
141
+ "tags": {
142
+ "required": False,
143
+ "score": 2,
144
+ "missing_recommendation": (
145
+ "You don't have any tags defined in your model metadata. Tags can help"
146
+ " people find relevant models on the Hub. You can create for your model by"
147
+ " clicking [here](https://huggingface.co/HUB_ID/edit/main/README.md)"
148
+ ),
149
+ },
150
  }
151
 
152
 
 
259
 
260
  SCORES = generate_task_scores_dict()
261
 
262
+
263
  @lru_cache(maxsize=None)
264
  def _basic_check(hub_id):
265
  try:
 
273
  if k.startswith("_"):
274
  continue
275
  if data_dict[k] is None:
276
+ to_fix[k] = task_scores[k]["missing_recommendation"].replace(
277
+ "HUB_ID", hub_id
278
+ )
279
  if data_dict[k] is not None:
280
  score += v["score"]
281
  max_score = task_scores["_max_score"]
282
  score = score / max_score
283
  score_summary = (
284
+ f"Your model's metadata score is {round(score*100)}% based on suggested"
285
+ f" metadata for {task}. \n"
286
  )
287
+ # recommendations = []
288
  if to_fix:
289
  recommendations = (
290
  "Here are some suggestions to improve your model's metadata for"
291
+ f" {task}: \n"
292
  )
293
  for v in to_fix.values():
294
+ recommendations += f"\n- {v}"
295
  return score_summary + recommendations if recommendations else score_summary
296
  except Exception as e:
297
  print(e)
 
301
  def basic_check(hub_id):
302
  return _basic_check(hub_id)
303
 
304
+
305
  # print("caching models...")
306
  # print("getting top 5,000 models")
307
  # models = list_models(sort="downloads", direction=-1, limit=5_000)
 
310
  # thread_map(basic_check, model_ids)
311
 
312
 
313
+ gr.Interface(fn=basic_check, inputs="text", outputs="markdown").launch(debug=True)