fioriclass committed
Commit 2040f85 · 1 Parent(s): 2e5a32e

correction

src/conf/model.yaml CHANGED

@@ -14,7 +14,7 @@ models:
   model_name: "bert-base-uncased"
   num_labels: 2
   learning_rate: 5e-5
-  epochs: 3
+  epochs: 8
   batch_size: 12
   warmup_steps: 0
   weight_decay: 0.0
@@ -25,7 +25,7 @@ models:
   model_name: "roberta-base"
   num_labels: 2
   learning_rate: 5e-5
-  epochs: 3
+  epochs: 8
   batch_size: 12
   warmup_steps: 0
   weight_decay: 0.0
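For context, the hyperparameters changed above live in plain YAML, so the new epoch count can be checked with any YAML loader. The sketch below is illustrative only: it assumes PyYAML and a top-level `models:` key holding either a list or a mapping of per-model entries (only the per-model keys are visible in the diff; the project's own loader is not shown in this commit).

import yaml  # PyYAML, used here purely for illustration

with open("src/conf/model.yaml") as f:
    conf = yaml.safe_load(f)

# The exact nesting under `models:` is an assumption based on the hunk headers.
models = conf.get("models", {})
entries = models.values() if isinstance(models, dict) else models
for entry in entries:
    print(entry.get("model_name"), "epochs =", entry.get("epochs"))  # expected: 8 after this commit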
src/mlflow_integration/mlflow_decorator.py CHANGED

@@ -49,14 +49,6 @@ class MLflowDecorator:
         Gets the relevant parameters from the trainer's config.
         This replaces the singledispatch logic previously in parameter_logging.py
         """
-        # In this specific project structure, all trainers seem to store
-        # the relevant hyperparameters directly in trainer_instance.config.model.params.
-        # If specific trainers needed different logic, we could add isinstance checks here.
-        # from trainers.cuml.svm_trainer import SvmTrainer  # Example import if needed
-        # if isinstance(trainer_instance, SvmTrainer):
-        #     return specific_logic_for_svm(trainer_instance)
-
-        # Default logic: return the model params from the config
         if hasattr(trainer_instance, 'config') and hasattr(trainer_instance.config, 'model') and hasattr(trainer_instance.config.model, 'params'):
             return trainer_instance.config.model.params
         return {}
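As a side note, the hasattr chain kept by this commit can also be written as a defensive getattr walk. The sketch below is not the project's method: the helper name `log_trainer_params` is hypothetical, and it only assumes that the extracted `params` behaves like a flat mapping suitable for mlflow.log_params.

import mlflow

def log_trainer_params(trainer_instance):
    # Hypothetical helper: walk trainer_instance.config.model.params defensively,
    # mirroring the hasattr chain above, and fall back to an empty dict.
    config = getattr(trainer_instance, "config", None)
    model = getattr(config, "model", None)
    params = getattr(model, "params", None) or {}
    if params:
        mlflow.log_params(dict(params))  # mlflow.log_params expects a flat dict of parameters
    return dict(params)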
src/trainers/huggingface/huggingface_transformer_trainer.py CHANGED

@@ -143,7 +143,8 @@ class HuggingFaceTransformerTrainer(BaseTrainer):
             train_dataset=self.train_dataset,  # Will be set in train()
             eval_dataset=self.eval_dataset,  # Will be set in train()
             compute_metrics=compute_metrics,
-            tokenizer=self.tokenizer  # Add the tokenizer for dynamic padding if needed
+            tokenizer=self.tokenizer,  # Add the tokenizer for dynamic padding if needed
+            callbacks=[]
         )

     def train(self) -> None:
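The hunk above only touches the Trainer constructor: the tokenizer argument now ends with a comma and an explicit empty callbacks list is added. A minimal sketch of that call is given below, assuming a transformers version where Trainer still accepts tokenizer=; every object passed in (model, datasets, metrics function, training_args) is a placeholder, not the project's own attribute.

from transformers import Trainer

def build_trainer(model, tokenizer, train_dataset, eval_dataset,
                  compute_metrics, training_args):
    # Placeholder wrapper mirroring the argument list visible in the hunk above.
    return Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,   # lets the default data collator do dynamic padding
        callbacks=[],          # adds nothing; the Trainer's default callbacks (incl. MLflow) remain active
    )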
 
@@ -264,11 +265,11 @@ class HuggingFaceTransformerTrainer(BaseTrainer):
             learning_rate=float(params.get("learning_rate")),
             warmup_steps=int(params.get("warmup_steps")),
             weight_decay=float(params.get("weight_decay")),
-            evaluation_strategy="epoch",
-            save_strategy="epoch",
+            save_steps=50,
             logging_dir="./logs",
-            logging_steps=10
-            # Do not disable the built-in MLflow reporting
+            logging_strategy="no",
+            save_strategy="epoch",
+            report_to="mlflow"
         )

     def optimize_if_needed(self) -> None:
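The second hunk reshapes the TrainingArguments: per-epoch evaluation is dropped, periodic logging is turned off, checkpoints are still saved per epoch, and reporting is pinned to MLflow. Below is a minimal, self-contained sketch of equivalent arguments, assuming a recent transformers release where these keyword names exist; output_dir, num_train_epochs, and the batch size are filled in from the model.yaml values above and are not taken from this file.

from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="./results",            # assumption: the real output_dir is set elsewhere in the trainer
    num_train_epochs=8,                # matches the model.yaml change in this commit
    per_device_train_batch_size=12,
    learning_rate=5e-5,
    warmup_steps=0,
    weight_decay=0.0,
    save_steps=50,                     # ignored in practice: save_strategy="epoch" takes precedence
    logging_dir="./logs",
    logging_strategy="no",             # no periodic training-loss logging
    save_strategy="epoch",
    report_to="mlflow",                # route metrics to MLflow only
)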