yakine committed on
Commit
a94bc3f
·
verified ·
1 Parent(s): c73b72d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -82,13 +82,13 @@ def generate_synthetic_data(description, columns):
82
  formatted_prompt = format_prompt(description, columns)
83
 
84
  # Tokenize the prompt with truncation enabled
85
- inputs = tokenizer_llama(formatted_prompt, return_tensors="pt", truncation=True, max_length=512).to(model_llama.device)
86
 
87
  # Generate synthetic data
88
  with torch.no_grad():
89
  outputs = model_llama.generate(
90
  **inputs,
91
- max_length=256,
92
  top_p=generation_params["top_p"],
93
  temperature=generation_params["temperature"],
94
  num_return_sequences=1,
 
82
  formatted_prompt = format_prompt(description, columns)
83
 
84
  # Tokenize the prompt with truncation enabled
85
+ inputs = tokenizer_llama(formatted_prompt, return_tensors="pt", truncation=True, max_length=1000).to(model_llama.device)
86
 
87
  # Generate synthetic data
88
  with torch.no_grad():
89
  outputs = model_llama.generate(
90
  **inputs,
91
+ max_length=512,
92
  top_p=generation_params["top_p"],
93
  temperature=generation_params["temperature"],
94
  num_return_sequences=1,