nouamanetazi HF Staff committed on
Commit
0a908c1
·
verified ·
1 Parent(s): 109fb13

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -37
app.py CHANGED
@@ -91,40 +91,29 @@ def generate_text(prompt, max_length=256, temperature=0.7, top_p=0.9, top_k=150,
91
 
92
  start_time = time.time()
93
 
94
- # Update progress bar - tokenization step
95
- progress(0.1, desc="تحليل النص (Tokenizing input)")
96
- inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
97
-
98
- # Update progress bar - generation starting
99
- progress(0.2, desc="توليد النص (Generating text)")
100
-
101
- # Define a callback function to update progress during generation
102
- def generation_callback(beam_idx, token_idx, token_id, scores, generation_config):
103
- # Estimate progress based on token index and max length
104
- # We start at 20% and go to 90%, leaving room for post-processing
105
- progress_value = 0.2 + 0.7 * min(token_idx / max_length, 1.0)
106
- progress(progress_value, desc=f"توليد النص: {token_idx}/{max_length} (Generating: {token_idx}/{max_length})")
107
- return False # Continue generation
108
-
109
- # Generate with callback
110
- output = model.generate(
111
- **inputs,
112
- max_length=max_length,
113
- temperature=temperature,
114
- top_p=top_p,
115
- do_sample=True,
116
- repetition_penalty=repetition_penalty,
117
- num_beams=num_beams,
118
- top_k=top_k,
119
- early_stopping=True,
120
- pad_token_id=tokenizer.pad_token_id,
121
- eos_token_id=tokenizer.eos_token_id,
122
- callback=generation_callback if hasattr(model, "generation_config") else None,
123
- )
124
-
125
- # Update progress bar - decoding step
126
- progress(0.9, desc="معالجة النتائج (Processing results)")
127
- result = tokenizer.decode(output[0], skip_special_tokens=True)
128
 
129
  # Update stats
130
  generation_time = time.time() - start_time
@@ -154,9 +143,6 @@ def generate_text(prompt, max_length=256, temperature=0.7, top_p=0.9, top_k=150,
154
  }
155
  )
156
 
157
- # Complete progress
158
- progress(1.0, desc="اكتمل (Complete)")
159
-
160
  return result, f"تم توليد {token_count} رمز في {generation_time:.2f} ثانية (Generated {token_count} tokens in {generation_time:.2f} seconds)"
161
 
162
  def save_feedback(input, output, params) -> None:
 
91
 
92
  start_time = time.time()
93
 
94
+ # Simply use the progress.tqdm wrapper for automatic progress tracking
95
+ with progress.tqdm("توليد النص (Generating text)", total=1) as pbar:
96
+ # Tokenize input
97
+ inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
98
+
99
+ # Generate text
100
+ output = model.generate(
101
+ **inputs,
102
+ max_length=max_length,
103
+ temperature=temperature,
104
+ top_p=top_p,
105
+ do_sample=True,
106
+ repetition_penalty=repetition_penalty,
107
+ num_beams=num_beams,
108
+ top_k=top_k,
109
+ early_stopping=True,
110
+ pad_token_id=tokenizer.pad_token_id,
111
+ eos_token_id=tokenizer.eos_token_id,
112
+ )
113
+
114
+ # Decode output
115
+ result = tokenizer.decode(output[0], skip_special_tokens=True)
116
+ pbar.update(1)
 
 
 
 
 
 
 
 
 
 
 
117
 
118
  # Update stats
119
  generation_time = time.time() - start_time
 
143
  }
144
  )
145
 
 
 
 
146
  return result, f"تم توليد {token_count} رمز في {generation_time:.2f} ثانية (Generated {token_count} tokens in {generation_time:.2f} seconds)"
147
 
148
  def save_feedback(input, output, params) -> None: