k1h0 committed on
Commit f136598 · verified · 1 Parent(s): 76abbed

Create process_for.grepp.py

Files changed (1)
  1. process_for.grepp.py +100 -0
process_for.grepp.py ADDED
@@ -0,0 +1,100 @@
+ import json
+ from vllm import LLM, SamplingParams
+ import argparse
+ from tqdm import tqdm
+
+ def load_jsonl(file_path):
+     """Load JSONL file line by line."""
+     data = []
+     with open(file_path, 'r', encoding='utf-8') as f:
+         for line in f:
+             data.append(json.loads(line.strip()))
+     return data
+
+ def save_jsonl(data, file_path):
+     """Save data to JSONL file."""
+     with open(file_path, 'w', encoding='utf-8') as f:
+         for item in data:
+             f.write(json.dumps(item, ensure_ascii=False) + '\n')
+
+ def format_prompt(instruction, input_text):
+     """Format the prompt for code generation."""
+     return f"""Below is a coding problem with instructions and example inputs. Write a solution in code.
+
+ Problem Description:
+ {instruction}
+
+ Example Inputs:
+ {input_text}
+
+ Write your solution code:"""
+
+ def main():
+     parser = argparse.ArgumentParser(description='Process JSONL file with CodeLlama model')
+     parser.add_argument('--input_file', type=str, required=True, help='Input JSONL file path')
+     parser.add_argument('--output_file', type=str, required=True, help='Output JSONL file path')
+     parser.add_argument('--batch_size', type=int, default=4, help='Batch size for inference')
+     args = parser.parse_args()
+
+     # Initialize the model
+     print("Loading CodeLlama model...")
+     model = LLM(
+         model="codellama/CodeLlama-7b-hf",
+         trust_remote_code=True,
+         tensor_parallel_size=1
+     )
+
+     # Set sampling parameters (vLLM's SamplingParams takes `stop`, not `stop_tokens`)
+     sampling_params = SamplingParams(
+         temperature=0.2,  # Lower temperature for more focused code generation
+         top_p=0.95,
+         max_tokens=2048,  # Increased max tokens for longer code solutions
+         stop=["\n\n\n", "```"]  # Stop at clear code boundaries
+     )
+
+     # Load data
+     print("Loading input data...")
+     data = load_jsonl(args.input_file)
+
+     # Prepare prompts
+     prompts = []
+     for item in data:
+         instruction = item.get('instruction', '')
+         input_text = item.get('input', '')
+         prompt = format_prompt(instruction, input_text)
+         prompts.append(prompt)
+
+     # Process in batches
+     print("Generating outputs...")
+     outputs = []
+     for i in tqdm(range(0, len(prompts), args.batch_size)):
+         batch_prompts = prompts[i:i + args.batch_size]
+         batch_outputs = model.generate(batch_prompts, sampling_params)
+
+         # Process outputs
+         for j, output in enumerate(batch_outputs):
+             idx = i + j
+             if idx < len(data):
+                 # Clean the output to extract only the code
+                 generated_code = output.outputs[0].text.strip()
+                 # Remove any markdown code blocks if present
+                 if generated_code.startswith("```"):
+                     generated_code = generated_code.split("```")[1]
+                     if generated_code.startswith("python"):
+                         generated_code = generated_code[6:]
+                     generated_code = generated_code.strip()
+
+                 result = {
+                     'instruction': data[idx].get('instruction', ''),  # .get for consistency with prompt building
+                     'input': data[idx].get('input', ''),
+                     'output': generated_code
+                 }
+                 outputs.append(result)
+
+     # Save results
+     print("Saving results...")
+     save_jsonl(outputs, args.output_file)
+     print(f"Processing complete. Results saved to {args.output_file}")
+
+ if __name__ == "__main__":
+     main()
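
For reference, a minimal sketch of the input the script expects and how it might be run. The file names below are placeholders; the 'instruction' and 'input' keys match what the script reads from each JSONL line:

import json

# Hypothetical record (assumed schema, based on the keys the script reads).
example_record = {
    "instruction": "Write a function that returns the sum of a list of integers.",
    "input": "[1, 2, 3]",
}

# The input file holds one JSON object per line:
with open("problems.jsonl", "w", encoding="utf-8") as f:
    f.write(json.dumps(example_record, ensure_ascii=False) + "\n")

# Then (placeholder paths):
#   python process_for.grepp.py --input_file problems.jsonl --output_file solutions.jsonl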