lmy0802 committed on
Commit
c923f73
·
verified ·
1 Parent(s): 7aedadb

Upload 7 files

Browse files
dividing_into_different_subsets/6/EI/CC_EI.csv ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Model,CC_subset_1,CC_subset_2,CC_subset_3,CC_subset_4,CC_subset_5,CC_subset_6
2
+ CodeFuse-DeepSeek-33b,81.63,76.09,46.67,100.0,100.0,66.67
3
+ Nxcode-CQ-7B,87.35,91.41,78.67,20.0,55.0,98.33
4
+ codegemma-2b,31.63,28.26,2.67,0.0,0.0,3.33
5
+ codegemma-7b,46.84,36.09,13.33,0.0,40.0,11.67
6
+ codegemma-7b-it,57.35,54.13,34.67,0.0,5.0,26.67
7
+ deepseek-coder-1.3b-base,38.57,29.57,10.67,0.0,0.0,0.0
8
+ deepseek-coder-6.7b-base,51.68,47.39,15.0,0.0,0.0,6.67
9
+ deepseek_coder-6.7b-instruct,73.16,77.72,56.67,0.0,0.0,48.33
10
+ deepseek_coder_33b-base,56.58,54.46,34.0,0.0,80.0,3.33
11
+ deepseek_coder_33b-instruct,67.86,70.33,53.0,0.0,10.0,51.67
12
+ codeqwen1.5-7b,55.92,53.04,18.33,0.0,95.0,45.0
dividing_into_different_subsets/6/EI/EI.json ADDED
The diff for this file is too large to render. See raw diff
 
dividing_into_different_subsets/6/EI/calculate_humaneval_result.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import json
import os
import csv

# Directory containing the per-model evaluation result JSON files.
input_dir = 'E:/python-testn/pythonProject3/hh_1/evaluate_result'

# Subset feature to aggregate by and the CSV file to write.  Switch these to
# ("token_diff", "token_counts_EI.csv", "token") or
# ("line_diff", "line_counts_EI.csv", "line") to reproduce the token- or
# line-based tables the original kept as commented-out copies of this loop.
DIFF_KEY = "CC_diff"
OUTPUT_CSV = "CC_EI.csv"
COLUMN_PREFIX = "CC"
NUM_SUBSETS = 6


def _model_name_from_file(file_name):
    """Return the model name embedded in *file_name*.

    The name is the span between the first '_' and the last '-'
    (e.g. 'humaneval_codegemma-7b-results' -> 'codegemma-7b').
    """
    first_underscore_index = file_name.find('_')
    last_dash_index = file_name.rfind('-')
    return file_name[first_underscore_index + 1:last_dash_index]


def main():
    """Aggregate per-problem pass@1 into per-subset means and write OUTPUT_CSV."""
    # Write the CSV header once, truncating any previous output.
    with open(OUTPUT_CSV, "w", newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(
            ["Model"] + [f"{COLUMN_PREFIX}_subset_{i + 1}" for i in range(NUM_SUBSETS)]
        )

    # The subset assignment (EI.json) is the same for every model: load it
    # once instead of re-reading it inside the per-model loop.
    with open("EI.json", "r", encoding="utf-8") as f:
        subsets = json.load(f)

    for file_name in os.listdir(input_dir):
        input_file_path = os.path.join(input_dir, file_name)
        model_name = _model_name_from_file(file_name)
        print(model_name)

        with open(input_file_path, "r", encoding="utf-8") as f:
            results = json.load(f)

        sums = [0.0] * NUM_SUBSETS
        counts = [0] * NUM_SUBSETS

        # pass@1 entries are (index, value) pairs assumed to be aligned 1:1
        # with the EI.json records — TODO confirm the ordering contract.
        for item1, item2 in zip(results["humaneval"]["pass@1"], subsets):
            bucket = item2[DIFF_KEY]
            index, value = item1
            print(bucket, index, value)
            sums[bucket] += value
            counts[bucket] += 1

        # Mean pass@1 per subset as a percentage.  Guard against an empty
        # subset instead of crashing on ZeroDivisionError (the original
        # divided unconditionally).
        means = [
            round(s / c * 100, 2) if c else 0.0
            for s, c in zip(sums, counts)
        ]

        print("count_result!!")
        print(*counts)
        # BUGFIX: the original printed count4 in place of mean4 here.
        print(*means)

        with open(OUTPUT_CSV, mode='a', newline='', encoding='utf-8') as f:
            writer = csv.writer(f)
            writer.writerow([model_name] + means)


if __name__ == "__main__":
    main()
dividing_into_different_subsets/6/EI/even.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import json

# Number of equal-width intervals each feature is divided into.
NUM_INTERVALS = 6


def _bin_index(value, lo, size, num_intervals=NUM_INTERVALS):
    """Map *value* to an equal-width interval index in [0, num_intervals).

    *lo* is the feature's minimum and *size* its interval width.  The top
    edge is clamped into the last bin so the maximum value stays in range.
    When *size* is 0 (all values identical) every value falls in bin 0
    instead of raising ZeroDivisionError as the original did.
    """
    if size == 0:
        return 0
    return min(int((value - lo) // size), num_intervals - 1)


def main():
    """Annotate humaneval_new.json records with per-feature interval indices."""
    with open("humaneval_new.json", "r", encoding="utf-8") as f:
        data = json.load(f)

    # Per-feature (minimum, equal-width interval size) over the whole dataset.
    spans = {}
    for feature in ("line", "token", "cyclomatic_complexity"):
        lo = min(item[feature] for item in data)
        hi = max(item[feature] for item in data)
        spans[feature] = (lo, (hi - lo) / NUM_INTERVALS)

    # How many items land in each token interval (diagnostic print below).
    token_counts = [0] * NUM_INTERVALS

    for item in data:
        # Interval index for the 'line' feature.
        lo, size = spans["line"]
        item['line_diff'] = _bin_index(item['line'], lo, size)

        # Interval index for the 'token' feature, tallied for the printout.
        lo, size = spans["token"]
        item['token_diff'] = _bin_index(item['token'], lo, size)
        token_counts[item['token_diff']] += 1

        # Interval index for the 'cyclomatic_complexity' feature.
        lo, size = spans["cyclomatic_complexity"]
        item['CC_diff'] = _bin_index(item['cyclomatic_complexity'], lo, size)

    # Restore the original problem order before writing back.
    data.sort(key=lambda x: x['id'])
    print(*token_counts)

    # Write the annotated records back out.
    with open('EI.json', 'w', encoding='utf-8') as file:
        json.dump(data, file, ensure_ascii=False, indent=4)


if __name__ == "__main__":
    main()
dividing_into_different_subsets/6/EI/humaneval_new.json ADDED
The diff for this file is too large to render. See raw diff
 
dividing_into_different_subsets/6/EI/line_counts_EI.csv ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Model,line_subset_1,line_subset_2,line_subset_3,line_subset_4,line_subset_5,line_subset_6
2
+ CodeFuse-DeepSeek-33b,77.27,77.22,76.92,57.14,80.0,100.0
3
+ Nxcode-CQ-7B,90.11,88.1,88.46,82.86,59.0,71.67
4
+ codegemma-2b,47.16,20.44,26.92,6.43,2.0,1.67
5
+ codegemma-7b,56.14,35.38,39.42,20.0,5.0,23.33
6
+ codegemma-7b-it,69.43,50.57,48.46,27.14,26.0,28.33
7
+ deepseek-coder-1.3b-base,49.43,28.04,28.08,6.43,9.0,30.0
8
+ deepseek-coder-6.7b-base,64.89,40.89,47.88,13.57,6.0,11.67
9
+ deepseek_coder-6.7b-instruct,82.39,71.9,71.35,52.86,28.0,23.33
10
+ deepseek_coder_33b-base,72.5,48.99,47.69,32.86,3.0,35.0
11
+ deepseek_coder_33b-instruct,82.95,64.11,61.73,53.57,17.0,21.67
12
+ codeqwen1.5-7b,60.91,49.68,56.54,28.57,12.0,30.0
dividing_into_different_subsets/6/EI/token_counts_EI.csv ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Model,token_subset_1,token_subset_2,token_subset_3,token_subset_4,token_subset_5,token_subset_6
2
+ CodeFuse-DeepSeek-33b,74.0,86.49,58.33,60.0,80.0,100.0
3
+ Nxcode-CQ-7B,89.8,87.5,87.92,82.5,79.0,20.0
4
+ codegemma-2b,41.0,27.97,13.12,1.0,1.0,0.0
5
+ codegemma-7b,53.1,37.84,32.08,18.0,24.0,0.0
6
+ codegemma-7b-it,68.5,48.38,45.42,45.0,34.0,0.0
7
+ deepseek-coder-1.3b-base,49.4,31.42,15.42,4.5,18.0,0.0
8
+ deepseek-coder-6.7b-base,64.3,44.53,31.46,16.0,13.0,0.0
9
+ deepseek_coder-6.7b-instruct,84.5,71.55,63.54,49.0,41.0,0.0
10
+ deepseek_coder_33b-base,66.2,54.32,37.08,25.5,35.0,0.0
11
+ deepseek_coder_33b-instruct,80.4,67.43,48.75,26.0,81.0,0.0
12
+ codeqwen1.5-7b,65.4,47.3,50.42,28.0,33.0,0.0