lmy0802 committed
Commit 7aedadb · verified · 1 Parent(s): 23fc8a9

Upload 7 files

dividing_into_different_subsets/5/EI/CC_EI.csv ADDED
@@ -0,0 +1,12 @@
+ Model,CC_subset_1,CC_subset_2,CC_subset_3,CC_subset_4,CC_subset_5
+ CodeFuse-DeepSeek-33b,81.63,72.73,33.33,100.0,66.67
+ Nxcode-CQ-7B,87.35,88.73,84.17,37.5,98.33
+ codegemma-2b,31.63,23.82,5.0,0.0,3.33
+ codegemma-7b,46.84,32.18,15.0,20.0,11.67
+ codegemma-7b-it,57.35,50.55,38.33,2.5,26.67
+ deepseek-coder-1.3b-base,38.57,26.64,9.17,0.0,0.0
+ deepseek-coder-6.7b-base,51.68,41.73,18.33,0.0,6.67
+ deepseek_coder-6.7b-instruct,73.16,76.36,37.5,0.0,48.33
+ deepseek_coder_33b-base,56.58,50.82,36.67,40.0,3.33
+ deepseek_coder_33b-instruct,67.86,67.09,56.67,5.0,51.67
+ codeqwen1.5-7b,55.92,47.55,16.67,47.5,45.0
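
For quick inspection of per-subset tables like this one, a minimal sketch (assuming pandas is installed and the repo is checked out locally; the path is the one shown above):

```python
import pandas as pd

# Per-model pass@1 (%) table, one column per cyclomatic-complexity subset.
df = pd.read_csv("dividing_into_different_subsets/5/EI/CC_EI.csv", index_col="Model")
print(df.mean().round(2))  # average pass@1 across models, per subset
```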
dividing_into_different_subsets/5/EI/EI.json ADDED
The diff for this file is too large to render. See raw diff
 
dividing_into_different_subsets/5/EI/calculate_humaneval_result.py ADDED
@@ -0,0 +1,157 @@
+ import json
+ import os
+ import csv
+
+ # Directory containing the per-model evaluation results
+ input_dir = 'E:/python-testn/pythonProject3/hh_2/evaluate_result'
+
+ # List all files in the directory
+ files = os.listdir(input_dir)
+
+ # with open("token_counts_EI.csv", "w", newline='') as csvfile:
+ #     writer = csv.writer(csvfile)
+ #     writer.writerow(["Model", "token_subset_1", "token_subset_2", "token_subset_3", "token_subset_4", "token_subset_5"])
+
+ # with open("line_counts_EI.csv", "w", newline='') as csvfile:
+ #     writer = csv.writer(csvfile)
+ #     writer.writerow(["Model", "line_subset_1", "line_subset_2", "line_subset_3", "line_subset_4", "line_subset_5"])
+
+ with open("CC_EI.csv", "w", newline='') as csvfile:
+     writer = csv.writer(csvfile)
+     writer.writerow(["Model", "CC_subset_1", "CC_subset_2", "CC_subset_3", "CC_subset_4", "CC_subset_5"])
+
+ for file_name in files:
+     # Build the full path to the input file
+     input_file_path = os.path.join(input_dir, file_name)
+     first_underscore_index = file_name.find('_')
+
+     # Find the position of the last '-'
+     last_dash_index = file_name.rfind('-')
+     model_name = file_name[first_underscore_index + 1:last_dash_index]
+     print(model_name)
+     with open(input_file_path, "r", encoding="utf-8") as file:
+         data1 = json.load(file)
+
+     with open("EI.json", "r", encoding="utf-8") as file:
+         data2 = json.load(file)
+     sum0 = 0
+     count0 = 0
+     sum1 = 0
+     count1 = 0
+     sum2 = 0
+     count2 = 0
+     sum3 = 0
+     count3 = 0
+     sum4 = 0
+     count4 = 0
+
+     for (item1, item2) in zip(data1["humaneval"]["pass@1"], data2):
+         # Evaluation results for the subsets split by token count
+         # if item2["token_diff"] == 0:
+         #     index, value = item1
+         #     print(item2["token_diff"], index, value)
+         #     sum0 = sum0 + value
+         #     count0 = count0 + 1
+         # if item2["token_diff"] == 1:
+         #     index, value = item1
+         #     print(item2["token_diff"], index, value)
+         #     sum1 = sum1 + value
+         #     count1 = count1 + 1
+         # if item2["token_diff"] == 2:
+         #     index, value = item1
+         #     print(item2["token_diff"], index, value)
+         #     sum2 = sum2 + value
+         #     count2 = count2 + 1
+         # if item2["token_diff"] == 3:
+         #     index, value = item1
+         #     print(item2["token_diff"], index, value)
+         #     sum3 = sum3 + value
+         #     count3 = count3 + 1
+         # if item2["token_diff"] == 4:
+         #     index, value = item1
+         #     print(item2["token_diff"], index, value)
+         #     sum4 = sum4 + value
+         #     count4 = count4 + 1
+
+         # Evaluation results for the subsets split by line count
+         # if item2["line_diff"] == 0:
+         #     index, value = item1
+         #     print(item2["line_diff"], index, value)
+         #     sum0 = sum0 + value
+         #     count0 = count0 + 1
+         # if item2["line_diff"] == 1:
+         #     index, value = item1
+         #     print(item2["line_diff"], index, value)
+         #     sum1 = sum1 + value
+         #     count1 = count1 + 1
+         # if item2["line_diff"] == 2:
+         #     index, value = item1
+         #     print(item2["line_diff"], index, value)
+         #     sum2 = sum2 + value
+         #     count2 = count2 + 1
+         # if item2["line_diff"] == 3:
+         #     index, value = item1
+         #     print(item2["line_diff"], index, value)
+         #     sum3 = sum3 + value
+         #     count3 = count3 + 1
+         # if item2["line_diff"] == 4:
+         #     index, value = item1
+         #     print(item2["line_diff"], index, value)
+         #     sum4 = sum4 + value
+         #     count4 = count4 + 1
+
+         # Evaluation results for the subsets split by cyclomatic complexity
+         if item2["CC_diff"] == 0:
+             index, value = item1
+             print(item2["CC_diff"], index, value)
+             sum0 = sum0 + value
+             count0 = count0 + 1
+         if item2["CC_diff"] == 1:
+             index, value = item1
+             print(item2["CC_diff"], index, value)
+             sum1 = sum1 + value
+             count1 = count1 + 1
+         if item2["CC_diff"] == 2:
+             index, value = item1
+             print(item2["CC_diff"], index, value)
+             sum2 = sum2 + value
+             count2 = count2 + 1
+         if item2["CC_diff"] == 3:
+             index, value = item1
+             print(item2["CC_diff"], index, value)
+             sum3 = sum3 + value
+             count3 = count3 + 1
+         if item2["CC_diff"] == 4:
+             index, value = item1
+             print(item2["CC_diff"], index, value)
+             sum4 = sum4 + value
+             count4 = count4 + 1
+
+     mean0 = round(sum0 / count0 * 100, 2)
+     mean1 = round(sum1 / count1 * 100, 2)
+     mean2 = round(sum2 / count2 * 100, 2)
+     mean3 = round(sum3 / count3 * 100, 2)
+     mean4 = round(sum4 / count4 * 100, 2)
+     print("count_result!!")
+     print(count0, count1, count2, count3, count4)
+     print(mean0, mean1, mean2, mean3, mean4)
+     # with open("token_counts_EI.csv", mode='a', newline='', encoding='utf-8') as file:
+     #     writer = csv.writer(file)
+     #     writer.writerow([model_name, mean0, mean1, mean2, mean3, mean4])
+
+     # with open("line_counts_EI.csv", mode='a', newline='', encoding='utf-8') as file:
+     #     writer = csv.writer(file)
+     #     writer.writerow([model_name, mean0, mean1, mean2, mean3, mean4])
+
+     with open("CC_EI.csv", mode='a', newline='', encoding='utf-8') as file:
+         writer = csv.writer(file)
+         writer.writerow([model_name, mean0, mean1, mean2, mean3, mean4])
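
The five parallel sum/count accumulators in the loop above can be collapsed into indexed lists. A minimal equivalent sketch, assuming the same data shapes (pass@1 entries are (index, value) pairs zipped against EI.json items carrying a bucket key); the `subset_means` helper name is illustrative, and unlike the script it guards against empty buckets instead of dividing by zero:

```python
def subset_means(pass_at_1, ei_items, key="CC_diff", n=5):
    """Mean pass@1 (%) per bucket 0..n-1; pass_at_1 holds (index, value) pairs."""
    sums, counts = [0.0] * n, [0] * n
    for (index, value), item in zip(pass_at_1, ei_items):
        bucket = item[key]
        sums[bucket] += value
        counts[bucket] += 1
    # None marks an empty bucket rather than raising ZeroDivisionError.
    return [round(s / c * 100, 2) if c else None for s, c in zip(sums, counts)]
```

Switching between the token, line, and CC splits then becomes a `key=` argument instead of commenting blocks in and out.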
dividing_into_different_subsets/5/EI/even.py ADDED
@@ -0,0 +1,60 @@
+ import json
+
+ # Read the data
+ with open("humaneval_new.json", "r", encoding="utf-8") as f:
+     data = json.load(f)
+
+ # Number of intervals to split each feature into
+ num_intervals = 5
+
+ # Compute the value range of each feature
+ line_min = min(item['line'] for item in data)
+ line_max = max(item['line'] for item in data)
+ line_interval_size = (line_max - line_min) / num_intervals
+
+ token_min = min(item['token'] for item in data)
+ token_max = max(item['token'] for item in data)
+ token_interval_size = (token_max - token_min) / num_intervals
+
+ cyclomatic_complexity_min = min(item['cyclomatic_complexity'] for item in data)
+ cyclomatic_complexity_max = max(item['cyclomatic_complexity'] for item in data)
+ cyclomatic_complexity_interval_size = (cyclomatic_complexity_max - cyclomatic_complexity_min) / num_intervals
+ count1 = 0
+ count2 = 0
+ count3 = 0
+ count4 = 0
+ count5 = 0
+
+ # Assign each item to an equal-width interval
+ for item in data:
+     # Interval for the 'line' feature
+     line_diff = int((item['line'] - line_min) // line_interval_size)
+     item['line_diff'] = min(line_diff, num_intervals - 1)  # keep the interval index in range
+
+     # Interval for the 'token' feature
+     token_diff = int((item['token'] - token_min) // token_interval_size)
+     item['token_diff'] = min(token_diff, num_intervals - 1)
+     if item['token_diff'] == 0:
+         count1 = count1 + 1
+     if item['token_diff'] == 1:
+         count2 = count2 + 1
+     if item['token_diff'] == 2:
+         count3 = count3 + 1
+     if item['token_diff'] == 3:
+         count4 = count4 + 1
+     if item['token_diff'] == 4:
+         count5 = count5 + 1
+
+     # Interval for the 'cyclomatic_complexity' feature
+     CC_diff = int((item['cyclomatic_complexity'] - cyclomatic_complexity_min) // cyclomatic_complexity_interval_size)
+     item['CC_diff'] = min(CC_diff, num_intervals - 1)  # keep the interval index in range
+
+ # Restore the original order
+ data.sort(key=lambda x: x['id'])
+ print(count1, count2, count3, count4, count5)
+
+ # Write the updated data back to a JSON file
+ with open('EI.json', 'w', encoding='utf-8') as file:
+     json.dump(data, file, ensure_ascii=False, indent=4)
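
The binning rule even.py applies is floor((x − min) / interval_size), clamped into the last interval so the maximum value does not spill out of range. A standalone sketch of the same rule (the 1..11 range below is illustrative, not taken from humaneval_new.json):

```python
def equal_width_bin(x, lo, hi, n=5):
    # Index in 0..n-1 of the equal-width interval over [lo, hi] containing x.
    if hi == lo:  # degenerate range: everything lands in interval 0
        return 0
    return min(int((x - lo) // ((hi - lo) / n)), n - 1)

# With an illustrative range 1..11 split into 5 intervals of width 2:
assert equal_width_bin(1, 1, 11) == 0   # lower edge
assert equal_width_bin(3, 1, 11) == 1   # (3 - 1) // 2 = 1
assert equal_width_bin(11, 1, 11) == 4  # the maximum clamps into the last interval
```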
dividing_into_different_subsets/5/EI/humaneval_new.json ADDED
The diff for this file is too large to render. See raw diff
 
dividing_into_different_subsets/5/EI/line_counts_EI.csv ADDED
@@ -0,0 +1,12 @@
+ Model,line_subset_1,line_subset_2,line_subset_3,line_subset_4,line_subset_5
+ CodeFuse-DeepSeek-33b,80.0,73.24,73.68,100.0,75.0
+ Nxcode-CQ-7B,89.62,88.73,83.16,59.0,78.75
+ codegemma-2b,39.46,20.07,21.58,7.0,3.75
+ codegemma-7b,48.77,35.85,34.47,15.0,20.0
+ codegemma-7b-it,63.23,48.73,44.21,26.0,43.75
+ deepseek-coder-1.3b-base,42.69,26.76,24.47,5.0,33.75
+ deepseek-coder-6.7b-base,59.85,39.44,38.68,0.0,16.25
+ deepseek_coder-6.7b-instruct,81.69,69.72,62.63,24.0,42.5
+ deepseek_coder_33b-base,64.62,47.46,48.16,12.0,26.25
+ deepseek_coder_33b-instruct,77.92,60.85,68.95,16.0,17.5
+ codeqwen1.5-7b,58.85,48.52,48.95,15.0,37.5
dividing_into_different_subsets/5/EI/token_counts_EI.csv ADDED
@@ -0,0 +1,12 @@
+ Model,token_subset_1,token_subset_2,token_subset_3,token_subset_4,token_subset_5
+ CodeFuse-DeepSeek-33b,79.22,78.95,61.9,75.0,100.0
+ Nxcode-CQ-7B,90.06,88.25,78.81,84.38,20.0
+ codegemma-2b,41.69,21.05,1.19,1.88,0.0
+ codegemma-7b,54.48,30.26,22.14,17.5,0.0
+ codegemma-7b-it,69.35,40.53,38.1,33.12,0.0
+ deepseek-coder-1.3b-base,49.22,21.05,8.33,16.87,0.0
+ deepseek-coder-6.7b-base,63.31,35.7,23.1,11.88,0.0
+ deepseek_coder-6.7b-instruct,79.81,72.28,55.48,38.75,0.0
+ deepseek_coder_33b-base,68.51,45.09,29.76,22.5,0.0
+ deepseek_coder_33b-instruct,79.35,62.89,34.52,53.12,0.0
+ codeqwen1.5-7b,64.42,42.89,36.43,31.88,0.0