Upload 36 files
- AAmain.py +34 -0
- allcombo.py +32 -0
- allcombo10.py +32 -0
- allcombo2.py +32 -0
- allcombo3.py +32 -0
- allcombo4.py +32 -0
- allcombo5.py +32 -0
- allcombo6.py +32 -0
- allcombo7.py +32 -0
- allcombo8.py +32 -0
- allcombo9.py +32 -0
- app.py +60 -0
- app2.py +48 -0
- app3.py +55 -0
- appp.py +70 -0
- appself.py +75 -0
- backend1.py +43 -0
- backend2.py +30 -0
- backend2later.py +32 -0
- breakdown1.py +29 -0
- col.py +42 -0
- col2.py +37 -0
- data.py +75 -0
- data2.py +77 -0
- finalloop.py +31 -0
- finalloop2.py +26 -0
- finalloopneedfix.py +27 -0
- first.html +19 -0
- index.html +321 -17
- mod8.py +101 -0
- mod8l2.py +108 -0
- mod8l3.py +79 -0
- mod8l4.py +92 -0
- mod8l5.py +80 -0
- textto.py +19 -0
- word_database.txt +143 -0
AAmain.py
ADDED
@@ -0,0 +1,34 @@
import subprocess

# Function to call a script
def call_script(script_name):
    try:
        subprocess.run(['python', f'{script_name}.py'], check=True)
    except subprocess.CalledProcessError as e:
        print(f"Error running script {script_name}: {e}")

# Call scripts in order
call_script('backend1')
call_script('backend2')
call_script('breakdown1')
call_script('textto')
call_script('mod8')
call_script('mod8l2')
call_script('mod8l3')
call_script('mod8l4')
call_script('mod8l5')
call_script('allcombo')
call_script('allcombo2')
call_script('allcombo3')
call_script('allcombo4')
call_script('allcombo5')
call_script('allcombo6')
call_script('allcombo7')
call_script('allcombo8')
call_script('allcombo9')
call_script('allcombo10')
call_script('space')
call_script('finalloop2')
call_script('col')
call_script('finalloop')
call_script('looploop2all')

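A minimal sketch (not part of the upload) of the same orchestration driven by a list: the stage names are copied from AAmain.py above, and `sys.executable` is assumed in place of the bare 'python' command so every stage runs under the same interpreter.

import subprocess
import sys

# Stage names, in the same order AAmain.py calls them.
PIPELINE = [
    'backend1', 'backend2', 'breakdown1', 'textto',
    'mod8', 'mod8l2', 'mod8l3', 'mod8l4', 'mod8l5',
    'allcombo', 'allcombo2', 'allcombo3', 'allcombo4', 'allcombo5',
    'allcombo6', 'allcombo7', 'allcombo8', 'allcombo9', 'allcombo10',
    'space', 'finalloop2', 'col', 'finalloop', 'looploop2all',
]

def run_pipeline(stages):
    for name in stages:
        try:
            # A missing or failing script makes the interpreter exit
            # non-zero, which check=True turns into CalledProcessError.
            subprocess.run([sys.executable, f'{name}.py'], check=True)
        except subprocess.CalledProcessError as e:
            print(f"Error running script {name}: {e}")

if __name__ == '__main__':
    run_pipeline(PIPELINE)
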
allcombo.py
ADDED
@@ -0,0 +1,32 @@
import csv
import os

# Function to extract the first column from a CSV file
def extract_first_column(csv_file):
    with open(csv_file, 'r') as file:
        reader = csv.reader(file)
        first_column = [int(row[0]) for row in reader]
    return first_column

# Directory containing combo CSV files
csv_directory = os.path.dirname(os.path.abspath(__file__))

# List to store the first columns from each CSV
all_first_columns = []

# Loop through each combo CSV file
for i in range(1, 7):  # Assuming combo files are named combo1.csv to combo6.csv
    csv_file_path = os.path.join(csv_directory, f'text1combo{i}.csv')
    first_column = extract_first_column(csv_file_path)
    all_first_columns.append(first_column)

# Transpose the list of lists to get a list of columns
all_first_columns_transposed = list(map(list, zip(*all_first_columns)))

# Save the transposed result into a new CSV file
output_csv_path = os.path.join(csv_directory, 'loopnum1.csv')
with open(output_csv_path, 'w', newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerows(all_first_columns_transposed)

print(f"Combined first columns saved to {output_csv_path}")

allcombo10.py
ADDED
@@ -0,0 +1,32 @@
import csv
import os

# Function to extract the first column from a CSV file
def extract_first_column(csv_file):
    with open(csv_file, 'r') as file:
        reader = csv.reader(file)
        first_column = [int(row[0]) for row in reader]
    return first_column

# Directory containing combo CSV files
csv_directory = os.path.dirname(os.path.abspath(__file__))

# List to store the first columns from each CSV
all_first_columns = []

# Loop through each combo CSV file
for i in range(1, 7):  # Assuming combo files are named combo1.csv to combo6.csv
    csv_file_path = os.path.join(csv_directory, f'text10combo{i}.csv')
    first_column = extract_first_column(csv_file_path)
    all_first_columns.append(first_column)

# Transpose the list of lists to get a list of columns
all_first_columns_transposed = list(map(list, zip(*all_first_columns)))

# Save the transposed result into a new CSV file
output_csv_path = os.path.join(csv_directory, 'loopnum10.csv')
with open(output_csv_path, 'w', newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerows(all_first_columns_transposed)

print(f"Combined first columns saved to {output_csv_path}")

allcombo2.py
ADDED
@@ -0,0 +1,32 @@
import csv
import os

# Function to extract the first column from a CSV file
def extract_first_column(csv_file):
    with open(csv_file, 'r') as file:
        reader = csv.reader(file)
        first_column = [int(row[0]) for row in reader]
    return first_column

# Directory containing combo CSV files
csv_directory = os.path.dirname(os.path.abspath(__file__))

# List to store the first columns from each CSV
all_first_columns = []

# Loop through each combo CSV file
for i in range(1, 7):  # Assuming combo files are named combo1.csv to combo6.csv
    csv_file_path = os.path.join(csv_directory, f'text2combo{i}.csv')
    first_column = extract_first_column(csv_file_path)
    all_first_columns.append(first_column)

# Transpose the list of lists to get a list of columns
all_first_columns_transposed = list(map(list, zip(*all_first_columns)))

# Save the transposed result into a new CSV file
output_csv_path = os.path.join(csv_directory, 'loopnum2.csv')
with open(output_csv_path, 'w', newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerows(all_first_columns_transposed)

print(f"Combined first columns saved to {output_csv_path}")

allcombo3.py
ADDED
@@ -0,0 +1,32 @@
import csv
import os

# Function to extract the first column from a CSV file
def extract_first_column(csv_file):
    with open(csv_file, 'r') as file:
        reader = csv.reader(file)
        first_column = [int(row[0]) for row in reader]
    return first_column

# Directory containing combo CSV files
csv_directory = os.path.dirname(os.path.abspath(__file__))

# List to store the first columns from each CSV
all_first_columns = []

# Loop through each combo CSV file
for i in range(1, 7):  # Assuming combo files are named combo1.csv to combo6.csv
    csv_file_path = os.path.join(csv_directory, f'text3combo{i}.csv')
    first_column = extract_first_column(csv_file_path)
    all_first_columns.append(first_column)

# Transpose the list of lists to get a list of columns
all_first_columns_transposed = list(map(list, zip(*all_first_columns)))

# Save the transposed result into a new CSV file
output_csv_path = os.path.join(csv_directory, 'loopnum3.csv')
with open(output_csv_path, 'w', newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerows(all_first_columns_transposed)

print(f"Combined first columns saved to {output_csv_path}")

allcombo4.py
ADDED
@@ -0,0 +1,32 @@
import csv
import os

# Function to extract the first column from a CSV file
def extract_first_column(csv_file):
    with open(csv_file, 'r') as file:
        reader = csv.reader(file)
        first_column = [int(row[0]) for row in reader]
    return first_column

# Directory containing combo CSV files
csv_directory = os.path.dirname(os.path.abspath(__file__))

# List to store the first columns from each CSV
all_first_columns = []

# Loop through each combo CSV file
for i in range(1, 7):  # Assuming combo files are named combo1.csv to combo6.csv
    csv_file_path = os.path.join(csv_directory, f'text4combo{i}.csv')
    first_column = extract_first_column(csv_file_path)
    all_first_columns.append(first_column)

# Transpose the list of lists to get a list of columns
all_first_columns_transposed = list(map(list, zip(*all_first_columns)))

# Save the transposed result into a new CSV file
output_csv_path = os.path.join(csv_directory, 'loopnum4.csv')
with open(output_csv_path, 'w', newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerows(all_first_columns_transposed)

print(f"Combined first columns saved to {output_csv_path}")

allcombo5.py
ADDED
@@ -0,0 +1,32 @@
import csv
import os

# Function to extract the first column from a CSV file
def extract_first_column(csv_file):
    with open(csv_file, 'r') as file:
        reader = csv.reader(file)
        first_column = [int(row[0]) for row in reader]
    return first_column

# Directory containing combo CSV files
csv_directory = os.path.dirname(os.path.abspath(__file__))

# List to store the first columns from each CSV
all_first_columns = []

# Loop through each combo CSV file
for i in range(1, 7):  # Assuming combo files are named combo1.csv to combo6.csv
    csv_file_path = os.path.join(csv_directory, f'text5combo{i}.csv')
    first_column = extract_first_column(csv_file_path)
    all_first_columns.append(first_column)

# Transpose the list of lists to get a list of columns
all_first_columns_transposed = list(map(list, zip(*all_first_columns)))

# Save the transposed result into a new CSV file
output_csv_path = os.path.join(csv_directory, 'loopnum5.csv')
with open(output_csv_path, 'w', newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerows(all_first_columns_transposed)

print(f"Combined first columns saved to {output_csv_path}")

allcombo6.py
ADDED
@@ -0,0 +1,32 @@
import csv
import os

# Function to extract the first column from a CSV file
def extract_first_column(csv_file):
    with open(csv_file, 'r') as file:
        reader = csv.reader(file)
        first_column = [int(row[0]) for row in reader]
    return first_column

# Directory containing combo CSV files
csv_directory = os.path.dirname(os.path.abspath(__file__))

# List to store the first columns from each CSV
all_first_columns = []

# Loop through each combo CSV file
for i in range(1, 7):  # Assuming combo files are named combo1.csv to combo6.csv
    csv_file_path = os.path.join(csv_directory, f'text6combo{i}.csv')
    first_column = extract_first_column(csv_file_path)
    all_first_columns.append(first_column)

# Transpose the list of lists to get a list of columns
all_first_columns_transposed = list(map(list, zip(*all_first_columns)))

# Save the transposed result into a new CSV file
output_csv_path = os.path.join(csv_directory, 'loopnum6.csv')
with open(output_csv_path, 'w', newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerows(all_first_columns_transposed)

print(f"Combined first columns saved to {output_csv_path}")

allcombo7.py
ADDED
@@ -0,0 +1,32 @@
import csv
import os

# Function to extract the first column from a CSV file
def extract_first_column(csv_file):
    with open(csv_file, 'r') as file:
        reader = csv.reader(file)
        first_column = [int(row[0]) for row in reader]
    return first_column

# Directory containing combo CSV files
csv_directory = os.path.dirname(os.path.abspath(__file__))

# List to store the first columns from each CSV
all_first_columns = []

# Loop through each combo CSV file
for i in range(1, 7):  # Assuming combo files are named combo1.csv to combo6.csv
    csv_file_path = os.path.join(csv_directory, f'text7combo{i}.csv')
    first_column = extract_first_column(csv_file_path)
    all_first_columns.append(first_column)

# Transpose the list of lists to get a list of columns
all_first_columns_transposed = list(map(list, zip(*all_first_columns)))

# Save the transposed result into a new CSV file
output_csv_path = os.path.join(csv_directory, 'loopnum7.csv')
with open(output_csv_path, 'w', newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerows(all_first_columns_transposed)

print(f"Combined first columns saved to {output_csv_path}")

allcombo8.py
ADDED
@@ -0,0 +1,32 @@
import csv
import os

# Function to extract the first column from a CSV file
def extract_first_column(csv_file):
    with open(csv_file, 'r') as file:
        reader = csv.reader(file)
        first_column = [int(row[0]) for row in reader]
    return first_column

# Directory containing combo CSV files
csv_directory = os.path.dirname(os.path.abspath(__file__))

# List to store the first columns from each CSV
all_first_columns = []

# Loop through each combo CSV file
for i in range(1, 7):  # Assuming combo files are named combo1.csv to combo6.csv
    csv_file_path = os.path.join(csv_directory, f'text8combo{i}.csv')
    first_column = extract_first_column(csv_file_path)
    all_first_columns.append(first_column)

# Transpose the list of lists to get a list of columns
all_first_columns_transposed = list(map(list, zip(*all_first_columns)))

# Save the transposed result into a new CSV file
output_csv_path = os.path.join(csv_directory, 'loopnum8.csv')
with open(output_csv_path, 'w', newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerows(all_first_columns_transposed)

print(f"Combined first columns saved to {output_csv_path}")

allcombo9.py
ADDED
@@ -0,0 +1,32 @@
import csv
import os

# Function to extract the first column from a CSV file
def extract_first_column(csv_file):
    with open(csv_file, 'r') as file:
        reader = csv.reader(file)
        first_column = [int(row[0]) for row in reader]
    return first_column

# Directory containing combo CSV files
csv_directory = os.path.dirname(os.path.abspath(__file__))

# List to store the first columns from each CSV
all_first_columns = []

# Loop through each combo CSV file
for i in range(1, 7):  # Assuming combo files are named combo1.csv to combo6.csv
    csv_file_path = os.path.join(csv_directory, f'text9combo{i}.csv')
    first_column = extract_first_column(csv_file_path)
    all_first_columns.append(first_column)

# Transpose the list of lists to get a list of columns
all_first_columns_transposed = list(map(list, zip(*all_first_columns)))

# Save the transposed result into a new CSV file
output_csv_path = os.path.join(csv_directory, 'loopnum9.csv')
with open(output_csv_path, 'w', newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerows(all_first_columns_transposed)

print(f"Combined first columns saved to {output_csv_path}")

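The ten allcombo scripts above differ only in the text-file prefix (text1 through text10) and the output name (loopnum1.csv through loopnum10.csv). A minimal consolidation sketch, not one of the uploaded files, that produces the same ten outputs from a single script under that assumption:

import csv
import os

csv_directory = os.path.dirname(os.path.abspath(__file__))

def extract_first_column(csv_file):
    with open(csv_file, 'r') as file:
        return [int(row[0]) for row in csv.reader(file)]

# One pass per text prefix: textN combo files in, loopnumN.csv out.
for n in range(1, 11):
    columns = [
        extract_first_column(os.path.join(csv_directory, f'text{n}combo{i}.csv'))
        for i in range(1, 7)
    ]
    # Transpose so each output row holds one value from every combo file.
    transposed = list(map(list, zip(*columns)))
    output_csv_path = os.path.join(csv_directory, f'loopnum{n}.csv')
    with open(output_csv_path, 'w', newline='') as csvfile:
        csv.writer(csvfile).writerows(transposed)
    print(f"Combined first columns saved to {output_csv_path}")
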
app.py
ADDED
@@ -0,0 +1,60 @@
# streamlit_app.py
import streamlit as st
import requests
import csv
from datetime import datetime
import subprocess

# Define Streamlit app
st.title("Conversation Generator")

@st.cache(allow_output_mutation=True)
def generate_conversation(prompt):
    try:
        # Introduce slight variations in the prompt
        prompt_variation = prompt + str(hash(prompt))[:3]

        # Adjust temperature for more diverse responses
        response = requests.post('https://api-inference.huggingface.co/models/facebook/blenderbot-400M-distill', json={
            "inputs": prompt_variation,
            "options": {"temperature": 0.8}  # Adjust as needed
        })

        conversation = response.json()["generated_text"]
        return conversation
    except Exception as e:
        return f"Error: {str(e)}"

def save_to_csv(prompt, conversation):
    timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
    filename = f"info.csv"

    with open(filename, mode='w', newline='', encoding='utf-8') as csv_file:
        csv_writer = csv.writer(csv_file)
        csv_writer.writerow(['Prompt', 'Generated Conversation'])
        csv_writer.writerow([prompt, conversation])

    return filename

# Load index.html content
with open("index.html", "r", encoding="utf-8") as html_file:
    index_html_content = html_file.read()

# Embed HTML content in Streamlit
st.markdown(index_html_content, unsafe_allow_html=True)

# Define Streamlit UI
prompt = st.text_area("Enter prompt:")
if st.button("Generate and Display"):
    conversation = generate_conversation(prompt)
    csv_filename = save_to_csv(prompt, conversation)
    st.write("Generated Conversation:")
    st.write(conversation)
    st.write("CSV file saved:", csv_filename)

if st.button("Run AAMain.py"):
    try:
        subprocess.run(["python", "AAmain.py"])
        st.success("AAmain.py process started successfully.")
    except Exception as e:
        st.error(f"Error: {str(e)}")

app2.py
ADDED
@@ -0,0 +1,48 @@
from fastapi import FastAPI, Request, Form
from fastapi.templating import Jinja2Templates
import gpt_2_simple as gpt2
from datetime import datetime
import csv

app = FastAPI()
templates = Jinja2Templates(directory="templates")

# Download the GPT-2 model if not already downloaded
gpt2.download_gpt2(model_name="124M")

# Load the GPT-2 model
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, model_name="124M")

async def generate_conversation(prompt):
    try:
        conversation = gpt2.generate(sess, prefix=prompt, length=300, temperature=0.7, return_as_list=True)[0]
        return conversation
    except Exception as e:
        return f"Error: {str(e)}"

def save_to_csv(prompt, conversation):
    timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
    filename = f"info.csv"

    with open(filename, mode='w', newline='', encoding='utf-8') as csv_file:
        csv_writer = csv.writer(csv_file)
        csv_writer.writerow(['Prompt', 'Generated Conversation'])
        csv_writer.writerow([prompt, conversation])

    return filename

@app.get("/")
def read_form(request: Request):
    return templates.TemplateResponse("index.html", {"request": request})

@app.post("/")
async def generate_and_display(request: Request, prompt: str = Form(...)):
    conversation = await generate_conversation(prompt)
    csv_filename = save_to_csv(prompt, conversation)
    return templates.TemplateResponse("index.html", {"request": request, "prompt": prompt, "conversation": conversation, "csv_filename": csv_filename})

if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="127.0.0.1", port=8000)

app3.py
ADDED
@@ -0,0 +1,55 @@
from fastapi import FastAPI, Request, Form
from fastapi.templating import Jinja2Templates
import csv
from datetime import datetime  # Used for generating unique filenames
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Install these libraries if you haven't already:
# pip install transformers accelerate

app = FastAPI()
templates = Jinja2Templates(directory="templates")

# Load GPT-J 6B model and tokenizer
model_name = "EleutherAI/gpt-j-6B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

async def generate_conversation(prompt):
    try:
        # Tokenize the prompt
        inputs = tokenizer(prompt, return_tensors="pt")

        # Generate response using local model
        output = model.generate(**inputs)
        conversation = tokenizer.decode(output[0], skip_special_tokens=True)

        return conversation
    except Exception as e:
        return f"Error: {str(e)}"

def save_to_csv(prompt, conversation):
    timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
    filename = f"info.csv"

    with open(filename, mode='w', newline='', encoding='utf-8') as csv_file:
        csv_writer = csv.writer(csv_file)
        csv_writer.writerow(['Prompt', 'Generated Conversation'])
        csv_writer.writerow([prompt, conversation])

    return filename

@app.get("/")
def read_form(request: Request):
    return templates.TemplateResponse("index.html", {"request": request})

@app.post("/")
async def generate_and_display(request: Request, prompt: str = Form(...)):
    conversation = await generate_conversation(prompt)
    csv_filename = save_to_csv(prompt, conversation)
    return templates.TemplateResponse("index.html", {"request": request, "prompt": prompt, "conversation": conversation, "csv_filename": csv_filename})

if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="127.0.0.1", port=8000)

appp.py
ADDED
@@ -0,0 +1,70 @@
from fastapi import FastAPI, Request, Form
from fastapi.templating import Jinja2Templates
import httpx
import csv
from datetime import datetime
import subprocess

app = FastAPI()
templates = Jinja2Templates(directory="templates")

async def generate_conversation(prompt):
    try:
        # Introduce slight variations in the prompt
        prompt_variation = prompt + str(hash(prompt))[:3]

        # Adjust temperature for more diverse responses
        async with httpx.AsyncClient() as client:
            response = await client.post('https://api-inference.huggingface.co/models/facebook/blenderbot-400M-distill', json={
                "inputs": prompt_variation,
                "options": {"temperature": 0.8}  # Adjust as needed
            })

        conversation = response.json()["generated_text"]
        return conversation
    except Exception as e:
        return f"Error: {str(e)}"


def save_to_csv(prompt, conversation):
    timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
    filename = f"info.csv"

    with open(filename, mode='w', newline='', encoding='utf-8') as csv_file:
        csv_writer = csv.writer(csv_file)
        csv_writer.writerow(['Prompt', 'Generated Conversation'])
        csv_writer.writerow([prompt, conversation])

    return filename


def run_aamain_script():
    try:
        # Run the AAmain.py script
        subprocess.run(["python", "AAmain.py"], check=True)
        return "AAmain.py script executed successfully"
    except subprocess.CalledProcessError as e:
        return f"Error executing AAmain.py script: {str(e)}"


@app.get("/")
def read_form(request: Request):
    return templates.TemplateResponse("index.html", {"request": request})

@app.post("/generate_ai")
async def generate_ai(request: Request, ai_prompt: str = Form(...)):
    # Handle generation logic
    conversation = await generate_conversation(ai_prompt)
    csv_filename = save_to_csv(ai_prompt, conversation)
    return templates.TemplateResponse("index.html", {"request": request, "prompt": ai_prompt, "conversation": conversation, "csv_filename": csv_filename})

@app.post("/run_aamain")
async def run_aamain(request: Request):
    # Handle running AAmain.py script
    result = run_aamain_script()
    return templates.TemplateResponse("index.html", {"request": request, "aamain_result": result})

if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="127.0.0.1", port=8000)

appself.py
ADDED
@@ -0,0 +1,75 @@
from fastapi import FastAPI, Request, Form
from fastapi.templating import Jinja2Templates
import httpx
import csv
from datetime import datetime
from fastapi.staticfiles import StaticFiles
import subprocess

app = FastAPI()

CSV_FILE_PATH0 = 'info.csv'
PYTHON_SCRIPT_PATH0 = 'AAmain.py'

# Set up static files
app.mount("/static", StaticFiles(directory="."), name="static")

templates = Jinja2Templates(directory="templates")

def generate_conversation(prompt):
    try:
        # Introduce slight variations in the prompt
        prompt_variation = prompt + str(hash(prompt))[:3]

        # Adjust temperature for more diverse responses
        with httpx.Client() as client:
            response = client.post('https://api-inference.huggingface.co/models/facebook/blenderbot-400M-distill', json={
                "inputs": prompt_variation,
                "options": {"temperature": 0.8}  # Adjust as needed
            })

        conversation = response.json()["generated_text"]
        return conversation
    except Exception as e:
        return f"Error: {str(e)}"

def save_to_csv(prompt, conversation):
    timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
    filename = f"info.csv"

    with open(filename, mode='w', newline='', encoding='utf-8') as csv_file:
        csv_writer = csv.writer(csv_file)
        csv_writer.writerow(['Prompt', 'Generated Conversation'])
        csv_writer.writerow([prompt, conversation])

    return filename

@app.get("/")
def read_form(request: Request):
    return templates.TemplateResponse("index.html", {"request": request})

@app.post("/")
async def generate_and_display(request: Request, prompt: str = Form(...)):
    conversation = generate_conversation(prompt)
    csv_filename = save_to_csv(prompt, conversation)

    return templates.TemplateResponse("index.html", {"request": request, "prompt": prompt, "conversation": conversation, "csv_filename": csv_filename})

# New route for running AAmain.py
@app.post("/run_aamain")
async def run_aamain(csv_filename: str = Form(...)):
    subprocess.run(["python", "AAmain.py", csv_filename])  # Adjust arguments as needed
    return {"message": "AAmain.py process started successfully."}

# New route for generating AI
@app.post("/generate_ai")
async def generate_ai(prompt: str = Form(...)):
    conversation = generate_conversation(prompt)
    csv_filename = save_to_csv(prompt, conversation)

    return {"prompt": prompt, "conversation": conversation, "csv_filename": csv_filename}

if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="127.0.0.1", port=8000)

backend1.py
ADDED
@@ -0,0 +1,43 @@
import csv

def load_word_database(database_filename):
    with open(database_filename, mode='r', encoding='utf-8') as database_file:
        return set(word.strip().lower() for word in database_file)

def check_generated_conversation_for_words(csv_filename, word_database):
    words_scores = []

    with open(csv_filename, mode='r', newline='', encoding='utf-8') as csv_file:
        csv_reader = csv.DictReader(csv_file)

        for row in csv_reader:
            generated_conversation = row.get('Generated Conversation', '').lower()
            word_scores = []

            for word in generated_conversation.split():
                score = 1 if word in word_database else 0
                word_scores.append({'Word': word, 'Score': score})

            words_scores.extend(word_scores)

    return words_scores

def save_word_scores_to_csv(word_scores, output_csv_filename):
    with open(output_csv_filename, mode='w', newline='', encoding='utf-8') as output_csv_file:
        csv_writer = csv.DictWriter(output_csv_file, fieldnames=['Word', 'Score'])
        csv_writer.writeheader()

        for word_score in word_scores:
            csv_writer.writerow(word_score)

def main():
    database_filename = 'word_database.txt'
    csv_filename = 'info.csv'  # Replace with your CSV file
    output_csv_filename = 'word_scores.csv'

    word_database = load_word_database(database_filename)
    words_scores = check_generated_conversation_for_words(csv_filename, word_database)
    save_word_scores_to_csv(words_scores, output_csv_filename)

if __name__ == "__main__":
    main()

backend2.py
ADDED
@@ -0,0 +1,30 @@
import csv

def load_word_database(database_filename):
    with open(database_filename, mode='r', encoding='utf-8') as database_file:
        return set(word.strip().lower() for word in database_file)

def check_generated_conversation_for_words(csv_filename, word_database):
    with open(csv_filename, mode='r', newline='', encoding='utf-8') as csv_file:
        csv_reader = csv.DictReader(csv_file)
        words = []
        for row in csv_reader:
            generated_conversation = row.get('Generated Conversation', '').lower()
            words.extend(word for word in generated_conversation.split() if word in word_database)
        save_words_to_csv(words)

def save_words_to_csv(words):
    output_csv_filename = "text.csv"
    with open(output_csv_filename, mode='w', newline='', encoding='utf-8') as output_csv_file:
        csv_writer = csv.writer(output_csv_file)
        csv_writer.writerows([[word] for word in words])

def main():
    database_filename = 'word_database.txt'
    csv_filename = 'info.csv'  # Replace with your CSV file

    word_database = load_word_database(database_filename)
    check_generated_conversation_for_words(csv_filename, word_database)

if __name__ == "__main__":
    main()

backend2later.py
ADDED
@@ -0,0 +1,32 @@
import csv

def load_word_database(database_filename):
    with open(database_filename, mode='r', encoding='utf-8') as database_file:
        return set(word.strip().lower() for word in database_file)

def check_generated_conversation_for_words(csv_filename, word_database):
    with open(csv_filename, mode='r', newline='', encoding='utf-8') as csv_file:
        csv_reader = csv.DictReader(csv_file)
        for row in csv_reader:
            generated_conversation = row.get('Generated Conversation', '').lower()
            for word in generated_conversation.split():
                if word in word_database:
                    save_word_to_csv(word)

def save_word_to_csv(word):
    output_csv_filename = "text.csv"
    with open(output_csv_filename, mode='w', newline='', encoding='utf-8') as output_csv_file:
        csv_writer = csv.writer(output_csv_file)
        csv_writer.writerow([word])

def main():
    database_filename = 'word_database.txt'
    csv_filename = 'info.csv'  # Replace with your CSV file

    word_database = load_word_database(database_filename)
    check_generated_conversation_for_words(csv_filename, word_database)

if __name__ == "__main__":
    main()

ADDED
@@ -0,0 +1,29 @@
|
import csv

def process_csv(input_file, output_file):
    with open(input_file, 'r') as csvfile:
        reader = csv.reader(csvfile)
        next(reader)  # Skip header row

        output_data = []

        for row in reader:
            word, score = row[0], int(row[1])
            if score == 1:
                # Break down the word into letters and append each letter with ,1
                for letter in word:
                    output_data.append([letter, '1'])
            else:
                output_data.append([word, str(score)])

    with open(output_file, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(['Letter', 'Score'])
        writer.writerows(output_data)

if __name__ == "__main__":
    input_csv = "word_scores.csv"
    output_csv = "word_scores2.csv"

    process_csv(input_csv, output_csv)
    print(f"Conversion complete. Output saved to {output_csv}")

col.py
ADDED
@@ -0,0 +1,42 @@
import csv
import numpy as np
import random  # Import the random module

def change_fourth_occurrence(matrix):
    for col in range(len(matrix[0])):
        count = 1
        for row in range(1, len(matrix)):
            if matrix[row][col] == matrix[row - 1][col]:
                count += 1
            else:
                count = 1

            if count == 4:
                current_number = matrix[row][col]

                # Generate a random number within the range [current_number-1, current_number+1]
                next_number = random.choice([current_number - 1, current_number + 1])

                matrix[row][col] = next_number
                count = 1

    # Post-processing: Ensure there are no negative numbers in the matrix
    matrix[matrix < 0] = 0

# Read the matrix from CSV
input_matrix = []
with open('matrix2.csv', 'r') as file:
    csv_reader = csv.reader(file)
    for row in csv_reader:
        input_matrix.append([int(num) for num in row])

# Convert the matrix to a NumPy array for easy column indexing
input_matrix = np.array(input_matrix)

# Call the function to modify the matrix
change_fourth_occurrence(input_matrix)

# Write the modified matrix back to CSV
with open('matrix5.csv', 'w', newline='') as file:
    csv_writer = csv.writer(file)
    csv_writer.writerows(input_matrix)

col2.py
ADDED
@@ -0,0 +1,37 @@
import csv

def change_fourth_occurrence(matrix):
    for col in range(len(matrix[0])):
        count = 1
        for row in range(1, len(matrix)):
            if matrix[row][col] == matrix[row - 1][col]:
                count += 1
            else:
                count = 1

            if count == 4:
                next_number = matrix[row][col] + 1
                while next_number in matrix[:, col]:
                    next_number += 1

                matrix[row][col] = next_number
                count = 1

# Read the matrix from CSV
input_matrix = []
with open('matrix2.csv', 'r') as file:
    csv_reader = csv.reader(file)
    for row in csv_reader:
        input_matrix.append([int(num) for num in row])

# Convert the matrix to a NumPy array for easy column indexing
import numpy as np
input_matrix = np.array(input_matrix)

# Call the function to modify the matrix
change_fourth_occurrence(input_matrix)

# Write the modified matrix back to CSV
with open('matrix5.csv', 'w', newline='') as file:
    csv_writer = csv.writer(file)
    csv_writer.writerows(input_matrix)

data.py
ADDED
@@ -0,0 +1,75 @@
# Define a list of font sizes
font_sizes = ["17px", "19px", "21px", "23px", "25px", "27px",]
# Define a list of font tops
font_tops = ["11px", "11px", "13px", "13px", "15px", "15px",]

# Define a list of letter spacings
letter_spacing = [" -3px", " -2px", "-4px", "-4px", " -2px", "-4px", " -6px", " -4px", " -4px", "-2px", "6px"]

# Define a list of text shadow combinations
text_shadow = [
    "0px 0px 1px",
    "0px 0px 2px",
    "1px 0px 0px",
    "0px 0px 0px",
    "0px 1px 0px",
    "0px 2px 0px",
    "0px 1px 1px",
    "1px 1px 0px",
    "1px 0px 1px",
    "0px 1px 0px",
    "0px 1px 0px"
]

# Define a list of skew options
skew_options = [
    "(0deg, 0deg)",
    "(-0deg, 0deg)",
    "(-5deg, 0deg)",
    "(-20deg, 0deg)",
    "(-0deg, 0deg)",
    "(-0deg, 0deg)",
    "(-0deg, 0deg)",
    "(10deg, 0deg)",
    "(15deg, 0deg)",
    "(20deg, 0deg)",
    "(0deg, 0deg)",
    "(0deg, 0deg)",
    "(0deg, 0deg)"
]

# Define a list of font styles
font_styles = [
    "Raleway, sans-serif",
    "'Open Sans Condensed', sans-serif",
    "'Roboto Condensed', sans-serif",
    "'Poiret One', sans-serif",
    "'Dosis', sans-serif",
    "'Fjalla One', sans-serif",
    "'Indie Flower', sans-serif",
    "'Josefin Sans', sans-serif",
    "'Roboto Slab', sans-serif",
    "'Annie Use Your Telescope', sans-serif",
]

color_values = [
    "#000000",  # Black
    "#000000",  # Black
    "#000000",  # Black
    "#000000",  # Black
    "#000000",  # Black
    "#000000",  # Black
    "#000000",  # Black
    "#5afc87",  # Light green
    "#5afc87",  # Light green
    "#5afc87",  # Light green
    "#5afc87",  # Light green
    "#5afc87",  # Light green
    "#5afc87",  # Light green
    "#000000",  # Black
    "#000000",  # Black
    "#000000",  # Black
]

data2.py
ADDED
@@ -0,0 +1,77 @@
# Define a list of font sizes
font_sizes = ["17px", "19px", "21px", "23px", "25px", "27px",]
# Define a list of font tops
font_tops = ["11px", "11px", "13px", "13px", "15px", "15px",]

# Define a list of letter spacings
letter_spacing = ["-6px", "-5px", "-4px", "-3px", "-2px", "-1px", "0px", "1px", "2px", "6px"]

# Define a list of text shadow combinations
text_shadow = [
    "0px 0px 1px",
    "0px 0px 2px",
    "1px 0px 0px",
    "2px 0px 0px",
    "0px 1px 0px",
    "0px 2px 0px",
    "2px 1px 1px",
    "1px 1px 2px",
    "1px 2px 1px",
    "2px 1px 0px",
    "2px 2px 1px"
]

# Define a list of skew options
skew_options = [
    "(0deg, 0deg)",
    "(-10deg, 0deg)",
    "(-15deg, 0deg)",
    "(-20deg, 0deg)",
    "(-25deg, 0deg)",
    "(-30deg, 0deg)",
    "(-35deg, 0deg)",
    "(10deg, 0deg)",
    "(15deg, 0deg)",
    "(20deg, 0deg)",
    "(25deg, 0deg)",
    "(30deg, 0deg)",
    "(35deg, 0deg)"
]

# Define a list of font styles
font_styles = [
    "Raleway, sans-serif",
    "'Open Sans Condensed', sans-serif",
    "'Roboto Condensed', sans-serif",
    "'Poiret One', sans-serif",
    "'Dosis', sans-serif",
    "'Fjalla One', sans-serif",
    "'Indie Flower', sans-serif",
    "'Josefin Sans', sans-serif",
    "'Roboto Slab', sans-serif",
    "'Righteous', sans-serif",
    "'Yellowtail', sans-serif",
    "'Annie Use Your Telescope', sans-serif",
    "'Just Me Again Down Here', sans-serif",
    "'Nixie One', sans-serif",
    "'Unkempt', sans-serif",
]

color_values = [
    "#FF0000",  # Red
    "#00FF00",  # Green
    "#0000FF",  # Blue
    "#FFFF00",  # Yellow
    "#FF00FF",  # Magenta
    "#00FFFF",  # Cyan
    "#FFA500",  # Orange
    "#800080",  # Purple
    "#008000",  # Dark Green
    "#808080",  # Gray
    "#000000"   # Black
]

finalloop.py
ADDED
@@ -0,0 +1,31 @@
import pandas as pd

# Read word_scores2.csv
word_scores = pd.read_csv('word_scores2.csv')

# Read matrix5.csv
matrix5 = pd.read_csv('matrix5.csv', header=None)

# Initialize an empty matrix
output_matrix = []

# Initialize a variable to keep track of the position in matrix5
matrix_position = 0

# Iterate through each row in word_scores
for index, row in word_scores.iterrows():
    score = row['Score']

    # If the score is 0, add a row of zeros to the output matrix
    if score == 0:
        output_matrix.append([0] * 6)
    # If the score is 1, use the corresponding row from matrix5.csv and update position
    elif score == 1:
        output_matrix.append(matrix5.iloc[matrix_position % len(matrix5)].tolist())
        matrix_position += 1

# Convert the output_matrix to a DataFrame
result_output_matrix = pd.DataFrame(output_matrix, columns=['Col1', 'Col2', 'Col3', 'Col4', 'Col5', 'Col6'])

# Save the result_output_matrix to a CSV file without a header
result_output_matrix.to_csv('loopnumber.csv', header=False, index=False)

finalloop2.py
ADDED
@@ -0,0 +1,26 @@
import os
import pandas as pd

# Initialize an empty list to store loopnum dataframes
loopnum_dataframes = []

# Iterate through loopnum1 to loopnum10
for i in range(1, 11):
    loopnum_file = f'loopnum{i}.csv'

    # Check if the loopnum file exists before trying to read it
    if not os.path.exists(loopnum_file):
        print(f"Warning: {loopnum_file} not found. Skipping.")
    else:
        loopnum_data = pd.read_csv(loopnum_file, header=None)
        loopnum_dataframes.append(loopnum_data)

# Check if any valid loopnum files were found
if loopnum_dataframes:
    # Concatenate the loopnum dataframes into one dataframe
    result_matrix2 = pd.concat(loopnum_dataframes, ignore_index=True)

    # Save the result_matrix2 to a CSV file without a header
    result_matrix2.to_csv('matrix2.csv', header=False, index=False)
else:
    print("No valid loopnum files found.")

finalloopneedfix.py
ADDED
@@ -0,0 +1,27 @@
import pandas as pd

# Read word_scores2.csv
word_scores = pd.read_csv('word_scores2.csv')

# Read matrix2.csv
matrix2 = pd.read_csv('matrix5.csv', header=None)

# Initialize an empty matrix
output_matrix = []

# Iterate through each row in word_scores
for index, row in word_scores.iterrows():
    score = row['Score']

    # If the score is 0, add a row of zeros to the output matrix
    if score == 0:
        output_matrix.append([0] * 6)
    # If the score is 1, use the corresponding row from matrix2.csv
    elif score == 1:
        output_matrix.append(matrix2.iloc[index % len(matrix2)].tolist())

# Convert the output_matrix to a DataFrame
result_output_matrix = pd.DataFrame(output_matrix, columns=['Col1', 'Col2', 'Col3', 'Col4', 'Col5', 'Col6'])

# Save the result_output_matrix to a CSV file without a header
result_output_matrix.to_csv('loopnum.csv', header=False, index=False)

first.html
ADDED
@@ -0,0 +1,19 @@
<html><head></head><body><span class="css-1dbjc4n r-xoduu5x"><div id="div1" class="relative"><div id="ddiv10" class="CIRCDosis_SZ18_SP-3_SH1_SK0_MA0" style="letter-spacing: -3px; text-shadow: 0px 0px 1px; font-family: Raleway, sans-serif; font-size: 17px; top: 11px; margin-top: -0.08cm; line-height: 118%; transform: skew(10deg, 0deg); color: #5afc87;">b</div></div></span>
<span class="css-1dbjc4n r-xoduu5x"><div id="div1" class="relative"><div id="ddiv10" class="CIRCDosis_SZ18_SP-3_SH1_SK0_MA0" style="letter-spacing: -3px; text-shadow: 0px 0px 1px; font-family: Raleway, sans-serif; font-size: 17px; top: 11px; margin-top: -0.08cm; line-height: 118%; transform: skew(10deg, 0deg); color: #000000;">a</div></div></span>
<span class="css-1dbjc4n r-xoduu5x"><div id="div1" class="relative"><div id="ddiv10" class="CIRCDosis_SZ18_SP-3_SH1_SK0_MA0" style="letter-spacing: -3px; text-shadow: 0px 0px 1px; font-family: Raleway, sans-serif; font-size: 17px; top: 11px; margin-top: -0.08cm; line-height: 118%; transform: skew(10deg, 0deg); color: #000000;">s</div></div></span>
<span class="css-1dbjc4n r-xoduu5x"><div id="div1" class="relative"><div id="ddiv10" class="CIRCDosis_SZ18_SP-3_SH1_SK0_MA0" style="letter-spacing: -2px; text-shadow: 0px 0px 1px; font-family: 'Open Sans Condensed', sans-serif; font-size: 19px; top: 11px; margin-top: -0.08cm; line-height: 118%; transform: skew(15deg, 0deg); color: #000000;">k</div></div></span>
<span class="css-1dbjc4n r-xoduu5x"><div id="div1" class="relative"><div id="ddiv10" class="CIRCDosis_SZ18_SP-3_SH1_SK0_MA0" style="letter-spacing: -3px; text-shadow: 0px 0px 1px; font-family: Raleway, sans-serif; font-size: 17px; top: 11px; margin-top: -0.08cm; line-height: 118%; transform: skew(10deg, 0deg); color: #000000;">e</div></div></span>
<span class="css-1dbjc4n r-xoduu5x"><div id="div1" class="relative"><div id="ddiv10" class="CIRCDosis_SZ18_SP-3_SH1_SK0_MA0" style="letter-spacing: -3px; text-shadow: 0px 0px 1px; font-family: Raleway, sans-serif; font-size: 17px; top: 11px; margin-top: -0.08cm; line-height: 118%; transform: skew(10deg, 0deg); color: #5afc87;">t</div></div></span>
<span class="css-1dbjc4n r-xoduu5x"><div id="div1" class="relative"><div id="ddiv10" class="CIRCDosis_SZ18_SP-3_SH1_SK0_MA0" style="letter-spacing: -3px; text-shadow: 0px 0px 1px; font-family: Raleway, sans-serif; font-size: 17px; top: 11px; margin-top: -0.08cm; line-height: 118%; transform: skew(10deg, 0deg); color: #000000;">b</div></div></span>
<span class="css-1dbjc4n r-xoduu5x"><div id="div1" class="relative"><div id="ddiv10" class="CIRCDosis_SZ18_SP-3_SH1_SK0_MA0" style="letter-spacing: -2px; text-shadow: 0px 0px 2px; font-family: 'Poiret One', sans-serif; font-size: 17px; top: 11px; margin-top: -0.08cm; line-height: 118%; transform: skew(-0deg, 0deg); color: #5afc87;">a</div></div></span>
<span class="css-1dbjc4n r-xoduu5x"><div id="div1" class="relative"><div id="ddiv10" class="CIRCDosis_SZ18_SP-3_SH1_SK0_MA0" style="letter-spacing: -4px; text-shadow: 0px 0px 0px; font-family: 'Open Sans Condensed', sans-serif; font-size: 17px; top: 11px; margin-top: -0.08cm; line-height: 118%; transform: skew(10deg, 0deg); color: #000000;">l</div></div></span>
<span class="css-1dbjc4n r-xoduu5x"><div id="div1" class="relative"><div id="ddiv10" class="CIRCDosis_SZ18_SP-3_SH1_SK0_MA0" style="letter-spacing: 6px; text-shadow: 0px 0px 1px; font-family: 'Poiret One', sans-serif; font-size: 17px; top: 11px; margin-top: -0.08cm; line-height: 118%; transform: skew(10deg, 0deg); color: #5afc87;">l</div></div></span>
<span class="css-901oao2sx" style="color: #a8dab5; letter-spacing: 2px; font-size: 21px; font-family: 'Oswald', sans-serif; font-weight: 600;">was</span>
<span class="css-901oao2sx" style="color: #a8dab5; letter-spacing: 2px; font-size: 21px; font-family: 'Oswald', sans-serif; font-weight: 600;">invented</span>
<span class="css-901oao2sx" style="color: #a8dab5; letter-spacing: 2px; font-size: 21px; font-family: 'Oswald', sans-serif; font-weight: 600;">in</span>
<span class="css-901oao2sx" style="color: #a8dab5; letter-spacing: 2px; font-size: 21px; font-family: 'Oswald', sans-serif; font-weight: 600;">1891</span>
<span class="css-901oao2sx" style="color: #a8dab5; letter-spacing: 2px; font-size: 21px; font-family: 'Oswald', sans-serif; font-weight: 600;">by</span>
<span class="css-901oao2sx" style="color: #a8dab5; letter-spacing: 2px; font-size: 21px; font-family: 'Oswald', sans-serif; font-weight: 600;">dr.</span>
<span class="css-901oao2sx" style="color: #a8dab5; letter-spacing: 2px; font-size: 21px; font-family: 'Oswald', sans-serif; font-weight: 600;">james</span>
<span class="css-901oao2sx" style="color: #a8dab5; letter-spacing: 2px; font-size: 21px; font-family: 'Oswald', sans-serif; font-weight: 600;">naismith.</span>
</body></html>

index.html
CHANGED
@@ -1,19 +1,323 @@
Old file (19 lines): <!DOCTYPE html>, an unclosed <html tag, sixteen blank lines, and </html>. Lines 2-18 are removed in this commit and replaced by the new markup below.
New file (323 lines; this view shows new lines 1-122, after which the diff is truncated):

<!DOCTYPE html>
<html><meta charset="utf-8" />
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="format-detection" content="telephone=no" />
<meta name="msapplication-tap-highlight" content="no" />
<meta name="viewport" content="user-scalable=no, initial-scale=1, maximum-scale=1, minimum-scale=1, width=device-width" />
<link href='http://fonts.googleapis.com/css?family=Raleway:500' rel='stylesheet' type='text/css'>
<link href='http://fonts.googleapis.com/css?family=Open+Sans+Condensed:700|Oswald:300' rel='stylesheet' type='text/css'>
<link href='http://fonts.googleapis.com/css?family=Open+Sans+Condensed:300' rel='stylesheet' type='text/css'>
<link href='http://fonts.googleapis.com/css?family=Roboto+Condensed' rel='stylesheet' type='text/css'>
<link href='https://fonts.googleapis.com/css?family=Poiret+One|Dosis:300|Fjalla+One' rel='stylesheet' type='text/css'>
<link href='https://fonts.googleapis.com/css?family=Indie+Flower|Lobster' rel='stylesheet' type='text/css'>
<link href='https://fonts.googleapis.com/css?family=Pacifico|Shadows+Into+Light|Dancing+Script|Amatic+SC' rel='stylesheet' type='text/css'>
<link href='https://fonts.googleapis.com/css?family=Teko' rel='stylesheet' type='text/css'>
<link href="https://fonts.googleapis.com/css?family=Abril+Fatface|Josefin+Sans|Gloria+Hallelujah|Roboto Slab|Righteous|Sacramento|Yellowtail" rel="stylesheet">
<link href="https://fonts.googleapis.com/css?family=Annie+Use+Your+Telescope|Just+Me+Again+Down+Here|Nixie+One|Six+Caps|Unkempt" rel="stylesheet">
<style>

.css-1dbjc4n {
    -ms-flex-align: stretch;
    -ms-flex-direction: column;
    -ms-flex-negative: 0;
    -ms-flex-preferred-size: auto;
    -webkit-align-items: stretch;
    -webkit-box-align: stretch;
    -webkit-box-direction: normal;
    -webkit-box-orient: vertical;
    -webkit-flex-basis: auto;
    -webkit-flex-direction: column;
    -webkit-flex-shrink: 0;
    align-items: stretch;
    border: 0 solid black;
    box-sizing: border-box;
    display: -webkit-box;
    display: -moz-box;
    display: -ms-flexbox;
    display: -webkit-flex;
    display: flex;
    flex-basis: auto;
    flex-direction: column;
    flex-shrink: 0;
    margin-bottom: 0px;
    margin-left: 0px;
    margin-right: 2px;
    margin-top: 0px;
    min-height: 0px;
    min-width: 0px;
    padding-bottom: 0px;
    padding-left: 0px;
    padding-right: 0px;
    padding-top: 0px;
    position: relative;
    z-index: 0;
}

.r-xoduu5zo {
    display: inline-flex;
    top: 10px;
    background-color: #000000;
}

.r-xoduu5xxx {
    display: inline-flex;
    top: 5px;
}
.hagh1 {
    height: 125px;
    width: 390px;
    position: absolute;
    left: 500px;
    top: 57px;
    text-align: center;
}

div.relative {
    position: relative;
    top: -16px;
    text-align: center;
}

.r-xoduu5xoooo {
    display: inline-flex;
    top: -7px;
}

.CIRCDosis_SZ18_SP-3_SH1_SK0_MA0 {
    font-family: 'Dosis', sans-serif;
    line-height: 110%;
    font-size: 18px;
    letter-spacing: -3px;
    text-shadow: 0px 0px 1px;
    transform: skew(0deg, 0deg);
    margin-top: 0.00cm;
    position: relative;
    top: 11px;
    float: left;
}
.CIRCDosis_SZ18_SP-3_SH1_SK0_MA0 a:link,
.CIRCDosis_SZ18_SP-3_SH1_SK0_MA0 a:visited,
.CIRCDosis_SZ18_SP-3_SH1_SK0_MA0 a:hover {
    color: #000000;
    text-decoration: none;
    transition: all 0.2s ease-in-out;
}

.wordCIRCTEXT {
    position: absolute;
    background-color: #b400ff;
    width: 15px;
|
123 |
+
height: 10px;
|
124 |
+
border: 1px solid #000000;
|
125 |
+
|
126 |
+
|
127 |
+
}
|
128 |
+
|
129 |
+
.r-xoduu5xo {
|
130 |
+
display: inline-flex;
|
131 |
+
top: -2px;
|
132 |
+
}
|
133 |
+
|
134 |
+
.r-xoduu5xoo {
|
135 |
+
display: inline-flex;
|
136 |
+
top: -4px;
|
137 |
+
}
|
138 |
+
.css-901oaoS {
|
139 |
+
border: 0 solid black;
|
140 |
+
box-sizing: border-box;
|
141 |
+
color: #ffffff;
|
142 |
+
display: inline;
|
143 |
+
font-size: 14px;
|
144 |
+
margin-bottom: 0px;
|
145 |
+
margin-left: 0px;
|
146 |
+
margin-right: 3px;
|
147 |
+
margin-top: 0px;
|
148 |
+
padding-bottom: 0px;
|
149 |
+
padding-left: 7px;
|
150 |
+
letter-spacing: 3px;
|
151 |
+
padding-right: 0px;
|
152 |
+
padding-top: 0px;
|
153 |
+
white-space: pre-wrap;
|
154 |
+
word-wrap: break-word;
|
155 |
+
font-family: 'Oswald', sans-serif;
|
156 |
+
font-weight: 300;
|
157 |
+
|
158 |
+
}
|
159 |
+
.css-901oao2sx {
|
160 |
+
border: 0 solid black;
|
161 |
+
box-sizing: border-box;
|
162 |
+
color: #000000;
|
163 |
+
display: inline;
|
164 |
+
font-weight: 600;
|
165 |
+
font-size: 20px;
|
166 |
+
margin-bottom: 0px;
|
167 |
+
margin-left: 0px;
|
168 |
+
margin-right: 0px;
|
169 |
+
margin-top: 0px;
|
170 |
+
padding-bottom: 0px;
|
171 |
+
padding-left: 0px;
|
172 |
+
letter-spacing: 3px;
|
173 |
+
padding-right: 0px;
|
174 |
+
padding-top: 0px;
|
175 |
+
white-space: pre-wrap;
|
176 |
+
word-wrap: break-word;
|
177 |
+
font-family: 'Oswald', sans-serif;
|
178 |
+
|
179 |
+
}
|
180 |
+
|
181 |
+
.write2 {
|
182 |
+
|
183 |
+
background-color: #000000;
|
184 |
+
|
185 |
+
word-wrap: break-word;
|
186 |
+
border: 1px solid black;
|
187 |
+
box-sizing: border-box;
|
188 |
+
|
189 |
+
|
190 |
+
|
191 |
+
|
192 |
+
|
193 |
+
}
|
194 |
+
|
195 |
+
.r-xoduu5x {
|
196 |
+
display: inline-flex;
|
197 |
+
top: 8px;
|
198 |
+
}
|
199 |
+
|
200 |
+
.css-901oao2sx {
|
201 |
+
border: 0 solid black;
|
202 |
+
box-sizing: border-box;
|
203 |
+
color: #000000;
|
204 |
+
display: inline;
|
205 |
+
font-weight: 600;
|
206 |
+
font-size: 20px;
|
207 |
+
margin-bottom: 0px;
|
208 |
+
margin-left: 0px;
|
209 |
+
margin-right: 3px;
|
210 |
+
margin-top: 0px;
|
211 |
+
padding-bottom: 0px;
|
212 |
+
padding-left: 0px;
|
213 |
+
letter-spacing: 3px;
|
214 |
+
padding-right: 0px;
|
215 |
+
padding-top: 0px;
|
216 |
+
white-space: pre-wrap;
|
217 |
+
word-wrap: break-word;
|
218 |
+
font-family: 'Oswald', sans-serif;
|
219 |
+
|
220 |
+
}
|
221 |
+
.css-901oao2sx {
|
222 |
+
border: 0 solid black;
|
223 |
+
box-sizing: border-box;
|
224 |
+
color: #000000;
|
225 |
+
display: inline;
|
226 |
+
font-weight: 600;
|
227 |
+
font-size: 20px;
|
228 |
+
margin-bottom: 0px;
|
229 |
+
margin-left: 0px;
|
230 |
+
margin-right: 2px;
|
231 |
+
margin-top: 0px;
|
232 |
+
padding-bottom: 0px;
|
233 |
+
padding-left: 0px;
|
234 |
+
letter-spacing: 3px;
|
235 |
+
padding-right: 0px;
|
236 |
+
padding-top: -5px;
|
237 |
+
white-space: pre-wrap;
|
238 |
+
word-wrap: break-word;
|
239 |
+
font-family: 'Oswald', sans-serif;
|
240 |
+
|
241 |
+
}
|
242 |
+
|
243 |
+
|
244 |
+
</style>
|
245 |
+
|
246 |
+
</head>
|
247 |
+
|
248 |
+
<body>
|
249 |
+
|
250 |
+
<body>
|
251 |
+
<h1>Conversation Generator + circulartext A.I</h1>
|
252 |
+
<form method="post">
|
253 |
+
<label for="prompt">Enter Prompt:</label>
|
254 |
+
<input type="text" name="prompt" id="prompt" required>
|
255 |
+
<button type="submit">Generate Conversation</button>
|
256 |
+
</form>
|
257 |
+
{% if conversation %}
|
258 |
+
<h2>Generated Conversation:</h2>
|
259 |
+
<p>{{ conversation }}</p>
|
260 |
+
{% endif %}
|
261 |
+
|
262 |
+
{% include 'first.html' %}
|
263 |
+
|
264 |
+
<!-- Form for running backend script -->
|
265 |
+
<form method="post" onsubmit="runAAMain(); return false;">
|
266 |
+
<button type="submit">Run Backend Script</button>
|
267 |
+
</form>
|
268 |
+
|
269 |
+
<script>
|
270 |
+
async function generateAndDisplay() {
|
271 |
+
const prompt = document.getElementById('prompt').value;
|
272 |
+
const response = await fetch('/', {
|
273 |
+
method: 'POST',
|
274 |
+
headers: {
|
275 |
+
'Content-Type': 'application/json',
|
276 |
+
},
|
277 |
+
body: JSON.stringify({
|
278 |
+
prompt: prompt,
|
279 |
+
}),
|
280 |
+
});
|
281 |
+
|
282 |
+
const result = await response.json();
|
283 |
+
document.getElementById('conversation-container').innerHTML = `<h2>Generated Conversation:</h2><p>${result.conversation}</p>`;
|
284 |
+
}
|
285 |
+
|
286 |
+
async function runAAMain() {
|
287 |
+
const response = await fetch('/run_aamain', {
|
288 |
+
method: 'POST',
|
289 |
+
});
|
290 |
+
|
291 |
+
const result = await response.json();
|
292 |
+
alert(result.message); // You can handle the result as needed
|
293 |
+
|
294 |
+
// Reload the conversation container after running the backend script
|
295 |
+
loadConversation();
|
296 |
+
}
|
297 |
+
|
298 |
+
// Function to reload the conversation container every 10 seconds
|
299 |
+
function autoRefresh() {
|
300 |
+
setInterval(loadConversation, 10000); // 10,000 milliseconds = 10 seconds
|
301 |
+
}
|
302 |
+
|
303 |
+
// Call the function when the page loads
|
304 |
+
window.onload = function () {
|
305 |
+
autoRefresh();
|
306 |
+
loadConversation(); // Initial load
|
307 |
+
};
|
308 |
+
|
309 |
+
// Function to fetch and load the conversation into the container
|
310 |
+
async function loadConversation() {
|
311 |
+
const response = await fetch('/');
|
312 |
+
const html = await response.text();
|
313 |
+
|
314 |
+
// Extract the content of the conversation container
|
315 |
+
const conversationContainer = document.getElementById('conversation-container');
|
316 |
+
const newContent = new DOMParser().parseFromString(html, 'text/html').getElementById('conversation-container').innerHTML;
|
317 |
+
|
318 |
+
// Replace the content of the conversation container
|
319 |
+
conversationContainer.innerHTML = newContent;
|
320 |
+
}
|
321 |
+
</script>
|
322 |
+
</body>
|
323 |
</html>
|
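The page above posts the prompt form to "/" and fires a POST to "/run_aamain", expecting JSON bodies with "conversation" and "message" fields. The Flask app that serves this template is not shown in this section; the sketch below is only an illustration of the contract those fetch calls assume (the two route paths come from the calls themselves, while everything else, including generate_conversation, is hypothetical).

# Illustrative Flask routes matching index.html's fetch calls (not the uploaded app files).
from flask import Flask, jsonify, render_template, request
import subprocess

app = Flask(__name__)

def generate_conversation(prompt):
    # Placeholder: the real project builds this from its generator scripts.
    return f"(generated conversation for: {prompt})"

@app.route('/', methods=['GET', 'POST'])
def index():
    conversation = None
    if request.method == 'POST':
        data = request.get_json(silent=True) or request.form
        conversation = generate_conversation(data.get('prompt', ''))
        if request.is_json:
            # Path used by generateAndDisplay(), which expects a JSON response.
            return jsonify({'conversation': conversation})
    return render_template('index.html', conversation=conversation)

@app.route('/run_aamain', methods=['POST'])
def run_aamain():
    # AAmain.py chains the numbered backend scripts; the page only reads back "message".
    subprocess.run(['python', 'AAmain.py'], check=False)
    return jsonify({'message': 'AAmain.py finished'})

if __name__ == '__main__':
    app.run(debug=True)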
mod8.py
ADDED
@@ -0,0 +1,101 @@
import csv
import os
import random

def print_matrix(matrix):
    for row in matrix:
        print(','.join(map(str, row)))

def generate_random_values(num_values):
    return [random.randint(0, 8) for _ in range(num_values)]

def randomize_l1(matrix, random_values, identical_positions, combination_positions):
    for i in range(len(matrix)):
        if (i, i + 1) in identical_positions or (i + 1, i) in identical_positions:
            # Use the same random value for identical positions
            matrix[i][0] = random_values[i]
            matrix[i + 1][0] = random_values[i]
        elif (i, i) in combination_positions:
            # Use the same random value for each letter within the combination
            matrix[i][0] = random_values[i]
        else:
            # Use a common random value for non-identical and non-combination positions
            matrix[i][0] = random_values[0]

# Function to find positions of the same letters in the text
def find_same_letter_positions(text):
    same_letter_positions = []
    for i in range(len(text)):
        for j in range(i + 1, len(text)):
            if text[i] == text[j]:
                same_letter_positions.append((i, j))
                same_letter_positions.append((j, i))  # Include (j, i) as well
    return same_letter_positions

# Function to find positions of the specified combinations in the text
def find_combination_positions(text, combinations):
    combination_positions = []
    for combination in combinations:
        i = 0
        while i < len(text) - len(combination) + 1:
            if text[i:i+len(combination)] == combination:
                for j in range(i, i+len(combination)):
                    combination_positions.append((j, j))
                i += len(combination)  # Move to the next position after the combination
            else:
                i += 1
    return combination_positions

# Function to create a matrix from the text
def create_matrix_from_text(text):
    num_rows = len(text)
    num_columns = num_rows

    # Initialize a matrix with zeros
    matrix = [[0] * num_columns for _ in range(num_rows)]

    # Fill the matrix with data
    for i in range(num_rows):
        matrix[i][0] = text[i]  # Assign letters to the first column

    return matrix

# Process all text files from text1.csv to text19.csv
for i in range(1, 20):
    csv_filename = f'text{i}.csv'
    combo_filename = f'text{i}combo1.csv'

    # Read the text from the CSV file
    with open(csv_filename, 'r') as file:
        reader = csv.reader(file)
        text = next(reader)[0]

    # Find positions of the same letters
    same_letter_positions = find_same_letter_positions(text)
    print(f"\n{text} - Same Letter Positions:", same_letter_positions)

    # Specify the combinations to find
    combinations_to_find = ["bl", "wh", "sa", "wo", "no", "ve", "ab", "gro", "pu", "lo", "co", "bus", "pla", "ac", "at", "pr", "fa", "gr", "to", "or", "fa", "fr", "ki", "qu", "cl", "ok", "fig", "run", "ee", "BL", "WH", "SA", "WO", "NO", "VE", "AB", "GRO", "PU", "LO", "CO", "BUS", "PLA", "AC", "AT", "PR", "FA", "GR", "TO", "OR", "FA", "FR", "KI", "QU", "CL", "OK", "FIG", "RUN", "EE"]

    # Find positions of the specified combinations
    combination_positions = find_combination_positions(text, combinations_to_find)
    print(f"{text} - Combination Positions:", combination_positions)

    # Create a matrix based on the number of letters in the text
    matrix = create_matrix_from_text(text)

    # Generate random values for each letter position
    random_values = generate_random_values(len(text))

    # Randomly set values for the L1 column
    randomize_l1(matrix, random_values, same_letter_positions, combination_positions)

    # Print the matrix after the first modifications
    print(f"{text} - Matrix After Randomization:")
    print_matrix(matrix)

    # Save the matrix to the combo file
    with open(combo_filename, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        for row in matrix:
            writer.writerow(row)
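Since the mod8*.py variants all share these helpers, a small worked example may make the output format easier to read. The word "lot" and the random values [5, 2, 7] below are purely illustrative, and the snippet assumes mod8.py's functions have been imported into a Python session:

# Illustrative run of the mod8.py helpers on a single word.
text = "lot"

print(find_same_letter_positions(text))
# []                    -> "lot" has no repeated letters

print(find_combination_positions(text, ["lo", "to"]))
# [(0, 0), (1, 1)]      -> the combination "lo" covers indices 0 and 1

matrix = create_matrix_from_text(text)   # 3x3 matrix; column 0 holds 'l', 'o', 't'
randomize_l1(matrix, [5, 2, 7],
             find_same_letter_positions(text),
             find_combination_positions(text, ["lo", "to"]))
print_matrix(matrix)
# 5,0,0                 -> the letters in column 0 are replaced by numbers:
# 2,0,0                    indices inside a combination get their own random value,
# 5,0,0                    every remaining position shares random_values[0]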
mod8l2.py
ADDED
@@ -0,0 +1,108 @@
import random
import csv

def print_matrix(matrix):
    for row in matrix:
        print(','.join(map(str, row)))

def generate_random_values(num_values, zero_weight=0.5):
    # Generate random values with a 50% chance of being 0 (zero_weight is currently unused)
    values = [0] + [random.randint(1, 11) for _ in range(num_values - 1)]
    return [random.choice([0, val]) for val in values]

def randomize_l1(matrix, random_values, identical_positions, combination_positions):
    # If there are no matches or combination positions, set all numbers to the same value
    if not identical_positions and not combination_positions:
        common_random_value = random_values[0]
        for i in range(len(matrix)):
            matrix[i][0] = common_random_value
    else:
        for i in range(len(matrix)):
            if (i, i + 1) in identical_positions or (i + 1, i) in identical_positions:
                # Use the same random value for identical positions
                matrix[i][0] = random_values[i]
                matrix[i + 1][0] = random_values[i]
            elif (i, i) in combination_positions:
                # Use the same random value for each letter within the combination
                matrix[i][0] = random_values[i]
            else:
                # Use a common random value for non-identical and non-combination positions
                matrix[i][0] = random_values[0]

# Function to find positions of the same letters in the text
def find_same_letter_positions(text):
    same_letter_positions = []
    for i in range(len(text)):
        for j in range(i + 1, len(text)):
            if text[i] == text[j]:
                same_letter_positions.append((i, j))
                same_letter_positions.append((j, i))  # Include (j, i) as well
    return same_letter_positions

# Function to find positions of the specified combinations in the text
def find_combination_positions(text, combinations):
    combination_positions = []
    for combination in combinations:
        i = 0
        while i < len(text) - len(combination) + 1:
            if text[i:i+len(combination)] == combination:
                for j in range(i, i+len(combination)):
                    combination_positions.append((j, j))
                i += len(combination)  # Move to the next position after the combination
            else:
                i += 1
    return combination_positions

# Function to create a matrix from the text
def create_matrix_from_text(text):
    num_rows = len(text)
    num_columns = num_rows

    # Initialize a matrix with zeros
    matrix = [[0] * num_columns for _ in range(num_rows)]

    # Fill the matrix with data
    for i in range(num_rows):
        matrix[i][0] = text[i]  # Assign letters to the first column

    return matrix

# Process all text files from text1.csv to text19.csv
for i in range(1, 20):
    csv_filename = f'text{i}.csv'
    combo_filename = f'text{i}combo2.csv'

    # Read the text from the CSV file
    with open(csv_filename, 'r') as file:
        reader = csv.reader(file)
        text = next(reader)[0]

    # Find positions of the same letters
    same_letter_positions = find_same_letter_positions(text)
    print(f"\n{text} - Same Letter Positions:", same_letter_positions)

    # Specify the combinations to find
    combinations_to_find = ["bl", "wh", "sa", "wo", "no", "ve", "ab", "gro", "pu", "lo", "co", "bus", "pla", "ac", "at", "pr", "fa", "gr", "to", "or", "fa", "fr", "ki", "qu", "cl", "ok", "fig", "run", "ee", "BL", "WH", "SA", "WO", "NO", "VE", "AB", "GRO", "PU", "LO", "CO", "BUS", "PLA", "AC", "AT", "PR", "FA", "GR", "TO", "OR", "FA", "FR", "KI", "QU", "CL", "OK", "FIG", "RUN", "EE"]

    # Find positions of the specified combinations
    combination_positions = find_combination_positions(text, combinations_to_find)
    print(f"{text} - Combination Positions:", combination_positions)

    # Create a matrix based on the number of letters in the text
    matrix = create_matrix_from_text(text)

    # Generate random values for each letter position with 50% chance of being 0
    random_values = generate_random_values(len(text), zero_weight=0.5)

    # Randomly set values for the L1 column
    randomize_l1(matrix, random_values, same_letter_positions, combination_positions)

    # Print the matrix after the first modifications
    print(f"{text} - Matrix After Randomization:")
    print_matrix(matrix)

    # Save the matrix to the combo file
    with open(combo_filename, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        for row in matrix:
            writer.writerow(row)
mod8l3.py
ADDED
@@ -0,0 +1,79 @@
import random
import csv

def print_matrix(matrix):
    for row in matrix:
        print(','.join(map(str, row)))

def generate_random_values(num_values):
    return [random.randint(0, 14) for _ in range(num_values)]

def randomize_l1(matrix, random_values, identical_positions, combination_positions):
    for i in range(len(matrix)):
        if (i, i + 1) in identical_positions or (i + 1, i) in identical_positions:
            matrix[i][0] = random_values[i]
            matrix[i + 1][0] = random_values[i]
        elif (i, i) in combination_positions:
            matrix[i][0] = random_values[i]
        else:
            matrix[i][0] = random_values[0]

def find_same_letter_positions(text):
    same_letter_positions = []
    for i in range(len(text)):
        for j in range(i + 1, len(text)):
            if text[i] == text[j]:
                same_letter_positions.append((i, j))
                same_letter_positions.append((j, i))
    return same_letter_positions

def find_combination_positions(text, combinations):
    combination_positions = []
    for combination in combinations:
        i = 0
        while i < len(text) - len(combination) + 1:
            if text[i:i+len(combination)] == combination:
                for j in range(i, i+len(combination)):
                    combination_positions.append((j, j))
                i += len(combination)
            else:
                i += 1
    return combination_positions

def create_matrix_from_text(text):
    num_rows = len(text)
    num_columns = num_rows
    matrix = [[0] * num_columns for _ in range(num_rows)]

    for i in range(num_rows):
        matrix[i][0] = text[i]

    return matrix

for i in range(1, 20):
    csv_filename = f'text{i}.csv'
    combo_filename = f'text{i}combo3.csv'

    with open(csv_filename, 'r') as file:
        reader = csv.reader(file)
        text = next(reader)[0]

    same_letter_positions = find_same_letter_positions(text)
    print(f"\n{text} - Same Letter Positions:", same_letter_positions)

    combinations_to_find = ["BL", "WH", "SA", "WO", "NO", "VE", "AB", "GRO", "PU", "LO", "CO", "BUS", "PLA", "AC", "AT", "PR", "FA", "GR", "TO", "OR", "FA", "FR", "KI", "QU", "CL", "OK", "FIG", "RUN", "EE", "GH", "SI", "OX", "LAB", "ALL", "PRO", "gh", "si", "ox", "lab", "all", "pro", "ro", "to", "ra", "ho", "ge", "ve", "gr", "ab", "gro", "pu", "lo", "co", "bus", "pla", "ac", "at", "pr", "fa", "gr", "to", "or", "fa", "fr", "ki", "qu", "cl", "ok", "fig", "run", "ee"]

    combination_positions = find_combination_positions(text, combinations_to_find)
    print(f"{text} - Combination Positions:", combination_positions)

    matrix = create_matrix_from_text(text)
    random_values = generate_random_values(len(text))
    randomize_l1(matrix, random_values, same_letter_positions, combination_positions)

    print(f"{text} - Matrix After Randomization:")
    print_matrix(matrix)

    with open(combo_filename, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        for row in matrix:
            writer.writerow(row)
mod8l4.py
ADDED
@@ -0,0 +1,92 @@
import random
import csv

def print_matrix(matrix):
    for row in matrix:
        print(','.join(map(str, row)))

def generate_random_values(num_values, zero_weight=0.5):
    values = [3] + [random.randint(1, 7) for _ in range(num_values - 1)]
    return [random.choice([0, val]) for val in values]

def randomize_l1(matrix, random_values, identical_positions, combination_positions):
    if not identical_positions and not combination_positions:
        common_random_value = random_values[0]
        for i in range(len(matrix)):
            matrix[i][0] = common_random_value
    else:
        for i in range(len(matrix)):
            if (i, i + 1) in identical_positions or (i + 1, i) in identical_positions:
                matrix[i][0] = random_values[i]
                matrix[i + 1][0] = random_values[i]
            elif (i, i) in combination_positions:
                matrix[i][0] = random_values[i]
            else:
                matrix[i][0] = random_values[0]

def find_same_letter_positions(text):
    same_letter_positions = []
    for i in range(len(text)):
        for j in range(i + 1, len(text)):
            if text[i] == text[j]:
                same_letter_positions.append((i, j))
                same_letter_positions.append((j, i))
    return same_letter_positions

def find_combination_positions(text, combinations):
    combination_positions = []
    for combination in combinations:
        i = 0
        while i < len(text) - len(combination) + 1:
            if text[i:i+len(combination)] == combination:
                for j in range(i, i+len(combination)):
                    combination_positions.append((j, j))
                i += len(combination)
            else:
                i += 1
    return combination_positions

def create_matrix_from_text(text):
    num_rows = len(text)
    num_columns = num_rows
    matrix = [[0] * num_columns for _ in range(num_rows)]

    for i in range(num_rows):
        matrix[i][0] = text[i]

    return matrix

for i in range(1, 20):
    csv_filename = f'text{i}.csv'
    combo4_filename = f'text{i}combo4.csv'
    combo5_filename = f'text{i}combo5.csv'

    with open(csv_filename, 'r') as file:
        reader = csv.reader(file)
        text = next(reader)[0]

    same_letter_positions = find_same_letter_positions(text)
    print(f"\n{text} - Same Letter Positions:", same_letter_positions)

    combinations_to_find = ["ve", "ab", "gro", "pu", "lo", "co", "bus", "pla", "ac", "at", "pr", "fa", "gr", "to", "or", "fa", "fr", "ki", "qu", "cl", "ok", "fig", "run"]

    combination_positions = find_combination_positions(text, combinations_to_find)
    print(f"{text} - Combination Positions:", combination_positions)

    matrix = create_matrix_from_text(text)
    random_values = generate_random_values(len(text), zero_weight=0.5)

    randomize_l1(matrix, random_values, same_letter_positions, combination_positions)

    print(f"{text} - Matrix After Randomization:")
    print_matrix(matrix)

    with open(combo4_filename, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        for row in matrix:
            writer.writerow(row)

    with open(combo5_filename, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        for row in matrix:
            writer.writerow(row)
mod8l5.py
ADDED
@@ -0,0 +1,80 @@
import random
import csv

def print_matrix(matrix):
    for row in matrix:
        print(','.join(map(str, row)))

def generate_random_values(num_values):
    return [random.randint(0, 12) for _ in range(num_values)]

def randomize_l1(matrix, random_values, identical_positions, combination_positions):
    for i in range(len(matrix)):
        if (i, i + 1) in identical_positions or (i + 1, i) in identical_positions:
            matrix[i][0] = random_values[i]
            matrix[i + 1][0] = random_values[i]
        elif (i, i) in combination_positions:
            matrix[i][0] = random_values[i]
        else:
            matrix[i][0] = random_values[0]

def find_same_letter_positions(text):
    same_letter_positions = []
    for i in range(len(text)):
        for j in range(i + 1, len(text)):
            if text[i] == text[j]:
                same_letter_positions.append((i, j))
                same_letter_positions.append((j, i))
    return same_letter_positions

def find_combination_positions(text, combinations):
    combination_positions = []
    for combination in combinations:
        i = 0
        while i < len(text) - len(combination) + 1:
            if text[i:i+len(combination)] == combination:
                for j in range(i, i+len(combination)):
                    combination_positions.append((j, j))
                i += len(combination)
            else:
                i += 1
    return combination_positions

def create_matrix_from_text(text):
    num_rows = len(text)
    num_columns = num_rows
    matrix = [[0] * num_columns for _ in range(num_rows)]

    for i in range(num_rows):
        matrix[i][0] = text[i]

    return matrix

for i in range(1, 20):
    csv_filename = f'text{i}.csv'
    combo6_filename = f'text{i}combo6.csv'

    with open(csv_filename, 'r') as file:
        reader = csv.reader(file)
        text = next(reader)[0]

    same_letter_positions = find_same_letter_positions(text)
    print(f"\n{text} - Same Letter Positions:", same_letter_positions)

    combinations_to_find = ["ya", "te", "co", "wa", "pr", "pi", "act", "pa", "ja", "sa", "ve", "ab", "gro", "pu", "lo", "co", "bus", "pla", "ac", "at", "pr", "fa", "gr", "to", "or", "fa", "fr", "ki", "qu", "cl", "ok", "fig", "run", "ee"]

    combination_positions = find_combination_positions(text, combinations_to_find)
    print(f"{text} - Combination Positions:", combination_positions)

    matrix = create_matrix_from_text(text)
    random_values = generate_random_values(len(text))

    randomize_l1(matrix, random_values, same_letter_positions, combination_positions)

    print(f"{text} - Matrix After Randomization:")
    print_matrix(matrix)

    with open(combo6_filename, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        for row in matrix:
            writer.writerow(row)
textto.py
ADDED
@@ -0,0 +1,19 @@
import csv
import os

def separate_words(input_csv):
    with open(input_csv, 'r', newline='', encoding='utf-8') as csvfile:
        reader = csv.reader(csvfile)

        for row_index, row in enumerate(reader):
            for col_index, word in enumerate(row):
                # Create a separate file for each word.
                # NOTE: the filename uses only the row index, so each row of
                # text.csv is expected to hold a single word; extra words in
                # the same row would overwrite the same output file.
                output_file = f'text{row_index + 1}.csv'

                with open(output_file, 'w', newline='', encoding='utf-8') as word_file:
                    writer = csv.writer(word_file)
                    writer.writerow([word])

if __name__ == "__main__":
    input_csv = "text.csv"
    separate_words(input_csv)
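A hypothetical round trip through separate_words() (the three words below are just an illustration; any words written to text.csv by the earlier backend steps would do):

# Illustrative demo: build a small text.csv, then split it into per-word files.
import csv

with open('text.csv', 'w', newline='', encoding='utf-8') as f:
    csv.writer(f).writerows([['basketball'], ['was'], ['invented']])

separate_words('text.csv')   # writes text1.csv, text2.csv, text3.csv, one word each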
word_database.txt
ADDED
@@ -0,0 +1,143 @@
movie
excited
waiting
long
time
production
real
coded
digital
favorite
asking
doing
basketball
soccer
football
baseball
soup
food
burgers
pizza
fruit
pinapple
milk
jello
canddy
candy
rice
greens
lettuce
outmeal
cereal
dogs
cats
animals
goats
sheeps
movies
money
bank
account
keeping
looking
moving
boxes
elephants
movement
coding
developing
going
cruise
ship
boat
bahamas
cats
foods
healthy
eating
important
pennylvania
alanta
north carolina
new york
france
paris
work
jobs
computers
commputer
grocery
glamorous
version
truck
pickup
play
types
games
applications
quantum
speeds
advancements
technological
glimpse
countless
technology
future
walking
hello
fuck
going
work
about
jordan
Jordan's
Jordan
1993
season
superstar
NBA
championship
leading
points
assist
career
chicago
scared
tongue
energy
disguise
business
older
grown
call
bills
garden
house
fallen
blossoms
lawn
love
forever
most
big
fan
clout
space
team
doing
today
woke
work
relax
fart
Grrah
quicker
thicker
richer
bics
computer.
football,
baseball,
basketball,
data
the