from detectree2model.predictions.predict import run_detectree2
from polygons_processing.postpprocess_detectree2 import postprocess
from generate_tree_images.generate_tree_images import generate_tree_images
from classification.classification_predict import classify
import os
import json


def row_to_feature(row):
    """Convert one dataframe row into a GeoJSON Feature dict.

    Expects the row to carry "id", "Confidence_score", "coordinates"
    (a single polygon ring) and "species" (attached by the pipeline below).
    """
    return {
        "id": row["id"],
        "type": "Feature",
        "properties": {"Confidence_score": row["Confidence_score"]},
        # GeoJSON polygons are a list of rings; each row holds one ring.
        "geometry": {"type": "Polygon", "coordinates": [row["coordinates"]]},
        "species": row["species"],
    }


def export_geojson(df, filename):
    """Write *df* as a GeoJSON FeatureCollection to '<filename>.geojson'.

    The CRS is pinned to EPSG:32720 (UTM zone 20S), matching the raster
    tile this pipeline processes.
    """
    feature_collection = {
        "type": "FeatureCollection",
        "crs": {
            "type": "name",
            "properties": {"name": "urn:ogc:def:crs:EPSG::32720"},
        },
        "features": [row_to_feature(row) for _, row in df.iterrows()],
    }
    # BUG FIX: the original opened a hard-coded literal path and ignored
    # the `filename` argument entirely, so export_geojson(df, 'result')
    # never produced 'result.geojson'.
    out_path = f"{filename}.geojson"
    with open(out_path, "w") as f:
        json.dump(feature_collection, f)
    print(f"GeoJSON data exported to '{out_path}' file.")


def main():
    """Run the full pipeline: detect crowns, postprocess, classify, export.

    tif_input is the FOLDER containing the tif (detectree2 works with the
    folder), while generate_tree_images needs a path including the file —
    hence tif_file_name. All intermediate and final files go to
    output_directory; generate_tree_images writes the cut-out tree images
    to a separate './tree_images' folder.
    """
    tif_input = "/Users/jonathanseele/ETH/Hackathons/EcoHackathon/WeCanopy/test/"
    # NOTE(review): documented as needed for generate_tree_images, but the
    # original script never actually used it — kept for reference.
    tif_file_name = "TreeCrownVectorDataset_761588_9673769_20_20_32720"

    output_directory = os.path.join(os.getcwd(), "outputs")
    os.makedirs(output_directory, exist_ok=True)

    run_detectree2(tif_input, store_path=output_directory)

    processed_output_df = postprocess(
        os.path.join(output_directory, "detectree2_delin.geojson")
    )
    processed_geojson = os.path.join(output_directory, "processed_delin.geojson")

    generate_tree_images(processed_geojson, tif_input)

    output_folder = "./tree_images"
    # For each cut-out tree image, keep the top-3 (class, probability)
    # pairs; one list per image, aligned with the dataframe rows.
    all_top_3_list = []
    for file_name in os.listdir(output_folder):
        probs = classify(os.path.join(output_folder, file_name))
        top_3 = probs.head(3)
        all_top_3_list.append([[cls, prob] for cls, prob in top_3.items()])

    processed_output_df["species"] = all_top_3_list

    export_geojson(processed_output_df, "result")


if __name__ == "__main__":
    main()