Dataset columns:

| column    | dtype        | stats             |
|-----------|--------------|-------------------|
| file_name | large_string | lengths 4–140     |
| prefix    | large_string | lengths 0–39k     |
| suffix    | large_string | lengths 0–36.1k   |
| middle    | large_string | lengths 0–29.4k   |
| fim_type  | large_string | 4 classes         |
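Each row holds one fill-in-the-middle (FIM) example: `prefix` and `suffix` are the visible context, `middle` is the span to be predicted, and `fim_type` labels what kind of syntactic span was masked. Concatenating the three string columns reproduces the file named by `file_name`. A minimal loading sketch; the parquet file name below is a placeholder, since the dump does not say how the split is stored on disk:

```python
import pandas as pd

# Placeholder path: the viewer dump does not name the underlying data file.
df = pd.read_parquet("train.parquet",
                     columns=["file_name", "prefix", "middle", "suffix", "fim_type"])

# fim_type is a small categorical: 4 classes per the schema above.
print(df["fim_type"].value_counts())

# Invariant of the format: the three string columns tile the original file.
reassembled = df["prefix"] + df["middle"] + df["suffix"]
```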
file_name: `app_2_wl.py`

prefix:

```python
import streamlit as st

# Base packages
import pandas as pd
import numpy as np
import datetime
import altair as alt
import matplotlib.pyplot as plt

# Find coordinates
from geopy.geocoders import Nominatim
geolocator = Nominatim(user_agent="myapp2")
import time

# Plot static maps
import cartopy.crs as ccrs
import cartopy.feature as cfeature

# Plot interactive maps
import geopandas as gpd
from shapely import wkt
from bokeh.io import output_notebook, show, output_file
from bokeh.plotting import figure
from bokeh.models import GeoJSONDataSource, ColumnDataSource
import json
from bokeh.models import HoverTool
import math
from scipy.optimize import curve_fit
import plotly.express as px

st.header(" Xibaar yu aju ci Jangorëy Koronaa ci Senegal 🇸🇳")

st.sidebar.markdown("*yeesal gu muj: 25/03/2020*")
st.sidebar.markdown("---")
st.sidebar.header("Ressources utiles")
st.sidebar.markdown("Numero guir woté bu jamp bu jeuk: **78 172 10 81**")
st.sidebar.markdown("Numero guir woté bu jamp ñaaréle: **76 765 97 31**")
st.sidebar.markdown("Numero guir woté bu jamp ñeetéle: **70 717 14 92**")
st.sidebar.markdown("Numero boye woté té do fayye bu ministere: **800 00 50 50**")
st.sidebar.markdown("Samu: **1515**")
st.sidebar.markdown("Besel ci sa telefone : **#2121#**")
st.sidebar.markdown("[Saytul say sa yarame ci Jangoroji ci Prevcovid19](http://www.prevcovid19.com/#/teste)")
st.sidebar.markdown("[Tweetru ministre gui eub walu wergu yaram ](https://twitter.com/MinisteredelaS1)")
st.sidebar.markdown("[Booleb xéeti mbir ak màndargaay jumtukaayu ](https://github.com/maelfabien/COVID-19-Senegal)")
st.sidebar.markdown("---")
st.sidebar.header("Jokko ak wa ministere")
st.sidebar.markdown("Ministre gui eub walu wergu yaram ak boolem boko / Fann Residence")
st.sidebar.markdown("Rue Aimé Césaire, Dakar, Senegal")
st.sidebar.markdown("+221 800 00 50 50 - [email protected]")
st.sidebar.markdown("---")
st.sidebar.markdown("Ñi ka derale moye [Maël Fabien](https://maelfabien.github.io/) ak [Dakar Institute of Technology](https://dit.sn/)")

# I. Dataframe
df = pd.read_csv("COVID_Dakar.csv", sep=";")
df['Date'] = pd.to_datetime(df['Date'], dayfirst=True)
#st.write(df)

evol_cases = df[['Date', 'Positif', 'Negatif', 'Décédé', 'Guéri']].groupby("Date").sum().cumsum()

st.subheader("Ci tënkk")

total_positif = evol_cases.tail(1)['Positif'][0]
total_negatif = evol_cases.tail(1)['Negatif'][0]
total_decede = evol_cases.tail(1)['Décédé'][0]
total_geuri = evol_cases.tail(1)['Guéri'][0]

st.markdown("Limu ñi feebar: <span style='font-size:1.5em;'>%s</span>"%(total_positif - total_geuri), unsafe_allow_html=True)
st.markdown("Limu ñi faatu: <span style='font-size:1.5em;'>%s</span>"%(total_decede), unsafe_allow_html=True)
st.markdown("Limu ñi wer: <span style='font-size:1.5em;'>%s</span>"%(total_geuri), unsafe_allow_html=True)
st.markdown("dayob ñi wer : <span style='font-size:1.5em;'>%s</span>"%(np.round(total_geuri / total_positif, 3) * 100), unsafe_allow_html=True)
st.markdown("dàyob yoqute ñi feebar bis bu ay : <span style='font-size:1.5em;'>%s</span>"%(np.round(pd.DataFrame(np.sqrt(evol_cases['Positif'].pct_change(periods=2)+1)-1).tail(1)['Positif'][0] * 100, 2)), unsafe_allow_html=True)
st.markdown("Mboolem ñi ame Koronaa: <span style='font-size:1.5em;'>%s</span>"%(total_positif), unsafe_allow_html=True)
st.markdown("Mboolem ñi ñu saytu te ñu mùcc ci feebar bi: <span style='font-size:1.5em;'>%s</span>"%(total_negatif), unsafe_allow_html=True)
st.markdown("Mboolem ñi ñu saytu: <span style='font-size:1.5em;'>%s</span>"%(total_positif + total_negatif), unsafe_allow_html=True)
st.markdown("dayob ñi ame feebar bi ci ñi ñu saytu: <span style='font-size:1.5em;'>%s</span>"%(np.round(total_positif / (total_positif + total_negatif), 3) * 100), unsafe_allow_html=True)

# II. Map
st.markdown("---")
st.subheader("ñi ame feebar bi fu ñu féete")

shapefile = 'app/ne_110m_admin_0_countries.shp'

#Read shapefile using Geopandas
gdf = gpd.read_file(shapefile)[['ADMIN', 'ADM0_A3', 'geometry']]
gdf.columns = ['country', 'country_code', 'geometry']
gdf = gdf[gdf['country'] == "Senegal"]
grid_crs = gdf.crs
gdf_json = json.loads(gdf.to_json())
grid = json.dumps(gdf_json)

cities = pd.read_csv("city_coordinates.csv", index_col=0)

def find_lat(x):
    try:
        return float(citie
```

suffix:

```python
['Ville'] == x]['Latitude'])
    except TypeError:
        return None

def find_long(x):
    try:
        return float(cities[cities['Ville'] == x]['Longitude'])
    except TypeError:
        return None

summary = df[['Positif', 'Ville']].groupby("Ville").sum().reset_index()
summary['latitude'] = summary['Ville'].apply(lambda x: find_lat(x))
summary['longitude'] = summary['Ville'].apply(lambda x: find_long(x))

geosource = GeoJSONDataSource(geojson=grid)
pointsource = ColumnDataSource(summary)

hover = HoverTool(tooltips=[('Ville', '@Ville'), ('Limu ñi ame Koronaa ', '@Positif')])

#Create figure object.
p = figure(plot_height=550, plot_width=700, tools=[hover, 'pan', 'wheel_zoom'])
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
p.xaxis.visible = False
p.yaxis.visible = False
p.outline_line_color = None

patch = p.patches('xs', 'ys', source=geosource, fill_color='#fff7bc',
                  line_color='black', line_width=0.35, fill_alpha=1,
                  hover_fill_color="#fec44f")

#Add patch renderer to figure.
patch = p.patches('xs', 'ys', source=geosource, fill_color='lightgrey',
                  line_color='black', line_width=0.25, fill_alpha=1)

p.circle('longitude', 'latitude', source=pointsource, size=15)

st.bokeh_chart(p)

# III. Map
st.markdown("---")
st.subheader(" Yoqute limu ñi ame Koronaa ci Senegal")

highlight = alt.selection(type='single', on='mouseover', fields=['Positif'], nearest=True)

chart = alt.Chart(evol_cases.reset_index()).mark_line(point=True, strokeWidth=5).encode(
    x='Date:T',
    y='Positif:Q',
    tooltip='Positif:Q'
).add_selection(
    highlight
).properties(height=400, width=700)

st.write(chart.interactive())

st.markdown("---")
st.subheader("Mingalé rewu Pays-Bas")
st.write("Senegaal rewle bigua xamanetané limu way-dëkké dafa méggo ak rewu Pays-bas (Fukk ak jurrom benn million), ba taxna ab mégele meuna dox di diganté ñaari dëkk yoyé. Doneté yoqute Jangorëy Koronaa gui ci rewum Senegaal la geune yéxé ci sinu dioni yalla taye, luñu setlu ci ni Jangoro gui di doxé diarna bayi xel wayé itameu lathe na niou xalate ci.Fi gua xamené mome leu rewu Senegaal tolu ci Jangorëy Koronaa dafa mengo ci fukki fan ak juroom ci guinaw fi rew mi di Pays-Bas Tolone,wayé xayma gogu boye seteu juroom ñaari faney le guir rew pays-bas té Senegaal fukki fan ak juroom ñeet. Lim yi aju ci rewu Pays-Bas ñuguike jeulé ci Wikipedia: https://en.wikipedia.org/wiki/2020_coronavirus_pandemic_in_the_Netherlands")

df_nl = pd.read_csv("df_nl.csv")

plt.figure(figsize=(16, 10))
plt.plot(df_nl['Netherlands'], linestyle="--", linewidth=5, label="Pays-Bas")
plt.plot(df_nl['Senegal'], label="Sénégal", linewidth=5)
plt.figtext(.5, .9, 'Evolution des cas au Sénégal et aux Pays-Bas', fontsize=30, ha='center')
plt.legend()
st.pyplot(plt)

# IV. Contamination
st.markdown("---")
st.subheader("Tassarok Jangorogui")
st.write("Ñugui xamé ñeneu ñu jeulé Jangoroji ci ñu jugué bimeu rew, ci niit ñu feebar yigua xamené ño waleu ñeni niit.Limu ñigua xamné ño ameu Jangoroji té jeuléko ci biir rewmi, moye waleu gui geuna ragalu ci walanté Jangoroji..")

facteur = df[['Date', 'Facteur']].dropna()
facteur['Count'] = 1

importe = facteur[facteur['Facteur'] == "Importé"].groupby("Date").sum().cumsum().reset_index()
voyage = facteur[facteur['Facteur'] == "Contact"].groupby("Date").sum().cumsum().reset_index()
communaute = facteur[facteur['Facteur'] == "Communauté"].groupby("Date").sum().cumsum().reset_index()

df_int = pd.merge(importe, voyage, left_on='Date', right_on='Date', how='outer')
df_int = pd.merge(df_int, communaute, left_on='Date', right_on='Date', how='outer')
df_int['Date'] = pd.to_datetime(df_int['Date'], dayfirst=True)
df_int = df_int.sort_values("Date").ffill().fillna(0)
df_int.columns = ["Date", "Importes", "Contact", "Communauté"]

ch0 = alt.Chart(df_int).transform_fold(
    ['Importes', 'Contact', 'Communauté'],
).mark_line(size=5).encode(
    x='Date:T',
    y='value:Q',
    color='key:N'
).properties(height=500, width=700)

st.altair_chart(ch0)

st.write("Ñu dieulé Jangoroji bitimeu rew, té waleu Jangoroji ñeneu ñu dëkk Senegaal, Ñugui jugué ci rew yi :")

ch3 = alt.Chart(df.dropna(subset=['Source/Voyage'])).mark_bar().encode(
    x='Source/Voyage:N',
    y=alt.Y('count()', title='Nombre de patients')
).properties(title="Provenance des malades", height=300, width=700)

st.write(ch3)

# Interactive Map
st.write("Natalu feega xamené fila jangorey koronaa bi jugué:")

df3 = px.data.gapminder().query("year == 2007")
df2 = df3[(df3['country'] == "Italy") | (df3['country'] == "Senegal") | (df3['country'] == "United Kingdom") | (df3['country'] == "France") | (df3['country'] == "Spain")]

fig = px.line_geo(df2, locations="iso_alpha", projection="orthographic")
st.plotly_chart(fig)

# V. Population
st.markdown("---")
st.subheader("Way-dëkk ñu feebar daleu.")
st.write("Les chiffres présentés ci-dessous tiennent compte des publication du Ministère de la Santé et de l'Action Sociale. Certaines données sont manquantes, et nous n'affichons que les valeurs connues à ce jour.")

st.write("1. At ñu eupe ci yi Jangoroji di diap ", np.mean(df['Age'].dropna()), " ans")

ch = alt.Chart(df).mark_bar().encode(
    x='Age:Q',
    y=alt.Y('count()', title='Nombre de patients')
).properties(title="Atu aji wop gui ", height=300, width=700)

st.write(ch)

st.write("2. Ñu eup ci aji-wop yi aye goor lañu")
st.write(pd.DataFrame(df[['Homme', 'Femme']].dropna().sum()).transpose())

st.write("3. Ñu eupe ci ñu feebar bi diapeu ndakaru lañu dëkké")

ch2 = alt.Chart(df.dropna(subset=['Ville'])).mark_bar().encode(
    x='Ville:N',
    y=alt.Y('count()', title='Nombre de patients')
).properties(title="Ville connue du patient", height=300, width=700)

st.write(ch2)

st.write("4. Ñu eupe ci niit ñu amé Jangoroji Senegaal lañu dëkk.")
st.write(df['Resident Senegal'].dropna().value_counts())

st.write("5. Ñu eupe ci niit ñu amé Jangoroji Senegaal lañu dëkk.")
st.write(df['Resident Senegal'].dropna().value_counts())

st.write("6. Faan ñigua xamné aji wop gui ci laye teud lalu opital : ", np.mean(df['Temps Hospitalisation (j)'].dropna()), " Faan")
```

middle: `s[cities`

fim_type: `identifier_name`
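The split in this first row lands mid-token: the prefix stops inside the identifier `cities`, the middle carries `s[cities`, and the suffix resumes with `['Ville'] == x]['Latitude'])`. A quick check of that seam, with the fragments copied from the row above:

```python
prefix_tail = "return float(citie"            # last characters of the prefix column
middle = "s[cities"                           # the masked span (fim_type: identifier_name)
suffix_head = "['Ville'] == x]['Latitude'])"  # first characters of the suffix column

# Reassembly restores the original line of find_lat().
assert prefix_tail + middle + suffix_head == \
    "return float(cities[cities['Ville'] == x]['Latitude'])"
```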
file_name: `app_2_wl.py`

prefix:

```python
import streamlit as st

# Base packages
import pandas as pd
import numpy as np
import datetime
import altair as alt
import matplotlib.pyplot as plt

# Find coordinates
from geopy.geocoders import Nominatim
geolocator = Nominatim(user_agent="myapp2")
import time

# Plot static maps
import cartopy.crs as ccrs
import cartopy.feature as cfeature

# Plot interactive maps
import geopandas as gpd
from shapely import wkt
from bokeh.io import output_notebook, show, output_file
from bokeh.plotting import figure
from bokeh.models import GeoJSONDataSource, ColumnDataSource
import json
from bokeh.models import HoverTool
import math
from scipy.optimize import curve_fit
import plotly.express as px

st.header(" Xibaar yu aju ci Jangorëy Koronaa ci Senegal 🇸🇳")

st.sidebar.markdown("*yeesal gu muj: 25/03/2020*")
st.sidebar.markdown("---")
st.sidebar.header("Ressources utiles")
st.sidebar.markdown("Numero guir woté bu jamp bu jeuk: **78 172 10 81**")
st.sidebar.markdown("Numero guir woté bu jamp ñaaréle: **76 765 97 31**")
st.sidebar.markdown("Numero guir woté bu jamp ñeetéle: **70 717 14 92**")
st.sidebar.markdown("Numero boye woté té do fayye bu ministere: **800 00 50 50**")
st.sidebar.markdown("Samu: **1515**")
st.sidebar.markdown("Besel ci sa telefone : **#2121#**")
st.sidebar.markdown("[Saytul say sa yarame ci Jangoroji ci Prevcovid19](http://www.prevcovid19.com/#/teste)")
st.sidebar.markdown("[Tweetru ministre gui eub walu wergu yaram ](https://twitter.com/MinisteredelaS1)")
st.sidebar.markdown("[Booleb xéeti mbir ak màndargaay jumtukaayu ](https://github.com/maelfabien/COVID-19-Senegal)")
st.sidebar.markdown("---")
st.sidebar.header("Jokko ak wa ministere")
st.sidebar.markdown("Ministre gui eub walu wergu yaram ak boolem boko / Fann Residence")
st.sidebar.markdown("Rue Aimé Césaire, Dakar, Senegal")
st.sidebar.markdown("+221 800 00 50 50 - [email protected]")
st.sidebar.markdown("---")
st.sidebar.markdown("Ñi ka derale moye [Maël Fabien](https://maelfabien.github.io/) ak [Dakar Institute of Technology](https://dit.sn/)")

# I. Dataframe
df = pd.read_csv("COVID_Dakar.csv", sep=";")
df['Date'] = pd.to_datetime(df['Date'], dayfirst=True)
#st.write(df)

evol_cases = df[['Date', 'Positif', 'Negatif', 'Décédé', 'Guéri']].groupby("Date").sum().cumsum()

st.subheader("Ci tënkk")

total_positif = evol_cases.tail(1)['Positif'][0]
total_negatif = evol_cases.tail(1)['Negatif'][0]
total_decede = evol_cases.tail(1)['Décédé'][0]
total_geuri = evol_cases.tail(1)['Guéri'][0]

st.markdown("Limu ñi feebar: <span style='font-size:1.5em;'>%s</span>"%(total_positif - total_geuri), unsafe_allow_html=True)
st.markdown("Limu ñi faatu: <span style='font-size:1.5em;'>%s</span>"%(total_decede), unsafe_allow_html=True)
st.markdown("Limu ñi wer: <span style='font-size:1.5em;'>%s</span>"%(total_geuri), unsafe_allow_html=True)
st.markdown("dayob ñi wer : <span style='font-size:1.5em;'>%s</span>"%(np.round(total_geuri / total_positif, 3) * 100), unsafe_allow_html=True)
st.markdown("dàyob yoqute ñi feebar bis bu ay : <span style='font-size:1.5em;'>%s</span>"%(np.round(pd.DataFrame(np.sqrt(evol_cases['Positif'].pct_change(periods=2)+1)-1).tail(1)['Positif'][0] * 100, 2)), unsafe_allow_html=True)
st.markdown("Mboolem ñi ame Koronaa: <span style='font-size:1.5em;'>%s</span>"%(total_positif), unsafe_allow_html=True)
st.markdown("Mboolem ñi ñu saytu te ñu mùcc ci feebar bi: <span style='font-size:1.5em;'>%s</span>"%(total_negatif), unsafe_allow_html=True)
st.markdown("Mboolem ñi ñu saytu: <span style='font-size:1.5em;'>%s</span>"%(total_positif + total_negatif), unsafe_allow_html=True)
st.markdown("dayob ñi ame feebar bi ci ñi ñu saytu: <span style='font-size:1.5em;'>%s</span>"%(np.round(total_positif / (total_positif + total_negatif), 3) * 100), unsafe_allow_html=True)

# II. Map
st.markdown("---")
st.subheader("ñi ame feebar bi fu ñu féete")

shapefile = 'app/ne_110m_admin_0_countries.shp'

#Read shapefile using Geopandas
gdf = gpd.read_file(shapefile)[['ADMIN', 'ADM0_A3', 'geometry']]
gdf.columns = ['country', 'country_code', 'geometry']
gdf = gdf[gdf['country'] == "Senegal"]
grid_crs = gdf.crs
gdf_json = json.loads(gdf.to_json())
grid = json.dumps(gdf_json)

cities = pd.read_csv("city_coordinates.csv", index_col=0)

def find_lat(x):
    try:
        return float(cities[cities['Ville'] == x]['Latitude'])
    except TypeError:
        return None

def find_long(x):
    try:
        return float(cities[cities['Ville']
```

suffix:

```python
lle").sum().reset_index()
summary['latitude'] = summary['Ville'].apply(lambda x: find_lat(x))
summary['longitude'] = summary['Ville'].apply(lambda x: find_long(x))

geosource = GeoJSONDataSource(geojson=grid)
pointsource = ColumnDataSource(summary)

hover = HoverTool(tooltips=[('Ville', '@Ville'), ('Limu ñi ame Koronaa ', '@Positif')])

#Create figure object.
p = figure(plot_height=550, plot_width=700, tools=[hover, 'pan', 'wheel_zoom'])
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
p.xaxis.visible = False
p.yaxis.visible = False
p.outline_line_color = None

patch = p.patches('xs', 'ys', source=geosource, fill_color='#fff7bc',
                  line_color='black', line_width=0.35, fill_alpha=1,
                  hover_fill_color="#fec44f")

#Add patch renderer to figure.
patch = p.patches('xs', 'ys', source=geosource, fill_color='lightgrey',
                  line_color='black', line_width=0.25, fill_alpha=1)

p.circle('longitude', 'latitude', source=pointsource, size=15)

st.bokeh_chart(p)

# III. Map
st.markdown("---")
st.subheader(" Yoqute limu ñi ame Koronaa ci Senegal")

highlight = alt.selection(type='single', on='mouseover', fields=['Positif'], nearest=True)

chart = alt.Chart(evol_cases.reset_index()).mark_line(point=True, strokeWidth=5).encode(
    x='Date:T',
    y='Positif:Q',
    tooltip='Positif:Q'
).add_selection(
    highlight
).properties(height=400, width=700)

st.write(chart.interactive())

st.markdown("---")
st.subheader("Mingalé rewu Pays-Bas")
st.write("Senegaal rewle bigua xamanetané limu way-dëkké dafa méggo ak rewu Pays-bas (Fukk ak jurrom benn million), ba taxna ab mégele meuna dox di diganté ñaari dëkk yoyé. Doneté yoqute Jangorëy Koronaa gui ci rewum Senegaal la geune yéxé ci sinu dioni yalla taye, luñu setlu ci ni Jangoro gui di doxé diarna bayi xel wayé itameu lathe na niou xalate ci.Fi gua xamené mome leu rewu Senegaal tolu ci Jangorëy Koronaa dafa mengo ci fukki fan ak juroom ci guinaw fi rew mi di Pays-Bas Tolone,wayé xayma gogu boye seteu juroom ñaari faney le guir rew pays-bas té Senegaal fukki fan ak juroom ñeet. Lim yi aju ci rewu Pays-Bas ñuguike jeulé ci Wikipedia: https://en.wikipedia.org/wiki/2020_coronavirus_pandemic_in_the_Netherlands")

df_nl = pd.read_csv("df_nl.csv")

plt.figure(figsize=(16, 10))
plt.plot(df_nl['Netherlands'], linestyle="--", linewidth=5, label="Pays-Bas")
plt.plot(df_nl['Senegal'], label="Sénégal", linewidth=5)
plt.figtext(.5, .9, 'Evolution des cas au Sénégal et aux Pays-Bas', fontsize=30, ha='center')
plt.legend()
st.pyplot(plt)

# IV. Contamination
st.markdown("---")
st.subheader("Tassarok Jangorogui")
st.write("Ñugui xamé ñeneu ñu jeulé Jangoroji ci ñu jugué bimeu rew, ci niit ñu feebar yigua xamené ño waleu ñeni niit.Limu ñigua xamné ño ameu Jangoroji té jeuléko ci biir rewmi, moye waleu gui geuna ragalu ci walanté Jangoroji..")

facteur = df[['Date', 'Facteur']].dropna()
facteur['Count'] = 1

importe = facteur[facteur['Facteur'] == "Importé"].groupby("Date").sum().cumsum().reset_index()
voyage = facteur[facteur['Facteur'] == "Contact"].groupby("Date").sum().cumsum().reset_index()
communaute = facteur[facteur['Facteur'] == "Communauté"].groupby("Date").sum().cumsum().reset_index()

df_int = pd.merge(importe, voyage, left_on='Date', right_on='Date', how='outer')
df_int = pd.merge(df_int, communaute, left_on='Date', right_on='Date', how='outer')
df_int['Date'] = pd.to_datetime(df_int['Date'], dayfirst=True)
df_int = df_int.sort_values("Date").ffill().fillna(0)
df_int.columns = ["Date", "Importes", "Contact", "Communauté"]

ch0 = alt.Chart(df_int).transform_fold(
    ['Importes', 'Contact', 'Communauté'],
).mark_line(size=5).encode(
    x='Date:T',
    y='value:Q',
    color='key:N'
).properties(height=500, width=700)

st.altair_chart(ch0)

st.write("Ñu dieulé Jangoroji bitimeu rew, té waleu Jangoroji ñeneu ñu dëkk Senegaal, Ñugui jugué ci rew yi :")

ch3 = alt.Chart(df.dropna(subset=['Source/Voyage'])).mark_bar().encode(
    x='Source/Voyage:N',
    y=alt.Y('count()', title='Nombre de patients')
).properties(title="Provenance des malades", height=300, width=700)

st.write(ch3)

# Interactive Map
st.write("Natalu feega xamené fila jangorey koronaa bi jugué:")

df3 = px.data.gapminder().query("year == 2007")
df2 = df3[(df3['country'] == "Italy") | (df3['country'] == "Senegal") | (df3['country'] == "United Kingdom") | (df3['country'] == "France") | (df3['country'] == "Spain")]

fig = px.line_geo(df2, locations="iso_alpha", projection="orthographic")
st.plotly_chart(fig)

# V. Population
st.markdown("---")
st.subheader("Way-dëkk ñu feebar daleu.")
st.write("Les chiffres présentés ci-dessous tiennent compte des publication du Ministère de la Santé et de l'Action Sociale. Certaines données sont manquantes, et nous n'affichons que les valeurs connues à ce jour.")

st.write("1. At ñu eupe ci yi Jangoroji di diap ", np.mean(df['Age'].dropna()), " ans")

ch = alt.Chart(df).mark_bar().encode(
    x='Age:Q',
    y=alt.Y('count()', title='Nombre de patients')
).properties(title="Atu aji wop gui ", height=300, width=700)

st.write(ch)

st.write("2. Ñu eup ci aji-wop yi aye goor lañu")
st.write(pd.DataFrame(df[['Homme', 'Femme']].dropna().sum()).transpose())

st.write("3. Ñu eupe ci ñu feebar bi diapeu ndakaru lañu dëkké")

ch2 = alt.Chart(df.dropna(subset=['Ville'])).mark_bar().encode(
    x='Ville:N',
    y=alt.Y('count()', title='Nombre de patients')
).properties(title="Ville connue du patient", height=300, width=700)

st.write(ch2)

st.write("4. Ñu eupe ci niit ñu amé Jangoroji Senegaal lañu dëkk.")
st.write(df['Resident Senegal'].dropna().value_counts())

st.write("5. Ñu eupe ci niit ñu amé Jangoroji Senegaal lañu dëkk.")
st.write(df['Resident Senegal'].dropna().value_counts())

st.write("6. Faan ñigua xamné aji wop gui ci laye teud lalu opital : ", np.mean(df['Temps Hospitalisation (j)'].dropna()), " Faan")
```

middle:

```python
 == x]['Longitude'])
    except TypeError:
        return None

summary = df[['Positif', 'Ville']].groupby("Vi
```

fim_type: `identifier_body`
file_name: `PerformanceTester.py`

prefix:

```python
import numpy
import subprocess
import sys
import os.path
from PerformanceTesterJob import Job, printc, run
import types
from tabulate081 import tabulate

enable_scipy = True
try:
    from scipy import stats
except:
    enable_scipy = False

enable_plotting = True
try:
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt
except:
    enable_plotting = False

def divisors(n):
    result = []
    for i in range(1, n+1):
        if n % i == 0:
            result.append(i)
    return result

class Tester():
    def __init__(self):
        self.Jobs = []
        self.cx = []
        self.cy = []
        self.cz = []
        self.timer = ''
        self.domains = []
        self.domains_color = []
        self.domains_symbol = []
        self.template = ''
        self.iterations = []
        self.output_suffix = ''
        self.executable = []
        self.executable_name = []
        self.executable_symbol = []
        self.uniq_total_cpus = []
        self.group_submit_files = []

    def SetTimer(self, timer):
        self.timer = timer

    def SetTemplate(self, template):
        self.template = template

    def AddExecutable(self, executable, name, symbol='o'):
        self.executable.append(executable)
        self.executable_name.append(name)
        self.executable_symbol.append(symbol)

    def SetOutputSuffix(self, output_suffix):
        self.output_suffix = output_suffix

    def SetCpuConfig(self, cx, cy, cz):
        self.cx = cx
        self.cy = cy
        self.cz = cz

    def AddDomain(self, dx, dy, dz, color='#000000', symbol='o'):
        self.domains.append([dx, dy, dz])
        self.domains_color.append(color)
        self.domains_symbol.append(symbol)

    def AddIterations(self, cpu_from, cpu_to, iterations):
```

suffix:

```python
    def GenerateJobs(self):
        utc = set()
        for exec in self.executable:
            for x in self.cx:
                for y in self.cy:
                    for z in self.cz:
                        utc.add(numpy.prod([x, y, z]))
                        for iteration in self.iterations:
                            if numpy.prod([x, y, z]) > iteration[0] and numpy.prod([x, y, z]) <= iteration[1]:
                                for d in self.domains:
                                    self.Jobs.append(Job(d, [x, y, z], iteration[2], output_suffix=self.output_suffix, executable=exec, job_exec=exec.replace("./", "").replace(".out", "")))
                        # if numpy.prod([x,y,z]) >= 64 and numpy.prod([x,y,z]) <= 1600:
                        # elif numpy.prod([x,y,z]) < 64:
                        #     for d in self.domains:
                        #         self.Jobs.append(Job(d, [x,y,z], 250))
        self.Jobs.sort()
        self.uniq_total_cpus = list(utc)
        self.uniq_total_cpus.sort()

    def GenerateJobsTotalCPU(self, totalCPU, max_x=2**10, max_y=2**10, max_z=2**10):
        utc = set()
        cpu_configs = []
        totalCPU_divisors = divisors(totalCPU)
        for exec in self.executable:
            for x in totalCPU_divisors:
                for y in totalCPU_divisors:
                    for z in totalCPU_divisors:
                        code = "{0}_{1}_{2}".format(x, y, z)
                        if numpy.prod([x, y, z]) == totalCPU:
                            if x <= max_x and y <= max_y and z <= max_z:
                                if code not in cpu_configs:
                                    cpu_configs.append(code)
                                    utc.add(numpy.prod([x, y, z]))
                                    for iteration in self.iterations:
                                        if numpy.prod([x, y, z]) > iteration[0] and numpy.prod([x, y, z]) <= iteration[1]:
                                            for d in self.domains:
                                                self.Jobs.append(Job(d, [x, y, z], iteration[2], output_suffix=self.output_suffix, executable=exec, job_exec=exec.replace("./", "").replace(".out", "")))
        self.Jobs.sort()
        self.uniq_total_cpus = list(utc)
        self.uniq_total_cpus.sort()

    def MakeSubmits(self):
        for J in self.Jobs:
            J.MakeSubmit(self.template)
            print('Prepared submit for job', J.job_name)

    def MakeGroupSubmits(self):
        utc = set()
        for J in self.Jobs:
            utc.add(J.total_cpu)
        utc = list(utc)
        utc.sort()
        print(utc)
        for tc in utc:
            first = True
            for J in self.Jobs:
                if J.total_cpu == tc:
                    if first:
                        J.MakeSubmit(self.template, part="all", mode="w", alternative_name="E.group_{0:05d}".format(tc))
                        self.group_submit_files.append("E.group_{0:05d}.submit.sh".format(tc))
                        first = False
                    else:
                        J.MakeSubmit(self.template, part="mpirun", mode="a", alternative_name="E.group_{0:05d}".format(tc))

    def SubmitAll(self):
        for J in self.Jobs:
            J.Submit()

    def SubmitGroupAll(self):
        for f in self.group_submit_files:
            run("sbatch {0}".format(f))

    def ReadJobTimers(self):
        for J in self.Jobs:
            J.ReadTimer(self.timer)

    def ReadGroupJobTimers(self):
        printc("Processing outfiles... ", end="")
        files = set()
        for iexec in range(len(self.executable)):
            exec = self.executable[iexec]
            execn = self.executable_name[iexec]
            # print(exec)
            utc = set()
            for J in self.Jobs:
                utc.add(J.total_cpu)
            utc = list(utc)
            utc.sort()
            # print(utc)
            for tc in utc:
                outfile = "E.group_{0:05d}.{1}".format(tc, self.output_suffix)
                # files.add(outfile)
                run('cat {0} | grep -E "^E\.|{1}" > {0}.clean'.format(outfile, self.timer), quiet=True)
                files.add("{0}.clean".format(outfile))
                with open("{0}.clean".format(outfile), 'r') as f:
                    fname = ''
                    for line in f:
                        #print(line.strip(),line[0:1] )
                        if "E.{0}".format(execn) in line:
                            fname = line.strip() + "." + self.output_suffix
                            files.add(fname)
                        elif self.timer in line:
                            if len(fname) > 0:
                                with open(fname, "w") as fw:
                                    fw.write(line)
                                fname = ""
        printc("\tdone", color='green')
        printc("Reading timers... ", end="")
        for J in self.Jobs:
            J.ReadTimer(self.timer)
        printc("\tdone", color='green')
        printc("Cleaning up files... ", end="")
        for file in files:
            run("rm -f {0}".format(file), quiet=True)
        printc("\tdone", color='green')

    def ProcessStats(self):
        if enable_plotting:
            fig = plt.figure(figsize=[16, 8])
            ax = fig.add_subplot(111)
            ax.set_xscale("log", nonpositive='clip')
            ax.set_yscale("log", nonpositive='clip')
        for iexec in range(len(self.executable)):
            exec = self.executable[iexec]
            execn = self.executable_name[iexec]
            for di in range(len(self.domains)):
                d = self.domains[di]
                min_times = numpy.empty(len(self.uniq_total_cpus))
                min_times[:] = numpy.NAN
                printc('\nProcessing Domain {0} for {1}'.format(str(d), execn), 'blue')
                for ci in range(len(self.uniq_total_cpus)):
                    c = self.uniq_total_cpus[ci]
                    JobsOK = []
                    JobsNK = []
                    printc('\tProcessing cpu config {0}'.format(str(c)), 'violet')
                    T = []
                    for J in self.Jobs:
                        if J.total_cpu == c and J.domain_size == d and J.executable == exec:
                            if type(J.timers_results[self.timer]) is list:
                                JobsOK.append(J)
                            else:
                                JobsNK.append(J)
                    JobsOK.sort(key=lambda x: float(x.timers_results[self.timer][4])/float(x.timers_results[self.timer][3]))
                    if len(JobsOK) > 0:
                        min_times[ci] = float(JobsOK[0].timers_results[self.timer][4])/float(JobsOK[0].timers_results[self.timer][3])
                    for J in JobsOK:
                        # printc('\t\tOK:', 'green', end=" ")
                        tpts = float(J.timers_results[self.timer][4])/float(J.timers_results[self.timer][3])
                        tptss = '{0:7.4f} s'.format(tpts)
                        tptsp = '{0:5.2f} x'.format(tpts/min_times[ci])
                        tptspc = '{0:10.7f} s'.format(tpts*J.total_cpu)
                        # print(J.job_name, '\t', tpts)
                        ds = '{0} x {1} x {2}'.format(J.domain_size[0], J.domain_size[1], J.domain_size[2])
                        T.append(['OK', execn, ds, J.cpus[0], J.cpus[1], J.cpus[2], J.timesteps, tptss, tptsp, tptspc])
                    for J in JobsNK:
                        # printc('\t\tFAIL:', 'red', end=" ")
                        # print(J.job_name, '\t', J.timers_results)
                        ds = '{0} x {1} x {2}'.format(J.domain_size[0], J.domain_size[1], J.domain_size[2])
                        T.append(['FAIL', execn, ds, J.cpus[0], J.cpus[1], J.cpus[2], '-', '-', '-', '-'])
                    header = ['result', 'executable', 'domain size', 'cpu x', 'cpu y', 'cpu z', 'timesteps', 'time / iter', 'to fastest', 'time / iter / core']
                    if len(T) > 0:
                        print('\t\t' + tabulate(T, headers=header, tablefmt="fancy_grid").replace('\n', '\n\t\t'))
                if enable_plotting:
                    Q, W = -1, -1
                    for q in range(len(self.uniq_total_cpus)):
                        print(len(self.uniq_total_cpus))
                        if min_times[q] > 0:
                            for w in range(len(self.uniq_total_cpus)-1, 0, -1):
                                if min_times[w] > 0:
                                    if Q == -1 and W == -1:
                                        Q = q
                                        W = w + 1
                                        # print("Q, W", q, w)
                    x1, y1 = self.uniq_total_cpus[Q:W], min_times[Q:W]
                    x2 = numpy.log(numpy.array(x1))
                    y2 = numpy.log(numpy.array(y1))
                    print(x1, y1)
                    if enable_scipy:
                        slope, intercept, r_value, p_value, std_err = stats.linregress(x2, y2)
                        #print("LS", slope, intercept, r_value, p_value, std_err)
                        #plt.plot([x1[0], x1[-1]*16], [numpy.exp(x2[0]*slope+intercept), numpy.exp(numpy.log(x1[-1]*16)*slope+intercept)], '-.', color=self.domains_color[di])
                        plt.plot([1, 1024*32], [numpy.exp(numpy.log(1)*slope+intercept), numpy.exp(numpy.log(1024*32)*slope+intercept)], '-.', color=self.domains_color[di])
                    else:
                        print("No SCIPY!")
                    #plt.plot(self.uniq_total_cpus, min_times, self.executable_symbol[iexec], color=self.domains_color[di], label="{0}, {1} ({2:4.3f})".format(execn, self.domains[di], -slope), ms=10)
                    #plt.plot(self.uniq_total_cpus, min_times, color=self.domains_color[di])
                    # print("Q, W", q, w)
                    # plt.plot([self.uniq_total_cpus[Q], self.uniq_total_cpus[W]], [min_times[Q], min_times[Q]/(self.uniq_total_cpus[W]/self.uniq_total_cpus[Q])], '--', color=self.domains_color[di])
                    # print(self.uniq_total_cpus[Q:W])
                    # print(min_times[Q:W])
                    # print(self.uniq_total_cpus, min_times)
        if enable_plotting:
            # print("MakingPlot")
            plt.axis((50, 2000, 0.001, 1))
            plt.xlabel('Total number of cores', fontsize=16)
            plt.ylabel('Average time per iteration', fontsize=16)
            box = ax.get_position()
            ax.set_position([box.x0, box.y0 + box.height * 0.2, box.width, box.height * 0.8])
            plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), fancybox=True, shadow=True, ncol=3)
            plt.grid()
            plt.savefig("nowy.png")
            # plt.show()

    def CpuConfigPlot(self):
        for ds in self.domains:
            name = 'cpu_{0}_{1}_{2}.png'.format(ds[0], ds[1], ds[2])
            title = 'Advection dwarf: global domain size {0} x {1} x {2}'.format(ds[0], ds[1], ds[2])
            JobsOK = []
            for J in self.Jobs:
                if J.domain_size == ds:
                    if type(J.timers_results[self.timer]) is list:
                        JobsOK.append(J)
            JobsOK.sort(key=lambda x: -float(x.timers_results[self.timer][4])/float(x.timers_results[self.timer][3]))
            JobsOK.sort(key=lambda x: -float(x.cpus[2]))
            JobsOK.sort(key=lambda x: float(x.total_cpu))
            y = []
            bestx = []
            besty = []
            labels = []
            y2 = []
            x2 = []
            tcp = 0
            halo_size = []
            for J in JobsOK:
                tpts = float(J.timers_results[self.timer][4])/float(J.timers_results[self.timer][3])
                tptss = '{0:7.4f} s'.format(tpts)
                tptspc = tpts*J.total_cpu
                min_lptps = 1e10
                for K in JobsOK:
                    if J.total_cpu == K.total_cpu:
                        ltpts = float(K.timers_results[self.timer][4])/float(K.timers_results[self.timer][3])
                        if ltpts < min_lptps:
                            min_lptps = ltpts
                #print(J.total_cpu, min_lptps)
                if tpts == min_lptps:
                    bestx.append(len(y))
                    besty.append(tpts)
                #print(numpy.prod(J.cpus), J.cpus[0], J.cpus[1], J.cpus[2], tptss)
                y.append(tpts)
                labels.append('{0} x {1} x {2}'.format(J.cpus[0], J.cpus[1], J.cpus[2]))
                if tcp != J.total_cpu:
                    tcp = J.total_cpu
                    y2.append(len(y)-1)
                    x2.append(tcp)
                box = numpy.array(J.domain_size)/numpy.array(J.cpus)
                pp = (box[0]*box[1]+box[1]*box[2]+box[0]*box[2])*2
                ppt = pp*numpy.prod(J.cpus)
                # print(J.domain_size, J.cpus, box, pp, ppt)
                halo_size.append(ppt)
            x = range(len(y))
            if enable_scipy:
                print("CORR:", stats.pearsonr(y, halo_size))
            else:
                print("No SCIPY")
            plt.clf()
            fig = plt.figure(figsize=(50, 20))
            ax = fig.add_subplot(111)
            #ax.set_xscale("log", nonposx='clip')
            #ax.set_yscale("log", nonposy='clip')
            plt.plot(bestx, besty, 'ro', ms=20)
            plt.plot(x, y, '.', ms=10)
            # for q in range(0, len(y2)-1):
            #     s, e = y2[q], y2[q+1]
            #     print("T", x2[q], s, e)
            #     slope, intercept, r_value, p_value, std_err = stats.linregress(x[s:e], y[s:e])
            #     print(slope, intercept, r_value, p_value, std_err)
            h = numpy.max(y)-numpy.min(y)
            for a in range(len(y2)):
                plt.plot([y2[a]-.5, y2[a]-.5], [numpy.min(y)-h/20, numpy.max(y)+h/20], 'k--')
            plt.xticks(x, labels, rotation='vertical', fontsize=16)
            plt.grid()
            plt.axis([-.5, len(y)-.5, numpy.min(y)-h/20, numpy.max(y)+h/20])
            plt.xlabel('MPI cartesian decomposition: nprocx * nprocy * nprocz', fontsize=28)
            plt.ylabel('Average time per MPDATA call', fontsize=28)
            plt.xticks(x, labels, rotation='vertical', fontsize=16)
            #ax2 = plt.twinx()
            #ax2.set_yscale("log", nonposy='clip')
            #ax2.plot(x, numpy.array(halo_size), 'sr')
            plt.title(title, fontsize=28)
            plt.savefig(name)
```

middle: `self.iterations.append([cpu_from, cpu_to, iterations])`

fim_type: `identifier_body`
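Here `fim_type` is `identifier_body`: the masked middle is the entire one-statement body of `AddIterations`, so prefix plus middle already parses as a complete method. A small check (indentation reconstructed, since the flattened dump drops it):

```python
prefix_tail = "def AddIterations(self, cpu_from, cpu_to, iterations):\n"
middle = "    self.iterations.append([cpu_from, cpu_to, iterations])\n"

# compile() only parses; it confirms the reassembled text is syntactically
# complete without executing anything.
compile(prefix_tail + middle, "<fim_row>", "exec")
```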
file_name: `PerformanceTester.py`

prefix:

```python
import numpy
import subprocess
import sys
import os.path
from PerformanceTesterJob import Job, printc, run
import types
from tabulate081 import tabulate

enable_scipy = True
try:
    from scipy import stats
except:
    enable_scipy = False

enable_plotting = True
try:
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt
except:
    enable_plotting = False

def divisors(n):
    result = []
    for i in range(1, n+1):
        if n % i == 0:
            result.append(i)
    return result

class Tester():
    def __init__(self):
        self.Jobs = []
        self.cx = []
        self.cy = []
        self.cz = []
        self.timer = ''
        self.domains = []
        self.domains_color = []
        self.domains_symbol = []
        self.template = ''
        self.iterations = []
        self.output_suffix = ''
        self.executable = []
        self.executable_name = []
        self.executable_symbol = []
        self.uniq_total_cpus = []
        self.group_submit_files = []

    def SetTimer(self, timer):
        self.timer = timer

    def SetTemplate(self, template):
        self.template = template

    def AddExecutable(self, executable, name, symbol='o'):
        self.executable.append(executable)
        self.executable_name.append(name)
        self.executable_symbol.append(symbol)

    def SetOutputSuffix(self, output_suffix):
        self.output_suffix = output_suffix

    def SetCpuConfig(self, cx, cy, cz):
        self.cx = cx
        self.cy = cy
        self.cz = cz

    def AddDomain(self, dx, dy, dz, color='#000000', symbol='o'):
        self.domains.append([dx, dy, dz])
        self.domains_color.append(color)
        self.domains_symbol.append(symbol)

    def AddIterations(self, cpu_from, cpu_to, iterations):
        self.iterations.append([cpu_from, cpu_to, iterations])

    def GenerateJobs(self):
        utc = set()
        for exec in self.executable:
            for x in self.cx:
                for y in self.cy:
                    for z in self.cz:
                        utc.add(numpy.prod([x, y, z]))
                        for iteration in self.iterations:
                            if numpy.prod([x, y, z]) > iteration[0] and numpy.prod([x, y, z]) <= iteration[1]:
                                for d in self.domains:
                                    self.Jobs.append(Job(d, [x, y, z], iteration[2], output_suffix=self.output_suffix, executable=exec, job_exec=exec.replace("./", "").replace(".out", "")))
                        # if numpy.prod([x,y,z]) >= 64 and numpy.prod([x,y,z]) <= 1600:
                        # elif numpy.prod([x,y,z]) < 64:
                        #     for d in self.domains:
                        #         self.Jobs.append(Job(d, [x,y,z], 250))
        self.Jobs.sort()
        self.uniq_total_cpus = list(utc)
        self.uniq_total_cpus.sort()

    def GenerateJobsTotalCPU(self, totalCPU, max_x=2**10, max_y=2**10, max_z=2**10):
        utc = set()
        cpu_configs = []
        totalCPU_divisors = divisors(totalCPU)
        for exec in self.executable:
            for x in totalCPU_divisors:
                for y in totalCPU_divisors:
                    for z in totalCPU_divisors:
                        code = "{0}_{1}_{2}".format(x, y, z)
                        if numpy.prod([x, y, z]) == totalCPU:
                            if x <= max_x and y <= max_y and z <= max_z:
                                if code not in cpu_configs:
                                    cpu_configs.append(code)
                                    utc.add(numpy.prod([x, y, z]))
                                    for iteration in self.iterations:
```

suffix:

```python
        self.Jobs.sort()
        self.uniq_total_cpus = list(utc)
        self.uniq_total_cpus.sort()

    def MakeSubmits(self):
        for J in self.Jobs:
            J.MakeSubmit(self.template)
            print('Prepared submit for job', J.job_name)

    def MakeGroupSubmits(self):
        utc = set()
        for J in self.Jobs:
            utc.add(J.total_cpu)
        utc = list(utc)
        utc.sort()
        print(utc)
        for tc in utc:
            first = True
            for J in self.Jobs:
                if J.total_cpu == tc:
                    if first:
                        J.MakeSubmit(self.template, part="all", mode="w", alternative_name="E.group_{0:05d}".format(tc))
                        self.group_submit_files.append("E.group_{0:05d}.submit.sh".format(tc))
                        first = False
                    else:
                        J.MakeSubmit(self.template, part="mpirun", mode="a", alternative_name="E.group_{0:05d}".format(tc))

    def SubmitAll(self):
        for J in self.Jobs:
            J.Submit()

    def SubmitGroupAll(self):
        for f in self.group_submit_files:
            run("sbatch {0}".format(f))

    def ReadJobTimers(self):
        for J in self.Jobs:
            J.ReadTimer(self.timer)

    def ReadGroupJobTimers(self):
        printc("Processing outfiles... ", end="")
        files = set()
        for iexec in range(len(self.executable)):
            exec = self.executable[iexec]
            execn = self.executable_name[iexec]
            # print(exec)
            utc = set()
            for J in self.Jobs:
                utc.add(J.total_cpu)
            utc = list(utc)
            utc.sort()
            # print(utc)
            for tc in utc:
                outfile = "E.group_{0:05d}.{1}".format(tc, self.output_suffix)
                # files.add(outfile)
                run('cat {0} | grep -E "^E\.|{1}" > {0}.clean'.format(outfile, self.timer), quiet=True)
                files.add("{0}.clean".format(outfile))
                with open("{0}.clean".format(outfile), 'r') as f:
                    fname = ''
                    for line in f:
                        #print(line.strip(),line[0:1] )
                        if "E.{0}".format(execn) in line:
                            fname = line.strip() + "." + self.output_suffix
                            files.add(fname)
                        elif self.timer in line:
                            if len(fname) > 0:
                                with open(fname, "w") as fw:
                                    fw.write(line)
                                fname = ""
        printc("\tdone", color='green')
        printc("Reading timers... ", end="")
        for J in self.Jobs:
            J.ReadTimer(self.timer)
        printc("\tdone", color='green')
        printc("Cleaning up files... ", end="")
        for file in files:
            run("rm -f {0}".format(file), quiet=True)
        printc("\tdone", color='green')

    def ProcessStats(self):
        if enable_plotting:
            fig = plt.figure(figsize=[16, 8])
            ax = fig.add_subplot(111)
            ax.set_xscale("log", nonpositive='clip')
            ax.set_yscale("log", nonpositive='clip')
        for iexec in range(len(self.executable)):
            exec = self.executable[iexec]
            execn = self.executable_name[iexec]
            for di in range(len(self.domains)):
                d = self.domains[di]
                min_times = numpy.empty(len(self.uniq_total_cpus))
                min_times[:] = numpy.NAN
                printc('\nProcessing Domain {0} for {1}'.format(str(d), execn), 'blue')
                for ci in range(len(self.uniq_total_cpus)):
                    c = self.uniq_total_cpus[ci]
                    JobsOK = []
                    JobsNK = []
                    printc('\tProcessing cpu config {0}'.format(str(c)), 'violet')
                    T = []
                    for J in self.Jobs:
                        if J.total_cpu == c and J.domain_size == d and J.executable == exec:
                            if type(J.timers_results[self.timer]) is list:
                                JobsOK.append(J)
                            else:
                                JobsNK.append(J)
                    JobsOK.sort(key=lambda x: float(x.timers_results[self.timer][4])/float(x.timers_results[self.timer][3]))
                    if len(JobsOK) > 0:
                        min_times[ci] = float(JobsOK[0].timers_results[self.timer][4])/float(JobsOK[0].timers_results[self.timer][3])
                    for J in JobsOK:
                        # printc('\t\tOK:', 'green', end=" ")
                        tpts = float(J.timers_results[self.timer][4])/float(J.timers_results[self.timer][3])
                        tptss = '{0:7.4f} s'.format(tpts)
                        tptsp = '{0:5.2f} x'.format(tpts/min_times[ci])
                        tptspc = '{0:10.7f} s'.format(tpts*J.total_cpu)
                        # print(J.job_name, '\t', tpts)
                        ds = '{0} x {1} x {2}'.format(J.domain_size[0], J.domain_size[1], J.domain_size[2])
                        T.append(['OK', execn, ds, J.cpus[0], J.cpus[1], J.cpus[2], J.timesteps, tptss, tptsp, tptspc])
                    for J in JobsNK:
                        # printc('\t\tFAIL:', 'red', end=" ")
                        # print(J.job_name, '\t', J.timers_results)
                        ds = '{0} x {1} x {2}'.format(J.domain_size[0], J.domain_size[1], J.domain_size[2])
                        T.append(['FAIL', execn, ds, J.cpus[0], J.cpus[1], J.cpus[2], '-', '-', '-', '-'])
                    header = ['result', 'executable', 'domain size', 'cpu x', 'cpu y', 'cpu z', 'timesteps', 'time / iter', 'to fastest', 'time / iter / core']
                    if len(T) > 0:
                        print('\t\t' + tabulate(T, headers=header, tablefmt="fancy_grid").replace('\n', '\n\t\t'))
                if enable_plotting:
                    Q, W = -1, -1
                    for q in range(len(self.uniq_total_cpus)):
                        print(len(self.uniq_total_cpus))
                        if min_times[q] > 0:
                            for w in range(len(self.uniq_total_cpus)-1, 0, -1):
                                if min_times[w] > 0:
                                    if Q == -1 and W == -1:
                                        Q = q
                                        W = w + 1
                                        # print("Q, W", q, w)
                    x1, y1 = self.uniq_total_cpus[Q:W], min_times[Q:W]
                    x2 = numpy.log(numpy.array(x1))
                    y2 = numpy.log(numpy.array(y1))
                    print(x1, y1)
                    if enable_scipy:
                        slope, intercept, r_value, p_value, std_err = stats.linregress(x2, y2)
                        #print("LS", slope, intercept, r_value, p_value, std_err)
                        #plt.plot([x1[0], x1[-1]*16], [numpy.exp(x2[0]*slope+intercept), numpy.exp(numpy.log(x1[-1]*16)*slope+intercept)], '-.', color=self.domains_color[di])
                        plt.plot([1, 1024*32], [numpy.exp(numpy.log(1)*slope+intercept), numpy.exp(numpy.log(1024*32)*slope+intercept)], '-.', color=self.domains_color[di])
                    else:
                        print("No SCIPY!")
                    #plt.plot(self.uniq_total_cpus, min_times, self.executable_symbol[iexec], color=self.domains_color[di], label="{0}, {1} ({2:4.3f})".format(execn, self.domains[di], -slope), ms=10)
                    #plt.plot(self.uniq_total_cpus, min_times, color=self.domains_color[di])
                    # print("Q, W", q, w)
                    # plt.plot([self.uniq_total_cpus[Q], self.uniq_total_cpus[W]], [min_times[Q], min_times[Q]/(self.uniq_total_cpus[W]/self.uniq_total_cpus[Q])], '--', color=self.domains_color[di])
                    # print(self.uniq_total_cpus[Q:W])
                    # print(min_times[Q:W])
                    # print(self.uniq_total_cpus, min_times)
        if enable_plotting:
            # print("MakingPlot")
            plt.axis((50, 2000, 0.001, 1))
            plt.xlabel('Total number of cores', fontsize=16)
            plt.ylabel('Average time per iteration', fontsize=16)
            box = ax.get_position()
            ax.set_position([box.x0, box.y0 + box.height * 0.2, box.width, box.height * 0.8])
            plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), fancybox=True, shadow=True, ncol=3)
            plt.grid()
            plt.savefig("nowy.png")
            # plt.show()

    def CpuConfigPlot(self):
        for ds in self.domains:
            name = 'cpu_{0}_{1}_{2}.png'.format(ds[0], ds[1], ds[2])
            title = 'Advection dwarf: global domain size {0} x {1} x {2}'.format(ds[0], ds[1], ds[2])
            JobsOK = []
            for J in self.Jobs:
                if J.domain_size == ds:
                    if type(J.timers_results[self.timer]) is list:
                        JobsOK.append(J)
            JobsOK.sort(key=lambda x: -float(x.timers_results[self.timer][4])/float(x.timers_results[self.timer][3]))
            JobsOK.sort(key=lambda x: -float(x.cpus[2]))
            JobsOK.sort(key=lambda x: float(x.total_cpu))
            y = []
            bestx = []
            besty = []
            labels = []
            y2 = []
            x2 = []
            tcp = 0
            halo_size = []
            for J in JobsOK:
                tpts = float(J.timers_results[self.timer][4])/float(J.timers_results[self.timer][3])
                tptss = '{0:7.4f} s'.format(tpts)
                tptspc = tpts*J.total_cpu
                min_lptps = 1e10
                for K in JobsOK:
                    if J.total_cpu == K.total_cpu:
                        ltpts = float(K.timers_results[self.timer][4])/float(K.timers_results[self.timer][3])
                        if ltpts < min_lptps:
                            min_lptps = ltpts
                #print(J.total_cpu, min_lptps)
                if tpts == min_lptps:
                    bestx.append(len(y))
                    besty.append(tpts)
                #print(numpy.prod(J.cpus), J.cpus[0], J.cpus[1], J.cpus[2], tptss)
                y.append(tpts)
                labels.append('{0} x {1} x {2}'.format(J.cpus[0], J.cpus[1], J.cpus[2]))
                if tcp != J.total_cpu:
                    tcp = J.total_cpu
                    y2.append(len(y)-1)
                    x2.append(tcp)
                box = numpy.array(J.domain_size)/numpy.array(J.cpus)
                pp = (box[0]*box[1]+box[1]*box[2]+box[0]*box[2])*2
                ppt = pp*numpy.prod(J.cpus)
                # print(J.domain_size, J.cpus, box, pp, ppt)
                halo_size.append(ppt)
            x = range(len(y))
            if enable_scipy:
                print("CORR:", stats.pearsonr(y, halo_size))
            else:
                print("No SCIPY")
            plt.clf()
            fig = plt.figure(figsize=(50, 20))
            ax = fig.add_subplot(111)
            #ax.set_xscale("log", nonposx='clip')
            #ax.set_yscale("log", nonposy='clip')
            plt.plot(bestx, besty, 'ro', ms=20)
            plt.plot(x, y, '.', ms=10)
            # for q in range(0, len(y2)-1):
            #     s, e = y2[q], y2[q+1]
            #     print("T", x2[q], s, e)
            #     slope, intercept, r_value, p_value, std_err = stats.linregress(x[s:e], y[s:e])
            #     print(slope, intercept, r_value, p_value, std_err)
            h = numpy.max(y)-numpy.min(y)
            for a in range(len(y2)):
                plt.plot([y2[a]-.5, y2[a]-.5], [numpy.min(y)-h/20, numpy.max(y)+h/20], 'k--')
            plt.xticks(x, labels, rotation='vertical', fontsize=16)
            plt.grid()
            plt.axis([-.5, len(y)-.5, numpy.min(y)-h/20, numpy.max(y)+h/20])
            plt.xlabel('MPI cartesian decomposition: nprocx * nprocy * nprocz', fontsize=28)
            plt.ylabel('Average time per MPDATA call', fontsize=28)
            plt.xticks(x, labels, rotation='vertical', fontsize=16)
            #ax2 = plt.twinx()
            #ax2.set_yscale("log", nonposy='clip')
            #ax2.plot(x, numpy.array(halo_size), 'sr')
            plt.title(title, fontsize=28)
            plt.savefig(name)
```

middle:

```python
if numpy.prod([x, y, z]) > iteration[0] and numpy.prod([x, y, z]) <= iteration[1]:
    for d in self.domains:
        self.Jobs.append(Job(d, [x, y, z], iteration[2], output_suffix=self.output_suffix, executable=exec, job_exec=exec.replace("./", "").replace(".out", "")))
```

fim_type: `conditional_block`
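For `conditional_block` rows the masked span is a whole `if` statement with its nested body, here the inner scheduling step of `GenerateJobsTotalCPU`. A sketch of the seam (indentation reconstructed; the `self.Jobs.append(Job(...))` call is abbreviated to `...` for brevity):

```python
prefix_tail = "for iteration in self.iterations:\n"
middle = (
    "    if numpy.prod([x, y, z]) > iteration[0] and numpy.prod([x, y, z]) <= iteration[1]:\n"
    "        for d in self.domains:\n"
    "            self.Jobs.append(...)\n"
)

# Parses as a complete loop once prefix and middle are joined.
compile(prefix_tail + middle, "<fim_row>", "exec")
```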
file_name: `PerformanceTester.py`

prefix:

```python
import numpy
import subprocess
import sys
import os.path
from PerformanceTesterJob import Job, printc, run
import types
from tabulate081 import tabulate

enable_scipy = True
try:
    from scipy import stats
except:
    enable_scipy = False

enable_plotting = True
try:
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt
except:
    enable_plotting = False

def divisors(n):
    result = []
    for i in range(1, n+1):
        if n % i == 0:
            result.append(i)
    return result

class Tester():
    def __init__(self):
        self.Jobs = []
        self.cx = []
        self.cy = []
        self.cz = []
        self.timer = ''
        self.domains = []
        self.domains_color = []
        self.domains_symbol = []
        self.template = ''
        self.iterations = []
        self.output_suffix = ''
        self.executable = []
        self.executable_name = []
        self.executable_symbol = []
        self.uniq_total_cpus = []
        self.group_submit_files = []

    def SetTimer(self, timer):
        self.timer = timer

    def SetTemplate(self, template):
        self.template = template

    def AddExecutable(self, executable, name, symbol='o'):
        self.executable.append(executable)
        self.executable_name.append(name)
        self.executable_symbol.append(symbol)

    def SetOutputSuffix(self, output_suffix):
        self.output_suffix = output_suffix

    def SetCpuConfig(self, cx, cy, cz):
        self.cx = cx
        self.cy = cy
        self.cz = cz

    def AddDomain(self, dx, dy, dz, color='#000000', symbol='o'):
        self.domains.append([dx, dy, dz])
        self.domains_color.append(color)
        self.domains_symbol.append(symbol)

    def AddIterations(self, cpu_from, cpu_to, iterations):
        self.iterations.append([cpu_from, cpu_to, iterations])

    def GenerateJobs(self):
        utc = set()
        for exec in self.executable:
            for x in self.cx:
                for y in self.cy:
                    for z in self.cz:
                        utc.add(numpy.prod([x, y, z]))
                        for iteration in self.iterations:
                            if numpy.prod([x, y, z]) > iteration[0] and numpy.prod([x, y, z]) <= iteration[1]:
                                for d in self.domains:
                                    self.Jobs.append(Job(d, [x, y, z], iteration[2], output_suffix=self.output_suffix, executable=exec, job_exec=exec.replace("./", "").replace(".out", "")))
                        # if numpy.prod([x,y,z]) >= 64 and numpy.prod([x,y,z]) <= 1600:
                        # elif numpy.prod([x,y,z]) < 64:
                        #     for d in self.domains:
                        #         self.Jobs.append(Job(d, [x,y,z], 250))
        self.Jobs.sort()
        self.uniq_total_cpus = list(utc)
        self.uniq_total_cpus.sort()

    def GenerateJobsTotalCPU(self, totalCPU, max_x=2**10, max_y=2**10, max_z=2**10):
        utc = set()
        cpu_configs = []
        totalCPU_divisors = divisors(totalCPU)
        for exec in self.executable:
            for x in totalCPU_divisors:
                for y in totalCPU_divisors:
                    for z in totalCPU_divisors:
                        code = "{0}_{1}_{2}".format(x, y, z)
                        if numpy.prod([x, y, z]) == totalCPU:
                            if x <= max_x and y <= max_y and z <= max_z:
                                if code not in cpu_configs:
                                    cpu_configs.append(code)
                                    utc.add(numpy.prod([x, y, z]))
                                    for iteration in self.iterations:
                                        if numpy.prod([x, y, z]) > iteration[0] and numpy.prod([x, y, z]) <= iteration[1]:
                                            for d in self.domains:
                                                self.Jobs.append(Job(d, [x, y, z], iteration[2], output_suffix=self.output_suffix, executable=exec, job_exec=exec.replace("./", "").replace(".out", "")))
        self.Jobs.sort()
        self.uniq_total_cpus = list(utc)
        self.uniq_total_cpus.sort()

    def
```

suffix:

```python
(self):
        for J in self.Jobs:
            J.MakeSubmit(self.template)
            print('Prepared submit for job', J.job_name)

    def MakeGroupSubmits(self):
        utc = set()
        for J in self.Jobs:
            utc.add(J.total_cpu)
        utc = list(utc)
        utc.sort()
        print(utc)
        for tc in utc:
            first = True
            for J in self.Jobs:
                if J.total_cpu == tc:
                    if first:
                        J.MakeSubmit(self.template, part="all", mode="w", alternative_name="E.group_{0:05d}".format(tc))
                        self.group_submit_files.append("E.group_{0:05d}.submit.sh".format(tc))
                        first = False
                    else:
                        J.MakeSubmit(self.template, part="mpirun", mode="a", alternative_name="E.group_{0:05d}".format(tc))

    def SubmitAll(self):
        for J in self.Jobs:
            J.Submit()

    def SubmitGroupAll(self):
        for f in self.group_submit_files:
            run("sbatch {0}".format(f))

    def ReadJobTimers(self):
        for J in self.Jobs:
            J.ReadTimer(self.timer)

    def ReadGroupJobTimers(self):
        printc("Processing outfiles... ", end="")
        files = set()
        for iexec in range(len(self.executable)):
            exec = self.executable[iexec]
            execn = self.executable_name[iexec]
            # print(exec)
            utc = set()
            for J in self.Jobs:
                utc.add(J.total_cpu)
            utc = list(utc)
            utc.sort()
            # print(utc)
            for tc in utc:
                outfile = "E.group_{0:05d}.{1}".format(tc, self.output_suffix)
                # files.add(outfile)
                run('cat {0} | grep -E "^E\.|{1}" > {0}.clean'.format(outfile, self.timer), quiet=True)
                files.add("{0}.clean".format(outfile))
                with open("{0}.clean".format(outfile), 'r') as f:
                    fname = ''
                    for line in f:
                        #print(line.strip(),line[0:1] )
                        if "E.{0}".format(execn) in line:
                            fname = line.strip() + "." + self.output_suffix
                            files.add(fname)
                        elif self.timer in line:
                            if len(fname) > 0:
                                with open(fname, "w") as fw:
                                    fw.write(line)
                                fname = ""
        printc("\tdone", color='green')
        printc("Reading timers... ", end="")
        for J in self.Jobs:
            J.ReadTimer(self.timer)
        printc("\tdone", color='green')
        printc("Cleaning up files... ", end="")
        for file in files:
            run("rm -f {0}".format(file), quiet=True)
        printc("\tdone", color='green')

    def ProcessStats(self):
        if enable_plotting:
            fig = plt.figure(figsize=[16, 8])
            ax = fig.add_subplot(111)
            ax.set_xscale("log", nonpositive='clip')
            ax.set_yscale("log", nonpositive='clip')
        for iexec in range(len(self.executable)):
            exec = self.executable[iexec]
            execn = self.executable_name[iexec]
            for di in range(len(self.domains)):
                d = self.domains[di]
                min_times = numpy.empty(len(self.uniq_total_cpus))
                min_times[:] = numpy.NAN
                printc('\nProcessing Domain {0} for {1}'.format(str(d), execn), 'blue')
                for ci in range(len(self.uniq_total_cpus)):
                    c = self.uniq_total_cpus[ci]
                    JobsOK = []
                    JobsNK = []
                    printc('\tProcessing cpu config {0}'.format(str(c)), 'violet')
                    T = []
                    for J in self.Jobs:
                        if J.total_cpu == c and J.domain_size == d and J.executable == exec:
                            if type(J.timers_results[self.timer]) is list:
                                JobsOK.append(J)
                            else:
                                JobsNK.append(J)
                    JobsOK.sort(key=lambda x: float(x.timers_results[self.timer][4])/float(x.timers_results[self.timer][3]))
                    if len(JobsOK) > 0:
                        min_times[ci] = float(JobsOK[0].timers_results[self.timer][4])/float(JobsOK[0].timers_results[self.timer][3])
                    for J in JobsOK:
                        # printc('\t\tOK:', 'green', end=" ")
                        tpts = float(J.timers_results[self.timer][4])/float(J.timers_results[self.timer][3])
                        tptss = '{0:7.4f} s'.format(tpts)
                        tptsp = '{0:5.2f} x'.format(tpts/min_times[ci])
                        tptspc = '{0:10.7f} s'.format(tpts*J.total_cpu)
                        # print(J.job_name, '\t', tpts)
                        ds = '{0} x {1} x {2}'.format(J.domain_size[0], J.domain_size[1], J.domain_size[2])
                        T.append(['OK', execn, ds, J.cpus[0], J.cpus[1], J.cpus[2], J.timesteps, tptss, tptsp, tptspc])
                    for J in JobsNK:
                        # printc('\t\tFAIL:', 'red', end=" ")
                        # print(J.job_name, '\t', J.timers_results)
                        ds = '{0} x {1} x {2}'.format(J.domain_size[0], J.domain_size[1], J.domain_size[2])
                        T.append(['FAIL', execn, ds, J.cpus[0], J.cpus[1], J.cpus[2], '-', '-', '-', '-'])
                    header = ['result', 'executable', 'domain size', 'cpu x', 'cpu y', 'cpu z', 'timesteps', 'time / iter', 'to fastest', 'time / iter / core']
                    if len(T) > 0:
                        print('\t\t' + tabulate(T, headers=header, tablefmt="fancy_grid").replace('\n', '\n\t\t'))
                if enable_plotting:
                    Q, W = -1, -1
                    for q in range(len(self.uniq_total_cpus)):
                        print(len(self.uniq_total_cpus))
                        if min_times[q] > 0:
                            for w in range(len(self.uniq_total_cpus)-1, 0, -1):
                                if min_times[w] > 0:
                                    if Q == -1 and W == -1:
                                        Q = q
                                        W = w + 1
                                        # print("Q, W", q, w)
                    x1, y1 = self.uniq_total_cpus[Q:W], min_times[Q:W]
                    x2 = numpy.log(numpy.array(x1))
                    y2 = numpy.log(numpy.array(y1))
                    print(x1, y1)
                    if enable_scipy:
                        slope, intercept, r_value, p_value, std_err = stats.linregress(x2, y2)
                        #print("LS", slope, intercept, r_value, p_value, std_err)
                        #plt.plot([x1[0], x1[-1]*16], [numpy.exp(x2[0]*slope+intercept), numpy.exp(numpy.log(x1[-1]*16)*slope+intercept)], '-.', color=self.domains_color[di])
                        plt.plot([1, 1024*32], [numpy.exp(numpy.log(1)*slope+intercept), numpy.exp(numpy.log(1024*32)*slope+intercept)], '-.', color=self.domains_color[di])
                    else:
                        print("No SCIPY!")
                    #plt.plot(self.uniq_total_cpus, min_times, self.executable_symbol[iexec], color=self.domains_color[di], label="{0}, {1} ({2:4.3f})".format(execn, self.domains[di], -slope), ms=10)
                    #plt.plot(self.uniq_total_cpus, min_times, color=self.domains_color[di])
                    # print("Q, W", q, w)
                    # plt.plot([self.uniq_total_cpus[Q], self.uniq_total_cpus[W]], [min_times[Q], min_times[Q]/(self.uniq_total_cpus[W]/self.uniq_total_cpus[Q])], '--', color=self.domains_color[di])
                    # print(self.uniq_total_cpus[Q:W])
                    # print(min_times[Q:W])
                    # print(self.uniq_total_cpus, min_times)
        if enable_plotting:
            # print("MakingPlot")
            plt.axis((50, 2000, 0.001, 1))
            plt.xlabel('Total number of cores', fontsize=16)
            plt.ylabel('Average time per iteration', fontsize=16)
            box = ax.get_position()
            ax.set_position([box.x0, box.y0 + box.height * 0.2, box.width, box.height * 0.8])
            plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), fancybox=True, shadow=True, ncol=3)
            plt.grid()
            plt.savefig("nowy.png")
            # plt.show()

    def CpuConfigPlot(self):
        for ds in self.domains:
            name = 'cpu_{0}_{1}_{2}.png'.format(ds[0], ds[1], ds[2])
            title = 'Advection dwarf: global domain size {0} x {1} x {2}'.format(ds[0], ds[1], ds[2])
            JobsOK = []
            for J in self.Jobs:
                if J.domain_size == ds:
                    if type(J.timers_results[self.timer]) is list:
                        JobsOK.append(J)
            JobsOK.sort(key=lambda x: -float(x.timers_results[self.timer][4])/float(x.timers_results[self.timer][3]))
            JobsOK.sort(key=lambda x: -float(x.cpus[2]))
            JobsOK.sort(key=lambda x: float(x.total_cpu))
            y = []
            bestx = []
            besty = []
            labels = []
            y2 = []
            x2 = []
            tcp = 0
            halo_size = []
            for J in JobsOK:
                tpts = float(J.timers_results[self.timer][4])/float(J.timers_results[self.timer][3])
                tptss = '{0:7.4f} s'.format(tpts)
                tptspc = tpts*J.total_cpu
                min_lptps = 1e10
                for K in JobsOK:
                    if J.total_cpu == K.total_cpu:
                        ltpts = float(K.timers_results[self.timer][4])/float(K.timers_results[self.timer][3])
                        if ltpts < min_lptps:
                            min_lptps = ltpts
                #print(J.total_cpu, min_lptps)
                if tpts == min_lptps:
                    bestx.append(len(y))
                    besty.append(tpts)
                #print(numpy.prod(J.cpus), J.cpus[0], J.cpus[1], J.cpus[2], tptss)
                y.append(tpts)
                labels.append('{0} x {1} x {2}'.format(J.cpus[0], J.cpus[1], J.cpus[2]))
                if tcp != J.total_cpu:
                    tcp = J.total_cpu
                    y2.append(len(y)-1)
                    x2.append(tcp)
                box = numpy.array(J.domain_size)/numpy.array(J.cpus)
                pp = (box[0]*box[1]+box[1]*box[2]+box[0]*box[2])*2
                ppt = pp*numpy.prod(J.cpus)
                # print(J.domain_size, J.cpus, box, pp, ppt)
                halo_size.append(ppt)
            x = range(len(y))
            if enable_scipy:
                print("CORR:", stats.pearsonr(y, halo_size))
            else:
                print("No SCIPY")
            plt.clf()
            fig = plt.figure(figsize=(50, 20))
            ax = fig.add_subplot(111)
            #ax.set_xscale("log", nonposx='clip')
            #ax.set_yscale("log", nonposy='clip')
            plt.plot(bestx, besty, 'ro', ms=20)
            plt.plot(x, y, '.', ms=10)
            # for q in range(0, len(y2)-1):
            #     s, e = y2[q], y2[q+1]
            #     print("T", x2[q], s, e)
            #     slope, intercept, r_value, p_value, std_err = stats.linregress(x[s:e], y[s:e])
            #     print(slope, intercept, r_value, p_value, std_err)
            h = numpy.max(y)-numpy.min(y)
            for a in range(len(y2)):
                plt.plot([y2[a]-.5, y2[a]-.5], [numpy.min(y)-h/20, numpy.max(y)+h/20], 'k--')
            plt.xticks(x, labels, rotation='vertical', fontsize=16)
            plt.grid()
            plt.axis([-.5, len(y)-.5, numpy.min(y)-h/20, numpy.max(y)+h/20])
            plt.xlabel('MPI cartesian decomposition: nprocx * nprocy * nprocz', fontsize=28)
            plt.ylabel('Average time per MPDATA call', fontsize=28)
            plt.xticks(x, labels, rotation='vertical', fontsize=16)
            #ax2 = plt.twinx()
            #ax2.set_yscale("log", nonposy='clip')
            #ax2.plot(x, numpy.array(halo_size), 'sr')
            plt.title(title, fontsize=28)
            plt.savefig(name)
```

middle: `MakeSubmits`

fim_type: `identifier_name`
file_name: `PerformanceTester.py`

prefix (this row is cut off mid-cell; the remaining columns are not shown in this dump):

```python
import numpy
import subprocess
import sys
import os.path
from PerformanceTesterJob import Job, printc, run
import types
from tabulate081 import tabulate

enable_scipy = True
try:
    from scipy import stats
except:
    enable_scipy = False

enable_plotting = True
try:
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt
except:
    enable_plotting = False

def divisors(n):
    result = []
    for i in range(1, n+1):
        if n % i == 0:
            result.append(i)
    return result

class Tester():
    def __init__(self):
        self.Jobs = []
        self.cx = []
        self.cy = []
        self.cz = []
        self.timer = ''
        self.domains = []
        self.domains_color = []
        self.domains_symbol = []
        self.template = ''
        self.iterations = []
        self.output_suffix = ''
        self.executable = []
        self.executable_name = []
        self.executable_symbol = []
        self.uniq_total_cpus = []
        self.group_submit_files = []

    def SetTimer(self, timer):
        self.timer = timer

    def SetTemplate(self, template):
        self.template = template

    def AddExecutable(self, executable, name, symbol='o'):
        self.executable.append(executable)
        self.executable_name.append(name)
        self.executable_symbol.append(symbol)

    def SetOutputSuffix(self, output_suffix):
        self.output_suffix = output_suffix

    def SetCpuConfig(self, cx, cy, cz):
        self.cx = cx
        self.cy = cy
        self.cz = cz

    def AddDomain(self, dx, dy, dz, color='#000000', symbol='o'):
        self.domains.append([dx, dy, dz])
        self.domains_color.append(color)
        self.domains_symbol.append(symbol)

    def AddIterations(self, cpu_from, cpu_to, iterations):
        self.iterations.append([cpu_from, cpu_to, iterations])

    def GenerateJobs(self):
        utc = set()
        for exec in self.executable:
            for x in self.cx:
                for y in self.cy:
                    for z in self.cz:
                        utc.add(numpy.prod([x, y, z]))
                        for iteration in self.iterations:
                            if numpy.prod([x, y, z]) > iteration[0] and numpy.prod([x, y, z]) <= iteration[1]:
                                for d in self.domains:
                                    self.Jobs.append(Job(d, [x, y, z], iteration[2], output_suffix=self.output_suffix, executable=exec, job_exec=exec.replace("./", "").replace(".out", "")))
                        # if numpy.prod([x,y,z]) >= 64 and numpy.prod([x,y,z]) <= 1600:
                        # elif numpy.prod([x,y,z]) < 64:
                        #     for d in self.domains:
                        #         self.Jobs.append(Job(d, [x,y,z], 250))
        self.Jobs.sort()
        self.uniq_total_cpus = list(utc)
        self.uniq_total_cpus.sort()

    def GenerateJobsTotalCPU(self, totalCPU, max_x=2**10, max_y=2**10, max_z=2**10):
        utc = set()
        cpu_configs = []
        totalCPU_divisors = divisors(totalCPU)
        for exec in self.executable:
            for x in totalCPU_divisors:
                for y in totalCPU_divisors:
                    for z in totalCPU_divisors:
                        code = "{0}_{1}_{2}".format(x, y, z)
                        if numpy.prod([x, y, z]) == totalCPU:
                            if x <= max_x and y <= max_y and z <= max_z:
                                if code not in cpu_configs:
                                    cpu_configs.append(code)
                                    utc.add(numpy.prod([x, y, z]))
                                    for iteration in self.iterations:
                                        if numpy.prod([x, y, z]) > iteration[0] and numpy.prod([x, y, z]) <= iteration[1]:
                                            for d in self.domains:
                                                self.Jobs.append(Job(d, [x, y, z], iteration[2], output_suffix=self.output_suffix, executable=exec, job_exec=exec.replace("./", "").replace(".out", "")))
        self.Jobs.sort()
        self.uniq_total_cpus = list(utc)
        self.uniq_total_cpus.sort()

    def MakeSubmits(self):
        for J in self.Jobs:
            J.MakeSubmit(self.template)
            print('Prepared submit for job', J.job_name)

    def MakeGroupSubmits(self):
        utc = set()
        for J in self.Jobs:
            utc.add(J.total_cpu)
        utc = list(utc)
        utc.sort()
        print(utc)
        for tc in utc:
            first = True
            for J in self.Jobs:
                if J.total_cpu == tc:
                    if first:
                        J.MakeSubmit(self.template, part="all", mode="w", alternative_name="E.group_{0:05d}".format(tc))
                        self.group_submit_files.append("E.group_{0:05d}.submit.sh".format(tc))
                        first = False
                    else:
                        J.MakeSubmit(self.template, part="mpirun", mode="a", alternative_name="E.group_{0:05d}".format(tc))

    def SubmitAll(self):
        for J in self.Jobs:
            J.Submit()

    def SubmitGroupAll(self):
        for f in self.group_submit_files:
            run("sbatch {0}".format(f))

    def ReadJobTimers(self):
        for J in self.Jobs:
            J.ReadTimer(self.timer)

    def ReadGroupJobTimers(self):
        printc("Processing outfiles... ", end="")
        files = set()
        for iexec in range(len(self.executable)):
            exec = self.executable[iexec]
            execn = self.executable_name[iexec]
            # print(exec)
            utc = set()
            for J in self.Jobs:
                utc.add(J.total_cpu)
            utc = list(utc)
            utc.sort()
            # print(utc)
            for tc in utc:
                outfile = "E.group_{0:05d}.{1}".format(tc, self.output_suffix)
                # files.add(outfile)
                run('cat {0} | grep -E "^E\.|{1}" > {0}.clean'.format(outfile, self.timer), quiet=True)
                files.add("{0}.clean".format(outfile))
                with open("{0}.clean".format(outfile), 'r') as f:
                    fname = ''
                    for line in f:
                        #print(line.strip(),line[0:1] )
                        if "E.{0}".format(execn) in line:
                            fname = line.strip() + "." + self.output_suffix
                            files.add(fname)
                        elif self.timer in line:
                            if len(fname) > 0:
                                with open(fname, "w") as fw:
                                    fw.write(line)
                                fname = ""
        printc("\tdone", color='green')
        printc("Reading timers... ", end="")
        for J in self.Jobs:
            J.ReadTimer(self.timer)
        printc("\tdone", color='green')
        printc("Cleaning up files... ", end="")
        for file in files:
            run("rm -f {0}".format(file), quiet=True)
        printc("\tdone", color='green')

    def ProcessStats(self):
        if enable_plotting:
            fig = plt.figure(figsize=[16, 8])
            ax = fig.add_subplot(111)
            ax.set_xscale("log", nonpositive='clip')
            ax.set_yscale("log", nonpositive='clip')
        for iexec in range(len(self.executable)):
            exec = self.executable[iexec]
            execn = self.executable_name[iexec]
            for di in range(len(self.domains)):
                d = self.domains[di]
                min_times = numpy.empty(len(self.uniq_total_cpus))
                min_times[:] = numpy.NAN
                printc('\nProcessing Domain {0} for {1}'.format(str(d), execn), 'blue')
                for ci in range(len(self.uniq_total_cpus)):
                    c = self.uniq_total_cpus[ci]
                    JobsOK = []
                    JobsNK = []
                    printc('\tProcessing cpu config {0}'.format(str(c)), 'violet')
                    T = []
                    for J in self.Jobs:
                        if J.total_cpu == c and J.domain_size == d and J.executable == exec:
                            if type(J.timers_results[self.timer]) is list:
                                JobsOK.append(J)
                            else:
                                JobsNK.append(J)
                    JobsOK.sort(key=lambda x: float(x.timers_results[self.timer][4])/float(x.timers_results[self.timer][3]))
                    if len(JobsOK) > 0:
                        min_times[ci] = float(JobsOK[0].timers_results[self.timer][4])/float(JobsOK[0].timers_results[self.timer][3])
                    for J in JobsOK:
                        # printc('\t\tOK:', 'green', end=" ")
                        tpts = float(J.timers_results[self.timer][4])/float(J.timers_results[self.timer][3])
                        tptss = '{0:7.4f} s'.format(tpts)
                        tptsp = '{0:5.2f} x'.format(tpts/min_times[ci])
                        tptspc = '{0:10.7f} s'.format(tpts*J.total_cpu)
                        # print(J.job_name, '\t', tpts)
                        ds = '{0} x {1} x {2}'.format(J.domain_size[0], J.domain_size[1], J.domain_size[2])
                        T.append(['OK', execn, ds, J.cpus[0], J.cpus[1], J.cpus[2], J.timesteps, tptss, tptsp, tptspc])
                    for J in JobsNK:
                        # printc('\t\tFAIL:', 'red', end=" ")
                        # print(J.job_name, '\t', J.timers_results)
                        ds = '{0} x {1} x {2}'.format(J.domain_size[0], J.domain_size[1], J.domain_size[2])
                        T.append(['FAIL', execn, ds, J.cpus[0], J.cpus[1], J.cpus[2], '-', '-', '-', '-'])
                    header = ['result', 'executable', 'domain size', 'cpu x', 'cpu y', 'cpu z', 'timesteps', 'time / iter', 'to fastest', 'time / iter / core']
                    if len(T) > 0:
                        print('\t\t' + tabulate(T, headers=header, tablefmt="fancy_grid").replace('\n', '\n\t\t'))
                if enable_plotting:
                    Q, W = -1, -1
                    for q in range(len(self.uniq_total_cpus)):
                        print(len(self.uniq_total_cpus))
                        if min_times[q] > 0:
                            for w in range(len(self.uniq_total_cpus)-1, 0, -1):
                                if min_times[w] > 0:
                                    if Q == -1 and W == -1:
                                        Q = q
                                        W = w + 1
                                        # print("Q, W", q, w)
                    x1, y1 = self.uniq_total_cpus[Q:W], min_times[Q:W]
                    x2 = numpy.log(numpy.array(x1))
                    y2 = numpy.log(numpy.array(y1))
                    print(x1, y1)
                    if enable_scipy:
                        slope, intercept, r_value, p_value, std_err = stats.linregress(x2, y2)
                        #print("LS", slope, intercept, r_value, p_value, std_err)
                        #plt.plot([x1[0], x1[-1]*16], [numpy.exp(x2[0]*slope+intercept),
```
numpy.exp(numpy.log(x1[-1]*16)*slope+intercept)],'-.', color=self.domains_color[di]) plt.plot([1, 1024*32], [numpy.exp(numpy.log(1)*slope+intercept), numpy.exp(numpy.log(1024*32)*slope+intercept)],'-.', color=self.domains_color[di]) else: print("No SCIPY!") #plt.plot(self.uniq_total_cpus,min_times,self.executable_symbol[iexec], color=self.domains_color[di],label="{0}, {1} ({2:4.3f})".format(execn,self.domains[di],-slope),ms=10) #plt.plot(self.uniq_total_cpus,min_times, color=self.domains_color[di]) # print("Q, W", q, w) # plt.plot([self.uniq_total_cpus[Q], self.uniq_total_cpus[W]], [min_times[Q], min_times[Q]/(self.uniq_total_cpus[W]/self.uniq_total_cpus[Q])],'--', color=self.domains_color[di]) # print(self.uniq_total_cpus[Q:W]) # print(min_times[Q:W]) # print(self.uniq_total_cpus,min_times) if enable_plotting: # print("MakingPlot") plt.axis((50,2000,0.001,1)) plt.xlabel('Total number of cores',fontsize=16) plt.ylabel('Average time per iteration',fontsize=16) box = ax.get_position() ax.set_position([box.x0, box.y0 + box.height * 0.2,box.width, box.height * 0.8]) plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15),fancybox=True, shadow=True, ncol=3) plt.grid() plt.savefig("nowy.png") # plt.show() def CpuConfigPlot(self): for ds in self.domains: name = 'cpu_{0}_{1}_{2}.png'.format(ds[0], ds[1], ds[2]) title = 'Advection dwarf: global domain size {0} x {1} x {2}'.format(ds[0], ds[1], ds[2]) JobsOK = [] for J in self.Jobs: if J.domain_size == ds: if type(J.timers_results[self.timer]) is list: JobsOK.append(J) JobsOK.sort(key=lambda x: -float(x.timers_results[self.timer][4])/float(x.timers_results[self.timer][3])) JobsOK.sort(key=lambda x: -float(x.cpus[2])) JobsOK.sort(key=lambda x: float(x.total_cpu)) y = [] bestx = [] besty = [] labels=[] y2 = [] x2=[] tcp = 0 halo_size = [] for J in JobsOK: tpts = float(J.timers_results[self.timer][4])/float(J.timers_results[self.timer][3]) tptss = '{0:7.4f} s'.format(tpts) tptspc = tpts*J.total_cpu min_lptps = 1e10 for K in JobsOK: if J.total_cpu == K.total_cpu: ltpts = float(K.timers_results[self.timer][4])/float(K.timers_results[self.timer][3]) if ltpts < min_lptps: min_lptps = ltpts #print(J.total_cpu, min_lptps) if tpts==min_lptps: bestx.append(len(y)) besty.append(tpts) #print(numpy.prod(J.cpus), J.cpus[0], J.cpus[1], J.cpus[2],tptss) y.append(tpts) labels.append('{0} x {1} x {2}'.format(J.cpus[0],J.cpus[1],J.cpus[2])) if tcp != J.total_cpu: tcp = J.total_cpu y2.append(len(y)-1) x2.append(tcp) box = numpy.array(J.domain_size)/numpy.array(J.cpus) pp = (box[0]*box[1]+box[1]*box[2]+box[0]*box[2])*2 ppt = pp*numpy.prod(J.cpus); # print(J.domain_size, J.cpus, box, pp, ppt) halo_size.append(ppt) x = range(len(y)) if enable_scipy: print("CORR:", stats.pearsonr(y, halo_size)) else: print("No SCIPY") plt.clf() fig = plt.figure(figsize=(50,20)) ax = fig.add_subplot(111) #ax.set_xscale("log", nonposx='clip') #ax.set_yscale("log", nonposy='clip') plt.plot(bestx,besty,'ro',ms=20) plt.plot(x,y,'.',ms=10) # for q in range(0,len(y2)-1): # s,e = y2[q], y2[q+1] # print("T", x2[q], s, e) # slope, intercept, r_value, p_value, std_err = stats.linregress(x[s:e],y[s:e]) # print(slope, intercept, r_value, p_value, std_err) h = numpy.max(y)-numpy.min(y) for a in range(len(y2)): plt.plot([y2[a]-.5, y2[a]-.5], [numpy.min(y)-h/20, numpy.max(y)+h/20],'k--') plt.xticks(x, labels, rotation='vertical', fontsize=16) plt.grid() plt.axis([-.5, len(y)-.5, numpy.min(y)-h/20, numpy.max(y)+h/20]) plt.xlabel('MPI cartesian decomposition: nprocx * nprocy * nprocz', fontsize=28) 
plt.ylabel('Average time per MPDATA call', fontsize=28) #ax2 = plt.twinx() #ax2.set_yscale("log", nonposy='clip') #ax2.plot(x,numpy.array(halo_size),'sr') plt.title(title, fontsize=28) plt.savefig(name)
random_line_split
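For orientation, a minimal driver sketch for the Tester class defined in PerformanceTester.py above; the template file, timer label, executable path and every numeric value here are hypothetical placeholders, and only the Tester methods themselves come from the source:

from PerformanceTester import Tester  # assumes the file above is importable as a module

t = Tester()
t.SetTimer("mpdata")                    # timer label to grep from job output (assumed)
t.SetTemplate("template.submit.sh")     # submit-script template file (assumed name)
t.SetOutputSuffix("out")
t.AddExecutable("./dwarf.out", "dwarf", symbol="o")
t.SetCpuConfig([1, 2, 4], [1, 2, 4], [1, 2])   # candidate cx, cy, cz rank counts
t.AddDomain(512, 512, 512, color="#1f77b4")    # one global domain size to benchmark
t.AddIterations(0, 8, 250)    # jobs with 1..8 total ranks run 250 iterations
t.AddIterations(8, 64, 100)   # jobs with 9..64 total ranks run 100 iterations
t.GenerateJobs()              # one Job per (executable, cx, cy, cz, domain) combination
# t.GenerateJobsTotalCPU(64)  # alternative: every divisor triple with cx*cy*cz == 64
t.MakeSubmits()               # write one submit script per job
# t.SubmitAll()               # or MakeGroupSubmits() + SubmitGroupAll() to batch by rank count

After the jobs finish, ReadJobTimers() followed by ProcessStats() tabulates the per-iteration times and, when scipy and matplotlib are available, fits and plots the scaling curve.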
task_5.py
''' ***************************************************************************************** * * =============================================== * Nirikshak Bot (NB) Theme (eYRC 2020-21) * =============================================== * * This script is to implement Task 5 of Nirikshak Bot (NB) Theme (eYRC 2020-21). * * This software is made available on an "AS IS WHERE IS BASIS". * Licensee/end user indemnifies and will keep e-Yantra indemnified from * any and all claim(s) that emanate from the use of the Software or * breach of the terms of this agreement. * * e-Yantra - An MHRD (now MOE) project under National Mission on Education using ICT (NMEICT) * ***************************************************************************************** ''' # Team ID: 2139 # Author List: Yash Varshney, Aman Tyagi # Filename: task_5.py # Functions: color_get,traverse_ball,send_data_to_draw_path, make_connection,set_path,complete_all_mapping_path,get_color # [ Comma separated list of functions in this file ] # Global variables: # [ List of global variables defined in this file ] # NOTE: Make sure you do NOT call sys.exit() in this code. ####################### IMPORT MODULES ####################### ## You are not allowed to make any changes in this section. ## ############################################################## import numpy as np import cv2 import os, sys import traceback import time import json ############################################################## # Importing the sim module for Remote API connection with CoppeliaSim try: import sim except Exception: print('\n[ERROR] It seems the sim.py OR simConst.py files are not found!') print('\n[WARNING] Make sure to have the following files in the directory:') print('sim.py, simConst.py and appropriate library - remoteApi.dll (if on Windows), remoteApi.so (if on Linux) or remoteApi.dylib (if on Mac).\n') #Import 'task_1b.py' file as module try: import task_1b except ImportError: print('\n[ERROR] task_1b.py file is not present in the current directory.') print('Your current directory is: ', os.getcwd()) print('Make sure task_1b.py is present in this current directory.\n') except Exception as e: print('Your task_1b.py threw an Exception. Kindly debug your code!\n') traceback.print_exc(file=sys.stdout) # Import 'task_1a_part1.py' file as module try: import task_1a_part1 except ImportError: print('\n[ERROR] task_1a_part1.py file is not present in the current directory.') print('Your current directory is: ', os.getcwd()) print('Make sure task_1a_part1.py is present in this current directory.\n') except Exception as e: print('Your task_1a_part1.py threw an Exception. Kindly debug your code!\n') traceback.print_exc(file=sys.stdout) # Import 'task_2a.py' file as module try: import task_2a except ImportError: print('\n[ERROR] task_2a.py file is not present in the current directory.') print('Your current directory is: ', os.getcwd()) print('Make sure task_2a.py is present in this current directory.\n') except Exception as e: print('Your task_2a.py threw an Exception. Kindly debug your code!\n') traceback.print_exc(file=sys.stdout) # Import 'task_2b.py' file as module try: import task_2b except ImportError: print('\n[ERROR] task_2b.py file is not present in the current directory.') print('Your current directory is: ', os.getcwd()) print('Make sure task_2b.py is present in this current directory.\n') except Exception as e: print('Your task_2b.py threw an Exception. Kindly debug your code!\n') traceback.print_exc(file=sys.stdout) # Import 'task_3.py' file as module try: import task_3 except ImportError: print('\n[ERROR] task_3.py file is not present in the current directory.') print('Your current directory is: ', os.getcwd()) print('Make sure task_3.py is present in this current directory.\n') except Exception as e: print('Your task_3.py threw an Exception. Kindly debug your code!\n') traceback.print_exc(file=sys.stdout) # Import 'task_4a.py' file as module try: import task_4a except ImportError: print('\n[ERROR] task_4a.py file is not present in the current directory.') print('Your current directory is: ', os.getcwd()) print('Make sure task_4a.py is present in this current directory.\n') except Exception as e: print('Your task_4a.py threw an Exception. Kindly debug your code!\n') traceback.print_exc(file=sys.stdout) ########################Global variables###################### vision_sensor_1 = -1 vision_sensor_2 = -1 vision_sensor_3 = -1 vision_sensor_4 = -1 vision_sensor_5 = -1 encoded_maze_t4 = None encoded_maze_t1 = None encoded_maze_t3 = None encoded_maze_t2 = None servo_handle_x_t4 = -1 servo_handle_y_t4 = -1 servo_handle_x_t3 = -1 servo_handle_y_t3 = -1 servo_handle_x_t2 = -1 servo_handle_y_t2 = -1 servo_handle_x_t1 = -1 servo_handle_y_t1 = -1 handle_list = {} try: with open('ball_details.json') as file: ball_details = json.load(file) except FileNotFoundError: print('\n[ERROR] ball_details.json file is not present in the current directory.') print('Your current directory is: ', os.getcwd()) print('Make sure ball_details.json is present in this current directory.\n') map_start = { "T4":[(0,5)], "T3":[(4,9)], "T2":[(0,4)], "T1":[(5,0)] } # do mapping of start and end point on the basis of color and json file. map_end = { "T4":[(5,9), (9,4), (4,0)], "T3":[(9,5), (5,0), (0,4)], "T2":[(4,9), (9,5), (5,0)], "T1":[(0,4), (4,9), (9,5)] } t4_path = None #path to table req aux_path = None #path to req cb path_map = { #pixel path to each exit point on the table "T1":[], "T2":[], "T3":[], "T4":[] } path_box_map = { #box coordinates path to draw path on the tables "T1":[], "T2":[], "T3":[], "T4":[] } maze_map ={ } collection_box = None #integer variable to store the number of the collection box client_id = -1 ############################################################ ############################################################## # NOTE: YOU ARE NOT ALLOWED TO MAKE ANY CHANGE TO THIS FUNCTION # # Function Name: send_color_and_collection_box_identified # Inputs: ball_color and collection_box_name # Outputs: None # Purpose: 1. This function should only be called when the task is being evaluated using # test executable. # 2. The format to send the data is as follows: # 'color::collection_box_name' def send_color_and_collection_box_identified(ball_color, collection_box_name): global client_id color_and_cb = [ball_color + '::' + collection_box_name] inputBuffer = bytearray() return_code, retInts, retFloats, retStrings, retBuffer = sim.simxCallScriptFunction(client_id,'evaluation_screen_respondable_1', sim.sim_scripttype_childscript,'color_and_cb_identification',[],[],color_and_cb,inputBuffer,sim.simx_opmode_blocking) ################# ADD UTILITY FUNCTIONS HERE ################# ## You can define any utility functions for your code. ## ## Please add proper comments to ensure that your code is ## ## readable and easy to understand.
## ############################################################## ''' Function name: color_get Inputs: Image from vision sensor Outputs: Color of the ball detected in the image Usage: Takes in the image from the vision sensors and returns the color of the ball detected in the image Example call: color_get(image_from_vision_sensor) ''' def color_get(img_file_path): if(img_file_path is None): return #Read the image if type(img_file_path) == type(str()): img_file_path = cv2.imread(img_file_path) else: img_file_path= img_file_path #cv2.imwrite("colorefromrailing.png",img_file_path) imageFrame = cv2.GaussianBlur(img_file_path,(5,5),cv2.BORDER_TRANSPARENT) hsvFrame = cv2.cvtColor(imageFrame, cv2.COLOR_BGR2HSV) #To create a mask for red colour red_lower = np.array([0, 50, 50]) red_upper = np.array([10, 255, 255]) red_mask = cv2.inRange(hsvFrame, red_lower, red_upper) kernal = np.ones((5, 5)) red_gray=cv2.threshold(red_mask, 245,225, cv2.THRESH_BINARY)[1] gray_blur_red= cv2.Canny(red_gray,100,255) #Create a mask for blue colour blue_lower = np.array([94, 20, 0], np.uint8) blue_upper = np.array([140,255 ,255], np.uint8) blue_mask = cv2.inRange(hsvFrame, blue_lower, blue_upper) kernal = np.ones((5, 5)) blue_mask = cv2.dilate(blue_mask, kernal) blue_gray=cv2.threshold(blue_mask, 245,225, cv2.THRESH_TRUNC)[1] gray_blur_blue= cv2.Canny(blue_gray,100,255) #Create a mask for green colour green_lower = np.array([25, 52, 72], np.uint8) green_upper = np.array([102, 255, 255], np.uint8) green_mask = cv2.inRange(hsvFrame, green_lower, green_upper) kernal = np.ones((5, 5)) green_mask = cv2.dilate(green_mask, kernal) green_gray=cv2.threshold(green_mask, 250,255, cv2.THRESH_BINARY)[1] gray_blur_green = cv2.Canny(green_gray,100,255) #find contours on blue mask cnts= cv2.findContours(gray_blur_blue, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) if type(cnts[-1]) !=type(None): if len(cnts) == 2:
elif len(cnts) == 3: cnts = cnts[1] if (len(cnts)): return 'blue' #Find red contours in the image cnts= cv2.findContours(gray_blur_red, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) if type(cnts[-1]) !=type(None) : if len(cnts) == 2: cnts = cnts[0] elif len(cnts) == 3: cnts = cnts[1] if (len(cnts)): return 'red' # Find green contours in the image cnts= cv2.findContours(gray_blur_green, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) if type(cnts[-1]) !=type(None) : if len(cnts) == 2: cnts = cnts[0] elif len(cnts) == 3: cnts = cnts[1] if(len(cnts)): return 'green' ''' Function name: traverse_ball Usage: traverses the ball from one point to another Inputs: servo handles(x and y), vision sensor to be read and pixel path which the ball has to follow Outputs: None Example Call : traverse_ball(servohandle_x_t4, servo_handle_y_t4, visionsensor_4, t4_path) ''' def traverse_ball(servohandle_x,servohandle_y,vision_sensor_handle,pixel_path): global client_id rt_code, prev_time = sim.simxGetStringSignal(client_id,'time',sim.simx_opmode_streaming) current_time = '' while(len(current_time) == 0 ): rt_code,current_time =sim.simxGetStringSignal(client_id,'time',sim.simx_opmode_buffer) j = 0 k= 0 for i in pixel_path: i.reverse() task_3.change_setpoint(i) while(1): j+=1 k+=1 vision_sensor_image, image_resolution, return_code = task_2a.get_vision_sensor_image(client_id,vision_sensor_handle) transformed_image = task_2a.transform_vision_sensor_image(vision_sensor_image,image_resolution) warped_img = task_1b.applyPerspectiveTransform(transformed_image) shapes = task_1a_part1.scan_image(warped_img) if(shapes): warped_img = cv2.cvtColor(warped_img,cv2.COLOR_GRAY2RGB) warped_img = cv2.circle(warped_img,(shapes['Circle'][1],shapes['Circle'][2]),5,(0,255,0),2) warped_img = cv2.circle(warped_img,(i[0],i[1]),5,(255,0,0),2) if(abs(shapes['Circle'][1]-i[0]) <= 30 and abs(shapes['Circle'][2]-i[1]) <= 30): break else: task_3.control_logic(client_id,shapes['Circle'][1],shapes['Circle'][2],servohandle_x,servohandle_y) return 1 ''' Function name: send_data_to_draw_path Usage: Draws path on the table in Coppleiasim scene Inputs: table no and the box path to be drawn Outputs: None Example call: send_data_to_draw_path('T4', pixel_path_list) ''' def send_data_to_draw_path(table,path): global client_id ############## IF REQUIRED, CHANGE THE CODE FROM HERE ############## coppelia_sim_coord_path = [] table_name = "top_plate_respondable_t" + str(table) + "_1" for coord in path: for element in coord: coppelia_sim_coord_path.append(((10*element) - 45)/100) inputBuffer = bytearray() return_code, retInts, retFloats, retStrings, retBuffer = sim.simxCallScriptFunction(client_id, \ table_name, sim.sim_scripttype_customizationscript, 'drawPath', [], \ coppelia_sim_coord_path, [], inputBuffer, sim.simx_opmode_oneshot) ''' Function name: make_connection Usage: Establishes connection with the Coppleiasim server and populates the global variable handle list with the updated values of servo handle and vision sensors Inputs: None Outputs: None Example call: make_connection() ''' def make_connection(): global client_id,handle_list global vision_sensor_5,vision_sensor_4,vision_sensor_3,vision_sensor_2,vision_sensor_1,servo_handle_x_t1,servo_handle_y_t1,servo_handle_x_t4,servo_handle_y_t4 return_code,servo_handle_x_t1 = sim.simxGetObjectHandle(client_id,"revolute_joint_ss_t1_1",sim.simx_opmode_blocking) return_code,servo_handle_y_t1 = sim.simxGetObjectHandle(client_id,"revolute_joint_ss_t1_2",sim.simx_opmode_blocking) return_code,servo_handle_x_t4 = 
sim.simxGetObjectHandle(client_id,"revolute_joint_ss_t4_1",sim.simx_opmode_blocking) return_code,servo_handle_y_t4 = sim.simxGetObjectHandle(client_id,"revolute_joint_ss_t4_2",sim.simx_opmode_blocking) return_code,vision_sensor_1 = sim.simxGetObjectHandle(client_id,"vision_sensor_1",sim.simx_opmode_blocking) #return_code,vision_sensor_2 = sim.simxGetObjectHandle(client_id,"vision_sensor_2",sim.simx_opmode_blocking) #return_code,vision_sensor_3 = sim.simxGetObjectHandle(client_id,"vision_sensor_3",sim.simx_opmode_blocking) return_code,vision_sensor_4 = sim.simxGetObjectHandle(client_id,"vision_sensor_4",sim.simx_opmode_blocking) return_code,vision_sensor_5 = sim.simxGetObjectHandle(client_id,"vision_sensor_5",sim.simx_opmode_blocking) handle_list = {'T4' : [servo_handle_x_t4,servo_handle_y_t4,vision_sensor_4], 'T3' : [], 'T2' : [], 'T1' : [servo_handle_x_t1,servo_handle_y_t1,vision_sensor_1] } ''' Function name: set_path Usage: sets variables used to make the ball reach to its destination collection box according to the color using ball_details json dictionary. It calls send_data_to_draw_path to draw the path on the table. Inputs: color of the detected ball : string Outputs: None Example call: set_path('green') ''' def set_path(color): global t4_path,aux_path table, collection_box = ball_details[color][0].split('_') t4_path=path_map['T4'][int(table[-1])-1] t4_path_drawn = path_box_map['T4'][int(table[-1])-1] send_data_to_draw_path(4,t4_path_drawn) aux_path = path_map[table][int(collection_box[-1])-1] aux_path_drawn = path_box_map[table][int(collection_box[-1])-1] send_data_to_draw_path(1,aux_path_drawn) ball_details[color].pop(0) ''' Function name: complete_all_mapping_path Usage: Sets all mapping path according to the values of entry and exit points of the table and the maze. It also manipulates the setpoints according to the required collection box collection box(line no 452-478) to make the ball fall in the collection box. 
Inputs: Table number for which the paths have to be set : string Outputs: None Example Call: complete_all_mapping_path('T4') ''' def complete_all_mapping_path (tablenum): global map_start,map_end,maze_map,path_map,encoded_maze_t1,path_box_map for i in range(3): start_coord= map_start[tablenum][0] end_coord= map_end[tablenum][i] mazearray = maze_map[tablenum] path = task_4a.find_path(mazearray, start_coord, end_coord) path_box_map[tablenum].append(path) resolution_x = 1120#1280 resolution_y = 1120#1280 x_increment = resolution_x//10 y_increment = resolution_y//10 pixel_path = [] for i in range(len(path)): pixel_path.append([]) for i in range(len(path)): # to change the pixel trim: change 180 with pixel*10 and 18 with pixel x_pix_trim = int(((180*path[i][0])/45)-18) y_pix_trim = int(((180*path[i][1])/45)-18) x_pixel = ((x_increment//2) + path[i][0]*x_increment) + 80 + x_pix_trim y_pixel = ((y_increment//2) + path[i][1]*y_increment) + 80 + y_pix_trim pixel_path[i].append(x_pixel) pixel_path[i].append(y_pixel) if (tablenum == 'T1'): if (path[len(path)-1] == map_end[tablenum][0]): #(0,4) decrease y pixel [tilt in +ve y] pixel_path.append( [ pixel_path[len(pixel_path)-1][0]- (y_increment//2), pixel_path[len(pixel_path)-1][1]]) elif (path[len(path)-1] == map_end[tablenum][1]): #(4,9) increase x pixel [tilt in +ve x] pixel_path.append( [ pixel_path[len(pixel_path)-1][0] + (x_increment//2) , pixel_path[len(pixel_path)-1][1]]) elif (path[len(path)-1] == map_end[tablenum][2]): #(9,5) increase y pixel [tilt in -ve y] pixel_path.append( [ pixel_path[len(pixel_path)-1][0] , pixel_path[len(pixel_path)-1][1] + (y_increment//2)]) else: print("Unexpected element in the end of the path in maze T1") if (tablenum == 'T4'): if (path[len(path)-1] == map_end[tablenum][0]): #(5,9) increase x pixel [tilt in +ve x] pixel_path.append( [ pixel_path[len(pixel_path)-1][0] , pixel_path[len(pixel_path)-1][1]+(x_increment//2)] ) elif (path[len(path)-1] == map_end[tablenum][1]): #(9,4) increase y pixel [tilt in -ve y] pixel_path.append( [ pixel_path[len(pixel_path)-1][0] , pixel_path[len(pixel_path)-1][1] + (y_increment//2)]) elif (path[len(path)-1] == map_end[tablenum][2]): #(4,0) decrease x pixel [tilt in -ve x] pixel_path.append( [ pixel_path[len(pixel_path)-1][0] - (x_increment//2) , pixel_path[len(pixel_path)-1][1]]) else: print("Unexpected element in the end of the path in maze T4") path_map[tablenum].append(pixel_path) ''' Function name: get_color Usage: It sends the vision sensor image to the color_get function repeatedly and waits until the called function returns a color. Inputs: None Outputs: color of the detected ball ''' def get_color(): global vision_sensor_5,client_id color = None return_code ,image_resolution,vision_sensor_image =sim.simxGetVisionSensorImage(client_id,vision_sensor_5,0,sim.simx_opmode_blocking) while(color is None ): return_code ,image_resolution,vision_sensor_image = sim.simxGetVisionSensorImage(client_id,vision_sensor_5,0,sim.simx_opmode_blocking) if(len(vision_sensor_image)): vision_sensor_image = task_2a.transform_vision_sensor_image(vision_sensor_image,image_resolution) color = color_get(vision_sensor_image) return color ############################################################## def main(rec_client_id): """ Purpose: --- Teams are free to design their code in this task. The test executable will only call this function of task_5.py. init_remote_api_server() and exit_remote_api_server() functions are already defined in the executable and hence should not be called by the teams. 
The obtained client_id is passed to this function so that teams can use it in their code. However NOTE: Teams will have to call start_simulation() and stop_simulation() functions on their own. Input Arguments: --- `rec_client_id` : integer client_id returned after calling init_remote_api_server() function from the executable. Returns: --- None Example call: --- main(rec_client_id) """ ############## ADD YOUR CODE HERE ############## global maze_map,encoded_maze_t1,encoded_maze_t2,encoded_maze_t3,encoded_maze_t4,t4_path,aux_path,servo_handle_x_t1,servo_handle_y_t1 global servo_handle_x_t4,servo_handle_y_t4,path_box_map,collection_box global client_id,ball_details client_id = rec_client_id img_t4 = cv2.imread("maze_t4.JPG") warped_t4 = task_1b.applyPerspectiveTransform(img_t4) encoded_maze_t4 = task_1b.detectMaze(warped_t4) maze_map['T4'] = encoded_maze_t4 return_code = task_2b.send_data(rec_client_id,encoded_maze_t4,"t4") img_t1 = cv2.imread("maze_t1.JPG") warped_t1 = task_1b.applyPerspectiveTransform(img_t1) encoded_maze_t1 = task_1b.detectMaze(warped_t1) maze_map['T1'] = encoded_maze_t1 return_code = task_2b.send_data(rec_client_id,encoded_maze_t1,"t1") complete_all_mapping_path('T1') complete_all_mapping_path('T4') #Similarly for T3 and T2 make_connection() return_code = task_2a.start_simulation(rec_client_id) i = 0 while(all([len(i)!=0 for i in list(ball_details.values())])): print("here") color = get_color() if(color): collection_box = ball_details[color][0] table = ball_details[color][0].split('_')[0] send_color_and_collection_box_identified(color, collection_box) set_path(color) traverse_ball(handle_list["T4"][0],handle_list["T4"][1],handle_list["T4"][2],t4_path) traverse_ball(handle_list[table][0],handle_list[table][1],handle_list[table][2],aux_path) print("task completed") print(len(list(ball_details.values()))) print(f" i is {i} ") time.sleep(5) task_2a.stop_simulation(rec_client_id) ################################################## # Function Name: main (built in) # Inputs: None # Outputs: None # Purpose: To call the main(rec_client_id) function written by teams when they # run task_5.py only. # NOTE: Write your solution ONLY in the space provided in the above functions. This function should not be edited. if __name__ == "__main__": client_id = task_2a.init_remote_api_server() main(client_id)
cnts = cnts[0]
conditional_block
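The color_get routine above repeats one OpenCV pattern per colour: threshold the frame in HSV space with cv2.inRange, then report a colour as soon as its mask still yields contours. Below is a self-contained sketch of that pattern on a synthetic frame; the red hue bounds are the ones used above, while the test image and variable names are made up for illustration:

import cv2
import numpy as np

# Synthetic 100x100 frame with a solid red disc (OpenCV uses BGR order).
img = np.zeros((100, 100, 3), np.uint8)
cv2.circle(img, (50, 50), 20, (0, 0, 255), -1)

hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, np.array([0, 50, 50]), np.array([10, 255, 255]))

# cv2.findContours returns 2 values on OpenCV 4.x and 3 values on 3.x,
# which is why color_get checks len(cnts) before indexing.
res = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = res[0] if len(res) == 2 else res[1]
print('red' if len(contours) else 'no ball found')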
task_5.py
''' ***************************************************************************************** * * =============================================== * Nirikshak Bot (NB) Theme (eYRC 2020-21) * =============================================== * * This script is to implement Task 5 of Nirikshak Bot (NB) Theme (eYRC 2020-21). * * This software is made available on an "AS IS WHERE IS BASIS". * Licensee/end user indemnifies and will keep e-Yantra indemnified from * any and all claim(s) that emanate from the use of the Software or * breach of the terms of this agreement. * * e-Yantra - An MHRD (now MOE) project under National Mission on Education using ICT (NMEICT) * ***************************************************************************************** ''' # Team ID: 2139 # Author List: Yash Varshney, Aman Tyagi # Filename: task_5.py # Functions: color_get,traverse_ball,send_data_to_draw_path, make_connection,set_path,complete_all_mapping_path,get_color # [ Comma separated list of functions in this file ] # Global variables: # [ List of global variables defined in this file ] # NOTE: Make sure you do NOT call sys.exit() in this code. ####################### IMPORT MODULES ####################### ## You are not allowed to make any changes in this section. ## ############################################################## import numpy as np import cv2 import os, sys import traceback import time import json ############################################################## # Importing the sim module for Remote API connection with CoppeliaSim try: import sim except Exception: print('\n[ERROR] It seems the sim.py OR simConst.py files are not found!') print('\n[WARNING] Make sure to have the following files in the directory:') print('sim.py, simConst.py and appropriate library - remoteApi.dll (if on Windows), remoteApi.so (if on Linux) or remoteApi.dylib (if on Mac).\n') #Import 'task_1b.py' file as module try: import task_1b except ImportError: print('\n[ERROR] task_1b.py file is not present in the current directory.') print('Your current directory is: ', os.getcwd()) print('Make sure task_1b.py is present in this current directory.\n') except Exception as e: print('Your task_1b.py threw an Exception. Kindly debug your code!\n') traceback.print_exc(file=sys.stdout) # Import 'task_1a_part1.py' file as module try: import task_1a_part1 except ImportError: print('\n[ERROR] task_1a_part1.py file is not present in the current directory.') print('Your current directory is: ', os.getcwd()) print('Make sure task_1a_part1.py is present in this current directory.\n') except Exception as e: print('Your task_1a_part1.py threw an Exception. Kindly debug your code!\n') traceback.print_exc(file=sys.stdout) # Import 'task_2a.py' file as module try: import task_2a except ImportError: print('\n[ERROR] task_2a.py file is not present in the current directory.') print('Your current directory is: ', os.getcwd()) print('Make sure task_2a.py is present in this current directory.\n') except Exception as e: print('Your task_2a.py threw an Exception. Kindly debug your code!\n') traceback.print_exc(file=sys.stdout) # Import 'task_2b.py' file as module try: import task_2b except ImportError: print('\n[ERROR] task_2b.py file is not present in the current directory.') print('Your current directory is: ', os.getcwd()) print('Make sure task_2b.py is present in this current directory.\n') except Exception as e: print('Your task_2b.py threw an Exception. Kindly debug your code!\n') traceback.print_exc(file=sys.stdout) # Import 'task_3.py' file as module try: import task_3 except ImportError: print('\n[ERROR] task_3.py file is not present in the current directory.') print('Your current directory is: ', os.getcwd()) print('Make sure task_3.py is present in this current directory.\n') except Exception as e: print('Your task_3.py threw an Exception. Kindly debug your code!\n') traceback.print_exc(file=sys.stdout) # Import 'task_4a.py' file as module try: import task_4a except ImportError: print('\n[ERROR] task_4a.py file is not present in the current directory.') print('Your current directory is: ', os.getcwd()) print('Make sure task_4a.py is present in this current directory.\n') except Exception as e: print('Your task_4a.py threw an Exception. Kindly debug your code!\n') traceback.print_exc(file=sys.stdout) ########################Global variables###################### vision_sensor_1 = -1 vision_sensor_2 = -1 vision_sensor_3 = -1 vision_sensor_4 = -1 vision_sensor_5 = -1 encoded_maze_t4 = None encoded_maze_t1 = None encoded_maze_t3 = None encoded_maze_t2 = None servo_handle_x_t4 = -1 servo_handle_y_t4 = -1 servo_handle_x_t3 = -1 servo_handle_y_t3 = -1 servo_handle_x_t2 = -1 servo_handle_y_t2 = -1 servo_handle_x_t1 = -1 servo_handle_y_t1 = -1 handle_list = {} try: with open('ball_details.json') as file: ball_details = json.load(file) except FileNotFoundError: print('\n[ERROR] ball_details.json file is not present in the current directory.') print('Your current directory is: ', os.getcwd()) print('Make sure ball_details.json is present in this current directory.\n') map_start = { "T4":[(0,5)], "T3":[(4,9)], "T2":[(0,4)], "T1":[(5,0)] } # do mapping of start and end point on the basis of color and json file. map_end = { "T4":[(5,9), (9,4), (4,0)], "T3":[(9,5), (5,0), (0,4)], "T2":[(4,9), (9,5), (5,0)], "T1":[(0,4), (4,9), (9,5)] } t4_path = None #path to table req aux_path = None #path to req cb path_map = { #pixel path to each exit point on the table "T1":[], "T2":[], "T3":[], "T4":[] } path_box_map = { #box coordinates path to draw path on the tables "T1":[], "T2":[], "T3":[], "T4":[] } maze_map ={ } collection_box = None #integer variable to store the number of the collection box client_id = -1 ############################################################ ############################################################## # NOTE: YOU ARE NOT ALLOWED TO MAKE ANY CHANGE TO THIS FUNCTION # # Function Name: send_color_and_collection_box_identified # Inputs: ball_color and collection_box_name # Outputs: None # Purpose: 1. This function should only be called when the task is being evaluated using # test executable. # 2. The format to send the data is as follows: # 'color::collection_box_name' def send_color_and_collection_box_identified(ball_color, collection_box_name): global client_id color_and_cb = [ball_color + '::' + collection_box_name] inputBuffer = bytearray() return_code, retInts, retFloats, retStrings, retBuffer = sim.simxCallScriptFunction(client_id,'evaluation_screen_respondable_1', sim.sim_scripttype_childscript,'color_and_cb_identification',[],[],color_and_cb,inputBuffer,sim.simx_opmode_blocking) ################# ADD UTILITY FUNCTIONS HERE ################# ## You can define any utility functions for your code. ## ## Please add proper comments to ensure that your code is ## ## readable and easy to understand.
## ############################################################## ''' Function name: color_get Inputs: Image from vision sensor Outputs: Color of the ball detected in the image Usage: Takes in the image from the vision sensors and returns the color of the ball detected in the image Example call: color_get(image_from_vision_sensor) ''' def color_get(img_file_path): if(img_file_path is None): return #Read the image if type(img_file_path) == type(str()): img_file_path = cv2.imread(img_file_path) else: img_file_path= img_file_path #cv2.imwrite("colorefromrailing.png",img_file_path) imageFrame = cv2.GaussianBlur(img_file_path,(5,5),cv2.BORDER_TRANSPARENT) hsvFrame = cv2.cvtColor(imageFrame, cv2.COLOR_BGR2HSV) #To create a mask for red colour red_lower = np.array([0, 50, 50]) red_upper = np.array([10, 255, 255]) red_mask = cv2.inRange(hsvFrame, red_lower, red_upper) kernal = np.ones((5, 5)) red_gray=cv2.threshold(red_mask, 245,225, cv2.THRESH_BINARY)[1] gray_blur_red= cv2.Canny(red_gray,100,255) #Create a mask for blue colour blue_lower = np.array([94, 20, 0], np.uint8) blue_upper = np.array([140,255 ,255], np.uint8) blue_mask = cv2.inRange(hsvFrame, blue_lower, blue_upper) kernal = np.ones((5, 5)) blue_mask = cv2.dilate(blue_mask, kernal) blue_gray=cv2.threshold(blue_mask, 245,225, cv2.THRESH_TRUNC)[1] gray_blur_blue= cv2.Canny(blue_gray,100,255) #Create a mask for green colour green_lower = np.array([25, 52, 72], np.uint8) green_upper = np.array([102, 255, 255], np.uint8) green_mask = cv2.inRange(hsvFrame, green_lower, green_upper) kernal = np.ones((5, 5)) green_mask = cv2.dilate(green_mask, kernal) green_gray=cv2.threshold(green_mask, 250,255, cv2.THRESH_BINARY)[1] gray_blur_green = cv2.Canny(green_gray,100,255) #find contours on blue mask cnts= cv2.findContours(gray_blur_blue, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) if type(cnts[-1]) !=type(None): if len(cnts) == 2: cnts = cnts[0] elif len(cnts) == 3: cnts = cnts[1] if (len(cnts)): return 'blue' #Find red contours in the image cnts= cv2.findContours(gray_blur_red, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) if type(cnts[-1]) !=type(None) : if len(cnts) == 2: cnts = cnts[0] elif len(cnts) == 3: cnts = cnts[1] if (len(cnts)): return 'red' # Find green contours in the image cnts= cv2.findContours(gray_blur_green, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) if type(cnts[-1]) !=type(None) : if len(cnts) == 2: cnts = cnts[0] elif len(cnts) == 3: cnts = cnts[1] if(len(cnts)): return 'green' ''' Function name: traverse_ball Usage: traverses the ball from one point to another Inputs: servo handles(x and y), vision sensor to be read and pixel path which the ball has to follow Outputs: None Example Call : traverse_ball(servohandle_x_t4, servo_handle_y_t4, visionsensor_4, t4_path) ''' def traverse_ball(servohandle_x,servohandle_y,vision_sensor_handle,pixel_path): global client_id rt_code, prev_time = sim.simxGetStringSignal(client_id,'time',sim.simx_opmode_streaming) current_time = '' while(len(current_time) == 0 ): rt_code,current_time =sim.simxGetStringSignal(client_id,'time',sim.simx_opmode_buffer) j = 0 k= 0 for i in pixel_path: i.reverse() task_3.change_setpoint(i) while(1): j+=1 k+=1 vision_sensor_image, image_resolution, return_code = task_2a.get_vision_sensor_image(client_id,vision_sensor_handle) transformed_image = task_2a.transform_vision_sensor_image(vision_sensor_image,image_resolution) warped_img = task_1b.applyPerspectiveTransform(transformed_image) shapes = task_1a_part1.scan_image(warped_img) if(shapes): warped_img = 
cv2.cvtColor(warped_img,cv2.COLOR_GRAY2RGB) warped_img = cv2.circle(warped_img,(shapes['Circle'][1],shapes['Circle'][2]),5,(0,255,0),2) warped_img = cv2.circle(warped_img,(i[0],i[1]),5,(255,0,0),2) if(abs(shapes['Circle'][1]-i[0]) <= 30 and abs(shapes['Circle'][2]-i[1]) <= 30): break else: task_3.control_logic(client_id,shapes['Circle'][1],shapes['Circle'][2],servohandle_x,servohandle_y) return 1 ''' Function name: send_data_to_draw_path Usage: Draws path on the table in Coppleiasim scene Inputs: table no and the box path to be drawn Outputs: None Example call: send_data_to_draw_path('T4', pixel_path_list) ''' def send_data_to_draw_path(table,path): global client_id ############## IF REQUIRED, CHANGE THE CODE FROM HERE ############## coppelia_sim_coord_path = [] table_name = "top_plate_respondable_t" + str(table) + "_1" for coord in path: for element in coord: coppelia_sim_coord_path.append(((10*element) - 45)/100) inputBuffer = bytearray() return_code, retInts, retFloats, retStrings, retBuffer = sim.simxCallScriptFunction(client_id, \ table_name, sim.sim_scripttype_customizationscript, 'drawPath', [], \ coppelia_sim_coord_path, [], inputBuffer, sim.simx_opmode_oneshot) ''' Function name: make_connection Usage: Establishes connection with the Coppleiasim server and populates the global variable handle list with the updated values of servo handle and vision sensors Inputs: None Outputs: None Example call: make_connection() ''' def make_connection(): global client_id,handle_list global vision_sensor_5,vision_sensor_4,vision_sensor_3,vision_sensor_2,vision_sensor_1,servo_handle_x_t1,servo_handle_y_t1,servo_handle_x_t4,servo_handle_y_t4 return_code,servo_handle_x_t1 = sim.simxGetObjectHandle(client_id,"revolute_joint_ss_t1_1",sim.simx_opmode_blocking) return_code,servo_handle_y_t1 = sim.simxGetObjectHandle(client_id,"revolute_joint_ss_t1_2",sim.simx_opmode_blocking) return_code,servo_handle_x_t4 = sim.simxGetObjectHandle(client_id,"revolute_joint_ss_t4_1",sim.simx_opmode_blocking) return_code,servo_handle_y_t4 = sim.simxGetObjectHandle(client_id,"revolute_joint_ss_t4_2",sim.simx_opmode_blocking) return_code,vision_sensor_1 = sim.simxGetObjectHandle(client_id,"vision_sensor_1",sim.simx_opmode_blocking) #return_code,vision_sensor_2 = sim.simxGetObjectHandle(client_id,"vision_sensor_2",sim.simx_opmode_blocking) #return_code,vision_sensor_3 = sim.simxGetObjectHandle(client_id,"vision_sensor_3",sim.simx_opmode_blocking) return_code,vision_sensor_4 = sim.simxGetObjectHandle(client_id,"vision_sensor_4",sim.simx_opmode_blocking) return_code,vision_sensor_5 = sim.simxGetObjectHandle(client_id,"vision_sensor_5",sim.simx_opmode_blocking) handle_list = {'T4' : [servo_handle_x_t4,servo_handle_y_t4,vision_sensor_4], 'T3' : [], 'T2' : [], 'T1' : [servo_handle_x_t1,servo_handle_y_t1,vision_sensor_1] } ''' Function name: set_path Usage: sets variables used to make the ball reach to its destination collection box according to the color using ball_details json dictionary. It calls send_data_to_draw_path to draw the path on the table. 
Inputs: color of the detected ball : string Outputs: None Example call: set_path('green') ''' def set_path(color): global t4_path,aux_path table, collection_box = ball_details[color][0].split('_') t4_path=path_map['T4'][int(table[-1])-1] t4_path_drawn = path_box_map['T4'][int(table[-1])-1] send_data_to_draw_path(4,t4_path_drawn) aux_path = path_map[table][int(collection_box[-1])-1] aux_path_drawn = path_box_map[table][int(collection_box[-1])-1] send_data_to_draw_path(1,aux_path_drawn) ball_details[color].pop(0) ''' Function name: complete_all_mapping_path Usage: Sets all mapping paths according to the values of entry and exit points of the table and the maze. It also manipulates the setpoints according to the required collection box (lines 452-478) to make the ball fall in the collection box. Inputs: Table number for which the paths have to be set : string Outputs: None Example Call: complete_all_mapping_path('T4') ''' def
(tablenum): global map_start,map_end,maze_map,path_map,encoded_maze_t1,path_box_map for i in range(3): start_coord= map_start[tablenum][0] end_coord= map_end[tablenum][i] mazearray = maze_map[tablenum] path = task_4a.find_path(mazearray, start_coord, end_coord) path_box_map[tablenum].append(path) resolution_x = 1120#1280 resolution_y = 1120#1280 x_increment = resolution_x//10 y_increment = resolution_y//10 pixel_path = [] for i in range(len(path)): pixel_path.append([]) for i in range(len(path)): # to change the pixel trim: change 180 with pixel*10 and 18 with pixel x_pix_trim = int(((180*path[i][0])/45)-18) y_pix_trim = int(((180*path[i][1])/45)-18) x_pixel = ((x_increment//2) + path[i][0]*x_increment) + 80 + x_pix_trim y_pixel = ((y_increment//2) + path[i][1]*y_increment) + 80 + y_pix_trim pixel_path[i].append(x_pixel) pixel_path[i].append(y_pixel) if (tablenum == 'T1'): if (path[len(path)-1] == map_end[tablenum][0]): #(0,4) decrease y pixel [tilt in +ve y] pixel_path.append( [ pixel_path[len(pixel_path)-1][0]- (y_increment//2), pixel_path[len(pixel_path)-1][1]]) elif (path[len(path)-1] == map_end[tablenum][1]): #(4,9) increase x pixel [tilt in +ve x] pixel_path.append( [ pixel_path[len(pixel_path)-1][0] + (x_increment//2) , pixel_path[len(pixel_path)-1][1]]) elif (path[len(path)-1] == map_end[tablenum][2]): #(9,5) increase y pixel [tilt in -ve y] pixel_path.append( [ pixel_path[len(pixel_path)-1][0] , pixel_path[len(pixel_path)-1][1] + (y_increment//2)]) else: print("Unexpected element in the end of the path in maze T1") if (tablenum == 'T4'): if (path[len(path)-1] == map_end[tablenum][0]): #(5,9) increase x pixel [tilt in +ve x] pixel_path.append( [ pixel_path[len(pixel_path)-1][0] , pixel_path[len(pixel_path)-1][1]+(x_increment//2)] ) elif (path[len(path)-1] == map_end[tablenum][1]): #(9,4) increase y pixel [tilt in -ve y] pixel_path.append( [ pixel_path[len(pixel_path)-1][0] , pixel_path[len(pixel_path)-1][1] + (y_increment//2)]) elif (path[len(path)-1] == map_end[tablenum][2]): #(4,0) decrease x pixel [tilt in -ve x] pixel_path.append( [ pixel_path[len(pixel_path)-1][0] - (x_increment//2) , pixel_path[len(pixel_path)-1][1]]) else: print("Unexpected element in the end of the path in maze T4") path_map[tablenum].append(pixel_path) ''' Function name: get_color Usage: It sends the vision sensor image to the color_get function repeatedly and waits until the called function returns a color. Inputs: None Outputs: color of the detected ball ''' def get_color(): global vision_sensor_5,client_id color = None return_code ,image_resolution,vision_sensor_image =sim.simxGetVisionSensorImage(client_id,vision_sensor_5,0,sim.simx_opmode_blocking) while(color is None ): return_code ,image_resolution,vision_sensor_image = sim.simxGetVisionSensorImage(client_id,vision_sensor_5,0,sim.simx_opmode_blocking) if(len(vision_sensor_image)): vision_sensor_image = task_2a.transform_vision_sensor_image(vision_sensor_image,image_resolution) color = color_get(vision_sensor_image) return color ############################################################## def main(rec_client_id): """ Purpose: --- Teams are free to design their code in this task. The test executable will only call this function of task_5.py. init_remote_api_server() and exit_remote_api_server() functions are already defined in the executable and hence should not be called by the teams. The obtained client_id is passed to this function so that teams can use it in their code. 
However NOTE: Teams will have to call start_simulation() and stop_simulation() functions on their own. Input Arguments: --- `rec_client_id` : integer client_id returned after calling init_remote_api_server() function from the executable. Returns: --- None Example call: --- main(rec_client_id) """ ############## ADD YOUR CODE HERE ############## global maze_map,encoded_maze_t1,encoded_maze_t2,encoded_maze_t3,encoded_maze_t4,t4_path,aux_path,servo_handle_x_t1,servo_handle_y_t1 global servo_handle_x_t4,servo_handle_y_t4,path_box_map,collection_box global client_id,ball_details client_id = rec_client_id img_t4 = cv2.imread("maze_t4.JPG") warped_t4 = task_1b.applyPerspectiveTransform(img_t4) encoded_maze_t4 = task_1b.detectMaze(warped_t4) maze_map['T4'] = encoded_maze_t4 return_code = task_2b.send_data(rec_client_id,encoded_maze_t4,"t4") img_t1 = cv2.imread("maze_t1.JPG") warped_t1 = task_1b.applyPerspectiveTransform(img_t1) encoded_maze_t1 = task_1b.detectMaze(warped_t1) maze_map['T1'] = encoded_maze_t1 return_code = task_2b.send_data(rec_client_id,encoded_maze_t1,"t1") complete_all_mapping_path('T1') complete_all_mapping_path('T4') #Similarly for T3 and T2 make_connection() return_code = task_2a.start_simulation(rec_client_id) i = 0 while(all([len(i)!=0 for i in list(ball_details.values())])): print("here") color = get_color() if(color): collection_box = ball_details[color][0] table = ball_details[color][0].split('_')[0] send_color_and_collection_box_identified(color, collection_box) set_path(color) traverse_ball(handle_list["T4"][0],handle_list["T4"][1],handle_list["T4"][2],t4_path) traverse_ball(handle_list[table][0],handle_list[table][1],handle_list[table][2],aux_path) print("task completed") print(len(list(ball_details.values()))) print(f" i is {i} ") time.sleep(5) task_2a.stop_simulation(rec_client_id) ################################################## # Function Name: main (built in) # Inputs: None # Outputs: None # Purpose: To call the main(rec_client_id) function written by teams when they # run task_5.py only. # NOTE: Write your solution ONLY in the space provided in the above functions. This function should not be edited. if __name__ == "__main__": client_id = task_2a.init_remote_api_server() main(client_id)
complete_all_mapping_path
identifier_name
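The pixel-path construction in complete_all_mapping_path above boils down to one mapping: a maze cell (col, row) on the 10x10 grid goes to that cell's centre on the 1120-pixel vision-sensor frame, shifted by an 80-pixel border plus a small linear "pixel trim" that runs from -18 px at index 0 to +18 px at index 9. A standalone sketch of that arithmetic, with the constants copied from the function and a hypothetical helper name:

def cell_to_pixel(col, row, resolution=1120, border=80, trim=18):
    """Map a cell on a 10x10 maze grid to pixel coordinates on the frame.

    Mirrors the arithmetic in complete_all_mapping_path: cell centre,
    plus a fixed border, plus a trim that grows linearly with the index.
    """
    inc = resolution // 10                      # pixels per cell: 112
    x_trim = int(((180 * col) / 45) - trim)     # 4*col - 18, i.e. -18..+18
    y_trim = int(((180 * row) / 45) - trim)
    x = (inc // 2) + col * inc + border + x_trim
    y = (inc // 2) + row * inc + border + y_trim
    return x, y

# The T4 entry cell (0, 5) from map_start lands at:
print(cell_to_pixel(0, 5))   # (118, 698)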
task_5.py
''' ***************************************************************************************** * * =============================================== * Nirikshak Bot (NB) Theme (eYRC 2020-21) * =============================================== * * This script is to implement Task 5 of Nirikshak Bot (NB) Theme (eYRC 2020-21). * * This software is made available on an "AS IS WHERE IS BASIS". * Licensee/end user indemnifies and will keep e-Yantra indemnified from * any and all claim(s) that emanate from the use of the Software or * breach of the terms of this agreement. * * e-Yantra - An MHRD (now MOE) project under National Mission on Education using ICT (NMEICT) * ***************************************************************************************** ''' # Team ID: 2139 # Author List: Yash Varshney, Aman Tyagi # Filename: task_5.py # Functions: color_get,traverse_ball,send_data_to_draw_path, make_connection,set_path,complete_all_mapping_path,get_color # [ Comma separated list of functions in this file ] # Global variables: # [ List of global variables defined in this file ] # NOTE: Make sure you do NOT call sys.exit() in this code. ####################### IMPORT MODULES ####################### ## You are not allowed to make any changes in this section. ## ############################################################## import numpy as np import cv2 import os, sys import traceback import time import json ############################################################## # Importing the sim module for Remote API connection with CoppeliaSim try: import sim except Exception: print('\n[ERROR] It seems the sim.py OR simConst.py files are not found!') print('\n[WARNING] Make sure to have the following files in the directory:') print('sim.py, simConst.py and appropriate library - remoteApi.dll (if on Windows), remoteApi.so (if on Linux) or remoteApi.dylib (if on Mac).\n') #Import 'task_1b.py' file as module try: import task_1b except ImportError: print('\n[ERROR] task_1b.py file is not present in the current directory.') print('Your current directory is: ', os.getcwd()) print('Make sure task_1b.py is present in this current directory.\n') except Exception as e: print('Your task_1b.py threw an Exception. Kindly debug your code!\n') traceback.print_exc(file=sys.stdout) # Import 'task_1a_part1.py' file as module try: import task_1a_part1 except ImportError: print('\n[ERROR] task_1a_part1.py file is not present in the current directory.') print('Your current directory is: ', os.getcwd()) print('Make sure task_1a_part1.py is present in this current directory.\n') except Exception as e: print('Your task_1a_part1.py threw an Exception. Kindly debug your code!\n') traceback.print_exc(file=sys.stdout) # Import 'task_2a.py' file as module try: import task_2a except ImportError: print('\n[ERROR] task_2a.py file is not present in the current directory.') print('Your current directory is: ', os.getcwd()) print('Make sure task_2a.py is present in this current directory.\n') except Exception as e: print('Your task_2a.py threw an Exception. Kindly debug your code!\n') traceback.print_exc(file=sys.stdout) # Import 'task_2b.py' file as module try: import task_2b except ImportError: print('\n[ERROR] task_2b.py file is not present in the current directory.') print('Your current directory is: ', os.getcwd()) print('Make sure task_2b.py is present in this current directory.\n') except Exception as e: print('Your task_2b.py threw an Exception. Kindly debug your code!\n') traceback.print_exc(file=sys.stdout) # Import 'task_3.py' file as module try: import task_3 except ImportError: print('\n[ERROR] task_3.py file is not present in the current directory.') print('Your current directory is: ', os.getcwd()) print('Make sure task_3.py is present in this current directory.\n') except Exception as e: print('Your task_3.py threw an Exception. Kindly debug your code!\n') traceback.print_exc(file=sys.stdout) # Import 'task_4a.py' file as module try: import task_4a except ImportError: print('\n[ERROR] task_4a.py file is not present in the current directory.') print('Your current directory is: ', os.getcwd()) print('Make sure task_4a.py is present in this current directory.\n') except Exception as e: print('Your task_4a.py threw an Exception. Kindly debug your code!\n') traceback.print_exc(file=sys.stdout) ########################Global variables###################### vision_sensor_1 = -1 vision_sensor_2 = -1 vision_sensor_3 = -1 vision_sensor_4 = -1 vision_sensor_5 = -1 encoded_maze_t4 = None encoded_maze_t1 = None encoded_maze_t3 = None encoded_maze_t2 = None servo_handle_x_t4 = -1 servo_handle_y_t4 = -1 servo_handle_x_t3 = -1 servo_handle_y_t3 = -1 servo_handle_x_t2 = -1 servo_handle_y_t2 = -1 servo_handle_x_t1 = -1 servo_handle_y_t1 = -1 handle_list = {} try: with open('ball_details.json') as file: ball_details = json.load(file) except FileNotFoundError: print('\n[ERROR] ball_details.json file is not present in the current directory.') print('Your current directory is: ', os.getcwd()) print('Make sure ball_details.json is present in this current directory.\n') map_start = { "T4":[(0,5)], "T3":[(4,9)], "T2":[(0,4)], "T1":[(5,0)] } # do mapping of start and end point on the basis of color and json file. map_end = { "T4":[(5,9), (9,4), (4,0)], "T3":[(9,5), (5,0), (0,4)], "T2":[(4,9), (9,5), (5,0)], "T1":[(0,4), (4,9), (9,5)] } t4_path = None #path to table req aux_path = None #path to req cb path_map = { #pixel path to each exit point on the table "T1":[], "T2":[], "T3":[], "T4":[] } path_box_map = { #box coordinates path to draw path on the tables "T1":[], "T2":[], "T3":[], "T4":[] } maze_map ={ } collection_box = None #integer variable to store the number of the collection box client_id = -1 ############################################################ ############################################################## # NOTE: YOU ARE NOT ALLOWED TO MAKE ANY CHANGE TO THIS FUNCTION # # Function Name: send_color_and_collection_box_identified # Inputs: ball_color and collection_box_name # Outputs: None # Purpose: 1. This function should only be called when the task is being evaluated using # test executable. # 2. The format to send the data is as follows: # 'color::collection_box_name' def send_color_and_collection_box_identified(ball_color, collection_box_name):
################# ADD UTILITY FUNCTIONS HERE ################# ## You can define any utility functions for your code. ## ## Please add proper comments to ensure that your code is ## ## readable and easy to understand. ## ############################################################## ''' Function name: color_get Inputs: Image from vision sensor Outputs: Color of the ball detected in the image Usage: Takes in the image from the vision sensors and returns the color of the ball detected in the image Example call: color_get(image_from_vision_sensor) ''' def color_get(img_file_path): if(img_file_path is None): return #Read the image if type(img_file_path) == type(str()): img_file_path = cv2.imread(img_file_path) else: img_file_path= img_file_path #cv2.imwrite("colorefromrailing.png",img_file_path) imageFrame = cv2.GaussianBlur(img_file_path,(5,5),cv2.BORDER_TRANSPARENT) hsvFrame = cv2.cvtColor(imageFrame, cv2.COLOR_BGR2HSV) #To create a mask for red colour red_lower = np.array([0, 50, 50]) red_upper = np.array([10, 255, 255]) red_mask = cv2.inRange(hsvFrame, red_lower, red_upper) kernal = np.ones((5, 5)) red_gray=cv2.threshold(red_mask, 245,225, cv2.THRESH_BINARY)[1] gray_blur_red= cv2.Canny(red_gray,100,255) #Create a mask for blue colour blue_lower = np.array([94, 20, 0], np.uint8) blue_upper = np.array([140,255 ,255], np.uint8) blue_mask = cv2.inRange(hsvFrame, blue_lower, blue_upper) kernal = np.ones((5, 5)) blue_mask = cv2.dilate(blue_mask, kernal) blue_gray=cv2.threshold(blue_mask, 245,225, cv2.THRESH_TRUNC)[1] gray_blur_blue= cv2.Canny(blue_gray,100,255) #Create a mask for green colour green_lower = np.array([25, 52, 72], np.uint8) green_upper = np.array([102, 255, 255], np.uint8) green_mask = cv2.inRange(hsvFrame, green_lower, green_upper) kernal = np.ones((5, 5)) green_mask = cv2.dilate(green_mask, kernal) green_gray=cv2.threshold(green_mask, 250,255, cv2.THRESH_BINARY)[1] gray_blur_green = cv2.Canny(green_gray,100,255) #find contours on blue mask cnts= cv2.findContours(gray_blur_blue, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) if type(cnts[-1]) !=type(None): if len(cnts) == 2: cnts = cnts[0] elif len(cnts) == 3: cnts = cnts[1] if (len(cnts)): return 'blue' #Find red contours in the image cnts= cv2.findContours(gray_blur_red, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) if type(cnts[-1]) !=type(None) : if len(cnts) == 2: cnts = cnts[0] elif len(cnts) == 3: cnts = cnts[1] if (len(cnts)): return 'red' # Find green contours in the image cnts= cv2.findContours(gray_blur_green, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) if type(cnts[-1]) !=type(None) : if len(cnts) == 2: cnts = cnts[0] elif len(cnts) == 3: cnts = cnts[1] if(len(cnts)): return 'green' ''' Function name: traverse_ball Usage: traverses the ball from one point to another Inputs: servo handles(x and y), vision sensor to be read and pixel path which the ball has to follow Outputs: None Example Call : traverse_ball(servohandle_x_t4, servo_handle_y_t4, visionsensor_4, t4_path) ''' def traverse_ball(servohandle_x,servohandle_y,vision_sensor_handle,pixel_path): global client_id rt_code, prev_time = sim.simxGetStringSignal(client_id,'time',sim.simx_opmode_streaming) current_time = '' while(len(current_time) == 0 ): rt_code,current_time =sim.simxGetStringSignal(client_id,'time',sim.simx_opmode_buffer) j = 0 k= 0 for i in pixel_path: i.reverse() task_3.change_setpoint(i) while(1): j+=1 k+=1 vision_sensor_image, image_resolution, return_code = task_2a.get_vision_sensor_image(client_id,vision_sensor_handle) transformed_image = 
            warped_img = task_1b.applyPerspectiveTransform(transformed_image)
            shapes = task_1a_part1.scan_image(warped_img)
            if shapes:
                warped_img = cv2.cvtColor(warped_img, cv2.COLOR_GRAY2RGB)
                warped_img = cv2.circle(warped_img, (shapes['Circle'][1], shapes['Circle'][2]), 5, (0, 255, 0), 2)
                warped_img = cv2.circle(warped_img, (i[0], i[1]), 5, (255, 0, 0), 2)
                # Move on to the next setpoint once the ball is within 30 px of the current one
                if abs(shapes['Circle'][1] - i[0]) <= 30 and abs(shapes['Circle'][2] - i[1]) <= 30:
                    break
                else:
                    task_3.control_logic(client_id, shapes['Circle'][1], shapes['Circle'][2], servohandle_x, servohandle_y)
    return 1

'''
Function name:  send_data_to_draw_path
Usage:          Draws the path on the table in the CoppeliaSim scene
Inputs:         Table number and the box path to be drawn
Outputs:        None
Example call:   send_data_to_draw_path(4, pixel_path_list)
'''
def send_data_to_draw_path(table, path):
    global client_id
    ############## IF REQUIRED, CHANGE THE CODE FROM HERE ##############
    coppelia_sim_coord_path = []
    table_name = "top_plate_respondable_t" + str(table) + "_1"
    # Convert 10x10 box coordinates to CoppeliaSim metric coordinates
    for coord in path:
        for element in coord:
            coppelia_sim_coord_path.append(((10 * element) - 45) / 100)
    inputBuffer = bytearray()
    return_code, retInts, retFloats, retStrings, retBuffer = sim.simxCallScriptFunction(client_id, \
        table_name, sim.sim_scripttype_customizationscript, 'drawPath', [], \
        coppelia_sim_coord_path, [], inputBuffer, sim.simx_opmode_oneshot)

'''
Function name:  make_connection
Usage:          Establishes connection with the CoppeliaSim server and populates the global
                handle_list with the updated servo and vision sensor handles
Inputs:         None
Outputs:        None
Example call:   make_connection()
'''
def make_connection():
    global client_id, handle_list
    global vision_sensor_5, vision_sensor_4, vision_sensor_3, vision_sensor_2, vision_sensor_1, servo_handle_x_t1, servo_handle_y_t1, servo_handle_x_t4, servo_handle_y_t4

    return_code, servo_handle_x_t1 = sim.simxGetObjectHandle(client_id, "revolute_joint_ss_t1_1", sim.simx_opmode_blocking)
    return_code, servo_handle_y_t1 = sim.simxGetObjectHandle(client_id, "revolute_joint_ss_t1_2", sim.simx_opmode_blocking)
    return_code, servo_handle_x_t4 = sim.simxGetObjectHandle(client_id, "revolute_joint_ss_t4_1", sim.simx_opmode_blocking)
    return_code, servo_handle_y_t4 = sim.simxGetObjectHandle(client_id, "revolute_joint_ss_t4_2", sim.simx_opmode_blocking)
    return_code, vision_sensor_1 = sim.simxGetObjectHandle(client_id, "vision_sensor_1", sim.simx_opmode_blocking)
    # return_code, vision_sensor_2 = sim.simxGetObjectHandle(client_id, "vision_sensor_2", sim.simx_opmode_blocking)
    # return_code, vision_sensor_3 = sim.simxGetObjectHandle(client_id, "vision_sensor_3", sim.simx_opmode_blocking)
    return_code, vision_sensor_4 = sim.simxGetObjectHandle(client_id, "vision_sensor_4", sim.simx_opmode_blocking)
    return_code, vision_sensor_5 = sim.simxGetObjectHandle(client_id, "vision_sensor_5", sim.simx_opmode_blocking)

    handle_list = {'T4': [servo_handle_x_t4, servo_handle_y_t4, vision_sensor_4],
                   'T3': [],
                   'T2': [],
                   'T1': [servo_handle_x_t1, servo_handle_y_t1, vision_sensor_1]}

'''
Function name:  set_path
Usage:          Sets the variables used to make the ball reach its destination collection box
                according to its color, using the ball_details json dictionary.
                It calls send_data_to_draw_path to draw the path on the table.
Inputs:         Color of the detected ball : string
Outputs:        None
Example call:   set_path('green')
'''
def set_path(color):
    global t4_path, aux_path

    table, collection_box = ball_details[color][0].split('_')

    t4_path = path_map['T4'][int(table[-1]) - 1]
    t4_path_drawn = path_box_map['T4'][int(table[-1]) - 1]
    send_data_to_draw_path(4, t4_path_drawn)

    aux_path = path_map[table][int(collection_box[-1]) - 1]
    aux_path_drawn = path_box_map[table][int(collection_box[-1]) - 1]
    # Draw on the destination table itself (the original hardcoded table 1 here)
    send_data_to_draw_path(int(table[-1]), aux_path_drawn)

    ball_details[color].pop(0)

'''
Function name:  complete_all_mapping_path
Usage:          Sets all mapping paths according to the entry and exit points of the table
                and the maze. It also manipulates the final setpoint toward the required
                collection box so that the ball actually falls in.
Inputs:         Table number for which the paths have to be set : string
Outputs:        None
Example call:   complete_all_mapping_path('T4')
'''
def complete_all_mapping_path(tablenum):
    global map_start, map_end, maze_map, path_map, encoded_maze_t1, path_box_map

    for i in range(3):
        start_coord = map_start[tablenum][0]
        end_coord = map_end[tablenum][i]
        mazearray = maze_map[tablenum]
        path = task_4a.find_path(mazearray, start_coord, end_coord)
        path_box_map[tablenum].append(path)

        resolution_x = 1120  # 1280
        resolution_y = 1120  # 1280
        x_increment = resolution_x // 10
        y_increment = resolution_y // 10

        pixel_path = [[] for _ in range(len(path))]
        for idx in range(len(path)):
            # to change the pixel trim: replace 180 with pixel*10 and 18 with pixel
            x_pix_trim = int(((180 * path[idx][0]) / 45) - 18)
            y_pix_trim = int(((180 * path[idx][1]) / 45) - 18)
            x_pixel = ((x_increment // 2) + path[idx][0] * x_increment) + 80 + x_pix_trim
            y_pixel = ((y_increment // 2) + path[idx][1] * y_increment) + 80 + y_pix_trim
            pixel_path[idx].append(x_pixel)
            pixel_path[idx].append(y_pixel)

        if tablenum == 'T1':
            if path[-1] == map_end[tablenum][0]:
                # (0,4) decrease y pixel [tilt in +ve y]
                pixel_path.append([pixel_path[-1][0] - (y_increment // 2), pixel_path[-1][1]])
            elif path[-1] == map_end[tablenum][1]:
                # (4,9) increase x pixel [tilt in +ve x]
                pixel_path.append([pixel_path[-1][0] + (x_increment // 2), pixel_path[-1][1]])
            elif path[-1] == map_end[tablenum][2]:
                # (9,5) increase y pixel [tilt in -ve y]
                pixel_path.append([pixel_path[-1][0], pixel_path[-1][1] + (y_increment // 2)])
            else:
                print("Unexpected element at the end of the path in maze T1")

        if tablenum == 'T4':
            if path[-1] == map_end[tablenum][0]:
                # (5,9) increase x pixel [tilt in +ve x]
                pixel_path.append([pixel_path[-1][0], pixel_path[-1][1] + (x_increment // 2)])
            elif path[-1] == map_end[tablenum][1]:
                # (9,4) increase y pixel [tilt in -ve y]
                pixel_path.append([pixel_path[-1][0], pixel_path[-1][1] + (y_increment // 2)])
            elif path[-1] == map_end[tablenum][2]:
                # (4,0) decrease x pixel [tilt in -ve x]
                pixel_path.append([pixel_path[-1][0] - (x_increment // 2), pixel_path[-1][1]])
            else:
                print("Unexpected element at the end of the path in maze T4")

        path_map[tablenum].append(pixel_path)

'''
Function name:  get_color
Usage:          Repeatedly sends the vision sensor image to color_get and waits until the
                called function returns a color.
Inputs:         None
Outputs:        Color of the detected ball
'''
def get_color():
    global vision_sensor_5, client_id
    color = None
    while color is None:
        return_code, image_resolution, vision_sensor_image = sim.simxGetVisionSensorImage(client_id, vision_sensor_5, 0, sim.simx_opmode_blocking)
        if len(vision_sensor_image):
            vision_sensor_image = task_2a.transform_vision_sensor_image(vision_sensor_image, image_resolution)
            color = color_get(vision_sensor_image)
    return color

##############################################################

def main(rec_client_id):
    """
    Purpose:
    ---
    Teams are free to design their code in this task. The test executable will only call
    this function of task_5.py. init_remote_api_server() and exit_remote_api_server()
    functions are already defined in the executable and hence should not be called by the
    teams. The obtained client_id is passed to this function so that teams can use it in
    their code.

    NOTE: Teams will have to call start_simulation() and stop_simulation() function on their own.

    Input Arguments:
    ---
    `rec_client_id`   :   integer
        client_id returned after calling init_remote_api_server() function from the executable.

    Returns:
    ---
    None

    Example call:
    ---
    main(rec_client_id)
    """
    ############## ADD YOUR CODE HERE ##############
    global maze_map, encoded_maze_t1, encoded_maze_t2, encoded_maze_t3, encoded_maze_t4, t4_path, aux_path, servo_handle_x_t1, servo_handle_y_t1
    global servo_handle_x_t4, servo_handle_y_t4, path_box_map, collection_box
    global client_id, ball_details

    client_id = rec_client_id

    img_t4 = cv2.imread("maze_t4.JPG")
    warped_t4 = task_1b.applyPerspectiveTransform(img_t4)
    encoded_maze_t4 = task_1b.detectMaze(warped_t4)
    maze_map['T4'] = encoded_maze_t4
    return_code = task_2b.send_data(rec_client_id, encoded_maze_t4, "t4")

    img_t1 = cv2.imread("maze_t1.JPG")
    warped_t1 = task_1b.applyPerspectiveTransform(img_t1)
    encoded_maze_t1 = task_1b.detectMaze(warped_t1)
    maze_map['T1'] = encoded_maze_t1
    return_code = task_2b.send_data(rec_client_id, encoded_maze_t1, "t1")

    complete_all_mapping_path('T1')
    complete_all_mapping_path('T4')
    # Similarly for T3 and T2

    make_connection()
    return_code = task_2a.start_simulation(rec_client_id)

    i = 0
    # Keep serving balls until some color runs out of pending targets
    while all([len(lst) != 0 for lst in list(ball_details.values())]):
        print("here")
        color = get_color()
        if color:
            collection_box = ball_details[color][0]
            table = ball_details[color][0].split('_')[0]
            send_color_and_collection_box_identified(color, collection_box)
            set_path(color)
            traverse_ball(handle_list["T4"][0], handle_list["T4"][1], handle_list["T4"][2], t4_path)
            traverse_ball(handle_list[table][0], handle_list[table][1], handle_list[table][2], aux_path)
            print("task completed")
            print(len(list(ball_details.values())))
            i += 1
            print(f" i is {i} ")
            time.sleep(5)
    task_2a.stop_simulation(rec_client_id)

    ##################################################

# Function Name:    main (built in)
# Inputs:           None
# Outputs:          None
# Purpose:          To call the main(rec_client_id) function written by teams when they
#                   run task_5.py only.
# NOTE: Write your solution ONLY in the space provided in the above functions. This function should not be edited.
if __name__ == "__main__":
    client_id = task_2a.init_remote_api_server()
    main(client_id)
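A note on the grid-to-pixel mapping above: complete_all_mapping_path converts 10x10 maze cells into vision-sensor pixel setpoints using a half-cell offset, a fixed 80 px margin and a linear trim. A minimal standalone sketch of that arithmetic follows; the 1120 px resolution is taken from the listing, while the helper name cell_to_pixel is ours, not part of the task files.

# Minimal sketch of the cell-to-pixel conversion used in complete_all_mapping_path,
# assuming the 1120 px sensor resolution hard-coded in the listing.
def cell_to_pixel(cx, cy, resolution=1120):
    inc = resolution // 10                   # pixels per maze cell
    trim_x = int(((180 * cx) / 45) - 18)     # linear trim correction per cell
    trim_y = int(((180 * cy) / 45) - 18)
    x = (inc // 2) + cx * inc + 80 + trim_x  # half cell + fixed margin + trim
    y = (inc // 2) + cy * inc + 80 + trim_y
    return x, y

if __name__ == "__main__":
    print(cell_to_pixel(0, 5))  # entry cell of table T4 in map_start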
    global client_id

    color_and_cb = [ball_color + '::' + collection_box_name]
    inputBuffer = bytearray()
    return_code, retInts, retFloats, retStrings, retBuffer = sim.simxCallScriptFunction(client_id, 'evaluation_screen_respondable_1',
        sim.sim_scripttype_childscript, 'color_and_cb_identification', [], [], color_and_cb, inputBuffer, sim.simx_opmode_blocking)
identifier_body
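For reference, the middle shown above builds the evaluation string as 'color::collection_box_name'. A tiny sketch of how a receiving side could split it back; parse_report is a hypothetical helper, not part of the task files.

# Hypothetical helper: splits the 'color::collection_box_name' string
# produced by send_color_and_collection_box_identified.
def parse_report(msg):
    color, cb = msg.split('::', 1)
    return color, cb

print(parse_report('red::T1_CB2'))  # ('red', 'T1_CB2')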
task_5.py
'''
*****************************************************************************************
*
*        =============================================
*             Nirikshak Bot (NB) Theme (eYRC 2020-21)
*        =============================================
*
*  This script is to implement Task 5 of Nirikshak Bot (NB) Theme (eYRC 2020-21).
*
*  This software is made available on an "AS IS WHERE IS BASIS".
*  Licensee/end user indemnifies and will keep e-Yantra indemnified from
*  any and all claim(s) that emanate from the use of the Software or
*  breach of the terms of this agreement.
*
*  e-Yantra - An MHRD (now MOE) project under National Mission on Education using ICT (NMEICT)
*
*****************************************************************************************
'''

# Team ID:          2139
# Author List:      Yash Varshney, Aman Tyagi
# Filename:         task_5.py
# Functions:        color_get, traverse_ball, send_data_to_draw_path, make_connection,
#                   set_path, complete_all_mapping_path, get_color, main
#                   [ Comma separated list of functions in this file ]
# Global variables:
#                   [ List of global variables defined in this file ]

# NOTE: Make sure you do NOT call sys.exit() in this code.

####################### IMPORT MODULES #######################
## You are not allowed to make any changes in this section. ##
##############################################################
import numpy as np
import cv2
import os, sys
import traceback
import time
import json
##############################################################

# Importing the sim module for Remote API connection with CoppeliaSim
try:
    import sim
except Exception:
    print('\n[ERROR] It seems the sim.py OR simConst.py files are not found!')
    print('\n[WARNING] Make sure to have following files in the directory:')
    print('sim.py, simConst.py and appropriate library - remoteApi.dll (if on Windows), remoteApi.so (if on Linux) or remoteApi.dylib (if on Mac).\n')

# Import 'task_1b.py' file as module
try:
    import task_1b
except ImportError:
    print('\n[ERROR] task_1b.py file is not present in the current directory.')
    print('Your current directory is: ', os.getcwd())
    print('Make sure task_1b.py is present in this current directory.\n')
except Exception as e:
    print('Your task_1b.py threw an Exception. Kindly debug your code!\n')
    traceback.print_exc(file=sys.stdout)

# Import 'task_1a_part1.py' file as module
try:
    import task_1a_part1
except ImportError:
    print('\n[ERROR] task_1a_part1.py file is not present in the current directory.')
    print('Your current directory is: ', os.getcwd())
    print('Make sure task_1a_part1.py is present in this current directory.\n')
except Exception as e:
    print('Your task_1a_part1.py threw an Exception. Kindly debug your code!\n')
    traceback.print_exc(file=sys.stdout)

# Import 'task_2a.py' file as module
try:
    import task_2a
except ImportError:
    print('\n[ERROR] task_2a.py file is not present in the current directory.')
    print('Your current directory is: ', os.getcwd())
    print('Make sure task_2a.py is present in this current directory.\n')
except Exception as e:
    print('Your task_2a.py threw an Exception. Kindly debug your code!\n')
    traceback.print_exc(file=sys.stdout)

# Import 'task_2b.py' file as module
try:
    import task_2b
except ImportError:
    print('\n[ERROR] task_2b.py file is not present in the current directory.')
    print('Your current directory is: ', os.getcwd())
    print('Make sure task_2b.py is present in this current directory.\n')
except Exception as e:
    print('Your task_2b.py threw an Exception. Kindly debug your code!\n')
    traceback.print_exc(file=sys.stdout)

# Import 'task_3.py' file as module
try:
    import task_3
except ImportError:
    print('\n[ERROR] task_3.py file is not present in the current directory.')
    print('Your current directory is: ', os.getcwd())
    print('Make sure task_3.py is present in this current directory.\n')
except Exception as e:
    print('Your task_3.py threw an Exception. Kindly debug your code!\n')
    traceback.print_exc(file=sys.stdout)

# Import 'task_4a.py' file as module
try:
    import task_4a
except ImportError:
    print('\n[ERROR] task_4a.py file is not present in the current directory.')
    print('Your current directory is: ', os.getcwd())
    print('Make sure task_4a.py is present in this current directory.\n')
except Exception as e:
    print('Your task_4a.py threw an Exception. Kindly debug your code!\n')
    traceback.print_exc(file=sys.stdout)

######################## Global variables ######################
vision_sensor_1 = -1
vision_sensor_2 = -1
vision_sensor_3 = -1
vision_sensor_4 = -1
vision_sensor_5 = -1

encoded_maze_t4 = None
encoded_maze_t1 = None
encoded_maze_t3 = None
encoded_maze_t2 = None

servo_handle_x_t4 = -1
servo_handle_y_t4 = -1
servo_handle_x_t3 = -1
servo_handle_y_t3 = -1
servo_handle_x_t2 = -1
servo_handle_y_t2 = -1
servo_handle_x_t1 = -1
servo_handle_y_t1 = -1

handle_list = {}

# NOTE: opening a missing file raises FileNotFoundError, not ImportError
try:
    with open('ball_details.json') as file:
        ball_details = json.load(file)
except FileNotFoundError:
    print('\n[ERROR] ball_details.json file is not present in the current directory.')
    print('Your current directory is: ', os.getcwd())
"T4":[(0,5)], "T3":[(4,9)], "T2":[(0,4)], "T1":[(5,0)] } # do mapping of start and end point on the basis of color and json file. map_end = { "T4":[(5,9), (9,4), (4,0)], "T3":[(9,5), (5,0), (0,4)], "T2":[(4,9), (9,5), (5,0)], "T1":[(0,4), (4,9), (9,5)] } t4_path = None #path to table req aux_path = None #path to req cb path_map = { #pixel path to each exit point on the table "T1":[], "T2":[], "T3":[], "T4":[] } path_box_map = { #box coordinates path to draw path on the tables "T1":[], "T2":[], "T3":[], "T4":[] } maze_map ={ } collection_box = None #integer variable to store the number of the collection box client_id = -1 ############################################################ ############################################################## # NOTE: YOU ARE NOT ALLOWED TO MAKE ANY CHANGE TO THIS FUNCTION # # Function Name: send_color_and_collection_box_identified # Inputs: ball_color and collection_box_name # Outputs: None # Purpose: 1. This function should only be called when the task is being evaluated using # test executable. # 2. The format to send the data is as follows: # 'color::collection_box_name' def send_color_and_collection_box_identified(ball_color, collection_box_name): global client_id color_and_cb = [ball_color + '::' + collection_box_name] inputBuffer = bytearray() return_code, retInts, retFloats, retStrings, retBuffer = sim.simxCallScriptFunction(client_id,'evaluation_screen_respondable_1', sim.sim_scripttype_childscript,'color_and_cb_identification',[],[],color_and_cb,inputBuffer,sim.simx_opmode_blocking) ################# ADD UTILITY FUNCTIONS HERE ################# ## You can define any utility functions for your code. ## ## Please add proper comments to ensure that your code is ## ## readable and easy to understand. ## ############################################################## ''' Function name: color_get Inputs: Image from vision sensor Outputs: Color of the ball detected in the image Usage: Takes in the image from the vision sensors and returns the color of the ball detected in the image Example call: color_get(image_from_vision_sensor) ''' def color_get(img_file_path): if(img_file_path is None): return #Read the image if type(img_file_path) == type(str()): img_file_path = cv2.imread(img_file_path) else: img_file_path= img_file_path #cv2.imwrite("colorefromrailing.png",img_file_path) imageFrame = cv2.GaussianBlur(img_file_path,(5,5),cv2.BORDER_TRANSPARENT) hsvFrame = cv2.cvtColor(imageFrame, cv2.COLOR_BGR2HSV) #To create a mask for red colour red_lower = np.array([0, 50, 50]) red_upper = np.array([10, 255, 255]) red_mask = cv2.inRange(hsvFrame, red_lower, red_upper) kernal = np.ones((5, 5)) red_gray=cv2.threshold(red_mask, 245,225, cv2.THRESH_BINARY)[1] gray_blur_red= cv2.Canny(red_gray,100,255) #Create a mask for blue colour blue_lower = np.array([94, 20, 0], np.uint8) blue_upper = np.array([140,255 ,255], np.uint8) blue_mask = cv2.inRange(hsvFrame, blue_lower, blue_upper) kernal = np.ones((5, 5)) blue_mask = cv2.dilate(blue_mask, kernal) blue_gray=cv2.threshold(blue_mask, 245,225, cv2.THRESH_TRUNC)[1] gray_blur_blue= cv2.Canny(blue_gray,100,255) #Create a mask for green colour green_lower = np.array([25, 52, 72], np.uint8) green_upper = np.array([102, 255, 255], np.uint8) green_mask = cv2.inRange(hsvFrame, green_lower, green_upper) kernal = np.ones((5, 5)) green_mask = cv2.dilate(green_mask, kernal) green_gray=cv2.threshold(green_mask, 250,255, cv2.THRESH_BINARY)[1] gray_blur_green = cv2.Canny(green_gray,100,255) #find contours on blue mask cnts= 
    cnts = cv2.findContours(gray_blur_blue, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if cnts[-1] is not None:
        if len(cnts) == 2:
            cnts = cnts[0]
        elif len(cnts) == 3:
            cnts = cnts[1]
        if len(cnts):
            return 'blue'

    # Find red contours in the image
    cnts = cv2.findContours(gray_blur_red, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if cnts[-1] is not None:
        if len(cnts) == 2:
            cnts = cnts[0]
        elif len(cnts) == 3:
            cnts = cnts[1]
        if len(cnts):
            return 'red'

    # Find green contours in the image
    cnts = cv2.findContours(gray_blur_green, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if cnts[-1] is not None:
        if len(cnts) == 2:
            cnts = cnts[0]
        elif len(cnts) == 3:
            cnts = cnts[1]
        if len(cnts):
            return 'green'

'''
Function name:  traverse_ball
Usage:          Traverses the ball from one point to another
Inputs:         Servo handles (x and y), vision sensor to be read and the pixel path the ball has to follow
Outputs:        None
Example call:   traverse_ball(servo_handle_x_t4, servo_handle_y_t4, vision_sensor_4, t4_path)
'''
def traverse_ball(servohandle_x, servohandle_y, vision_sensor_handle, pixel_path):
    global client_id

    # Wait until the simulation-side 'time' signal starts streaming
    rt_code, prev_time = sim.simxGetStringSignal(client_id, 'time', sim.simx_opmode_streaming)
    current_time = ''
    while len(current_time) == 0:
        rt_code, current_time = sim.simxGetStringSignal(client_id, 'time', sim.simx_opmode_buffer)

    j = 0
    k = 0
    for i in pixel_path:
        i.reverse()
        task_3.change_setpoint(i)
        while 1:
            j += 1
            k += 1
            vision_sensor_image, image_resolution, return_code = task_2a.get_vision_sensor_image(client_id, vision_sensor_handle)
            transformed_image = task_2a.transform_vision_sensor_image(vision_sensor_image, image_resolution)
            warped_img = task_1b.applyPerspectiveTransform(transformed_image)
            shapes = task_1a_part1.scan_image(warped_img)
            if shapes:
                warped_img = cv2.cvtColor(warped_img, cv2.COLOR_GRAY2RGB)
                warped_img = cv2.circle(warped_img, (shapes['Circle'][1], shapes['Circle'][2]), 5, (0, 255, 0), 2)
                warped_img = cv2.circle(warped_img, (i[0], i[1]), 5, (255, 0, 0), 2)
                # Move on to the next setpoint once the ball is within 30 px of the current one
                if abs(shapes['Circle'][1] - i[0]) <= 30 and abs(shapes['Circle'][2] - i[1]) <= 30:
                    break
                else:
                    task_3.control_logic(client_id, shapes['Circle'][1], shapes['Circle'][2], servohandle_x, servohandle_y)
    return 1

'''
Function name:  send_data_to_draw_path
Usage:          Draws the path on the table in the CoppeliaSim scene
Inputs:         Table number and the box path to be drawn
Outputs:        None
Example call:   send_data_to_draw_path(4, pixel_path_list)
'''
def send_data_to_draw_path(table, path):
    global client_id
    ############## IF REQUIRED, CHANGE THE CODE FROM HERE ##############
    coppelia_sim_coord_path = []
    table_name = "top_plate_respondable_t" + str(table) + "_1"
    # Convert 10x10 box coordinates to CoppeliaSim metric coordinates
    for coord in path:
        for element in coord:
            coppelia_sim_coord_path.append(((10 * element) - 45) / 100)
    inputBuffer = bytearray()
    return_code, retInts, retFloats, retStrings, retBuffer = sim.simxCallScriptFunction(client_id, \
        table_name, sim.sim_scripttype_customizationscript, 'drawPath', [], \
        coppelia_sim_coord_path, [], inputBuffer, sim.simx_opmode_oneshot)

'''
Function name:  make_connection
Usage:          Establishes connection with the CoppeliaSim server and populates the global
                handle_list with the updated servo and vision sensor handles
Inputs:         None
Outputs:        None
Example call:   make_connection()
'''
def make_connection():
    global client_id, handle_list
    global vision_sensor_5, vision_sensor_4, vision_sensor_3, vision_sensor_2, vision_sensor_1, servo_handle_x_t1, servo_handle_y_t1, servo_handle_x_t4, servo_handle_y_t4

    return_code, servo_handle_x_t1 = sim.simxGetObjectHandle(client_id, "revolute_joint_ss_t1_1", sim.simx_opmode_blocking)
    return_code, servo_handle_y_t1 = sim.simxGetObjectHandle(client_id, "revolute_joint_ss_t1_2", sim.simx_opmode_blocking)
    return_code, servo_handle_x_t4 = sim.simxGetObjectHandle(client_id, "revolute_joint_ss_t4_1", sim.simx_opmode_blocking)
    return_code, servo_handle_y_t4 = sim.simxGetObjectHandle(client_id, "revolute_joint_ss_t4_2", sim.simx_opmode_blocking)
    return_code, vision_sensor_1 = sim.simxGetObjectHandle(client_id, "vision_sensor_1", sim.simx_opmode_blocking)
    # return_code, vision_sensor_2 = sim.simxGetObjectHandle(client_id, "vision_sensor_2", sim.simx_opmode_blocking)
    # return_code, vision_sensor_3 = sim.simxGetObjectHandle(client_id, "vision_sensor_3", sim.simx_opmode_blocking)
    return_code, vision_sensor_4 = sim.simxGetObjectHandle(client_id, "vision_sensor_4", sim.simx_opmode_blocking)
    return_code, vision_sensor_5 = sim.simxGetObjectHandle(client_id, "vision_sensor_5", sim.simx_opmode_blocking)

    handle_list = {'T4': [servo_handle_x_t4, servo_handle_y_t4, vision_sensor_4],
                   'T3': [],
                   'T2': [],
                   'T1': [servo_handle_x_t1, servo_handle_y_t1, vision_sensor_1]}

'''
Function name:  set_path
Usage:          Sets the variables used to make the ball reach its destination collection box
                according to its color, using the ball_details json dictionary.
                It calls send_data_to_draw_path to draw the path on the table.
Inputs:         Color of the detected ball : string
Outputs:        None
Example call:   set_path('green')
'''
def set_path(color):
    global t4_path, aux_path

    table, collection_box = ball_details[color][0].split('_')

    t4_path = path_map['T4'][int(table[-1]) - 1]
    t4_path_drawn = path_box_map['T4'][int(table[-1]) - 1]
    send_data_to_draw_path(4, t4_path_drawn)

    aux_path = path_map[table][int(collection_box[-1]) - 1]
    aux_path_drawn = path_box_map[table][int(collection_box[-1]) - 1]
    # Draw on the destination table itself (the original hardcoded table 1 here)
    send_data_to_draw_path(int(table[-1]), aux_path_drawn)

    ball_details[color].pop(0)

'''
Function name:  complete_all_mapping_path
Usage:          Sets all mapping paths according to the entry and exit points of the table
                and the maze. It also manipulates the final setpoint toward the required
                collection box to make the ball fall in the collection box.
Inputs:         Table number for which the paths have to be set : string
Outputs:        None
Example call:   complete_all_mapping_path('T4')
'''
def complete_all_mapping_path(tablenum):
    global map_start, map_end, maze_map, path_map, encoded_maze_t1, path_box_map

    for i in range(3):
        start_coord = map_start[tablenum][0]
        end_coord = map_end[tablenum][i]
        mazearray = maze_map[tablenum]
        path = task_4a.find_path(mazearray, start_coord, end_coord)
        path_box_map[tablenum].append(path)

        resolution_x = 1120  # 1280
        resolution_y = 1120  # 1280
        x_increment = resolution_x // 10
        y_increment = resolution_y // 10

        pixel_path = [[] for _ in range(len(path))]
        for idx in range(len(path)):
            # to change the pixel trim: replace 180 with pixel*10 and 18 with pixel
            x_pix_trim = int(((180 * path[idx][0]) / 45) - 18)
            y_pix_trim = int(((180 * path[idx][1]) / 45) - 18)
            x_pixel = ((x_increment // 2) + path[idx][0] * x_increment) + 80 + x_pix_trim
            y_pixel = ((y_increment // 2) + path[idx][1] * y_increment) + 80 + y_pix_trim
            pixel_path[idx].append(x_pixel)
            pixel_path[idx].append(y_pixel)

        if tablenum == 'T1':
            if path[-1] == map_end[tablenum][0]:
                # (0,4) decrease y pixel [tilt in +ve y]
                pixel_path.append([pixel_path[-1][0] - (y_increment // 2), pixel_path[-1][1]])
            elif path[-1] == map_end[tablenum][1]:
                # (4,9) increase x pixel [tilt in +ve x]
                pixel_path.append([pixel_path[-1][0] + (x_increment // 2), pixel_path[-1][1]])
            elif path[-1] == map_end[tablenum][2]:
                # (9,5) increase y pixel [tilt in -ve y]
                pixel_path.append([pixel_path[-1][0], pixel_path[-1][1] + (y_increment // 2)])
            else:
                print("Unexpected element at the end of the path in maze T1")

        if tablenum == 'T4':
            if path[-1] == map_end[tablenum][0]:
                # (5,9) increase x pixel [tilt in +ve x]
                pixel_path.append([pixel_path[-1][0], pixel_path[-1][1] + (x_increment // 2)])
            elif path[-1] == map_end[tablenum][1]:
                # (9,4) increase y pixel [tilt in -ve y]
                pixel_path.append([pixel_path[-1][0], pixel_path[-1][1] + (y_increment // 2)])
            elif path[-1] == map_end[tablenum][2]:
                # (4,0) decrease x pixel [tilt in -ve x]
                pixel_path.append([pixel_path[-1][0] - (x_increment // 2), pixel_path[-1][1]])
            else:
                print("Unexpected element at the end of the path in maze T4")

        path_map[tablenum].append(pixel_path)

'''
Function name:  get_color
Usage:          Repeatedly sends the vision sensor image to color_get and waits until the
                called function returns a color.
Inputs:         None
Outputs:        Color of the detected ball
'''
def get_color():
    global vision_sensor_5, client_id
    color = None
    while color is None:
        return_code, image_resolution, vision_sensor_image = sim.simxGetVisionSensorImage(client_id, vision_sensor_5, 0, sim.simx_opmode_blocking)
        if len(vision_sensor_image):
            vision_sensor_image = task_2a.transform_vision_sensor_image(vision_sensor_image, image_resolution)
            color = color_get(vision_sensor_image)
    return color

##############################################################

def main(rec_client_id):
    """
    Purpose:
    ---
    Teams are free to design their code in this task. The test executable will only call
    this function of task_5.py. init_remote_api_server() and exit_remote_api_server()
    functions are already defined in the executable and hence should not be called by the teams.
    The obtained client_id is passed to this function so that teams can use it in their code.

    NOTE: Teams will have to call start_simulation() and stop_simulation() function on their own.

    Input Arguments:
    ---
    `rec_client_id`   :   integer
        client_id returned after calling init_remote_api_server() function from the executable.

    Returns:
    ---
    None

    Example call:
    ---
    main(rec_client_id)
    """
    ############## ADD YOUR CODE HERE ##############
    global maze_map, encoded_maze_t1, encoded_maze_t2, encoded_maze_t3, encoded_maze_t4, t4_path, aux_path, servo_handle_x_t1, servo_handle_y_t1
    global servo_handle_x_t4, servo_handle_y_t4, path_box_map, collection_box
    global client_id, ball_details

    client_id = rec_client_id

    img_t4 = cv2.imread("maze_t4.JPG")
    warped_t4 = task_1b.applyPerspectiveTransform(img_t4)
    encoded_maze_t4 = task_1b.detectMaze(warped_t4)
    maze_map['T4'] = encoded_maze_t4
    return_code = task_2b.send_data(rec_client_id, encoded_maze_t4, "t4")

    img_t1 = cv2.imread("maze_t1.JPG")
    warped_t1 = task_1b.applyPerspectiveTransform(img_t1)
    encoded_maze_t1 = task_1b.detectMaze(warped_t1)
    maze_map['T1'] = encoded_maze_t1
    return_code = task_2b.send_data(rec_client_id, encoded_maze_t1, "t1")

    complete_all_mapping_path('T1')
    complete_all_mapping_path('T4')
    # Similarly for T3 and T2

    make_connection()
    return_code = task_2a.start_simulation(rec_client_id)

    i = 0
    # Keep serving balls until some color runs out of pending targets
    while all([len(lst) != 0 for lst in list(ball_details.values())]):
        print("here")
        color = get_color()
        if color:
            collection_box = ball_details[color][0]
            table = ball_details[color][0].split('_')[0]
            send_color_and_collection_box_identified(color, collection_box)
            set_path(color)
            traverse_ball(handle_list["T4"][0], handle_list["T4"][1], handle_list["T4"][2], t4_path)
            traverse_ball(handle_list[table][0], handle_list[table][1], handle_list[table][2], aux_path)
            print("task completed")
            print(len(list(ball_details.values())))
            i += 1
            print(f" i is {i} ")
            time.sleep(5)
    task_2a.stop_simulation(rec_client_id)

    ##################################################

# Function Name:    main (built in)
# Inputs:           None
# Outputs:          None
# Purpose:          To call the main(rec_client_id) function written by teams when they
#                   run task_5.py only.
# NOTE: Write your solution ONLY in the space provided in the above functions. This function should not be edited.
if __name__ == "__main__":
    client_id = task_2a.init_remote_api_server()
    main(client_id)
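The listings never show ball_details.json itself; judging from how set_path() and main() index it, each color maps to a FIFO list of '<table>_<collection box>' targets. A plausible shape follows, illustrative only, since the exact contents are not in the source.

# Illustrative ball_details.json contents inferred from set_path() and main();
# the actual file shipped with the task is not shown above.
ball_details = {
    "red":   ["T1_CB2"],
    "green": ["T1_CB1"],
    "blue":  ["T1_CB3"],
}
table, collection_box = ball_details["red"][0].split("_")
print(table, collection_box)  # T1 CB2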
    print('Make sure ball_details.json is present in this current directory.\n')

map_start = {
random_line_split
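color_get in the listings classifies the ball by HSV range masks. A self-contained check of the blue range used there, run on a synthetic pure-blue frame:

import cv2
import numpy as np

# Synthetic 100x100 pure-blue BGR frame.
img = np.zeros((100, 100, 3), np.uint8)
img[:] = (255, 0, 0)  # BGR blue -> HSV (120, 255, 255)

hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# Blue bounds copied from color_get above.
mask = cv2.inRange(hsv, np.array([94, 20, 0], np.uint8), np.array([140, 255, 255], np.uint8))
print('blue' if cv2.countNonZero(mask) else 'no blue')  # -> blue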
delete.go
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package handlers import ( "context" "fmt" "net/http" "time" "go.opentelemetry.io/otel/attribute" "k8s.io/apimachinery/pkg/api/errors" metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" metainternalversionvalidation "k8s.io/apimachinery/pkg/apis/meta/internalversion/validation" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/audit" "k8s.io/apiserver/pkg/endpoints/handlers/finisher" requestmetrics "k8s.io/apiserver/pkg/endpoints/handlers/metrics" "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/util/dryrun" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/component-base/tracing" ) // DeleteResource returns a function that will handle a resource deletion // TODO admission here becomes solely validating admission func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestScope, admit admission.Interface) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { ctx := req.Context() // For performance tracking purposes. ctx, span := tracing.Start(ctx, "Delete", traceFields(req)...) defer span.End(500 * time.Millisecond) namespace, name, err := scope.Namer.Name(req) if err != nil { scope.err(err, w, req) return } // enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided // timeout inside the parent context is lower than requestTimeoutUpperBound. 
ctx, cancel := context.WithTimeout(ctx, requestTimeoutUpperBound) defer cancel() ctx = request.WithNamespace(ctx, namespace) admit = admission.WithAudit(admit) outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) if err != nil { scope.err(err, w, req) return } options := &metav1.DeleteOptions{} if allowsOptions { body, err := limitedReadBodyWithRecordMetric(ctx, req, scope.MaxRequestBodyBytes, scope.Resource.GroupResource().String(), requestmetrics.Delete) if err != nil { span.AddEvent("limitedReadBody failed", attribute.Int("len", len(body)), attribute.String("err", err.Error())) scope.err(err, w, req) return } span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body))) if len(body) > 0 { s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversionscheme.Codecs) if err != nil { scope.err(err, w, req) return } // For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions // It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions defaultGVK := scope.MetaGroupVersion.WithKind("DeleteOptions") obj, gvk, err := metainternalversionscheme.Codecs.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options) if err != nil { scope.err(err, w, req) return } if obj != options { scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), w, req) return } span.AddEvent("Decoded delete options") objGV := gvk.GroupVersion() audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, metainternalversionscheme.Codecs) span.AddEvent("Recorded the audit event") } else { if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { err = errors.NewBadRequest(err.Error()) scope.err(err, w, req) return } } } if errs := validation.ValidateDeleteOptions(options); len(errs) > 0 { err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", errs) scope.err(err, w, req) return } options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("DeleteOptions")) span.AddEvent("About to delete object from database") wasDeleted := true userInfo, _ := request.UserFrom(ctx) staticAdmissionAttrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Delete, options, dryrun.IsDryRun(options.DryRun), userInfo) result, err := finisher.FinishRequest(ctx, func() (runtime.Object, error) { obj, deleted, err := r.Delete(ctx, name, rest.AdmissionToValidateObjectDeleteFunc(admit, staticAdmissionAttrs, scope), options) wasDeleted = deleted return obj, err }) if err != nil { scope.err(err, w, req)
} span.AddEvent("Object deleted from database") status := http.StatusOK // Return http.StatusAccepted if the resource was not deleted immediately and // user requested cascading deletion by setting OrphanDependents=false. // Note: We want to do this always if resource was not deleted immediately, but // that will break existing clients. // Other cases where resource is not instantly deleted are: namespace deletion // and pod graceful deletion. //nolint:staticcheck // SA1019 backwards compatibility //nolint: staticcheck if !wasDeleted && options.OrphanDependents != nil && !*options.OrphanDependents { status = http.StatusAccepted } // if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid // object with the response. if result == nil { result = &metav1.Status{ Status: metav1.StatusSuccess, Code: int32(status), Details: &metav1.StatusDetails{ Name: name, Kind: scope.Kind.Kind, }, } } span.AddEvent("About to write a response") defer span.AddEvent("Writing http response done") transformResponseObject(ctx, scope, req, w, status, outputMediaType, result) } } // DeleteCollection returns a function that will handle a collection deletion func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestScope, admit admission.Interface) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { ctx := req.Context() ctx, span := tracing.Start(ctx, "Delete", traceFields(req)...) defer span.End(500 * time.Millisecond) namespace, err := scope.Namer.Namespace(req) if err != nil { scope.err(err, w, req) return } // DELETECOLLECTION can be a lengthy operation, // we should not impose any 34s timeout here. // NOTE: This is similar to LIST which does not enforce a 34s timeout. ctx = request.WithNamespace(ctx, namespace) outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) if err != nil { scope.err(err, w, req) return } listOptions := metainternalversion.ListOptions{} if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &listOptions); err != nil { err = errors.NewBadRequest(err.Error()) scope.err(err, w, req) return } metainternalversion.SetListOptionsDefaults(&listOptions, utilfeature.DefaultFeatureGate.Enabled(features.WatchList)) if errs := metainternalversionvalidation.ValidateListOptions(&listOptions, utilfeature.DefaultFeatureGate.Enabled(features.WatchList)); len(errs) > 0 { err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "ListOptions"}, "", errs) scope.err(err, w, req) return } // transform fields // TODO: DecodeParametersInto should do this. 
if listOptions.FieldSelector != nil { fn := func(label, value string) (newLabel, newValue string, err error) { return scope.Convertor.ConvertFieldLabel(scope.Kind, label, value) } if listOptions.FieldSelector, err = listOptions.FieldSelector.Transform(fn); err != nil { // TODO: allow bad request to set field causes based on query parameters err = errors.NewBadRequest(err.Error()) scope.err(err, w, req) return } } options := &metav1.DeleteOptions{} if checkBody { body, err := limitedReadBodyWithRecordMetric(ctx, req, scope.MaxRequestBodyBytes, scope.Resource.GroupResource().String(), requestmetrics.DeleteCollection) if err != nil { span.AddEvent("limitedReadBody failed", attribute.Int("len", len(body)), attribute.String("err", err.Error())) scope.err(err, w, req) return } span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body))) if len(body) > 0 { s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversionscheme.Codecs) if err != nil { scope.err(err, w, req) return } // For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions // It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions defaultGVK := scope.MetaGroupVersion.WithKind("DeleteOptions") obj, gvk, err := metainternalversionscheme.Codecs.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options) if err != nil { scope.err(err, w, req) return } if obj != options { scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), w, req) return } objGV := gvk.GroupVersion() audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, metainternalversionscheme.Codecs) } else { if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { err = errors.NewBadRequest(err.Error()) scope.err(err, w, req) return } } } if errs := validation.ValidateDeleteOptions(options); len(errs) > 0 { err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", errs) scope.err(err, w, req) return } options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("DeleteOptions")) admit = admission.WithAudit(admit) userInfo, _ := request.UserFrom(ctx) staticAdmissionAttrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, "", scope.Resource, scope.Subresource, admission.Delete, options, dryrun.IsDryRun(options.DryRun), userInfo) result, err := finisher.FinishRequest(ctx, func() (runtime.Object, error) { return r.DeleteCollection(ctx, rest.AdmissionToValidateObjectDeleteFunc(admit, staticAdmissionAttrs, scope), options, &listOptions) }) if err != nil { scope.err(err, w, req) return } // if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid // object with the response. if result == nil { result = &metav1.Status{ Status: metav1.StatusSuccess, Code: http.StatusOK, Details: &metav1.StatusDetails{ Kind: scope.Kind.Kind, }, } } span.AddEvent("About to write a response") defer span.AddEvent("Writing http response done") transformResponseObject(ctx, scope, req, w, http.StatusOK, outputMediaType, result) } }
return
random_line_split
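The handler above accepts a meta.k8s.io/v1 DeleteOptions body and returns 202 Accepted when OrphanDependents=false and the deletion is deferred. A small standalone sketch of the body a client might send, using the real apimachinery types (assumes k8s.io/apimachinery is on the module path):

package main

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	orphan := false
	opts := metav1.DeleteOptions{
		TypeMeta:         metav1.TypeMeta{APIVersion: "meta.k8s.io/v1", Kind: "DeleteOptions"},
		OrphanDependents: &orphan, // deprecated but still honored by the 202 path above
	}
	body, _ := json.Marshal(opts)
	fmt.Println(string(body)) // {"kind":"DeleteOptions","apiVersion":"meta.k8s.io/v1","orphanDependents":false}
}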
delete.go
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package handlers import ( "context" "fmt" "net/http" "time" "go.opentelemetry.io/otel/attribute" "k8s.io/apimachinery/pkg/api/errors" metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" metainternalversionvalidation "k8s.io/apimachinery/pkg/apis/meta/internalversion/validation" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/audit" "k8s.io/apiserver/pkg/endpoints/handlers/finisher" requestmetrics "k8s.io/apiserver/pkg/endpoints/handlers/metrics" "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/util/dryrun" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/component-base/tracing" ) // DeleteResource returns a function that will handle a resource deletion // TODO admission here becomes solely validating admission func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestScope, admit admission.Interface) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { ctx := req.Context() // For performance tracking purposes. ctx, span := tracing.Start(ctx, "Delete", traceFields(req)...) defer span.End(500 * time.Millisecond) namespace, name, err := scope.Namer.Name(req) if err != nil { scope.err(err, w, req) return } // enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided // timeout inside the parent context is lower than requestTimeoutUpperBound. 
ctx, cancel := context.WithTimeout(ctx, requestTimeoutUpperBound) defer cancel() ctx = request.WithNamespace(ctx, namespace) admit = admission.WithAudit(admit) outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) if err != nil { scope.err(err, w, req) return } options := &metav1.DeleteOptions{} if allowsOptions { body, err := limitedReadBodyWithRecordMetric(ctx, req, scope.MaxRequestBodyBytes, scope.Resource.GroupResource().String(), requestmetrics.Delete) if err != nil { span.AddEvent("limitedReadBody failed", attribute.Int("len", len(body)), attribute.String("err", err.Error())) scope.err(err, w, req) return } span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body))) if len(body) > 0 { s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversionscheme.Codecs) if err != nil { scope.err(err, w, req) return } // For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions // It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions defaultGVK := scope.MetaGroupVersion.WithKind("DeleteOptions") obj, gvk, err := metainternalversionscheme.Codecs.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options) if err != nil { scope.err(err, w, req) return } if obj != options { scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), w, req) return } span.AddEvent("Decoded delete options") objGV := gvk.GroupVersion() audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, metainternalversionscheme.Codecs) span.AddEvent("Recorded the audit event") } else { if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { err = errors.NewBadRequest(err.Error()) scope.err(err, w, req) return } } } if errs := validation.ValidateDeleteOptions(options); len(errs) > 0 { err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", errs) scope.err(err, w, req) return } options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("DeleteOptions")) span.AddEvent("About to delete object from database") wasDeleted := true userInfo, _ := request.UserFrom(ctx) staticAdmissionAttrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Delete, options, dryrun.IsDryRun(options.DryRun), userInfo) result, err := finisher.FinishRequest(ctx, func() (runtime.Object, error) { obj, deleted, err := r.Delete(ctx, name, rest.AdmissionToValidateObjectDeleteFunc(admit, staticAdmissionAttrs, scope), options) wasDeleted = deleted return obj, err }) if err != nil { scope.err(err, w, req) return } span.AddEvent("Object deleted from database") status := http.StatusOK // Return http.StatusAccepted if the resource was not deleted immediately and // user requested cascading deletion by setting OrphanDependents=false. // Note: We want to do this always if resource was not deleted immediately, but // that will break existing clients. // Other cases where resource is not instantly deleted are: namespace deletion // and pod graceful deletion. //nolint:staticcheck // SA1019 backwards compatibility //nolint: staticcheck if !wasDeleted && options.OrphanDependents != nil && !*options.OrphanDependents { status = http.StatusAccepted } // if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid // object with the response. 
if result == nil { result = &metav1.Status{ Status: metav1.StatusSuccess, Code: int32(status), Details: &metav1.StatusDetails{ Name: name, Kind: scope.Kind.Kind, }, } } span.AddEvent("About to write a response") defer span.AddEvent("Writing http response done") transformResponseObject(ctx, scope, req, w, status, outputMediaType, result) } } // DeleteCollection returns a function that will handle a collection deletion func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestScope, admit admission.Interface) http.HandlerFunc
{ return func(w http.ResponseWriter, req *http.Request) { ctx := req.Context() ctx, span := tracing.Start(ctx, "Delete", traceFields(req)...) defer span.End(500 * time.Millisecond) namespace, err := scope.Namer.Namespace(req) if err != nil { scope.err(err, w, req) return } // DELETECOLLECTION can be a lengthy operation, // we should not impose any 34s timeout here. // NOTE: This is similar to LIST which does not enforce a 34s timeout. ctx = request.WithNamespace(ctx, namespace) outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) if err != nil { scope.err(err, w, req) return } listOptions := metainternalversion.ListOptions{} if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &listOptions); err != nil { err = errors.NewBadRequest(err.Error()) scope.err(err, w, req) return } metainternalversion.SetListOptionsDefaults(&listOptions, utilfeature.DefaultFeatureGate.Enabled(features.WatchList)) if errs := metainternalversionvalidation.ValidateListOptions(&listOptions, utilfeature.DefaultFeatureGate.Enabled(features.WatchList)); len(errs) > 0 { err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "ListOptions"}, "", errs) scope.err(err, w, req) return } // transform fields // TODO: DecodeParametersInto should do this. if listOptions.FieldSelector != nil { fn := func(label, value string) (newLabel, newValue string, err error) { return scope.Convertor.ConvertFieldLabel(scope.Kind, label, value) } if listOptions.FieldSelector, err = listOptions.FieldSelector.Transform(fn); err != nil { // TODO: allow bad request to set field causes based on query parameters err = errors.NewBadRequest(err.Error()) scope.err(err, w, req) return } } options := &metav1.DeleteOptions{} if checkBody { body, err := limitedReadBodyWithRecordMetric(ctx, req, scope.MaxRequestBodyBytes, scope.Resource.GroupResource().String(), requestmetrics.DeleteCollection) if err != nil { span.AddEvent("limitedReadBody failed", attribute.Int("len", len(body)), attribute.String("err", err.Error())) scope.err(err, w, req) return } span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body))) if len(body) > 0 { s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversionscheme.Codecs) if err != nil { scope.err(err, w, req) return } // For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions // It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions defaultGVK := scope.MetaGroupVersion.WithKind("DeleteOptions") obj, gvk, err := metainternalversionscheme.Codecs.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options) if err != nil { scope.err(err, w, req) return } if obj != options { scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), w, req) return } objGV := gvk.GroupVersion() audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, metainternalversionscheme.Codecs) } else { if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { err = errors.NewBadRequest(err.Error()) scope.err(err, w, req) return } } } if errs := validation.ValidateDeleteOptions(options); len(errs) > 0 { err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", errs) scope.err(err, w, req) return } 
options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("DeleteOptions")) admit = admission.WithAudit(admit) userInfo, _ := request.UserFrom(ctx) staticAdmissionAttrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, "", scope.Resource, scope.Subresource, admission.Delete, options, dryrun.IsDryRun(options.DryRun), userInfo) result, err := finisher.FinishRequest(ctx, func() (runtime.Object, error) { return r.DeleteCollection(ctx, rest.AdmissionToValidateObjectDeleteFunc(admit, staticAdmissionAttrs, scope), options, &listOptions) }) if err != nil { scope.err(err, w, req) return } // if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid // object with the response. if result == nil { result = &metav1.Status{ Status: metav1.StatusSuccess, Code: http.StatusOK, Details: &metav1.StatusDetails{ Kind: scope.Kind.Kind, }, } } span.AddEvent("About to write a response") defer span.AddEvent("Writing http response done") transformResponseObject(ctx, scope, req, w, http.StatusOK, outputMediaType, result) } }
identifier_body
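DeleteCollection above rewrites each field selector term through scope.Convertor.ConvertFieldLabel before using it. A minimal standalone illustration of Selector.Transform from k8s.io/apimachinery; the identity function stands in for the convertor:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
)

func main() {
	sel, err := fields.ParseSelector("metadata.name=mypod")
	if err != nil {
		panic(err)
	}
	// Identity transform; the real handler maps versioned field labels here.
	out, err := sel.Transform(func(label, value string) (string, string, error) {
		return label, value, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(out.String()) // metadata.name=mypod
}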
delete.go
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package handlers import ( "context" "fmt" "net/http" "time" "go.opentelemetry.io/otel/attribute" "k8s.io/apimachinery/pkg/api/errors" metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" metainternalversionvalidation "k8s.io/apimachinery/pkg/apis/meta/internalversion/validation" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/audit" "k8s.io/apiserver/pkg/endpoints/handlers/finisher" requestmetrics "k8s.io/apiserver/pkg/endpoints/handlers/metrics" "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/util/dryrun" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/component-base/tracing" ) // DeleteResource returns a function that will handle a resource deletion // TODO admission here becomes solely validating admission func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestScope, admit admission.Interface) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { ctx := req.Context() // For performance tracking purposes. ctx, span := tracing.Start(ctx, "Delete", traceFields(req)...) defer span.End(500 * time.Millisecond) namespace, name, err := scope.Namer.Name(req) if err != nil { scope.err(err, w, req) return } // enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided // timeout inside the parent context is lower than requestTimeoutUpperBound. 
ctx, cancel := context.WithTimeout(ctx, requestTimeoutUpperBound) defer cancel() ctx = request.WithNamespace(ctx, namespace) admit = admission.WithAudit(admit) outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) if err != nil { scope.err(err, w, req) return } options := &metav1.DeleteOptions{} if allowsOptions { body, err := limitedReadBodyWithRecordMetric(ctx, req, scope.MaxRequestBodyBytes, scope.Resource.GroupResource().String(), requestmetrics.Delete) if err != nil { span.AddEvent("limitedReadBody failed", attribute.Int("len", len(body)), attribute.String("err", err.Error())) scope.err(err, w, req) return } span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body))) if len(body) > 0 { s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversionscheme.Codecs) if err != nil { scope.err(err, w, req) return } // For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions // It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions defaultGVK := scope.MetaGroupVersion.WithKind("DeleteOptions") obj, gvk, err := metainternalversionscheme.Codecs.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options) if err != nil { scope.err(err, w, req) return } if obj != options { scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), w, req) return } span.AddEvent("Decoded delete options") objGV := gvk.GroupVersion() audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, metainternalversionscheme.Codecs) span.AddEvent("Recorded the audit event") } else { if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { err = errors.NewBadRequest(err.Error()) scope.err(err, w, req) return } } } if errs := validation.ValidateDeleteOptions(options); len(errs) > 0 { err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", errs) scope.err(err, w, req) return } options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("DeleteOptions")) span.AddEvent("About to delete object from database") wasDeleted := true userInfo, _ := request.UserFrom(ctx) staticAdmissionAttrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Delete, options, dryrun.IsDryRun(options.DryRun), userInfo) result, err := finisher.FinishRequest(ctx, func() (runtime.Object, error) { obj, deleted, err := r.Delete(ctx, name, rest.AdmissionToValidateObjectDeleteFunc(admit, staticAdmissionAttrs, scope), options) wasDeleted = deleted return obj, err }) if err != nil { scope.err(err, w, req) return } span.AddEvent("Object deleted from database") status := http.StatusOK // Return http.StatusAccepted if the resource was not deleted immediately and // user requested cascading deletion by setting OrphanDependents=false. // Note: We want to do this always if resource was not deleted immediately, but // that will break existing clients. // Other cases where resource is not instantly deleted are: namespace deletion // and pod graceful deletion. //nolint:staticcheck // SA1019 backwards compatibility //nolint: staticcheck if !wasDeleted && options.OrphanDependents != nil && !*options.OrphanDependents { status = http.StatusAccepted } // if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid // object with the response. 
if result == nil { result = &metav1.Status{ Status: metav1.StatusSuccess, Code: int32(status), Details: &metav1.StatusDetails{ Name: name, Kind: scope.Kind.Kind, }, } } span.AddEvent("About to write a response") defer span.AddEvent("Writing http response done") transformResponseObject(ctx, scope, req, w, status, outputMediaType, result) } } // DeleteCollection returns a function that will handle a collection deletion func
(r rest.CollectionDeleter, checkBody bool, scope *RequestScope, admit admission.Interface) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { ctx := req.Context() ctx, span := tracing.Start(ctx, "Delete", traceFields(req)...) defer span.End(500 * time.Millisecond) namespace, err := scope.Namer.Namespace(req) if err != nil { scope.err(err, w, req) return } // DELETECOLLECTION can be a lengthy operation, // we should not impose any 34s timeout here. // NOTE: This is similar to LIST which does not enforce a 34s timeout. ctx = request.WithNamespace(ctx, namespace) outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) if err != nil { scope.err(err, w, req) return } listOptions := metainternalversion.ListOptions{} if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &listOptions); err != nil { err = errors.NewBadRequest(err.Error()) scope.err(err, w, req) return } metainternalversion.SetListOptionsDefaults(&listOptions, utilfeature.DefaultFeatureGate.Enabled(features.WatchList)) if errs := metainternalversionvalidation.ValidateListOptions(&listOptions, utilfeature.DefaultFeatureGate.Enabled(features.WatchList)); len(errs) > 0 { err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "ListOptions"}, "", errs) scope.err(err, w, req) return } // transform fields // TODO: DecodeParametersInto should do this. if listOptions.FieldSelector != nil { fn := func(label, value string) (newLabel, newValue string, err error) { return scope.Convertor.ConvertFieldLabel(scope.Kind, label, value) } if listOptions.FieldSelector, err = listOptions.FieldSelector.Transform(fn); err != nil { // TODO: allow bad request to set field causes based on query parameters err = errors.NewBadRequest(err.Error()) scope.err(err, w, req) return } } options := &metav1.DeleteOptions{} if checkBody { body, err := limitedReadBodyWithRecordMetric(ctx, req, scope.MaxRequestBodyBytes, scope.Resource.GroupResource().String(), requestmetrics.DeleteCollection) if err != nil { span.AddEvent("limitedReadBody failed", attribute.Int("len", len(body)), attribute.String("err", err.Error())) scope.err(err, w, req) return } span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body))) if len(body) > 0 { s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversionscheme.Codecs) if err != nil { scope.err(err, w, req) return } // For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions // It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions defaultGVK := scope.MetaGroupVersion.WithKind("DeleteOptions") obj, gvk, err := metainternalversionscheme.Codecs.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options) if err != nil { scope.err(err, w, req) return } if obj != options { scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), w, req) return } objGV := gvk.GroupVersion() audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, metainternalversionscheme.Codecs) } else { if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { err = errors.NewBadRequest(err.Error()) scope.err(err, w, req) return } } } if errs := validation.ValidateDeleteOptions(options); len(errs) > 0 { err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", errs) 
scope.err(err, w, req) return } options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("DeleteOptions")) admit = admission.WithAudit(admit) userInfo, _ := request.UserFrom(ctx) staticAdmissionAttrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, "", scope.Resource, scope.Subresource, admission.Delete, options, dryrun.IsDryRun(options.DryRun), userInfo) result, err := finisher.FinishRequest(ctx, func() (runtime.Object, error) { return r.DeleteCollection(ctx, rest.AdmissionToValidateObjectDeleteFunc(admit, staticAdmissionAttrs, scope), options, &listOptions) }) if err != nil { scope.err(err, w, req) return } // if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid // object with the response. if result == nil { result = &metav1.Status{ Status: metav1.StatusSuccess, Code: http.StatusOK, Details: &metav1.StatusDetails{ Kind: scope.Kind.Kind, }, } } span.AddEvent("About to write a response") defer span.AddEvent("Writing http response done") transformResponseObject(ctx, scope, req, w, http.StatusOK, outputMediaType, result) } }
DeleteCollection
identifier_name
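A note on the record above: DeleteResource accepts DeleteOptions either as a request body (decoded through NegotiateInputSerializer) or as query parameters (the else branch). A minimal sketch of both call styles, assuming the `requests` library and a locally proxied apiserver; the endpoint, pod name, and port are illustrative assumptions, not part of the record:

import requests

BASE = "http://localhost:8001"  # assumed `kubectl proxy` address
POD = BASE + "/api/v1/namespaces/default/pods/mypod"

# Style 1: options in the body, matching the len(body) > 0 branch above.
requests.delete(POD, json={"apiVersion": "v1", "kind": "DeleteOptions", "gracePeriodSeconds": 0})

# Style 2: options as query parameters, matching the else branch above.
requests.delete(POD, params={"gracePeriodSeconds": "0"})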
delete.go
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package handlers import ( "context" "fmt" "net/http" "time" "go.opentelemetry.io/otel/attribute" "k8s.io/apimachinery/pkg/api/errors" metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" metainternalversionvalidation "k8s.io/apimachinery/pkg/apis/meta/internalversion/validation" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/audit" "k8s.io/apiserver/pkg/endpoints/handlers/finisher" requestmetrics "k8s.io/apiserver/pkg/endpoints/handlers/metrics" "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/util/dryrun" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/component-base/tracing" ) // DeleteResource returns a function that will handle a resource deletion // TODO admission here becomes solely validating admission func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestScope, admit admission.Interface) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { ctx := req.Context() // For performance tracking purposes. ctx, span := tracing.Start(ctx, "Delete", traceFields(req)...) defer span.End(500 * time.Millisecond) namespace, name, err := scope.Namer.Name(req) if err != nil { scope.err(err, w, req) return } // enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided // timeout inside the parent context is lower than requestTimeoutUpperBound. ctx, cancel := context.WithTimeout(ctx, requestTimeoutUpperBound) defer cancel() ctx = request.WithNamespace(ctx, namespace) admit = admission.WithAudit(admit) outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) if err != nil { scope.err(err, w, req) return } options := &metav1.DeleteOptions{} if allowsOptions { body, err := limitedReadBodyWithRecordMetric(ctx, req, scope.MaxRequestBodyBytes, scope.Resource.GroupResource().String(), requestmetrics.Delete) if err != nil { span.AddEvent("limitedReadBody failed", attribute.Int("len", len(body)), attribute.String("err", err.Error())) scope.err(err, w, req) return } span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body))) if len(body) > 0
else { if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { err = errors.NewBadRequest(err.Error()) scope.err(err, w, req) return } } } if errs := validation.ValidateDeleteOptions(options); len(errs) > 0 { err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", errs) scope.err(err, w, req) return } options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("DeleteOptions")) span.AddEvent("About to delete object from database") wasDeleted := true userInfo, _ := request.UserFrom(ctx) staticAdmissionAttrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Delete, options, dryrun.IsDryRun(options.DryRun), userInfo) result, err := finisher.FinishRequest(ctx, func() (runtime.Object, error) { obj, deleted, err := r.Delete(ctx, name, rest.AdmissionToValidateObjectDeleteFunc(admit, staticAdmissionAttrs, scope), options) wasDeleted = deleted return obj, err }) if err != nil { scope.err(err, w, req) return } span.AddEvent("Object deleted from database") status := http.StatusOK // Return http.StatusAccepted if the resource was not deleted immediately and // user requested cascading deletion by setting OrphanDependents=false. // Note: We want to do this always if resource was not deleted immediately, but // that will break existing clients. // Other cases where resource is not instantly deleted are: namespace deletion // and pod graceful deletion. //nolint:staticcheck // SA1019 backwards compatibility //nolint: staticcheck if !wasDeleted && options.OrphanDependents != nil && !*options.OrphanDependents { status = http.StatusAccepted } // if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid // object with the response. if result == nil { result = &metav1.Status{ Status: metav1.StatusSuccess, Code: int32(status), Details: &metav1.StatusDetails{ Name: name, Kind: scope.Kind.Kind, }, } } span.AddEvent("About to write a response") defer span.AddEvent("Writing http response done") transformResponseObject(ctx, scope, req, w, status, outputMediaType, result) } } // DeleteCollection returns a function that will handle a collection deletion func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestScope, admit admission.Interface) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { ctx := req.Context() ctx, span := tracing.Start(ctx, "Delete", traceFields(req)...) defer span.End(500 * time.Millisecond) namespace, err := scope.Namer.Namespace(req) if err != nil { scope.err(err, w, req) return } // DELETECOLLECTION can be a lengthy operation, // we should not impose any 34s timeout here. // NOTE: This is similar to LIST which does not enforce a 34s timeout. 
ctx = request.WithNamespace(ctx, namespace) outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) if err != nil { scope.err(err, w, req) return } listOptions := metainternalversion.ListOptions{} if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &listOptions); err != nil { err = errors.NewBadRequest(err.Error()) scope.err(err, w, req) return } metainternalversion.SetListOptionsDefaults(&listOptions, utilfeature.DefaultFeatureGate.Enabled(features.WatchList)) if errs := metainternalversionvalidation.ValidateListOptions(&listOptions, utilfeature.DefaultFeatureGate.Enabled(features.WatchList)); len(errs) > 0 { err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "ListOptions"}, "", errs) scope.err(err, w, req) return } // transform fields // TODO: DecodeParametersInto should do this. if listOptions.FieldSelector != nil { fn := func(label, value string) (newLabel, newValue string, err error) { return scope.Convertor.ConvertFieldLabel(scope.Kind, label, value) } if listOptions.FieldSelector, err = listOptions.FieldSelector.Transform(fn); err != nil { // TODO: allow bad request to set field causes based on query parameters err = errors.NewBadRequest(err.Error()) scope.err(err, w, req) return } } options := &metav1.DeleteOptions{} if checkBody { body, err := limitedReadBodyWithRecordMetric(ctx, req, scope.MaxRequestBodyBytes, scope.Resource.GroupResource().String(), requestmetrics.DeleteCollection) if err != nil { span.AddEvent("limitedReadBody failed", attribute.Int("len", len(body)), attribute.String("err", err.Error())) scope.err(err, w, req) return } span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body))) if len(body) > 0 { s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversionscheme.Codecs) if err != nil { scope.err(err, w, req) return } // For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions // It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions defaultGVK := scope.MetaGroupVersion.WithKind("DeleteOptions") obj, gvk, err := metainternalversionscheme.Codecs.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options) if err != nil { scope.err(err, w, req) return } if obj != options { scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), w, req) return } objGV := gvk.GroupVersion() audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, metainternalversionscheme.Codecs) } else { if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { err = errors.NewBadRequest(err.Error()) scope.err(err, w, req) return } } } if errs := validation.ValidateDeleteOptions(options); len(errs) > 0 { err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", errs) scope.err(err, w, req) return } options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("DeleteOptions")) admit = admission.WithAudit(admit) userInfo, _ := request.UserFrom(ctx) staticAdmissionAttrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, "", scope.Resource, scope.Subresource, admission.Delete, options, dryrun.IsDryRun(options.DryRun), userInfo) result, err := finisher.FinishRequest(ctx, func() (runtime.Object, error) { return r.DeleteCollection(ctx, rest.AdmissionToValidateObjectDeleteFunc(admit, 
staticAdmissionAttrs, scope), options, &listOptions) }) if err != nil { scope.err(err, w, req) return } // if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid // object with the response. if result == nil { result = &metav1.Status{ Status: metav1.StatusSuccess, Code: http.StatusOK, Details: &metav1.StatusDetails{ Kind: scope.Kind.Kind, }, } } span.AddEvent("About to write a response") defer span.AddEvent("Writing http response done") transformResponseObject(ctx, scope, req, w, http.StatusOK, outputMediaType, result) } }
{ s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversionscheme.Codecs) if err != nil { scope.err(err, w, req) return } // For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions // It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions defaultGVK := scope.MetaGroupVersion.WithKind("DeleteOptions") obj, gvk, err := metainternalversionscheme.Codecs.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options) if err != nil { scope.err(err, w, req) return } if obj != options { scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), w, req) return } span.AddEvent("Decoded delete options") objGV := gvk.GroupVersion() audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, metainternalversionscheme.Codecs) span.AddEvent("Recorded the audit event") }
conditional_block
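The DeleteCollection handler in this record decodes list-style selectors from the query string into ListOptions and runs field selectors through ConvertFieldLabel before deleting. A minimal sketch of such a request, again assuming `requests` and a locally proxied apiserver with illustrative values:

import requests

PODS = "http://localhost:8001/api/v1/namespaces/default/pods"

# DELETE on the collection: the selector lands in ListOptions, while
# gracePeriodSeconds is decoded into DeleteOptions by the same handler.
requests.delete(PODS, params={"fieldSelector": "status.phase=Failed", "gracePeriodSeconds": "0"})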
oldDH.py
###LIBRARIES
import networkx as nx
from networkx.readwrite import json_graph
from random import uniform, randint
import numpy
from numpy import linalg as LA
import math as Math
import json
import os
from pathlib import Path
import time
import shutil

start_time = time.time()

###CLEAN
# Empty the ./html/ and ./tmp/ output folders left over from previous runs.
for folder in ('./html/', './tmp/'):
    for filename in os.listdir(folder):
        file_path = os.path.join(folder, filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as e:
            print('Failed to delete %s. Reason: %s' % (file_path, e))

clear = lambda: os.system('cls')  # clears the console on Windows systems
clear()

###GRAPH
test = False
valorizzato = False  # True when the input already carries node positions
if test and not valorizzato:
    with open('test.json') as json_file:
        data = json.load(json_file)
elif valorizzato:
    with open('originale.json') as json_file:
        data = json.load(json_file)
else:
    G = nx.read_graphml('graph.graphml')
    data = json_graph.node_link_data(G)

###VARIABLES
width = 1000
height = 800
nodes = data['nodes']
links = data['links']

###FUNCTIONS
def getStartingPositions(nodes, links):
    # Scatter the nodes uniformly at random and copy the coordinates onto
    # the endpoints of their incident links.
    for n in nodes:
        n['x'] = uniform(200, width)
        n['y'] = uniform(200, height)
        for l in links:
            if l['source'] == n['id']:
                l['s'] = {'x': n['x'], 'y': n['y']}
            if l['target'] == n['id']:
                l['t'] = {'x': n['x'], 'y': n['y']}
    return nodes

def chooseANode(max):
    # Pick the node at a random index (reads the module-level nodes list).
    rand = randint(0, max - 1)
    it = 0
    for n in nodes:
        if it == rand:
            return n
        it = it + 1

# returns true iff the line from (a,b)->(c,d) intersects with (p,q)->(r,s)
def intersects(a, b, c, d, p, q, r, s):
    det = (c - a) * (s - q) - (r - p) * (d - b)
    if det == 0:
        return False  # parallel or degenerate segments
    else:
        lambdA = ((s - q) * (r - a) + (p - r) * (s - b)) / det
        gamma = ((b - d) * (r - a) + (c - a) * (s - b)) / det
        return (0 < lambdA and lambdA < 1) and (0 < gamma and gamma < 1)

def numberOfIntersections(nodes, links):
    res = 0
    for k in links:
        for l in links:
            try:
                skx = k['s']['x']
                sky = k['s']['y']
                tkx = k['t']['x']
                tky = k['t']['y']
                slx = l['s']['x']
                sly = l['s']['y']
                tlx = l['t']['x']
                tly = l['t']['y']
            except:
                # A link is missing its endpoint coordinates: dump both and stop.
                print(k)
                print("-----")
                print(l)
                exit()
            if intersects(skx, sky, tkx, tky, slx, sly, tlx, tly):
                res = res + 1
            # (disabled debug branch traced one specific pair of edges by name)
    return res

def distance_to_line(p0, p1, p2):
    # Perpendicular distance from point p0 to the line through p1 and p2.
    x_diff = p2['x'] - p1['x']
    y_diff = p2['y'] - p1['y']
    num = abs(y_diff * p0['x'] - x_diff * p0['y'] + p2['x'] * p1['y'] - p2['y'] * p1['x'])
    den = Math.sqrt(y_diff ** 2 + x_diff ** 2)
    return num / den

# This phase is expensive: about 0.32 s on the 52-node test graph.
# The ideal layout has a low (but not too low) sum of squared edge lengths,
# a high (but not too high) sum of pairwise node distances, and few crossings.
# The distance from the canvas borders is not part of the energy; it is
# enforced as a constraint when moving within the radius-T neighbourhood.
# During fine tuning the point-to-line distance between each node and the
# nearby edges is also taken into account.
def computeEnergy(nodes, links, logInfo):
    sac_time = time.time()
    lambda1 = 8
    lambda3 = 5
    lambda4 = 1000
    repulsiveTot = 0
    edgeLenTot = 0
    numCrossTot = 0
    # (a disabled experiment also penalised the distance from each border)
    for u in nodes:
        posi = numpy.array([u['x'], u['y']])
        for v in nodes:
            # only distinct nodes
            if u['id'] != v['id']:
                posj = numpy.array([v['x'], v['y']])
                duv = numpy.linalg.norm(posi - posj)
                # If the nodes coincide the inverse distance is infinite,
                # so assume a very large value instead.
                if duv == 0:
                    iduv = 1000
                else:
                    iduv = 1 / pow(duv, 2)
                repulsiveTot = repulsiveTot + lambda1 * duv
                for l in links:
                    if l['source'] == u['id'] and l['target'] == v['id']:
                        edgeLenTot = edgeLenTot + lambda3 * pow(numpy.linalg.norm(posi - posj), 2)
                        break
    # This call costs about 0.28 s.
    numCrossTot = lambda4 * numberOfIntersections(nodes, links)
    msg = "r: " + str(repulsiveTot) + ", e: " + str(edgeLenTot) + ", c: " + str(numCrossTot) + "\n" + logInfo + "\n"
    log = open('./tmp/info.log', "a+")
    log.write(msg)
    log.close()
    tot = repulsiveTot + edgeLenTot + numCrossTot
    totMax = 1.7976931348623157e+8  # normalise into [0,1]
    tot = tot / totMax
    return tot, numCrossTot

def computeFTEnergy(nodes, links, logInfo):
    tot, numCrossTot = computeEnergy(nodes, links, logInfo)
    nodeLinkDist = 0
    gmin = 60
    lambda5 = 1
    for v in nodes:
        for l in links:
            for n in nodes:
                if n['id'] == l['source']:
                    source = n
                if n['id'] == l['target']:
                    target = n
            dl = lambda5 * distance_to_line(v, source, target)
            if dl < gmin:
                nodeLinkDist = nodeLinkDist + dl
    totMax = 1.7976931348623157e+8  # normalise into [0,1]
    tot = (tot + nodeLinkDist) / totMax
    return tot, numCrossTot

def move(node, rad, nodes, links):
    # Clamp the centre of the move so the new position keeps a margin
    # from the canvas borders.
    startx = node['x']
    starty = node['y']
    if node['x'] + rad > 1400:
        startx = 1400 - rad - 20  # subtract a margin
    elif node['x'] - rad < 30:
        startx = 30 + rad + 20  # add a margin
    if node['y'] + rad > 1000:
        starty = 1000 - rad - 20  # subtract a margin
    elif node['y'] - rad < 30:
        starty = 30 + rad + 20  # add a margin
    # Radial approach (move along a circle of radius rad): made few good moves.
    # Square approach: move anywhere inside a square of side 2*rad.
    nx = uniform(startx - rad, startx + rad)
    ny = uniform(starty - rad, starty + rad)
    log = open('./tmp/info.log', "a+")
    log.write("(" + str(node['x']) + ", " + str(node['y']) + ") =>(" + str(nx) + ", " + str(ny) + ")\n")
    log.close()
    for n in nodes:
        if n['id'] == node['id']:
            n['x'] = nx
            n['y'] = ny
            break
    for l in links:
        if l['source'] == node['id']:
            l['s']['x'] = nx
            l['s']['y'] = ny
        if l['target'] == node['id']:
            l['t']['x'] = nx
            l['t']['y'] = ny

def moveBack(vc, vx, vy, nodes, links):
    # Restore the node and the endpoints of its incident links.
    for n in nodes:
        if n['id'] == vc['id']:
            n['x'] = vx
            n['y'] = vy
            break
    for l in links:
        if l['source'] == vc['id']:
            l['s']['x'] = vx
            l['s']['y'] = vy
        if l['target'] == vc['id']:
            l['t']['x'] = vx
            l['t']['y'] = vy

def fixNodePos(nodes):
    # Shift every node so that none is closer than 30 px to the top-left corner.
    maxX = 0
    maxY = 0
    for v in nodes:
        if v['x'] < 30 and abs(v['x'] - 30) > maxX:
            maxX = abs(v['x'] - 30)
        if v['y'] < 30 and abs(v['y'] - 30) > maxY:
            maxY = abs(v['y'] - 30)
    for v in nodes:
        v['x'] = v['x'] + 100 + maxX
        v['y'] = v['y'] + 100 + maxY

def saveConfiguration(filename, data):
    # HTML viewer: clone parent.html and point it at this configuration's json.
    fileContent = Path('parent.html').read_text()
    fileContent = fileContent.replace("@@@@@@", filename + ".json")
    html = open('./html/' + filename + '.html', "w+")
    html.write(fileContent)
    html.close()
    ##json
    content = open('./tmp/' + filename + '.json', "w+")
    content.write(json.dumps(data))
    content.close()

def polyn(input):
    return 25 * input  # 30 is better, but too slow for large inputs

def simulatedAnnealing(data):
    # Start from random node positions.
    nodes = data['nodes']
    links = data['links']
    valorizzato = False
    if not valorizzato:
        nodes = getStartingPositions(nodes, links)
        f = open('originale.json', "w+")
        f.write(json.dumps(data))
        f.close()
    else:
        for n in nodes:
            for l in links:
                if l['source'] == n['id']:
                    l['s'] = {'x': n['x'], 'y': n['y']}
                if l['target'] == n['id']:
                    l['t'] = {'x': n['x'], 'y': n['y']}
    T = 100
    stages = 11
    fineTuningStages = 4
    numNodes = len(nodes)
    currentStage = 0
    numMoves = polyn(numNodes)
    currentMove = 0
    gamma = 0.8
    fuoriArea = False
    vc = chooseANode(numNodes)
    prevEnergy, ncp = computeEnergy(nodes, links, '')
    if ncp <= 2000000:
        saveConfiguration('outer', data)
    while currentStage <= stages and not fuoriArea:
        sao_time = time.time()
        currentMove = 0
        while currentMove <= numMoves and not fuoriArea:
            sai_time = time.time()
            print("currentStage: " + str(currentStage) + "\nT: " + str(T))
            print("currentMove: " + str(currentMove))
            vx = vc['x']
            vy = vc['y']
            # (disabled check: leaving the canvas jumped straight to fine tuning)
            move(vc, T, nodes, links)
            newEnergy, ncn = computeEnergy(nodes, links, "s" + str(currentStage) + "i" + str(currentMove) + ".json")
            de = newEnergy - prevEnergy
            # Keep the configuration with the fewest crossings seen so far.
            if ncn <= ncp:
                ncp = ncn
                saveConfiguration('K_s' + str(currentStage) + 'i' + str(currentMove), data)
            # Accept or reject the current configuration.
            if de < 0:
                prevEnergy = newEnergy
                vc = chooseANode(numNodes)
                # if currentMove < 10:
                saveConfiguration('s' + str(currentStage) + 'i' + str(currentMove), data)
            elif uniform(0, 1) < Math.exp(-de / T):
                # Accept the move even though it is worse (Metropolis rule).
                vc = chooseANode(numNodes)
            else:
                moveBack(vc, vx, vy, nodes, links)
            print("--- %s seconds ---" % (time.time() - sai_time))  # ~0.6 s per iteration => 0.14 s
            currentMove = currentMove + 1
        currentStage = currentStage + 1
        print("--- %s seconds ---" % (time.time() - sao_time))
        T = gamma * T
    # fine-tuning phase
    currentStage = 0
    T = 100
    prevEnergy, ncp = computeFTEnergy(nodes, links, str(currentStage) + str(currentMove))
    while currentStage <= fineTuningStages:
        currentMove = 0
        while currentMove <= numMoves:
            print("currentStage FT: " + str(currentStage))
            print("currentMove 2: " + str(currentMove))
            vx = vc['x']
            vy = vc['y']
            move(vc, T, nodes, links)
            newEnergy, ncn = computeFTEnergy(nodes, links, str(currentStage) + str(currentMove))
            if ncn <= ncp:
                ncp = ncn
                saveConfiguration('K_FT_s' + str(currentStage) + 'i' + str(currentMove), data)
            de = newEnergy - prevEnergy
            # Accept or reject the current configuration.
            if de < 0:
                prevEnergy = newEnergy
                saveConfiguration('FT_s' + str(currentStage) + 'i' + str(currentMove), data)
                vc = chooseANode(numNodes)
            else:
                moveBack(vc, vx, vy, nodes, links)
            currentMove = currentMove + 1
        currentStage = currentStage + 1

simulatedAnnealing(data)
# fixNodePos(nodes)
# Report the horizontal spread of the layout, then save the result.
minimo = 50
massimo = 0
for n in nodes:
    if n['x'] < minimo:
        minimo = n['x']
    if n['x'] > massimo:
        mas
ff = massimo - minimo
print("diff: " + str(diff))
f = open('res.json', "w+")
f.write(json.dumps(data))
f.close()
print("--- %s seconds ---" % (time.time() - start_time))
print("done")
simo=n['x'] di
conditional_block
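The annealing loop in this record accepts a worse layout with probability exp(-de/T), so early high-temperature stages explore freely while later, cooler stages mostly refine. A minimal sketch of just that acceptance rule, with illustrative numbers:

import math
from random import uniform

def accept(de, T):
    # Always keep improvements; keep regressions with Metropolis probability.
    return de < 0 or uniform(0, 1) < math.exp(-de / T)

# A small regression (de = 0.5) survives with p ~ 0.995 at T = 100,
# but only with p ~ 0.607 once the temperature has cooled to T = 1.
print(math.exp(-0.5 / 100), math.exp(-0.5 / 1))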
oldDH.py
###LIBRARIES
import networkx as nx
from networkx.readwrite import json_graph
from random import uniform, randint
import numpy
from numpy import linalg as LA
import math as Math
import json
import os
from pathlib import Path
import time
import shutil

start_time = time.time()

###CLEAN
# Empty the ./html/ and ./tmp/ output folders left over from previous runs.
for folder in ('./html/', './tmp/'):
    for filename in os.listdir(folder):
        file_path = os.path.join(folder, filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as e:
            print('Failed to delete %s. Reason: %s' % (file_path, e))

clear = lambda: os.system('cls')  # clears the console on Windows systems
clear()

###GRAPH
test = False
valorizzato = False  # True when the input already carries node positions
if test and not valorizzato:
    with open('test.json') as json_file:
        data = json.load(json_file)
elif valorizzato:
    with open('originale.json') as json_file:
        data = json.load(json_file)
else:
    G = nx.read_graphml('graph.graphml')
    data = json_graph.node_link_data(G)

###VARIABLES
width = 1000
height = 800
nodes = data['nodes']
links = data['links']

###FUNCTIONS
def getStartingPositions(nodes, links):
    # Scatter the nodes uniformly at random and copy the coordinates onto
    # the endpoints of their incident links.
    for n in nodes:
        n['x'] = uniform(200, width)
        n['y'] = uniform(200, height)
        for l in links:
            if l['source'] == n['id']:
                l['s'] = {'x': n['x'], 'y': n['y']}
            if l['target'] == n['id']:
                l['t'] = {'x': n['x'], 'y': n['y']}
    return nodes

def chooseANode(max):
    # Pick the node at a random index (reads the module-level nodes list).
    rand = randint(0, max - 1)
    it = 0
    for n in nodes:
        if it == rand:
            return n
        it = it + 1

# returns true iff the line from (a,b)->(c,d) intersects with (p,q)->(r,s)
def intersects(a, b, c, d, p, q, r, s):
    det = (c - a) * (s - q) - (r - p) * (d - b)
    if det == 0:
        return False  # parallel or degenerate segments
    else:
        lambdA = ((s - q) * (r - a) + (p - r) * (s - b)) / det
        gamma = ((b - d) * (r - a) + (c - a) * (s - b)) / det
        return (0 < lambdA and lambdA < 1) and (0 < gamma and gamma < 1)

def numberOfIntersections(nodes, links):
def distance_to_line(p0, p1, p2):
    # Perpendicular distance from point p0 to the line through p1 and p2.
    x_diff = p2['x'] - p1['x']
    y_diff = p2['y'] - p1['y']
    num = abs(y_diff * p0['x'] - x_diff * p0['y'] + p2['x'] * p1['y'] - p2['y'] * p1['x'])
    den = Math.sqrt(y_diff ** 2 + x_diff ** 2)
    return num / den

# This phase is expensive: about 0.32 s on the 52-node test graph.
# The ideal layout has a low (but not too low) sum of squared edge lengths,
# a high (but not too high) sum of pairwise node distances, and few crossings.
# The distance from the canvas borders is not part of the energy; it is
# enforced as a constraint when moving within the radius-T neighbourhood.
# During fine tuning the point-to-line distance between each node and the
# nearby edges is also taken into account.
def computeEnergy(nodes, links, logInfo):
    sac_time = time.time()
    lambda1 = 8
    lambda3 = 5
    lambda4 = 1000
    repulsiveTot = 0
    edgeLenTot = 0
    numCrossTot = 0
    # (a disabled experiment also penalised the distance from each border)
    for u in nodes:
        posi = numpy.array([u['x'], u['y']])
        for v in nodes:
            # only distinct nodes
            if u['id'] != v['id']:
                posj = numpy.array([v['x'], v['y']])
                duv = numpy.linalg.norm(posi - posj)
                # If the nodes coincide the inverse distance is infinite,
                # so assume a very large value instead.
                if duv == 0:
                    iduv = 1000
                else:
                    iduv = 1 / pow(duv, 2)
                repulsiveTot = repulsiveTot + lambda1 * duv
                for l in links:
                    if l['source'] == u['id'] and l['target'] == v['id']:
                        edgeLenTot = edgeLenTot + lambda3 * pow(numpy.linalg.norm(posi - posj), 2)
                        break
    # This call costs about 0.28 s.
    numCrossTot = lambda4 * numberOfIntersections(nodes, links)
    msg = "r: " + str(repulsiveTot) + ", e: " + str(edgeLenTot) + ", c: " + str(numCrossTot) + "\n" + logInfo + "\n"
    log = open('./tmp/info.log', "a+")
    log.write(msg)
    log.close()
    tot = repulsiveTot + edgeLenTot + numCrossTot
    totMax = 1.7976931348623157e+8  # normalise into [0,1]
    tot = tot / totMax
    return tot, numCrossTot

def computeFTEnergy(nodes, links, logInfo):
    tot, numCrossTot = computeEnergy(nodes, links, logInfo)
    nodeLinkDist = 0
    gmin = 60
    lambda5 = 1
    for v in nodes:
        for l in links:
            for n in nodes:
                if n['id'] == l['source']:
                    source = n
                if n['id'] == l['target']:
                    target = n
            dl = lambda5 * distance_to_line(v, source, target)
            if dl < gmin:
                nodeLinkDist = nodeLinkDist + dl
    totMax = 1.7976931348623157e+8  # normalise into [0,1]
    tot = (tot + nodeLinkDist) / totMax
    return tot, numCrossTot

def move(node, rad, nodes, links):
    # Clamp the centre of the move so the new position keeps a margin
    # from the canvas borders.
    startx = node['x']
    starty = node['y']
    if node['x'] + rad > 1400:
        startx = 1400 - rad - 20  # subtract a margin
    elif node['x'] - rad < 30:
        startx = 30 + rad + 20  # add a margin
    if node['y'] + rad > 1000:
        starty = 1000 - rad - 20  # subtract a margin
    elif node['y'] - rad < 30:
        starty = 30 + rad + 20  # add a margin
    # Radial approach (move along a circle of radius rad): made few good moves.
    # Square approach: move anywhere inside a square of side 2*rad.
    nx = uniform(startx - rad, startx + rad)
    ny = uniform(starty - rad, starty + rad)
    log = open('./tmp/info.log', "a+")
    log.write("(" + str(node['x']) + ", " + str(node['y']) + ") =>(" + str(nx) + ", " + str(ny) + ")\n")
    log.close()
    for n in nodes:
        if n['id'] == node['id']:
            n['x'] = nx
            n['y'] = ny
            break
    for l in links:
        if l['source'] == node['id']:
            l['s']['x'] = nx
            l['s']['y'] = ny
        if l['target'] == node['id']:
            l['t']['x'] = nx
            l['t']['y'] = ny

def moveBack(vc, vx, vy, nodes, links):
    # Restore the node and the endpoints of its incident links.
    for n in nodes:
        if n['id'] == vc['id']:
            n['x'] = vx
            n['y'] = vy
            break
    for l in links:
        if l['source'] == vc['id']:
            l['s']['x'] = vx
            l['s']['y'] = vy
        if l['target'] == vc['id']:
            l['t']['x'] = vx
            l['t']['y'] = vy

def fixNodePos(nodes):
    # Shift every node so that none is closer than 30 px to the top-left corner.
    maxX = 0
    maxY = 0
    for v in nodes:
        if v['x'] < 30 and abs(v['x'] - 30) > maxX:
            maxX = abs(v['x'] - 30)
        if v['y'] < 30 and abs(v['y'] - 30) > maxY:
            maxY = abs(v['y'] - 30)
    for v in nodes:
        v['x'] = v['x'] + 100 + maxX
        v['y'] = v['y'] + 100 + maxY

def saveConfiguration(filename, data):
    # HTML viewer: clone parent.html and point it at this configuration's json.
    fileContent = Path('parent.html').read_text()
    fileContent = fileContent.replace("@@@@@@", filename + ".json")
    html = open('./html/' + filename + '.html', "w+")
    html.write(fileContent)
    html.close()
    ##json
    content = open('./tmp/' + filename + '.json', "w+")
    content.write(json.dumps(data))
    content.close()

def polyn(input):
    return 25 * input  # 30 is better, but too slow for large inputs

def simulatedAnnealing(data):
    # Start from random node positions.
    nodes = data['nodes']
    links = data['links']
    valorizzato = False
    if not valorizzato:
        nodes = getStartingPositions(nodes, links)
        f = open('originale.json', "w+")
        f.write(json.dumps(data))
        f.close()
    else:
        for n in nodes:
            for l in links:
                if l['source'] == n['id']:
                    l['s'] = {'x': n['x'], 'y': n['y']}
                if l['target'] == n['id']:
                    l['t'] = {'x': n['x'], 'y': n['y']}
    T = 100
    stages = 11
    fineTuningStages = 4
    numNodes = len(nodes)
    currentStage = 0
    numMoves = polyn(numNodes)
    currentMove = 0
    gamma = 0.8
    fuoriArea = False
    vc = chooseANode(numNodes)
    prevEnergy, ncp = computeEnergy(nodes, links, '')
    if ncp <= 2000000:
        saveConfiguration('outer', data)
    while currentStage <= stages and not fuoriArea:
        sao_time = time.time()
        currentMove = 0
        while currentMove <= numMoves and not fuoriArea:
            sai_time = time.time()
            print("currentStage: " + str(currentStage) + "\nT: " + str(T))
            print("currentMove: " + str(currentMove))
            vx = vc['x']
            vy = vc['y']
            # (disabled check: leaving the canvas jumped straight to fine tuning)
            move(vc, T, nodes, links)
            newEnergy, ncn = computeEnergy(nodes, links, "s" + str(currentStage) + "i" + str(currentMove) + ".json")
            de = newEnergy - prevEnergy
            # Keep the configuration with the fewest crossings seen so far.
            if ncn <= ncp:
                ncp = ncn
                saveConfiguration('K_s' + str(currentStage) + 'i' + str(currentMove), data)
            # Accept or reject the current configuration.
            if de < 0:
                prevEnergy = newEnergy
                vc = chooseANode(numNodes)
                # if currentMove < 10:
                saveConfiguration('s' + str(currentStage) + 'i' + str(currentMove), data)
            elif uniform(0, 1) < Math.exp(-de / T):
                # Accept the move even though it is worse (Metropolis rule).
                vc = chooseANode(numNodes)
            else:
                moveBack(vc, vx, vy, nodes, links)
            print("--- %s seconds ---" % (time.time() - sai_time))  # ~0.6 s per iteration => 0.14 s
            currentMove = currentMove + 1
        currentStage = currentStage + 1
        print("--- %s seconds ---" % (time.time() - sao_time))
        T = gamma * T
    # fine-tuning phase
    currentStage = 0
    T = 100
    prevEnergy, ncp = computeFTEnergy(nodes, links, str(currentStage) + str(currentMove))
    while currentStage <= fineTuningStages:
        currentMove = 0
        while currentMove <= numMoves:
            print("currentStage FT: " + str(currentStage))
            print("currentMove 2: " + str(currentMove))
            vx = vc['x']
            vy = vc['y']
            move(vc, T, nodes, links)
            newEnergy, ncn = computeFTEnergy(nodes, links, str(currentStage) + str(currentMove))
            if ncn <= ncp:
                ncp = ncn
                saveConfiguration('K_FT_s' + str(currentStage) + 'i' + str(currentMove), data)
            de = newEnergy - prevEnergy
            # Accept or reject the current configuration.
            if de < 0:
                prevEnergy = newEnergy
                saveConfiguration('FT_s' + str(currentStage) + 'i' + str(currentMove), data)
                vc = chooseANode(numNodes)
            else:
                moveBack(vc, vx, vy, nodes, links)
            currentMove = currentMove + 1
        currentStage = currentStage + 1

simulatedAnnealing(data)
# fixNodePos(nodes)
# Report the horizontal spread of the layout, then save the result.
minimo = 50
massimo = 0
for n in nodes:
    if n['x'] < minimo:
        minimo = n['x']
    if n['x'] > massimo:
        massimo = n['x']
diff = massimo - minimo
print("diff: " + str(diff))
f = open('res.json', "w+")
f.write(json.dumps(data))
f.close()
print("--- %s seconds ---" % (time.time() - start_time))
print("done")
    res = 0
    for k in links:
        for l in links:
            try:
                skx = k['s']['x']
                sky = k['s']['y']
                tkx = k['t']['x']
                tky = k['t']['y']
                slx = l['s']['x']
                sly = l['s']['y']
                tlx = l['t']['x']
                tly = l['t']['y']
            except:
                # A link is missing its endpoint coordinates: dump both and stop.
                print(k)
                print("-----")
                print(l)
                exit()
            if intersects(skx, sky, tkx, tky, slx, sly, tlx, tly):
                res = res + 1
            # (disabled debug branch traced one specific pair of edges by name)
    return res
identifier_body
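The middle of this record is the body of numberOfIntersections, which counts pairwise hits of the parametric segment test intersects: the crossing parameters lambdA and gamma must both fall strictly inside (0, 1). A quick check, assuming the intersects function from the listing above is in scope:

# The two diagonals of the unit square cross at (0.5, 0.5).
print(intersects(0, 0, 1, 1, 0, 1, 1, 0))  # True
# Horizontal parallel segments give det == 0 and never intersect.
print(intersects(0, 0, 1, 0, 0, 1, 1, 1))  # False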
oldDH.py
###LIBRARIES
import networkx as nx
from networkx.readwrite import json_graph
from random import uniform, randint
import numpy
from numpy import linalg as LA
import math as Math
import json
import os
from pathlib import Path
import time
import shutil

start_time = time.time()

###CLEAN
# Empty the ./html/ and ./tmp/ output folders left over from previous runs.
for folder in ('./html/', './tmp/'):
    for filename in os.listdir(folder):
        file_path = os.path.join(folder, filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as e:
            print('Failed to delete %s. Reason: %s' % (file_path, e))

clear = lambda: os.system('cls')  # clears the console on Windows systems
clear()

###GRAPH
test = False
valorizzato = False  # True when the input already carries node positions
if test and not valorizzato:
    with open('test.json') as json_file:
        data = json.load(json_file)
elif valorizzato:
    with open('originale.json') as json_file:
        data = json.load(json_file)
else:
    G = nx.read_graphml('graph.graphml')
    data = json_graph.node_link_data(G)

###VARIABLES
width = 1000
height = 800
nodes = data['nodes']
links = data['links']

###FUNCTIONS
def getStartingPositions(nodes, links):
    # Scatter the nodes uniformly at random and copy the coordinates onto
    # the endpoints of their incident links.
    for n in nodes:
        n['x'] = uniform(200, width)
        n['y'] = uniform(200, height)
        for l in links:
            if l['source'] == n['id']:
                l['s'] = {'x': n['x'], 'y': n['y']}
            if l['target'] == n['id']:
                l['t'] = {'x': n['x'], 'y': n['y']}
    return nodes

def chooseANode(max):
    # Pick the node at a random index (reads the module-level nodes list).
    rand = randint(0, max - 1)
    it = 0
    for n in nodes:
        if it == rand:
            return n
        it = it + 1

# returns true iff the line from (a,b)->(c,d) intersects with (p,q)->(r,s)
def intersects(a, b, c, d, p, q, r, s):
    det = (c - a) * (s - q) - (r - p) * (d - b)
    if det == 0:
        return False  # parallel or degenerate segments
    else:
        lambdA = ((s - q) * (r - a) + (p - r) * (s - b)) / det
        gamma = ((b - d) * (r - a) + (c - a) * (s - b)) / det
        return (0 < lambdA and lambdA < 1) and (0 < gamma and gamma < 1)

def numberOfIntersections(nodes, links):
    res = 0
    for k in links:
        for l in links:
            try:
                skx = k['s']['x']
                sky = k['s']['y']
                tkx = k['t']['x']
                tky = k['t']['y']
                slx = l['s']['x']
                sly = l['s']['y']
                tlx = l['t']['x']
                tly = l['t']['y']
            except:
                # A link is missing its endpoint coordinates: dump both and stop.
                print(k)
                print("-----")
                print(l)
                exit()
            if intersects(skx, sky, tkx, tky, slx, sly, tlx, tly):
                res = res + 1
            # (disabled debug branch traced one specific pair of edges by name)
    return res

def distance_to_line(p0, p1, p2):
    # Perpendicular distance from point p0 to the line through p1 and p2.
    x_diff = p2['x'] - p1['x']
    y_diff = p2['y'] - p1['y']
    return num / den

# This phase is expensive: about 0.32 s on the 52-node test graph.
# The ideal layout has a low (but not too low) sum of squared edge lengths,
# a high (but not too high) sum of pairwise node distances, and few crossings.
# The distance from the canvas borders is not part of the energy; it is
# enforced as a constraint when moving within the radius-T neighbourhood.
# During fine tuning the point-to-line distance between each node and the
# nearby edges is also taken into account.
def computeEnergy(nodes, links, logInfo):
    sac_time = time.time()
    lambda1 = 8
    lambda3 = 5
    lambda4 = 1000
    repulsiveTot = 0
    edgeLenTot = 0
    numCrossTot = 0
    # (a disabled experiment also penalised the distance from each border)
    for u in nodes:
        posi = numpy.array([u['x'], u['y']])
        for v in nodes:
            # only distinct nodes
            if u['id'] != v['id']:
                posj = numpy.array([v['x'], v['y']])
                duv = numpy.linalg.norm(posi - posj)
                # If the nodes coincide the inverse distance is infinite,
                # so assume a very large value instead.
                if duv == 0:
                    iduv = 1000
                else:
                    iduv = 1 / pow(duv, 2)
                repulsiveTot = repulsiveTot + lambda1 * duv
                for l in links:
                    if l['source'] == u['id'] and l['target'] == v['id']:
                        edgeLenTot = edgeLenTot + lambda3 * pow(numpy.linalg.norm(posi - posj), 2)
                        break
    # This call costs about 0.28 s.
    numCrossTot = lambda4 * numberOfIntersections(nodes, links)
    msg = "r: " + str(repulsiveTot) + ", e: " + str(edgeLenTot) + ", c: " + str(numCrossTot) + "\n" + logInfo + "\n"
    log = open('./tmp/info.log', "a+")
    log.write(msg)
    log.close()
    tot = repulsiveTot + edgeLenTot + numCrossTot
    totMax = 1.7976931348623157e+8  # normalise into [0,1]
    tot = tot / totMax
    return tot, numCrossTot

def computeFTEnergy(nodes, links, logInfo):
    tot, numCrossTot = computeEnergy(nodes, links, logInfo)
    nodeLinkDist = 0
    gmin = 60
    lambda5 = 1
    for v in nodes:
        for l in links:
            for n in nodes:
                if n['id'] == l['source']:
                    source = n
                if n['id'] == l['target']:
                    target = n
            dl = lambda5 * distance_to_line(v, source, target)
            if dl < gmin:
                nodeLinkDist = nodeLinkDist + dl
    totMax = 1.7976931348623157e+8  # normalise into [0,1]
    tot = (tot + nodeLinkDist) / totMax
    return tot, numCrossTot

def move(node, rad, nodes, links):
    # Clamp the centre of the move so the new position keeps a margin
    # from the canvas borders.
    startx = node['x']
    starty = node['y']
    if node['x'] + rad > 1400:
        startx = 1400 - rad - 20  # subtract a margin
    elif node['x'] - rad < 30:
        startx = 30 + rad + 20  # add a margin
    if node['y'] + rad > 1000:
        starty = 1000 - rad - 20  # subtract a margin
    elif node['y'] - rad < 30:
        starty = 30 + rad + 20  # add a margin
    # Radial approach (move along a circle of radius rad): made few good moves.
    # Square approach: move anywhere inside a square of side 2*rad.
    nx = uniform(startx - rad, startx + rad)
    ny = uniform(starty - rad, starty + rad)
    log = open('./tmp/info.log', "a+")
    log.write("(" + str(node['x']) + ", " + str(node['y']) + ") =>(" + str(nx) + ", " + str(ny) + ")\n")
    log.close()
    for n in nodes:
        if n['id'] == node['id']:
            n['x'] = nx
            n['y'] = ny
            break
    for l in links:
        if l['source'] == node['id']:
            l['s']['x'] = nx
            l['s']['y'] = ny
        if l['target'] == node['id']:
            l['t']['x'] = nx
            l['t']['y'] = ny

def moveBack(vc, vx, vy, nodes, links):
    # Restore the node and the endpoints of its incident links.
    for n in nodes:
        if n['id'] == vc['id']:
            n['x'] = vx
            n['y'] = vy
            break
    for l in links:
        if l['source'] == vc['id']:
            l['s']['x'] = vx
            l['s']['y'] = vy
        if l['target'] == vc['id']:
            l['t']['x'] = vx
            l['t']['y'] = vy

def fixNodePos(nodes):
    # Shift every node so that none is closer than 30 px to the top-left corner.
    maxX = 0
    maxY = 0
    for v in nodes:
        if v['x'] < 30 and abs(v['x'] - 30) > maxX:
            maxX = abs(v['x'] - 30)
        if v['y'] < 30 and abs(v['y'] - 30) > maxY:
            maxY = abs(v['y'] - 30)
    for v in nodes:
        v['x'] = v['x'] + 100 + maxX
        v['y'] = v['y'] + 100 + maxY

def saveConfiguration(filename, data):
    # HTML viewer: clone parent.html and point it at this configuration's json.
    fileContent = Path('parent.html').read_text()
    fileContent = fileContent.replace("@@@@@@", filename + ".json")
    html = open('./html/' + filename + '.html', "w+")
    html.write(fileContent)
    html.close()
    ##json
    content = open('./tmp/' + filename + '.json', "w+")
    content.write(json.dumps(data))
    content.close()

def polyn(input):
    return 25 * input  # 30 is better, but too slow for large inputs

def simulatedAnnealing(data):
    # Start from random node positions.
    nodes = data['nodes']
    links = data['links']
    valorizzato = False
    if not valorizzato:
        nodes = getStartingPositions(nodes, links)
        f = open('originale.json', "w+")
        f.write(json.dumps(data))
        f.close()
    else:
        for n in nodes:
            for l in links:
                if l['source'] == n['id']:
                    l['s'] = {'x': n['x'], 'y': n['y']}
                if l['target'] == n['id']:
                    l['t'] = {'x': n['x'], 'y': n['y']}
    T = 100
    stages = 11
    fineTuningStages = 4
    numNodes = len(nodes)
    currentStage = 0
    numMoves = polyn(numNodes)
    currentMove = 0
    gamma = 0.8
    fuoriArea = False
    vc = chooseANode(numNodes)
    prevEnergy, ncp = computeEnergy(nodes, links, '')
    if ncp <= 2000000:
        saveConfiguration('outer', data)
    while currentStage <= stages and not fuoriArea:
        sao_time = time.time()
        currentMove = 0
        while currentMove <= numMoves and not fuoriArea:
            sai_time = time.time()
            print("currentStage: " + str(currentStage) + "\nT: " + str(T))
            print("currentMove: " + str(currentMove))
            vx = vc['x']
            vy = vc['y']
            # (disabled check: leaving the canvas jumped straight to fine tuning)
            move(vc, T, nodes, links)
            newEnergy, ncn = computeEnergy(nodes, links, "s" + str(currentStage) + "i" + str(currentMove) + ".json")
            de = newEnergy - prevEnergy
            # Keep the configuration with the fewest crossings seen so far.
            if ncn <= ncp:
                ncp = ncn
                saveConfiguration('K_s' + str(currentStage) + 'i' + str(currentMove), data)
            # Accept or reject the current configuration.
            if de < 0:
                prevEnergy = newEnergy
                vc = chooseANode(numNodes)
                # if currentMove < 10:
                saveConfiguration('s' + str(currentStage) + 'i' + str(currentMove), data)
            elif uniform(0, 1) < Math.exp(-de / T):
                # Accept the move even though it is worse (Metropolis rule).
                vc = chooseANode(numNodes)
            else:
                moveBack(vc, vx, vy, nodes, links)
            print("--- %s seconds ---" % (time.time() - sai_time))  # ~0.6 s per iteration => 0.14 s
            currentMove = currentMove + 1
        currentStage = currentStage + 1
        print("--- %s seconds ---" % (time.time() - sao_time))
        T = gamma * T
    # fine-tuning phase
    currentStage = 0
    T = 100
    prevEnergy, ncp = computeFTEnergy(nodes, links, str(currentStage) + str(currentMove))
    while currentStage <= fineTuningStages:
        currentMove = 0
        while currentMove <= numMoves:
            print("currentStage FT: " + str(currentStage))
            print("currentMove 2: " + str(currentMove))
            vx = vc['x']
            vy = vc['y']
            move(vc, T, nodes, links)
            newEnergy, ncn = computeFTEnergy(nodes, links, str(currentStage) + str(currentMove))
            if ncn <= ncp:
                ncp = ncn
                saveConfiguration('K_FT_s' + str(currentStage) + 'i' + str(currentMove), data)
            de = newEnergy - prevEnergy
            # Accept or reject the current configuration.
            if de < 0:
                prevEnergy = newEnergy
                saveConfiguration('FT_s' + str(currentStage) + 'i' + str(currentMove), data)
                vc = chooseANode(numNodes)
            else:
                moveBack(vc, vx, vy, nodes, links)
            currentMove = currentMove + 1
        currentStage = currentStage + 1

simulatedAnnealing(data)
# fixNodePos(nodes)
# Report the horizontal spread of the layout, then save the result.
minimo = 50
massimo = 0
for n in nodes:
    if n['x'] < minimo:
        minimo = n['x']
    if n['x'] > massimo:
        massimo = n['x']
diff = massimo - minimo
print("diff: " + str(diff))
f = open('res.json', "w+")
f.write(json.dumps(data))
f.close()
print("--- %s seconds ---" % (time.time() - start_time))
print("done")
    num = abs(y_diff * p0['x'] - x_diff * p0['y'] + p2['x'] * p1['y'] - p2['y'] * p1['x'])
    den = Math.sqrt(y_diff ** 2 + x_diff ** 2)
random_line_split
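This record's middle is the numerator and denominator of distance_to_line, the standard point-to-line distance |(y2-y1)*x0 - (x2-x1)*y0 + x2*y1 - y2*x1| / sqrt((y2-y1)^2 + (x2-x1)^2). A worked check, assuming the function from the listing above is in scope:

# Distance from the origin to the horizontal line through (0, 1) and (1, 1):
p0 = {'x': 0, 'y': 0}
p1 = {'x': 0, 'y': 1}
p2 = {'x': 1, 'y': 1}
print(distance_to_line(p0, p1, p2))  # 1.0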
oldDH.py
###LIBRARIES
import networkx as nx
from networkx.readwrite import json_graph
from random import uniform, randint
import numpy
from numpy import linalg as LA
import math as Math
import json
import os
from pathlib import Path
import time
import shutil

start_time = time.time()

###CLEAN
# Empty the ./html/ and ./tmp/ output folders left over from previous runs.
for folder in ('./html/', './tmp/'):
    for filename in os.listdir(folder):
        file_path = os.path.join(folder, filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as e:
            print('Failed to delete %s. Reason: %s' % (file_path, e))

clear = lambda: os.system('cls')  # clears the console on Windows systems
clear()

###GRAPH
test = False
valorizzato = False  # True when the input already carries node positions
if test and not valorizzato:
    with open('test.json') as json_file:
        data = json.load(json_file)
elif valorizzato:
    with open('originale.json') as json_file:
        data = json.load(json_file)
else:
    G = nx.read_graphml('graph.graphml')
    data = json_graph.node_link_data(G)

###VARIABLES
width = 1000
height = 800
nodes = data['nodes']
links = data['links']

###FUNCTIONS
def getStartingPositions(nodes, links):
    # Scatter the nodes uniformly at random and copy the coordinates onto
    # the endpoints of their incident links.
    for n in nodes:
        n['x'] = uniform(200, width)
        n['y'] = uniform(200, height)
        for l in links:
            if l['source'] == n['id']:
                l['s'] = {'x': n['x'], 'y': n['y']}
            if l['target'] == n['id']:
                l['t'] = {'x': n['x'], 'y': n['y']}
    return nodes

def chooseANode(max):
    # Pick the node at a random index (reads the module-level nodes list).
    rand = randint(0, max - 1)
    it = 0
    for n in nodes:
        if it == rand:
            return n
        it = it + 1

# returns true iff the line from (a,b)->(c,d) intersects with (p,q)->(r,s)
def intersects(a, b, c, d, p, q, r, s):
    det = (c - a) * (s - q) - (r - p) * (d - b)
    if det == 0:
        return False  # parallel or degenerate segments
    else:
        lambdA = ((s - q) * (r - a) + (p - r) * (s - b)) / det
        gamma = ((b - d) * (r - a) + (c - a) * (s - b)) / det
        return (0 < lambdA and lambdA < 1) and (0 < gamma and gamma < 1)

def numberOfIntersections(nodes, links):
    res = 0
    for k in links:
        for l in links:
            try:
                skx = k['s']['x']
                sky = k['s']['y']
                tkx = k['t']['x']
                tky = k['t']['y']
                slx = l['s']['x']
                sly = l['s']['y']
                tlx = l['t']['x']
                tly = l['t']['y']
            except:
                # A link is missing its endpoint coordinates: dump both and stop.
                print(k)
                print("-----")
                print(l)
                exit()
            if intersects(skx, sky, tkx, tky, slx, sly, tlx, tly):
                res = res + 1
            # (disabled debug branch traced one specific pair of edges by name)
    return res

def distance_to_line(p0, p1, p2):
    # Perpendicular distance from point p0 to the line through p1 and p2.
    x_diff = p2['x'] - p1['x']
    y_diff = p2['y'] - p1['y']
    num = abs(y_diff * p0['x'] - x_diff * p0['y'] + p2['x'] * p1['y'] - p2['y'] * p1['x'])
    den = Math.sqrt(y_diff ** 2 + x_diff ** 2)
    return num / den

# This phase is expensive: about 0.32 s on the 52-node test graph.
# The ideal layout has a low (but not too low) sum of squared edge lengths,
# a high (but not too high) sum of pairwise node distances, and few crossings.
# The distance from the canvas borders is not part of the energy; it is
# enforced as a constraint when moving within the radius-T neighbourhood.
# During fine tuning the point-to-line distance between each node and the
# nearby edges is also taken into account.
def computeEnergy(nodes, links, logInfo):
    sac_time = time.time()
    lambda1 = 8
    lambda3 = 5
    lambda4 = 1000
    repulsiveTot = 0
    edgeLenTot = 0
    numCrossTot = 0
    # (a disabled experiment also penalised the distance from each border)
    for u in nodes:
        posi = numpy.array([u['x'], u['y']])
        for v in nodes:
            # only distinct nodes
            if u['id'] != v['id']:
                posj = numpy.array([v['x'], v['y']])
                duv = numpy.linalg.norm(posi - posj)
                # If the nodes coincide the inverse distance is infinite,
                # so assume a very large value instead.
                if duv == 0:
                    iduv = 1000
                else:
                    iduv = 1 / pow(duv, 2)
                repulsiveTot = repulsiveTot + lambda1 * duv
                for l in links:
                    if l['source'] == u['id'] and l['target'] == v['id']:
                        edgeLenTot = edgeLenTot + lambda3 * pow(numpy.linalg.norm(posi - posj), 2)
                        break
    # This call costs about 0.28 s.
    numCrossTot = lambda4 * numberOfIntersections(nodes, links)
    msg = "r: " + str(repulsiveTot) + ", e: " + str(edgeLenTot) + ", c: " + str(numCrossTot) + "\n" + logInfo + "\n"
    log = open('./tmp/info.log', "a+")
    log.write(msg)
    log.close()
    tot = repulsiveTot + edgeLenTot + numCrossTot
    totMax = 1.7976931348623157e+8  # normalise into [0,1]
    tot = tot / totMax
    return tot, numCrossTot

def computeFTEnergy(nodes, links, logInfo):
    tot, numCrossTot = computeEnergy(nodes, links, logInfo)
    nodeLinkDist = 0
    gmin = 60
    lambda5 = 1
    for v in nodes:
        for l in links:
            for n in nodes:
                if n['id'] == l['source']:
                    source = n
                if n['id'] == l['target']:
                    target = n
            dl = lambda5 * distance_to_line(v, source, target)
            if dl < gmin:
                nodeLinkDist = nodeLinkDist + dl
    totMax = 1.7976931348623157e+8  # normalise into [0,1]
    tot = (tot + nodeLinkDist) / totMax
    return tot, numCrossTot

def move(node, rad, nodes, links):
    # Clamp the centre of the move so the new position keeps a margin
    # from the canvas borders.
    startx = node['x']
    starty = node['y']
    if node['x'] + rad > 1400:
        startx = 1400 - rad - 20  # subtract a margin
    elif node['x'] - rad < 30:
        startx = 30 + rad + 20  # add a margin
    if node['y'] + rad > 1000:
        starty = 1000 - rad - 20  # subtract a margin
    elif node['y'] - rad < 30:
        starty = 30 + rad + 20  # add a margin
    # Radial approach (move along a circle of radius rad): made few good moves.
    # Square approach: move anywhere inside a square of side 2*rad.
    nx = uniform(startx - rad, startx + rad)
    ny = uniform(starty - rad, starty + rad)
    log = open('./tmp/info.log', "a+")
    log.write("(" + str(node['x']) + ", " + str(node['y']) + ") =>(" + str(nx) + ", " + str(ny) + ")\n")
    log.close()
    for n in nodes:
        if n['id'] == node['id']:
            n['x'] = nx
            n['y'] = ny
            break
    for l in links:
        if l['source'] == node['id']:
            l['s']['x'] = nx
            l['s']['y'] = ny
        if l['target'] == node['id']:
            l['t']['x'] = nx
            l['t']['y'] = ny

def moveBack(vc, vx, vy, nodes, links):
    # Restore the node and the endpoints of its incident links.
    for n in nodes:
        if n['id'] == vc['id']:
            n['x'] = vx
            n['y'] = vy
            break
    for l in links:
        if l['source'] == vc['id']:
            l['s']['x'] = vx
            l['s']['y'] = vy
        if l['target'] == vc['id']:
            l['t']['x'] = vx
            l['t']['y'] = vy

def fixNodePos(nodes):
    # Shift every node so that none is closer than 30 px to the top-left corner.
    maxX = 0
    maxY = 0
    for v in nodes:
        if v['x'] < 30 and abs(v['x'] - 30) > maxX:
            maxX = abs(v['x'] - 30)
        if v['y'] < 30 and abs(v['y'] - 30) > maxY:
            maxY = abs(v['y'] - 30)
    for v in nodes:
        v['x'] = v['x'] + 100 + maxX
        v['y'] = v['y'] + 100 + maxY

def saveConfiguration(filename, data):
    # HTML viewer: clone parent.html and point it at this configuration's json.
    fileContent = Path('parent.html').read_text()
    fileContent = fileContent.replace("@@@@@@", filename + ".json")
    html = open('./html/' + filename + '.html', "w+")
    html.write(fileContent)
    html.close()
    ##json
    content = open('./tmp/' + filename + '.json', "w+")
    content.write(json.dumps(data))
    content.close()

def po
nput):
    return 25 * input  # 30 is better, but too slow for large inputs

def simulatedAnnealing(data):
    # start from random node positions
    nodes = data['nodes']
    links = data['links']
    valorizzato = False
    if (not valorizzato):
        nodes = getStartingPositions(nodes, links)
        f = open('originale.json', "w+")
        f.write(json.dumps(data))
        f.close()
    else:
        # positions already assigned: just propagate them to the link endpoints
        for n in nodes:
            for l in links:
                if (l['source'] == n['id']):
                    l['s'] = {'x': n['x'], 'y': n['y']}
                if (l['target'] == n['id']):
                    l['t'] = {'x': n['x'], 'y': n['y']}
    T = 100
    stages = 11
    fineTuningStages = 4
    numNodes = len(nodes)
    currentStage = 0
    fineTuningStage = 0
    numMoves = polyn(numNodes)
    currentMove = 0
    gamma = 0.8
    fuoriArea = False
    ###
    vc = chooseANode(numNodes)
    prevEnergy, ncp = computeEnergy(nodes, links, '')
    if (ncp <= 2000000):
        saveConfiguration('outer', data)
    while (currentStage <= stages and not fuoriArea):
        sao_time = time.time()
        currentMove = 0
        while (currentMove <= numMoves and not fuoriArea):
            sai_time = time.time()
            print("currentStage: " + str(currentStage) + "\nT: " + str(T))
            print("currentMove: " + str(currentMove))
            vx = vc['x']
            vy = vc['y']
            # (an earlier variant jumped straight to fine tuning when a node
            # left the drawing area; those checks are disabled)
            move(vc, T, nodes, links)
            newEnergy, ncn = computeEnergy(nodes, links, "s" + str(currentStage) + "i" + str(currentMove) + ".json")
            de = newEnergy - prevEnergy
            # keep the configuration with the fewest crossings seen so far
            if (ncn <= ncp):
                ncp = ncn
                saveConfiguration('K_s' + str(currentStage) + 'i' + str(currentMove), data)
            if (de < 0):
                # accept the new configuration
                prevEnergy = newEnergy
                vc = chooseANode(numNodes)
                # if(currentMove<10): saveConfiguration('s'+str(currentStage)+'i'+str(currentMove),data)
            elif (uniform(0, 1) < Math.exp(-de / T)):
                # accept the configuration even though it is worse
                vc = chooseANode(numNodes)
            else:
                # move back
                moveBack(vc, vx, vy, nodes, links)
            print("--- %s seconds ---" % (time.time() - sai_time))  # ~0.6s per iteration => 0.14s
            currentMove = currentMove + 1
        currentStage = currentStage + 1
        print("--- %s seconds ---" % (time.time() - sao_time))
        T = gamma * T

    # fine-tuning phase
    currentStage = 0
    T = 100
    prevEnergy, ncp = computeFTEnergy(nodes, links, str(currentStage) + str(currentMove))
    while (currentStage <= fineTuningStages):
        currentMove = 0
        while (currentMove <= numMoves):
            print("currentStage FT: " + str(currentStage))
            print("currentMove 2: " + str(currentMove))
            vx = vc['x']
            vy = vc['y']
            move(vc, T, nodes, links)
            newEnergy, ncn = computeFTEnergy(nodes, links, str(currentStage) + str(currentMove))
            if (ncn <= ncp):
                ncp = ncn
                saveConfiguration('K_FT_s' + str(currentStage) + 'i' + str(currentMove), data)
            de = newEnergy - prevEnergy
            if (de < 0):
                # accept the new configuration
                prevEnergy = newEnergy
                saveConfiguration('FT_s' + str(currentStage) + 'i' + str(currentMove), data)
                vc = chooseANode(numNodes)
            else:
                moveBack(vc, vx, vy, nodes, links)
            currentMove = currentMove + 1
        currentStage = currentStage + 1

simulatedAnnealing(data)
# fixNodePos(nodes)

# diagnostic: horizontal spread of the final layout
minimo = 50
massimo = 0
for n in nodes:
    if (n['x'] < minimo):
        minimo = n['x']
    if (n['x'] > massimo):
        massimo = n['x']
diff = massimo - minimo
print("diff: " + str(diff))  # diagnostic only, so that res.json is still written

f = open('res.json', "w+")
f.write(json.dumps(data))
f.close()
print("--- %s seconds ---" % (time.time() - start_time))
print("fine")
lyn(i
identifier_name
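The simulatedAnnealing loop above accepts a worse layout with probability exp(-de/T), the standard Metropolis criterion, and cools the temperature by the factor gamma after each stage. Below is a minimal self-contained sketch of just that acceptance-and-cooling rule; the energy deltas are made-up stand-ins, not the graph energy defined above.

import math
import random

def metropolis_accept(de, T):
    # improvements (de < 0) are always kept; a worsening of de is kept
    # with probability exp(-de / T), which shrinks as T cools
    return de < 0 or random.uniform(0, 1) < math.exp(-de / T)

T, gamma = 100.0, 0.8  # same starting temperature and cooling factor as above
for stage in range(12):
    de = random.uniform(-0.05, 0.05)  # stand-in for newEnergy - prevEnergy
    accepted = metropolis_accept(de, T)
    print("stage %d, T=%.2f, de=%+.3f -> %s" % (stage, T, de, accepted))
    T *= gamma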
textContent-es.js
var textContent_ES = {
"intro": { "paragraphs": [ "Mi nombre es Francisco pero todos me llaman Paco, y me gano la vida como desarrollador de software.", "He estado creando diversas soluciones de software para varios clientes desde 2009. Lo que más me gusta son los desafíos y resolver problemas, mejorando mis capacidades a diario. Me encanta ayudar a mis clientes y compañeros y ver cómo los proyectos maduran y crecen. Me inspira ser parte de un equipo con un objetivo común y me despierto todos los días motivado para hacer las cosas. En el día a día el trabajo me proporciona una gran satisfacción y la resolución de problemas es una de mis fortalezas y algo con lo que realmente disfruto.", "También me apasiona aprender, conocer nuevos lugares y conocer gente nueva. Tengo la suerte de que mi carrera en el desarrollo me haya brindado la oportunidad de hacer las tres cosas.", "En mi tiempo libre disfruto aprendiendo y tocando la guitarra, leyendo un buen libro o cocinando un buen festín con amigos. Aparte, me encanta poder enseñar mis conocimientos dando clases particulares de programación. También me inscribo a cursos de humanidades cuando la ocasión lo merece para no olvidar mi parte más 'humana'. No dudes en echar un vistazo a mi sitio web para aprender más sobre mí y mi trabajo.", "¡Hasta la vista!" ], "optionMenu": "Introducción", "wellcome": "Bienvenido a mi website", "btnWellcome": "Explícame más..." },
"techSkills": { "paragraphs": [ "Me considero un desarrollador full-stack capaz de contribuir a proyectos en todas sus facetas. La mayor parte de mi experiencia ha sido con tecnologías de Microsoft, incluida una amplia experiencia en el uso de C#, ASP.NET, Web Forms y SQL Server. Me siento cómodo usando las herramientas de Microsoft, incluidas varias versiones de Visual Studio, Entity Framework, Team Foundation Server y Git.", "Para proyectos de desarrollo web he trabajado con HTML/CSS y JavaScript, incluidos varios frameworks y librerías populares como ASP.NET, Telerik, Bootstrap, Angular, Slick y jQuery. También tengo experiencia en el uso de herramientas cloud de Azure y Amazon Web Services.", "A continuación se muestra una breve lista de mis habilidades." ], "devSkills": "C#, ASP.NET, ASP.NET Core, ASP.NET Web API, MVC, Entity Framework, EFCore, Telerik, Soluciones en Azure, React, jQuery, JavaScript, Angular, Typescript, HTML, CSS, Bootstrap, PrimeNg, FontAwesome, Oracle, SQL, CloudantDb, CosmosDb", "concepts": "Software development process (Agile, Scrum), relational databases, data modelling, object-oriented programming, TDD, MVC, n-tier architecture, SOLID principles", "tools": "TFS, Git, GitHub, Visual Studio 2019, Visual Studio Code, MSSQL Management Studio, Azure, Amazon Web Services, Notepad++, Atom, JetBrains Resharper, Docker, Source Tree, Postman", "optionMenu": "Tecnologías", "subTitle1": "Desarrollo", "subTitle2": "Conceptos", "subTitle3": "Herramientas" },
"personalSkills": { "paragraphs": [ "Además de ofrecer un sólido conjunto de habilidades técnicas prácticas, creo que mis habilidades interpersonales y mi visión empresarial son activos adicionales que ofrezco a mis clientes. Me siento cómodo en roles de cara al cliente que requieren habilidades blandas para negociar entregables y administrar expectativas de los grupos de interés.", "A lo largo de mi carrera he gestionado con éxito varios proyectos. Trabajé como analista para recopilar los requisitos de los usuarios y redacté documentación detallada de los mismos. También he gestionado equipos de desarrollo en el extranjero y puedo mentorizar a los integrantes de nivel junior del equipo.", "Mis habilidades analíticas son otra fortaleza, y regularmente he podido identificar y resolver ineficiencias dentro de los procesos de desarrollo que han mejorado la velocidad de entrega.", "Soy hablante nativo de español y catalán y hablo inglés con fluidez." ], "optionMenu": "Skills" },
"experiences": { "optionMenu": "Experiencia",
"desktopPublisher": { "id": 1, "ref": "Diseñador/Maquetador", "slickitem": "#slick-slide08", "company": "Varias compañías", "website": "", "city": "Barcelona", "role": "Diseñador/Maquetador", "dateStart": "Dic 2006", "dateEnd": "Ago 2008", "techStack": "", "description": "Antes de entrar en el mundo de la informática trabajé como diseñador/maquetador para varias empresas en el área de Barcelona utilizando mayormente Acrobat, Photoshop, Illustrator, InDesign, Quark, Freehand y ArtiosCad.", "tasks": "", "image": "graphic" },
"netLife": { "id": 2, "ref": "netLife", "slickitem": "#slick-slide07", "company": "Net-Life S.L", "city": "Barcelona", "website": "", "role": "Programador", "dateStart": "Dic 2009", "dateEnd": "May 2010", "techStack": "", "description": "Este fue mi primer trabajo como programador en prácticas montando páginas web usando C# (framework 3.5), ASP.NET, HTML, MS SQL Server y Visual Studio 2010.", "tasks": "", "image": "netlife" },
"btgsa": { "id": 3, "ref": "btgsa", "slickitem": "#slick-slide06", "company": "T&G Think & Grow (ahora Aggity)", "city": "Barcelona", "website": "http://btgsa.com", "role": "Software Engineer", "dateStart": "Jun 2010", "dateEnd": "Mar 2014", "techStack": "C#, Visual Studio, Oracle, MSSQL Server, TFS (Sharepoint integrated), ASP.NET, HTML, Gembase.", "description": "En T&G (ahora Aggity) trabajé como software engineer focalizado en tareas de desarrollo utilizando el stack de Microsoft. También formé a usuarios finales, entre otras tareas.", "tasks": [ "Desarrollé y modifiqué funciones del programa de acuerdo con las especificaciones del cliente utilizando Visual Studio 2010, C# y ASP.NET.", "Diseñé e implementé bases de datos de MS SQL Server y Oracle que incluían consultas complejas, procedimientos almacenados y vistas para las migraciones de datos de los nuevos clientes.", "Corregí problemas de software e irregularidades en consultas o procedimientos almacenados (SQL) y en la lógica de negocio en C#.", "Escribí documentación funcional para nuevos clientes y capacité a nuevos usuarios." ], "image": "aggity" },
"luxtripper": { "id": 4, "ref": "luxtripper", "slickitem": "#slick-slide05", "company": "Luxtripper LTD", "city": "London", "website": "http://luxtripper.co.uk", "role": "Project Manager / Developer", "dateStart": "Ene 2015", "dateEnd": "Ago 2015", "techStack": "", "description": "Startup del sector del turismo de lujo para encontrar destinos a nivel mundial según un algoritmo propio. Implementé la infraestructura mínima (servidores, backups, control de versiones) y los procesos del departamento de software dirigiendo un equipo de cuatro desarrolladores offshore en Pakistán.", "tasks": [ "Gestioné varios proyectos internos. Desempeñé funciones como analista de negocios para recopilar y escribir los requisitos para los departamentos de ventas y marketing.", "Diseñé e implementé integraciones con API de terceros para reservas de hoteles. Prototipé una función de reserva de vuelos.", "Implementé el software de control de versiones TFS y establecí procesos sobre el mismo (fusiones de ramas, despliegues, pruebas).", "Implementé los servidores de producción, backup y test usando Rackspace y la plataforma Azure. Automaticé las copias de seguridad mediante scripts para que las bases de datos, los servidores de imágenes y el código se pudieran guardar en el servicio Amazon S3.", "Resolví errores de codificación relacionados con SQL y lógica de negocio. Contribuí a cambios cosméticos.", "Implementé planes de prueba para front y back-end con TFS en línea.", "Migré un blog de WordPress a un servidor IIS interno migrando los datos de MySQL a MS SQL Server." ], "image": "luxtripper" },
"membra": { "id": 5, "ref": "membra", "slickitem": "#slick-slide04", "company": "Membership Engagement Services", "city": "London", "website": "http://membra.co.uk", "role": "Web Developer", "dateStart": "Ago 2015", "dateEnd": "Ago 2016", "techStack": "", "description": "Implementé nuevas características y funcionalidades en un sitio web utilizado por los hospitales de la NHS para recopilar y presentar datos de encuestas a los usuarios (casi no tenían back-end).", "tasks": [ "Modifiqué el sitio web para que fuera responsive y así mejorar la compatibilidad entre dispositivos. Añadí una barra de progreso para mejorar la UI/UX, entre otras funcionalidades.", "Implementé un mantenimiento para todo lo referente a las preguntas/respuestas de encuestas, como Ordenar-Modificar-Eliminar (en lugar de hacer cambios directamente en la base de datos).", "Realicé múltiples tareas de bases de datos, incluidas tablas dinámicas, creación y diseño de tablas, procedimientos almacenados y optimización de consultas. Hice un script para automatizar la importación de datos que redujo el tiempo de días a solo seis horas.", "Creé una herramienta de administración de arquitectura de tres niveles que automatizaba y optimizaba las tareas de los administradores de proyectos. Desarrollada con Bootstrap, Telerik framework, ASP.NET Webforms, C# y MS SQL Server." ], "image": "mes" },
"mission": { "id": 6, "ref": "mission", "slickitem": "#slick-slide03", "company": "Mission Communications", "city": "London", "website": "http://mission-communications.net", "role": "Web Developer", "dateStart": "Ago 2016", "dateEnd": "Nov 2017", "techStack": "", "description": "En Mission Communications pude trabajar en varios proyectos de desarrollo web para clientes de diversas industrias y, en particular, hice contribuciones significativas al desarrollo de sightseeingpass.com.", "tasks": [ "Creé un sitio web de comercio electrónico y contenido de noticias/blogs que se integró con Facebook, Instagram y Twitter. Usé Web Forms, C#, Bootstrap y jQuery.", "Creé un sitio para un evento de administradores de propiedades privadas. El sitio estaba protegido con contraseña para mostrar detalles sobre propiedades disponibles (imágenes, descripción, planos, etc.). Implementé una presentación de diapositivas utilizando la librería Slick de JavaScript y Typescript. El sitio permitía a los agentes inmobiliarios enviar correos electrónicos individuales o masivos a los registrados (adjuntando pdfs con la información de la propiedad). Construido en Web Forms, C#, jQuery y Bootstrap.", "Convertí sitios web críticos para la empresa en aplicaciones web ASP.NET para mejorar la capacidad de depuración.", "Migré varios sitios web a TFS para permitir a los desarrolladores trabajar localmente, reemplazando una solución FTP problemática.", "Implementé nuevos endpoints y corregí errores en la API de la empresa. La API se desarrolló utilizando la arquitectura MVC con Entity Framework apuntando a una base de datos de MS SQL Server." ], "sightseeingpass": [ "Añadí una nueva función en el back-end de sightseeingpass.com para administrar todas las imágenes relacionadas con una atracción turística. La función permitía a los usuarios cargar varias imágenes a la vez y ordenarlas mediante drag and drop. Se utilizaron Web Forms, C#, jQuery y SQL stored procedures para las operaciones CRUD.", "Desarrollé una nueva función en sightseeingpass.com para que los usuarios creasen itinerarios personalizados para los recorridos en autobús, con la capacidad de seleccionar fechas y atracciones de una lista (utilizando datos de la API de la empresa), creando un mapa de Google con las rutas más eficientes utilizando el diseño aportado por la diseñadora gráfica. Lo hice con web forms (user control), Bootstrap, C#, jQuery, controladores ASP.NET y API internas.", "Creé una función utilizando la API de la empresa para comparar datos entre 3 bases de datos con alertas de cualquier diferencia. La solución permitió a los usuarios actualizar datos dispares entre la base de datos del sitio web y la base de datos del sistema central con un solo clic y permitió que el equipo pudiera verificar bajo demanda la consistencia/coherencia entre los sistemas. Se utilizó MS SQL Server, la API interna y C#.", "Escribí pruebas unitarias para validar la reescritura de URL y la precisión de los precios de venta al público (número de personas, tipo de entrega, tipo de tarjeta, etc.)." ], "image": "mission" },
"educo": { "id": 7, "ref": "educo", "slickitem": "#slick-slide02", "company": "Educo ONG", "city": "Barcelona", "website": "https://www.educo.org/", "role": "Web Developer", "dateStart": "Mar 2018", "dateEnd": "Ago 2018", "techStack": "Kentico CMS, C#, SQL Server, Javascript", "description": "ONG para velar y salvaguardar los derechos de la infancia. Estuve cubriendo una baja por maternidad ayudando con las incidencias del aplicativo.", "tasks": [ "Creé un componente de crowdfunding en Kentico CMS integrado con los sistemas de pago Redsys y Paypal. Usé componentes C#, Javascript, HTML, CSS y Kentico.", "Corregí bugs en el sitio web comercial, también en Kentico CMS.", "Refactoricé código Javascript de acuerdo con los principios SOLID." ], "image": "educo" },
"wivi": { "id": 8, "ref": "wivi", "slickitem": "#slick-slide01", "company": "Wivi Vision", "city": "Barcelona", "website": "https://wivivision.com/", "role": "Web Developer", "dateStart": "Oct 2018", "dateEnd": "Abr 2019", "techStack": "", "description": "Debido a un acuerdo de confidencialidad no se me permite mostrar públicamente ninguna descripción de ninguna tarea ni tecnología durante mi tiempo en esta startup.",
},
"planeta": { "id": 9, "ref": "planeta", "slickitem": "#slick-slide00", "company": "Grupo Planeta", "city": "Barcelona", "website": "https://www.planeta.es/en/learning", "role": "Web Developer - Team Leader", "dateStart": "Ago 2018", "dateEnd": "Dic 2020", "techStack": "ASP.NET, ASP.NET API, C#, HTML, CSS, Canvas, Javascript, jQuery, SQL, SSIS, IIS, CMS DotnetNuke", "description": "Trabajé como desarrollador web para la división de formación y universidades del grupo, donde se gestiona desde las webs de captación de los cursos hasta la matriculación en los mismos y la integración con las plataformas de aprendizaje. Los últimos 4 meses actué como team lead gestionando un equipo de 6 desarrolladores, 2 QA y 1 BA.", "tasks": [ "Integración de dos marcas, debiendo migrar todos los datos de una marca (alumnos, matrículas, cursos…) hacia la otra para preservar el histórico de las mismas.", "Solución para el cambio de una cabina NAS conflictiva donde se alojaba toda la documentación que se generaba durante el proceso de matriculación de los alumnos y de los cursos.", "Modifiqué el comportamiento de los dos aplicativos principales para historificar la ingente documentación que se generaba en el proceso de negocio. Se simplificaron de esta manera los procesos de backup de los mismos y se mejoró el rendimiento de la aplicación. Además se aplicaron normalizaciones a los nombres de documentos.", "Creación de un script en SQL para evitar un exploit conocido del CMS de los sitios web que permite crear superusuarios en los sites.", "Participé como team leader en los diferentes procesos de upgrade de 3 de los CMS donde corría una parte de los sitios web, aportando ayuda técnica y funcional cuando se requería y llevando a cabo los despliegues.", "Acabé un proyecto crítico para la organización: la migración del editor de ejercicios/pruebas de Flash a HTML5, teniendo que utilizar canvas para la parte de preview de los mismos.", "Como team lead participé en la planificación y ejecución del cambio de proveedor de pagos de los aplicativos debido a un cambio normativo europeo (3DS)." ], "image": "planeta" } },
"education": { "collection": [ { "id": 1, "center": "Microsoft Certified", "date": "Barcelona (2021)", "title": "Azure Developer Associate", "desc": "", "descUrl": "", "image": "az204", "url": "https://www.credly.com/badges/4a608036-cc3e-44cc-9a30-b6c0a46392d8" }, { "id": 2, "center": "Microsoft Certified", "date": "Barcelona (2021)", "title": "Azure Data Fundamentals", "desc": "", "descUrl": "", "image": "dp900", "url": "https://www.credly.com/badges/b4f0bec9-cba2-48e6-ab40-72be4c68c089" }, { "id": 3, "center": "Microsoft Certified", "date": "Barcelona (2021)", "title": "Azure Fundamentals", "desc": "", "descUrl": "", "image": "az900", "url": "https://www.youracclaim.com/badges/a24463a9-7aef-4a38-b975-a83fcec3f16c" }, { "id": 4, "center": "Microsoft Learn", "date": "Barcelona (2020 - 2021)", "title": "Varios módulos, learning paths y challenges", "desc": "", "descUrl": "", "image": "mlearn", "url": "https://docs.microsoft.com/en-us/users/franciscorosaherrero-0686" }, { "id": 5, "center": "Pluralsight", "date": "Barcelona (2019)", "title": "Varios módulos de aprendizaje y pruebas de nivel", "desc": "", "descUrl": "", "image": "pluralsight", "url": "https://app.pluralsight.com/profile/francisco-rosa" }, { "id": 6, "center": "Master D", "date": "Barcelona (2012)", "title": "Curso de programación en Android", "desc": "", "descUrl": "", "image": "masterd", "url": "https://www.masterd.es/curso-android" }, { "id": 7, "center": "Escola Politècnica Salesians Sarrià", "date": "Barcelona (2008 - 2010)", "title": "Técnico superior en Desarrollo de Aplicaciones Informáticas", "desc": "", "descUrl": "", "image": "epps", "url": "https://www.salesianssarria.com/grado-superior/desarrollo-aplicaciones-multiplataforma" }, { "id": 8, "center": "Escola Bit", "date": "Barcelona (2009)", "title": "Programación en Java", "desc": "", "descUrl": "", "image": "escolabit", "url": "https://www.bit.es/curso/desarrollo-de-aplicaciones-con-java-9-11/" }, { "id": 9, "center": "IES Esteve Terradas i Illa", "date": "Barcelona (2005 - 2007)", "title": "Técnico superior en Producción Editorial", "desc": "", "descUrl": "", "image": "esteve", "url": "https://agora.xtec.cat/iesesteveterradas/arts-grafiques/" } ], "optionMenu": "Educación" },
"misc": { "optionMenu": "Mi biblioteca", "optionMenu2": "Conferencias y Meetups", "books": [ { "id": 1, "title": "Clean Code: A Handbook of Agile Software Craftsmanship", "author": "Robert C. Martin" }, { "id": 2, "title": "The Pragmatic Programmer: From Journeyman to Master", "author": "Andrew Hunt, David Thomas" }, { "id": 3, "title": "Adaptive Code via C#: Class and Interface Design, Design Patterns, and SOLID Principles", "author": "Gary McLean Hall" }, { "id": 4, "title": "Test Driven Development: By Example", "author": "Kent Beck" }, { "id": 5, "title": "Head First Design Patterns", "author": "Elisabeth Freeman, Eric Freeman, Bert Bates" }, { "id": 6, "title": "The Software Craftsman: Professionalism, Pragmatism, Pride", "author": "Sandro Mancuso" }, { "id": 7, "title": "Pro ASP.NET Web API Security", "author": "Badrinarayanan Lakshmiraghavan" }, { "id": 8, "title": "Working Effectively with Legacy Code", "author": "Michael C. Feathers" } ], "conferences": [ { "id": 1, "name": "SOFTWARE CRAFTSMANSHIP LONDON", "desc": "", "date": "5-6 October 2017", "URL": "http://sc-london.com/" }, { "id": 2, "name": "CODENODE", "desc": "Cuando el tiempo lo permite asisto a eventos y workshops sobre diferentes materias (el de las promises en javascript estuvo muy bien, por ejemplo)", "date": "-", "URL": "https://skillsmatter.com/" }, { "id": 3, "name": "SOFTWARE CRAFTSMANSHIP BARCELONA", "desc": "Sigo los eventos del Barcelona Meetup Group", "date": "-", "URL": "https://www.meetup.com/Barcelona-Software-Craftsmanship/events/" }, { "id": 4, "name": "Codebar", "desc": "Actué como mentor en sus eventos para enseñar programación con javascript o SQL", "date": "-", "URL": "https://codebar.io/" } ] },
"socialMedia": { "optionMenu": "Contacto", "links": [ { "id": 1, "name": "GitHub", "URL": "http://github.com/apkouk", "image": "" }, { "id": 2, "name": "LinkedIn", "URL": "https://www.linkedIn.com/in/pacorosa", "image": "" }, { "id": 3, "name": "GMail", "URL": "https://www.linkedIn.com/in/pacorosa", "image": "" } ] },
"services": { "optionMenu": "Servicios", "title": "Como consultor autónomo ofrezco varios servicios a los clientes que pueden incluir:", "serviceItems": [ { "id": 2, "title": "Database Scripting", "desc": "A lo largo de los años he trabajado con varias bases de datos, incluida una amplia experiencia con Microsoft SQL Server y Oracle en el diseño de tablas y procedimientos almacenados. Puedo trabajar con bases de datos NoSql como CloudantDb o MongoDb.", "image": "database" }, { "id": 3, "title": "System Integration", "desc": "Mi experiencia incluye un extenso trabajo de integración de sistemas que a menudo ha requerido la capacidad de resolver problemas y trabajar con documentación ambigua o vaga relacionada con software y API de terceros. Puedo navegar por estas áreas para los clientes y desarrollar una solución funcional.", "image": "systemInt" }, { "id": 6, "title": "Team Management", "desc": "A lo largo de mi carrera, he dirigido pequeños equipos in situ, en el extranjero o distribuidos. También puedo orientar a los miembros del equipo junior y obtener la aceptación del equipo para que se adhieran a las mejores prácticas.", "image": "teamMan" }, { "id": 4, "title": "Error Fixing/Code Quality", "desc": "Uno de mis puntos fuertes es la capacidad de analizar código para identificar errores y áreas de mejora para el rendimiento o la mantenibilidad.", "image": "errorFix" }, { "id": 1, "title": "Software Development", "desc": "Durante diez años me he centrado en el trabajo de desarrollo de software y he creado muchas soluciones utilizando C#, ASP.NET, ASP.NET Core, ASP.NET Web API, Entity Framework, Microsoft SQL Server, jQuery, Angular, Typescript, HTML y CSS. Puedo contribuir al ciclo de vida completo de los proyectos de desarrollo de software desde los requisitos hasta las pruebas y la implementación.", "image": "softdev" }, { "id": 5, "title": "Software Process Improvement", "desc": "Puedo revisar el proceso de desarrollo de software para diseñar e implementar nuevos procesos y herramientas. Esto puede incluir implementaciones de nuevas metodologías de desarrollo de software (Agile, Scrum), soluciones de control de código (Git, Team Foundation Server) y buenas prácticas de la industria como revisiones de código.", "image": "softdevProc" } ] }
}
"tasks": [], "image": "wivi"
random_line_split
dwarfdebuginfo.rs
// Copyright 2021-2023 Vector 35 Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::helpers::{get_uid, resolve_specification, DieReference}; use binaryninja::{ binaryview::{BinaryView, BinaryViewBase}, debuginfo::{DebugFunctionInfo, DebugInfo}, rc::*, templatesimplifier::simplify_str_to_str, types::{Conf, FunctionParameter, Type}, }; use gimli::{DebuggingInformationEntry, Dwarf, Reader, Unit}; use log::error; use std::{ collections::{hash_map::Values, HashMap}, ffi::CString, hash::Hash, }; pub(crate) type TypeUID = usize; ///////////////////////// // FunctionInfoBuilder // TODO : Function local variables #[derive(PartialEq, Eq, Hash)] pub struct FunctionInfoBuilder { pub full_name: Option<CString>, pub raw_name: Option<CString>, pub return_type: Option<TypeUID>, pub address: Option<u64>, pub parameters: Vec<Option<(CString, TypeUID)>>, } impl FunctionInfoBuilder { pub fn update( &mut self, full_name: Option<CString>, raw_name: Option<CString>, return_type: Option<TypeUID>, address: Option<u64>, parameters: Vec<Option<(CString, TypeUID)>>, ) { if full_name.is_some() { self.full_name = full_name; } if raw_name.is_some() { self.raw_name = raw_name; } if return_type.is_some() { self.return_type = return_type; } if address.is_some() { self.address = address; } for (i, new_parameter) in parameters.into_iter().enumerate() { if let Some(old_parameter) = self.parameters.get(i) { if old_parameter.is_none() { self.parameters[i] = new_parameter; } } else { self.parameters.push(new_parameter); } } } } ////////////////////// // DebugInfoBuilder // TODO : Don't make this pub...fix the value thing pub(crate) struct DebugType { name: CString, t: Ref<Type>, commit: bool, } // DWARF info is stored and displayed in a tree, but is really a graph // The purpose of this builder is to help resolve those graph edges by mapping partial function // info and types to one DIE's UID (T) before adding the completed info to BN's debug info pub struct DebugInfoBuilder { functions: Vec<FunctionInfoBuilder>, types: HashMap<TypeUID, DebugType>, data_variables: HashMap<u64, (Option<CString>, TypeUID)>, names: HashMap<TypeUID, CString>, default_address_size: usize, } impl DebugInfoBuilder { pub fn new(view: &BinaryView) -> Self { DebugInfoBuilder { functions: vec![], types: HashMap::new(), data_variables: HashMap::new(), names: HashMap::new(), default_address_size: view.address_size(), } } pub fn default_address_size(&self) -> usize { self.default_address_size } #[allow(clippy::too_many_arguments)] pub fn insert_function( &mut self, full_name: Option<CString>, raw_name: Option<CString>, return_type: Option<TypeUID>, address: Option<u64>, parameters: Vec<Option<(CString, TypeUID)>>, ) { if let Some(function) = self.functions.iter_mut().find(|func| { (func.raw_name.is_some() && func.raw_name == raw_name) || (func.full_name.is_some() && func.full_name == full_name) }) { function.update(full_name, raw_name, return_type, address, parameters); } else { self.functions.push(FunctionInfoBuilder { full_name, raw_name, 
return_type, address, parameters, }); } } pub fn functions(&self) -> &[FunctionInfoBuilder] { &self.functions } pub(crate) fn types(&self) -> Values<'_, TypeUID, DebugType> { self.types.values() } pub fn add_type(&mut self, type_uid: TypeUID, name: CString, t: Ref<Type>, commit: bool) { if let Some(DebugType { name: existing_name, t: existing_type, commit: _, }) = self.types.insert( type_uid, DebugType { name: name.clone(), t: t.clone(), commit, }, ) { if existing_type != t { error!("DWARF info contains duplicate type definition. Overwriting type `{}` (named `{:?}`) with `{}` (named `{:?}`)", existing_type, existing_name, t, name ); } } } pub fn remove_type(&mut self, type_uid: TypeUID) { self.types.remove(&type_uid); } // TODO : Non-copy? pub fn get_type(&self, type_uid: TypeUID) -> Option<(CString, Ref<Type>)> { self.types .get(&type_uid) .map(|type_ref_ref| (type_ref_ref.name.clone(), type_ref_ref.t.clone())) } pub fn contains_type(&self, type_uid: TypeUID) -> bool { self.types.get(&type_uid).is_some() } pub fn add_data_variable(&mut self, address: u64, name: Option<CString>, type_uid: TypeUID) { if let Some((_existing_name, existing_type_uid)) = self.data_variables.insert(address, (name, type_uid)) { let existing_type = self.get_type(existing_type_uid).unwrap().1; let new_type = self.get_type(type_uid).unwrap().1; if existing_type_uid != type_uid || existing_type != new_type { error!("DWARF info contains duplicate data variable definition. Overwriting data variable at 0x{:08x} (`{}`) with `{}`", address, self.get_type(existing_type_uid).unwrap().1, self.get_type(type_uid).unwrap().1 ); } } } pub fn set_name(&mut self, die_uid: TypeUID, name: CString) { assert!(self.names.insert(die_uid, name).is_none()); } pub fn get_name<R: Reader<Offset = usize>>( &self, dwarf: &Dwarf<R>, unit: &Unit<R>, entry: &DebuggingInformationEntry<R>, ) -> Option<CString> { match resolve_specification(dwarf, unit, entry) { DieReference::Offset(entry_offset) => self .names .get(&get_uid(unit, &unit.entry(entry_offset).unwrap())) .cloned(), DieReference::UnitAndOffset((entry_unit, entry_offset)) => self .names .get(&get_uid( &entry_unit, &entry_unit.entry(entry_offset).unwrap(), )) .cloned(), } } fn commit_types(&self, debug_info: &mut DebugInfo) { for debug_type in self.types() { if debug_type.commit { debug_info.add_type(debug_type.name.clone(), debug_type.t.as_ref()); } } } // TODO : Consume data? fn commit_data_variables(&self, debug_info: &mut DebugInfo) { for (&address, (name, type_uid)) in &self.data_variables { assert!(debug_info.add_data_variable( address, &self.get_type(*type_uid).unwrap().1, name.clone() )); } } fn commit_functions(&self, debug_info: &mut DebugInfo) { for function in self.functions() { let return_type = match function.return_type { Some(return_type_id) => { Conf::new(self.get_type(return_type_id).unwrap().1.clone(), 0) } _ => Conf::new(binaryninja::types::Type::void(), 0), }; let parameters: Vec<FunctionParameter<CString>> = function .parameters .iter() .filter_map(|parameter| match parameter { Some((name, 0)) =>
Some((name, uid)) => Some(FunctionParameter::new( self.get_type(*uid).unwrap().1, name.clone(), None, )), _ => None, }) .collect(); // TODO : Handle let platform = None; let variable_parameters = false; // let calling_convention: Option<Ref<CallingConvention<CoreArchitecture>>> = None; let function_type = binaryninja::types::Type::function(&return_type, &parameters, variable_parameters); let simplified_full_name = function .full_name .as_ref() .map(|name| simplify_str_to_str(name.as_ref()).as_str().to_owned()) .map(|simp| CString::new(simp).unwrap()); debug_info.add_function(DebugFunctionInfo::new( simplified_full_name.clone(), simplified_full_name, // TODO : This should eventually be changed, but the "full_name" should probably be the unsimplified version, and the "short_name" should be the simplified version...currently the symbols view shows the full version, so changing it here too makes it look bad in the UI function.raw_name.clone(), Some(function_type), function.address, platform, )); } } pub fn commit_info(&self, debug_info: &mut DebugInfo) { self.commit_types(debug_info); self.commit_data_variables(debug_info); self.commit_functions(debug_info); } }
{ Some(FunctionParameter::new(Type::void(), name.clone(), None)) }
conditional_block
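The conditional_block held out in this record belongs to commit_functions: a parameter whose type UID is 0 is mapped to a void parameter instead of going through the type lookup. The surrounding FunctionInfoBuilder::update also shows the merge rule for parameter lists: a known slot is never overwritten, None gaps are filled, and extra parameters are appended. Below is a rough Python analogue of that merge rule; the function and variable names are illustrative, not part of the plugin.

def merge_parameters(old, new):
    # keep already-known slots, fill None gaps, append trailing extras
    merged = list(old)
    for i, param in enumerate(new):
        if i < len(merged):
            if merged[i] is None:
                merged[i] = param
        else:
            merged.append(param)
    return merged

# one DIE contributed a name, a later one contributed the missing slots
print(merge_parameters([("argc", 1), None], [None, ("argv", 2), ("envp", 3)]))
# -> [('argc', 1), ('argv', 2), ('envp', 3)]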
dwarfdebuginfo.rs
// Copyright 2021-2023 Vector 35 Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::helpers::{get_uid, resolve_specification, DieReference}; use binaryninja::{ binaryview::{BinaryView, BinaryViewBase}, debuginfo::{DebugFunctionInfo, DebugInfo}, rc::*, templatesimplifier::simplify_str_to_str, types::{Conf, FunctionParameter, Type}, }; use gimli::{DebuggingInformationEntry, Dwarf, Reader, Unit}; use log::error; use std::{ collections::{hash_map::Values, HashMap}, ffi::CString, hash::Hash, }; pub(crate) type TypeUID = usize; ///////////////////////// // FunctionInfoBuilder // TODO : Function local variables #[derive(PartialEq, Eq, Hash)] pub struct FunctionInfoBuilder { pub full_name: Option<CString>, pub raw_name: Option<CString>, pub return_type: Option<TypeUID>, pub address: Option<u64>, pub parameters: Vec<Option<(CString, TypeUID)>>, } impl FunctionInfoBuilder { pub fn update( &mut self, full_name: Option<CString>, raw_name: Option<CString>, return_type: Option<TypeUID>, address: Option<u64>, parameters: Vec<Option<(CString, TypeUID)>>, ) { if full_name.is_some() { self.full_name = full_name; } if raw_name.is_some() { self.raw_name = raw_name; } if return_type.is_some() { self.return_type = return_type; } if address.is_some() { self.address = address; } for (i, new_parameter) in parameters.into_iter().enumerate() { if let Some(old_parameter) = self.parameters.get(i) { if old_parameter.is_none() { self.parameters[i] = new_parameter; } } else { self.parameters.push(new_parameter); } } } } ////////////////////// // DebugInfoBuilder // TODO : Don't make this pub...fix the value thing pub(crate) struct DebugType { name: CString, t: Ref<Type>, commit: bool, } // DWARF info is stored and displayed in a tree, but is really a graph // The purpose of this builder is to help resolve those graph edges by mapping partial function // info and types to one DIE's UID (T) before adding the completed info to BN's debug info pub struct DebugInfoBuilder { functions: Vec<FunctionInfoBuilder>, types: HashMap<TypeUID, DebugType>, data_variables: HashMap<u64, (Option<CString>, TypeUID)>, names: HashMap<TypeUID, CString>, default_address_size: usize, } impl DebugInfoBuilder { pub fn new(view: &BinaryView) -> Self { DebugInfoBuilder { functions: vec![], types: HashMap::new(), data_variables: HashMap::new(), names: HashMap::new(), default_address_size: view.address_size(), } } pub fn default_address_size(&self) -> usize { self.default_address_size } #[allow(clippy::too_many_arguments)] pub fn insert_function( &mut self, full_name: Option<CString>, raw_name: Option<CString>, return_type: Option<TypeUID>, address: Option<u64>, parameters: Vec<Option<(CString, TypeUID)>>, ) { if let Some(function) = self.functions.iter_mut().find(|func| { (func.raw_name.is_some() && func.raw_name == raw_name) || (func.full_name.is_some() && func.full_name == full_name) }) { function.update(full_name, raw_name, return_type, address, parameters); } else { self.functions.push(FunctionInfoBuilder { full_name, raw_name, 
return_type, address, parameters, }); } } pub fn functions(&self) -> &[FunctionInfoBuilder] { &self.functions } pub(crate) fn types(&self) -> Values<'_, TypeUID, DebugType> { self.types.values() } pub fn add_type(&mut self, type_uid: TypeUID, name: CString, t: Ref<Type>, commit: bool) { if let Some(DebugType { name: existing_name, t: existing_type, commit: _, }) = self.types.insert( type_uid, DebugType { name: name.clone(), t: t.clone(), commit, }, ) { if existing_type != t { error!("DWARF info contains duplicate type definition. Overwriting type `{}` (named `{:?}`) with `{}` (named `{:?}`)", existing_type, existing_name, t, name ); } } } pub fn remove_type(&mut self, type_uid: TypeUID) { self.types.remove(&type_uid); } // TODO : Non-copy? pub fn get_type(&self, type_uid: TypeUID) -> Option<(CString, Ref<Type>)> { self.types .get(&type_uid) .map(|type_ref_ref| (type_ref_ref.name.clone(), type_ref_ref.t.clone())) } pub fn contains_type(&self, type_uid: TypeUID) -> bool { self.types.get(&type_uid).is_some() } pub fn add_data_variable(&mut self, address: u64, name: Option<CString>, type_uid: TypeUID)
pub fn set_name(&mut self, die_uid: TypeUID, name: CString) { assert!(self.names.insert(die_uid, name).is_none()); } pub fn get_name<R: Reader<Offset = usize>>( &self, dwarf: &Dwarf<R>, unit: &Unit<R>, entry: &DebuggingInformationEntry<R>, ) -> Option<CString> { match resolve_specification(dwarf, unit, entry) { DieReference::Offset(entry_offset) => self .names .get(&get_uid(unit, &unit.entry(entry_offset).unwrap())) .cloned(), DieReference::UnitAndOffset((entry_unit, entry_offset)) => self .names .get(&get_uid( &entry_unit, &entry_unit.entry(entry_offset).unwrap(), )) .cloned(), } } fn commit_types(&self, debug_info: &mut DebugInfo) { for debug_type in self.types() { if debug_type.commit { debug_info.add_type(debug_type.name.clone(), debug_type.t.as_ref()); } } } // TODO : Consume data? fn commit_data_variables(&self, debug_info: &mut DebugInfo) { for (&address, (name, type_uid)) in &self.data_variables { assert!(debug_info.add_data_variable( address, &self.get_type(*type_uid).unwrap().1, name.clone() )); } } fn commit_functions(&self, debug_info: &mut DebugInfo) { for function in self.functions() { let return_type = match function.return_type { Some(return_type_id) => { Conf::new(self.get_type(return_type_id).unwrap().1.clone(), 0) } _ => Conf::new(binaryninja::types::Type::void(), 0), }; let parameters: Vec<FunctionParameter<CString>> = function .parameters .iter() .filter_map(|parameter| match parameter { Some((name, 0)) => { Some(FunctionParameter::new(Type::void(), name.clone(), None)) } Some((name, uid)) => Some(FunctionParameter::new( self.get_type(*uid).unwrap().1, name.clone(), None, )), _ => None, }) .collect(); // TODO : Handle let platform = None; let variable_parameters = false; // let calling_convention: Option<Ref<CallingConvention<CoreArchitecture>>> = None; let function_type = binaryninja::types::Type::function(&return_type, &parameters, variable_parameters); let simplified_full_name = function .full_name .as_ref() .map(|name| simplify_str_to_str(name.as_ref()).as_str().to_owned()) .map(|simp| CString::new(simp).unwrap()); debug_info.add_function(DebugFunctionInfo::new( simplified_full_name.clone(), simplified_full_name, // TODO : This should eventually be changed, but the "full_name" should probably be the unsimplified version, and the "short_name" should be the simplified version...currently the symbols view shows the full version, so changing it here too makes it look bad in the UI function.raw_name.clone(), Some(function_type), function.address, platform, )); } } pub fn commit_info(&self, debug_info: &mut DebugInfo) { self.commit_types(debug_info); self.commit_data_variables(debug_info); self.commit_functions(debug_info); } }
{ if let Some((_existing_name, existing_type_uid)) = self.data_variables.insert(address, (name, type_uid)) { let existing_type = self.get_type(existing_type_uid).unwrap().1; let new_type = self.get_type(type_uid).unwrap().1; if existing_type_uid != type_uid || existing_type != new_type { error!("DWARF info contains duplicate data variable definition. Overwriting data variable at 0x{:08x} (`{}`) with `{}`", address, self.get_type(existing_type_uid).unwrap().1, self.get_type(type_uid).unwrap().1 ); } } }
identifier_body
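The identifier_body filled in here is the body of add_data_variable, which leans on the fact that HashMap::insert returns the previous value for the key, so a conflicting duplicate definition can be detected and logged in one step. The same idiom in Python with a plain dict, simplified to comparing the type UID only (a sketch for illustration; the names are made up and this is not the plugin's API):

def add_data_variable(data_variables, address, name, type_uid):
    # dict.get plays the role of HashMap::insert's returned old value
    previous = data_variables.get(address)
    data_variables[address] = (name, type_uid)
    if previous is not None and previous[1] != type_uid:
        print("duplicate data variable at 0x%08x: type %s overwritten by %s"
              % (address, previous[1], type_uid))

by_address = {}
add_data_variable(by_address, 0x1000, "g_count", 7)
add_data_variable(by_address, 0x1000, "g_count", 9)  # logs the conflict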
dwarfdebuginfo.rs
// Copyright 2021-2023 Vector 35 Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::helpers::{get_uid, resolve_specification, DieReference}; use binaryninja::{ binaryview::{BinaryView, BinaryViewBase}, debuginfo::{DebugFunctionInfo, DebugInfo}, rc::*, templatesimplifier::simplify_str_to_str, types::{Conf, FunctionParameter, Type}, }; use gimli::{DebuggingInformationEntry, Dwarf, Reader, Unit}; use log::error; use std::{ collections::{hash_map::Values, HashMap}, ffi::CString, hash::Hash, }; pub(crate) type TypeUID = usize; ///////////////////////// // FunctionInfoBuilder // TODO : Function local variables #[derive(PartialEq, Eq, Hash)] pub struct FunctionInfoBuilder { pub full_name: Option<CString>, pub raw_name: Option<CString>, pub return_type: Option<TypeUID>, pub address: Option<u64>, pub parameters: Vec<Option<(CString, TypeUID)>>, } impl FunctionInfoBuilder { pub fn update( &mut self, full_name: Option<CString>, raw_name: Option<CString>, return_type: Option<TypeUID>, address: Option<u64>, parameters: Vec<Option<(CString, TypeUID)>>, ) { if full_name.is_some() { self.full_name = full_name; } if raw_name.is_some() { self.raw_name = raw_name; } if return_type.is_some() { self.return_type = return_type; } if address.is_some() { self.address = address; } for (i, new_parameter) in parameters.into_iter().enumerate() { if let Some(old_parameter) = self.parameters.get(i) { if old_parameter.is_none() { self.parameters[i] = new_parameter; } } else { self.parameters.push(new_parameter); } } } } ////////////////////// // DebugInfoBuilder // TODO : Don't make this pub...fix the value thing pub(crate) struct DebugType { name: CString, t: Ref<Type>, commit: bool, } // DWARF info is stored and displayed in a tree, but is really a graph // The purpose of this builder is to help resolve those graph edges by mapping partial function // info and types to one DIE's UID (T) before adding the completed info to BN's debug info pub struct DebugInfoBuilder { functions: Vec<FunctionInfoBuilder>, types: HashMap<TypeUID, DebugType>, data_variables: HashMap<u64, (Option<CString>, TypeUID)>, names: HashMap<TypeUID, CString>, default_address_size: usize, } impl DebugInfoBuilder { pub fn
(view: &BinaryView) -> Self { DebugInfoBuilder { functions: vec![], types: HashMap::new(), data_variables: HashMap::new(), names: HashMap::new(), default_address_size: view.address_size(), } } pub fn default_address_size(&self) -> usize { self.default_address_size } #[allow(clippy::too_many_arguments)] pub fn insert_function( &mut self, full_name: Option<CString>, raw_name: Option<CString>, return_type: Option<TypeUID>, address: Option<u64>, parameters: Vec<Option<(CString, TypeUID)>>, ) { if let Some(function) = self.functions.iter_mut().find(|func| { (func.raw_name.is_some() && func.raw_name == raw_name) || (func.full_name.is_some() && func.full_name == full_name) }) { function.update(full_name, raw_name, return_type, address, parameters); } else { self.functions.push(FunctionInfoBuilder { full_name, raw_name, return_type, address, parameters, }); } } pub fn functions(&self) -> &[FunctionInfoBuilder] { &self.functions } pub(crate) fn types(&self) -> Values<'_, TypeUID, DebugType> { self.types.values() } pub fn add_type(&mut self, type_uid: TypeUID, name: CString, t: Ref<Type>, commit: bool) { if let Some(DebugType { name: existing_name, t: existing_type, commit: _, }) = self.types.insert( type_uid, DebugType { name: name.clone(), t: t.clone(), commit, }, ) { if existing_type != t { error!("DWARF info contains duplicate type definition. Overwriting type `{}` (named `{:?}`) with `{}` (named `{:?}`)", existing_type, existing_name, t, name ); } } } pub fn remove_type(&mut self, type_uid: TypeUID) { self.types.remove(&type_uid); } // TODO : Non-copy? pub fn get_type(&self, type_uid: TypeUID) -> Option<(CString, Ref<Type>)> { self.types .get(&type_uid) .map(|type_ref_ref| (type_ref_ref.name.clone(), type_ref_ref.t.clone())) } pub fn contains_type(&self, type_uid: TypeUID) -> bool { self.types.get(&type_uid).is_some() } pub fn add_data_variable(&mut self, address: u64, name: Option<CString>, type_uid: TypeUID) { if let Some((_existing_name, existing_type_uid)) = self.data_variables.insert(address, (name, type_uid)) { let existing_type = self.get_type(existing_type_uid).unwrap().1; let new_type = self.get_type(type_uid).unwrap().1; if existing_type_uid != type_uid || existing_type != new_type { error!("DWARF info contains duplicate data variable definition. Overwriting data variable at 0x{:08x} (`{}`) with `{}`", address, self.get_type(existing_type_uid).unwrap().1, self.get_type(type_uid).unwrap().1 ); } } } pub fn set_name(&mut self, die_uid: TypeUID, name: CString) { assert!(self.names.insert(die_uid, name).is_none()); } pub fn get_name<R: Reader<Offset = usize>>( &self, dwarf: &Dwarf<R>, unit: &Unit<R>, entry: &DebuggingInformationEntry<R>, ) -> Option<CString> { match resolve_specification(dwarf, unit, entry) { DieReference::Offset(entry_offset) => self .names .get(&get_uid(unit, &unit.entry(entry_offset).unwrap())) .cloned(), DieReference::UnitAndOffset((entry_unit, entry_offset)) => self .names .get(&get_uid( &entry_unit, &entry_unit.entry(entry_offset).unwrap(), )) .cloned(), } } fn commit_types(&self, debug_info: &mut DebugInfo) { for debug_type in self.types() { if debug_type.commit { debug_info.add_type(debug_type.name.clone(), debug_type.t.as_ref()); } } } // TODO : Consume data? 
fn commit_data_variables(&self, debug_info: &mut DebugInfo) { for (&address, (name, type_uid)) in &self.data_variables { assert!(debug_info.add_data_variable( address, &self.get_type(*type_uid).unwrap().1, name.clone() )); } } fn commit_functions(&self, debug_info: &mut DebugInfo) { for function in self.functions() { let return_type = match function.return_type { Some(return_type_id) => { Conf::new(self.get_type(return_type_id).unwrap().1.clone(), 0) } _ => Conf::new(binaryninja::types::Type::void(), 0), }; let parameters: Vec<FunctionParameter<CString>> = function .parameters .iter() .filter_map(|parameter| match parameter { Some((name, 0)) => { Some(FunctionParameter::new(Type::void(), name.clone(), None)) } Some((name, uid)) => Some(FunctionParameter::new( self.get_type(*uid).unwrap().1, name.clone(), None, )), _ => None, }) .collect(); // TODO : Handle let platform = None; let variable_parameters = false; // let calling_convention: Option<Ref<CallingConvention<CoreArchitecture>>> = None; let function_type = binaryninja::types::Type::function(&return_type, &parameters, variable_parameters); let simplified_full_name = function .full_name .as_ref() .map(|name| simplify_str_to_str(name.as_ref()).as_str().to_owned()) .map(|simp| CString::new(simp).unwrap()); debug_info.add_function(DebugFunctionInfo::new( simplified_full_name.clone(), simplified_full_name, // TODO : This should eventually be changed, but the "full_name" should probably be the unsimplified version, and the "short_name" should be the simplified version...currently the symbols view shows the full version, so changing it here too makes it look bad in the UI function.raw_name.clone(), Some(function_type), function.address, platform, )); } } pub fn commit_info(&self, debug_info: &mut DebugInfo) { self.commit_types(debug_info); self.commit_data_variables(debug_info); self.commit_functions(debug_info); } }
new
identifier_name
dwarfdebuginfo.rs
// Copyright 2021-2023 Vector 35 Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::helpers::{get_uid, resolve_specification, DieReference}; use binaryninja::{ binaryview::{BinaryView, BinaryViewBase}, debuginfo::{DebugFunctionInfo, DebugInfo}, rc::*, templatesimplifier::simplify_str_to_str, types::{Conf, FunctionParameter, Type}, }; use gimli::{DebuggingInformationEntry, Dwarf, Reader, Unit}; use log::error; use std::{ collections::{hash_map::Values, HashMap}, ffi::CString, hash::Hash, }; pub(crate) type TypeUID = usize; ///////////////////////// // FunctionInfoBuilder // TODO : Function local variables #[derive(PartialEq, Eq, Hash)] pub struct FunctionInfoBuilder { pub full_name: Option<CString>, pub raw_name: Option<CString>, pub return_type: Option<TypeUID>, pub address: Option<u64>, pub parameters: Vec<Option<(CString, TypeUID)>>, } impl FunctionInfoBuilder { pub fn update( &mut self, full_name: Option<CString>, raw_name: Option<CString>, return_type: Option<TypeUID>, address: Option<u64>, parameters: Vec<Option<(CString, TypeUID)>>, ) { if full_name.is_some() { self.full_name = full_name; } if raw_name.is_some() { self.raw_name = raw_name; } if return_type.is_some() { self.return_type = return_type; } if address.is_some() { self.address = address; } for (i, new_parameter) in parameters.into_iter().enumerate() { if let Some(old_parameter) = self.parameters.get(i) { if old_parameter.is_none() { self.parameters[i] = new_parameter; } } else { self.parameters.push(new_parameter); } } } } ////////////////////// // DebugInfoBuilder // TODO : Don't make this pub...fix the value thing pub(crate) struct DebugType { name: CString, t: Ref<Type>, commit: bool, } // DWARF info is stored and displayed in a tree, but is really a graph // The purpose of this builder is to help resolve those graph edges by mapping partial function // info and types to one DIE's UID (T) before adding the completed info to BN's debug info pub struct DebugInfoBuilder { functions: Vec<FunctionInfoBuilder>, types: HashMap<TypeUID, DebugType>, data_variables: HashMap<u64, (Option<CString>, TypeUID)>, names: HashMap<TypeUID, CString>, default_address_size: usize, } impl DebugInfoBuilder { pub fn new(view: &BinaryView) -> Self { DebugInfoBuilder { functions: vec![], types: HashMap::new(), data_variables: HashMap::new(), names: HashMap::new(), default_address_size: view.address_size(), } } pub fn default_address_size(&self) -> usize { self.default_address_size } #[allow(clippy::too_many_arguments)] pub fn insert_function(
raw_name: Option<CString>, return_type: Option<TypeUID>, address: Option<u64>, parameters: Vec<Option<(CString, TypeUID)>>, ) { if let Some(function) = self.functions.iter_mut().find(|func| { (func.raw_name.is_some() && func.raw_name == raw_name) || (func.full_name.is_some() && func.full_name == full_name) }) { function.update(full_name, raw_name, return_type, address, parameters); } else { self.functions.push(FunctionInfoBuilder { full_name, raw_name, return_type, address, parameters, }); } } pub fn functions(&self) -> &[FunctionInfoBuilder] { &self.functions } pub(crate) fn types(&self) -> Values<'_, TypeUID, DebugType> { self.types.values() } pub fn add_type(&mut self, type_uid: TypeUID, name: CString, t: Ref<Type>, commit: bool) { if let Some(DebugType { name: existing_name, t: existing_type, commit: _, }) = self.types.insert( type_uid, DebugType { name: name.clone(), t: t.clone(), commit, }, ) { if existing_type != t { error!("DWARF info contains duplicate type definition. Overwriting type `{}` (named `{:?}`) with `{}` (named `{:?}`)", existing_type, existing_name, t, name ); } } } pub fn remove_type(&mut self, type_uid: TypeUID) { self.types.remove(&type_uid); } // TODO : Non-copy? pub fn get_type(&self, type_uid: TypeUID) -> Option<(CString, Ref<Type>)> { self.types .get(&type_uid) .map(|type_ref_ref| (type_ref_ref.name.clone(), type_ref_ref.t.clone())) } pub fn contains_type(&self, type_uid: TypeUID) -> bool { self.types.get(&type_uid).is_some() } pub fn add_data_variable(&mut self, address: u64, name: Option<CString>, type_uid: TypeUID) { if let Some((_existing_name, existing_type_uid)) = self.data_variables.insert(address, (name, type_uid)) { let existing_type = self.get_type(existing_type_uid).unwrap().1; let new_type = self.get_type(type_uid).unwrap().1; if existing_type_uid != type_uid || existing_type != new_type { error!("DWARF info contains duplicate data variable definition. Overwriting data variable at 0x{:08x} (`{}`) with `{}`", address, self.get_type(existing_type_uid).unwrap().1, self.get_type(type_uid).unwrap().1 ); } } } pub fn set_name(&mut self, die_uid: TypeUID, name: CString) { assert!(self.names.insert(die_uid, name).is_none()); } pub fn get_name<R: Reader<Offset = usize>>( &self, dwarf: &Dwarf<R>, unit: &Unit<R>, entry: &DebuggingInformationEntry<R>, ) -> Option<CString> { match resolve_specification(dwarf, unit, entry) { DieReference::Offset(entry_offset) => self .names .get(&get_uid(unit, &unit.entry(entry_offset).unwrap())) .cloned(), DieReference::UnitAndOffset((entry_unit, entry_offset)) => self .names .get(&get_uid( &entry_unit, &entry_unit.entry(entry_offset).unwrap(), )) .cloned(), } } fn commit_types(&self, debug_info: &mut DebugInfo) { for debug_type in self.types() { if debug_type.commit { debug_info.add_type(debug_type.name.clone(), debug_type.t.as_ref()); } } } // TODO : Consume data? 
fn commit_data_variables(&self, debug_info: &mut DebugInfo) { for (&address, (name, type_uid)) in &self.data_variables { assert!(debug_info.add_data_variable( address, &self.get_type(*type_uid).unwrap().1, name.clone() )); } } fn commit_functions(&self, debug_info: &mut DebugInfo) { for function in self.functions() { let return_type = match function.return_type { Some(return_type_id) => { Conf::new(self.get_type(return_type_id).unwrap().1.clone(), 0) } _ => Conf::new(binaryninja::types::Type::void(), 0), }; let parameters: Vec<FunctionParameter<CString>> = function .parameters .iter() .filter_map(|parameter| match parameter { Some((name, 0)) => { Some(FunctionParameter::new(Type::void(), name.clone(), None)) } Some((name, uid)) => Some(FunctionParameter::new( self.get_type(*uid).unwrap().1, name.clone(), None, )), _ => None, }) .collect(); // TODO : Handle let platform = None; let variable_parameters = false; // let calling_convention: Option<Ref<CallingConvention<CoreArchitecture>>> = None; let function_type = binaryninja::types::Type::function(&return_type, &parameters, variable_parameters); let simplified_full_name = function .full_name .as_ref() .map(|name| simplify_str_to_str(name.as_ref()).as_str().to_owned()) .map(|simp| CString::new(simp).unwrap()); debug_info.add_function(DebugFunctionInfo::new( simplified_full_name.clone(), simplified_full_name, // TODO : This should eventually be changed, but the "full_name" should probably be the unsimplified version, and the "short_name" should be the simplified version...currently the symbols view shows the full version, so changing it here too makes it look bad in the UI function.raw_name.clone(), Some(function_type), function.address, platform, )); } } pub fn commit_info(&self, debug_info: &mut DebugInfo) { self.commit_types(debug_info); self.commit_data_variables(debug_info); self.commit_functions(debug_info); } }
main.go
package main import ( "errors" "flag" "fmt" "io" "io/ioutil" "log" "math" "os" "path/filepath" "regexp" "sort" "strconv" "strings" "github.com/oxplot/papersizes" "github.com/oxplot/pdftilecut/qpdf" ) const ( ptsInInch = 72 mmInInch = 25.4 mmInCm = 10 bleedMargin = ptsInInch * 5 / 6 // in pt from media box trimMargin = ptsInInch / 6 // in pt from bleed box trimMarkLineWidth = 0.5 // in pt // Min page size in mm minPageDimension = (bleedMargin + trimMargin + trimMarkLineWidth) * 2 * mmInInch / ptsInInch ) type tileSizeFlag struct { name string // in millimeters width float32 height float32 isDim bool } func (v *tileSizeFlag) String() string { if v.isDim { return fmt.Sprintf("%.0fmm x %.0fmm", v.width, v.height) } return fmt.Sprintf("%s (%.0fmm x %.0fmm)", v.name, v.width, v.height) } func (v *tileSizeFlag) Set(s string) error { // unit to mm ratios unitsToMillimeter := map[string]float32{ "mm": 1, "cm": mmInCm, "in": mmInInch, "pt": mmInInch / ptsInInch, } // known paper sizes size := papersizes.FromName(s) if size != nil { v.name = size.Name v.width = float32(size.Width) v.height = float32(size.Height) v.isDim = false } else { // w x h dimensions dimRe := regexp.MustCompile(`^\s*(\d+(?:\.\d+)?)\s*(mm|cm|in|pt)\s*x\s*(\d+(?:\.\d+)?)\s*(mm|cm|in|pt)\s*$`) parts := dimRe.FindStringSubmatch(s) if parts == nil { return errors.New("invalid tile size") } v.name = parts[1] + parts[2] + "x" + parts[3] + parts[4] w, _ := strconv.ParseFloat(parts[1], 32) v.width = float32(w) * unitsToMillimeter[parts[2]] h, _ := strconv.ParseFloat(parts[3], 32) v.height = float32(h) * unitsToMillimeter[parts[4]] v.isDim = true } if v.width < minPageDimension || v.height < minPageDimension { return fmt.Errorf("min. tile dimension is %fmm x %fmm", minPageDimension, minPageDimension) } return nil } var ( inputFile = flag.String("in", "-", "input PDF") outputFile = flag.String("out", "-", "output PDF") tileTitle = flag.String("title", "", "title to show on margin of each tile (defaults to input filename)") debugMode = flag.Bool("debug", false, "run in debug mode") longTrimMarks = flag.Bool("long-trim-marks", false, "Use full width/height trim marks") tileSize tileSizeFlag ) func init() { _ = tileSize.Set("A4") flag.Var(&tileSize, "tile-size", "maximum size including margin - can be a standard paper size (eg A5), or width x height dimension with a unit (mm, cm, in, pt) (e.g. 
6cm x 12in)") } // getNextFreeObjectID returns the largest object id in the document + 1 func getNextFreeObjectID(d string) (int, error) { m := regexp.MustCompile(`(?m)^xref\s+\d+\s+(\d+)`).FindStringSubmatch(d) if m == nil { return 0, fmt.Errorf("cannot find the next free object id") } return strconv.Atoi(m[1]) } type rect struct { // ll = lower left // ur = upper right llx, lly, urx, ury float32 } func (r rect) isValid() bool { return r.llx <= r.urx && r.lly <= r.ury } type page struct { id int number int tileX int tileY int mediaBox rect cropBox rect bleedBox rect trimBox rect contentIds []int parentID int raw string } var ( boxReTpl = `(?m)^\s+/%s\s*\[\s*(-?[\d.]+)\s+(-?[\d.]+)\s+(-?[\d.]+)\s+(-?[\d.]+)\s*\]` bleedBoxRe = regexp.MustCompile(fmt.Sprintf(boxReTpl, "BleedBox")) cropBoxRe = regexp.MustCompile(fmt.Sprintf(boxReTpl, "CropBox")) mediaBoxRe = regexp.MustCompile(fmt.Sprintf(boxReTpl, "MediaBox")) trimBoxRe = regexp.MustCompile(fmt.Sprintf(boxReTpl, "TrimBox")) contentsRe = regexp.MustCompile(`(?m)^\s+/Contents\s+(?:(\d+)|\[([^\]]*))`) pageObjRmRe = regexp.MustCompile( `(?m)^\s+/((Bleed|Crop|Media|Trim|Art)Box|Contents|Parent)\s+(\[[^\]]+\]|\d+\s+\d+\s+R)\n`) ) // marshal serializes the page to string that can be inserted into // PDF document. func (p *page) marshal() string { b := &strings.Builder{} fmt.Fprintf(b, "\n%d 0 obj\n<<\n", p.id) fmt.Fprintf(b, " /MediaBox [ %f %f %f %f ]\n", p.mediaBox.llx, p.mediaBox.lly, p.mediaBox.urx, p.mediaBox.ury) fmt.Fprintf(b, " /CropBox [ %f %f %f %f ]\n", p.cropBox.llx, p.cropBox.lly, p.cropBox.urx, p.cropBox.ury) fmt.Fprintf(b, " /BleedBox [ %f %f %f %f ]\n", p.bleedBox.llx, p.bleedBox.lly, p.bleedBox.urx, p.bleedBox.ury) fmt.Fprintf(b, " /TrimBox [ %f %f %f %f ]\n", p.trimBox.llx, p.trimBox.lly, p.trimBox.urx, p.trimBox.ury) fmt.Fprintf(b, " /Contents [ ") for _, cid := range p.contentIds { fmt.Fprintf(b, " %d 0 R ", cid) } fmt.Fprintf(b, " ]\n") fmt.Fprintf(b, " /Parent %d 0 R\n", p.parentID) b.WriteString(p.raw) fmt.Fprintf(b, "\n>>\nendobj\n") return b.String() } // extractAttrs extracts interesting attributes of the page into // struct elements and removes them from raw string of the page. 
func (p *page) extractAttrs() error { atoi := func(s string) int { i, err := strconv.Atoi(s) if err != nil { panic(err) } return i } atof := func(s string) float32 { f, err := strconv.ParseFloat(s, 32) if err != nil { panic(err) } return float32(f) } var m []string m = contentsRe.FindStringSubmatch(p.raw) if m == nil { return fmt.Errorf("cannot find Contents for page:\n%s", p.raw) } if m[1] != "" { p.contentIds = []int{atoi(m[1])} } else { m := regexp.MustCompile(`(?m)^\s+(\d+)\s+\d+\s+R`).FindAllStringSubmatch(m[2], -1) p.contentIds = []int{} for _, r := range m { p.contentIds = append(p.contentIds, atoi(r[1])) } } m = mediaBoxRe.FindStringSubmatch(p.raw) if m == nil { return fmt.Errorf("cannot find MediaBox for page:\n%s", p.raw) } p.mediaBox = rect{atof(m[1]), atof(m[2]), atof(m[3]), atof(m[4])} if !p.mediaBox.isValid() { return fmt.Errorf("invalid MediaBox for page:\n%s", p.raw) } m = cropBoxRe.FindStringSubmatch(p.raw) if m == nil { p.cropBox = p.mediaBox } else { p.cropBox = rect{atof(m[1]), atof(m[2]), atof(m[3]), atof(m[4])} } if !p.cropBox.isValid() { return fmt.Errorf("invalid CropBox for page:\n%s", p.raw) } m = bleedBoxRe.FindStringSubmatch(p.raw) if m == nil { p.bleedBox = p.cropBox } else { p.bleedBox = rect{atof(m[1]), atof(m[2]), atof(m[3]), atof(m[4])} } if !p.bleedBox.isValid() { return fmt.Errorf("invalid BleedBox for page:\n%s", p.raw) } m = trimBoxRe.FindStringSubmatch(p.raw) if m == nil { p.trimBox = p.cropBox } else { p.trimBox = rect{atof(m[1]), atof(m[2]), atof(m[3]), atof(m[4])} } if !p.trimBox.isValid() { return fmt.Errorf("invalid TrimBox for page:\n%s", p.raw) } // Delete all the extracted raw content p.raw = pageObjRmRe.ReplaceAllString(p.raw, "") return nil } // cutPageToTiles slices the page into tiles of the given size, setting // appropriate *Box attributes of the tiles. All other page attributes // are copied from the original page. func cutPageToTiles(p *page, tileW, tileH, bleedMargin, trimMargin float32) []*page { // Adjust tileW and tileH such that all tiles end up with the same dimensions pageWidth := p.trimBox.urx - p.trimBox.llx pageHeight := p.trimBox.ury - p.trimBox.lly hTiles := int(math.Ceil(float64(pageWidth / tileW))) vTiles := int(math.Ceil(float64(pageHeight / tileH))) tileW = pageWidth / float32(hTiles) tileH = pageHeight / float32(vTiles) var tilePages []*page tgy := 0 for y := 0; y < vTiles; y++ { lly := p.trimBox.lly + float32(y)*tileH tgx := 0 for x := 0; x < hTiles; x++ { llx := p.trimBox.llx + float32(x)*tileW tile := page{ tileX: tgx, tileY: tgy, mediaBox: rect{ llx - trimMargin - bleedMargin, lly - trimMargin - bleedMargin, llx + tileW + trimMargin + bleedMargin, lly + tileH + trimMargin + bleedMargin, }, bleedBox: rect{llx - trimMargin, lly - trimMargin, llx + tileW + trimMargin, lly + tileH + trimMargin}, trimBox: rect{llx, lly, llx + tileW, lly + tileH}, number: p.number, contentIds: append([]int{}, p.contentIds...), raw: p.raw, } tile.cropBox = tile.mediaBox tilePages = append(tilePages, &tile) tgx++ } tgy++ } return tilePages } // appendPagesToDoc appends the given pages after all the other objects // but before the xref block. It also updates the object ids as it goes // starting with startID. 
func appendPagesToDoc(d string, startID int, pages []*page) string { var b strings.Builder for pi, p := range pages { p.id = pi + startID b.WriteString(p.marshal()) } return strings.Replace(d, "\nxref\n", "\n"+b.String()+"\n\nxref\n", 1) } // replaceAllDocPagesWith updates the first node of the page tree with array // containing references to the given pages, effectively replacing all // the existing page trees. func replaceAllDocPagesWith(d string, pages []*page, pageTreeID int) string { b := &strings.Builder{} for _, p := range pages { fmt.Fprintf(b, "%d 0 R\n", p.id) } // Replace the count r := regexp.MustCompile(fmt.Sprintf(`(?ms)^(%d 0 obj\n.*?^\s+/Count\s+)\d+`, pageTreeID)) d = r.ReplaceAllString(d, fmt.Sprintf(`${1}%d`, len(pages))) // Replace page references r = regexp.MustCompile(fmt.Sprintf(`(?ms)^(%d 0 obj\n.*?^\s+/Kids\s+\[)[^\]]*`, pageTreeID)) d = r.ReplaceAllString(d, fmt.Sprintf(`${1} %s `, b.String())) return d } // getAllPages returns all the page objects in the document in order // they appear in input. func getAllPages(d string) []*page { pages := []*page{} // Match all the pages pageRe := regexp.MustCompile(`(?ms)^%% Page (\d+)\n%%[^\n]*\n\d+\s+\d+\s+obj\n<<\n(.*?)\n^>>\n^endobj`) pageM := pageRe.FindAllStringSubmatch(d, -1) for _, pm := range pageM { pNum, _ := strconv.Atoi(pm[1]) p := page{number: pNum, raw: pm[2]} if err := p.extractAttrs(); err != nil { log.Print(err) continue } pages = append(pages, &p) } return pages } // numToDoubleAlpha converts a given integer to a 26 base number // system with digits each between A-Z func numToAlpha(n int) string { s := []byte(strconv.FormatInt(int64(n), 26)) for i, c := range s { if c < 'a' { s[i] = byte('A' + (c - '0')) } else { s[i] = byte('A' + 10 + (c - 'a')) } } return string(s) } // createOverlayForPage returns a PDF object which contains: // - white opaque margin up to bleedMargin // - trim marks up to bleedMargin // - other printmarks such as tile/page number // This will update the contentIds of the page to include a ref // to the new overlay object. 
func createOverlayForPage(overlayID int, p *page) string { mb, bb, tb := p.mediaBox, p.bleedBox, p.trimBox // Draw opaque bleed margin stream := fmt.Sprintf(` q 1 1 1 rg %f %f m %f %f l %f %f l %f %f l h %f %f m %f %f l %f %f l %f %f l h f Q `, // +1s and -1s are to bleed the box outside of viewpoint mb.llx-1, mb.lly-1, mb.llx-1, mb.ury+1, mb.urx+1, mb.ury+1, mb.urx+1, mb.lly-1, bb.llx, bb.lly, bb.urx, bb.lly, bb.urx, bb.ury, bb.llx, bb.ury, ) // Draw trim marks if !*longTrimMarks { stream += fmt.Sprintf(` q 0 0 0 rg %f w %f %f m %f %f l S %f %f m %f %f l S %f %f m %f %f l S %f %f m %f %f l S %f %f m %f %f l S %f %f m %f %f l S %f %f m %f %f l S %f %f m %f %f l S Q `, trimMarkLineWidth, mb.llx-1, tb.lly, bb.llx, tb.lly, mb.llx-1, tb.ury, bb.llx, tb.ury, tb.llx, mb.ury+1, tb.llx, bb.ury, tb.urx, mb.ury+1, tb.urx, bb.ury, bb.urx, tb.ury, mb.urx+1, tb.ury, bb.urx, tb.lly, mb.urx+1, tb.lly, tb.llx, bb.lly, tb.llx, mb.lly-1, tb.urx, bb.lly, tb.urx, mb.lly-1, ) } else { stream += fmt.Sprintf(` q 0 0 0 rg %f w %f %f m %f %f l S %f %f m %f %f l S %f %f m %f %f l S %f %f m %f %f l S Q `, trimMarkLineWidth, mb.llx-1, tb.lly, mb.urx+1, tb.lly, // bottom trim line mb.llx-1, tb.ury, mb.urx+1, tb.ury, // top trim line tb.llx, mb.lly-1, tb.llx, mb.ury+1, // left trim line tb.urx, mb.lly-1, tb.urx, mb.ury+1, // right trim line ) } // Draw tile ref vch := float32(vecCharHeight) stream += fmt.Sprintf(` q 0 0 0 rg q 1 0 0 1 %f %f cm %s Q q 1 0 0 1 %f %f cm %s Q Q q 0 0 0 rg %f w 2 J %f %f m %f %f l S %f %f m %f %f l S %f %f m %f %f l %f %f l h f %f %f m %f %f l %f %f l h f Q `, bb.urx, bb.ury+vch/2, strToVecChars(numToAlpha(p.tileY), -1, 1), bb.urx+vch/2, bb.ury, strToVecChars(strconv.Itoa(p.tileX+1), 1, -1), trimMarkLineWidth, bb.urx+vch/2, bb.ury+vch/2, bb.urx+vch/2, bb.ury+vch*1.5, bb.urx+vch/2, bb.ury+vch/2, bb.urx+vch*1.5, bb.ury+vch/2, bb.urx+vch/4, bb.ury+vch*1.5, bb.urx+vch*3/4, bb.ury+vch*1.5, bb.urx+vch/2, bb.ury+vch*2, bb.urx+vch*1.5, bb.ury+vch/4, bb.urx+vch*1.5, bb.ury+vch*3/4, bb.urx+vch*2, bb.ury+vch/2, ) // Draw page ref stream += fmt.Sprintf(` q 0 0 0 rg q 1 0 0 1 %f %f cm %s Q q 1 0 0 1 %f %f cm %s Q Q `, tb.llx-vch/2, bb.ury+vch/2, strToVecChars(strconv.Itoa(p.number), -1, 1), bb.llx-vch/2, bb.ury, strToVecChars("PAGE", -1, -1), ) // Draw page title stream += fmt.Sprintf(` q 0 0 0 rg q 1 0 0 1 %f %f cm %s Q Q `, tb.llx+vch/2, bb.lly-vch/2, strToVecChars(*tileTitle, 1, -1), ) p.contentIds = append(p.contentIds, overlayID) return fmt.Sprintf("%d 0 obj\n<< /Length %d >> stream\n%sendstream\nendobj\n", overlayID, len(stream), stream) } func process() error { // Convert to QDF form data, err := convertToQDF(*inputFile) if err != nil { return err } // Get the root page tree object id m := regexp.MustCompile(`(?m)^\s+/Pages\s+(\d+)\s+\d+\s+R`).FindStringSubmatch(data) if m == nil { return fmt.Errorf("cannot find root page tree") } pageTreeID, _ := strconv.Atoi(m[1]) nextID, err := getNextFreeObjectID(data) if err != nil { return err } // Convert page size (which includes margins) in mm to // tile sizes (which excludes margins) in pt for use with PDF tileW := (tileSize.width * ptsInInch / mmInInch) - (bleedMargin+trimMargin)*2 tileH := (tileSize.height * ptsInInch / mmInInch) - (bleedMargin+trimMargin)*2 pages := getAllPages(data) // Sort pages by page number if not already sorted sort.Slice(pages, func(i, j int) bool { return pages[i].number < pages[j].number }) var tiles []*page for _, p := range pages { ts := cutPageToTiles(p, tileW, tileH, bleedMargin, trimMargin) for _, t := range ts { 
			t.parentID = pageTreeID
		}
		tiles = append(tiles, ts...)
	}

	{ // Wrap page content with graphics state preserving streams
		objs := fmt.Sprintf(
			"%d 0 obj\n<< /Length 1 >> stream\nqendstream\nendobj\n%d 0 obj\n<< /Length 1 >> stream\nQendstream\nendobj\n",
			nextID, nextID+1)
		data = strings.Replace(data, "\nxref\n", "\n"+objs+"\nxref\n", 1)
		for _, t := range tiles {
			t.contentIds = append([]int{nextID}, t.contentIds...)
			t.contentIds = append(t.contentIds, nextID+1)
		}
		nextID += 2
	}

	{ // Create overlays and add them to the doc
		b := &strings.Builder{}
		for _, t := range tiles {
			b.WriteString(createOverlayForPage(nextID, t))
			nextID++
		}
		data = strings.Replace(data, "\nxref\n", "\n"+b.String()+"\nxref\n", 1)
	}

	data = appendPagesToDoc(data, nextID, tiles)
	data = replaceAllDocPagesWith(data, tiles, pageTreeID)

	// Write data back to temp file
	f, err := ioutil.TempFile("", "pdftilecut-im2-")
	if err != nil {
		return err
	}
	if !*debugMode {
		defer os.Remove(f.Name())
	}
	if _, err := f.Write([]byte(data)); err != nil {
		f.Close()
		return err
	}
	f.Close()

	// Fix and write back an optimized PDF
	if err := convertToOptimizedPDF(f.Name(), *outputFile); err != nil {
		return err
	}
	return nil
}

// convertToOptimizedPDF converts the input PDF to a compressed PDF with
// object streams using QPDF.
func convertToOptimizedPDF(in string, out string) error {
	q, err := qpdf.New()
	if err != nil {
		return err
	}
	defer q.Close()
	if !*debugMode {
		q.SetSuppressWarnings(true)
	}
	if err := q.ReadFile(in); err != nil {
		return err
	}
	// TODO enable optimization flags
	if err := q.InitFileWrite(out); err != nil {
		return err
	}
	q.SetObjectStreamMode(qpdf.ObjectStreamGenerate)
	q.SetStreamDataMode(qpdf.StreamDataPreserve)
	q.SetCompressStreams(true)
	if err := q.Write(); err != nil {
		return err
	}
	return nil
}
// convertToQDF uses QPDF to convert an input PDF to a normalized
// format that is easy to parse and manipulate.
func convertToQDF(in string) (string, error) {
	q, err := qpdf.New()
	if err != nil {
		return "", err
	}
	defer q.Close()
	if !*debugMode {
		q.SetSuppressWarnings(true)
	}
	if err := q.ReadFile(in); err != nil {
		return "", err
	}
	f, err := ioutil.TempFile("", "pdftilecut-im-")
	if err != nil {
		return "", err
	}
	f.Close()
	if !*debugMode {
		defer os.Remove(f.Name())
	}
	if err := q.InitFileWrite(f.Name()); err != nil {
		return "", err
	}
	q.SetQDFMode(true)
	q.SetObjectStreamMode(qpdf.ObjectStreamDisable)
	q.SetStreamDataMode(qpdf.StreamDataPreserve)
	if err := q.Write(); err != nil {
		return "", err
	}
	q.Close() // free up memory as soon as possible
	f, err = os.Open(f.Name())
	if err != nil {
		return "", err
	}
	defer f.Close()
	b, err := ioutil.ReadAll(f)
	if err != nil {
		return "", err
	}
	return string(b), nil
}

func main() {
	if err := run(); err != nil {
		log.Fatal(err)
	}
}

func run() error {
	flag.Parse()

	// Create temp file for input and output if needed
	if *inputFile == "-" {
		f, err := ioutil.TempFile("", "pdftilecut-in-")
		if err != nil {
			return err
		}
		defer os.Remove(f.Name())
		if _, err := io.Copy(f, os.Stdin); err != nil {
			return err
		}
		if err := f.Close(); err != nil {
			return err
		}
		*inputFile = f.Name()
		if *tileTitle == "" {
			*tileTitle = "stdin"
		}
	} else if *tileTitle == "" {
		*tileTitle = filepath.Base(*inputFile)
	}
	*tileTitle = strings.ToUpper(*tileTitle)

	var toStdout bool
	if *outputFile == "-" {
		f, err := ioutil.TempFile("", "pdftilecut-out-")
		if err != nil {
			return err
		}
		f.Close()
		defer os.Remove(f.Name())
		*outputFile = f.Name()
		toStdout = true
	}

	// Tile cut
	if err := process(); err != nil {
		return err
	}

	// Cleanup
	if toStdout {
		f, err := os.Open(*outputFile)
		if err != nil {
			return err
		}
		defer f.Close()
		if _, err := io.Copy(os.Stdout, f); err != nil {
			return err
		}
	}
	return nil
}
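The following is an editorial sketch, not part of pdftilecut: a self-contained Go program illustrating two pieces of the logic above — the ceiling-division tile grid computed by cutPageToTiles, and the base-26 A-Z row labels produced by numToAlpha. The identifiers tileGrid and rowLabel are invented for this sketch, and it works in raw millimetres, whereas the tool itself converts to points and subtracts the bleed/trim margins first.

package main

import (
	"fmt"
	"math"
	"strconv"
)

// tileGrid mirrors the arithmetic in cutPageToTiles: the page is split into
// ceil(pageW/tileW) x ceil(pageH/tileH) tiles, then the tile dimensions are
// re-fitted so that every tile ends up exactly the same size.
func tileGrid(pageW, pageH, tileW, tileH float32) (h, v int, w, hgt float32) {
	h = int(math.Ceil(float64(pageW / tileW)))
	v = int(math.Ceil(float64(pageH / tileH)))
	return h, v, pageW / float32(h), pageH / float32(v)
}

// rowLabel mirrors numToAlpha: format n in base 26, then map each base-26
// digit (0-9, a-p) onto A-Z, so 0 -> "A", 25 -> "Z", 26 -> "BA".
func rowLabel(n int) string {
	s := []byte(strconv.FormatInt(int64(n), 26))
	for i, c := range s {
		if c < 'a' {
			s[i] = byte('A' + (c - '0'))
		} else {
			s[i] = byte('A' + 10 + (c - 'a'))
		}
	}
	return string(s)
}

func main() {
	// An A1 page (594mm x 841mm) cut into A4-sized tiles (210mm x 297mm):
	h, v, w, hgt := tileGrid(594, 841, 210, 297)
	fmt.Printf("%d x %d tiles of %.1fmm x %.1fmm\n", h, v, w, hgt) // 3 x 3 tiles of 198.0mm x 280.3mm
	for y := 0; y < v; y++ {
		fmt.Println("row", y, "->", rowLabel(y)) // rows A, B, C
	}
}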
main.go
package main import ( "errors" "flag" "fmt" "io" "io/ioutil" "log" "math" "os" "path/filepath" "regexp" "sort" "strconv" "strings" "github.com/oxplot/papersizes" "github.com/oxplot/pdftilecut/qpdf" ) const ( ptsInInch = 72 mmInInch = 25.4 mmInCm = 10 bleedMargin = ptsInInch * 5 / 6 // in pt from media box trimMargin = ptsInInch / 6 // in pt from bleed box trimMarkLineWidth = 0.5 // in pt // Min page size in mm minPageDimension = (bleedMargin + trimMargin + trimMarkLineWidth) * 2 * mmInInch / ptsInInch ) type tileSizeFlag struct { name string // in millimeters width float32 height float32 isDim bool } func (v *tileSizeFlag) String() string { if v.isDim { return fmt.Sprintf("%.0fmm x %.0fmm", v.width, v.height) } return fmt.Sprintf("%s (%.0fmm x %.0fmm)", v.name, v.width, v.height) } func (v *tileSizeFlag) Set(s string) error { // unit to mm ratios unitsToMillimeter := map[string]float32{ "mm": 1, "cm": mmInCm, "in": mmInInch, "pt": mmInInch / ptsInInch, } // known paper sizes size := papersizes.FromName(s) if size != nil { v.name = size.Name v.width = float32(size.Width) v.height = float32(size.Height) v.isDim = false } else { // w x h dimensions dimRe := regexp.MustCompile(`^\s*(\d+(?:\.\d+)?)\s*(mm|cm|in|pt)\s*x\s*(\d+(?:\.\d+)?)\s*(mm|cm|in|pt)\s*$`) parts := dimRe.FindStringSubmatch(s) if parts == nil { return errors.New("invalid tile size") } v.name = parts[1] + parts[2] + "x" + parts[3] + parts[4] w, _ := strconv.ParseFloat(parts[1], 32) v.width = float32(w) * unitsToMillimeter[parts[2]] h, _ := strconv.ParseFloat(parts[3], 32) v.height = float32(h) * unitsToMillimeter[parts[4]] v.isDim = true } if v.width < minPageDimension || v.height < minPageDimension { return fmt.Errorf("min. tile dimension is %fmm x %fmm", minPageDimension, minPageDimension) } return nil } var ( inputFile = flag.String("in", "-", "input PDF") outputFile = flag.String("out", "-", "output PDF") tileTitle = flag.String("title", "", "title to show on margin of each tile (defaults to input filename)") debugMode = flag.Bool("debug", false, "run in debug mode") longTrimMarks = flag.Bool("long-trim-marks", false, "Use full width/height trim marks") tileSize tileSizeFlag ) func init() { _ = tileSize.Set("A4") flag.Var(&tileSize, "tile-size", "maximum size including margin - can be a standard paper size (eg A5), or width x height dimension with a unit (mm, cm, in, pt) (e.g. 
6cm x 12in)") } // getNextFreeObjectID returns the largest object id in the document + 1 func getNextFreeObjectID(d string) (int, error) { m := regexp.MustCompile(`(?m)^xref\s+\d+\s+(\d+)`).FindStringSubmatch(d) if m == nil { return 0, fmt.Errorf("cannot find the next free object id") } return strconv.Atoi(m[1]) } type rect struct { // ll = lower left // ur = upper right llx, lly, urx, ury float32 } func (r rect) isValid() bool { return r.llx <= r.urx && r.lly <= r.ury } type page struct { id int number int tileX int tileY int mediaBox rect cropBox rect bleedBox rect trimBox rect contentIds []int parentID int raw string } var ( boxReTpl = `(?m)^\s+/%s\s*\[\s*(-?[\d.]+)\s+(-?[\d.]+)\s+(-?[\d.]+)\s+(-?[\d.]+)\s*\]` bleedBoxRe = regexp.MustCompile(fmt.Sprintf(boxReTpl, "BleedBox")) cropBoxRe = regexp.MustCompile(fmt.Sprintf(boxReTpl, "CropBox")) mediaBoxRe = regexp.MustCompile(fmt.Sprintf(boxReTpl, "MediaBox")) trimBoxRe = regexp.MustCompile(fmt.Sprintf(boxReTpl, "TrimBox")) contentsRe = regexp.MustCompile(`(?m)^\s+/Contents\s+(?:(\d+)|\[([^\]]*))`) pageObjRmRe = regexp.MustCompile( `(?m)^\s+/((Bleed|Crop|Media|Trim|Art)Box|Contents|Parent)\s+(\[[^\]]+\]|\d+\s+\d+\s+R)\n`) ) // marshal serializes the page to string that can be inserted into // PDF document. func (p *page) marshal() string { b := &strings.Builder{} fmt.Fprintf(b, "\n%d 0 obj\n<<\n", p.id) fmt.Fprintf(b, " /MediaBox [ %f %f %f %f ]\n", p.mediaBox.llx, p.mediaBox.lly, p.mediaBox.urx, p.mediaBox.ury) fmt.Fprintf(b, " /CropBox [ %f %f %f %f ]\n", p.cropBox.llx, p.cropBox.lly, p.cropBox.urx, p.cropBox.ury) fmt.Fprintf(b, " /BleedBox [ %f %f %f %f ]\n", p.bleedBox.llx, p.bleedBox.lly, p.bleedBox.urx, p.bleedBox.ury) fmt.Fprintf(b, " /TrimBox [ %f %f %f %f ]\n", p.trimBox.llx, p.trimBox.lly, p.trimBox.urx, p.trimBox.ury) fmt.Fprintf(b, " /Contents [ ") for _, cid := range p.contentIds { fmt.Fprintf(b, " %d 0 R ", cid) } fmt.Fprintf(b, " ]\n") fmt.Fprintf(b, " /Parent %d 0 R\n", p.parentID) b.WriteString(p.raw) fmt.Fprintf(b, "\n>>\nendobj\n") return b.String() } // extractAttrs extracts interesting attributes of the page into // struct elements and removes them from raw string of the page. 
func (p *page) extractAttrs() error { atoi := func(s string) int { i, err := strconv.Atoi(s) if err != nil { panic(err) } return i } atof := func(s string) float32 { f, err := strconv.ParseFloat(s, 32) if err != nil { panic(err) } return float32(f) } var m []string m = contentsRe.FindStringSubmatch(p.raw) if m == nil { return fmt.Errorf("cannot find Contents for page:\n%s", p.raw) } if m[1] != "" { p.contentIds = []int{atoi(m[1])} } else { m := regexp.MustCompile(`(?m)^\s+(\d+)\s+\d+\s+R`).FindAllStringSubmatch(m[2], -1) p.contentIds = []int{} for _, r := range m { p.contentIds = append(p.contentIds, atoi(r[1])) } } m = mediaBoxRe.FindStringSubmatch(p.raw) if m == nil { return fmt.Errorf("cannot find MediaBox for page:\n%s", p.raw) } p.mediaBox = rect{atof(m[1]), atof(m[2]), atof(m[3]), atof(m[4])} if !p.mediaBox.isValid() { return fmt.Errorf("invalid MediaBox for page:\n%s", p.raw) } m = cropBoxRe.FindStringSubmatch(p.raw) if m == nil { p.cropBox = p.mediaBox } else { p.cropBox = rect{atof(m[1]), atof(m[2]), atof(m[3]), atof(m[4])} } if !p.cropBox.isValid() { return fmt.Errorf("invalid CropBox for page:\n%s", p.raw) } m = bleedBoxRe.FindStringSubmatch(p.raw) if m == nil { p.bleedBox = p.cropBox } else { p.bleedBox = rect{atof(m[1]), atof(m[2]), atof(m[3]), atof(m[4])} } if !p.bleedBox.isValid() { return fmt.Errorf("invalid BleedBox for page:\n%s", p.raw) } m = trimBoxRe.FindStringSubmatch(p.raw) if m == nil { p.trimBox = p.cropBox } else { p.trimBox = rect{atof(m[1]), atof(m[2]), atof(m[3]), atof(m[4])} } if !p.trimBox.isValid() { return fmt.Errorf("invalid TrimBox for page:\n%s", p.raw) } // Delete all the extracted raw content p.raw = pageObjRmRe.ReplaceAllString(p.raw, "") return nil } // cutPageToTiles slices the page into tiles of the given size, setting // appropriate *Box attributes of the tiles. All other page attributes // are copied from the original page. func cutPageToTiles(p *page, tileW, tileH, bleedMargin, trimMargin float32) []*page { // Adjust tileW and tileH such that all tiles end up with the same dimensions pageWidth := p.trimBox.urx - p.trimBox.llx pageHeight := p.trimBox.ury - p.trimBox.lly hTiles := int(math.Ceil(float64(pageWidth / tileW))) vTiles := int(math.Ceil(float64(pageHeight / tileH))) tileW = pageWidth / float32(hTiles) tileH = pageHeight / float32(vTiles) var tilePages []*page tgy := 0 for y := 0; y < vTiles; y++ { lly := p.trimBox.lly + float32(y)*tileH tgx := 0 for x := 0; x < hTiles; x++ { llx := p.trimBox.llx + float32(x)*tileW tile := page{ tileX: tgx, tileY: tgy, mediaBox: rect{ llx - trimMargin - bleedMargin, lly - trimMargin - bleedMargin, llx + tileW + trimMargin + bleedMargin, lly + tileH + trimMargin + bleedMargin, }, bleedBox: rect{llx - trimMargin, lly - trimMargin, llx + tileW + trimMargin, lly + tileH + trimMargin}, trimBox: rect{llx, lly, llx + tileW, lly + tileH}, number: p.number, contentIds: append([]int{}, p.contentIds...), raw: p.raw, } tile.cropBox = tile.mediaBox tilePages = append(tilePages, &tile) tgx++ } tgy++ } return tilePages } // appendPagesToDoc appends the given pages after all the other objects // but before the xref block. It also updates the object ids as it goes // starting with startID. 
func appendPagesToDoc(d string, startID int, pages []*page) string {
	var b strings.Builder
	for pi, p := range pages {
		p.id = pi + startID
		b.WriteString(p.marshal())
	}
	return strings.Replace(d, "\nxref\n", "\n"+b.String()+"\n\nxref\n", 1)
}

// replaceAllDocPagesWith updates the first node of the page tree with array
// containing references to the given pages, effectively replacing all
// the existing page trees.
func replaceAllDocPagesWith(d string, pages []*page, pageTreeID int) string {
	b := &strings.Builder{}
	for _, p := range pages {
		fmt.Fprintf(b, "%d 0 R\n", p.id)
	}
	// Replace the count
	r := regexp.MustCompile(fmt.Sprintf(`(?ms)^(%d 0 obj\n.*?^\s+/Count\s+)\d+`, pageTreeID))
	d = r.ReplaceAllString(d, fmt.Sprintf(`${1}%d`, len(pages)))
	// Replace page references
	r = regexp.MustCompile(fmt.Sprintf(`(?ms)^(%d 0 obj\n.*?^\s+/Kids\s+\[)[^\]]*`, pageTreeID))
	d = r.ReplaceAllString(d, fmt.Sprintf(`${1} %s `, b.String()))
	return d
}

// getAllPages returns all the page objects in the document in order
// they appear in input.
func getAllPages(d string) []*page {
	pages := []*page{}
	// Match all the pages
	pageRe := regexp.MustCompile(`(?ms)^%% Page (\d+)\n%%[^\n]*\n\d+\s+\d+\s+obj\n<<\n(.*?)\n^>>\n^endobj`)
	pageM := pageRe.FindAllStringSubmatch(d, -1)
	for _, pm := range pageM {
		pNum, _ := strconv.Atoi(pm[1])
		p := page{number: pNum, raw: pm[2]}
		if err := p.extractAttrs(); err != nil {
			log.Print(err)
			continue
		}
		pages = append(pages, &p)
	}
	return pages
}

// numToAlpha converts a given integer to a base-26 number
// with digits each between A-Z
func numToAlpha(n int) string {
	s := []byte(strconv.FormatInt(int64(n), 26))
	for i, c := range s {
		if c < 'a' {
			s[i] = byte('A' + (c - '0'))
		} else {
			s[i] = byte('A' + 10 + (c - 'a'))
		}
	}
	return string(s)
}

// createOverlayForPage returns a PDF object which contains:
// - white opaque margin up to bleedMargin
// - trim marks up to bleedMargin
// - other print marks such as tile/page number
// This will update the contentIds of the page to include a ref
// to the new overlay object.
func createOverlayForPage(overlayID int, p *page) string { mb, bb, tb := p.mediaBox, p.bleedBox, p.trimBox // Draw opaque bleed margin stream := fmt.Sprintf(` q 1 1 1 rg %f %f m %f %f l %f %f l %f %f l h %f %f m %f %f l %f %f l %f %f l h f Q `, // +1s and -1s are to bleed the box outside of viewpoint mb.llx-1, mb.lly-1, mb.llx-1, mb.ury+1, mb.urx+1, mb.ury+1, mb.urx+1, mb.lly-1, bb.llx, bb.lly, bb.urx, bb.lly, bb.urx, bb.ury, bb.llx, bb.ury, ) // Draw trim marks if !*longTrimMarks { stream += fmt.Sprintf(` q 0 0 0 rg %f w %f %f m %f %f l S %f %f m %f %f l S %f %f m %f %f l S %f %f m %f %f l S %f %f m %f %f l S %f %f m %f %f l S %f %f m %f %f l S %f %f m %f %f l S Q `, trimMarkLineWidth, mb.llx-1, tb.lly, bb.llx, tb.lly, mb.llx-1, tb.ury, bb.llx, tb.ury, tb.llx, mb.ury+1, tb.llx, bb.ury, tb.urx, mb.ury+1, tb.urx, bb.ury, bb.urx, tb.ury, mb.urx+1, tb.ury, bb.urx, tb.lly, mb.urx+1, tb.lly, tb.llx, bb.lly, tb.llx, mb.lly-1, tb.urx, bb.lly, tb.urx, mb.lly-1, ) } else { stream += fmt.Sprintf(` q 0 0 0 rg %f w %f %f m %f %f l S %f %f m %f %f l S %f %f m %f %f l S %f %f m %f %f l S Q `, trimMarkLineWidth, mb.llx-1, tb.lly, mb.urx+1, tb.lly, // bottom trim line mb.llx-1, tb.ury, mb.urx+1, tb.ury, // top trim line tb.llx, mb.lly-1, tb.llx, mb.ury+1, // left trim line tb.urx, mb.lly-1, tb.urx, mb.ury+1, // right trim line ) } // Draw tile ref vch := float32(vecCharHeight) stream += fmt.Sprintf(` q 0 0 0 rg q 1 0 0 1 %f %f cm %s Q q 1 0 0 1 %f %f cm %s Q Q q 0 0 0 rg %f w 2 J %f %f m %f %f l S %f %f m %f %f l S %f %f m %f %f l %f %f l h f %f %f m %f %f l %f %f l h f Q `, bb.urx, bb.ury+vch/2, strToVecChars(numToAlpha(p.tileY), -1, 1), bb.urx+vch/2, bb.ury, strToVecChars(strconv.Itoa(p.tileX+1), 1, -1), trimMarkLineWidth, bb.urx+vch/2, bb.ury+vch/2, bb.urx+vch/2, bb.ury+vch*1.5, bb.urx+vch/2, bb.ury+vch/2, bb.urx+vch*1.5, bb.ury+vch/2, bb.urx+vch/4, bb.ury+vch*1.5, bb.urx+vch*3/4, bb.ury+vch*1.5, bb.urx+vch/2, bb.ury+vch*2, bb.urx+vch*1.5, bb.ury+vch/4, bb.urx+vch*1.5, bb.ury+vch*3/4, bb.urx+vch*2, bb.ury+vch/2, ) // Draw page ref stream += fmt.Sprintf(` q 0 0 0 rg q 1 0 0 1 %f %f cm %s Q q 1 0 0 1 %f %f cm %s Q Q `, tb.llx-vch/2, bb.ury+vch/2, strToVecChars(strconv.Itoa(p.number), -1, 1), bb.llx-vch/2, bb.ury, strToVecChars("PAGE", -1, -1), ) // Draw page title stream += fmt.Sprintf(` q 0 0 0 rg q 1 0 0 1 %f %f cm %s Q Q `, tb.llx+vch/2, bb.lly-vch/2, strToVecChars(*tileTitle, 1, -1), ) p.contentIds = append(p.contentIds, overlayID) return fmt.Sprintf("%d 0 obj\n<< /Length %d >> stream\n%sendstream\nendobj\n", overlayID, len(stream), stream) } func process() error { // Convert to QDF form data, err := convertToQDF(*inputFile) if err != nil { return err } // Get the root page tree object id m := regexp.MustCompile(`(?m)^\s+/Pages\s+(\d+)\s+\d+\s+R`).FindStringSubmatch(data) if m == nil { return fmt.Errorf("cannot find root page tree") } pageTreeID, _ := strconv.Atoi(m[1]) nextID, err := getNextFreeObjectID(data) if err != nil { return err } // Convert page size (which includes margins) in mm to // tile sizes (which excludes margins) in pt for use with PDF tileW := (tileSize.width * ptsInInch / mmInInch) - (bleedMargin+trimMargin)*2 tileH := (tileSize.height * ptsInInch / mmInInch) - (bleedMargin+trimMargin)*2 pages := getAllPages(data) // Sort pages by page number if not already sorted sort.Slice(pages, func(i, j int) bool { return pages[i].number < pages[j].number }) var tiles []*page for _, p := range pages { ts := cutPageToTiles(p, tileW, tileH, bleedMargin, trimMargin) for _, t := range ts { 
			t.parentID = pageTreeID
		}
		tiles = append(tiles, ts...)
	}

	{ // Wrap page content with graphics state preserving streams
		objs := fmt.Sprintf(
			"%d 0 obj\n<< /Length 1 >> stream\nqendstream\nendobj\n%d 0 obj\n<< /Length 1 >> stream\nQendstream\nendobj\n",
			nextID, nextID+1)
		data = strings.Replace(data, "\nxref\n", "\n"+objs+"\nxref\n", 1)
		for _, t := range tiles {
			t.contentIds = append([]int{nextID}, t.contentIds...)
			t.contentIds = append(t.contentIds, nextID+1)
		}
		nextID += 2
	}

	{ // Create overlays and add them to the doc
		b := &strings.Builder{}
		for _, t := range tiles {
			b.WriteString(createOverlayForPage(nextID, t))
			nextID++
		}
		data = strings.Replace(data, "\nxref\n", "\n"+b.String()+"\nxref\n", 1)
	}

	data = appendPagesToDoc(data, nextID, tiles)
	data = replaceAllDocPagesWith(data, tiles, pageTreeID)

	// Write data back to temp file
	f, err := ioutil.TempFile("", "pdftilecut-im2-")
	if err != nil {
		return err
	}
	if !*debugMode {
		defer os.Remove(f.Name())
	}
	if _, err := f.Write([]byte(data)); err != nil {
		f.Close()
		return err
	}
	f.Close()

	// Fix and write back an optimized PDF
	if err := convertToOptimizedPDF(f.Name(), *outputFile); err != nil {
		return err
	}

	return nil
}

// convertToOptimizedPDF converts the input PDF to a compressed PDF with
// object streams using QPDF.
func convertToOptimizedPDF(in string, out string) error {
	q, err := qpdf.New()
	if err != nil {
		return err
	}
	defer q.Close()
	if !*debugMode {
		q.SetSuppressWarnings(true)
	}
	if err := q.ReadFile(in); err != nil {
		return err
	}
	// TODO enable optimization flags
	if err := q.InitFileWrite(out); err != nil {
		return err
	}
	q.SetObjectStreamMode(qpdf.ObjectStreamGenerate)
	q.SetStreamDataMode(qpdf.StreamDataPreserve)
	q.SetCompressStreams(true)
	if err := q.Write(); err != nil {
		return err
	}
	return nil
}

// convertToQDF uses QPDF to convert an input PDF to a normalized
// format that is easy to parse and manipulate.
func convertToQDF(in string) (string, error) {
	q, err := qpdf.New()
	if err != nil {
		return "", err
	}
	defer q.Close()
	if !*debugMode {
		q.SetSuppressWarnings(true)
	}
	if err := q.ReadFile(in); err != nil {
		return "", err
	}
	f, err := ioutil.TempFile("", "pdftilecut-im-")
	if err != nil {
		return "", err
	}
	f.Close()
	if !*debugMode {
		defer os.Remove(f.Name())
	}
	if err := q.InitFileWrite(f.Name()); err != nil {
		return "", err
	}
	q.SetQDFMode(true)
	q.SetObjectStreamMode(qpdf.ObjectStreamDisable)
	q.SetStreamDataMode(qpdf.StreamDataPreserve)
	if err := q.Write(); err != nil {
		return "", err
	}
	q.Close() // free up memory as soon as possible
	f, err = os.Open(f.Name())
	if err != nil {
		return "", err
	}
	defer f.Close()
	b, err := ioutil.ReadAll(f)
	if err != nil {
		return "", err
	}
	return string(b), nil
}

func main() {
	if err := run(); err != nil {
		log.Fatal(err)
	}
}

func run() error {
	flag.Parse()

	// Create temp file for input and output if needed
	if *inputFile == "-" {
		f, err := ioutil.TempFile("", "pdftilecut-in-")
		if err != nil {
			return err
		}
		defer os.Remove(f.Name())
		if _, err := io.Copy(f, os.Stdin); err != nil {
			return err
		}
		if err := f.Close(); err != nil {
			return err
		}
		*inputFile = f.Name()
		if *tileTitle == "" {
			*tileTitle = "stdin"
		}
	} else if *tileTitle == "" {
		*tileTitle = filepath.Base(*inputFile)
	}
	*tileTitle = strings.ToUpper(*tileTitle)

	var toStdout bool
	if *outputFile == "-" {
		f, err := ioutil.TempFile("", "pdftilecut-out-")
		if err != nil {
			return err
		}
		f.Close()
		defer os.Remove(f.Name())
		*outputFile = f.Name()
		toStdout = true
	}

	// Tile cut
	if err := process(); err != nil {
		return err
	}

	// Cleanup
	if toStdout {
		f, err := os.Open(*outputFile)
		if err != nil {
			return err
		}
		defer f.Close()
		if _, err := io.Copy(os.Stdout, f); err != nil {
			return err
		}
	}
	return nil
}
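For reference, the grid arithmetic in cutPageToTiles is: round the tile counts up so the whole trim box is covered, then shrink the requested tile size so every tile comes out with identical dimensions. A minimal standalone Python sketch of that computation follows (the A1/A4 numbers are illustrative values, not part of pdftilecut):

# Standalone sketch of the tile-grid arithmetic used by cutPageToTiles.
import math

def cut_to_tiles(page_w, page_h, tile_w, tile_h):
    # Round up so the grid covers the page completely.
    h_tiles = math.ceil(page_w / tile_w)
    v_tiles = math.ceil(page_h / tile_h)
    # Shrink the tile size so all tiles share the same dimensions.
    return h_tiles, v_tiles, page_w / h_tiles, page_h / v_tiles

# An A1 page (594 x 841 mm) cut into roughly-A4 usable areas (190 x 277 mm):
print(cut_to_tiles(594, 841, 190, 277))  # -> (4, 4, 148.5, 210.25)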
app.py
from flask import Flask, request
from flask_socketio import SocketIO
import logging
import pymysql
import json
import pygame
import threading
import gspeech
import time

# utf8 charset so Korean text is handled correctly
con = pymysql.connect(host='dementia.openlink.kr', user='admin', password='Opendb1234!@',
                      db='openlink', charset='utf8')

app = Flask(__name__)
app.config['SECRET_KEY'] = 'mysecret'
socketio = SocketIO(app, cors_allowed_origins='*')
app.debug = True
#app.host = '0.0.0.0'
app.port = 5000

logging.basicConfig(level=logging.ERROR)

pygame.mixer.pre_init(24000)
pygame.init()

testing = False
exit_event = threading.Event()
test = []
patients = {}
words = {}
datas = {}
stop = False

# Test definitions are in Korean, so force utf-8 when reading them.
with open('./testData.json', 'r', encoding='utf-8') as f:
    datas = json.load(f)

word = []
for x in datas[0]['questions']:
    word.append(x['content']['word'])
words['words'] = word

word = []
for x in datas[10]['questions']:
    word.append(x['content']['word'])
words['single'] = word

anilist = '''
1. 고양이 2. 강아지 3. 거북이 4. 토끼 5. 뱀 6. 사자 7. 호랑이 8. 표범 9. 치타 10. 하이에나 11. 기린 12. 코끼리 13. 코뿔소 14. 하마 15. 악어 16. 펭귄 17. 부엉이 18. 올빼미 19. 곰 20. 돼지 21. 소 22. 닭 23. 독수리 24. 타조 25. 고릴라 26. 오랑우탄 27. 침팬지 28. 원숭이 29. 코알라 30. 캥거루 31. 고래 32. 상어 33. 칠면조 34. 직박구리 35. 쥐 36. 청설모 37. 메추라기 38. 앵무새 39. 삵 40. 스라소니 41. 판다 42. 오소리 43. 오리 44. 거위 45. 백조 46. 두루미 47. 고슴도치 48. 두더지 49. 우파루파 50. 맹꽁이 51. 너구리 52. 개구리 53. 두꺼비 54. 카멜레온 55. 이구아나 56. 노루 57. 제비 58. 까지 59. 고라니 60. 수달 61. 당나귀 62. 순록 63. 염소 64. 공작 65. 바다표범 66. 들소 67. 박쥐 68. 참새 69. 물개 70. 바다사자 71. 살모사 72. 구렁이 73. 얼룩말 74. 산양 75. 멧돼지 76. 카피바라 77. 바다코끼리 78. 도롱뇽 79. 북극곰 80. 퓨마 81. 미어캣 82. 코요테 83. 라마 84. 딱따구리 85. 기러기 86. 비둘기 87. 스컹크 88. 아르마딜로 89. 돌고래 90. 까마귀 91. 매 92. 낙타 93. 여우 94. 사슴 95. 늑대 96. 재규어 97. 알파카 98. 양 99. 다람쥐 100. 담비
'''.split()[1::2]
anilist = list(map(lambda x: x.strip(), anilist))


@app.route("/")
def main():
    return ' '


@socketio.on('connect')
def on_connect(client):
    print('conn', client)


@socketio.on('disconnect')
def disconnect():
    cur = con.cursor(pymysql.cursors.DictCursor)
    print('discon', request.sid)
    if request.sid in patients:
        # Parameterized query instead of string formatting (avoids SQL injection).
        cur.execute("DELETE FROM TN_Scheduler WHERE phoneNumber=%s", (patients[request.sid],))
        con.commit()
    return


@socketio.on('patientJoin')
def checkpatient(data):
    phone = data['phoneNumber']
    cur = con.cursor(pymysql.cursors.DictCursor)
    sql = ("SELECT patCd,NAME,BIRTH FROM TN_CM_TRGTER_INFO "
           "WHERE TEL_NO_1=%s and TEL_NO_2=%s and TEL_NO_3=%s")
    cur.execute(sql, (phone[:3], phone[3:7], phone[7:]))
    # Fetch the data
    rows = cur.fetchone()
    print(rows, request.sid)
    if rows:
        socketio.emit('patientJoin', True)
        sql = "INSERT INTO TN_Scheduler (patCd,NAME,phoneNumber,BIRTH) VALUES (%s,%s,%s,%s)"
        val = (rows['patCd'], rows['NAME'], phone, rows['BIRTH'])
        cur.execute(sql, val)
        con.commit()
        patients[request.sid] = phone
    else:
        socketio.emit('patientJoin', False)


@socketio.on('doctorJoin')
def checkdoctor(data):
    print('doctorJoin', request.sid)
    cur = con.cursor(pymysql.cursors.DictCursor)
    cur.execute("SELECT USER_ID FROM TN_CM_USER_INFO WHERE LOGIN_ID=%s", (data['id'],))
    # Fetch the data
    rows = cur.fetchone()
    if rows:
        userid = rows['USER_ID']  # only read USER_ID once the row is known to exist
        socketio.emit('doctorJoin', True)


@socketio.on('patientInfo')
def getPatientInfo():
    print('patientInfo')
    cur = con.cursor(pymysql.cursors.DictCursor)
    sql = "SELECT patCd,NAME,phoneNumber,BIRTH FROM TN_Scheduler"
    cur.execute(sql)
    # Fetch the data
    rows = cur.fetchall()
    res = json.dumps(rows)  # all rows
    socketio.emit('patientInfo', res)


@socketio.on('startTest')
def startTest(index):
    print('start Test')
    info = datas[index]
    socketio.emit('startTest', {'testType': info['type'], 'narration': info['narration']})


@socketio.on('testFinished')
def testFinished():
    socketio.emit('testFinished', True)


@socketio.on('startWords')
def startWords(data):
    info = datas[0]
    print('start words', data, info)
    gsp = gspeech.Gspeech()
    stop = False
    findwords = []
    word = words[data].copy()
    print(word)

    @socketio.on('stopWords')
    def stopWords():
        nonlocal stop  # without nonlocal, stop = True would only bind a new local
        stop = True

    while True:
        # Block until speech is recognized.
        stt = gsp.getText()
        finded = []
        stt = stt.strip()
        print(stt)
        time.sleep(0.01)
        for r in word:
            if (r in stt) and not (r in finded):
                wordsResult = {'response': {'index': str(word.index(r) + 1),
                                            'phase': 0,
                                            'result': {'response': r, 'score': 1}}}
                if not stop:
                    print('emit result')
                    socketio.emit('wordsResult', wordsResult)
                findwords.append(word.index(r))
                finded.append(r)
        for x in findwords:
            word[x] = '!@'
        if (not stt) or stop:
            print('stop')
            break
    print('end startwords')


@socketio.on('startSingleWords')
def startSingleWordsMemory(data):
    print('startSingleWordsMemory', data)
    gsp = gspeech.Gspeech()
    stop = False
    findwords = []
    word = words['single'].copy()
    print(word)
    order = 1

    @socketio.on('stopSingleWords')
    def stopSingleWords():
        nonlocal stop
        stop = True

    while True:
        # Block until speech is recognized.
        stt = gsp.getText()
        finded = []
        stt = stt.strip()
        print(stt)
        time.sleep(0.01)
        for r in word:
            if (r in stt) and not (r in finded):
                wordsResult = {'response': {'index': str(word.index(r) + 1),
                                            'phase': data,
                                            'result': {'order': order, 'word': r, 'score': 1}}}
                if not stop:
                    print('emit result')
                    socketio.emit('SingleWordsResult', wordsResult)
                    print('inc order')
                    order += 1
                findwords.append(word.index(r))
                finded.append(r)
                break
        for x in findwords:
            word[x] = '!@'
        if (not stt) or stop:
            print('stop')
            break
    print('end startSingleWords')


@socketio.on('startSM')
def startSM(data):
    index = data['index']
    correct = data['correct']
    corrects = ['일', '이', '삼', '사', '오', '육', '칠', '팔', '구', '십']
    print('start SM')
    print(index, correct)
    gsp = gspeech.Gspeech()
    stop = False

    @socketio.on('stopSM')
    def stopSM():
        nonlocal stop
        stop = True

    while True:
        # Block until speech is recognized.
        stt = gsp.getText()
        stt = stt.strip()
        print(stt)
        time.sleep(0.01)
        # Accept the digit itself or its Korean reading.
        if stt in [correct, corrects[int(correct) - 1]]:
            print('clear')
            stop = True
            Result = {'response': {'index': index,
                                   'phase': 0,
                                   'result': {'response': int(correct), 'score': 1}}}
            print('emit result')
            socketio.emit('SMResult', Result)
        if (not stt) or stop:
            print('stop')
            break
    print('end SM')


@socketio.on('startSMM')
def startSMM(data):
    index = data['index']
    correct = data['correct']
    print('start SMM')
    print(index, correct)
    gsp = gspeech.Gspeech()
    stop = False

    @socketio.on('stopSMM')
    def stopSMM():  # was named stopSM in the original, shadowing the handler above
        nonlocal stop
        stop = True

    while True:
        # Block until speech is recognized.
        stt = gsp.getText()
        stt = stt.strip()
        print(stt)
        time.sleep(0.01)
        if stt == '예':
            stop = True
            Result = {'response': {'index': index,
                                   'phase': 0,
                                   'result': {'response': True, 'score': 1 * (correct == 0)}}}
            print('emit result')
            socketio.emit('SMMResult', Result)
        elif stt == '아니오' or stt == '아니요':
            stop = True
            Result = {'response': {'index': index,
                                   'phase': 0,
                                   'result': {'response': False, 'score': 1 * (correct == 1)}}}
            print('emit result')
            socketio.emit('SMMResult', Result)
        if (not stt) or stop:
            print('stop')
            break
    print('end SMM')


@socketio.on('startStickMemory')
def startStickMemory(element):
    print(element)
    info = datas[9].copy()
    index = element['index']
    correct = element['content']['correct']
    print(info)
    socketio.emit('startNarration', {'testType': info['type'],
                                     'narration': info['narration'],
                                     'questions': element})
    gsp = gspeech.Gspeech()
    stop = False

    @socketio.on('stopStickMemory')
    def stopStickMemory():
        nonlocal stop
        stop = True

    while True:
        # Block until speech is recognized.
        stt = gsp.getText()
        stt = stt.strip()
        print(stt)
        time.sleep(0.01)
        if stt == '예':
            stop = True
            Result = {'response': {'index': index,
                                   'phase': 0,
                                   'result': {'response': True, 'score': 1 * (correct == '0')}}}
            print('emit result')
            socketio.emit('stickMemoryResult', Result)
        elif stt == '아니오' or stt == '아니요':
            stop = True
            Result = {'response': {'index': index,
                                   'phase': 0,
                                   'result': {'response': False, 'score': 1 * (correct == '1')}}}
            print('emit result')
            socketio.emit('stickMemoryResult', Result)
        if (not stt) or stop:
            print('stop')
            gsp.pauseMic()  # the original called gsp.mic.Pause(); pauseMic() matches the rest of the file
            break


@socketio.on('startShapeColor')
def startShapeColor(element):
    print(element)
    stop = False

    @socketio.on('stopShapeColor')
    def stopShapeColor():
        nonlocal stop
        stop = True

    info = datas[13].copy()
    index = element['index']
    if int(index) < 0:
        index = '0'
    word = element['content']['word']
    socketio.emit('startNarration', {'testType': info['type'],
                                     'narration': info['narration'],
                                     'questions': element})
    wordlist = ['무', '수박', '귤', '호두', '당근', '깻잎', '연근', '오이', '고추', '땅콩',
                '말', '토끼', '다람쥐', '금붕어', '돼지', '오리']
    corlist = [1, 2, 1, 1, 2, 2, 2, 1, 1, 2, 1, 2, 2, 1, 2, 1]
    numberlist = [['1', '일'], ['2', '이']]
    cor = numberlist[corlist[int(index)] - 1]
    gsp = gspeech.Gspeech()

    while True:
        # Block until speech is recognized.
        stt = gsp.getText()
        stt = stt.strip()
        print(stt)
        time.sleep(0.01)
        print('go')
        stop = True
        Result = {'response': {'index': index if int(index) > 0 else str(int(index) - 1),
                               'phase': 0,
                               'result': {'word': stt,
                                          'imageScore': 1 * ((cor[0] in stt) or (cor[1] in stt)),
                                          'nameScore': 1 * (word in stt)}}}
        print('emit result')
        socketio.emit('shapeColorResult', Result)
        if (not stt) or stop:
            print('stop')
            gsp.pauseMic()
            break


@socketio.on('startwordFluencyTest')
def startWordFluencyTest(element):  # renamed: the original reused the name startShapeColor
    start = time.time()
    gsp = gspeech.Gspeech()
    seq = ['0 ~ 15 seconds', '16 ~ 30 seconds', '31 ~ 45 seconds', '46 ~ 60 seconds']
    stop = False

    @socketio.on('stopwordFluency')
    def stopwordFluency():
        nonlocal stop  # the original used the module-level global, which never resets
        stop = True

    while True:
        if stop:
            print('stop')
            break
        # Block until speech is recognized.
        stt = gsp.getText()
        stt = stt.strip()
        time.sleep(0.01)
        print(stt)
        score = 0
        corlist = []
        for x in stt.split():
            if x in anilist:
                score += 1
                corlist.append(x)
        if score > 0:
            endtime = int(time.time() - start)
            index = str(int(endtime / 15) + 1)
            print(endtime, score)
            Result = {'response': {'index': index,
                                   'phase': 0,
                                   'result': {'word': ' '.join(corlist), 'score': score}}}
            print('emit result')
            socketio.emit('wordFluencyResult', Result)


cors_calTest = {'add': [[40, 80, 150], [30, 70, 120]],
                'sub': [[10, 40, 90], [20, 50, 70]],
                'mult': [[300, 60, 250], [200, 40, 150]],
                'div': [['네', '다섯', '아홉'], ['세', '세', '일곱']]}


@socketio.on('startcalTest')
def startcalTest(element):
    print('startCal')
    stop = False
    gsp = gspeech.Gspeech()

    @socketio.on('stopCalTest')
    def stopCalTest():
        nonlocal stop
        print('stopCalTest')
        stop = True

    ttype = element[0]
    index = element[1]
    cor = cors_calTest[ttype][int(index) - 1][element[2]]
    while True:
        if stop:
            print('stop')
            break
        # Block until speech is recognized.
        stt = gsp.getText()
        stt = stt.strip()
        time.sleep(0.01)
        print(stt)
        Result = {'response': {'index': '-'.join([ttype, index, str(element[2] + 1)]),
                               'phase': 0,
                               'result': {'response': stt, 'score': 1 * (str(cor) in stt)}}}
        print('emit result')
        socketio.emit('calResult', Result)
        socketio.emit('next', [ttype, index, element[2]])
        break
    gsp.pauseMic()
    print('endCalTest')


numbersetdata = datas[5]['questions'].copy()
cell = []
cells = [[], []]
for x in numbersetdata:
    cells[0].append(x['content']['leftSet'])
    cells[1].append(x['content']['rightSet'])

nas = []
na = {'narration': '여기 맨 밑줄에 칸마다 숫자들이 적혀있습니다. 각 칸에 적힌 숫자를 2,1,3 이런 식으로 한 개씩만 읽으십시오. 한 번 해보세요. 자, 이제 제가 시작하면 여기 처음부터 끝까지 정확하면서도 최대한 빠르게 각 칸에 적힌 숫자를 말해보세요. 시작.',
      'cells': cells[0]}
nas.append(na)
na = {'narration': '이번에는 각 칸에 적힌 수자의 개수를 말씀해 보십시오. 한 개, 두 개, 세 개로 세지 마시고 숫자가 한 개 있으면 1, 두 개 있으면 2 이렇게 한 번 해보세요. 잘했습니다. 자, 이제 제가 시작하면 여기 처음부터 끝까지 정확하면서도 최대한 빠르게 각 칸에 몇 개의 숫자가 적혀있는지, 그 개수를 말씀해보십시오. 시작.',
      'cells': cells[1]}
nas.append(na)

numcorlist = [['1', '일'], ['2', '이'], ['3', '삼']]


@socketio.on('startNumberSet')
def startNumberSet(data):
    stop = False
    gsp = gspeech.Gspeech()

    @socketio.on('stopnumSetTest')
    def stopnumSetTest():
        nonlocal stop
        print('stopnumSetTest')
        stop = True

    questions = numbersetdata[data[1]]
    print(questions, data)
    cors = []
    cors.append(list(set(list(questions['content'].values())[0]))[0])
    cors.append(len(list(questions['content'].values())[1]))
    print(cors)
    socketio.emit('startNarration', {'testType': 'numbersSet',
                                     'narration': nas[data[0]],
                                     'questions': questions})
    while True:
        if stop:
            print('stop')
            break
        # Block until speech is recognized.
        stt = gsp.getText()
        stt = stt.strip()
        time.sleep(0.01)
        print(stt)
        # The original compared str(numcorlist[...]) - the string of a list - against
        # stt, which can never match; check the digit and its Korean reading instead.
        Result = {'response': {'index': questions['index'],
                               'phase': data[0],
                               'result': {'response': stt,
                                          'score': 1 * any(c in stt for c in numcorlist[cors[data[0]] - 1])}}}
        print('emit result')
        socketio.emit('numbersetResult', Result)
        break
    gsp.pauseMic()
    print('endnumTest')


if __name__ == '__main__':
    socketio.run(app)
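A minimal way to exercise this event API is a python-socketio client. The sketch below is a hypothetical smoke test, not part of app.py: the port, phone number, and test index are placeholders, and it assumes the server above is running locally and that the number exists in TN_CM_TRGTER_INFO.

# Hypothetical smoke-test client for the Socket.IO API above (requires python-socketio).
import socketio

sio = socketio.Client()

@sio.on('patientJoin')
def on_patient_join(ok):
    # Server replies True if the phone number was found in TN_CM_TRGTER_INFO.
    print('patientJoin ->', ok)

@sio.on('startTest')
def on_start_test(info):
    print('test started:', info['testType'])

sio.connect('http://localhost:5000')
sio.emit('patientJoin', {'phoneNumber': '01012345678'})  # placeholder number
sio.emit('startTest', 0)  # index into testData.json
sio.wait()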
random_line_split
app.py
from flask import Flask,request from flask_socketio import SocketIO import logging import pymysql import json import pygame import threading import json import gspeech import time con = pymysql.connect(host='dementia.openlink.kr', user='admin', password='Opendb1234!@', db='openlink', charset='utf8') # 한글처리 (charset = 'utf8') app = Flask(__name__) app.config['SECRET_KEY'] = 'mysecret' socketio = SocketIO(app, cors_allowed_origins='*') app.debug = True #app.host = '0.0.0.0' app.port = 5000 logging.basicConfig(level=logging.ERROR) pygame.mixer.pre_init(24000) pygame.init() testing = False exit_event = threading.Event() test = [] patients= {} words = {} datas = {} stop=False with open('./testData.json', 'r') as f: datas = json.load(f) word = [] for x in datas[0]['questions']: word.append(x['content']['word']) words['words'] = word word = [] for x in datas[10]['questions']: word.append(x['content']['word']) words['single']=word anilist = ''' 1. 고양이 2. 강아지 3. 거북이 4. 토끼 5. 뱀 6. 사자 7. 호랑이 8. 표범 9. 치타 10. 하이에나 11. 기린 12. 코끼리 13. 코뿔소 14. 하마 15. 악어 16. 펭귄 17. 부엉이 18. 올빼미 19. 곰 20. 돼지 21. 소 22. 닭 23. 독수리 24. 타조 25. 고릴라 26. 오랑우탄 27. 침팬지 28. 원숭이 29. 코알라 30. 캥거루 31. 고래 32. 상어 33. 칠면조 34. 직박구리 35. 쥐 36. 청설모 37. 메추라기 38. 앵무새 39. 삵 40. 스라소니 41. 판다 42. 오소리 43. 오리 44. 거위 45. 백조 46. 두루미 47. 고슴도치 48. 두더지 49. 우파루파 50. 맹꽁이 51. 너구리 52. 개구리 53. 두꺼비 54. 카멜레온 55. 이구아나 56. 노루 57. 제비 58. 까지 59. 고라니 60. 수달 61. 당나귀 62. 순록 63. 염소 64. 공작 65. 바다표범 66. 들소 67. 박쥐 68. 참새 69. 물개 70. 바다사자 71. 살모사 72. 구렁이 73. 얼룩말 74. 산양 75. 멧돼지 76. 카피바라 77. 바다코끼리 78. 도롱뇽 79. 북극곰 80. 퓨마 81. 미어캣 82. 코요테 83. 라마 84. 딱따구리 85. 기러기 86. 비둘기 87. 스컹크 88. 아르마딜로 89. 돌고래 90. 까마귀 91. 매 92. 낙타 93. 여우 94. 사슴 95. 늑대 96. 재규어 97. 알파카 98. 양 99. 다람쥐 100. 담비 '''.split()[1::2] anilist = list(map(lambda x : x.strip(),anilist)) @app.route("/") def main(): return ' ' @socketio.on('connect') def on_connect(client): print('conn',client) @socketio.on('disconnect') def disconnect(): cur = con.cursor(pymysql.cursors.DictCursor) print('discon',request.sid) if request.sid in patients: sql = "DELETE FROM TN_Scheduler WHERE phoneNumber='{}'".format(patients[request.sid]) cur.execute(sql) con.commit() return @socketio.on('patientJoin') def checkpatient(data): phone = data['phoneNumber'] cur = con.cursor(pymysql.cursors.DictCur
ql = "SELECT patCd,NAME,BIRTH FROM TN_CM_TRGTER_INFO WHERE TEL_NO_1='{}' and TEL_NO_2='{}' and TEL_NO_3='{}'".format(phone[:3],phone[3:7],phone[7:]) cur.execute(sql) # 데이타 Fetch rows = cur.fetchone() print(rows,request.sid) if rows : socketio.emit('patientJoin',True) sql = "INSERT INTO TN_Scheduler (patCd,NAME,phoneNumber,BIRTH) VALUES (%s,%s,%s,%s)" val = (rows['patCd'],rows['NAME'],phone,rows['BIRTH']) cur.execute(sql,val) con.commit() patients[request.sid] = phone else : socketio.emit('patientJoin',False) @socketio.on('doctorJoin') def checkdoctor(data): print('doctorJoin',request.sid) cur = con.cursor(pymysql.cursors.DictCursor) sql = "SELECT USER_ID FROM TN_CM_USER_INFO WHERE LOGIN_ID='{}'".format(data['id']) cur.execute(sql) # 데이타 Fetch rows = cur.fetchone() userid = rows['USER_ID'] #print(patients[data['phoneNumber']]) if rows: socketio.emit('doctorJoin',True) @socketio.on('patientInfo') def getPatientInfo(): print('patientInfo') cur = con.cursor(pymysql.cursors.DictCursor) sql = "SELECT patCd,NAME,phoneNumber,BIRTH FROM TN_Scheduler" cur.execute(sql) # 데이타 Fetch rows = cur.fetchall() res = json.dumps(rows) # 전체 rows socketio.emit('patientInfo',res) @socketio.on('startTest') def startTest(index): print('start Test') info = datas[index] #print(info) socketio.emit('startTest',{'testType':info['type'],'narration':info['narration']}) @socketio.on('testFinished') def testFinished(): socketio.emit('testFinished',True) @socketio.on('startWords') def startWords(data): info = datas[0] print('start words',data,info) gsp = gspeech.Gspeech() stop = False findwords = [] word = words[data].copy() print(word) @socketio.on('stopWords') def stopWords(): stop = True while True: # 음성 인식 될때까지 대기 한다. stt = gsp.getText() finded = [] stt = stt.strip() print(stt) time.sleep(0.01) for r in word: if (r in stt) and not (r in finded): wordsResult = {'response':{'index':str(word.index(r)+1), 'phase':0, 'result':{'response':r,'score':1}}} #print(wordsResult) if not stop: print('emit result') socketio.emit('wordsResult',wordsResult) findwords.append(word.index(r)) finded.append(r) for x in findwords: word[x] = '!@' if (not stt) or stop: print('stop') break print('end startwords') @socketio.on('startSingleWords') def startSingleWordsMemory(data): print('startSingleWordsMemory',data) gsp = gspeech.Gspeech() stop = False findwords = [] word = words['single'].copy() print(word) order = 1 @socketio.on('stopSingleWords') def stopSingleWords(): stop = True while True: # 음성 인식 될때까지 대기 한다. stt = gsp.getText() finded = [] stt = stt.strip() print(stt) time.sleep(0.01) for r in word: if (r in stt) and not (r in finded): wordsResult = {'response':{'index':str(word.index(r)+1), 'phase':data, 'result':{'order':order,'word':r,'score':1}}} #print(wordsResult) if not stop: print('emit result') socketio.emit('SingleWordsResult',wordsResult) print('inc order') order +=1 findwords.append(word.index(r)) finded.append(r) break for x in findwords: word[x] = '!@' if (not stt) or stop: print('stop') break print('end startSingleWords') @socketio.on('startSM') def startSM(data): index = data['index'] correct = data['correct'] corrects = ['일','이','삼','사','오','육','칠','팔','구','십'] print('start SM') print(index,correct) gsp = gspeech.Gspeech() stop = False @socketio.on('stopSM') def stopSM(): stop = True while True: # 음성 인식 될때까지 대기 한다. 
stt = gsp.getText() finded = [] stt = stt.strip() print(stt) time.sleep(0.01) if stt in [correct,corrects[int(correct)-1]]: print('clear') stop = True Result = {'response':{'index':index, 'phase':0, 'result':{'response':int(correct) ,'score':1}}} #print(wordsResult) print('emit result') socketio.emit('SMResult',Result) if (not stt) or stop: print('stop') break print('end SM') @socketio.on('startSMM') def startSMM(data): index = data['index'] correct = data['correct'] print('start SMM') print(index,correct) gsp = gspeech.Gspeech() stop = False @socketio.on('stopSMM') def stopSM(): stop = True while True: # 음성 인식 될때까지 대기 한다. stt = gsp.getText() stt = stt.strip() print(stt) time.sleep(0.01) if stt == '예': stop = True Result = {'response':{'index':index, 'phase':0, 'result':{'response':True ,'score':1*(correct == 0)}}} #print(wordsResult) print('emit result') socketio.emit('SMMResult',Result) elif stt == '아니오' or stt=='아니요': stop = True Result = {'response':{'index':index, 'phase':0, 'result':{'response': False,'score':1*(correct == 1)}}} #print(wordsResult) print('emit result') socketio.emit('SMMResult',Result) if (not stt) or stop: print('stop') break print('end SMM') @socketio.on('startStickMemory') def startStickMemory(element): print(element) info = datas[9].copy() index = element['index'] correct = element['content']['correct'] print(info) socketio.emit('startNarration',{'testType':info['type'],'narration':info['narration'],'questions':element}) gsp = gspeech.Gspeech() stop = False @socketio.on('stopStickMemory') def stopStickMemory(): stop = True while True: # 음성 인식 될때까지 대기 한다. stt = gsp.getText() stt = stt.strip() print(stt) time.sleep(0.01) if stt == '예': stop = True Result = {'response':{'index':index, 'phase':0, 'result':{'response':True ,'score':1*(correct == '0')}}} #print(wordsResult) print('emit result') socketio.emit('stickMemoryResult',Result) elif stt == '아니오' or stt=='아니요': stop = True Result = {'response':{'index':index, 'phase':0, 'result':{'response': False,'score':1*(correct == '1')}}} #print(wordsResult) print('emit result') socketio.emit('stickMemoryResult',Result) if (not stt) or stop: print('stop') gsp.mic.Pause() break @socketio.on('startShapeColor') def startShapeColor(element): print(element) @socketio.on('stopShapeColor') def stopShapeColor(): stop = True info = datas[13].copy() index = element['index'] if int(index) < 0 : index = '0' word = element['content']['word'] socketio.emit('startNarration',{'testType':info['type'],'narration':info['narration'],'questions':element}) wordlist = ['무', '수박', '귤', '호두', '당근', '깻잎', '연근', '오이', '고추', '땅콩', '말', '토끼', '다람쥐', '금붕어', '돼지', '오리'] corlist = [1,2,1,1,2,2,2,1,1,2,1,2,2,1,2,1] numberlist = [['1','일'],['2','이']] cor = numberlist[corlist[int(index)]-1] gsp = gspeech.Gspeech() stop = False while True: # 음성 인식 될때까지 대기 한다. 
stt = gsp.getText() stt = stt.strip() print(stt) time.sleep(0.01) print('go') stop = True Result = {'response':{'index':index if int(index) > 0 else str(int(index)-1), 'phase':0, 'result':{'word': stt,'imageScore':1*((cor[0] in stt) or (cor[1] in stt)), 'nameScore':1*(word in stt)}}} #print(wordsResult) print('emit result') socketio.emit('shapeColorResult',Result) if (not stt) or stop: print('stop') gsp.pauseMic() break @socketio.on('startwordFluencyTest') def startShapeColor(element): start = time.time() #print(element) gsp = gspeech.Gspeech() seq = ['0 ~ 15 seconds', '16 ~ 30 seconds', '31 ~ 45 seconds', '46 ~ 60 seconds'] #print(anilist) @socketio.on('stopwordFluency') def stopwordFluency(): global stop stop = True while True: if stop: print('stop') break # 음성 인식 될때까지 대기 한다. stt = gsp.getText() stt = stt.strip() time.sleep(0.01) print(stt) score = 0 corlist = [] for x in stt.split(): if x in anilist: score +=1 corlist.append(x) if score >0: endtime = int(time.time() - start) index = str(int(endtime/15)+1) print(endtime,score) Result = {'response':{ 'index':index, 'phase':0, 'result':{'word': ' '.join(corlist),'score':score}}} print('emit result') socketio.emit('wordFluencyResult',Result) cors_calTest= {'add':[[40,80,150],[30,70,120]], 'sub':[[10,40,90],[20,50,70]],'mult':[[300,60,250],[200,40,150]], 'div':[['네','다섯','아홉'],['세','세','일곱']]} @socketio.on('startcalTest') def startcalTest(element): #print(element) print('startCal') stop=False gsp = gspeech.Gspeech() @socketio.on('stopCalTest') def stopCalTest(): print('stopCalTest') stop = True ttype = element[0] index = element[1] cor = cors_calTest[ttype][int(index)-1][element[2]] #print(ttype,index) #print(cor) while True: if stop: print('stop') break # 음성 인식 될때까지 대기 한다. stt = gsp.getText() stt = stt.strip() time.sleep(0.01) print(stt) Result = {'response':{ 'index':'-'.join([ttype,index,str(element[2]+1)]), 'phase':0, 'result':{'response':stt ,'score':1*(str(cor) in stt)}}} print('emit result') socketio.emit('calResult',Result) socketio.emit('next',[ttype,index,element[2]]) break gsp.pauseMic() print('endCalTest') numbersetdata = datas[5]['questions'].copy() cell = [] cells = [[],[]] for x in numbersetdata: cells[0].append(x['content']['leftSet']) cells[1].append(x['content']['rightSet']) nas=[] na = {'narration':'여기 맨 밑줄에 칸마다 숫자들이 적혀있습니다. 각 칸에 적힌 숫자를 2,1,3 이런 식으로 한 개씩만 읽으십시오. 한 번 해보세요. 자, 이제 제가 시작하면 여기 처음부터 끝까지 정확하면서도 최대한 빠르게 각 칸에 적힌 숫자를 말해보세요. 시작.', 'cells' : cells[0]} nas.append(na) na = {'narration':'이번에는 각 칸에 적힌 수자의 개수를 말씀해 보십시오. 한 개, 두 개, 세 개로 세지 마시고 숫자가 한 개 있으면 1, 두 개 있으면 2 이렇게 한 번 해보세요. 잘했습니다. 자, 이제 제가 시작하면 여기 처음부터 끝까지 정확하면서도 최대한 빠르게 각 칸에 몇 개의 숫자가 적혀있는지, 그 개수를 말씀해보십시오. 시작.', 'cells' : cells[1]} nas.append(na) numcorlist = [['1','일'],['2','이'],['3','삼']] @socketio.on('startNumberSet') def startNumberSet(data): stop=False gsp = gspeech.Gspeech() @socketio.on('stopnumSetTest') def stopnumSetTest(): print('stopnumSetTest') stop = True questions = numbersetdata[data[1]] print(questions,data) #print(nas[data[0]]) cors = [] cors.append(list(set(list(questions['content'].values())[0]))[0]) cors.append(len(list(questions['content'].values())[1])) print(cors) socketio.emit('startNarration',{'testType':'numbersSet', 'narration':nas[data[0]], 'questions':questions}) while True: if stop: print('stop') break # 음성 인식 될때까지 대기 한다. 
stt = gsp.getText() stt = stt.strip() time.sleep(0.01) print(stt) Result = {'response':{ 'index':questions['index'], 'phase':data[0], 'result':{'response':stt ,'score':1*(str(numcorlist[cors[data[0]]-1]) in stt)}}} print('emit result') socketio.emit('numbersetResult',Result) break gsp.pauseMic() print('endnumTest') if __name__ == '__main__': socketio.run(app)
sor) s
identifier_body
app.py
from flask import Flask,request from flask_socketio import SocketIO import logging import pymysql import json import pygame import threading import json import gspeech import time con = pymysql.connect(host='dementia.openlink.kr', user='admin', password='Opendb1234!@', db='openlink', charset='utf8') # 한글처리 (charset = 'utf8') app = Flask(__name__) app.config['SECRET_KEY'] = 'mysecret' socketio = SocketIO(app, cors_allowed_origins='*') app.debug = True #app.host = '0.0.0.0' app.port = 5000 logging.basicConfig(level=logging.ERROR) pygame.mixer.pre_init(24000) pygame.init() testing = False exit_event = threading.Event() test = [] patients= {} words = {} datas = {} stop=False with open('./testData.json', 'r') as f: datas = json.load(f) word = [] for x in datas[0]['questions']: word.append(x['content']['word']) words['words'] = word word = [] for x in datas[10]['questions']: word.append(x['content']['word']) words['single']=word anilist = ''' 1. 고양이 2. 강아지 3. 거북이 4. 토끼 5. 뱀 6. 사자 7. 호랑이 8. 표범 9. 치타 10. 하이에나 11. 기린 12. 코끼리 13. 코뿔소 14. 하마 15. 악어 16. 펭귄 17. 부엉이 18. 올빼미 19. 곰 20. 돼지 21. 소 22. 닭 23. 독수리 24. 타조 25. 고릴라 26. 오랑우탄 27. 침팬지 28. 원숭이 29. 코알라 30. 캥거루 31. 고래 32. 상어 33. 칠면조 34. 직박구리 35. 쥐 36. 청설모 37. 메추라기 38. 앵무새 39. 삵 40. 스라소니 41. 판다 42. 오소리 43. 오리 44. 거위 45. 백조 46. 두루미 47. 고슴도치 48. 두더지 49. 우파루파 50. 맹꽁이 51. 너구리 52. 개구리 53. 두꺼비 54. 카멜레온 55. 이구아나 56. 노루 57. 제비 58. 까지 59. 고라니 60. 수달 61. 당나귀 62. 순록 63. 염소 64. 공작 65. 바다표범 66. 들소 67. 박쥐 68. 참새 69. 물개 70. 바다사자 71. 살모사 72. 구렁이 73. 얼룩말 74. 산양 75. 멧돼지 76. 카피바라 77. 바다코끼리 78. 도롱뇽 79. 북극곰 80. 퓨마 81. 미어캣 82. 코요테 83. 라마 84. 딱따구리 85. 기러기 86. 비둘기 87. 스컹크 88. 아르마딜로 89. 돌고래 90. 까마귀 91. 매 92. 낙타 93. 여우 94. 사슴 95. 늑대 96. 재규어 97. 알파카 98. 양 99. 다람쥐 100. 담비 '''.split()[1::2] anilist = list(map(lambda x : x.strip(),anilist)) @app.route("/") def main(): return ' ' @socketio.on('connect') def on_connect(client): print('conn',client) @socketio.on('disconnect') def disconnect(): cur = con.cursor(pymysql.cursors.DictCursor) print('discon',request.sid) if request.sid in patients: sql = "DELETE FROM TN_Scheduler WHERE phoneNumber='{}'".format(patients[request.sid]) cur.execute(sql) con.commit() return @socketio.on('patientJoin') def checkpatient(data): phone = data['phoneNumber'] cur = con.cursor(pymysql.cursors.DictCursor) sql = "SELECT patCd,NAME,BIRTH FROM TN_CM_TRGTER_INFO WHERE TEL_NO_1='{}' and TEL_NO_2='{}' and TEL_NO_3='{}'".format(phone[:3],phone[3:7],phone[7:]) cur.execute(sql) # 데이타 Fetch rows = cur.fetchone() print(rows,request.sid) if rows : socketio.emit('patientJoin',True) sql = "INSERT INTO TN_Scheduler (patCd,NAME,phoneNumber,BIRTH) VALUES (%s,%s,%s,%s)" val = (rows['patCd'],rows['NAME'],phone,rows['BIRTH']) cur.execute(sql,val) con.commit() patients[request.sid] = phone else : socketio.emit('patientJoin',False) @socketio.on('doctorJoin') def checkdoctor(data): print('doctorJoin',request.sid) cur = con.cursor(pymysql.cursors.DictCursor) sql = "SELECT USER_ID FROM TN_CM_USER_INFO WHERE LOGIN_ID='{}'".format(da
Info') cur = con.cursor(pymysql.cursors.DictCursor) sql = "SELECT patCd,NAME,phoneNumber,BIRTH FROM TN_Scheduler" cur.execute(sql) # 데이타 Fetch rows = cur.fetchall() res = json.dumps(rows) # 전체 rows socketio.emit('patientInfo',res) @socketio.on('startTest') def startTest(index): print('start Test') info = datas[index] #print(info) socketio.emit('startTest',{'testType':info['type'],'narration':info['narration']}) @socketio.on('testFinished') def testFinished(): socketio.emit('testFinished',True) @socketio.on('startWords') def startWords(data): info = datas[0] print('start words',data,info) gsp = gspeech.Gspeech() stop = False findwords = [] word = words[data].copy() print(word) @socketio.on('stopWords') def stopWords(): stop = True while True: # 음성 인식 될때까지 대기 한다. stt = gsp.getText() finded = [] stt = stt.strip() print(stt) time.sleep(0.01) for r in word: if (r in stt) and not (r in finded): wordsResult = {'response':{'index':str(word.index(r)+1), 'phase':0, 'result':{'response':r,'score':1}}} #print(wordsResult) if not stop: print('emit result') socketio.emit('wordsResult',wordsResult) findwords.append(word.index(r)) finded.append(r) for x in findwords: word[x] = '!@' if (not stt) or stop: print('stop') break print('end startwords') @socketio.on('startSingleWords') def startSingleWordsMemory(data): print('startSingleWordsMemory',data) gsp = gspeech.Gspeech() stop = False findwords = [] word = words['single'].copy() print(word) order = 1 @socketio.on('stopSingleWords') def stopSingleWords(): stop = True while True: # 음성 인식 될때까지 대기 한다. stt = gsp.getText() finded = [] stt = stt.strip() print(stt) time.sleep(0.01) for r in word: if (r in stt) and not (r in finded): wordsResult = {'response':{'index':str(word.index(r)+1), 'phase':data, 'result':{'order':order,'word':r,'score':1}}} #print(wordsResult) if not stop: print('emit result') socketio.emit('SingleWordsResult',wordsResult) print('inc order') order +=1 findwords.append(word.index(r)) finded.append(r) break for x in findwords: word[x] = '!@' if (not stt) or stop: print('stop') break print('end startSingleWords') @socketio.on('startSM') def startSM(data): index = data['index'] correct = data['correct'] corrects = ['일','이','삼','사','오','육','칠','팔','구','십'] print('start SM') print(index,correct) gsp = gspeech.Gspeech() stop = False @socketio.on('stopSM') def stopSM(): stop = True while True: # 음성 인식 될때까지 대기 한다. stt = gsp.getText() finded = [] stt = stt.strip() print(stt) time.sleep(0.01) if stt in [correct,corrects[int(correct)-1]]: print('clear') stop = True Result = {'response':{'index':index, 'phase':0, 'result':{'response':int(correct) ,'score':1}}} #print(wordsResult) print('emit result') socketio.emit('SMResult',Result) if (not stt) or stop: print('stop') break print('end SM') @socketio.on('startSMM') def startSMM(data): index = data['index'] correct = data['correct'] print('start SMM') print(index,correct) gsp = gspeech.Gspeech() stop = False @socketio.on('stopSMM') def stopSM(): stop = True while True: # 음성 인식 될때까지 대기 한다. 
stt = gsp.getText() stt = stt.strip() print(stt) time.sleep(0.01) if stt == '예': stop = True Result = {'response':{'index':index, 'phase':0, 'result':{'response':True ,'score':1*(correct == 0)}}} #print(wordsResult) print('emit result') socketio.emit('SMMResult',Result) elif stt == '아니오' or stt=='아니요': stop = True Result = {'response':{'index':index, 'phase':0, 'result':{'response': False,'score':1*(correct == 1)}}} #print(wordsResult) print('emit result') socketio.emit('SMMResult',Result) if (not stt) or stop: print('stop') break print('end SMM') @socketio.on('startStickMemory') def startStickMemory(element): print(element) info = datas[9].copy() index = element['index'] correct = element['content']['correct'] print(info) socketio.emit('startNarration',{'testType':info['type'],'narration':info['narration'],'questions':element}) gsp = gspeech.Gspeech() stop = False @socketio.on('stopStickMemory') def stopStickMemory(): stop = True while True: # 음성 인식 될때까지 대기 한다. stt = gsp.getText() stt = stt.strip() print(stt) time.sleep(0.01) if stt == '예': stop = True Result = {'response':{'index':index, 'phase':0, 'result':{'response':True ,'score':1*(correct == '0')}}} #print(wordsResult) print('emit result') socketio.emit('stickMemoryResult',Result) elif stt == '아니오' or stt=='아니요': stop = True Result = {'response':{'index':index, 'phase':0, 'result':{'response': False,'score':1*(correct == '1')}}} #print(wordsResult) print('emit result') socketio.emit('stickMemoryResult',Result) if (not stt) or stop: print('stop') gsp.mic.Pause() break @socketio.on('startShapeColor') def startShapeColor(element): print(element) @socketio.on('stopShapeColor') def stopShapeColor(): stop = True info = datas[13].copy() index = element['index'] if int(index) < 0 : index = '0' word = element['content']['word'] socketio.emit('startNarration',{'testType':info['type'],'narration':info['narration'],'questions':element}) wordlist = ['무', '수박', '귤', '호두', '당근', '깻잎', '연근', '오이', '고추', '땅콩', '말', '토끼', '다람쥐', '금붕어', '돼지', '오리'] corlist = [1,2,1,1,2,2,2,1,1,2,1,2,2,1,2,1] numberlist = [['1','일'],['2','이']] cor = numberlist[corlist[int(index)]-1] gsp = gspeech.Gspeech() stop = False while True: # 음성 인식 될때까지 대기 한다. stt = gsp.getText() stt = stt.strip() print(stt) time.sleep(0.01) print('go') stop = True Result = {'response':{'index':index if int(index) > 0 else str(int(index)-1), 'phase':0, 'result':{'word': stt,'imageScore':1*((cor[0] in stt) or (cor[1] in stt)), 'nameScore':1*(word in stt)}}} #print(wordsResult) print('emit result') socketio.emit('shapeColorResult',Result) if (not stt) or stop: print('stop') gsp.pauseMic() break @socketio.on('startwordFluencyTest') def startShapeColor(element): start = time.time() #print(element) gsp = gspeech.Gspeech() seq = ['0 ~ 15 seconds', '16 ~ 30 seconds', '31 ~ 45 seconds', '46 ~ 60 seconds'] #print(anilist) @socketio.on('stopwordFluency') def stopwordFluency(): global stop stop = True while True: if stop: print('stop') break # 음성 인식 될때까지 대기 한다. 
stt = gsp.getText() stt = stt.strip() time.sleep(0.01) print(stt) score = 0 corlist = [] for x in stt.split(): if x in anilist: score +=1 corlist.append(x) if score >0: endtime = int(time.time() - start) index = str(int(endtime/15)+1) print(endtime,score) Result = {'response':{ 'index':index, 'phase':0, 'result':{'word': ' '.join(corlist),'score':score}}} print('emit result') socketio.emit('wordFluencyResult',Result) cors_calTest= {'add':[[40,80,150],[30,70,120]], 'sub':[[10,40,90],[20,50,70]],'mult':[[300,60,250],[200,40,150]], 'div':[['네','다섯','아홉'],['세','세','일곱']]} @socketio.on('startcalTest') def startcalTest(element): #print(element) print('startCal') stop=False gsp = gspeech.Gspeech() @socketio.on('stopCalTest') def stopCalTest(): print('stopCalTest') stop = True ttype = element[0] index = element[1] cor = cors_calTest[ttype][int(index)-1][element[2]] #print(ttype,index) #print(cor) while True: if stop: print('stop') break # 음성 인식 될때까지 대기 한다. stt = gsp.getText() stt = stt.strip() time.sleep(0.01) print(stt) Result = {'response':{ 'index':'-'.join([ttype,index,str(element[2]+1)]), 'phase':0, 'result':{'response':stt ,'score':1*(str(cor) in stt)}}} print('emit result') socketio.emit('calResult',Result) socketio.emit('next',[ttype,index,element[2]]) break gsp.pauseMic() print('endCalTest') numbersetdata = datas[5]['questions'].copy() cell = [] cells = [[],[]] for x in numbersetdata: cells[0].append(x['content']['leftSet']) cells[1].append(x['content']['rightSet']) nas=[] na = {'narration':'여기 맨 밑줄에 칸마다 숫자들이 적혀있습니다. 각 칸에 적힌 숫자를 2,1,3 이런 식으로 한 개씩만 읽으십시오. 한 번 해보세요. 자, 이제 제가 시작하면 여기 처음부터 끝까지 정확하면서도 최대한 빠르게 각 칸에 적힌 숫자를 말해보세요. 시작.', 'cells' : cells[0]} nas.append(na) na = {'narration':'이번에는 각 칸에 적힌 수자의 개수를 말씀해 보십시오. 한 개, 두 개, 세 개로 세지 마시고 숫자가 한 개 있으면 1, 두 개 있으면 2 이렇게 한 번 해보세요. 잘했습니다. 자, 이제 제가 시작하면 여기 처음부터 끝까지 정확하면서도 최대한 빠르게 각 칸에 몇 개의 숫자가 적혀있는지, 그 개수를 말씀해보십시오. 시작.', 'cells' : cells[1]} nas.append(na) numcorlist = [['1','일'],['2','이'],['3','삼']] @socketio.on('startNumberSet') def startNumberSet(data): stop=False gsp = gspeech.Gspeech() @socketio.on('stopnumSetTest') def stopnumSetTest(): print('stopnumSetTest') stop = True questions = numbersetdata[data[1]] print(questions,data) #print(nas[data[0]]) cors = [] cors.append(list(set(list(questions['content'].values())[0]))[0]) cors.append(len(list(questions['content'].values())[1])) print(cors) socketio.emit('startNarration',{'testType':'numbersSet', 'narration':nas[data[0]], 'questions':questions}) while True: if stop: print('stop') break # 음성 인식 될때까지 대기 한다. stt = gsp.getText() stt = stt.strip() time.sleep(0.01) print(stt) Result = {'response':{ 'index':questions['index'], 'phase':data[0], 'result':{'response':stt ,'score':1*(str(numcorlist[cors[data[0]]-1]) in stt)}}} print('emit result') socketio.emit('numbersetResult',Result) break gsp.pauseMic() print('endnumTest') if __name__ == '__main__': socketio.run(app)
ta['id']) cur.execute(sql) # 데이타 Fetch rows = cur.fetchone() userid = rows['USER_ID'] #print(patients[data['phoneNumber']]) if rows: socketio.emit('doctorJoin',True) @socketio.on('patientInfo') def getPatientInfo(): print('patient
conditional_block
app.py
from flask import Flask,request from flask_socketio import SocketIO import logging import pymysql import json import pygame import threading import json import gspeech import time con = pymysql.connect(host='dementia.openlink.kr', user='admin', password='Opendb1234!@', db='openlink', charset='utf8') # 한글처리 (charset = 'utf8') app = Flask(__name__) app.config['SECRET_KEY'] = 'mysecret' socketio = SocketIO(app, cors_allowed_origins='*') app.debug = True #app.host = '0.0.0.0' app.port = 5000 logging.basicConfig(level=logging.ERROR) pygame.mixer.pre_init(24000) pygame.init() testing = False exit_event = threading.Event() test = [] patients= {} words = {} datas = {} stop=False with open('./testData.json', 'r') as f: datas = json.load(f) word = [] for x in datas[0]['questions']: word.append(x['content']['word']) words['words'] = word word = [] for x in datas[10]['questions']: word.append(x['content']['word']) words['single']=word anilist = ''' 1. 고양이 2. 강아지 3. 거북이 4. 토끼 5. 뱀 6. 사자 7. 호랑이 8. 표범 9. 치타 10. 하이에나 11. 기린 12. 코끼리 13. 코뿔소 14. 하마 15. 악어 16. 펭귄 17. 부엉이 18. 올빼미 19. 곰 20. 돼지 21. 소 22. 닭 23. 독수리 24. 타조 25. 고릴라 26. 오랑우탄 27. 침팬지 28. 원숭이 29. 코알라 30. 캥거루 31. 고래 32. 상어 33. 칠면조 34. 직박구리 35. 쥐 36. 청설모 37. 메추라기 38. 앵무새 39. 삵 40. 스라소니 41. 판다 42. 오소리 43. 오리 44. 거위 45. 백조 46. 두루미 47. 고슴도치 48. 두더지 49. 우파루파 50. 맹꽁이 51. 너구리 52. 개구리 53. 두꺼비 54. 카멜레온 55. 이구아나 56. 노루 57. 제비 58. 까지 59. 고라니 60. 수달 61. 당나귀 62. 순록 63. 염소 64. 공작 65. 바다표범 66. 들소 67. 박쥐 68. 참새 69. 물개 70. 바다사자 71. 살모사 72. 구렁이 73. 얼룩말 74. 산양 75. 멧돼지 76. 카피바라 77. 바다코끼리 78. 도롱뇽 79. 북극곰 80. 퓨마 81. 미어캣 82. 코요테 83. 라마 84. 딱따구리 85. 기러기 86. 비둘기 87. 스컹크 88. 아르마딜로 89. 돌고래 90. 까마귀 91. 매 92. 낙타 93. 여우 94. 사슴 95. 늑대 96. 재규어 97. 알파카 98. 양 99. 다람쥐 100. 담비 '''.split()[1::2] anilist = list(map(lambda x : x.strip(),anilist)) @app.route("/") def main(): return ' ' @socketio.on('connect') def on_connect(client): print('conn',client) @socketio.on('disconnect') def disconnect(): cur = con.cursor(pymysql.cursors.DictCursor) print('discon',request.sid) if request.sid in patients: sql = "DELETE FROM TN_Scheduler WHERE phoneNumber='{}'".format(patients[request.sid]) cur.execute(sql) con.commit() return @socketio.on('patientJoin') def checkpatient(data): phone = data['phoneNumber'] cur = con.cursor(pymysql.cursors.DictCursor) sql = "SELECT patCd,NAME,BIRTH FROM TN_CM_TRGTER_INFO WHERE TEL_NO_1='{}' and TEL_NO_2='{}' and TEL_NO_3='{}'".format(phone[:3],phone[3:7],phone[7:]) cur.execute(sql) # 데이타 Fetch rows = cur.fetchone() print(rows,request.sid) if rows : socketio.emit('patientJoin',True) sql = "INSERT INTO TN_Scheduler (patCd,NAME,phoneNumber,BIRTH) VALUES (%s,%s,%s,%s)" val = (rows['patCd'],rows['NAME'],phone,rows['BIRTH']) cur.execute(sql,val) con.commit() patients[request.sid] = phone else : socketio.emit('patientJoin',False) @socketio.on('doctorJoin') def checkdoctor(data): print('doctorJoin',request.sid) cur = con.cursor(pymysql.cursors.DictCursor) sql = "SELECT USER_ID FROM TN_CM_USER_INFO WHERE LOGIN_ID='{}'".format(data['id']) cur.execute(sql) # 데이타 Fetch rows = cur.fetchone() userid = rows['USER_ID'] #print(patients[data['phoneNumber']]) if rows: socketio.emit('doctorJoin',True) @socketio.on('patientInfo') def getPatientInfo(): print('patientInfo') cur = con.cursor(pymysql.cursors.DictCursor) sql = "SELECT patCd,NAME,phoneNumber,BIRTH FROM TN_Scheduler" cur.execute(sql) # 데이타 Fetch rows = cur.fetchall() res = json.dumps(rows) # 전체 rows socketio.emit('patientInfo',res) @socketio.on('startTest') def startTest(index): print('start Test') info = datas[index] 
#print(info) socketio.emit('startTest',{'testType':info['type'],'narration':info['narration']}) @socketio.on('testFinished') def testFinished(): socketio.emit('testFinished',True) @socketio.on('startWords') def startWords(data): info = datas[0] print('start words',data,info) gsp = gspeech.Gspeech() stop = False findwords = [] word = words[data].copy() print(word) @socketio.on('stopWords') def stopWords(): stop = True while True: # 음성 인식 될때까지 대기 한다. stt = gsp.getText() finded = [] stt = stt.strip() print(stt) time.sleep(0.01) for r in word: if (r in stt) and not (r in finded): wordsResult = {'response':{'index':str(word.index(r)+1), 'phase':0, 'result':{'response':r,'score':1}}} #print(wordsResult) if not stop: print('emit result') socketio.emit('wordsResult',wordsResult) findwords.append(word.index(r)) finded.append(r) for x in findwords: word[x] = '!@' if (not stt) or stop: print('stop') break print('end startwords') @socketio.on('startSingleWords') def startSingleWordsMemory(data): print('startSingleWordsMemory',data) gsp = gspeech.Gspeech() stop = False findwords = [] word = words['single'].copy() print(word) order = 1 @socketio.on('stopSingleWords') def stopSingleWords(): stop = True while True: # 음성 인식 될때까지 대기 한다. stt = gsp.getText() finded = [] stt = stt.strip() print(stt) time.sleep(0.01) for r in word: if (r in stt) and not (r in finded): wordsResult = {'response':{'index':str(word.index(r)+1), 'phase':data, 'result':{'order':order,'word':r,'score':1}}} #print(wordsResult) if not stop: print('emit result')
socketio.emit('SingleWordsResult',wordsResult) print('inc order') order +=1 findwords.append(word.index(r)) finded.append(r) break for x in findwords: word[x] = '!@' if (not stt) or stop: print('stop') break print('end startSingleWords') @socketio.on('startSM') def startSM(data): index = data['index'] correct = data['correct'] corrects = ['일','이','삼','사','오','육','칠','팔','구','십'] print('start SM') print(index,correct) gsp = gspeech.Gspeech() stop = False @socketio.on('stopSM') def stopSM(): stop = True while True: # 음성 인식 될때까지 대기 한다. stt = gsp.getText() finded = [] stt = stt.strip() print(stt) time.sleep(0.01) if stt in [correct,corrects[int(correct)-1]]: print('clear') stop = True Result = {'response':{'index':index, 'phase':0, 'result':{'response':int(correct) ,'score':1}}} #print(wordsResult) print('emit result') socketio.emit('SMResult',Result) if (not stt) or stop: print('stop') break print('end SM') @socketio.on('startSMM') def startSMM(data): index = data['index'] correct = data['correct'] print('start SMM') print(index,correct) gsp = gspeech.Gspeech() stop = False @socketio.on('stopSMM') def stopSM(): stop = True while True: # 음성 인식 될때까지 대기 한다. stt = gsp.getText() stt = stt.strip() print(stt) time.sleep(0.01) if stt == '예': stop = True Result = {'response':{'index':index, 'phase':0, 'result':{'response':True ,'score':1*(correct == 0)}}} #print(wordsResult) print('emit result') socketio.emit('SMMResult',Result) elif stt == '아니오' or stt=='아니요': stop = True Result = {'response':{'index':index, 'phase':0, 'result':{'response': False,'score':1*(correct == 1)}}} #print(wordsResult) print('emit result') socketio.emit('SMMResult',Result) if (not stt) or stop: print('stop') break print('end SMM') @socketio.on('startStickMemory') def startStickMemory(element): print(element) info = datas[9].copy() index = element['index'] correct = element['content']['correct'] print(info) socketio.emit('startNarration',{'testType':info['type'],'narration':info['narration'],'questions':element}) gsp = gspeech.Gspeech() stop = False @socketio.on('stopStickMemory') def stopStickMemory(): stop = True while True: # 음성 인식 될때까지 대기 한다. stt = gsp.getText() stt = stt.strip() print(stt) time.sleep(0.01) if stt == '예': stop = True Result = {'response':{'index':index, 'phase':0, 'result':{'response':True ,'score':1*(correct == '0')}}} #print(wordsResult) print('emit result') socketio.emit('stickMemoryResult',Result) elif stt == '아니오' or stt=='아니요': stop = True Result = {'response':{'index':index, 'phase':0, 'result':{'response': False,'score':1*(correct == '1')}}} #print(wordsResult) print('emit result') socketio.emit('stickMemoryResult',Result) if (not stt) or stop: print('stop') gsp.mic.Pause() break @socketio.on('startShapeColor') def startShapeColor(element): print(element) @socketio.on('stopShapeColor') def stopShapeColor(): stop = True info = datas[13].copy() index = element['index'] if int(index) < 0 : index = '0' word = element['content']['word'] socketio.emit('startNarration',{'testType':info['type'],'narration':info['narration'],'questions':element}) wordlist = ['무', '수박', '귤', '호두', '당근', '깻잎', '연근', '오이', '고추', '땅콩', '말', '토끼', '다람쥐', '금붕어', '돼지', '오리'] corlist = [1,2,1,1,2,2,2,1,1,2,1,2,2,1,2,1] numberlist = [['1','일'],['2','이']] cor = numberlist[corlist[int(index)]-1] gsp = gspeech.Gspeech() stop = False while True: # 음성 인식 될때까지 대기 한다. 
        stt = gsp.getText()
        stt = stt.strip()
        print(stt)
        time.sleep(0.01)
        print('go')
        stop = True
        Result = {'response': {'index': index if int(index) > 0 else str(int(index)-1), 'phase': 0,
                               'result': {'word': stt,
                                          'imageScore': 1*((cor[0] in stt) or (cor[1] in stt)),
                                          'nameScore': 1*(word in stt)}}}
        #print(wordsResult)
        print('emit result')
        socketio.emit('shapeColorResult', Result)
        if (not stt) or stop:
            print('stop')
            gsp.pauseMic()
            break

@socketio.on('startwordFluencyTest')
def startwordFluencyTest(element):
    start = time.time()
    #print(element)
    gsp = gspeech.Gspeech()
    stop = False
    seq = ['0 ~ 15 seconds', '16 ~ 30 seconds', '31 ~ 45 seconds', '46 ~ 60 seconds']
    #print(anilist)

    @socketio.on('stopwordFluency')
    def stopwordFluency():
        nonlocal stop
        stop = True

    while True:
        if stop:
            print('stop')
            break
        # Wait here until speech is recognized.
        stt = gsp.getText()
        stt = stt.strip()
        time.sleep(0.01)
        print(stt)
        score = 0
        corlist = []
        for x in stt.split():
            if x in anilist:
                score += 1
                corlist.append(x)
        if score > 0:
            endtime = int(time.time() - start)
            index = str(int(endtime/15)+1)
            print(endtime, score)
            Result = {'response': {'index': index, 'phase': 0,
                                   'result': {'word': ' '.join(corlist), 'score': score}}}
            print('emit result')
            socketio.emit('wordFluencyResult', Result)

cors_calTest = {'add': [[40,80,150],[30,70,120]], 'sub': [[10,40,90],[20,50,70]],
                'mult': [[300,60,250],[200,40,150]], 'div': [['네','다섯','아홉'],['세','세','일곱']]}

@socketio.on('startcalTest')
def startcalTest(element):
    #print(element)
    print('startCal')
    stop = False
    gsp = gspeech.Gspeech()

    @socketio.on('stopCalTest')
    def stopCalTest():
        nonlocal stop
        print('stopCalTest')
        stop = True

    ttype = element[0]
    index = element[1]
    cor = cors_calTest[ttype][int(index)-1][element[2]]
    #print(ttype,index)
    #print(cor)
    while True:
        if stop:
            print('stop')
            break
        # Wait here until speech is recognized.
        stt = gsp.getText()
        stt = stt.strip()
        time.sleep(0.01)
        print(stt)
        Result = {'response': {'index': '-'.join([ttype, index, str(element[2]+1)]), 'phase': 0,
                               'result': {'response': stt, 'score': 1*(str(cor) in stt)}}}
        print('emit result')
        socketio.emit('calResult', Result)
        socketio.emit('next', [ttype, index, element[2]])
        break
    gsp.pauseMic()
    print('endCalTest')

numbersetdata = datas[5]['questions'].copy()
cell = []
cells = [[], []]
for x in numbersetdata:
    cells[0].append(x['content']['leftSet'])
    cells[1].append(x['content']['rightSet'])
nas = []
na = {'narration': '여기 맨 밑줄에 칸마다 숫자들이 적혀있습니다. 각 칸에 적힌 숫자를 2,1,3 이런 식으로 한 개씩만 읽으십시오. 한 번 해보세요. 자, 이제 제가 시작하면 여기 처음부터 끝까지 정확하면서도 최대한 빠르게 각 칸에 적힌 숫자를 말해보세요. 시작.',
      'cells': cells[0]}
nas.append(na)
na = {'narration': '이번에는 각 칸에 적힌 숫자의 개수를 말씀해 보십시오. 한 개, 두 개, 세 개로 세지 마시고 숫자가 한 개 있으면 1, 두 개 있으면 2 이렇게 한 번 해보세요. 잘했습니다. 자, 이제 제가 시작하면 여기 처음부터 끝까지 정확하면서도 최대한 빠르게 각 칸에 몇 개의 숫자가 적혀있는지, 그 개수를 말씀해보십시오. 시작.',
      'cells': cells[1]}
nas.append(na)
numcorlist = [['1','일'],['2','이'],['3','삼']]

@socketio.on('startNumberSet')
def startNumberSet(data):
    stop = False
    gsp = gspeech.Gspeech()

    @socketio.on('stopnumSetTest')
    def stopnumSetTest():
        nonlocal stop
        print('stopnumSetTest')
        stop = True

    questions = numbersetdata[data[1]]
    print(questions, data)
    #print(nas[data[0]])
    cors = []
    cors.append(list(set(list(questions['content'].values())[0]))[0])
    cors.append(len(list(questions['content'].values())[1]))
    print(cors)
    socketio.emit('startNarration', {'testType': 'numbersSet', 'narration': nas[data[0]], 'questions': questions})
    while True:
        if stop:
            print('stop')
            break
        # Wait here until speech is recognized.
        stt = gsp.getText()
        stt = stt.strip()
        time.sleep(0.01)
        print(stt)
        # Score 1 if the spoken text contains either the digit or its Korean reading.
        Result = {'response': {'index': questions['index'], 'phase': data[0],
                               'result': {'response': stt,
                                          'score': 1*any(c in stt for c in numcorlist[cors[data[0]]-1])}}}
        print('emit result')
        socketio.emit('numbersetResult', Result)
        break
    gsp.pauseMic()
    print('endnumTest')

if __name__ == '__main__':
    socketio.run(app)
identifier_name
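A note on the stop-flag pattern used throughout the server above: each test handler runs a blocking recognition loop and registers a nested Socket.IO handler that flips a stop flag. In Python that flag must be declared nonlocal inside the nested handler, otherwise the assignment creates a fresh local and the loop never sees it. A minimal runnable sketch of the corrected pattern (the socketio object and Gspeech wrapper are carried over from the code above; the event name is illustrative):

    import time

    def start_listening(socketio, gsp):
        stop = False

        @socketio.on('stopListening')   # illustrative event name
        def on_stop():
            nonlocal stop               # bind to the enclosing flag, not a new local
            stop = True

        while not stop:
            stt = gsp.getText()         # blocks until speech is recognized
            if not stt:
                break
            time.sleep(0.01)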
GameEngine.ts
import Chance from "chance" import { BaseGameState, BaseInput } from "shared" import { cloneDeep, max, times } from "lodash" export interface EngineRunHelpers { chance: () => Chance.Chance } export type EngineRunFn<I extends BaseInput, G extends BaseGameState<I>> = (state: G, helpers: EngineRunHelpers) => G export interface GameStateRecalculateWithInput<I extends BaseInput> { stateIdx: number input: I } export type RunParams<I extends BaseInput> = GameStateRecalculateWithInput<I> | { time: number; dt: number } interface InputQueueItem<I extends BaseInput> { input: I stateId: number ts: number } interface SetInputParams<I extends BaseInput> { input: I stateId?: number ts?: number } interface StartGameLoopParams<I extends BaseInput, G extends BaseGameState<I>> { fps: number startTime?: number gameTime?: number onStateUpdate?: (g: G) => any } export class GameEngine<I extends BaseInput, G extends BaseGameState<I>> { public startTime = 0 private runFn: EngineRunFn<I, G> private states: G[] = [] private numStatesToKeep = 50 private exitGameLoopFn?: () => void private inputQueue: InputQueueItem<I>[] = [] private getEngineRunHelpers = (state: G): EngineRunHelpers => { const seed = [state.gameId, state.id].join("-") return { chance: () => new Chance(seed) } } private isGameStateWithRelac = (obj: any): obj is GameStateRecalculateWithInput<I> => { return obj && typeof obj.stateIdx === "number" } private replaceInput = (inputs: I[], updateInput: I) => { const idx = inputs.findIndex(i => i.playerId === updateInput.playerId) if (idx !== -1) { inputs[idx] = updateInput } else { inputs.push(updateInput) } } private replaceInputInState = (state: G, input: I) => { this.replaceInput(state.inputs, input) } private processInputQueue = () => { const { inputQueue } = this const currentStateId = this.currentStateId() const indicesToRemove: number[] = [] // first figure if you are in the past, if so, fast forward const maxQueuedStateId = max(inputQueue.map(q => q.stateId)) if (maxQueuedStateId && maxQueuedStateId > currentStateId) { const numStatesToFastForward = maxQueuedStateId - currentStateId console.log("fast forwarding", numStatesToFastForward, "states to catch up") const currentState = this.states[this.states.length - 1] const { dt, time } = currentState times(numStatesToFastForward, i => { const stateTime = time * (i + 1) this.run({ time: stateTime, dt }) }) } for (let i = 0; i < inputQueue.length; i++) { const queueItem = inputQueue[i] const { input, stateId } = queueItem const iii = input as any const stateIdx = stateId === undefined ? -1 : this.states.findIndex(s => s.id === stateId) console.log("received msg", this.states.length - 1 - stateIdx, "states in the past") if (stateIdx === -1) { console.log(`Set input packed arrived too late. ${stateId} is no longer in the array (processInputQueue)`) } else { console.log("handle input queue", stateId, JSON.stringify(iii.axis)) this.run({ stateIdx, input }) indicesToRemove.push(i) } } indicesToRemove.reverse().forEach(i => inputQueue.splice(i, 1)) } constructor(engineRunFn: EngineRunFn<I, G>, startingState: G)
run = (params: RunParams<I>) => { const { states } = this if (!this.isGameStateWithRelac(params)) { const { time, dt } = params const state = cloneDeep(states[states.length - 1]) if (!state) { throw new Error("GameEngine::run no state") } state.id += 1 state.time = time state.dt = dt const newState = this.runFn(state, this.getEngineRunHelpers(state)) states.push(newState) // after we finish, make sure we only keep what we need this.states = this.states.slice(-this.numStatesToKeep) } else { const { input } = params const idx = params.stateIdx if (!states[idx]) { throw new Error("GameEngine::run no state") } for (let i = idx; i < states.length; i++) { if (i === idx) { // since this "correct" input would affect the next state, we dont // change this state. just its input this.replaceInputInState(states[i], input) } else { // the state at index i is inaccurate. however, we want to keep the other players' inputs from it const s = states[i] this.replaceInput(s.inputs, input) // clone the previous state, generate new state from it const toBeNewState = cloneDeep(states[i - 1]) toBeNewState.id = s.id toBeNewState.time = s.time toBeNewState.dt = s.dt states[i] = this.runFn(toBeNewState, this.getEngineRunHelpers(toBeNewState)) // now re-apply the inputs to it so the next state we generate from this updated state is ok states[i].inputs = s.inputs } } } } setInput = (params: SetInputParams<I>) => { let { ts } = params const { input, stateId } = params const { states } = this // this is local input. no need to put it on the queue if (stateId === undefined) { // this is a new input that should be applied on the next run call // we can effectively do this by replacing the input of the last // state we have if (states.length) { this.replaceInputInState(states[states.length - 1], input) } } else { if (!ts) { ts = new Date().getTime() } // if state id is less than the very first state we have in the array, // then this means we got this input too late. this means that the input packet // took too long to get to us and we will be desynced. we need to request new states! if (stateId < states[0].id) { console.log(`Set input packed arrived too late. ${stateId} is no longer in the array`) // TODO wolf, handle this return } // figure out how far back in the past you are. this means you need to catch up const iii = input as any const existingIdx = this.inputQueue.findIndex(q => q.stateId === stateId && q.input.playerId === input.playerId) if (existingIdx === -1) { this.inputQueue.push({ input, stateId, ts }) console.log("Pushed to queue", stateId, JSON.stringify(iii.axis)) } else { // replace with more up to date information this.inputQueue[existingIdx] = { input, stateId, ts } console.log("replaced queue item", stateId, JSON.stringify(iii.axis)) } } } startGameLoop = (params: StartGameLoopParams<I, G>) => { const { fps, onStateUpdate } = params let { gameTime, startTime } = params if (!startTime) { startTime = new Date().getTime() } if (!gameTime) { gameTime = 0 } // kill any current loop if running this.stopGameLoop() // the tickTime basically tells us how often a frame is generated const tickTimeMs = 1000 / fps const timeTimeSeconds = tickTimeMs / 1000 const looperFn = typeof window === "undefined" ? 
setImmediate : requestAnimationFrame this.numStatesToKeep = fps * 5 console.log("num states to keep", this.numStatesToKeep) this.startTime = startTime let time = gameTime let quit = false let accumulator = 0 let didUpdateState = false let frameTime = this.startTime let currentTime = new Date().getTime() const loop = () => { if (quit) { console.log("Finished game loop after", time.valueOf(), "ms") return } // do normal game loop didUpdateState = false const now = new Date().getTime() frameTime = now - currentTime accumulator += frameTime currentTime = now // when the accumulator builds up greater than tickTimeMs, step the simulation forward as many times as needed while (accumulator >= tickTimeMs) { didUpdateState = true time += tickTimeMs this.run({ time, dt: timeTimeSeconds }) accumulator -= tickTimeMs } // handle input queues only on ticks where the state was updated if (didUpdateState) { // process the input queue this.processInputQueue() // if there's a state update. do that if (onStateUpdate) { onStateUpdate(this.currentState()) } } looperFn(loop) } loop() this.exitGameLoopFn = () => (quit = true) } stopGameLoop = () => { if (this.exitGameLoopFn) { this.exitGameLoopFn() this.exitGameLoopFn = undefined } } loadFromState = (states: G[]) => { console.log("loaded", states.length) this.states = states } // getters allStates = () => this.states currentState = () => { const { states } = this return cloneDeep(states[states.length - 1]) } currentStateId = () => { const { states } = this if (!states.length) { return 0 } return states[states.length - 1].id } gameId = () => { const { states } = this return !states.length ? "" : states[states.length - 1].gameId } }
{ this.runFn = engineRunFn this.states = [startingState] }
identifier_body
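startGameLoop above is a classic fixed-timestep accumulator loop: real elapsed time is added to an accumulator each frame, and the simulation is stepped in constant tickTimeMs increments until the accumulator is drained, so the physics advance deterministically regardless of render rate. The same idea as a small runnable Python sketch (names are illustrative, not part of GameEngine):

    import time

    def run_fixed_timestep(step, fps=60, duration_s=1.0):
        tick = 1.0 / fps                 # constant simulation step (tickTimeMs / 1000)
        acc = 0.0
        sim_time = 0.0
        prev = time.monotonic()
        while sim_time < duration_s:
            now = time.monotonic()
            acc += now - prev            # accumulate real frame time
            prev = now
            while acc >= tick:           # drain the accumulator in fixed-size steps
                step(sim_time, tick)
                sim_time += tick
                acc -= tick
            time.sleep(0.001)            # yield instead of busy-waiting

    run_fixed_timestep(lambda t, dt: None)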
GameEngine.ts
import Chance from "chance" import { BaseGameState, BaseInput } from "shared" import { cloneDeep, max, times } from "lodash" export interface EngineRunHelpers { chance: () => Chance.Chance } export type EngineRunFn<I extends BaseInput, G extends BaseGameState<I>> = (state: G, helpers: EngineRunHelpers) => G export interface GameStateRecalculateWithInput<I extends BaseInput> { stateIdx: number input: I } export type RunParams<I extends BaseInput> = GameStateRecalculateWithInput<I> | { time: number; dt: number } interface InputQueueItem<I extends BaseInput> { input: I stateId: number ts: number } interface SetInputParams<I extends BaseInput> { input: I stateId?: number ts?: number } interface StartGameLoopParams<I extends BaseInput, G extends BaseGameState<I>> { fps: number startTime?: number gameTime?: number onStateUpdate?: (g: G) => any } export class GameEngine<I extends BaseInput, G extends BaseGameState<I>> { public startTime = 0 private runFn: EngineRunFn<I, G> private states: G[] = [] private numStatesToKeep = 50 private exitGameLoopFn?: () => void private inputQueue: InputQueueItem<I>[] = [] private getEngineRunHelpers = (state: G): EngineRunHelpers => { const seed = [state.gameId, state.id].join("-") return { chance: () => new Chance(seed) } } private isGameStateWithRelac = (obj: any): obj is GameStateRecalculateWithInput<I> => { return obj && typeof obj.stateIdx === "number" } private replaceInput = (inputs: I[], updateInput: I) => { const idx = inputs.findIndex(i => i.playerId === updateInput.playerId) if (idx !== -1) { inputs[idx] = updateInput } else { inputs.push(updateInput) } } private replaceInputInState = (state: G, input: I) => { this.replaceInput(state.inputs, input) } private processInputQueue = () => { const { inputQueue } = this const currentStateId = this.currentStateId() const indicesToRemove: number[] = [] // first figure if you are in the past, if so, fast forward const maxQueuedStateId = max(inputQueue.map(q => q.stateId)) if (maxQueuedStateId && maxQueuedStateId > currentStateId) { const numStatesToFastForward = maxQueuedStateId - currentStateId console.log("fast forwarding", numStatesToFastForward, "states to catch up") const currentState = this.states[this.states.length - 1] const { dt, time } = currentState times(numStatesToFastForward, i => { const stateTime = time * (i + 1) this.run({ time: stateTime, dt }) }) } for (let i = 0; i < inputQueue.length; i++) { const queueItem = inputQueue[i] const { input, stateId } = queueItem const iii = input as any const stateIdx = stateId === undefined ? -1 : this.states.findIndex(s => s.id === stateId) console.log("received msg", this.states.length - 1 - stateIdx, "states in the past") if (stateIdx === -1) { console.log(`Set input packed arrived too late. ${stateId} is no longer in the array (processInputQueue)`) } else { console.log("handle input queue", stateId, JSON.stringify(iii.axis)) this.run({ stateIdx, input }) indicesToRemove.push(i) } } indicesToRemove.reverse().forEach(i => inputQueue.splice(i, 1)) }
(engineRunFn: EngineRunFn<I, G>, startingState: G) { this.runFn = engineRunFn this.states = [startingState] } run = (params: RunParams<I>) => { const { states } = this if (!this.isGameStateWithRelac(params)) { const { time, dt } = params const state = cloneDeep(states[states.length - 1]) if (!state) { throw new Error("GameEngine::run no state") } state.id += 1 state.time = time state.dt = dt const newState = this.runFn(state, this.getEngineRunHelpers(state)) states.push(newState) // after we finish, make sure we only keep what we need this.states = this.states.slice(-this.numStatesToKeep) } else { const { input } = params const idx = params.stateIdx if (!states[idx]) { throw new Error("GameEngine::run no state") } for (let i = idx; i < states.length; i++) { if (i === idx) { // since this "correct" input would affect the next state, we dont // change this state. just its input this.replaceInputInState(states[i], input) } else { // the state at index i is inaccurate. however, we want to keep the other players' inputs from it const s = states[i] this.replaceInput(s.inputs, input) // clone the previous state, generate new state from it const toBeNewState = cloneDeep(states[i - 1]) toBeNewState.id = s.id toBeNewState.time = s.time toBeNewState.dt = s.dt states[i] = this.runFn(toBeNewState, this.getEngineRunHelpers(toBeNewState)) // now re-apply the inputs to it so the next state we generate from this updated state is ok states[i].inputs = s.inputs } } } } setInput = (params: SetInputParams<I>) => { let { ts } = params const { input, stateId } = params const { states } = this // this is local input. no need to put it on the queue if (stateId === undefined) { // this is a new input that should be applied on the next run call // we can effectively do this by replacing the input of the last // state we have if (states.length) { this.replaceInputInState(states[states.length - 1], input) } } else { if (!ts) { ts = new Date().getTime() } // if state id is less than the very first state we have in the array, // then this means we got this input too late. this means that the input packet // took too long to get to us and we will be desynced. we need to request new states! if (stateId < states[0].id) { console.log(`Set input packed arrived too late. ${stateId} is no longer in the array`) // TODO wolf, handle this return } // figure out how far back in the past you are. this means you need to catch up const iii = input as any const existingIdx = this.inputQueue.findIndex(q => q.stateId === stateId && q.input.playerId === input.playerId) if (existingIdx === -1) { this.inputQueue.push({ input, stateId, ts }) console.log("Pushed to queue", stateId, JSON.stringify(iii.axis)) } else { // replace with more up to date information this.inputQueue[existingIdx] = { input, stateId, ts } console.log("replaced queue item", stateId, JSON.stringify(iii.axis)) } } } startGameLoop = (params: StartGameLoopParams<I, G>) => { const { fps, onStateUpdate } = params let { gameTime, startTime } = params if (!startTime) { startTime = new Date().getTime() } if (!gameTime) { gameTime = 0 } // kill any current loop if running this.stopGameLoop() // the tickTime basically tells us how often a frame is generated const tickTimeMs = 1000 / fps const timeTimeSeconds = tickTimeMs / 1000 const looperFn = typeof window === "undefined" ? 
setImmediate : requestAnimationFrame this.numStatesToKeep = fps * 5 console.log("num states to keep", this.numStatesToKeep) this.startTime = startTime let time = gameTime let quit = false let accumulator = 0 let didUpdateState = false let frameTime = this.startTime let currentTime = new Date().getTime() const loop = () => { if (quit) { console.log("Finished game loop after", time.valueOf(), "ms") return } // do normal game loop didUpdateState = false const now = new Date().getTime() frameTime = now - currentTime accumulator += frameTime currentTime = now // when the accumulator builds up greater than tickTimeMs, step the simulation forward as many times as needed while (accumulator >= tickTimeMs) { didUpdateState = true time += tickTimeMs this.run({ time, dt: timeTimeSeconds }) accumulator -= tickTimeMs } // handle input queues only on ticks where the state was updated if (didUpdateState) { // process the input queue this.processInputQueue() // if there's a state update. do that if (onStateUpdate) { onStateUpdate(this.currentState()) } } looperFn(loop) } loop() this.exitGameLoopFn = () => (quit = true) } stopGameLoop = () => { if (this.exitGameLoopFn) { this.exitGameLoopFn() this.exitGameLoopFn = undefined } } loadFromState = (states: G[]) => { console.log("loaded", states.length) this.states = states } // getters allStates = () => this.states currentState = () => { const { states } = this return cloneDeep(states[states.length - 1]) } currentStateId = () => { const { states } = this if (!states.length) { return 0 } return states[states.length - 1].id } gameId = () => { const { states } = this return !states.length ? "" : states[states.length - 1].gameId } }
constructor
identifier_name
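The GameStateRecalculateWithInput branch of run implements rollback-and-replay: a late remote input is patched into the past state it belongs to, and every later state is regenerated from its predecessor while the inputs observed at each tick are preserved. A compact Python sketch of that replay, under the loose assumption that a state is a dict with 'id', 'time', and per-player 'inputs':

    from copy import deepcopy

    def replay_from(states, idx, player, new_input, run_fn):
        states[idx]['inputs'][player] = new_input      # correct only the input; state idx itself stands
        for i in range(idx + 1, len(states)):
            kept = states[i]['inputs']                 # inputs seen at tick i, with the fix applied
            kept[player] = new_input
            prev = deepcopy(states[i - 1])             # regenerate state i from the corrected i-1
            prev['id'], prev['time'] = states[i]['id'], states[i]['time']
            states[i] = run_fn(prev)
            states[i]['inputs'] = kept                 # re-apply inputs for the next iteration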
GameEngine.ts
import Chance from "chance" import { BaseGameState, BaseInput } from "shared" import { cloneDeep, max, times } from "lodash" export interface EngineRunHelpers { chance: () => Chance.Chance } export type EngineRunFn<I extends BaseInput, G extends BaseGameState<I>> = (state: G, helpers: EngineRunHelpers) => G export interface GameStateRecalculateWithInput<I extends BaseInput> { stateIdx: number input: I } export type RunParams<I extends BaseInput> = GameStateRecalculateWithInput<I> | { time: number; dt: number } interface InputQueueItem<I extends BaseInput> { input: I stateId: number ts: number } interface SetInputParams<I extends BaseInput> { input: I stateId?: number ts?: number } interface StartGameLoopParams<I extends BaseInput, G extends BaseGameState<I>> { fps: number startTime?: number gameTime?: number onStateUpdate?: (g: G) => any } export class GameEngine<I extends BaseInput, G extends BaseGameState<I>> { public startTime = 0 private runFn: EngineRunFn<I, G> private states: G[] = [] private numStatesToKeep = 50 private exitGameLoopFn?: () => void private inputQueue: InputQueueItem<I>[] = [] private getEngineRunHelpers = (state: G): EngineRunHelpers => { const seed = [state.gameId, state.id].join("-") return { chance: () => new Chance(seed) } } private isGameStateWithRelac = (obj: any): obj is GameStateRecalculateWithInput<I> => { return obj && typeof obj.stateIdx === "number" } private replaceInput = (inputs: I[], updateInput: I) => { const idx = inputs.findIndex(i => i.playerId === updateInput.playerId) if (idx !== -1) { inputs[idx] = updateInput } else { inputs.push(updateInput) } } private replaceInputInState = (state: G, input: I) => { this.replaceInput(state.inputs, input) } private processInputQueue = () => { const { inputQueue } = this const currentStateId = this.currentStateId() const indicesToRemove: number[] = [] // first figure if you are in the past, if so, fast forward const maxQueuedStateId = max(inputQueue.map(q => q.stateId)) if (maxQueuedStateId && maxQueuedStateId > currentStateId) { const numStatesToFastForward = maxQueuedStateId - currentStateId console.log("fast forwarding", numStatesToFastForward, "states to catch up") const currentState = this.states[this.states.length - 1] const { dt, time } = currentState times(numStatesToFastForward, i => { const stateTime = time * (i + 1) this.run({ time: stateTime, dt }) }) } for (let i = 0; i < inputQueue.length; i++) { const queueItem = inputQueue[i] const { input, stateId } = queueItem const iii = input as any const stateIdx = stateId === undefined ? -1 : this.states.findIndex(s => s.id === stateId) console.log("received msg", this.states.length - 1 - stateIdx, "states in the past") if (stateIdx === -1) { console.log(`Set input packed arrived too late. 
${stateId} is no longer in the array (processInputQueue)`) } else { console.log("handle input queue", stateId, JSON.stringify(iii.axis)) this.run({ stateIdx, input }) indicesToRemove.push(i) } } indicesToRemove.reverse().forEach(i => inputQueue.splice(i, 1)) } constructor(engineRunFn: EngineRunFn<I, G>, startingState: G) { this.runFn = engineRunFn this.states = [startingState] } run = (params: RunParams<I>) => { const { states } = this if (!this.isGameStateWithRelac(params)) { const { time, dt } = params const state = cloneDeep(states[states.length - 1]) if (!state) { throw new Error("GameEngine::run no state") } state.id += 1 state.time = time state.dt = dt const newState = this.runFn(state, this.getEngineRunHelpers(state)) states.push(newState) // after we finish, make sure we only keep what we need this.states = this.states.slice(-this.numStatesToKeep) } else { const { input } = params const idx = params.stateIdx if (!states[idx]) { throw new Error("GameEngine::run no state") } for (let i = idx; i < states.length; i++) { if (i === idx) { // since this "correct" input would affect the next state, we dont // change this state. just its input this.replaceInputInState(states[i], input) } else { // the state at index i is inaccurate. however, we want to keep the other players' inputs from it const s = states[i] this.replaceInput(s.inputs, input) // clone the previous state, generate new state from it const toBeNewState = cloneDeep(states[i - 1]) toBeNewState.id = s.id toBeNewState.time = s.time toBeNewState.dt = s.dt states[i] = this.runFn(toBeNewState, this.getEngineRunHelpers(toBeNewState)) // now re-apply the inputs to it so the next state we generate from this updated state is ok states[i].inputs = s.inputs } } } } setInput = (params: SetInputParams<I>) => { let { ts } = params const { input, stateId } = params const { states } = this // this is local input. no need to put it on the queue if (stateId === undefined) { // this is a new input that should be applied on the next run call // we can effectively do this by replacing the input of the last // state we have if (states.length) { this.replaceInputInState(states[states.length - 1], input) } } else
} startGameLoop = (params: StartGameLoopParams<I, G>) => { const { fps, onStateUpdate } = params let { gameTime, startTime } = params if (!startTime) { startTime = new Date().getTime() } if (!gameTime) { gameTime = 0 } // kill any current loop if running this.stopGameLoop() // the tickTime basically tells us how often a frame is generated const tickTimeMs = 1000 / fps const timeTimeSeconds = tickTimeMs / 1000 const looperFn = typeof window === "undefined" ? setImmediate : requestAnimationFrame this.numStatesToKeep = fps * 5 console.log("num states to keep", this.numStatesToKeep) this.startTime = startTime let time = gameTime let quit = false let accumulator = 0 let didUpdateState = false let frameTime = this.startTime let currentTime = new Date().getTime() const loop = () => { if (quit) { console.log("Finished game loop after", time.valueOf(), "ms") return } // do normal game loop didUpdateState = false const now = new Date().getTime() frameTime = now - currentTime accumulator += frameTime currentTime = now // when the accumulator builds up greater than tickTimeMs, step the simulation forward as many times as needed while (accumulator >= tickTimeMs) { didUpdateState = true time += tickTimeMs this.run({ time, dt: timeTimeSeconds }) accumulator -= tickTimeMs } // handle input queues only on ticks where the state was updated if (didUpdateState) { // process the input queue this.processInputQueue() // if there's a state update. do that if (onStateUpdate) { onStateUpdate(this.currentState()) } } looperFn(loop) } loop() this.exitGameLoopFn = () => (quit = true) } stopGameLoop = () => { if (this.exitGameLoopFn) { this.exitGameLoopFn() this.exitGameLoopFn = undefined } } loadFromState = (states: G[]) => { console.log("loaded", states.length) this.states = states } // getters allStates = () => this.states currentState = () => { const { states } = this return cloneDeep(states[states.length - 1]) } currentStateId = () => { const { states } = this if (!states.length) { return 0 } return states[states.length - 1].id } gameId = () => { const { states } = this return !states.length ? "" : states[states.length - 1].gameId } }
{ if (!ts) { ts = new Date().getTime() } // if state id is less than the very first state we have in the array, // then this means we got this input too late. this means that the input packet // took too long to get to us and we will be desynced. we need to request new states! if (stateId < states[0].id) { console.log(`Set input packet arrived too late. ${stateId} is no longer in the array`) // TODO wolf, handle this return } // figure out how far back in the past you are. this means you need to catch up const iii = input as any const existingIdx = this.inputQueue.findIndex(q => q.stateId === stateId && q.input.playerId === input.playerId) if (existingIdx === -1) { this.inputQueue.push({ input, stateId, ts }) console.log("Pushed to queue", stateId, JSON.stringify(iii.axis)) } else { // replace with more up-to-date information this.inputQueue[existingIdx] = { input, stateId, ts } console.log("replaced queue item", stateId, JSON.stringify(iii.axis)) } }
conditional_block
GameEngine.ts
import Chance from "chance" import { BaseGameState, BaseInput } from "shared" import { cloneDeep, max, times } from "lodash" export interface EngineRunHelpers { chance: () => Chance.Chance } export type EngineRunFn<I extends BaseInput, G extends BaseGameState<I>> = (state: G, helpers: EngineRunHelpers) => G export interface GameStateRecalculateWithInput<I extends BaseInput> { stateIdx: number input: I } export type RunParams<I extends BaseInput> = GameStateRecalculateWithInput<I> | { time: number; dt: number } interface InputQueueItem<I extends BaseInput> { input: I stateId: number ts: number } interface SetInputParams<I extends BaseInput> { input: I stateId?: number ts?: number } interface StartGameLoopParams<I extends BaseInput, G extends BaseGameState<I>> { fps: number startTime?: number gameTime?: number onStateUpdate?: (g: G) => any } export class GameEngine<I extends BaseInput, G extends BaseGameState<I>> { public startTime = 0 private runFn: EngineRunFn<I, G> private states: G[] = [] private numStatesToKeep = 50 private exitGameLoopFn?: () => void private inputQueue: InputQueueItem<I>[] = [] private getEngineRunHelpers = (state: G): EngineRunHelpers => { const seed = [state.gameId, state.id].join("-") return { chance: () => new Chance(seed) } } private isGameStateWithRelac = (obj: any): obj is GameStateRecalculateWithInput<I> => { return obj && typeof obj.stateIdx === "number" } private replaceInput = (inputs: I[], updateInput: I) => { const idx = inputs.findIndex(i => i.playerId === updateInput.playerId) if (idx !== -1) { inputs[idx] = updateInput } else { inputs.push(updateInput) } } private replaceInputInState = (state: G, input: I) => { this.replaceInput(state.inputs, input) } private processInputQueue = () => { const { inputQueue } = this const currentStateId = this.currentStateId() const indicesToRemove: number[] = [] // first figure if you are in the past, if so, fast forward const maxQueuedStateId = max(inputQueue.map(q => q.stateId)) if (maxQueuedStateId && maxQueuedStateId > currentStateId) { const numStatesToFastForward = maxQueuedStateId - currentStateId console.log("fast forwarding", numStatesToFastForward, "states to catch up") const currentState = this.states[this.states.length - 1] const { dt, time } = currentState times(numStatesToFastForward, i => { const stateTime = time * (i + 1) this.run({ time: stateTime, dt }) }) } for (let i = 0; i < inputQueue.length; i++) { const queueItem = inputQueue[i] const { input, stateId } = queueItem const iii = input as any const stateIdx = stateId === undefined ? -1 : this.states.findIndex(s => s.id === stateId) console.log("received msg", this.states.length - 1 - stateIdx, "states in the past") if (stateIdx === -1) { console.log(`Set input packed arrived too late. 
${stateId} is no longer in the array (processInputQueue)`) } else { console.log("handle input queue", stateId, JSON.stringify(iii.axis)) this.run({ stateIdx, input }) indicesToRemove.push(i) } } indicesToRemove.reverse().forEach(i => inputQueue.splice(i, 1)) } constructor(engineRunFn: EngineRunFn<I, G>, startingState: G) { this.runFn = engineRunFn this.states = [startingState] } run = (params: RunParams<I>) => { const { states } = this if (!this.isGameStateWithRelac(params)) { const { time, dt } = params const state = cloneDeep(states[states.length - 1]) if (!state) { throw new Error("GameEngine::run no state") } state.id += 1 state.time = time state.dt = dt const newState = this.runFn(state, this.getEngineRunHelpers(state)) states.push(newState) // after we finish, make sure we only keep what we need this.states = this.states.slice(-this.numStatesToKeep) } else { const { input } = params const idx = params.stateIdx if (!states[idx]) { throw new Error("GameEngine::run no state") } for (let i = idx; i < states.length; i++) { if (i === idx) { // since this "correct" input would affect the next state, we dont // change this state. just its input this.replaceInputInState(states[i], input) } else { // the state at index i is inaccurate. however, we want to keep the other players' inputs from it const s = states[i] this.replaceInput(s.inputs, input) // clone the previous state, generate new state from it const toBeNewState = cloneDeep(states[i - 1]) toBeNewState.id = s.id toBeNewState.time = s.time toBeNewState.dt = s.dt states[i] = this.runFn(toBeNewState, this.getEngineRunHelpers(toBeNewState)) // now re-apply the inputs to it so the next state we generate from this updated state is ok states[i].inputs = s.inputs } } } } setInput = (params: SetInputParams<I>) => { let { ts } = params const { input, stateId } = params const { states } = this // this is local input. no need to put it on the queue if (stateId === undefined) { // this is a new input that should be applied on the next run call // we can effectively do this by replacing the input of the last // state we have if (states.length) { this.replaceInputInState(states[states.length - 1], input) } } else { if (!ts) { ts = new Date().getTime() } // if state id is less than the very first state we have in the array, // then this means we got this input too late. this means that the input packet // took too long to get to us and we will be desynced. we need to request new states! if (stateId < states[0].id) { console.log(`Set input packed arrived too late. ${stateId} is no longer in the array`) // TODO wolf, handle this return } // figure out how far back in the past you are. 
this means you need to catch up const iii = input as any const existingIdx = this.inputQueue.findIndex(q => q.stateId === stateId && q.input.playerId === input.playerId) if (existingIdx === -1) { this.inputQueue.push({ input, stateId, ts }) console.log("Pushed to queue", stateId, JSON.stringify(iii.axis)) } else { // replace with more up to date information this.inputQueue[existingIdx] = { input, stateId, ts } console.log("replaced queue item", stateId, JSON.stringify(iii.axis)) } } } startGameLoop = (params: StartGameLoopParams<I, G>) => { const { fps, onStateUpdate } = params let { gameTime, startTime } = params if (!startTime) { startTime = new Date().getTime() } if (!gameTime) { gameTime = 0 } // kill any current loop if running this.stopGameLoop() // the tickTime basically tells us how often a frame is generated const tickTimeMs = 1000 / fps const timeTimeSeconds = tickTimeMs / 1000 const looperFn = typeof window === "undefined" ? setImmediate : requestAnimationFrame this.numStatesToKeep = fps * 5 console.log("num states to keep", this.numStatesToKeep) this.startTime = startTime let time = gameTime let quit = false let accumulator = 0 let didUpdateState = false let frameTime = this.startTime let currentTime = new Date().getTime() const loop = () => { if (quit) { console.log("Finished game loop after", time.valueOf(), "ms") return } // do normal game loop didUpdateState = false const now = new Date().getTime() frameTime = now - currentTime accumulator += frameTime currentTime = now // when the accumulator builds up greater than tickTimeMs, step the simulation forward as many times as needed while (accumulator >= tickTimeMs) { didUpdateState = true time += tickTimeMs this.run({ time, dt: timeTimeSeconds }) accumulator -= tickTimeMs } // handle input queues only on ticks where the state was updated if (didUpdateState) { // process the input queue this.processInputQueue() // if there's a state update. do that if (onStateUpdate) { onStateUpdate(this.currentState()) } } looperFn(loop) } loop() this.exitGameLoopFn = () => (quit = true)
this.exitGameLoopFn = undefined } } loadFromState = (states: G[]) => { console.log("loaded", states.length) this.states = states } // getters allStates = () => this.states currentState = () => { const { states } = this return cloneDeep(states[states.length - 1]) } currentStateId = () => { const { states } = this if (!states.length) { return 0 } return states[states.length - 1].id } gameId = () => { const { states } = this return !states.length ? "" : states[states.length - 1].gameId } }
} stopGameLoop = () => { if (this.exitGameLoopFn) { this.exitGameLoopFn()
random_line_split
3d_LJ_nList.py
# -*- coding: utf-8 -*- """ Created on Mon Sep 6 13:41:55 2021 @author: Archana P S """ # 3d lattice import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from numba import jit plt.rcParams['font.family']="Times New Roman" plt.rcParams['xtick.labelsize']=18 plt.rcParams['ytick.labelsize']=18 #=================== define all the parameters here. =================# nx = 5 # no. of atoms along each direction of the cube. N = nx**3 # total no. of atoms. rho = 0.5 # desired density. temperature = 1.0 # desired temperature. sigma = 1.0 epsilon = 1.0 mass = 1.0 rcut = 2.5*sigma vskin = 0.5*sigma dt = 0.002 nmaxsteps = 5000 thermo_freq = 10 nbrList_freq = 5 #=====================================================================# @jit def put_on_3d_lattice(N, rho, sigma): lx = (N/rho)**(1/3) ly = lx lz = lx nx = int(np.cbrt(N)) ny = nx nz = nx dx = (lx - nx*sigma)/(nx-1) x = np.zeros(N) y = np.zeros(N) z = np.zeros(N) ix = 0 iy = 0 iz = 0 for i in range(N): if (i % nx == 0): ix = 0 if (i % (nx)**2 == 0): iy = 0 if (i % (nx)**3 == 0): iz = 0 else: iz = iz +1 else: iy = iy + 1 else: ix = ix + 1 x[i] = sigma/2.0 + ix*(dx + sigma) y[i] = sigma/2.0 + iy*(dx + sigma) z[i] = sigma/2.0 + iz*(dx + sigma) return [x,y,z,lx,ly,lz] def write_xyz_file(filename,x,y,z): fout_xyz = open(filename, 'w+') nMax = x.size fout_xyz.write("{}\n".format(nMax)) fout_xyz.write("comment\n") for i in range(nMax): fout_xyz.write("1 {} {} {}\n".format(x[i], y[i], z[i])) fout_xyz.close() return @jit def computeForces(x,y,z,natoms,sigma,epsilon): fx[:] = 0.0 fy[:] = 0.0 fz[:] = 0.0 PE = 0.0 virial = 0.0 for i in range(natoms): for j in range(natoms): #avoid the self interaction. if (j != i): #calculate distance b/w i and j particles. dx = x[i] - x[j] dy = y[i] - y[j] dz = z[i] - z[j] # minimum image convention. dx = dx - np.round(dx/lx)*lx dy = dy - np.round(dy/ly)*ly dz = dz - np.round(dz/lz)*lz # distance b/w i and j particles. dr = np.sqrt(dx**2 + dy**2 + dz**2) # now calculate the force. sr6 = (sigma/dr)**6.0 rinv = 1.0/dr rinv2 = rinv**2.0 comn_frc_term = 48.0*epsilon*sr6*(sr6 - 0.5)*rinv2 fx[i] = fx[i] + comn_frc_term*dx fy[i] = fy[i] + comn_frc_term*dy fz[i] = fz[i] + comn_frc_term*dz # calculate potential energy here. pot_term = 4.0*epsilon*sr6*(sr6 - 1.0) PE = PE + pot_term # calculation of virial. vir_term = dx*fx[i] + dy*fy[i] + dz*fz[i] virial = virial + vir_term PE = PE * 0.5 virial = virial * 0.5 return [fx,fy,fz,PE,virial] @jit def VelocityVerlet_step_1(x,y,z,vx,vy,vz,fx,fy,fz,N,dt,mass): # this does the first step of V-V algorithm. for i in range(N): # position update x[i] = x[i] + vx[i]*dt + 0.5*fx[i]/mass * dt**2.0 y[i] = y[i] + vy[i]*dt + 0.5*fy[i]/mass * dt**2.0 z[i] = z[i] + vz[i]*dt + 0.5*fz[i]/mass * dt**2.0 # velocity update. vx[i] = vx[i] + fx[i]*dt*0.5 vy[i] = vy[i] + fy[i]*dt*0.5 vz[i] = vz[i] + fz[i]*dt*0.5 return [x,y,z,vx,vy,vz] @jit def VelocityVerlet_step_2(vx,vy,vz,fx,fy,fz,N,dt,mass): # update only velocities. and calculate Kinetic energy. KE = 0.0 for i in range(N): vx[i] = vx[i] + fx[i]*dt*0.5 vy[i] = vy[i] + fy[i]*dt*0.5 vz[i] = vz[i] + fz[i]*dt*0.5 KE = KE + (vx[i]**2.0 + vy[i]**2.0 + vz[i]**2.0)*mass*0.5 return [vx,vy,vz,KE] #======== function which will calculate the neighbor list. @jit def get_Neighbor_List(natoms,x,y,z,lx,ly,lz,sigma,rcut,vskin): # Siva, 19 Sept, 2021. 
Distances = np.zeros((natoms,natoms)) nCount[:] = 0 nList[:,:] = 0 for i in range(natoms): Distances[i,i] = lx for j in range(natoms): if(j != i): dx = x[i] - x[j] dy = y[i] - y[j] dz = z[i] - z[j] #minimum image convention. dx = dx - np.round(dx/lx)*lx dy = dy - np.round(dy/ly)*ly dz = dz - np.round(dz/lz)*lz rij = np.sqrt(dx**2 + dy**2 + dz**2) Distances[i,j] = rij #Distances[j,i] = Distances[i,j] verlet_R = (rcut+vskin)*sigma if(rij < verlet_R): nCount[i] = nCount[i]+1 k = nCount[i] # start_index = i*natoms nList[i, k-1] = j else: continue return [nCount,nList,Distances] #======== function which will compute the forces on all the particles, #======== using the list of neighbors for every particle. @jit def
(natoms,x,y,z,nCount,nList,sigma,epsilon,lx,ly,lz,fx,fy,fz): # Siva, 19 Sept, 2021. fx[:] = 0.0 fy[:] = 0.0 fz[:] = 0.0 PE = 0.0 virial = 0.0 for i in range(natoms): # for k in range(nCount[i]): #starting = i*natoms j = nList[i, k] # if(j != i): #calculate the distance dx = x[i]-x[j] dy = y[i]-y[j] dz = z[i]-z[j] #minimum image. dx = dx - np.round(dx/lx)*lx dy = dy - np.round(dy/ly)*ly dz = dz - np.round(dz/lz)*lz rij = np.sqrt(dx**2.0 + dy**2.0 + dz**2.0) rij2 = rij**2.0 rcut2 = rcut**2.0 if(rij2 < rcut2): # need to calculate the force. rinv = 1.0/rij rinv2 = rinv**2.0 sr6 = (sigma/rij)**6.0 src6 = (sigma/rcut)**6.0 rcinv = 1.0/rcut rcinv2 = rcinv**2.0 # #use LJ potential, with predefined cut-off. frc_common = 48.0*epsilon*sr6*(sr6 - 0.5)*rinv2 fx[i] = fx[i] + frc_common*dx fy[i] = fy[i] + frc_common*dy fz[i] = fz[i] + frc_common*dz # shifting for the potential force-shifting. frc_shift = 48.0*epsilon*src6*(src6 - 0.5)*rcinv2 fx_shift = frc_shift*dx fy_shift = frc_shift*dy fz_shift = frc_shift*dz #shift it. fx[i] = fx[i] - fx_shift fy[i] = fy[i] - fy_shift fz[i] = fz[i] - fz_shift # now calculate PE & virial. pot_lj = 4.0*epsilon*sr6*(sr6 - 1.0) pot_rc = 4.0*epsilon*src6*(src6 - 1.0) pot_fs = -48.0*epsilon*src6*(src6 - 0.5)*rcinv # add all the components./ shifting. PE = PE + pot_lj - pot_rc - (rij - rcut)*pot_fs virial = virial + (dx*fx[i] + dy*fy[i] + dz*fz[i]) else: continue PE = PE*0.5 virial = virial*0.5 # return [fx,fy,fz,PE,virial] @jit def applyPBC(N,x,y,z,lx,ly,lz): x = x - np.round(x/lx)*lx y = y - np.round(y/ly)*ly z = z - np.round(z/lz)*lz return [x,y,z] #======== main program ================================================# #======== main program ================================================# #======== main program ================================================# x = np.zeros(N) y = np.zeros(N) z = np.zeros(N) vx = np.zeros(N) vy = np.zeros(N) vz = np.zeros(N) vx = np.random.rand(N) vy = np.random.rand(N) vz = np.random.rand(N) fx = np.zeros(N) fy = np.zeros(N) fz = np.zeros(N) nCount = np.zeros(N, dtype=int) nList = np.zeros((N,N), dtype=int) [x,y,z,lx,ly,lz] = put_on_3d_lattice(N, rho, sigma) fig = plt.figure() fig.patch.set_facecolor('white') ax = fig.add_subplot(111, projection='3d') ax.scatter(x,y,z,s=60,color='blue') # plt.show() # to write the xyz file. xyz_file = "out_config.xyz" write_xyz_file(xyz_file, x,y,z) #open the thermo file. thermo_file = "out_thermo.dat" fout_thermo = open(thermo_file, 'w+') # get the neighbor list. [nCount,nList,Distances] = get_Neighbor_List(N,x,y,z,lx,ly,lz,sigma,rcut,vskin) # now compute the forces, using the neighbor list. [fx,fy,fz,PE,virial] = compute_Forces_nbrList(N,x,y,z,nCount,nList,sigma,epsilon,lx,ly,lz,fx,fy,fz) # move the particles by integrating the eq. of motion/ using V.V. # 1st step of V-V. [x,y,z,vx,vy,vz] = VelocityVerlet_step_1(x,y,z,vx,vy,vz,fx,fy,fz,N,dt,mass) # compute forces for the 2nd step of V-V. [fx,fy,fz,PE,virial] = compute_Forces_nbrList(N,x,y,z,nCount,nList,sigma,epsilon,lx,ly,lz,fx,fy,fz) #2nd step, of V-V. [vx,vy,vz,KE] = VelocityVerlet_step_2(vx,vy,vz,fx,fy,fz,N,dt,mass) fig1 = plt.figure() fig1.patch.set_facecolor('white') ax1 = fig1.add_subplot(111, projection='3d') ax1.scatter(x,y,z,s=60,color='blue') # plt.show() for itr in range(nmaxsteps): timeNow = itr*dt [x,y,z] = applyPBC(N,x,y,z,lx,ly,lz) # calculate the neighbor list at a defined interval, after every nbrList_freq steps. 
if (itr % nbrList_freq == 0): [nCount,nList,Distances] = get_Neighbor_List(N,x,y,z,lx,ly,lz,sigma,rcut,vskin) [x,y,z,vx,vy,vz] = VelocityVerlet_step_1(x,y,z,vx,vy,vz,fx,fy,fz,N,dt,mass) #[fx,fy,fz,PE,virial] = computeForces(x,y,z,N,sigma,epsilon) [fx,fy,fz,PE,virial] = compute_Forces_nbrList(N,x,y,z,nCount,nList,sigma,epsilon,lx,ly,lz,fx,fy,fz) [vx,vy,vz,KE] = VelocityVerlet_step_2(vx,vy,vz,fx,fy,fz,N,dt,mass) tempInst = KE*2.0/(3.0*N-1) virial = virial/(3.0*N) pressure = rho*(tempInst + virial) if (itr % thermo_freq == 0): fout_thermo.write("{} {} {} {} {} {}\n".format(timeNow,tempInst,PE,KE,pressure,virial)) fout_thermo.flush() fout_thermo.close() #=================== to plot. thermo_data = np.loadtxt('out_thermo.dat') time = thermo_data[:,0] T = thermo_data[:,1] PE = thermo_data[:,2] KE = thermo_data[:,3] pressure = thermo_data[:,4] virial = thermo_data[:,5] plt.figure(figsize=[5,5]) plt.plot(time,PE,label='PE') plt.plot(time,KE,label='KE') plt.plot(time,PE+KE,label='Total E') plt.title("PE, KE and Total energy",fontsize=18) plt.xlabel('time t (r.u.)',fontsize=18) plt.ylabel('Energy (r.u.)',fontsize=18) plt.legend(loc='best',fontsize=14) plt.tight_layout() #plt.show() plt.savefig('./forceShifted_E_vs_time.png',format='png',dpi=300) plt.figure(figsize=[5,5]) plt.plot(time,T) plt.title("Temperature",fontsize=18) plt.xlabel('time t (r.u.)',fontsize=18) plt.ylabel('Temperature (r.u.)',fontsize=18) plt.tight_layout() #plt.show() plt.savefig('./forceShifted_T_vs_time.png',format='png',dpi=300) plt.figure(figsize=[5,5]) plt.plot(time,pressure,label='Pressure') plt.plot(time,virial,label='Virial') plt.title("Pressure and Virial",fontsize=18) plt.xlabel('time t (r.u.)',fontsize=18) plt.ylabel('Pressure (r.u.)',fontsize=18) plt.legend(loc='best',fontsize=14) plt.tight_layout() #plt.show() plt.savefig('./forceShifted_P_vs_time.png',format='png',dpi=300)
compute_Forces_nbrList
identifier_name
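For the record, the pot_rc and pot_fs terms in compute_Forces_nbrList implement the force-shifted Lennard-Jones potential: both the energy and its first derivative are brought to zero at the cutoff r_c, so nothing jumps when a pair crosses the cutoff. With V_LJ(r) = 4ε[(σ/r)^12 − (σ/r)^6], the quantity the code accumulates is

    V_{\mathrm{fs}}(r) = V_{\mathrm{LJ}}(r) - V_{\mathrm{LJ}}(r_c) - (r - r_c)\, V'_{\mathrm{LJ}}(r_c), \qquad r < r_c,

where pot_fs in the code is exactly V'_{\mathrm{LJ}}(r_c) = -\frac{48\,\epsilon}{r_c}\left(\frac{\sigma}{r_c}\right)^{6}\left[\left(\frac{\sigma}{r_c}\right)^{6} - \frac{1}{2}\right].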
3d_LJ_nList.py
# -*- coding: utf-8 -*- """ Created on Mon Sep 6 13:41:55 2021 @author: Archana P S """ # 3d lattice import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from numba import jit plt.rcParams['font.family']="Times New Roman" plt.rcParams['xtick.labelsize']=18 plt.rcParams['ytick.labelsize']=18 #=================== define all the parameters here. =================# nx = 5 # no. of atoms along each direction of the cube. N = nx**3 # total no. of atoms. rho = 0.5 # desired density. temperature = 1.0 # desired temperature. sigma = 1.0 epsilon = 1.0 mass = 1.0 rcut = 2.5*sigma vskin = 0.5*sigma dt = 0.002 nmaxsteps = 5000 thermo_freq = 10 nbrList_freq = 5 #=====================================================================# @jit def put_on_3d_lattice(N, rho, sigma): lx = (N/rho)**(1/3) ly = lx lz = lx nx = int(np.cbrt(N)) ny = nx nz = nx dx = (lx - nx*sigma)/(nx-1) x = np.zeros(N) y = np.zeros(N) z = np.zeros(N) ix = 0 iy = 0 iz = 0 for i in range(N): if (i % nx == 0): ix = 0
iz = 0 else: iz = iz +1 else: iy = iy + 1 else: ix = ix + 1 x[i] = sigma/2.0 + ix*(dx + sigma) y[i] = sigma/2.0 + iy*(dx + sigma) z[i] = sigma/2.0 + iz*(dx + sigma) return [x,y,z,lx,ly,lz] def write_xyz_file(filename,x,y,z): fout_xyz = open(filename, 'w+') nMax = x.size fout_xyz.write("{}\n".format(nMax)) fout_xyz.write("comment\n") for i in range(nMax): fout_xyz.write("1 {} {} {}\n".format(x[i], y[i], z[i])) fout_xyz.close() return @jit def computeForces(x,y,z,natoms,sigma,epsilon): fx[:] = 0.0 fy[:] = 0.0 fz[:] = 0.0 PE = 0.0 virial = 0.0 for i in range(natoms): for j in range(natoms): #avoid the self interaction. if (j != i): #calculate distance b/w i and j particles. dx = x[i] - x[j] dy = y[i] - y[j] dz = z[i] - z[j] # minimum image convention. dx = dx - np.round(dx/lx)*lx dy = dy - np.round(dy/ly)*ly dz = dz - np.round(dz/lz)*lz # distance b/w i and j particles. dr = np.sqrt(dx**2 + dy**2 + dz**2) # now calculate the force. sr6 = (sigma/dr)**6.0 rinv = 1.0/dr rinv2 = rinv**2.0 comn_frc_term = 48.0*epsilon*sr6*(sr6 - 0.5)*rinv2 fx[i] = fx[i] + comn_frc_term*dx fy[i] = fy[i] + comn_frc_term*dy fz[i] = fz[i] + comn_frc_term*dz # calculate potential energy here. pot_term = 4.0*epsilon*sr6*(sr6 - 1.0) PE = PE + pot_term # calculation of virial. vir_term = dx*fx[i] + dy*fy[i] + dz*fz[i] virial = virial + vir_term PE = PE * 0.5 virial = virial * 0.5 return [fx,fy,fz,PE,virial] @jit def VelocityVerlet_step_1(x,y,z,vx,vy,vz,fx,fy,fz,N,dt,mass): # this does the first step of V-V algorithm. for i in range(N): # position update x[i] = x[i] + vx[i]*dt + 0.5*fx[i]/mass * dt**2.0 y[i] = y[i] + vy[i]*dt + 0.5*fy[i]/mass * dt**2.0 z[i] = z[i] + vz[i]*dt + 0.5*fz[i]/mass * dt**2.0 # velocity update. vx[i] = vx[i] + fx[i]*dt*0.5 vy[i] = vy[i] + fy[i]*dt*0.5 vz[i] = vz[i] + fz[i]*dt*0.5 return [x,y,z,vx,vy,vz] @jit def VelocityVerlet_step_2(vx,vy,vz,fx,fy,fz,N,dt,mass): # update only velocities. and calculate Kinetic energy. KE = 0.0 for i in range(N): vx[i] = vx[i] + fx[i]*dt*0.5 vy[i] = vy[i] + fy[i]*dt*0.5 vz[i] = vz[i] + fz[i]*dt*0.5 KE = KE + (vx[i]**2.0 + vy[i]**2.0 + vz[i]**2.0)*mass*0.5 return [vx,vy,vz,KE] #======== function which will calculate the neighbor list. @jit def get_Neighbor_List(natoms,x,y,z,lx,ly,lz,sigma,rcut,vskin): # Siva, 19 Sept, 2021. Distances = np.zeros((natoms,natoms)) nCount[:] = 0 nList[:,:] = 0 for i in range(natoms): Distances[i,i] = lx for j in range(natoms): if(j != i): dx = x[i] - x[j] dy = y[i] - y[j] dz = z[i] - z[j] #minimum image convention. dx = dx - np.round(dx/lx)*lx dy = dy - np.round(dy/ly)*ly dz = dz - np.round(dz/lz)*lz rij = np.sqrt(dx**2 + dy**2 + dz**2) Distances[i,j] = rij #Distances[j,i] = Distances[i,j] verlet_R = (rcut+vskin)*sigma if(rij < verlet_R): nCount[i] = nCount[i]+1 k = nCount[i] # start_index = i*natoms nList[i, k-1] = j else: continue return [nCount,nList,Distances] #======== function which will compute the forces on all the particles, #======== using the list of neighbors for every particle. @jit def compute_Forces_nbrList(natoms,x,y,z,nCount,nList,sigma,epsilon,lx,ly,lz,fx,fy,fz): # Siva, 19 Sept, 2021. fx[:] = 0.0 fy[:] = 0.0 fz[:] = 0.0 PE = 0.0 virial = 0.0 for i in range(natoms): # for k in range(nCount[i]): #starting = i*natoms j = nList[i, k] # if(j != i): #calculate the distance dx = x[i]-x[j] dy = y[i]-y[j] dz = z[i]-z[j] #minimum image. 
dx = dx - np.round(dx/lx)*lx dy = dy - np.round(dy/ly)*ly dz = dz - np.round(dz/lz)*lz rij = np.sqrt(dx**2.0 + dy**2.0 + dz**2.0) rij2 = rij**2.0 rcut2 = rcut**2.0 if(rij2 < rcut2): # need to calculate the force. rinv = 1.0/rij rinv2 = rinv**2.0 sr6 = (sigma/rij)**6.0 src6 = (sigma/rcut)**6.0 rcinv = 1.0/rcut rcinv2 = rcinv**2.0 # #use LJ potential, with predefined cut-off. frc_common = 48.0*epsilon*sr6*(sr6 - 0.5)*rinv2 fx[i] = fx[i] + frc_common*dx fy[i] = fy[i] + frc_common*dy fz[i] = fz[i] + frc_common*dz # shifting for the potential force-shifting. frc_shift = 48.0*epsilon*src6*(src6 - 0.5)*rcinv2 fx_shift = frc_shift*dx fy_shift = frc_shift*dy fz_shift = frc_shift*dz #shift it. fx[i] = fx[i] - fx_shift fy[i] = fy[i] - fy_shift fz[i] = fz[i] - fz_shift # now calculate PE & virial. pot_lj = 4.0*epsilon*sr6*(sr6 - 1.0) pot_rc = 4.0*epsilon*src6*(src6 - 1.0) pot_fs = -48.0*epsilon*src6*(src6 - 0.5)*rcinv # add all the components./ shifting. PE = PE + pot_lj - pot_rc - (rij - rcut)*pot_fs virial = virial + (dx*fx[i] + dy*fy[i] + dz*fz[i]) else: continue PE = PE*0.5 virial = virial*0.5 # return [fx,fy,fz,PE,virial] @jit def applyPBC(N,x,y,z,lx,ly,lz): x = x - np.round(x/lx)*lx y = y - np.round(y/ly)*ly z = z - np.round(z/lz)*lz return [x,y,z] #======== main program ================================================# #======== main program ================================================# #======== main program ================================================# x = np.zeros(N) y = np.zeros(N) z = np.zeros(N) vx = np.zeros(N) vy = np.zeros(N) vz = np.zeros(N) vx = np.random.rand(N) vy = np.random.rand(N) vz = np.random.rand(N) fx = np.zeros(N) fy = np.zeros(N) fz = np.zeros(N) nCount = np.zeros(N, dtype=int) nList = np.zeros((N,N), dtype=int) [x,y,z,lx,ly,lz] = put_on_3d_lattice(N, rho, sigma) fig = plt.figure() fig.patch.set_facecolor('white') ax = fig.add_subplot(111, projection='3d') ax.scatter(x,y,z,s=60,color='blue') # plt.show() # to write the xyz file. xyz_file = "out_config.xyz" write_xyz_file(xyz_file, x,y,z) #open the thermo file. thermo_file = "out_thermo.dat" fout_thermo = open(thermo_file, 'w+') # get the neighbor list. [nCount,nList,Distances] = get_Neighbor_List(N,x,y,z,lx,ly,lz,sigma,rcut,vskin) # now compute the forces, using the neighbor list. [fx,fy,fz,PE,virial] = compute_Forces_nbrList(N,x,y,z,nCount,nList,sigma,epsilon,lx,ly,lz,fx,fy,fz) # move the particles by integrating the eq. of motion/ using V.V. # 1st step of V-V. [x,y,z,vx,vy,vz] = VelocityVerlet_step_1(x,y,z,vx,vy,vz,fx,fy,fz,N,dt,mass) # compute forces for the 2nd step of V-V. [fx,fy,fz,PE,virial] = compute_Forces_nbrList(N,x,y,z,nCount,nList,sigma,epsilon,lx,ly,lz,fx,fy,fz) #2nd step, of V-V. [vx,vy,vz,KE] = VelocityVerlet_step_2(vx,vy,vz,fx,fy,fz,N,dt,mass) fig1 = plt.figure() fig1.patch.set_facecolor('white') ax1 = fig1.add_subplot(111, projection='3d') ax1.scatter(x,y,z,s=60,color='blue') # plt.show() for itr in range(nmaxsteps): timeNow = itr*dt [x,y,z] = applyPBC(N,x,y,z,lx,ly,lz) # calculate the neighbor list at a defined interval, after every nbrList_freq steps. 
if (itr % nbrList_freq == 0): [nCount,nList,Distances] = get_Neighbor_List(N,x,y,z,lx,ly,lz,sigma,rcut,vskin) [x,y,z,vx,vy,vz] = VelocityVerlet_step_1(x,y,z,vx,vy,vz,fx,fy,fz,N,dt,mass) #[fx,fy,fz,PE,virial] = computeForces(x,y,z,N,sigma,epsilon) [fx,fy,fz,PE,virial] = compute_Forces_nbrList(N,x,y,z,nCount,nList,sigma,epsilon,lx,ly,lz,fx,fy,fz) [vx,vy,vz,KE] = VelocityVerlet_step_2(vx,vy,vz,fx,fy,fz,N,dt,mass) tempInst = KE*2.0/(3.0*N-1) virial = virial/(3.0*N) pressure = rho*(tempInst + virial) if (itr % thermo_freq == 0): fout_thermo.write("{} {} {} {} {} {}\n".format(timeNow,tempInst,PE,KE,pressure,virial)) fout_thermo.flush() fout_thermo.close() #=================== to plot. thermo_data = np.loadtxt('out_thermo.dat') time = thermo_data[:,0] T = thermo_data[:,1] PE = thermo_data[:,2] KE = thermo_data[:,3] pressure = thermo_data[:,4] virial = thermo_data[:,5] plt.figure(figsize=[5,5]) plt.plot(time,PE,label='PE') plt.plot(time,KE,label='KE') plt.plot(time,PE+KE,label='Total E') plt.title("PE, KE and Total energy",fontsize=18) plt.xlabel('time t (r.u.)',fontsize=18) plt.ylabel('Energy (r.u.)',fontsize=18) plt.legend(loc='best',fontsize=14) plt.tight_layout() #plt.show() plt.savefig('./forceShifted_E_vs_time.png',format='png',dpi=300) plt.figure(figsize=[5,5]) plt.plot(time,T) plt.title("Temperature",fontsize=18) plt.xlabel('time t (r.u.)',fontsize=18) plt.ylabel('Temperature (r.u.)',fontsize=18) plt.tight_layout() #plt.show() plt.savefig('./forceShifted_T_vs_time.png',format='png',dpi=300) plt.figure(figsize=[5,5]) plt.plot(time,pressure,label='Pressure') plt.plot(time,virial,label='Virial') plt.title("Pressure and Virial",fontsize=18) plt.xlabel('time t (r.u.)',fontsize=18) plt.ylabel('Pressure (r.u.)',fontsize=18) plt.legend(loc='best',fontsize=14) plt.tight_layout() #plt.show() plt.savefig('./forceShifted_P_vs_time.png',format='png',dpi=300)
if (i % (nx)**2 == 0): iy = 0 if (i % (nx)**3 == 0):
random_line_split
3d_LJ_nList.py
# -*- coding: utf-8 -*- """ Created on Mon Sep 6 13:41:55 2021 @author: Archana P S """ # 3d lattice import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from numba import jit plt.rcParams['font.family']="Times New Roman" plt.rcParams['xtick.labelsize']=18 plt.rcParams['ytick.labelsize']=18 #=================== define all the parameters here. =================# nx = 5 # no. of atoms along each direction of the cube. N = nx**3 # total no. of atoms. rho = 0.5 # desired density. temperature = 1.0 # desired temperature. sigma = 1.0 epsilon = 1.0 mass = 1.0 rcut = 2.5*sigma vskin = 0.5*sigma dt = 0.002 nmaxsteps = 5000 thermo_freq = 10 nbrList_freq = 5 #=====================================================================# @jit def put_on_3d_lattice(N, rho, sigma): lx = (N/rho)**(1/3) ly = lx lz = lx nx = int(np.cbrt(N)) ny = nx nz = nx dx = (lx - nx*sigma)/(nx-1) x = np.zeros(N) y = np.zeros(N) z = np.zeros(N) ix = 0 iy = 0 iz = 0 for i in range(N): if (i % nx == 0): ix = 0 if (i % (nx)**2 == 0): iy = 0 if (i % (nx)**3 == 0): iz = 0 else: iz = iz +1 else: iy = iy + 1 else: ix = ix + 1 x[i] = sigma/2.0 + ix*(dx + sigma) y[i] = sigma/2.0 + iy*(dx + sigma) z[i] = sigma/2.0 + iz*(dx + sigma) return [x,y,z,lx,ly,lz] def write_xyz_file(filename,x,y,z): fout_xyz = open(filename, 'w+') nMax = x.size fout_xyz.write("{}\n".format(nMax)) fout_xyz.write("comment\n") for i in range(nMax): fout_xyz.write("1 {} {} {}\n".format(x[i], y[i], z[i])) fout_xyz.close() return @jit def computeForces(x,y,z,natoms,sigma,epsilon): fx[:] = 0.0 fy[:] = 0.0 fz[:] = 0.0 PE = 0.0 virial = 0.0 for i in range(natoms): for j in range(natoms): #avoid the self interaction. if (j != i): #calculate distance b/w i and j particles. dx = x[i] - x[j] dy = y[i] - y[j] dz = z[i] - z[j] # minimum image convention. dx = dx - np.round(dx/lx)*lx dy = dy - np.round(dy/ly)*ly dz = dz - np.round(dz/lz)*lz # distance b/w i and j particles. dr = np.sqrt(dx**2 + dy**2 + dz**2) # now calculate the force. sr6 = (sigma/dr)**6.0 rinv = 1.0/dr rinv2 = rinv**2.0 comn_frc_term = 48.0*epsilon*sr6*(sr6 - 0.5)*rinv2 fx[i] = fx[i] + comn_frc_term*dx fy[i] = fy[i] + comn_frc_term*dy fz[i] = fz[i] + comn_frc_term*dz # calculate potential energy here. pot_term = 4.0*epsilon*sr6*(sr6 - 1.0) PE = PE + pot_term # calculation of virial. vir_term = dx*fx[i] + dy*fy[i] + dz*fz[i] virial = virial + vir_term PE = PE * 0.5 virial = virial * 0.5 return [fx,fy,fz,PE,virial] @jit def VelocityVerlet_step_1(x,y,z,vx,vy,vz,fx,fy,fz,N,dt,mass): # this does the first step of V-V algorithm. for i in range(N): # position update x[i] = x[i] + vx[i]*dt + 0.5*fx[i]/mass * dt**2.0 y[i] = y[i] + vy[i]*dt + 0.5*fy[i]/mass * dt**2.0 z[i] = z[i] + vz[i]*dt + 0.5*fz[i]/mass * dt**2.0 # velocity update. vx[i] = vx[i] + fx[i]*dt*0.5 vy[i] = vy[i] + fy[i]*dt*0.5 vz[i] = vz[i] + fz[i]*dt*0.5 return [x,y,z,vx,vy,vz] @jit def VelocityVerlet_step_2(vx,vy,vz,fx,fy,fz,N,dt,mass): # update only velocities. and calculate Kinetic energy. KE = 0.0 for i in range(N): vx[i] = vx[i] + fx[i]*dt*0.5 vy[i] = vy[i] + fy[i]*dt*0.5 vz[i] = vz[i] + fz[i]*dt*0.5 KE = KE + (vx[i]**2.0 + vy[i]**2.0 + vz[i]**2.0)*mass*0.5 return [vx,vy,vz,KE] #======== function which will calculate the neighbor list. @jit def get_Neighbor_List(natoms,x,y,z,lx,ly,lz,sigma,rcut,vskin): # Siva, 19 Sept, 2021. 
Distances = np.zeros((natoms,natoms)) nCount[:] = 0 nList[:,:] = 0 for i in range(natoms): Distances[i,i] = lx for j in range(natoms): if(j != i): dx = x[i] - x[j] dy = y[i] - y[j] dz = z[i] - z[j] #minimum image convention. dx = dx - np.round(dx/lx)*lx dy = dy - np.round(dy/ly)*ly dz = dz - np.round(dz/lz)*lz rij = np.sqrt(dx**2 + dy**2 + dz**2) Distances[i,j] = rij #Distances[j,i] = Distances[i,j] verlet_R = (rcut+vskin)*sigma if(rij < verlet_R): nCount[i] = nCount[i]+1 k = nCount[i] # start_index = i*natoms nList[i, k-1] = j else: continue return [nCount,nList,Distances] #======== function which will compute the forces on all the particles, #======== using the list of neighbors for every particle. @jit def compute_Forces_nbrList(natoms,x,y,z,nCount,nList,sigma,epsilon,lx,ly,lz,fx,fy,fz): # Siva, 19 Sept, 2021.
@jit def applyPBC(N,x,y,z,lx,ly,lz): x = x - np.round(x/lx)*lx y = y - np.round(y/ly)*ly z = z - np.round(z/lz)*lz return [x,y,z] #======== main program ================================================# #======== main program ================================================# #======== main program ================================================# x = np.zeros(N) y = np.zeros(N) z = np.zeros(N) vx = np.zeros(N) vy = np.zeros(N) vz = np.zeros(N) vx = np.random.rand(N) vy = np.random.rand(N) vz = np.random.rand(N) fx = np.zeros(N) fy = np.zeros(N) fz = np.zeros(N) nCount = np.zeros(N, dtype=int) nList = np.zeros((N,N), dtype=int) [x,y,z,lx,ly,lz] = put_on_3d_lattice(N, rho, sigma) fig = plt.figure() fig.patch.set_facecolor('white') ax = fig.add_subplot(111, projection='3d') ax.scatter(x,y,z,s=60,color='blue') # plt.show() # to write the xyz file. xyz_file = "out_config.xyz" write_xyz_file(xyz_file, x,y,z) #open the thermo file. thermo_file = "out_thermo.dat" fout_thermo = open(thermo_file, 'w+') # get the neighbor list. [nCount,nList,Distances] = get_Neighbor_List(N,x,y,z,lx,ly,lz,sigma,rcut,vskin) # now compute the forces, using the neighbor list. [fx,fy,fz,PE,virial] = compute_Forces_nbrList(N,x,y,z,nCount,nList,sigma,epsilon,lx,ly,lz,fx,fy,fz) # move the particles by integrating the eq. of motion/ using V.V. # 1st step of V-V. [x,y,z,vx,vy,vz] = VelocityVerlet_step_1(x,y,z,vx,vy,vz,fx,fy,fz,N,dt,mass) # compute forces for the 2nd step of V-V. [fx,fy,fz,PE,virial] = compute_Forces_nbrList(N,x,y,z,nCount,nList,sigma,epsilon,lx,ly,lz,fx,fy,fz) #2nd step, of V-V. [vx,vy,vz,KE] = VelocityVerlet_step_2(vx,vy,vz,fx,fy,fz,N,dt,mass) fig1 = plt.figure() fig1.patch.set_facecolor('white') ax1 = fig1.add_subplot(111, projection='3d') ax1.scatter(x,y,z,s=60,color='blue') # plt.show() for itr in range(nmaxsteps): timeNow = itr*dt [x,y,z] = applyPBC(N,x,y,z,lx,ly,lz) # calculate the neighbor list at a defined interval, after every nbrList_freq steps. if (itr % nbrList_freq == 0): [nCount,nList,Distances] = get_Neighbor_List(N,x,y,z,lx,ly,lz,sigma,rcut,vskin) [x,y,z,vx,vy,vz] = VelocityVerlet_step_1(x,y,z,vx,vy,vz,fx,fy,fz,N,dt,mass) #[fx,fy,fz,PE,virial] = computeForces(x,y,z,N,sigma,epsilon) [fx,fy,fz,PE,virial] = compute_Forces_nbrList(N,x,y,z,nCount,nList,sigma,epsilon,lx,ly,lz,fx,fy,fz) [vx,vy,vz,KE] = VelocityVerlet_step_2(vx,vy,vz,fx,fy,fz,N,dt,mass) tempInst = KE*2.0/(3.0*N-1) virial = virial/(3.0*N) pressure = rho*(tempInst + virial) if (itr % thermo_freq == 0): fout_thermo.write("{} {} {} {} {} {}\n".format(timeNow,tempInst,PE,KE,pressure,virial)) fout_thermo.flush() fout_thermo.close() #=================== to plot. 
thermo_data = np.loadtxt('out_thermo.dat') time = thermo_data[:,0] T = thermo_data[:,1] PE = thermo_data[:,2] KE = thermo_data[:,3] pressure = thermo_data[:,4] virial = thermo_data[:,5] plt.figure(figsize=[5,5]) plt.plot(time,PE,label='PE') plt.plot(time,KE,label='KE') plt.plot(time,PE+KE,label='Total E') plt.title("PE, KE and Total energy",fontsize=18) plt.xlabel('time t (r.u.)',fontsize=18) plt.ylabel('Energy (r.u.)',fontsize=18) plt.legend(loc='best',fontsize=14) plt.tight_layout() #plt.show() plt.savefig('./forceShifted_E_vs_time.png',format='png',dpi=300) plt.figure(figsize=[5,5]) plt.plot(time,T) plt.title("Temperature",fontsize=18) plt.xlabel('time t (r.u.)',fontsize=18) plt.ylabel('Temperature (r.u.)',fontsize=18) plt.tight_layout() #plt.show() plt.savefig('./forceShifted_T_vs_time.png',format='png',dpi=300) plt.figure(figsize=[5,5]) plt.plot(time,pressure,label='Pressure') plt.plot(time,virial,label='Virial') plt.title("Pressure and Virial",fontsize=18) plt.xlabel('time t (r.u.)',fontsize=18) plt.ylabel('Pressure (r.u.)',fontsize=18) plt.legend(loc='best',fontsize=14) plt.tight_layout() #plt.show() plt.savefig('./forceShifted_P_vs_time.png',format='png',dpi=300)
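In the main program above the initial velocities come from `np.random.rand`, which is uniform on [0, 1): the sample carries a net centre-of-mass drift and its temperature is whatever the draw happens to give, so the `temperature = 1.0` parameter defined at the top is never actually used. A common recipe is to draw zero-mean velocities, remove the total momentum, and rescale to the target temperature. A sketch under those assumptions (the function name is mine, reduced units with kB = 1):

```python
import numpy as np

def init_velocities(N, temperature, mass):
    """Zero-momentum initial velocities rescaled to a target temperature."""
    vx = np.random.randn(N)
    vy = np.random.randn(N)
    vz = np.random.randn(N)
    # Remove centre-of-mass drift so the box does not translate as a whole.
    vx -= vx.mean(); vy -= vy.mean(); vz -= vz.mean()
    # Rescale so the instantaneous temperature matches the target.
    ke = 0.5 * mass * np.sum(vx**2 + vy**2 + vz**2)
    t_inst = 2.0 * ke / (3.0 * N - 3.0)  # 3 dof frozen with the COM momentum
    scale = np.sqrt(temperature / t_inst)
    return vx * scale, vy * scale, vz * scale
```

Dropping this in place of the three `np.random.rand(N)` lines would also make a 3N-3 divisor in the temperature estimate exact, since three degrees of freedom are removed with the centre of mass.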
fx[:] = 0.0 fy[:] = 0.0 fz[:] = 0.0 PE = 0.0 virial = 0.0 for i in range(natoms): # for k in range(nCount[i]): #starting = i*natoms j = nList[i, k] # if(j != i): #calculate the distance dx = x[i]-x[j] dy = y[i]-y[j] dz = z[i]-z[j] #minimum image. dx = dx - np.round(dx/lx)*lx dy = dy - np.round(dy/ly)*ly dz = dz - np.round(dz/lz)*lz rij = np.sqrt(dx**2.0 + dy**2.0 + dz**2.0) rij2 = rij**2.0 rcut2 = rcut**2.0 if(rij2 < rcut2): # need to calculate the force. rinv = 1.0/rij rinv2 = rinv**2.0 sr6 = (sigma/rij)**6.0 src6 = (sigma/rcut)**6.0 rcinv = 1.0/rcut rcinv2 = rcinv**2.0 # #use LJ potential, with predefined cut-off. frc_common = 48.0*epsilon*sr6*(sr6 - 0.5)*rinv2 fx[i] = fx[i] + frc_common*dx fy[i] = fy[i] + frc_common*dy fz[i] = fz[i] + frc_common*dz # shifting for the potential force-shifting. frc_shift = 48.0*epsilon*src6*(src6 - 0.5)*rcinv2 fx_shift = frc_shift*dx fy_shift = frc_shift*dy fz_shift = frc_shift*dz #shift it. fx[i] = fx[i] - fx_shift fy[i] = fy[i] - fy_shift fz[i] = fz[i] - fz_shift # now calculate PE & virial. pot_lj = 4.0*epsilon*sr6*(sr6 - 1.0) pot_rc = 4.0*epsilon*src6*(src6 - 1.0) pot_fs = -48.0*epsilon*src6*(src6 - 0.5)*rcinv # add all the components./ shifting. PE = PE + pot_lj - pot_rc - (rij - rcut)*pot_fs virial = virial + (dx*fx[i] + dy*fy[i] + dz*fz[i]) else: continue PE = PE*0.5 virial = virial*0.5 # return [fx,fy,fz,PE,virial]
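One detail worth flagging in the body above: the virial is accumulated as `dx*fx[i] + dy*fy[i] + dz*fz[i]` after `fx[i]` has already absorbed the contributions of earlier neighbours, so later pairs enter the sum weighted by the running total force rather than by their own pair force. The conventional pair virial uses only the (i, j) force. A self-contained sketch of a single force-shifted pair, mirroring the shifted forms used above and returning the pair term r·f that should be summed:

```python
import numpy as np

def lj_fs_pair(dx, dy, dz, sigma=1.0, epsilon=1.0, rcut=2.5):
    """Force-shifted LJ force, energy and virial for one (i, j) pair.

    Returns (fx, fy, fz, pe, w); w = r . f_pair is the quantity to
    accumulate into the virial, not the running totals fx[i], fy[i], fz[i].
    """
    r = np.sqrt(dx*dx + dy*dy + dz*dz)
    sr6 = (sigma / r)**6
    src6 = (sigma / rcut)**6
    f_lj = 48.0 * epsilon * sr6 * (sr6 - 0.5) / r**2       # plain LJ term
    f_rc = 48.0 * epsilon * src6 * (src6 - 0.5) / rcut**2  # shift, as in the script
    fx, fy, fz = (f_lj - f_rc) * dx, (f_lj - f_rc) * dy, (f_lj - f_rc) * dz
    # Potential shifted in value and slope at rcut, matching pot_lj/pot_rc/pot_fs above.
    pe = (4.0 * epsilon * sr6 * (sr6 - 1.0)
          - 4.0 * epsilon * src6 * (src6 - 1.0)
          + (r - rcut) * 48.0 * epsilon * src6 * (src6 - 0.5) / rcut)
    w = dx*fx + dy*fy + dz*fz
    return fx, fy, fz, pe, w
```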
identifier_body
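The list builder shown in these rows stores every pair twice (once as i→j and once as j→i) and visits all N² ordered pairs. A half list keeps each pair once, with j > i, so a force loop can apply Newton's third law and do half the work. A sketch under the same box and minimum-image conventions (the function name is mine; the `@jit` decorator used elsewhere in the script could be applied here too):

```python
import numpy as np

def build_half_list(x, y, z, lx, ly, lz, r_verlet):
    """Half Verlet list (j > i only): each pair appears once, so forces
    computed for (i, j) can be mirrored onto j with opposite sign."""
    n = x.size
    count = np.zeros(n, dtype=np.int64)
    nbrs = np.zeros((n, n), dtype=np.int64)
    for i in range(n):
        for j in range(i + 1, n):
            dx = x[i] - x[j]; dy = y[i] - y[j]; dz = z[i] - z[j]
            # minimum image convention, as in the script
            dx -= np.round(dx / lx) * lx
            dy -= np.round(dy / ly) * ly
            dz -= np.round(dz / lz) * lz
            if dx*dx + dy*dy + dz*dz < r_verlet * r_verlet:
                nbrs[i, count[i]] = j
                count[i] += 1
    return count, nbrs
```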
3d_LJ_nList.py
# -*- coding: utf-8 -*- """ Created on Mon Sep 6 13:41:55 2021 @author: Archana P S """ # 3d lattice import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from numba import jit plt.rcParams['font.family']="Times New Roman" plt.rcParams['xtick.labelsize']=18 plt.rcParams['ytick.labelsize']=18 #=================== define all the parameters here. =================# nx = 5 # no. of atoms along each direction of the cube. N = nx**3 # total no. of atoms. rho = 0.5 # desired density. temperature = 1.0 # desired temperature. sigma = 1.0 epsilon = 1.0 mass = 1.0 rcut = 2.5*sigma vskin = 0.5*sigma dt = 0.002 nmaxsteps = 5000 thermo_freq = 10 nbrList_freq = 5 #=====================================================================# @jit def put_on_3d_lattice(N, rho, sigma): lx = (N/rho)**(1/3) ly = lx lz = lx nx = int(np.cbrt(N)) ny = nx nz = nx dx = (lx - nx*sigma)/(nx-1) x = np.zeros(N) y = np.zeros(N) z = np.zeros(N) ix = 0 iy = 0 iz = 0 for i in range(N): if (i % nx == 0): ix = 0 if (i % (nx)**2 == 0): iy = 0 if (i % (nx)**3 == 0): iz = 0 else: iz = iz +1 else: iy = iy + 1 else: ix = ix + 1 x[i] = sigma/2.0 + ix*(dx + sigma) y[i] = sigma/2.0 + iy*(dx + sigma) z[i] = sigma/2.0 + iz*(dx + sigma) return [x,y,z,lx,ly,lz] def write_xyz_file(filename,x,y,z): fout_xyz = open(filename, 'w+') nMax = x.size fout_xyz.write("{}\n".format(nMax)) fout_xyz.write("comment\n") for i in range(nMax): fout_xyz.write("1 {} {} {}\n".format(x[i], y[i], z[i])) fout_xyz.close() return @jit def computeForces(x,y,z,natoms,sigma,epsilon): fx[:] = 0.0 fy[:] = 0.0 fz[:] = 0.0 PE = 0.0 virial = 0.0 for i in range(natoms): for j in range(natoms): #avoid the self interaction. if (j != i): #calculate distance b/w i and j particles. dx = x[i] - x[j] dy = y[i] - y[j] dz = z[i] - z[j] # minimum image convention. dx = dx - np.round(dx/lx)*lx dy = dy - np.round(dy/ly)*ly dz = dz - np.round(dz/lz)*lz # distance b/w i and j particles. dr = np.sqrt(dx**2 + dy**2 + dz**2) # now calculate the force. sr6 = (sigma/dr)**6.0 rinv = 1.0/dr rinv2 = rinv**2.0 comn_frc_term = 48.0*epsilon*sr6*(sr6 - 0.5)*rinv2 fx[i] = fx[i] + comn_frc_term*dx fy[i] = fy[i] + comn_frc_term*dy fz[i] = fz[i] + comn_frc_term*dz # calculate potential energy here. pot_term = 4.0*epsilon*sr6*(sr6 - 1.0) PE = PE + pot_term # calculation of virial. vir_term = dx*fx[i] + dy*fy[i] + dz*fz[i] virial = virial + vir_term PE = PE * 0.5 virial = virial * 0.5 return [fx,fy,fz,PE,virial] @jit def VelocityVerlet_step_1(x,y,z,vx,vy,vz,fx,fy,fz,N,dt,mass): # this does the first step of V-V algorithm. for i in range(N): # position update x[i] = x[i] + vx[i]*dt + 0.5*fx[i]/mass * dt**2.0 y[i] = y[i] + vy[i]*dt + 0.5*fy[i]/mass * dt**2.0 z[i] = z[i] + vz[i]*dt + 0.5*fz[i]/mass * dt**2.0 # velocity update. vx[i] = vx[i] + fx[i]*dt*0.5 vy[i] = vy[i] + fy[i]*dt*0.5 vz[i] = vz[i] + fz[i]*dt*0.5 return [x,y,z,vx,vy,vz] @jit def VelocityVerlet_step_2(vx,vy,vz,fx,fy,fz,N,dt,mass): # update only velocities. and calculate Kinetic energy. KE = 0.0 for i in range(N): vx[i] = vx[i] + fx[i]*dt*0.5 vy[i] = vy[i] + fy[i]*dt*0.5 vz[i] = vz[i] + fz[i]*dt*0.5 KE = KE + (vx[i]**2.0 + vy[i]**2.0 + vz[i]**2.0)*mass*0.5 return [vx,vy,vz,KE] #======== function which will calculate the neighbor list. @jit def get_Neighbor_List(natoms,x,y,z,lx,ly,lz,sigma,rcut,vskin): # Siva, 19 Sept, 2021. 
Distances = np.zeros((natoms,natoms)) nCount[:] = 0 nList[:,:] = 0 for i in range(natoms): Distances[i,i] = lx for j in range(natoms): if(j != i): dx = x[i] - x[j] dy = y[i] - y[j] dz = z[i] - z[j] #minimum image convention. dx = dx - np.round(dx/lx)*lx dy = dy - np.round(dy/ly)*ly dz = dz - np.round(dz/lz)*lz rij = np.sqrt(dx**2 + dy**2 + dz**2) Distances[i,j] = rij #Distances[j,i] = Distances[i,j] verlet_R = (rcut+vskin)*sigma if(rij < verlet_R): nCount[i] = nCount[i]+1 k = nCount[i] # start_index = i*natoms nList[i, k-1] = j else: continue return [nCount,nList,Distances] #======== function which will compute the forces on all the particles, #======== using the list of neighbors for every particle. @jit def compute_Forces_nbrList(natoms,x,y,z,nCount,nList,sigma,epsilon,lx,ly,lz,fx,fy,fz): # Siva, 19 Sept, 2021. fx[:] = 0.0 fy[:] = 0.0 fz[:] = 0.0 PE = 0.0 virial = 0.0 for i in range(natoms): # for k in range(nCount[i]): #starting = i*natoms j = nList[i, k] # if(j != i): #calculate the distance dx = x[i]-x[j] dy = y[i]-y[j] dz = z[i]-z[j] #minimum image. dx = dx - np.round(dx/lx)*lx dy = dy - np.round(dy/ly)*ly dz = dz - np.round(dz/lz)*lz rij = np.sqrt(dx**2.0 + dy**2.0 + dz**2.0) rij2 = rij**2.0 rcut2 = rcut**2.0 if(rij2 < rcut2): # need to calculate the force. rinv = 1.0/rij rinv2 = rinv**2.0 sr6 = (sigma/rij)**6.0 src6 = (sigma/rcut)**6.0 rcinv = 1.0/rcut rcinv2 = rcinv**2.0 # #use LJ potential, with predefined cut-off. frc_common = 48.0*epsilon*sr6*(sr6 - 0.5)*rinv2 fx[i] = fx[i] + frc_common*dx fy[i] = fy[i] + frc_common*dy fz[i] = fz[i] + frc_common*dz # shifting for the potential force-shifting. frc_shift = 48.0*epsilon*src6*(src6 - 0.5)*rcinv2 fx_shift = frc_shift*dx fy_shift = frc_shift*dy fz_shift = frc_shift*dz #shift it. fx[i] = fx[i] - fx_shift fy[i] = fy[i] - fy_shift fz[i] = fz[i] - fz_shift # now calculate PE & virial. pot_lj = 4.0*epsilon*sr6*(sr6 - 1.0) pot_rc = 4.0*epsilon*src6*(src6 - 1.0) pot_fs = -48.0*epsilon*src6*(src6 - 0.5)*rcinv # add all the components./ shifting. PE = PE + pot_lj - pot_rc - (rij - rcut)*pot_fs virial = virial + (dx*fx[i] + dy*fy[i] + dz*fz[i]) else:
PE = PE*0.5 virial = virial*0.5 # return [fx,fy,fz,PE,virial] @jit def applyPBC(N,x,y,z,lx,ly,lz): x = x - np.round(x/lx)*lx y = y - np.round(y/ly)*ly z = z - np.round(z/lz)*lz return [x,y,z] #======== main program ================================================# #======== main program ================================================# #======== main program ================================================# x = np.zeros(N) y = np.zeros(N) z = np.zeros(N) vx = np.zeros(N) vy = np.zeros(N) vz = np.zeros(N) vx = np.random.rand(N) vy = np.random.rand(N) vz = np.random.rand(N) fx = np.zeros(N) fy = np.zeros(N) fz = np.zeros(N) nCount = np.zeros(N, dtype=int) nList = np.zeros((N,N), dtype=int) [x,y,z,lx,ly,lz] = put_on_3d_lattice(N, rho, sigma) fig = plt.figure() fig.patch.set_facecolor('white') ax = fig.add_subplot(111, projection='3d') ax.scatter(x,y,z,s=60,color='blue') # plt.show() # to write the xyz file. xyz_file = "out_config.xyz" write_xyz_file(xyz_file, x,y,z) #open the thermo file. thermo_file = "out_thermo.dat" fout_thermo = open(thermo_file, 'w+') # get the neighbor list. [nCount,nList,Distances] = get_Neighbor_List(N,x,y,z,lx,ly,lz,sigma,rcut,vskin) # now compute the forces, using the neighbor list. [fx,fy,fz,PE,virial] = compute_Forces_nbrList(N,x,y,z,nCount,nList,sigma,epsilon,lx,ly,lz,fx,fy,fz) # move the particles by integrating the eq. of motion/ using V.V. # 1st step of V-V. [x,y,z,vx,vy,vz] = VelocityVerlet_step_1(x,y,z,vx,vy,vz,fx,fy,fz,N,dt,mass) # compute forces for the 2nd step of V-V. [fx,fy,fz,PE,virial] = compute_Forces_nbrList(N,x,y,z,nCount,nList,sigma,epsilon,lx,ly,lz,fx,fy,fz) #2nd step, of V-V. [vx,vy,vz,KE] = VelocityVerlet_step_2(vx,vy,vz,fx,fy,fz,N,dt,mass) fig1 = plt.figure() fig1.patch.set_facecolor('white') ax1 = fig1.add_subplot(111, projection='3d') ax1.scatter(x,y,z,s=60,color='blue') # plt.show() for itr in range(nmaxsteps): timeNow = itr*dt [x,y,z] = applyPBC(N,x,y,z,lx,ly,lz) # calculate the neighbor list at a defined interval, after every nbrList_freq steps. if (itr % nbrList_freq == 0): [nCount,nList,Distances] = get_Neighbor_List(N,x,y,z,lx,ly,lz,sigma,rcut,vskin) [x,y,z,vx,vy,vz] = VelocityVerlet_step_1(x,y,z,vx,vy,vz,fx,fy,fz,N,dt,mass) #[fx,fy,fz,PE,virial] = computeForces(x,y,z,N,sigma,epsilon) [fx,fy,fz,PE,virial] = compute_Forces_nbrList(N,x,y,z,nCount,nList,sigma,epsilon,lx,ly,lz,fx,fy,fz) [vx,vy,vz,KE] = VelocityVerlet_step_2(vx,vy,vz,fx,fy,fz,N,dt,mass) tempInst = KE*2.0/(3.0*N-1) virial = virial/(3.0*N) pressure = rho*(tempInst + virial) if (itr % thermo_freq == 0): fout_thermo.write("{} {} {} {} {} {}\n".format(timeNow,tempInst,PE,KE,pressure,virial)) fout_thermo.flush() fout_thermo.close() #=================== to plot. 
thermo_data = np.loadtxt('out_thermo.dat') time = thermo_data[:,0] T = thermo_data[:,1] PE = thermo_data[:,2] KE = thermo_data[:,3] pressure = thermo_data[:,4] virial = thermo_data[:,5] plt.figure(figsize=[5,5]) plt.plot(time,PE,label='PE') plt.plot(time,KE,label='KE') plt.plot(time,PE+KE,label='Total E') plt.title("PE, KE and Total energy",fontsize=18) plt.xlabel('time t (r.u.)',fontsize=18) plt.ylabel('Energy (r.u.)',fontsize=18) plt.legend(loc='best',fontsize=14) plt.tight_layout() #plt.show() plt.savefig('./forceShifted_E_vs_time.png',format='png',dpi=300) plt.figure(figsize=[5,5]) plt.plot(time,T) plt.title("Temperature",fontsize=18) plt.xlabel('time t (r.u.)',fontsize=18) plt.ylabel('Temperature (r.u.)',fontsize=18) plt.tight_layout() #plt.show() plt.savefig('./forceShifted_T_vs_time.png',format='png',dpi=300) plt.figure(figsize=[5,5]) plt.plot(time,pressure,label='Pressure') plt.plot(time,virial,label='Virial') plt.title("Pressure and Virial",fontsize=18) plt.xlabel('time t (r.u.)',fontsize=18) plt.ylabel('Pressure (r.u.)',fontsize=18) plt.legend(loc='best',fontsize=14) plt.tight_layout() #plt.show() plt.savefig('./forceShifted_P_vs_time.png',format='png',dpi=300)
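The energy plot repeated above makes drift visible by eye; a single number is handier when tuning `dt` or the cut-off. A short sketch that fits a line to PE + KE from the same `out_thermo.dat` and reports the slope:

```python
import numpy as np

data = np.loadtxt("out_thermo.dat")
t = data[:, 0]
e_tot = data[:, 2] + data[:, 3]        # PE + KE, same columns as above
slope = np.polyfit(t, e_tot, 1)[0]     # linear drift estimate
print(f"energy drift: {slope:.3e} per unit time "
      f"({slope / abs(e_tot.mean()):.3e} relative)")
```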
continue
conditional_block
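The driver in these rows rebuilds the Verlet list on a fixed schedule, every `nbrList_freq` steps. A displacement-based criterion is a common alternative: snapshot the coordinates whenever the list is built and rebuild once any particle has moved more than half the skin, which keeps the list valid by construction. A sketch (names are mine):

```python
import numpy as np

def needs_rebuild(x, y, z, x0, y0, z0, vskin):
    """True once any particle has moved more than vskin/2 since the list
    was built at coordinates (x0, y0, z0). Assumes displacements are
    tracked between PBC wraps (or on unwrapped coordinates)."""
    disp2 = (x - x0)**2 + (y - y0)**2 + (z - z0)**2
    return np.max(disp2) > (0.5 * vskin)**2
```

In the loop this would replace the `itr % nbrList_freq` test, with `x0, y0, z0 = x.copy(), y.copy(), z.copy()` refreshed on every rebuild.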
intcode.rs
//! # Day 2 1202 Program Alarm //! //! ## Part 1 //! //! On the way to your gravity assist around the Moon, your ship computer beeps //! angrily about a "1202 program alarm". On the radio, an Elf is already //! explaining how to handle the situation: "Don't worry, that's perfectly norma--" //! //! The ship computer bursts into flames. //! //! You notify the Elves that the computer's magic smoke seems to have escaped. //! //! "That computer ran Intcode programs like the gravity assist program it was //! working on; surely there are enough spare parts up there to build a new //! Intcode computer!" //! //! An Intcode program is a list of integers separated by commas (like 1,0,0,3,99). //! To run one, start by looking at the first integer (called position 0). Here, //! you will find an opcode - either 1, 2, or 99. The opcode indicates what to do; //! for example, 99 means that the program is finished and should immediately halt. //! Encountering an unknown opcode means something went wrong. //! //! Opcode 1 adds together numbers read from two positions and stores the result //! in a third position. The three integers immediately after the opcode tell you //! these three positions - the first two indicate the positions from which you //! should read the input values, and the third indicates the position at which //! the output should be stored. //! //! For example, if your Intcode computer encounters 1,10,20,30, it should read //! the values at positions 10 and 20, add those values, and then overwrite the //! value at position 30 with their sum. //! //! Opcode 2 works exactly like opcode 1, except it multiplies the two inputs //! instead of adding them. Again, the three integers after the opcode indicate //! where the inputs and outputs are, not their values. //! //! Once you're done processing an opcode, move to the next one by stepping //! forward 4 positions. //! //! For example, suppose you have the following program: //! //! ```text //! 1,9,10,3,2,3,11,0,99,30,40,50 //! ``` //! //! For the purposes of illustration, here is the same program split into //! multiple lines: //! //! ```text //! 1,9,10,3, //! 2,3,11,0, //! 99, //! 30,40,50 //! ``` //! //! The first four integers, `1,9,10,3`, are at positions `0`, `1`, `2`, and `3`. //! Together, they represent the first opcode (`1`, addition), the positions of //! the two inputs (`9` and `10`), and the position of the output (`3`). //! To handle this opcode, you first need to get the values at the input positions: //! position `9` contains `30`, and position `10` contains `40`. Add these numbers //! together to get `70`. Then, store this value at the output position; here, //! the output position (`3`) is at position `3`, so it overwrites itself. //! Afterward, the program looks like this: //! //! ```text //! 1,9,10,70, //! 2,3,11,0, //! 99, //! 30,40,50 //! ``` //! //! Step forward 4 positions to reach the next opcode, `2`. //! This opcode works just like the previous, but it multiplies instead of adding. //! The inputs are at positions `3` and `11`; these positions contain `70` and //! `50` respectively. Multiplying these produces `3500`; this is stored at //! position `0`: //! //! ```text //! 3500,9,10,70, //! 2,3,11,0, //! 99, //! 30,40,50 //! ``` //! //! Stepping forward 4 more positions arrives at opcode `99`, halting the //! program. //! //! Here are the initial and final states of a few more small programs: //! //! - `1,0,0,0,99` becomes `2,0,0,0,99` (1 + 1 = 2). //! - `2,3,0,3,99` becomes `2,3,0,6,99` (3 * 2 = 6). //! 
- `2,4,4,5,99,0` becomes `2,4,4,5,99,9801` (99 * 99 = 9801). //! - `1,1,1,4,99,5,6,0,99` becomes `30,1,1,4,2,5,6,0,99`. //! //! Once you have a working computer, the first step is to restore the gravity //! assist program (your puzzle input) to the "1202 program alarm" state it had //! just before the last computer caught fire. //! To do this, before running the program, replace position `1` with the value //! `12` and replace position `2` with the value `2`. What value is left at //! position `0` after the program halts? //! //! ## Part 2 //! //! "Good, the new computer seems to be working correctly! Keep it nearby during //! this mission - you'll probably use it again. Real Intcode computers support //! many more features than your new one, but we'll let you know what they are //! as you need them." //! //! "However, your current priority should be to complete your gravity assist //! around the Moon. For this mission to succeed, we should settle on some //! terminology for the parts you've already built." //! //! Intcode programs are given as a list of integers; these values are used as //! the initial state for the computer's memory. When you run an Intcode program, //! make sure to start by initializing memory to the program's values. A position //! in memory is called an address (for example, the first value in memory is at //! "address 0"). //! //! Opcodes (like 1, 2, or 99) mark the beginning of an instruction. The values //! used immediately after an opcode, if any, are called the instruction's //! parameters. For example, in the instruction 1,2,3,4, 1 is the opcode; 2, 3, //! and 4 are the parameters. The instruction 99 contains only an opcode and has //! no parameters. //! //! The address of the current instruction is called the instruction pointer; it //! starts at 0. After an instruction finishes, the instruction pointer increases //! by the number of values in the instruction; until you add more instructions //! to the computer, this is always 4 (1 opcode + 3 parameters) for the add and //! multiply instructions. (The halt instruction would increase the instruction //! pointer by 1, but it halts the program instead.) //! //! "With terminology out of the way, we're ready to proceed. To complete the //! gravity assist, you need to determine what pair of inputs produces the //! output 19690720." //! //! The inputs should still be provided to the program by replacing the values //! at addresses 1 and 2, just like before. In this program, the value placed in //! address 1 is called the noun, and the value placed in address 2 is called //! the verb. Each of the two input values will be between 0 and 99, inclusive. //! //! Once the program has halted, its output is available at address 0, also just
//! other words, don't reuse memory from a previous attempt. //! //! Find the input noun and verb that cause the program to produce the output //! 19690720. What is 100 * noun + verb? (For example, if noun=12 and verb=2, //! the answer would be 1202.) //! //! # Day 5: Sunny with a Chance of Asteroids //! //! ## Part 1 //! //! You're starting to sweat as the ship makes its way toward Mercury. The Elves //! suggest that you get the air conditioner working by upgrading your ship //! computer to support the Thermal Environment Supervision Terminal. //! //! The Thermal Environment Supervision Terminal (TEST) starts by running a //! diagnostic program (your puzzle input). The TEST diagnostic program will run //! on your existing Intcode computer after a few modifications: //! //! First, you'll need to add **two new instructions**: //! //! - Opcode `3` takes a single integer as **input** and saves it to the position //! given by its only parameter. For example, the instruction `3,50` would take //! an input value and store it at address `50`. //! - Opcode `4` **outputs** the value of its only parameter. For example, the //! instruction `4,50` would output the value at address `50`. //! //! Programs that use these instructions will come with documentation that //! explains what should be connected to the input and output. //! The program `3,0,4,0,99` outputs whatever it gets as input, then halts. //! //! Second, you'll need to add support for parameter modes: //! //! Each parameter of an instruction is handled based on its parameter mode. //! Right now, your ship computer already understands parameter mode 0, position //! mode, which causes the parameter to be interpreted as a position - if the //! parameter is 50, its value is the value stored at address 50 in memory. //! Until now, all parameters have been in position mode. //! //! Now, your ship computer will also need to handle parameters in mode 1, //! immediate mode. In immediate mode, a parameter is interpreted as a value - if //! the parameter is 50, its value is simply 50. //! //! Parameter modes are stored in the same value as the instruction's opcode. //! The opcode is a two-digit number based only on the ones and tens digit of the //! value, that is, the opcode is the rightmost two digits of the first value in //! an instruction. Parameter modes are single digits, one per parameter, read //! right-to-left from the opcode: the first parameter's mode is in the hundreds //! digit, the second parameter's mode is in the thousands digit, the third //! parameter's mode is in the ten-thousands digit, and so on. //! Any missing modes are 0. //! //! For example, consider the program `1002,4,3,4,33`. //! //! The first instruction, `1002,4,3,4`, is a multiply instruction - the rightmost //! two digits of the first value, 02, indicate opcode 2, multiplication. //! Then, going right to left, the parameter modes are 0 (hundreds digit), //! 1 (thousands digit), and 0 (ten-thousands digit, not present and therefore //! zero): //! //! ```text //! ABCDE //! 1002 //! //! DE - two-digit opcode, 02 == opcode 2 //! C - mode of 1st parameter, 0 == position mode //! B - mode of 2nd parameter, 1 == immediate mode //! A - mode of 3rd parameter, 0 == position mode, //! omitted due to being a leading zero //! ``` //! //! This instruction multiplies its first two parameters. //! The first parameter, 4 in position mode, works like it did before - its value //! is the value stored at address 4 (33). The second parameter, 3 in immediate //! mode, simply has value 3. 
The result of this operation, 33 * 3 = 99, is written //! according to the third parameter, 4 in position mode, which also works like //! it did before - 99 is written to address 4. //! //! Parameters that an instruction writes to will never be in immediate mode. //! //! Finally, some notes: //! //! - It is important to remember that the instruction pointer should increase by //! the number of values in the instruction after the instruction finishes. //! Because of the new instructions, this amount is no longer always `4`. //! - Integers can be negative: `1101,100,-1,4,0` is a valid program (find //! `100 + -1`, store the result in position `4`). //! //! The TEST diagnostic program will start by requesting from the user the ID of //! the system to test by running an input instruction - provide it 1, the ID for //! the ship's air conditioner unit. //! //! It will then perform a series of diagnostic tests confirming that various //! parts of the Intcode computer, like parameter modes, function correctly. //! For each test, it will run an output instruction indicating how far the result //! of the test was from the expected value, where 0 means the test was successful. //! Non-zero outputs mean that a function is not working correctly; check the //! instructions that were run before the output instruction to see which one //! failed. //! //! Finally, the program will output a diagnostic code and immediately halt. //! This final output isn't an error; an output followed immediately by a halt //! means the program finished. If all outputs were zero except the diagnostic //! code, the diagnostic program ran successfully. //! //! After providing 1 to the only input instruction and passing all the tests, //! what diagnostic code does the program produce? use std::convert::TryFrom; use std::str::FromStr; #[derive(Debug, PartialEq)] struct OpHeader { mode1: usize, mode2: usize, mode3: usize, opcode: usize, } impl FromStr for OpHeader { type Err = (); fn from_str(s: &str) -> Result<Self, Self::Err> { // initial string should not be larger than 5 or smaller than 1 chars. if s.len() > 5 || s.len() < 1 { return Err(()); } let padded = format!("{:0>5}", s.chars().take(5).collect::<String>()); let (modes, opcode) = padded.split_at(3); let modes: Vec<u32> = modes.chars().filter_map(|c| c.to_digit(10)).collect(); let opcode: usize = opcode.parse().map_err(|_| ())?; Ok(OpHeader { mode1: modes[2] as usize, mode2: modes[1] as usize, mode3: modes[0] as usize, opcode, }) } } impl TryFrom<i32> for OpHeader { type Error = (); fn try_from(value: i32) -> Result<Self, Self::Error> { value.to_string().parse() } } #[derive(Debug)] enum Param { Immediate(i32), Position(usize), } impl Param { pub fn from_pair((mode, value): (usize, i32)) -> Self { match mode { 0 => Param::Position(value as usize), 1 => Param::Immediate(value), _ => unreachable!(), } } } #[derive(Debug)] enum Op { Add { a: Param, b: Param, out: usize }, Multiply { a: Param, b: Param, out: usize }, Input { out: usize }, Output { value: Param }, Halt, Unknown, } /// Builds an `Op` from `data` by reading up to 4 items from a given offset. 
fn read_instruction(offset: usize, data: &[i32]) -> Op { // FIXME: add support for Input/Output let header: Option<OpHeader> = data.get(offset).and_then(|x| OpHeader::try_from(*x).ok()); match ( header, data.get(offset + 1).map(|x| *x), data.get(offset + 2).map(|x| *x), data.get(offset + 3).map(|x| *x), ) { ( Some(OpHeader { opcode: 1, mode1, mode2, mode3, }), Some(a), Some(b), Some(out), ) => Op::Add { a: Param::from_pair((mode1, a)), b: Param::from_pair((mode2, b)), out: match Param::from_pair((mode3, out)) { Param::Position(out) => out, _ => unreachable!("output params cannot be immediate"), }, }, ( Some(OpHeader { opcode: 2, mode1, mode2, mode3, }), Some(a), Some(b), Some(out), ) => Op::Multiply { a: Param::from_pair((mode1, a)), b: Param::from_pair((mode2, b)), out: match Param::from_pair((mode3, out)) { Param::Position(out) => out, _ => unreachable!("output params cannot be immediate"), }, }, (Some(OpHeader { opcode: 3, .. }), Some(out), _, _) => Op::Input { out: out as usize }, ( Some(OpHeader { opcode: 4, mode1, .. }), Some(value), _, _, ) => Op::Output { value: Param::from_pair((mode1, value)), }, (Some(OpHeader { opcode: 99, .. }), _, _, _) => Op::Halt, _ => Op::Unknown, } } fn read_value(param: Param, data: &[i32]) -> Option<i32> { match param { Param::Position(idx) => data.get(idx).map(|x| *x), Param::Immediate(val) => Some(val), } } use std::io::BufRead; fn prompt_for_input() -> Result<i32, ()> { let mut buf = String::new(); println!("Waiting for input... >"); std::io::stdin() .lock() .read_line(&mut buf) .expect("input read"); buf.trim().parse().map_err(|e| { eprintln!("{}", e); }) } /// Run an intcode program. pub fn compute(data: &mut [i32]) { let mut i = 0; loop { // FIXME: make read_instruction an iterator so it can manage the increment internally match read_instruction(i, &data) { Op::Add { a, b, out } => { let a = read_value(a, data).unwrap(); let b = read_value(b, data).unwrap(); data[out] = a + b; i += 4; } Op::Multiply { a, b, out } => { let a = read_value(a, data).unwrap(); let b = read_value(b, data).unwrap(); data[out] = a * b; i += 4; } Op::Input { out } => { let value = prompt_for_input().unwrap(); data[out] = value; i += 2; } Op::Output { value } => { let value = read_value(value, data).unwrap(); println!("offset={}, value={}", i, value); i += 2; } Op::Halt => break, _ => unreachable!(), } } } /// Attempt to identify the noun and verb (injected header values) which will /// yield a certain target from a source intcode program by way of permutations. 
pub fn solve(target: i32, data: &[i32]) -> Result<(i32, i32), ()> { for (noun, verb) in (0..=99).flat_map(|i| (0..=99).map(move |j| (i, j))) { let mut input = data.to_vec(); input[1] = noun; input[2] = verb; compute(&mut input); if input[0] == target { return Ok((noun, verb)); } } Err(()) } #[cfg(test)] mod day02_1_tests { use super::compute; #[test] fn test_example_1() { let mut input = vec![1, 0, 0, 0, 99]; compute(&mut input); assert_eq!(&input, &[2, 0, 0, 0, 99]); } #[test] fn test_example_2() { let mut input = vec![2, 3, 0, 3, 99]; compute(&mut input); assert_eq!(&input, &[2, 3, 0, 6, 99]); } #[test] fn test_example_3() { let mut input = vec![2, 4, 4, 5, 99, 0]; compute(&mut input); assert_eq!(&input, &[2, 4, 4, 5, 99, 9801]); } #[test] fn test_example_4() { let mut input = vec![1, 1, 1, 4, 99, 5, 6, 0, 99]; compute(&mut input); assert_eq!(&input, &[30, 1, 1, 4, 2, 5, 6, 0, 99]); } } #[cfg(test)] mod day05_1_tests { use super::{compute, OpHeader}; #[test] fn test_header_pad() { // when the header is "short" it should be padded on the left with zeros assert_eq!( "102".parse::<OpHeader>().unwrap(), OpHeader { mode1: 1, mode2: 0, mode3: 0, opcode: 2 } ); } #[test] fn test_header_parse_two_digit_opcode() { assert_eq!( "1022".parse::<OpHeader>().unwrap(), OpHeader { mode1: 0, mode2: 1, mode3: 0, opcode: 22 } ); } #[test] fn test_example_1() { let mut input = vec![1002, 4, 3, 4, 33]; compute(&mut input); assert_eq!(&input, &[1002, 4, 3, 4, 99]); } }
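`OpHeader` goes through a zero-padded string to peel the modes apart; the same decode can be done with integer division and remainder, avoiding the intermediate `String`. A sketch of the equivalent arithmetic (a free function for illustration, not part of the module above):

```rust
fn decode_header(value: i32) -> (usize, usize, usize, usize) {
    // The rightmost two digits are the opcode; the next three digits are
    // the parameter modes, read right to left. Missing digits are zero.
    debug_assert!(value >= 0);
    let v = value as usize;
    let opcode = v % 100;
    let mode1 = (v / 100) % 10;
    let mode2 = (v / 1_000) % 10;
    let mode3 = (v / 10_000) % 10;
    (mode1, mode2, mode3, opcode)
}
```

`decode_header(1002)` yields `(0, 1, 0, 2)`, matching the `ABCDE` worked example in the doc comment.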
//! like before. Each time you try a pair of inputs, make sure you first reset //! the computer's memory to the values in the program (your puzzle input) - in
random_line_split
intcode.rs
//! # Day 2 1202 Program Alarm //! //! ## Part 1 //! //! On the way to your gravity assist around the Moon, your ship computer beeps //! angrily about a "1202 program alarm". On the radio, an Elf is already //! explaining how to handle the situation: "Don't worry, that's perfectly norma--" //! //! The ship computer bursts into flames. //! //! You notify the Elves that the computer's magic smoke seems to have escaped. //! //! "That computer ran Intcode programs like the gravity assist program it was //! working on; surely there are enough spare parts up there to build a new //! Intcode computer!" //! //! An Intcode program is a list of integers separated by commas (like 1,0,0,3,99). //! To run one, start by looking at the first integer (called position 0). Here, //! you will find an opcode - either 1, 2, or 99. The opcode indicates what to do; //! for example, 99 means that the program is finished and should immediately halt. //! Encountering an unknown opcode means something went wrong. //! //! Opcode 1 adds together numbers read from two positions and stores the result //! in a third position. The three integers immediately after the opcode tell you //! these three positions - the first two indicate the positions from which you //! should read the input values, and the third indicates the position at which //! the output should be stored. //! //! For example, if your Intcode computer encounters 1,10,20,30, it should read //! the values at positions 10 and 20, add those values, and then overwrite the //! value at position 30 with their sum. //! //! Opcode 2 works exactly like opcode 1, except it multiplies the two inputs //! instead of adding them. Again, the three integers after the opcode indicate //! where the inputs and outputs are, not their values. //! //! Once you're done processing an opcode, move to the next one by stepping //! forward 4 positions. //! //! For example, suppose you have the following program: //! //! ```text //! 1,9,10,3,2,3,11,0,99,30,40,50 //! ``` //! //! For the purposes of illustration, here is the same program split into //! multiple lines: //! //! ```text //! 1,9,10,3, //! 2,3,11,0, //! 99, //! 30,40,50 //! ``` //! //! The first four integers, `1,9,10,3`, are at positions `0`, `1`, `2`, and `3`. //! Together, they represent the first opcode (`1`, addition), the positions of //! the two inputs (`9` and `10`), and the position of the output (`3`). //! To handle this opcode, you first need to get the values at the input positions: //! position `9` contains `30`, and position `10` contains `40`. Add these numbers //! together to get `70`. Then, store this value at the output position; here, //! the output position (`3`) is at position `3`, so it overwrites itself. //! Afterward, the program looks like this: //! //! ```text //! 1,9,10,70, //! 2,3,11,0, //! 99, //! 30,40,50 //! ``` //! //! Step forward 4 positions to reach the next opcode, `2`. //! This opcode works just like the previous, but it multiplies instead of adding. //! The inputs are at positions `3` and `11`; these positions contain `70` and //! `50` respectively. Multiplying these produces `3500`; this is stored at //! position `0`: //! //! ```text //! 3500,9,10,70, //! 2,3,11,0, //! 99, //! 30,40,50 //! ``` //! //! Stepping forward 4 more positions arrives at opcode `99`, halting the //! program. //! //! Here are the initial and final states of a few more small programs: //! //! - `1,0,0,0,99` becomes `2,0,0,0,99` (1 + 1 = 2). //! - `2,3,0,3,99` becomes `2,3,0,6,99` (3 * 2 = 6). //! 
- `2,4,4,5,99,0` becomes `2,4,4,5,99,9801` (99 * 99 = 9801). //! - `1,1,1,4,99,5,6,0,99` becomes `30,1,1,4,2,5,6,0,99`. //! //! Once you have a working computer, the first step is to restore the gravity //! assist program (your puzzle input) to the "1202 program alarm" state it had //! just before the last computer caught fire. //! To do this, before running the program, replace position `1` with the value //! `12` and replace position `2` with the value `2`. What value is left at //! position `0` after the program halts? //! //! ## Part 2 //! //! "Good, the new computer seems to be working correctly! Keep it nearby during //! this mission - you'll probably use it again. Real Intcode computers support //! many more features than your new one, but we'll let you know what they are //! as you need them." //! //! "However, your current priority should be to complete your gravity assist //! around the Moon. For this mission to succeed, we should settle on some //! terminology for the parts you've already built." //! //! Intcode programs are given as a list of integers; these values are used as //! the initial state for the computer's memory. When you run an Intcode program, //! make sure to start by initializing memory to the program's values. A position //! in memory is called an address (for example, the first value in memory is at //! "address 0"). //! //! Opcodes (like 1, 2, or 99) mark the beginning of an instruction. The values //! used immediately after an opcode, if any, are called the instruction's //! parameters. For example, in the instruction 1,2,3,4, 1 is the opcode; 2, 3, //! and 4 are the parameters. The instruction 99 contains only an opcode and has //! no parameters. //! //! The address of the current instruction is called the instruction pointer; it //! starts at 0. After an instruction finishes, the instruction pointer increases //! by the number of values in the instruction; until you add more instructions //! to the computer, this is always 4 (1 opcode + 3 parameters) for the add and //! multiply instructions. (The halt instruction would increase the instruction //! pointer by 1, but it halts the program instead.) //! //! "With terminology out of the way, we're ready to proceed. To complete the //! gravity assist, you need to determine what pair of inputs produces the //! output 19690720." //! //! The inputs should still be provided to the program by replacing the values //! at addresses 1 and 2, just like before. In this program, the value placed in //! address 1 is called the noun, and the value placed in address 2 is called //! the verb. Each of the two input values will be between 0 and 99, inclusive. //! //! Once the program has halted, its output is available at address 0, also just //! like before. Each time you try a pair of inputs, make sure you first reset //! the computer's memory to the values in the program (your puzzle input) - in //! other words, don't reuse memory from a previous attempt. //! //! Find the input noun and verb that cause the program to produce the output //! 19690720. What is 100 * noun + verb? (For example, if noun=12 and verb=2, //! the answer would be 1202.) //! //! # Day 5: Sunny with a Chance of Asteroids //! //! ## Part 1 //! //! You're starting to sweat as the ship makes its way toward Mercury. The Elves //! suggest that you get the air conditioner working by upgrading your ship //! computer to support the Thermal Environment Supervision Terminal. //! //! The Thermal Environment Supervision Terminal (TEST) starts by running a //! 
diagnostic program (your puzzle input). The TEST diagnostic program will run //! on your existing Intcode computer after a few modifications: //! //! First, you'll need to add **two new instructions**: //! //! - Opcode `3` takes a single integer as **input** and saves it to the position //! given by its only parameter. For example, the instruction `3,50` would take //! an input value and store it at address `50`. //! - Opcode `4` **outputs** the value of its only parameter. For example, the //! instruction `4,50` would output the value at address `50`. //! //! Programs that use these instructions will come with documentation that //! explains what should be connected to the input and output. //! The program `3,0,4,0,99` outputs whatever it gets as input, then halts. //! //! Second, you'll need to add support for parameter modes: //! //! Each parameter of an instruction is handled based on its parameter mode. //! Right now, your ship computer already understands parameter mode 0, position //! mode, which causes the parameter to be interpreted as a position - if the //! parameter is 50, its value is the value stored at address 50 in memory. //! Until now, all parameters have been in position mode. //! //! Now, your ship computer will also need to handle parameters in mode 1, //! immediate mode. In immediate mode, a parameter is interpreted as a value - if //! the parameter is 50, its value is simply 50. //! //! Parameter modes are stored in the same value as the instruction's opcode. //! The opcode is a two-digit number based only on the ones and tens digit of the //! value, that is, the opcode is the rightmost two digits of the first value in //! an instruction. Parameter modes are single digits, one per parameter, read //! right-to-left from the opcode: the first parameter's mode is in the hundreds //! digit, the second parameter's mode is in the thousands digit, the third //! parameter's mode is in the ten-thousands digit, and so on. //! Any missing modes are 0. //! //! For example, consider the program `1002,4,3,4,33`. //! //! The first instruction, `1002,4,3,4`, is a multiply instruction - the rightmost //! two digits of the first value, 02, indicate opcode 2, multiplication. //! Then, going right to left, the parameter modes are 0 (hundreds digit), //! 1 (thousands digit), and 0 (ten-thousands digit, not present and therefore //! zero): //! //! ```text //! ABCDE //! 1002 //! //! DE - two-digit opcode, 02 == opcode 2 //! C - mode of 1st parameter, 0 == position mode //! B - mode of 2nd parameter, 1 == immediate mode //! A - mode of 3rd parameter, 0 == position mode, //! omitted due to being a leading zero //! ``` //! //! This instruction multiplies its first two parameters. //! The first parameter, 4 in position mode, works like it did before - its value //! is the value stored at address 4 (33). The second parameter, 3 in immediate //! mode, simply has value 3. The result of this operation, 33 * 3 = 99, is written //! according to the third parameter, 4 in position mode, which also works like //! it did before - 99 is written to address 4. //! //! Parameters that an instruction writes to will never be in immediate mode. //! //! Finally, some notes: //! //! - It is important to remember that the instruction pointer should increase by //! the number of values in the instruction after the instruction finishes. //! Because of the new instructions, this amount is no longer always `4`. //! - Integers can be negative: `1101,100,-1,4,0` is a valid program (find //! 
`100 + -1`, store the result in position `4`). //! //! The TEST diagnostic program will start by requesting from the user the ID of //! the system to test by running an input instruction - provide it 1, the ID for //! the ship's air conditioner unit. //! //! It will then perform a series of diagnostic tests confirming that various //! parts of the Intcode computer, like parameter modes, function correctly. //! For each test, it will run an output instruction indicating how far the result //! of the test was from the expected value, where 0 means the test was successful. //! Non-zero outputs mean that a function is not working correctly; check the //! instructions that were run before the output instruction to see which one //! failed. //! //! Finally, the program will output a diagnostic code and immediately halt. //! This final output isn't an error; an output followed immediately by a halt //! means the program finished. If all outputs were zero except the diagnostic //! code, the diagnostic program ran successfully. //! //! After providing 1 to the only input instruction and passing all the tests, //! what diagnostic code does the program produce? use std::convert::TryFrom; use std::str::FromStr; #[derive(Debug, PartialEq)] struct OpHeader { mode1: usize, mode2: usize, mode3: usize, opcode: usize, } impl FromStr for OpHeader { type Err = (); fn from_str(s: &str) -> Result<Self, Self::Err> { // initial string should not be larger than 5 or smaller than 1 chars. if s.len() > 5 || s.len() < 1 { return Err(()); } let padded = format!("{:0>5}", s.chars().take(5).collect::<String>()); let (modes, opcode) = padded.split_at(3); let modes: Vec<u32> = modes.chars().filter_map(|c| c.to_digit(10)).collect(); let opcode: usize = opcode.parse().map_err(|_| ())?; Ok(OpHeader { mode1: modes[2] as usize, mode2: modes[1] as usize, mode3: modes[0] as usize, opcode, }) } } impl TryFrom<i32> for OpHeader { type Error = (); fn try_from(value: i32) -> Result<Self, Self::Error> { value.to_string().parse() } } #[derive(Debug)] enum Param { Immediate(i32), Position(usize), } impl Param { pub fn
((mode, value): (usize, i32)) -> Self { match mode { 0 => Param::Position(value as usize), 1 => Param::Immediate(value), _ => unreachable!(), } } } #[derive(Debug)] enum Op { Add { a: Param, b: Param, out: usize }, Multiply { a: Param, b: Param, out: usize }, Input { out: usize }, Output { value: Param }, Halt, Unknown, } /// Builds an `Op` from `data` by reading up to 4 items from a given offset. fn read_instruction(offset: usize, data: &[i32]) -> Op { // FIXME: add support for Input/Output let header: Option<OpHeader> = data.get(offset).and_then(|x| OpHeader::try_from(*x).ok()); match ( header, data.get(offset + 1).map(|x| *x), data.get(offset + 2).map(|x| *x), data.get(offset + 3).map(|x| *x), ) { ( Some(OpHeader { opcode: 1, mode1, mode2, mode3, }), Some(a), Some(b), Some(out), ) => Op::Add { a: Param::from_pair((mode1, a)), b: Param::from_pair((mode2, b)), out: match Param::from_pair((mode3, out)) { Param::Position(out) => out, _ => unreachable!("output params cannot be immediate"), }, }, ( Some(OpHeader { opcode: 2, mode1, mode2, mode3, }), Some(a), Some(b), Some(out), ) => Op::Multiply { a: Param::from_pair((mode1, a)), b: Param::from_pair((mode2, b)), out: match Param::from_pair((mode3, out)) { Param::Position(out) => out, _ => unreachable!("output params cannot be immediate"), }, }, (Some(OpHeader { opcode: 3, .. }), Some(out), _, _) => Op::Input { out: out as usize }, ( Some(OpHeader { opcode: 4, mode1, .. }), Some(value), _, _, ) => Op::Output { value: Param::from_pair((mode1, value)), }, (Some(OpHeader { opcode: 99, .. }), _, _, _) => Op::Halt, _ => Op::Unknown, } } fn read_value(param: Param, data: &[i32]) -> Option<i32> { match param { Param::Position(idx) => data.get(idx).map(|x| *x), Param::Immediate(val) => Some(val), } } use std::io::BufRead; fn prompt_for_input() -> Result<i32, ()> { let mut buf = String::new(); println!("Waiting for input... >"); std::io::stdin() .lock() .read_line(&mut buf) .expect("input read"); buf.trim().parse().map_err(|e| { eprintln!("{}", e); }) } /// Run an intcode program. pub fn compute(data: &mut [i32]) { let mut i = 0; loop { // FIXME: make read_instruction an iterator so it can manage the increment internally match read_instruction(i, &data) { Op::Add { a, b, out } => { let a = read_value(a, data).unwrap(); let b = read_value(b, data).unwrap(); data[out] = a + b; i += 4; } Op::Multiply { a, b, out } => { let a = read_value(a, data).unwrap(); let b = read_value(b, data).unwrap(); data[out] = a * b; i += 4; } Op::Input { out } => { let value = prompt_for_input().unwrap(); data[out] = value; i += 2; } Op::Output { value } => { let value = read_value(value, data).unwrap(); println!("offset={}, value={}", i, value); i += 2; } Op::Halt => break, _ => unreachable!(), } } } /// Attempt to identify the noun and verb (injected header values) which will /// yield a certain target from a source intcode program by way of permutations. 
pub fn solve(target: i32, data: &[i32]) -> Result<(i32, i32), ()> { for (noun, verb) in (0..=99).flat_map(|i| (0..=99).map(move |j| (i, j))) { let mut input = data.to_vec(); input[1] = noun; input[2] = verb; compute(&mut input); if input[0] == target { return Ok((noun, verb)); } } Err(()) } #[cfg(test)] mod day02_1_tests { use super::compute; #[test] fn test_example_1() { let mut input = vec![1, 0, 0, 0, 99]; compute(&mut input); assert_eq!(&input, &[2, 0, 0, 0, 99]); } #[test] fn test_example_2() { let mut input = vec![2, 3, 0, 3, 99]; compute(&mut input); assert_eq!(&input, &[2, 3, 0, 6, 99]); } #[test] fn test_example_3() { let mut input = vec![2, 4, 4, 5, 99, 0]; compute(&mut input); assert_eq!(&input, &[2, 4, 4, 5, 99, 9801]); } #[test] fn test_example_4() { let mut input = vec![1, 1, 1, 4, 99, 5, 6, 0, 99]; compute(&mut input); assert_eq!(&input, &[30, 1, 1, 4, 2, 5, 6, 0, 99]); } } #[cfg(test)] mod day05_1_tests { use super::{compute, OpHeader}; #[test] fn test_header_pad() { // when the header is "short" it should be padded on the left with zeros assert_eq!( "102".parse::<OpHeader>().unwrap(), OpHeader { mode1: 1, mode2: 0, mode3: 0, opcode: 2 } ); } #[test] fn test_header_parse_two_digit_opcode() { assert_eq!( "1022".parse::<OpHeader>().unwrap(), OpHeader { mode1: 0, mode2: 1, mode3: 0, opcode: 22 } ); } #[test] fn test_example_1() { let mut input = vec![1002, 4, 3, 4, 33]; compute(&mut input); assert_eq!(&input, &[1002, 4, 3, 4, 99]); } }
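For part 2, `solve` scans the 100×100 noun/verb grid on a fresh copy of memory each time, as the puzzle requires. A usage sketch, assuming it sits next to `solve` in the same crate; the input path is hypothetical and the target is the one named in the doc comment:

```rust
use std::fs;

fn main() -> Result<(), ()> {
    // Hypothetical input path; the puzzle input is one comma-separated line.
    let raw = fs::read_to_string("input.txt").map_err(|_| ())?;
    let program: Vec<i32> = raw
        .trim()
        .split(',')
        .filter_map(|s| s.parse().ok())
        .collect();

    let (noun, verb) = solve(19690720, &program)?;
    println!("100 * noun + verb = {}", 100 * noun + verb);
    Ok(())
}
```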
from_pair
identifier_name
intcode.rs
//! # Day 2 1202 Program Alarm //! //! ## Part 1 //! //! On the way to your gravity assist around the Moon, your ship computer beeps //! angrily about a "1202 program alarm". On the radio, an Elf is already //! explaining how to handle the situation: "Don't worry, that's perfectly norma--" //! //! The ship computer bursts into flames. //! //! You notify the Elves that the computer's magic smoke seems to have escaped. //! //! "That computer ran Intcode programs like the gravity assist program it was //! working on; surely there are enough spare parts up there to build a new //! Intcode computer!" //! //! An Intcode program is a list of integers separated by commas (like 1,0,0,3,99). //! To run one, start by looking at the first integer (called position 0). Here, //! you will find an opcode - either 1, 2, or 99. The opcode indicates what to do; //! for example, 99 means that the program is finished and should immediately halt. //! Encountering an unknown opcode means something went wrong. //! //! Opcode 1 adds together numbers read from two positions and stores the result //! in a third position. The three integers immediately after the opcode tell you //! these three positions - the first two indicate the positions from which you //! should read the input values, and the third indicates the position at which //! the output should be stored. //! //! For example, if your Intcode computer encounters 1,10,20,30, it should read //! the values at positions 10 and 20, add those values, and then overwrite the //! value at position 30 with their sum. //! //! Opcode 2 works exactly like opcode 1, except it multiplies the two inputs //! instead of adding them. Again, the three integers after the opcode indicate //! where the inputs and outputs are, not their values. //! //! Once you're done processing an opcode, move to the next one by stepping //! forward 4 positions. //! //! For example, suppose you have the following program: //! //! ```text //! 1,9,10,3,2,3,11,0,99,30,40,50 //! ``` //! //! For the purposes of illustration, here is the same program split into //! multiple lines: //! //! ```text //! 1,9,10,3, //! 2,3,11,0, //! 99, //! 30,40,50 //! ``` //! //! The first four integers, `1,9,10,3`, are at positions `0`, `1`, `2`, and `3`. //! Together, they represent the first opcode (`1`, addition), the positions of //! the two inputs (`9` and `10`), and the position of the output (`3`). //! To handle this opcode, you first need to get the values at the input positions: //! position `9` contains `30`, and position `10` contains `40`. Add these numbers //! together to get `70`. Then, store this value at the output position; here, //! the output position (`3`) is at position `3`, so it overwrites itself. //! Afterward, the program looks like this: //! //! ```text //! 1,9,10,70, //! 2,3,11,0, //! 99, //! 30,40,50 //! ``` //! //! Step forward 4 positions to reach the next opcode, `2`. //! This opcode works just like the previous, but it multiplies instead of adding. //! The inputs are at positions `3` and `11`; these positions contain `70` and //! `50` respectively. Multiplying these produces `3500`; this is stored at //! position `0`: //! //! ```text //! 3500,9,10,70, //! 2,3,11,0, //! 99, //! 30,40,50 //! ``` //! //! Stepping forward 4 more positions arrives at opcode `99`, halting the //! program. //! //! Here are the initial and final states of a few more small programs: //! //! - `1,0,0,0,99` becomes `2,0,0,0,99` (1 + 1 = 2). //! - `2,3,0,3,99` becomes `2,3,0,6,99` (3 * 2 = 6). //! 
- `2,4,4,5,99,0` becomes `2,4,4,5,99,9801` (99 * 99 = 9801). //! - `1,1,1,4,99,5,6,0,99` becomes `30,1,1,4,2,5,6,0,99`. //! //! Once you have a working computer, the first step is to restore the gravity //! assist program (your puzzle input) to the "1202 program alarm" state it had //! just before the last computer caught fire. //! To do this, before running the program, replace position `1` with the value //! `12` and replace position `2` with the value `2`. What value is left at //! position `0` after the program halts? //! //! ## Part 2 //! //! "Good, the new computer seems to be working correctly! Keep it nearby during //! this mission - you'll probably use it again. Real Intcode computers support //! many more features than your new one, but we'll let you know what they are //! as you need them." //! //! "However, your current priority should be to complete your gravity assist //! around the Moon. For this mission to succeed, we should settle on some //! terminology for the parts you've already built." //! //! Intcode programs are given as a list of integers; these values are used as //! the initial state for the computer's memory. When you run an Intcode program, //! make sure to start by initializing memory to the program's values. A position //! in memory is called an address (for example, the first value in memory is at //! "address 0"). //! //! Opcodes (like 1, 2, or 99) mark the beginning of an instruction. The values //! used immediately after an opcode, if any, are called the instruction's //! parameters. For example, in the instruction 1,2,3,4, 1 is the opcode; 2, 3, //! and 4 are the parameters. The instruction 99 contains only an opcode and has //! no parameters. //! //! The address of the current instruction is called the instruction pointer; it //! starts at 0. After an instruction finishes, the instruction pointer increases //! by the number of values in the instruction; until you add more instructions //! to the computer, this is always 4 (1 opcode + 3 parameters) for the add and //! multiply instructions. (The halt instruction would increase the instruction //! pointer by 1, but it halts the program instead.) //! //! "With terminology out of the way, we're ready to proceed. To complete the //! gravity assist, you need to determine what pair of inputs produces the //! output 19690720." //! //! The inputs should still be provided to the program by replacing the values //! at addresses 1 and 2, just like before. In this program, the value placed in //! address 1 is called the noun, and the value placed in address 2 is called //! the verb. Each of the two input values will be between 0 and 99, inclusive. //! //! Once the program has halted, its output is available at address 0, also just //! like before. Each time you try a pair of inputs, make sure you first reset //! the computer's memory to the values in the program (your puzzle input) - in //! other words, don't reuse memory from a previous attempt. //! //! Find the input noun and verb that cause the program to produce the output //! 19690720. What is 100 * noun + verb? (For example, if noun=12 and verb=2, //! the answer would be 1202.) //! //! # Day 5: Sunny with a Chance of Asteroids //! //! ## Part 1 //! //! You're starting to sweat as the ship makes its way toward Mercury. The Elves //! suggest that you get the air conditioner working by upgrading your ship //! computer to support the Thermal Environment Supervision Terminal. //! //! The Thermal Environment Supervision Terminal (TEST) starts by running a //! 
diagnostic program (your puzzle input). The TEST diagnostic program will run //! on your existing Intcode computer after a few modifications: //! //! First, you'll need to add **two new instructions**: //! //! - Opcode `3` takes a single integer as **input** and saves it to the position //! given by its only parameter. For example, the instruction `3,50` would take //! an input value and store it at address `50`. //! - Opcode `4` **outputs** the value of its only parameter. For example, the //! instruction `4,50` would output the value at address `50`. //! //! Programs that use these instructions will come with documentation that //! explains what should be connected to the input and output. //! The program `3,0,4,0,99` outputs whatever it gets as input, then halts. //! //! Second, you'll need to add support for parameter modes: //! //! Each parameter of an instruction is handled based on its parameter mode. //! Right now, your ship computer already understands parameter mode 0, position //! mode, which causes the parameter to be interpreted as a position - if the //! parameter is 50, its value is the value stored at address 50 in memory. //! Until now, all parameters have been in position mode. //! //! Now, your ship computer will also need to handle parameters in mode 1, //! immediate mode. In immediate mode, a parameter is interpreted as a value - if //! the parameter is 50, its value is simply 50. //! //! Parameter modes are stored in the same value as the instruction's opcode. //! The opcode is a two-digit number based only on the ones and tens digit of the //! value, that is, the opcode is the rightmost two digits of the first value in //! an instruction. Parameter modes are single digits, one per parameter, read //! right-to-left from the opcode: the first parameter's mode is in the hundreds //! digit, the second parameter's mode is in the thousands digit, the third //! parameter's mode is in the ten-thousands digit, and so on. //! Any missing modes are 0. //! //! For example, consider the program `1002,4,3,4,33`. //! //! The first instruction, `1002,4,3,4`, is a multiply instruction - the rightmost //! two digits of the first value, 02, indicate opcode 2, multiplication. //! Then, going right to left, the parameter modes are 0 (hundreds digit), //! 1 (thousands digit), and 0 (ten-thousands digit, not present and therefore //! zero): //! //! ```text //! ABCDE //! 1002 //! //! DE - two-digit opcode, 02 == opcode 2 //! C - mode of 1st parameter, 0 == position mode //! B - mode of 2nd parameter, 1 == immediate mode //! A - mode of 3rd parameter, 0 == position mode, //! omitted due to being a leading zero //! ``` //! //! This instruction multiplies its first two parameters. //! The first parameter, 4 in position mode, works like it did before - its value //! is the value stored at address 4 (33). The second parameter, 3 in immediate //! mode, simply has value 3. The result of this operation, 33 * 3 = 99, is written //! according to the third parameter, 4 in position mode, which also works like //! it did before - 99 is written to address 4. //! //! Parameters that an instruction writes to will never be in immediate mode. //! //! Finally, some notes: //! //! - It is important to remember that the instruction pointer should increase by //! the number of values in the instruction after the instruction finishes. //! Because of the new instructions, this amount is no longer always `4`. //! - Integers can be negative: `1101,100,-1,4,0` is a valid program (find //! 
`100 + -1`, store the result in position `4`). //! //! The TEST diagnostic program will start by requesting from the user the ID of //! the system to test by running an input instruction - provide it 1, the ID for //! the ship's air conditioner unit. //! //! It will then perform a series of diagnostic tests confirming that various //! parts of the Intcode computer, like parameter modes, function correctly. //! For each test, it will run an output instruction indicating how far the result //! of the test was from the expected value, where 0 means the test was successful. //! Non-zero outputs mean that a function is not working correctly; check the //! instructions that were run before the output instruction to see which one //! failed. //! //! Finally, the program will output a diagnostic code and immediately halt. //! This final output isn't an error; an output followed immediately by a halt //! means the program finished. If all outputs were zero except the diagnostic //! code, the diagnostic program ran successfully. //! //! After providing 1 to the only input instruction and passing all the tests, //! what diagnostic code does the program produce? use std::convert::TryFrom; use std::str::FromStr; #[derive(Debug, PartialEq)] struct OpHeader { mode1: usize, mode2: usize, mode3: usize, opcode: usize, } impl FromStr for OpHeader { type Err = (); fn from_str(s: &str) -> Result<Self, Self::Err> { // initial string should not be larger than 5 or smaller than 1 chars. if s.len() > 5 || s.len() < 1 { return Err(()); } let padded = format!("{:0>5}", s.chars().take(5).collect::<String>()); let (modes, opcode) = padded.split_at(3); let modes: Vec<u32> = modes.chars().filter_map(|c| c.to_digit(10)).collect(); let opcode: usize = opcode.parse().map_err(|_| ())?; Ok(OpHeader { mode1: modes[2] as usize, mode2: modes[1] as usize, mode3: modes[0] as usize, opcode, }) } } impl TryFrom<i32> for OpHeader { type Error = (); fn try_from(value: i32) -> Result<Self, Self::Error> { value.to_string().parse() } } #[derive(Debug)] enum Param { Immediate(i32), Position(usize), } impl Param { pub fn from_pair((mode, value): (usize, i32)) -> Self { match mode { 0 => Param::Position(value as usize), 1 => Param::Immediate(value), _ => unreachable!(), } } } #[derive(Debug)] enum Op { Add { a: Param, b: Param, out: usize }, Multiply { a: Param, b: Param, out: usize }, Input { out: usize }, Output { value: Param }, Halt, Unknown, } /// Builds an `Op` from `data` by reading up to 4 items from a given offset. fn read_instruction(offset: usize, data: &[i32]) -> Op { // FIXME: add support for Input/Output let header: Option<OpHeader> = data.get(offset).and_then(|x| OpHeader::try_from(*x).ok()); match ( header, data.get(offset + 1).map(|x| *x), data.get(offset + 2).map(|x| *x), data.get(offset + 3).map(|x| *x), ) { ( Some(OpHeader { opcode: 1, mode1, mode2, mode3, }), Some(a), Some(b), Some(out), ) => Op::Add { a: Param::from_pair((mode1, a)), b: Param::from_pair((mode2, b)), out: match Param::from_pair((mode3, out)) { Param::Position(out) => out, _ => unreachable!("output params cannot be immediate"), }, }, ( Some(OpHeader { opcode: 2, mode1, mode2, mode3, }), Some(a), Some(b), Some(out), ) => Op::Multiply { a: Param::from_pair((mode1, a)), b: Param::from_pair((mode2, b)), out: match Param::from_pair((mode3, out)) { Param::Position(out) => out, _ => unreachable!("output params cannot be immediate"), }, }, (Some(OpHeader { opcode: 3, .. 
}), Some(out), _, _) => Op::Input { out: out as usize }, ( Some(OpHeader { opcode: 4, mode1, .. }), Some(value), _, _, ) => Op::Output { value: Param::from_pair((mode1, value)), }, (Some(OpHeader { opcode: 99, .. }), _, _, _) => Op::Halt, _ => Op::Unknown, } } fn read_value(param: Param, data: &[i32]) -> Option<i32> { match param { Param::Position(idx) => data.get(idx).map(|x| *x), Param::Immediate(val) => Some(val), } } use std::io::BufRead; fn prompt_for_input() -> Result<i32, ()> { let mut buf = String::new(); println!("Waiting for input... >"); std::io::stdin() .lock() .read_line(&mut buf) .expect("input read"); buf.trim().parse().map_err(|e| { eprintln!("{}", e); }) } /// Run an intcode program. pub fn compute(data: &mut [i32])
{
    // `i` is the instruction pointer; each decoded instruction advances it by
    // its own width (4 for add/multiply, 2 for input/output).
    let mut i = 0;
    loop {
        // FIXME: make read_instruction an iterator so it can manage the increment internally
        match read_instruction(i, data) {
            Op::Add { a, b, out } => {
                let a = read_value(a, data).unwrap();
                let b = read_value(b, data).unwrap();
                data[out] = a + b;
                i += 4;
            }
            Op::Multiply { a, b, out } => {
                let a = read_value(a, data).unwrap();
                let b = read_value(b, data).unwrap();
                data[out] = a * b;
                i += 4;
            }
            Op::Input { out } => {
                let value = prompt_for_input().unwrap();
                data[out] = value;
                i += 2;
            }
            Op::Output { value } => {
                let value = read_value(value, data).unwrap();
                println!("offset={}, value={}", i, value);
                i += 2;
            }
            Op::Halt => break,
            _ => unreachable!(),
        }
    }
}

/// Attempt to identify the noun and verb (the values injected at addresses 1
/// and 2) which yield a given target output, by brute-forcing every pair in
/// 0..=99.
pub fn solve(target: i32, data: &[i32]) -> Result<(i32, i32), ()> {
    for (noun, verb) in (0..=99).flat_map(|i| (0..=99).map(move |j| (i, j))) {
        // Reset memory from the source program on every attempt.
        let mut input = data.to_vec();
        input[1] = noun;
        input[2] = verb;
        compute(&mut input);
        if input[0] == target {
            return Ok((noun, verb));
        }
    }
    Err(())
}

#[cfg(test)]
mod day02_1_tests {
    use super::compute;

    #[test]
    fn test_example_1() {
        let mut input = vec![1, 0, 0, 0, 99];
        compute(&mut input);
        assert_eq!(&input, &[2, 0, 0, 0, 99]);
    }

    #[test]
    fn test_example_2() {
        let mut input = vec![2, 3, 0, 3, 99];
        compute(&mut input);
        assert_eq!(&input, &[2, 3, 0, 6, 99]);
    }

    #[test]
    fn test_example_3() {
        let mut input = vec![2, 4, 4, 5, 99, 0];
        compute(&mut input);
        assert_eq!(&input, &[2, 4, 4, 5, 99, 9801]);
    }

    #[test]
    fn test_example_4() {
        let mut input = vec![1, 1, 1, 4, 99, 5, 6, 0, 99];
        compute(&mut input);
        assert_eq!(&input, &[30, 1, 1, 4, 2, 5, 6, 0, 99]);
    }
}

#[cfg(test)]
mod day05_1_tests {
    use super::{compute, OpHeader};

    #[test]
    fn test_header_pad() {
        // when the header is "short" it should be padded on the left with zeros
        assert_eq!(
            "102".parse::<OpHeader>().unwrap(),
            OpHeader {
                mode1: 1,
                mode2: 0,
                mode3: 0,
                opcode: 2
            }
        );
    }

    #[test]
    fn test_header_parse_two_digit_opcode() {
        assert_eq!(
            "1022".parse::<OpHeader>().unwrap(),
            OpHeader {
                mode1: 0,
                mode2: 1,
                mode3: 0,
                opcode: 22
            }
        );
    }

    #[test]
    fn test_example_1() {
        let mut input = vec![1002, 4, 3, 4, 33];
        compute(&mut input);
        assert_eq!(&input, &[1002, 4, 3, 4, 99]);
    }
}
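// A minimal usage sketch, not part of the original file: it assumes it sits in
// this module (so the private `OpHeader` type is visible) and walks the Day 2
// worked example through `compute`, decodes the Day 5 `1002` header, and
// brute-forces a noun/verb pair with `solve`.
#[cfg(test)]
mod usage_sketch {
    use super::{compute, solve, OpHeader};
    use std::convert::TryFrom;

    #[test]
    fn walk_through() {
        // The 12-element example program from the Day 2 notes above.
        let program = vec![1, 9, 10, 3, 2, 3, 11, 0, 99, 30, 40, 50];

        // Run a copy: 30 + 40 = 70 lands at address 3, then 70 * 50 = 3500
        // overwrites address 0, then opcode 99 halts.
        let mut memory = program.clone();
        compute(&mut memory);
        assert_eq!(memory[0], 3500);

        // For this toy program the very first pair (noun=0, verb=0) already
        // yields 100 at address 0 (1 + 1 = 2 at address 3, then 2 * 50 = 100).
        assert_eq!(solve(100, &program), Ok((0, 0)));

        // `1002` decodes as opcode 2 with modes (position, immediate,
        // position), matching the ABCDE walk-through in the Day 5 notes.
        let header = OpHeader::try_from(1002).unwrap();
        assert_eq!(
            header,
            OpHeader {
                mode1: 0,
                mode2: 1,
                mode3: 0,
                opcode: 2
            }
        );
    }
}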
token.go
package lex import ( "fmt" "strings" ) // TokenType identifies the type of lexical tokens. type TokenType uint16 // TokenInfo provides metadata about tokens type TokenInfo struct { T TokenType Kw string firstWord string // in event multi-word (Group By) the first word for match HasSpaces bool Description string } // Token represents a text string returned from the lexer. type Token struct { T TokenType // type V string // value Quote byte // quote mark: " ` [ ' Line int // Line # Column int // Position in line Pos int // Absolute position } // convert to human readable string func (t Token) String() string { return fmt.Sprintf(`Token{ %s Type:"%v" Line:%d Col:%d Q:%s Pos:%d}`, t.V, t.T.String(), t.Line, t.Column, string(t.Quote), t.Pos) } func (t Token) Err(l *Lexer) error { return t.ErrMsg(l, "") } func (t Token) ErrMsg(l *Lexer, msg string) error { return l.ErrMsg(t, msg) } /* // List of datatypes from MySql, implement them as tokens? or leave as Identity during // DDL create/alter statements? BOOL TINYINT BOOLEAN TINYINT CHARACTER VARYING(M) VARCHAR(M) FIXED DECIMAL FLOAT4 FLOAT FLOAT8 DOUBLE INT1 TINYINT INT2 SMALLINT INT3 MEDIUMINT INT4 INT INT8 BIGINT LONG VARBINARY MEDIUMBLOB LONG VARCHAR MEDIUMTEXT LONG MEDIUMTEXT MIDDLEINT MEDIUMINT NUMERIC DECIMAL */ const ( // List of all TokenTypes Note we do NOT use IOTA because it is evil // if we change the position (ie, add a token not at end) it will cause any // usage of tokens serialized on disk/database to be invalid // Basic grammar items TokenNil TokenType = 0 // not used TokenEOF TokenType = 1 // EOF TokenEOS TokenType = 2 // ; TokenEofOrEos TokenType = 3 // End of file, OR ; TokenError TokenType = 4 // error occurred; value is text of error TokenRaw TokenType = 5 // raw unlexed text string TokenNewLine TokenType = 6 // NewLine = \n // Comments TokenComment TokenType = 10 // Comment value string TokenCommentML TokenType = 11 // Comment MultiValue TokenCommentStart TokenType = 12 // /* TokenCommentEnd TokenType = 13 // */ TokenCommentSlashes TokenType = 14 // Single Line comment: // hello TokenCommentSingleLine TokenType = 15 // Single Line comment: -- hello TokenCommentHash TokenType = 16 // Single Line comment: # hello // Misc TokenComma TokenType = 20 // , TokenStar TokenType = 21 // * TokenColon TokenType = 22 // : TokenLeftBracket TokenType = 23 // [ TokenRightBracket TokenType = 24 // ] TokenLeftBrace TokenType = 25 // { TokenRightBrace TokenType = 26 // } // operand related tokens TokenMinus TokenType = 60 // - TokenPlus TokenType = 61 // + TokenPlusPlus TokenType = 62 // ++ TokenPlusEquals TokenType = 63 // += TokenDivide TokenType = 64 // / TokenMultiply TokenType = 65 // * TokenModulus TokenType = 66 // % TokenEqual TokenType = 67 // = TokenEqualEqual TokenType = 68 // == TokenNE TokenType = 69 // != TokenGE TokenType = 70 // >= TokenLE TokenType = 71 // <= TokenGT TokenType = 72 // > TokenLT TokenType = 73 // < TokenIf TokenType = 74 // IF TokenOr TokenType = 75 // || TokenAnd TokenType = 76 // && TokenBetween TokenType = 77 // between TokenLogicOr TokenType = 78 // OR TokenLogicAnd TokenType = 79 // AND TokenIN TokenType = 80 // IN TokenLike TokenType = 81 // LIKE TokenNegate TokenType = 82 // NOT TokenLeftParenthesis TokenType = 83 // ( TokenRightParenthesis TokenType = 84 // ) TokenTrue TokenType = 85 // True TokenFalse TokenType = 86 // False TokenIs TokenType = 87 // IS TokenNull TokenType = 88 // NULL TokenContains TokenType = 89 // CONTAINS TokenIntersects TokenType = 90 // INTERSECTS // ql top-level keywords, these 
first keywords determine parser
	TokenPrepare   TokenType = 200
	TokenInsert    TokenType = 201
	TokenUpdate    TokenType = 202
	TokenDelete    TokenType = 203
	TokenSelect    TokenType = 204
	TokenUpsert    TokenType = 205
	TokenAlter     TokenType = 206
	TokenCreate    TokenType = 207
	TokenDrop      TokenType = 208
	TokenSubscribe TokenType = 209
	TokenFilter    TokenType = 210
	TokenShow      TokenType = 211
	TokenDescribe  TokenType = 212 // We can also use TokenDesc
	TokenExplain   TokenType = 213 // another alias for describe
	TokenReplace   TokenType = 214 // Insert/Replace are interchangeable on insert statements
	TokenRollback  TokenType = 215
	TokenCommit    TokenType = 216

	// Other QL Keywords, These are clause-level keywords that mark separation between clauses
	TokenFrom     TokenType = 300 // from
	TokenWhere    TokenType = 301 // where
	TokenHaving   TokenType = 302 // having
	TokenGroupBy  TokenType = 303 // group by
	TokenBy       TokenType = 304 // by
	TokenAlias    TokenType = 305 // alias
	TokenWith     TokenType = 306 // with
	TokenValues   TokenType = 307 // values
	TokenInto     TokenType = 308 // into
	TokenLimit    TokenType = 309 // limit
	TokenOrderBy  TokenType = 310 // order by
	TokenInner    TokenType = 311 // inner, ie of join
	TokenCross    TokenType = 312 // cross
	TokenOuter    TokenType = 313 // outer
	TokenLeft     TokenType = 314 // left
	TokenRight    TokenType = 315 // right
	TokenJoin     TokenType = 316 // Join
	TokenOn       TokenType = 317 // on
	TokenDistinct TokenType = 318 // DISTINCT
	TokenAll      TokenType = 319 // all
	TokenInclude  TokenType = 320 // INCLUDE
	TokenExists   TokenType = 321 // EXISTS
	TokenOffset   TokenType = 322 // OFFSET
	TokenFull     TokenType = 323 // FULL
	TokenGlobal   TokenType = 324 // GLOBAL
	TokenSession  TokenType = 325 // SESSION
	TokenTables   TokenType = 326 // TABLES

	// ddl major words
	TokenSchema         TokenType = 400 // SCHEMA
	TokenDatabase       TokenType = 401 // DATABASE
	TokenTable          TokenType = 402 // TABLE
	TokenSource         TokenType = 403 // SOURCE
	TokenView           TokenType = 404 // VIEW
	TokenContinuousView TokenType = 405 // CONTINUOUSVIEW
	TokenTemp           TokenType = 406 // TEMP or TEMPORARY

	// ddl other
	TokenChange       TokenType = 410 // change
	TokenAdd          TokenType = 411 // add
	TokenFirst        TokenType = 412 // first
	TokenAfter        TokenType = 413 // after
	TokenCharacterSet TokenType = 414 // character set
	TokenDefault      TokenType = 415 // default
	TokenUnique       TokenType = 416 // unique
	TokenKey          TokenType = 417 // key
	TokenPrimary      TokenType = 418 // primary
	TokenConstraint   TokenType = 419 // constraint
	TokenForeign      TokenType = 420 // foreign
	TokenReferences   TokenType = 421 // references
	TokenEngine       TokenType = 422 // engine

	// Other QL keywords
	TokenSet  TokenType = 500 // set
	TokenAs   TokenType = 501 // as
	TokenAsc  TokenType = 502 // ascending
	TokenDesc TokenType = 503 // descending
	TokenUse  TokenType = 504 // use

	// User defined function/expression
	TokenUdfExpr TokenType = 550

	// Value Types
	TokenIdentity     TokenType = 600 // identity, either column, table name etc
	TokenValue        TokenType = 601 // 'some string' string or continuous sequence of chars delimited by WHITE SPACE | ' | , | ( | )
	TokenValueEscaped TokenType = 602 // '' becomes ' inside the string, parser will need to replace the string
	TokenRegex        TokenType = 603 // regex
	TokenDuration     TokenType = 604 // 14d , 22w, 3y, 45ms, 45us, 24hr, 2h, 45m, 30s

	// Data Type Definitions
	TokenTypeDef     TokenType = 999
	TokenTypeBool    TokenType = 998
	TokenTypeFloat   TokenType = 997
	TokenTypeInteger TokenType = 996
	TokenTypeString  TokenType = 995
	TokenTypeVarChar TokenType = 994
	TokenTypeChar    TokenType = 993
	TokenTypeBigInt  TokenType = 992
	TokenTypeTime    TokenType = 991
TokenTypeText TokenType = 990 TokenTypeJson TokenType = 989 // Value types TokenValueType TokenType = 1000 // A generic Identifier of value type TokenBool TokenType = 1001 TokenFloat TokenType = 1002 TokenInteger TokenType = 1003 TokenString TokenType = 1004 TokenTime TokenType = 1005 // Composite Data Types TokenJson TokenType = 1010 TokenList TokenType = 1011 TokenMap TokenType = 1012 ) var ( // IDENTITY_CHARS Which Identity Characters are allowed for UNESCAPED identities IDENTITY_CHARS = "_.-/" // A much more lax identity char set rule that allows spaces IDENTITY_LAX_CHARS = "_./- " // sql variables start with @@ ?? IDENTITY_SQL_CHARS = "@_.-" // list of token-name TokenNameMap = map[TokenType]*TokenInfo{ TokenEOF: {Description: "EOF"}, TokenEOS: {Description: ";"}, TokenEofOrEos: {Kw: "", Description: "; OR EOF"}, TokenError: {Description: "Error"}, TokenRaw: {Description: "unlexed text"}, TokenNewLine: {Description: "New Line"}, // Comments TokenComment: {Description: "Comment"}, TokenCommentML: {Description: "CommentMultiLine"}, TokenCommentStart: {Description: "/*"}, TokenCommentEnd: {Description: "*/"}, TokenCommentHash: {Description: "#"}, TokenCommentSingleLine: {Description: "--"}, TokenCommentSlashes: {Description: "//"}, // Misc TokenComma: {Description: ","}, TokenStar: {Kw: "*", Description: "*"}, TokenColon: {Kw: ":", Description: ":"}, TokenLeftBracket: {Kw: "[", Description: "["}, TokenRightBracket: {Kw: "]", Description: "]"}, TokenLeftBrace: {Kw: "{", Description: "{"}, TokenRightBrace: {Kw: "}", Description: "}"}, // Logic, Expressions, Operators etc TokenMultiply: {Kw: "*", Description: "Multiply"}, TokenMinus: {Kw: "-", Description: "-"}, TokenPlus: {Kw: "+", Description: "+"}, TokenPlusPlus: {Kw: "++", Description: "++"}, TokenPlusEquals: {Kw: "+=", Description: "+="}, TokenDivide: {Kw: "/", Description: "Divide /"}, TokenModulus: {Kw: "%", Description: "Modulus %"}, TokenEqual: {Kw: "=", Description: "Equal"}, TokenEqualEqual: {Kw: "==", Description: "=="}, TokenNE: {Kw: "!=", Description: "NE"}, TokenGE: {Kw: ">=", Description: "GE"}, TokenLE: {Kw: "<=", Description: "LE"}, TokenGT: {Kw: ">", Description: "GT"}, TokenLT: {Kw: "<", Description: "LT"}, TokenIf: {Kw: "if", Description: "IF"}, TokenAnd: {Kw: "&&", Description: "&&"}, TokenOr: {Kw: "||", Description: "||"}, TokenLogicOr: {Kw: "or", Description: "Or"}, TokenLogicAnd: {Kw: "and", Description: "And"}, TokenIN: {Kw: "in", Description: "IN"}, TokenLike: {Kw: "like", Description: "LIKE"}, TokenNegate: {Kw: "not", Description: "NOT"}, TokenBetween: {Kw: "between", Description: "between"}, TokenIs: {Kw: "is", Description: "IS"}, TokenNull: {Kw: "null", Description: "NULL"}, TokenContains: {Kw: "contains", Description: "contains"}, TokenIntersects: {Kw: "intersects", Description: "intersects"}, // Identity ish bools TokenTrue: {Kw: "true", Description: "True"}, TokenFalse: {Kw: "false", Description: "False"}, // parens, both logical expression as well as functional TokenLeftParenthesis: {Description: "("}, TokenRightParenthesis: {Description: ")"}, // Expression Identifier TokenUdfExpr: {Description: "expr"}, // Initial Keywords, these are the most important QL Type words TokenPrepare: {Description: "prepare"}, TokenInsert: {Description: "insert"}, TokenSelect: {Description: "select"}, TokenDelete: {Description: "delete"}, TokenUpdate: {Description: "update"}, TokenUpsert: {Description: "upsert"}, TokenAlter: {Description: "alter"}, TokenCreate: {Description: "create"}, TokenDrop: {Description: "drop"}, 
TokenSubscribe: {Description: "subscribe"}, TokenFilter: {Description: "filter"}, TokenShow: {Description: "show"}, TokenDescribe: {Description: "describe"}, TokenExplain: {Description: "explain"}, TokenReplace: {Description: "replace"}, TokenRollback: {Description: "rollback"}, TokenCommit: {Description: "commit"}, // Top Level dml ql clause keywords TokenInto: {Description: "into"}, TokenBy: {Description: "by"}, TokenFrom: {Description: "from"}, TokenWhere: {Description: "where"}, TokenHaving: {Description: "having"}, TokenGroupBy: {Description: "group by"}, // Other Ql Keywords TokenAlias: {Description: "alias"}, TokenWith: {Description: "with"}, TokenValues: {Description: "values"}, TokenLimit: {Description: "limit"}, TokenOrderBy: {Description: "order by"}, TokenInner: {Description: "inner"}, TokenCross: {Description: "cross"}, TokenOuter: {Description: "outer"}, TokenLeft: {Description: "left"}, TokenRight: {Description: "right"}, TokenJoin: {Description: "join"}, TokenOn: {Description: "on"}, TokenDistinct: {Description: "distinct"}, TokenAll: {Description: "all"}, TokenInclude: {Description: "include"}, TokenExists: {Description: "exists"}, TokenOffset: {Description: "offset"}, TokenFull: {Description: "full"}, TokenGlobal: {Description: "global"}, TokenSession: {Description: "session"}, TokenTables: {Description: "tables"}, // ddl keywords TokenSchema: {Description: "schema"}, TokenDatabase: {Description: "database"}, TokenTable: {Description: "table"}, TokenSource: {Description: "source"}, TokenView: {Description: "view"}, TokenContinuousView: {Description: "continuousview"}, TokenTemp: {Description: "temp"}, // ddl other TokenChange: {Description: "change"}, TokenCharacterSet: {Description: "character set"}, TokenAdd: {Description: "add"}, TokenFirst: {Description: "first"}, TokenAfter: {Description: "after"}, TokenDefault: {Description: "default"}, TokenUnique: {Description: "unique"}, TokenKey: {Description: "key"}, TokenPrimary: {Description: "primary"}, TokenConstraint: {Description: "constraint"}, TokenForeign: {Description: "foreign"}, TokenReferences: {Description: "references"}, TokenEngine: {Description: "engine"}, // QL Keywords, all lower-case TokenSet: {Description: "set"}, TokenAs: {Description: "as"}, TokenAsc: {Description: "asc"}, TokenDesc: {Description: "desc"}, TokenUse: {Description: "use"}, // special value types TokenIdentity: {Description: "identity"}, TokenValue: {Description: "value"}, TokenValueEscaped: {Description: "value-escaped"}, TokenRegex: {Description: "regex"}, TokenDuration: {Description: "duration"}, // Data TYPES: ie type system TokenTypeDef: {Description: "TypeDef"}, // Generic DataType TokenTypeBool: {Description: "BoolType"}, TokenTypeFloat: {Description: "FloatType"}, TokenTypeInteger: {Description: "IntegerType"}, TokenTypeString: {Description: "StringType"}, TokenTypeVarChar: {Description: "VarCharType"}, TokenTypeChar: {Description: "CharType"}, TokenTypeBigInt: {Description: "BigIntType"}, TokenTypeTime: {Description: "TimeType"}, TokenTypeText: {Description: "TextType"}, TokenTypeJson: {Description: "JsonType"}, // VALUE TYPES: ie literal values TokenBool: {Description: "BoolVal"}, TokenFloat: {Description: "FloatVal"}, TokenInteger: {Description: "IntegerVal"}, TokenString: {Description: "StringVal"}, TokenTime: {Description: "TimeVal"}, // Some other value Types TokenValueType: {Description: "Value"}, // Generic DataType just stores in a value.Value TokenList: {Description: "List"}, TokenMap: {Description: "Map"}, TokenJson: 
{Description: "JSON"},
	}

	TokenToOp = make(map[string]TokenType)
)

func init() {
	LoadTokenInfo()
	SqlDialect.Init()
	FilterQLDialect.Init()
	JsonDialect.Init()
}

// LoadTokenInfo loads the token info into the global keyword map.
func LoadTokenInfo() {
	for tok, ti := range TokenNameMap {
		ti.T = tok
		if ti.Kw == "" {
			ti.Kw = ti.Description
		}
		TokenToOp[ti.Kw] = tok
		if strings.Contains(ti.Kw, " ") {
			parts := strings.Split(ti.Kw, " ")
			ti.firstWord = parts[0]
			ti.HasSpaces = true
		}
	}
}

// TokenFromOp gets a token from an operation string, falling back to TokenNil.
func TokenFromOp(op string) Token {
	tt, ok := TokenToOp[op]
	if ok {
		return Token{T: tt, V: op}
	}
	return Token{T: TokenNil}
}

// String converts to a human readable string.
func (typ TokenType) String() string {
	s, ok := TokenNameMap[typ]
	if ok {
		return s.Kw
	}
	return "not implemented"
}

// MatchString returns which keyword to look for: the full keyword, or, for
// multi-word keywords such as "group by", just the first word ("group").
func (typ TokenType) MatchString() string {
	tokInfo, ok := TokenNameMap[typ]
	//u.Debugf("matchstring: '%v' '%v' '%v'", tokInfo.T, tokInfo.Kw, tokInfo.Description)
	if ok {
		if tokInfo.HasSpaces {
			return tokInfo.firstWord
		}
		return tokInfo.Kw
	}
	return "not implemented"
}

// MultiWord reports whether this is a multi-word keyword such as "Group by".
func (typ TokenType) MultiWord() bool {
	tokInfo, ok := TokenNameMap[typ]
	if ok {
		return tokInfo.HasSpaces
	}
	return false
}
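token_sketch_test.go

// A small usage sketch, not part of the original repo: a hypothetical sibling
// _test.go file in package lex that exercises the keyword lookup wired up by
// LoadTokenInfo and the multi-word handling for "group by".
package lex

import "testing"

func TestTokenLookupSketch(t *testing.T) {
	// Known keywords resolve to their TokenType; the matched text is echoed in V.
	if tok := TokenFromOp("select"); tok.T != TokenSelect || tok.V != "select" {
		t.Errorf("expected TokenSelect/select, got %v/%q", tok.T, tok.V)
	}
	// Unknown operators fall back to TokenNil.
	if tok := TokenFromOp("no-such-keyword"); tok.T != TokenNil {
		t.Errorf("expected TokenNil, got %v", tok.T)
	}
	// "group by" contains a space, so lexers match on its first word only.
	if !TokenGroupBy.MultiWord() {
		t.Error("expected TokenGroupBy to be multi-word")
	}
	if got := TokenGroupBy.MatchString(); got != "group" {
		t.Errorf(`expected "group", got %q`, got)
	}
}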
first keywords determine parser TokenPrepare TokenType = 200 TokenInsert TokenType = 201 TokenUpdate TokenType = 202 TokenDelete TokenType = 203 TokenSelect TokenType = 204 TokenUpsert TokenType = 205 TokenAlter TokenType = 206 TokenCreate TokenType = 207 TokenDrop TokenType = 208 TokenSubscribe TokenType = 209 TokenFilter TokenType = 210 TokenShow TokenType = 211 TokenDescribe TokenType = 212 // We can also use TokenDesc TokenExplain TokenType = 213 // another alias for describe TokenReplace TokenType = 214 // Insert/Replace are interchangeable on insert statements TokenRollback TokenType = 215 TokenCommit TokenType = 216 // Other QL Keywords, These are clause-level keywords that mark separation between clauses TokenFrom TokenType = 300 // from TokenWhere TokenType = 301 // where TokenHaving TokenType = 302 // having TokenGroupBy TokenType = 303 // group by TokenBy TokenType = 304 // by TokenAlias TokenType = 305 // alias TokenWith TokenType = 306 // with TokenValues TokenType = 307 // values TokenInto TokenType = 308 // into TokenLimit TokenType = 309 // limit TokenOrderBy TokenType = 310 // order by TokenInner TokenType = 311 // inner , ie of join TokenCross TokenType = 312 // cross TokenOuter TokenType = 313 // outer TokenLeft TokenType = 314 // left TokenRight TokenType = 315 // right TokenJoin TokenType = 316 // Join TokenOn TokenType = 317 // on TokenDistinct TokenType = 318 // DISTINCT TokenAll TokenType = 319 // all TokenInclude TokenType = 320 // INCLUDE TokenExists TokenType = 321 // EXISTS TokenOffset TokenType = 322 // OFFSET TokenFull TokenType = 323 // FULL TokenGlobal TokenType = 324 // GLOBAL TokenSession TokenType = 325 // SESSION TokenTables TokenType = 326 // TABLES // ddl major words TokenSchema TokenType = 400 // SCHEMA TokenDatabase TokenType = 401 // DATABASE TokenTable TokenType = 402 // TABLE TokenSource TokenType = 403 // SOURCE TokenView TokenType = 404 // VIEW TokenContinuousView TokenType = 405 // CONTINUOUSVIEW TokenTemp TokenType = 406 // TEMP or TEMPORARY // ddl other TokenChange TokenType = 410 // change TokenAdd TokenType = 411 // add TokenFirst TokenType = 412 // first TokenAfter TokenType = 413 // after TokenCharacterSet TokenType = 414 // character set TokenDefault TokenType = 415 // default TokenUnique TokenType = 416 // unique TokenKey TokenType = 417 // key TokenPrimary TokenType = 418 // primary TokenConstraint TokenType = 419 // constraint TokenForeign TokenType = 420 // foreign TokenReferences TokenType = 421 // references TokenEngine TokenType = 422 // engine // Other QL keywords TokenSet TokenType = 500 // set TokenAs TokenType = 501 // as TokenAsc TokenType = 502 // ascending TokenDesc TokenType = 503 // descending TokenUse TokenType = 504 // use // User defined function/expression TokenUdfExpr TokenType = 550 // Value Types TokenIdentity TokenType = 600 // identity, either column, table name etc TokenValue TokenType = 601 // 'some string' string or continuous sequence of chars delimited by WHITE SPACE | ' | , | ( | ) TokenValueEscaped TokenType = 602 // '' becomes ' inside the string, parser will need to replace the string TokenRegex TokenType = 603 // regex TokenDuration TokenType = 604 // 14d , 22w, 3y, 45ms, 45us, 24hr, 2h, 45m, 30s // Data Type Definitions TokenTypeDef TokenType = 999 TokenTypeBool TokenType = 998 TokenTypeFloat TokenType = 997 TokenTypeInteger TokenType = 996 TokenTypeString TokenType = 995 TokenTypeVarChar TokenType = 994 TokenTypeChar TokenType = 993 TokenTypeBigInt TokenType = 992 TokenTypeTime TokenType = 991
TokenTypeText TokenType = 990 TokenTypeJson TokenType = 989 // Value types TokenValueType TokenType = 1000 // A generic Identifier of value type TokenBool TokenType = 1001 TokenFloat TokenType = 1002 TokenInteger TokenType = 1003 TokenString TokenType = 1004 TokenTime TokenType = 1005 // Composite Data Types TokenJson TokenType = 1010 TokenList TokenType = 1011 TokenMap TokenType = 1012 ) var ( // IDENTITY_CHARS Which Identity Characters are allowed for UNESCAPED identities IDENTITY_CHARS = "_.-/" // A much more lax identity char set rule that allows spaces IDENTITY_LAX_CHARS = "_./- " // sql variables start with @@ ?? IDENTITY_SQL_CHARS = "@_.-" // list of token-name TokenNameMap = map[TokenType]*TokenInfo{ TokenEOF: {Description: "EOF"}, TokenEOS: {Description: ";"}, TokenEofOrEos: {Kw: "", Description: "; OR EOF"}, TokenError: {Description: "Error"}, TokenRaw: {Description: "unlexed text"}, TokenNewLine: {Description: "New Line"}, // Comments TokenComment: {Description: "Comment"}, TokenCommentML: {Description: "CommentMultiLine"}, TokenCommentStart: {Description: "/*"}, TokenCommentEnd: {Description: "*/"}, TokenCommentHash: {Description: "#"}, TokenCommentSingleLine: {Description: "--"}, TokenCommentSlashes: {Description: "//"}, // Misc TokenComma: {Description: ","}, TokenStar: {Kw: "*", Description: "*"}, TokenColon: {Kw: ":", Description: ":"}, TokenLeftBracket: {Kw: "[", Description: "["}, TokenRightBracket: {Kw: "]", Description: "]"}, TokenLeftBrace: {Kw: "{", Description: "{"}, TokenRightBrace: {Kw: "}", Description: "}"}, // Logic, Expressions, Operators etc TokenMultiply: {Kw: "*", Description: "Multiply"}, TokenMinus: {Kw: "-", Description: "-"}, TokenPlus: {Kw: "+", Description: "+"}, TokenPlusPlus: {Kw: "++", Description: "++"}, TokenPlusEquals: {Kw: "+=", Description: "+="}, TokenDivide: {Kw: "/", Description: "Divide /"}, TokenModulus: {Kw: "%", Description: "Modulus %"}, TokenEqual: {Kw: "=", Description: "Equal"}, TokenEqualEqual: {Kw: "==", Description: "=="}, TokenNE: {Kw: "!=", Description: "NE"}, TokenGE: {Kw: ">=", Description: "GE"}, TokenLE: {Kw: "<=", Description: "LE"}, TokenGT: {Kw: ">", Description: "GT"}, TokenLT: {Kw: "<", Description: "LT"}, TokenIf: {Kw: "if", Description: "IF"}, TokenAnd: {Kw: "&&", Description: "&&"}, TokenOr: {Kw: "||", Description: "||"}, TokenLogicOr: {Kw: "or", Description: "Or"}, TokenLogicAnd: {Kw: "and", Description: "And"}, TokenIN: {Kw: "in", Description: "IN"}, TokenLike: {Kw: "like", Description: "LIKE"}, TokenNegate: {Kw: "not", Description: "NOT"}, TokenBetween: {Kw: "between", Description: "between"}, TokenIs: {Kw: "is", Description: "IS"}, TokenNull: {Kw: "null", Description: "NULL"}, TokenContains: {Kw: "contains", Description: "contains"}, TokenIntersects: {Kw: "intersects", Description: "intersects"}, // Identity ish bools TokenTrue: {Kw: "true", Description: "True"}, TokenFalse: {Kw: "false", Description: "False"}, // parens, both logical expression as well as functional TokenLeftParenthesis: {Description: "("}, TokenRightParenthesis: {Description: ")"}, // Expression Identifier TokenUdfExpr: {Description: "expr"}, // Initial Keywords, these are the most important QL Type words TokenPrepare: {Description: "prepare"}, TokenInsert: {Description: "insert"}, TokenSelect: {Description: "select"}, TokenDelete: {Description: "delete"}, TokenUpdate: {Description: "update"}, TokenUpsert: {Description: "upsert"}, TokenAlter: {Description: "alter"}, TokenCreate: {Description: "create"}, TokenDrop: {Description: "drop"}, 
TokenSubscribe: {Description: "subscribe"}, TokenFilter: {Description: "filter"}, TokenShow: {Description: "show"}, TokenDescribe: {Description: "describe"}, TokenExplain: {Description: "explain"}, TokenReplace: {Description: "replace"}, TokenRollback: {Description: "rollback"}, TokenCommit: {Description: "commit"}, // Top Level dml ql clause keywords TokenInto: {Description: "into"}, TokenBy: {Description: "by"}, TokenFrom: {Description: "from"}, TokenWhere: {Description: "where"}, TokenHaving: {Description: "having"}, TokenGroupBy: {Description: "group by"}, // Other Ql Keywords TokenAlias: {Description: "alias"}, TokenWith: {Description: "with"}, TokenValues: {Description: "values"}, TokenLimit: {Description: "limit"}, TokenOrderBy: {Description: "order by"}, TokenInner: {Description: "inner"}, TokenCross: {Description: "cross"}, TokenOuter: {Description: "outer"}, TokenLeft: {Description: "left"}, TokenRight: {Description: "right"}, TokenJoin: {Description: "join"}, TokenOn: {Description: "on"}, TokenDistinct: {Description: "distinct"}, TokenAll: {Description: "all"}, TokenInclude: {Description: "include"}, TokenExists: {Description: "exists"}, TokenOffset: {Description: "offset"}, TokenFull: {Description: "full"}, TokenGlobal: {Description: "global"}, TokenSession: {Description: "session"}, TokenTables: {Description: "tables"}, // ddl keywords TokenSchema: {Description: "schema"}, TokenDatabase: {Description: "database"}, TokenTable: {Description: "table"}, TokenSource: {Description: "source"}, TokenView: {Description: "view"}, TokenContinuousView: {Description: "continuousview"}, TokenTemp: {Description: "temp"}, // ddl other TokenChange: {Description: "change"}, TokenCharacterSet: {Description: "character set"}, TokenAdd: {Description: "add"}, TokenFirst: {Description: "first"}, TokenAfter: {Description: "after"}, TokenDefault: {Description: "default"}, TokenUnique: {Description: "unique"}, TokenKey: {Description: "key"}, TokenPrimary: {Description: "primary"}, TokenConstraint: {Description: "constraint"}, TokenForeign: {Description: "foreign"}, TokenReferences: {Description: "references"}, TokenEngine: {Description: "engine"}, // QL Keywords, all lower-case TokenSet: {Description: "set"}, TokenAs: {Description: "as"}, TokenAsc: {Description: "asc"}, TokenDesc: {Description: "desc"}, TokenUse: {Description: "use"}, // special value types TokenIdentity: {Description: "identity"}, TokenValue: {Description: "value"}, TokenValueEscaped: {Description: "value-escaped"}, TokenRegex: {Description: "regex"}, TokenDuration: {Description: "duration"}, // Data TYPES: ie type system TokenTypeDef: {Description: "TypeDef"}, // Generic DataType TokenTypeBool: {Description: "BoolType"}, TokenTypeFloat: {Description: "FloatType"}, TokenTypeInteger: {Description: "IntegerType"}, TokenTypeString: {Description: "StringType"}, TokenTypeVarChar: {Description: "VarCharType"}, TokenTypeChar: {Description: "CharType"}, TokenTypeBigInt: {Description: "BigIntType"}, TokenTypeTime: {Description: "TimeType"}, TokenTypeText: {Description: "TextType"}, TokenTypeJson: {Description: "JsonType"}, // VALUE TYPES: ie literal values TokenBool: {Description: "BoolVal"}, TokenFloat: {Description: "FloatVal"}, TokenInteger: {Description: "IntegerVal"}, TokenString: {Description: "StringVal"}, TokenTime: {Description: "TimeVal"}, // Some other value Types TokenValueType: {Description: "Value"}, // Generic DataType just stores in a value.Value TokenList: {Description: "List"}, TokenMap: {Description: "Map"}, TokenJson: 
{Description: "JSON"}, } TokenToOp = make(map[string]TokenType) ) func init() { LoadTokenInfo() SqlDialect.Init() FilterQLDialect.Init() JsonDialect.Init() } // LoadTokenInfo load the token info into global map func LoadTokenInfo() { for tok, ti := range TokenNameMap { ti.T = tok if ti.Kw == "" { ti.Kw = ti.Description } TokenToOp[ti.Kw] = tok if strings.Contains(ti.Kw, " ") { parts := strings.Split(ti.Kw, " ") ti.firstWord = parts[0] ti.HasSpaces = true } } } // TokenFromOp get token from operation string func TokenFromOp(op string) Token { tt, ok := TokenToOp[op] if ok { return Token{T: tt, V: op} } return Token{T: TokenNil} } // String convert to human readable string func (typ TokenType)
() string { s, ok := TokenNameMap[typ] if ok { return s.Kw } return "not implemented" } // MatchString which keyword should we look for, either full keyword // OR in case of spaces such as "group by" look for group func (typ TokenType) MatchString() string { tokInfo, ok := TokenNameMap[typ] //u.Debugf("matchstring: '%v' '%v' '%v'", tokInfo.T, tokInfo.Kw, tokInfo.Description) if ok { if tokInfo.HasSpaces { return tokInfo.firstWord } return tokInfo.Kw } return "not implemented" } // MultiWord is this a word such as "Group by" with multiple words? func (typ TokenType) MultiWord() bool { tokInfo, ok := TokenNameMap[typ] if ok { return tokInfo.HasSpaces } return false }
String
identifier_name
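This record masks the method name String on TokenType. The prefix also documents why the TokenType constants are assigned explicit numbers instead of iota: values serialized to disk or a database must keep their meaning even if a new token is later inserted into the list. A small illustration of that property, using assumed values copied from the record:

```go
package main

import "fmt"

// TokenType values are fixed numbers rather than iota so that values
// persisted by an older build still decode correctly after the enum
// grows. The constants below are a tiny excerpt for demonstration.
type TokenType uint16

const (
	TokenNil    TokenType = 0
	TokenEOF    TokenType = 1
	TokenSelect TokenType = 204 // explicit value: safe to persist
)

func main() {
	persisted := uint16(204) // e.g. read back from disk/database
	if TokenType(persisted) == TokenSelect {
		fmt.Println("persisted value still decodes as TokenSelect")
	}
}
```

With iota, inserting a token mid-list would silently renumber everything after it; the explicit numbering trades a little verbosity for that stability.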
begot.go
// Copyright (c) 2014-2015 Solano Labs Inc. All Rights Reserved. package main import ( "bytes" "crypto/sha1" "encoding/hex" "fmt" "go/parser" "go/token" "io/ioutil" "os" "os/exec" "path/filepath" "regexp" "sort" "strings" "syscall" "gopkg.in/yaml.v2" ) const ( BEGOTTEN = "Begotten" BEGOTTEN_LOCK = "Begotten.lock" EMPTY_DEP = "_begot_empty_dep" IMPLICIT_PREFIX = "_begot_implicit" // This should change if the format of Begotten.lock changes in an incompatible // way. (But prefer changing it in compatible ways and not incrementing this.) FILE_VERSION = 1 ) // Known public servers and how many path components form the repo name. var KNOWN_GIT_SERVERS = map[string]int{ "github.com": 2, "bitbucket.org": 2, "begot.test": 2, } var RE_NON_IDENTIFIER_CHAR = regexp.MustCompile("\\W") func replace_non_identifier_chars(in string) string { return RE_NON_IDENTIFIER_CHAR.ReplaceAllLiteralString(in, "_") } func Command(cwd string, name string, args ...string) (cmd *exec.Cmd) { cmd = exec.Command(name, args...) cmd.Dir = cwd return } func cc(cwd string, name string, args ...string) { cmd := Command(cwd, name, args...) if err := cmd.Run(); err != nil { panic(fmt.Errorf("command '%s %s' in %s: %s", name, strings.Join(args, " "), cwd, err)) } } func co(cwd string, name string, args ...string) string { cmd := Command(cwd, name, args...) if outb, err := cmd.Output(); err != nil { panic(fmt.Errorf("command '%s %s' in %s: %s", name, strings.Join(args, " "), cwd, err)) } else { return string(outb) } } func contains_str(lst []string, val string) bool { for _, item := range lst { if item == val { return true } } return false } func sha1str(in string) string { sum := sha1.Sum([]byte(in)) return hex.EncodeToString(sum[:]) } func sha1bts(in []byte) string { sum := sha1.Sum(in) return hex.EncodeToString(sum[:]) } func realpath(path string) (out string) { if abs, err := filepath.Abs(path); err != nil { panic(err) } else if out, err = filepath.EvalSymlinks(abs); err != nil { panic(err) } return } func ln_sf(target, path string) (created bool, err error) { current, e := os.Readlink(path) if e != nil || current != target { if err = os.RemoveAll(path); err != nil { return } if err = os.MkdirAll(filepath.Dir(path), 0777); err != nil { return } if err = os.Symlink(target, path); err != nil { return } created = true } return } func yaml_copy(in interface{}, out interface{}) { if bts, err := yaml.Marshal(in); err != nil { panic(err) } else if err = yaml.Unmarshal(bts, out); err != nil { panic(err) } } type Dep struct { name string Aliases []string Git_url string Import_path string `yaml:",omitempty"` Ref string Subpath string } // A Begotten or Begotten.lock file contains exactly one of these in YAML format. 
type BegottenFileStruct struct { Deps map[string]interface{} // either string or Dep Meta struct { File_version int Generated_by string } Repo_aliases map[string]interface{} // either string or subset of Dep {git_url, ref} Repo_deps map[string][]string } type BegottenFile struct { data BegottenFileStruct } func BegottenFileNew(fn string) (bf *BegottenFile) { bf = new(BegottenFile) bf.data.Meta.File_version = -1 if data, err := ioutil.ReadFile(fn); err != nil { panic(err) } else if err := yaml.Unmarshal(data, &bf.data); err != nil { panic(err) } ver := bf.data.Meta.File_version if ver != -1 && ver != FILE_VERSION { panic(fmt.Errorf("Incompatible file version for %r; please run 'begot update'.", ver)) } return } type SortedStringMap yaml.MapSlice func (sm SortedStringMap) Len() int { return len(sm) } func (sm SortedStringMap) Less(i, j int) bool { return sm[i].Key.(string) < sm[j].Key.(string) } func (sm SortedStringMap) Swap(i, j int) { sm[i], sm[j] = sm[j], sm[i] } func (bf *BegottenFile) save(fn string) { // We have to sort everything so the output is deterministic. go-yaml // doesn't write maps in sorted order, so we have to convert them to // yaml.MapSlices and sort those. var out struct { Deps SortedStringMap Meta struct { File_version int Generated_by string } Repo_aliases SortedStringMap Repo_deps SortedStringMap } out.Meta.File_version = FILE_VERSION out.Meta.Generated_by = CODE_VERSION for k, v := range bf.data.Deps { dep := v.(Dep) dep.Import_path = "" sort.StringSlice(dep.Aliases).Sort() out.Deps = append(out.Deps, yaml.MapItem{k, dep}) } sort.Sort(out.Deps) for k, v := range bf.data.Repo_aliases { out.Repo_aliases = append(out.Repo_aliases, yaml.MapItem{k, v}) } sort.Sort(out.Repo_aliases) for k, v := range bf.data.Repo_deps { sort.StringSlice(v).Sort() out.Repo_deps = append(out.Repo_deps, yaml.MapItem{k, v}) } sort.Sort(out.Repo_deps) if data, err := yaml.Marshal(out); err != nil { panic(err) } else if err := ioutil.WriteFile(fn, data, 0666); err != nil { panic(err) } } func (bf *BegottenFile) default_git_url_from_repo_path(repo_path string) string { // Hook for testing: test_repo_path := os.Getenv("BEGOT_TEST_REPOS") if strings.HasPrefix(repo_path, "begot.test/") && test_repo_path != "" { return "file://" + filepath.Join(test_repo_path, repo_path) } // Default to https for other repos: return "https://" + repo_path } func (bf *BegottenFile) parse_dep(name string, v interface{}) (dep Dep) { dep.name = name if _, ok := v.(string); ok { v = map[interface{}]interface{}{"import_path": v} } mv, ok := v.(map[interface{}]interface{}) if !ok { panic(fmt.Errorf("Dependency value must be string or dict, got %T: %v", v, v)) } yaml_copy(mv, &dep) if dep.Import_path != "" { parts := strings.Split(dep.Import_path, "/") if repo_parts, ok := KNOWN_GIT_SERVERS[parts[0]]; !ok { panic(fmt.Errorf("Unknown git server %r for %r", parts[0], name)) } else { repo_path := strings.Join(parts[:repo_parts+1], "/") dep.Git_url = bf.default_git_url_from_repo_path(repo_path) dep.Subpath = strings.Join(parts[repo_parts+1:], "/") dep.Aliases = append(dep.Aliases, dep.Import_path) // Redirect through repo aliases: if alias, ok := bf.data.Repo_aliases[repo_path]; ok { var aliasdep Dep // only allow git_url and ref if aliasstr, ok := alias.(string); ok { aliasstr = bf.default_git_url_from_repo_path(aliasstr) alias = yaml.MapSlice{yaml.MapItem{"git_url", aliasstr}} } yaml_copy(alias, &aliasdep) if aliasdep.Git_url != "" { dep.Git_url = aliasdep.Git_url } if aliasdep.Ref != "" { dep.Ref = aliasdep.Ref } } } } if 
dep.Git_url == "" { panic(fmt.Errorf("Missing 'git_url' for %q; only git is supported for now", name)) } if dep.Ref == "" { dep.Ref = "master" } return } func (bf *BegottenFile) deps() (out []Dep) { out = make([]Dep, len(bf.data.Deps)) i := 0 for name, v := range bf.data.Deps { out[i] = bf.parse_dep(name, v) i++ } return } func (bf *BegottenFile) set_deps(deps []Dep) { bf.data.Deps = make(map[string]interface{}) for _, dep := range deps { bf.data.Deps[dep.name] = dep } } func (bf *BegottenFile) repo_deps() map[string][]string { if bf.data.Repo_deps == nil { bf.data.Repo_deps = make(map[string][]string) } return bf.data.Repo_deps } func (bf *BegottenFile) set_repo_deps(repo_deps map[string][]string) { bf.data.Repo_deps = repo_deps } type Env struct { Home string BegotCache string DepWorkspaceDir string CodeWorkspaceDir string RepoDir string CacheLock string } func EnvNew() (env *Env) { env = new(Env) env.Home = os.Getenv("HOME") env.BegotCache = os.Getenv("BEGOT_CACHE") if env.BegotCache == "" { env.BegotCache = filepath.Join(env.Home, ".cache", "begot") } env.DepWorkspaceDir = filepath.Join(env.BegotCache, "depwk") env.CodeWorkspaceDir = filepath.Join(env.BegotCache, "wk") env.RepoDir = filepath.Join(env.BegotCache, "repo") env.CacheLock = filepath.Join(env.BegotCache, "lock") return } type Builder struct { env *Env code_root string code_wk string dep_wk string bf *BegottenFile deps []Dep repo_deps map[string][]string cached_lf_hash string } func BuilderNew(env *Env, code_root string, use_lockfile bool) (b *Builder) { b = new(Builder) b.env = env b.code_root = realpath(code_root) hsh := sha1str(b.code_root)[:8] b.code_wk = filepath.Join(env.CodeWorkspaceDir, hsh) b.dep_wk = filepath.Join(env.DepWorkspaceDir, hsh) var fn string if use_lockfile { fn = filepath.Join(b.code_root, BEGOTTEN_LOCK) } else { fn = filepath.Join(b.code_root, BEGOTTEN) } b.bf = BegottenFileNew(fn) b.deps = b.bf.deps() b.repo_deps = b.bf.repo_deps() return } func (b *Builder)
() (out map[string]string) { out = make(map[string]string) for _, dep := range b.deps { out[dep.Git_url] = dep.Ref } return } func (b *Builder) get_locked_refs_for_update(limits []string) (out map[string]string) { out = make(map[string]string) if len(limits) == 0 { return } defer func() { if err := recover(); err != nil { panic(fmt.Errorf("You must have a %s to do a limited update.", BEGOTTEN_LOCK)) } }() bf_lock := BegottenFileNew(filepath.Join(b.code_root, BEGOTTEN_LOCK)) lock_deps := bf_lock.deps() lock_repo_deps := bf_lock.repo_deps() match := func(name string) bool { for _, limit := range limits { if matched, err := filepath.Match(limit, name); err != nil { panic(err) } else if matched { return true } } return false } repos_to_update := make(map[string]bool) for _, dep := range lock_deps { if match(dep.name) { repos_to_update[dep.Git_url] = true } } // transitive closure n := -1 for len(repos_to_update) != n { n = len(repos_to_update) repos := make([]string, 0, len(repos_to_update)) for repo, _ := range repos_to_update { repos = append(repos, repo) } for _, repo := range repos { if deps, ok := lock_repo_deps[repo]; ok { for _, dep := range deps { repos_to_update[dep] = true } } } } for _, dep := range lock_deps { if !repos_to_update[dep.Git_url] { out[dep.Git_url] = dep.Ref } } return } func (b *Builder) setup_repos(fetch bool, limits []string) *Builder { processed_deps := 0 repo_versions := make(map[string]string) var fetched_set map[string]bool if fetch { fetched_set = make(map[string]bool) } locked_refs := b.get_locked_refs_for_update(limits) for processed_deps < len(b.deps) { repos_to_setup := []string{} for i, dep := range b.deps[processed_deps:] { have := repo_versions[dep.Git_url] if fetch && strings.HasPrefix(dep.name, IMPLICIT_PREFIX) && have != "" { // Implicit deps take the revision of an explicit dep from the same // repo, if one exists. b.deps[processed_deps+i].Ref = have continue } want := locked_refs[dep.Git_url] if want == "" { want = b._resolve_ref(dep.Git_url, dep.Ref, fetched_set) } if have != "" { if have != want { panic(fmt.Errorf("Conflicting versions for %r: have %s, want %s (%s)", dep.name, have, want, dep.Ref)) } } else { repo_versions[dep.Git_url] = want repos_to_setup = append(repos_to_setup, dep.Git_url) } b.deps[processed_deps+i].Ref = want } processed_deps = len(b.deps) // This will add newly-found dependencies to b.deps. for _, url := range repos_to_setup { b._setup_repo(url, repo_versions[url]) } } return b } func (b *Builder) save_lockfile() *Builder { // Should only be called when loaded from Begotten, not lockfile. b.bf.set_deps(b.deps) b.bf.set_repo_deps(b.repo_deps) b.bf.save(filepath.Join(b.code_root, BEGOTTEN_LOCK)) return b } func (b *Builder) _record_repo_dep(src_url, dep_url string) { if src_url != dep_url { lst := b.repo_deps[src_url] if !contains_str(lst, dep_url) { b.repo_deps[src_url] = append(lst, dep_url) } } } func (b *Builder) _repo_dir(url string) string { return filepath.Join(b.env.RepoDir, sha1str(url)) } var RE_SHA1_HASH = regexp.MustCompile("[[:xdigit:]]{40}") func (b *Builder) _resolve_ref(url, ref string, fetched_set map[string]bool) (resolved_ref string) { repo_dir := b._repo_dir(url) if fi, err := os.Stat(repo_dir); err != nil || !fi.Mode().IsDir() { fmt.Printf("Cloning %s\n", url) cc("/", "git", "clone", "-q", url, repo_dir) // Get into detached head state so we can manipulate things without // worrying about messing up a branch. 
cc(repo_dir, "git", "checkout", "-q", "--detach") } else if fetched_set != nil { if !fetched_set[url] { fmt.Printf("Updating %s\n", url) cc(repo_dir, "git", "fetch", "-q") fetched_set[url] = true } } if RE_SHA1_HASH.MatchString(ref) { return ref } for _, pfx := range []string{"origin/", ""} { cmd := Command(repo_dir, "git", "rev-parse", "--verify", pfx+ref) cmd.Stderr = nil if outb, err := cmd.Output(); err == nil { resolved_ref = strings.TrimSpace(string(outb)) return } } panic(fmt.Errorf("Can't resolve reference %q for %s", ref, url)) } func (b *Builder) _setup_repo(url, resolved_ref string) { hsh := sha1str(url)[:8] repo_dir := b._repo_dir(url) fmt.Printf("Fixing imports in %s\n", url) cmd := Command(repo_dir, "git", "reset", "-q", "--hard", resolved_ref) if err := cmd.Run(); err != nil { fmt.Printf("Updating %s\n", url) cc(repo_dir, "git", "fetch", "-q") cc(repo_dir, "git", "reset", "-q", "--hard", resolved_ref) } // Match up sub-deps to our deps. sub_dep_map := make(map[string]string) self_deps := []Dep{} sub_bg_path := filepath.Join(repo_dir, BEGOTTEN_LOCK) if _, err := os.Stat(sub_bg_path); err == nil { sub_bg := BegottenFileNew(sub_bg_path) // Add implicit and explicit external dependencies. for _, sub_dep := range sub_bg.deps() { b._record_repo_dep(url, sub_dep.Git_url) our_dep := b._lookup_dep_by_git_url_and_path(sub_dep.Git_url, sub_dep.Subpath) if our_dep != nil { if sub_dep.Ref != our_dep.Ref { panic(fmt.Sprintf("Conflict: %s depends on %s at %s, we depend on it at %s", url, sub_dep.Git_url, sub_dep.Ref, our_dep.Ref)) } sub_dep_map[sub_dep.name] = our_dep.name } else { // Include a hash of this repo identifier so that if two repos use the // same dep name to refer to two different things, they don't conflict // when we flatten deps. transitive_name := fmt.Sprintf("_begot_transitive_%s/%s", hsh, sub_dep.name) sub_dep_map[sub_dep.name] = transitive_name sub_dep.name = transitive_name b.deps = append(b.deps, sub_dep) } } // Allow relative import paths within this repo. e := filepath.Walk(repo_dir, func(path string, fi os.FileInfo, err error) error { basename := filepath.Base(path) if err != nil { return err } else if fi.IsDir() && basename[0] == '.' { return filepath.SkipDir } else if path == repo_dir { return nil } relpath := path[len(repo_dir)+1:] our_dep := b._lookup_dep_by_git_url_and_path(url, relpath) if our_dep != nil { sub_dep_map[relpath] = our_dep.name } else { // See comment on _lookup_dep_name for rationale. self_name := fmt.Sprintf("_begot_self_%s/%s", hsh, replace_non_identifier_chars(relpath)) sub_dep_map[relpath] = self_name self_deps = append(self_deps, Dep{ name: self_name, Git_url: url, Subpath: relpath, Ref: resolved_ref}) } return nil }) if e != nil { panic(e) } } used_rewrites := make(map[string]bool) b._rewrite_imports(url, repo_dir, &sub_dep_map, &used_rewrites) msg := fmt.Sprintf("rewritten by begot for %s", b.code_root) cc(repo_dir, "git", "commit", "--allow-empty", "-a", "-q", "-m", msg) // Add only the self-deps that were used, to reduce clutter. 
for _, self_dep := range self_deps { if used_rewrites[self_dep.name] { b.deps = append(b.deps, self_dep) } } } func (b *Builder) _rewrite_imports(src_url, repo_dir string, sub_dep_map *map[string]string, used_rewrites *map[string]bool) { filepath.Walk(repo_dir, func(path string, fi os.FileInfo, err error) error { if err != nil { return err } if strings.HasSuffix(path, ".go") { b._rewrite_file(src_url, path, sub_dep_map, used_rewrites) } return nil }) } func (b *Builder) _rewrite_file(src_url, path string, sub_dep_map *map[string]string, used_rewrites *map[string]bool) { bts, err := ioutil.ReadFile(path) if err != nil { panic(err) } fs := token.NewFileSet() f, err := parser.ParseFile(fs, path, bts, parser.ImportsOnly) if err != nil { panic(err) } var pos int var out bytes.Buffer out.Grow(len(bts) * 5 / 4) for _, imp := range f.Imports { start := fs.Position(imp.Path.Pos()).Offset end := fs.Position(imp.Path.End()).Offset orig_import := string(bts[start+1 : end-1]) rewritten := b._rewrite_import(src_url, orig_import, sub_dep_map, used_rewrites) if orig_import != rewritten { out.Write(bts[pos : start+1]) out.WriteString(rewritten) pos = end - 1 } } out.Write(bts[pos:]) if err := ioutil.WriteFile(path, out.Bytes(), 0666); err != nil { panic(err) } } func (b *Builder) _rewrite_import(src_url, imp string, sub_dep_map *map[string]string, used_rewrites *map[string]bool) string { if rewrite, ok := (*sub_dep_map)[imp]; ok { imp = rewrite (*used_rewrites)[rewrite] = true } else { parts := strings.Split(imp, "/") if _, ok := KNOWN_GIT_SERVERS[parts[0]]; ok { imp = b._lookup_dep_name(src_url, imp) } } return imp } func (b *Builder) _lookup_dep_name(src_url, imp string) string { for _, dep := range b.deps { if contains_str(dep.Aliases, imp) { b._record_repo_dep(src_url, dep.Git_url) return dep.name } } // Each dep turns into a symlink at build time. Packages can be nested, so we // might depend on 'a' and 'a/b'. If we create a symlink for 'a', we can't // also create 'a/b'. So rename it to 'a_b'. name := IMPLICIT_PREFIX + replace_non_identifier_chars(imp) dep := b.bf.parse_dep(name, imp) b.deps = append(b.deps, dep) b._record_repo_dep(src_url, dep.Git_url) return name } func (b *Builder) _lookup_dep_by_git_url_and_path(git_url string, subpath string) *Dep { for _, dep := range b.deps { if dep.Git_url == git_url && dep.Subpath == subpath { return &dep } } return nil } func (b *Builder) tag_repos() { // Run this after setup_repos. for url, ref := range b._all_repos() { out := co(b._repo_dir(url), "git", "tag", "--force", b._tag_hash(ref)) for _, line := range strings.SplitAfter(out, "\n") { if !strings.HasPrefix(line, "Updated tag ") { fmt.Print(line) } } } } func (b *Builder) _tag_hash(ref string) string { // We want to tag the current state with a name that depends on: // 1. The base ref that we rewrote from. // 2. The full set of deps that describe how we rewrote imports. // The contents of Begotten.lock suffice for (2): if b.cached_lf_hash == "" { lockfile := filepath.Join(b.code_root, BEGOTTEN_LOCK) if bts, err := ioutil.ReadFile(lockfile); err != nil { panic(err) } else { b.cached_lf_hash = sha1bts(bts) } } return "_begot_rewrote_" + sha1str(ref+b.cached_lf_hash) } func (b *Builder) run(args []string) { b._reset_to_tags() // Set up code_wk. 
cbin := filepath.Join(b.code_wk, "bin") depsrc := filepath.Join(b.dep_wk, "src") empty_dep := filepath.Join(depsrc, EMPTY_DEP) os.MkdirAll(cbin, 0777) os.MkdirAll(empty_dep, 0777) if _, err := ln_sf(cbin, filepath.Join(b.code_root, "bin")); err != nil { panic(fmt.Errorf("It looks like you have an existing 'bin' directory. " + "Please remove it before using begot.")) } ln_sf(b.code_root, filepath.Join(b.code_wk, "src")) old_links := make(map[string]bool) filepath.Walk(depsrc, func(path string, fi os.FileInfo, err error) error { if err != nil { return err } if fi.Mode()&os.ModeType == os.ModeSymlink { old_links[path] = true } return nil }) for _, dep := range b.deps { path := filepath.Join(depsrc, dep.name) target := filepath.Join(b._repo_dir(dep.Git_url), dep.Subpath) if created, err := ln_sf(target, path); err != nil { panic(err) } else if created { // If we've created or changed this symlink, any pkg files that go may // have compiled from it should be invalidated. // Note: This makes some assumptions about go's build layout. It should // be safe enough, though it may be simpler to just blow away everything // if any dep symlinks change. pkgs, _ := filepath.Glob(filepath.Join(b.dep_wk, "pkg", "*", dep.name+".*")) for _, pkg := range pkgs { os.RemoveAll(pkg) } } delete(old_links, path) } // Remove unexpected links. for old_link := range old_links { os.RemoveAll(old_link) } // Try to remove all directories; ignore ENOTEMPTY errors. var dirs []string filepath.Walk(depsrc, func(path string, fi os.FileInfo, err error) error { if err != nil { return err } if fi.IsDir() { dirs = append(dirs, path) } return nil }) for i := len(dirs) - 1; i >= 0; i-- { if err := syscall.Rmdir(dirs[i]); err != nil && err != syscall.ENOTEMPTY { panic(err) } } // Set up empty dep. // // The go tool tries to be helpful by not rebuilding modified code if that // code is in a workspace and no packages from that workspace are mentioned // on the command line. See cmd/go/pkg.go:isStale around line 680. // // We are explicitly managing all of the workspaces in our GOPATH and do // indeed want to rebuild everything when dependencies change. That is // required by the goal of reproducible builds: the alternative would mean // what you get for this build depends on the state of a previous build. // // The go tool doesn't provide any way of disabling this "helpful" // functionality. The simplest workaround is to always mention a package from // the dependency workspace on the command line. Hence, we add an empty // package. empty_go := filepath.Join(empty_dep, "empty.go") if fi, err := os.Stat(empty_go); err != nil || !fi.Mode().IsRegular() { os.MkdirAll(filepath.Dir(empty_go), 0777) if err := ioutil.WriteFile(empty_go, []byte(fmt.Sprintf("package %s\n", EMPTY_DEP)), 0666); err != nil { panic(err) } } // Overwrite any existing GOPATH. if argv0, err := exec.LookPath(args[0]); err != nil { panic(err) } else { os.Setenv("GOPATH", fmt.Sprintf("%s:%s", b.code_wk, b.dep_wk)) os.Chdir(b.code_root) err := syscall.Exec(argv0, args, os.Environ()) panic(fmt.Errorf("exec failed: %s", err)) } } func (b *Builder) _reset_to_tags() { defer func() { if recover() != nil { panic(fmt.Errorf("Begotten.lock refers to a missing local commit. 
" + "Please run 'begot fetch' first.")) } }() for url, ref := range b._all_repos() { wd := b._repo_dir(url) if fi, err := os.Stat(wd); err != nil || !fi.Mode().IsDir() { panic("not directory") } cc(wd, "git", "reset", "-q", "--hard", "tags/"+b._tag_hash(ref)) } } func (b *Builder) clean() { os.RemoveAll(b.dep_wk) os.RemoveAll(b.code_wk) os.Remove(filepath.Join(b.code_root, "bin")) } func get_gopath(env *Env) string { // This duplicates logic in Builder, but we want to just get the GOPATH without // parsing anything. for { if _, err := os.Stat(BEGOTTEN); err == nil { break } if wd, err := os.Getwd(); err != nil { panic(err) } else if wd == "/" { panic(fmt.Errorf("Couldn't find %s file", BEGOTTEN)) } if err := os.Chdir(".."); err != nil { panic(err) } } hsh := sha1str(realpath("."))[:8] code_wk := filepath.Join(env.CodeWorkspaceDir, hsh) dep_wk := filepath.Join(env.DepWorkspaceDir, hsh) return code_wk + ":" + dep_wk } var _cache_lock *os.File func lock_cache(env *Env) { os.MkdirAll(env.BegotCache, 0777) _cache_lock, err := os.OpenFile(env.CacheLock, os.O_CREATE|os.O_RDWR, 0666) if err != nil { panic(err) } err = syscall.Flock(int(_cache_lock.Fd()), syscall.LOCK_EX|syscall.LOCK_NB) if err != nil { panic(fmt.Errorf("Can't lock %r", env.BegotCache)) } // Leave file open for lifetime of this process and anything exec'd by this // process. } func print_help(ret int) { fmt.Fprintln(os.Stderr, "FIXME") os.Exit(ret) } func main() { env := EnvNew() defer func() { if err := recover(); err != nil { fmt.Printf("Error: %s\n", err) os.Exit(1) } }() lock_cache(env) if len(os.Args) < 2 { print_help(1) } switch os.Args[1] { case "update": BuilderNew(env, ".", false).setup_repos(true, os.Args[2:]).save_lockfile().tag_repos() case "just_rewrite": BuilderNew(env, ".", false).setup_repos(false, []string{}).save_lockfile().tag_repos() case "fetch": BuilderNew(env, ".", true).setup_repos(false, []string{}).tag_repos() case "build": BuilderNew(env, ".", true).run([]string{"go", "install", "./...", EMPTY_DEP}) case "go": BuilderNew(env, ".", true).run(append([]string{"go"}, os.Args[2:]...)) case "exec": BuilderNew(env, ".", true).run(os.Args[2:]) case "clean": BuilderNew(env, ".", false).clean() case "gopath": fmt.Println(get_gopath(env)) case "help": print_help(0) default: fmt.Fprintf(os.Stderr, "Unknown subcommand %q\n", os.Args[1]) print_help(1) } }
_all_repos
identifier_name
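The begot.go record above rewrites vendored import paths by parsing each file with parser.ImportsOnly and splicing replacements at the byte offsets of the import string literals (see _rewrite_file in the suffix). Here is a runnable sketch of just the offset-location step; the src literal is made up for the demo and is not part of begot.

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

// Example input: a tiny Go file whose import paths we want to locate.
const src = `package demo

import (
	"fmt"
	"github.com/example/dep/sub"
)
`

func main() {
	fs := token.NewFileSet()
	// ImportsOnly stops parsing after the import block, which is all
	// the rewriter needs.
	f, err := parser.ParseFile(fs, "demo.go", []byte(src), parser.ImportsOnly)
	if err != nil {
		panic(err)
	}
	for _, imp := range f.Imports {
		start := fs.Position(imp.Path.Pos()).Offset
		end := fs.Position(imp.Path.End()).Offset
		// The offsets include the surrounding quotes, hence +1/-1,
		// matching the bts[start+1:end-1] slicing in _rewrite_file.
		fmt.Printf("import %q spans bytes [%d, %d)\n", src[start+1:end-1], start, end)
	}
}
```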
begot.go
// Copyright (c) 2014-2015 Solano Labs Inc. All Rights Reserved. package main import ( "bytes" "crypto/sha1" "encoding/hex" "fmt" "go/parser" "go/token" "io/ioutil" "os" "os/exec" "path/filepath" "regexp" "sort" "strings" "syscall" "gopkg.in/yaml.v2" ) const ( BEGOTTEN = "Begotten" BEGOTTEN_LOCK = "Begotten.lock" EMPTY_DEP = "_begot_empty_dep" IMPLICIT_PREFIX = "_begot_implicit" // This should change if the format of Begotten.lock changes in an incompatible // way. (But prefer changing it in compatible ways and not incrementing this.) FILE_VERSION = 1 ) // Known public servers and how many path components form the repo name. var KNOWN_GIT_SERVERS = map[string]int{ "github.com": 2, "bitbucket.org": 2, "begot.test": 2, } var RE_NON_IDENTIFIER_CHAR = regexp.MustCompile("\\W") func replace_non_identifier_chars(in string) string { return RE_NON_IDENTIFIER_CHAR.ReplaceAllLiteralString(in, "_") } func Command(cwd string, name string, args ...string) (cmd *exec.Cmd) { cmd = exec.Command(name, args...) cmd.Dir = cwd return } func cc(cwd string, name string, args ...string) { cmd := Command(cwd, name, args...) if err := cmd.Run(); err != nil { panic(fmt.Errorf("command '%s %s' in %s: %s", name, strings.Join(args, " "), cwd, err)) } } func co(cwd string, name string, args ...string) string { cmd := Command(cwd, name, args...) if outb, err := cmd.Output(); err != nil { panic(fmt.Errorf("command '%s %s' in %s: %s", name, strings.Join(args, " "), cwd, err)) } else { return string(outb) } } func contains_str(lst []string, val string) bool { for _, item := range lst { if item == val { return true } } return false } func sha1str(in string) string { sum := sha1.Sum([]byte(in)) return hex.EncodeToString(sum[:]) } func sha1bts(in []byte) string { sum := sha1.Sum(in) return hex.EncodeToString(sum[:]) } func realpath(path string) (out string) { if abs, err := filepath.Abs(path); err != nil { panic(err) } else if out, err = filepath.EvalSymlinks(abs); err != nil { panic(err) } return } func ln_sf(target, path string) (created bool, err error) { current, e := os.Readlink(path) if e != nil || current != target { if err = os.RemoveAll(path); err != nil { return } if err = os.MkdirAll(filepath.Dir(path), 0777); err != nil { return } if err = os.Symlink(target, path); err != nil { return } created = true } return } func yaml_copy(in interface{}, out interface{}) { if bts, err := yaml.Marshal(in); err != nil { panic(err) } else if err = yaml.Unmarshal(bts, out); err != nil { panic(err) } } type Dep struct { name string Aliases []string Git_url string Import_path string `yaml:",omitempty"` Ref string Subpath string } // A Begotten or Begotten.lock file contains exactly one of these in YAML format. type BegottenFileStruct struct { Deps map[string]interface{} // either string or Dep Meta struct { File_version int Generated_by string } Repo_aliases map[string]interface{} // either string or subset of Dep {git_url, ref} Repo_deps map[string][]string } type BegottenFile struct { data BegottenFileStruct } func BegottenFileNew(fn string) (bf *BegottenFile)
type SortedStringMap yaml.MapSlice func (sm SortedStringMap) Len() int { return len(sm) } func (sm SortedStringMap) Less(i, j int) bool { return sm[i].Key.(string) < sm[j].Key.(string) } func (sm SortedStringMap) Swap(i, j int) { sm[i], sm[j] = sm[j], sm[i] } func (bf *BegottenFile) save(fn string) { // We have to sort everything so the output is deterministic. go-yaml // doesn't write maps in sorted order, so we have to convert them to // yaml.MapSlices and sort those. var out struct { Deps SortedStringMap Meta struct { File_version int Generated_by string } Repo_aliases SortedStringMap Repo_deps SortedStringMap } out.Meta.File_version = FILE_VERSION out.Meta.Generated_by = CODE_VERSION for k, v := range bf.data.Deps { dep := v.(Dep) dep.Import_path = "" sort.StringSlice(dep.Aliases).Sort() out.Deps = append(out.Deps, yaml.MapItem{k, dep}) } sort.Sort(out.Deps) for k, v := range bf.data.Repo_aliases { out.Repo_aliases = append(out.Repo_aliases, yaml.MapItem{k, v}) } sort.Sort(out.Repo_aliases) for k, v := range bf.data.Repo_deps { sort.StringSlice(v).Sort() out.Repo_deps = append(out.Repo_deps, yaml.MapItem{k, v}) } sort.Sort(out.Repo_deps) if data, err := yaml.Marshal(out); err != nil { panic(err) } else if err := ioutil.WriteFile(fn, data, 0666); err != nil { panic(err) } } func (bf *BegottenFile) default_git_url_from_repo_path(repo_path string) string { // Hook for testing: test_repo_path := os.Getenv("BEGOT_TEST_REPOS") if strings.HasPrefix(repo_path, "begot.test/") && test_repo_path != "" { return "file://" + filepath.Join(test_repo_path, repo_path) } // Default to https for other repos: return "https://" + repo_path } func (bf *BegottenFile) parse_dep(name string, v interface{}) (dep Dep) { dep.name = name if _, ok := v.(string); ok { v = map[interface{}]interface{}{"import_path": v} } mv, ok := v.(map[interface{}]interface{}) if !ok { panic(fmt.Errorf("Dependency value must be string or dict, got %T: %v", v, v)) } yaml_copy(mv, &dep) if dep.Import_path != "" { parts := strings.Split(dep.Import_path, "/") if repo_parts, ok := KNOWN_GIT_SERVERS[parts[0]]; !ok { panic(fmt.Errorf("Unknown git server %r for %r", parts[0], name)) } else { repo_path := strings.Join(parts[:repo_parts+1], "/") dep.Git_url = bf.default_git_url_from_repo_path(repo_path) dep.Subpath = strings.Join(parts[repo_parts+1:], "/") dep.Aliases = append(dep.Aliases, dep.Import_path) // Redirect through repo aliases: if alias, ok := bf.data.Repo_aliases[repo_path]; ok { var aliasdep Dep // only allow git_url and ref if aliasstr, ok := alias.(string); ok { aliasstr = bf.default_git_url_from_repo_path(aliasstr) alias = yaml.MapSlice{yaml.MapItem{"git_url", aliasstr}} } yaml_copy(alias, &aliasdep) if aliasdep.Git_url != "" { dep.Git_url = aliasdep.Git_url } if aliasdep.Ref != "" { dep.Ref = aliasdep.Ref } } } } if dep.Git_url == "" { panic(fmt.Errorf("Missing 'git_url' for %q; only git is supported for now", name)) } if dep.Ref == "" { dep.Ref = "master" } return } func (bf *BegottenFile) deps() (out []Dep) { out = make([]Dep, len(bf.data.Deps)) i := 0 for name, v := range bf.data.Deps { out[i] = bf.parse_dep(name, v) i++ } return } func (bf *BegottenFile) set_deps(deps []Dep) { bf.data.Deps = make(map[string]interface{}) for _, dep := range deps { bf.data.Deps[dep.name] = dep } } func (bf *BegottenFile) repo_deps() map[string][]string { if bf.data.Repo_deps == nil { bf.data.Repo_deps = make(map[string][]string) } return bf.data.Repo_deps } func (bf *BegottenFile) set_repo_deps(repo_deps map[string][]string) { 
bf.data.Repo_deps = repo_deps } type Env struct { Home string BegotCache string DepWorkspaceDir string CodeWorkspaceDir string RepoDir string CacheLock string } func EnvNew() (env *Env) { env = new(Env) env.Home = os.Getenv("HOME") env.BegotCache = os.Getenv("BEGOT_CACHE") if env.BegotCache == "" { env.BegotCache = filepath.Join(env.Home, ".cache", "begot") } env.DepWorkspaceDir = filepath.Join(env.BegotCache, "depwk") env.CodeWorkspaceDir = filepath.Join(env.BegotCache, "wk") env.RepoDir = filepath.Join(env.BegotCache, "repo") env.CacheLock = filepath.Join(env.BegotCache, "lock") return } type Builder struct { env *Env code_root string code_wk string dep_wk string bf *BegottenFile deps []Dep repo_deps map[string][]string cached_lf_hash string } func BuilderNew(env *Env, code_root string, use_lockfile bool) (b *Builder) { b = new(Builder) b.env = env b.code_root = realpath(code_root) hsh := sha1str(b.code_root)[:8] b.code_wk = filepath.Join(env.CodeWorkspaceDir, hsh) b.dep_wk = filepath.Join(env.DepWorkspaceDir, hsh) var fn string if use_lockfile { fn = filepath.Join(b.code_root, BEGOTTEN_LOCK) } else { fn = filepath.Join(b.code_root, BEGOTTEN) } b.bf = BegottenFileNew(fn) b.deps = b.bf.deps() b.repo_deps = b.bf.repo_deps() return } func (b *Builder) _all_repos() (out map[string]string) { out = make(map[string]string) for _, dep := range b.deps { out[dep.Git_url] = dep.Ref } return } func (b *Builder) get_locked_refs_for_update(limits []string) (out map[string]string) { out = make(map[string]string) if len(limits) == 0 { return } defer func() { if err := recover(); err != nil { panic(fmt.Errorf("You must have a %s to do a limited update.", BEGOTTEN_LOCK)) } }() bf_lock := BegottenFileNew(filepath.Join(b.code_root, BEGOTTEN_LOCK)) lock_deps := bf_lock.deps() lock_repo_deps := bf_lock.repo_deps() match := func(name string) bool { for _, limit := range limits { if matched, err := filepath.Match(limit, name); err != nil { panic(err) } else if matched { return true } } return false } repos_to_update := make(map[string]bool) for _, dep := range lock_deps { if match(dep.name) { repos_to_update[dep.Git_url] = true } } // transitive closure n := -1 for len(repos_to_update) != n { n = len(repos_to_update) repos := make([]string, 0, len(repos_to_update)) for repo, _ := range repos_to_update { repos = append(repos, repo) } for _, repo := range repos { if deps, ok := lock_repo_deps[repo]; ok { for _, dep := range deps { repos_to_update[dep] = true } } } } for _, dep := range lock_deps { if !repos_to_update[dep.Git_url] { out[dep.Git_url] = dep.Ref } } return } func (b *Builder) setup_repos(fetch bool, limits []string) *Builder { processed_deps := 0 repo_versions := make(map[string]string) var fetched_set map[string]bool if fetch { fetched_set = make(map[string]bool) } locked_refs := b.get_locked_refs_for_update(limits) for processed_deps < len(b.deps) { repos_to_setup := []string{} for i, dep := range b.deps[processed_deps:] { have := repo_versions[dep.Git_url] if fetch && strings.HasPrefix(dep.name, IMPLICIT_PREFIX) && have != "" { // Implicit deps take the revision of an explicit dep from the same // repo, if one exists. 
b.deps[processed_deps+i].Ref = have continue } want := locked_refs[dep.Git_url] if want == "" { want = b._resolve_ref(dep.Git_url, dep.Ref, fetched_set) } if have != "" { if have != want { panic(fmt.Errorf("Conflicting versions for %r: have %s, want %s (%s)", dep.name, have, want, dep.Ref)) } } else { repo_versions[dep.Git_url] = want repos_to_setup = append(repos_to_setup, dep.Git_url) } b.deps[processed_deps+i].Ref = want } processed_deps = len(b.deps) // This will add newly-found dependencies to b.deps. for _, url := range repos_to_setup { b._setup_repo(url, repo_versions[url]) } } return b } func (b *Builder) save_lockfile() *Builder { // Should only be called when loaded from Begotten, not lockfile. b.bf.set_deps(b.deps) b.bf.set_repo_deps(b.repo_deps) b.bf.save(filepath.Join(b.code_root, BEGOTTEN_LOCK)) return b } func (b *Builder) _record_repo_dep(src_url, dep_url string) { if src_url != dep_url { lst := b.repo_deps[src_url] if !contains_str(lst, dep_url) { b.repo_deps[src_url] = append(lst, dep_url) } } } func (b *Builder) _repo_dir(url string) string { return filepath.Join(b.env.RepoDir, sha1str(url)) } var RE_SHA1_HASH = regexp.MustCompile("[[:xdigit:]]{40}") func (b *Builder) _resolve_ref(url, ref string, fetched_set map[string]bool) (resolved_ref string) { repo_dir := b._repo_dir(url) if fi, err := os.Stat(repo_dir); err != nil || !fi.Mode().IsDir() { fmt.Printf("Cloning %s\n", url) cc("/", "git", "clone", "-q", url, repo_dir) // Get into detached head state so we can manipulate things without // worrying about messing up a branch. cc(repo_dir, "git", "checkout", "-q", "--detach") } else if fetched_set != nil { if !fetched_set[url] { fmt.Printf("Updating %s\n", url) cc(repo_dir, "git", "fetch", "-q") fetched_set[url] = true } } if RE_SHA1_HASH.MatchString(ref) { return ref } for _, pfx := range []string{"origin/", ""} { cmd := Command(repo_dir, "git", "rev-parse", "--verify", pfx+ref) cmd.Stderr = nil if outb, err := cmd.Output(); err == nil { resolved_ref = strings.TrimSpace(string(outb)) return } } panic(fmt.Errorf("Can't resolve reference %q for %s", ref, url)) } func (b *Builder) _setup_repo(url, resolved_ref string) { hsh := sha1str(url)[:8] repo_dir := b._repo_dir(url) fmt.Printf("Fixing imports in %s\n", url) cmd := Command(repo_dir, "git", "reset", "-q", "--hard", resolved_ref) if err := cmd.Run(); err != nil { fmt.Printf("Updating %s\n", url) cc(repo_dir, "git", "fetch", "-q") cc(repo_dir, "git", "reset", "-q", "--hard", resolved_ref) } // Match up sub-deps to our deps. sub_dep_map := make(map[string]string) self_deps := []Dep{} sub_bg_path := filepath.Join(repo_dir, BEGOTTEN_LOCK) if _, err := os.Stat(sub_bg_path); err == nil { sub_bg := BegottenFileNew(sub_bg_path) // Add implicit and explicit external dependencies. for _, sub_dep := range sub_bg.deps() { b._record_repo_dep(url, sub_dep.Git_url) our_dep := b._lookup_dep_by_git_url_and_path(sub_dep.Git_url, sub_dep.Subpath) if our_dep != nil { if sub_dep.Ref != our_dep.Ref { panic(fmt.Sprintf("Conflict: %s depends on %s at %s, we depend on it at %s", url, sub_dep.Git_url, sub_dep.Ref, our_dep.Ref)) } sub_dep_map[sub_dep.name] = our_dep.name } else { // Include a hash of this repo identifier so that if two repos use the // same dep name to refer to two different things, they don't conflict // when we flatten deps. 
transitive_name := fmt.Sprintf("_begot_transitive_%s/%s", hsh, sub_dep.name) sub_dep_map[sub_dep.name] = transitive_name sub_dep.name = transitive_name b.deps = append(b.deps, sub_dep) } } // Allow relative import paths within this repo. e := filepath.Walk(repo_dir, func(path string, fi os.FileInfo, err error) error { basename := filepath.Base(path) if err != nil { return err } else if fi.IsDir() && basename[0] == '.' { return filepath.SkipDir } else if path == repo_dir { return nil } relpath := path[len(repo_dir)+1:] our_dep := b._lookup_dep_by_git_url_and_path(url, relpath) if our_dep != nil { sub_dep_map[relpath] = our_dep.name } else { // See comment on _lookup_dep_name for rationale. self_name := fmt.Sprintf("_begot_self_%s/%s", hsh, replace_non_identifier_chars(relpath)) sub_dep_map[relpath] = self_name self_deps = append(self_deps, Dep{ name: self_name, Git_url: url, Subpath: relpath, Ref: resolved_ref}) } return nil }) if e != nil { panic(e) } } used_rewrites := make(map[string]bool) b._rewrite_imports(url, repo_dir, &sub_dep_map, &used_rewrites) msg := fmt.Sprintf("rewritten by begot for %s", b.code_root) cc(repo_dir, "git", "commit", "--allow-empty", "-a", "-q", "-m", msg) // Add only the self-deps that were used, to reduce clutter. for _, self_dep := range self_deps { if used_rewrites[self_dep.name] { b.deps = append(b.deps, self_dep) } } } func (b *Builder) _rewrite_imports(src_url, repo_dir string, sub_dep_map *map[string]string, used_rewrites *map[string]bool) { filepath.Walk(repo_dir, func(path string, fi os.FileInfo, err error) error { if err != nil { return err } if strings.HasSuffix(path, ".go") { b._rewrite_file(src_url, path, sub_dep_map, used_rewrites) } return nil }) } func (b *Builder) _rewrite_file(src_url, path string, sub_dep_map *map[string]string, used_rewrites *map[string]bool) { bts, err := ioutil.ReadFile(path) if err != nil { panic(err) } fs := token.NewFileSet() f, err := parser.ParseFile(fs, path, bts, parser.ImportsOnly) if err != nil { panic(err) } var pos int var out bytes.Buffer out.Grow(len(bts) * 5 / 4) for _, imp := range f.Imports { start := fs.Position(imp.Path.Pos()).Offset end := fs.Position(imp.Path.End()).Offset orig_import := string(bts[start+1 : end-1]) rewritten := b._rewrite_import(src_url, orig_import, sub_dep_map, used_rewrites) if orig_import != rewritten { out.Write(bts[pos : start+1]) out.WriteString(rewritten) pos = end - 1 } } out.Write(bts[pos:]) if err := ioutil.WriteFile(path, out.Bytes(), 0666); err != nil { panic(err) } } func (b *Builder) _rewrite_import(src_url, imp string, sub_dep_map *map[string]string, used_rewrites *map[string]bool) string { if rewrite, ok := (*sub_dep_map)[imp]; ok { imp = rewrite (*used_rewrites)[rewrite] = true } else { parts := strings.Split(imp, "/") if _, ok := KNOWN_GIT_SERVERS[parts[0]]; ok { imp = b._lookup_dep_name(src_url, imp) } } return imp } func (b *Builder) _lookup_dep_name(src_url, imp string) string { for _, dep := range b.deps { if contains_str(dep.Aliases, imp) { b._record_repo_dep(src_url, dep.Git_url) return dep.name } } // Each dep turns into a symlink at build time. Packages can be nested, so we // might depend on 'a' and 'a/b'. If we create a symlink for 'a', we can't // also create 'a/b'. So rename it to 'a_b'. 
name := IMPLICIT_PREFIX + replace_non_identifier_chars(imp) dep := b.bf.parse_dep(name, imp) b.deps = append(b.deps, dep) b._record_repo_dep(src_url, dep.Git_url) return name } func (b *Builder) _lookup_dep_by_git_url_and_path(git_url string, subpath string) *Dep { for _, dep := range b.deps { if dep.Git_url == git_url && dep.Subpath == subpath { return &dep } } return nil } func (b *Builder) tag_repos() { // Run this after setup_repos. for url, ref := range b._all_repos() { out := co(b._repo_dir(url), "git", "tag", "--force", b._tag_hash(ref)) for _, line := range strings.SplitAfter(out, "\n") { if !strings.HasPrefix(line, "Updated tag ") { fmt.Print(line) } } } } func (b *Builder) _tag_hash(ref string) string { // We want to tag the current state with a name that depends on: // 1. The base ref that we rewrote from. // 2. The full set of deps that describe how we rewrote imports. // The contents of Begotten.lock suffice for (2): if b.cached_lf_hash == "" { lockfile := filepath.Join(b.code_root, BEGOTTEN_LOCK) if bts, err := ioutil.ReadFile(lockfile); err != nil { panic(err) } else { b.cached_lf_hash = sha1bts(bts) } } return "_begot_rewrote_" + sha1str(ref+b.cached_lf_hash) } func (b *Builder) run(args []string) { b._reset_to_tags() // Set up code_wk. cbin := filepath.Join(b.code_wk, "bin") depsrc := filepath.Join(b.dep_wk, "src") empty_dep := filepath.Join(depsrc, EMPTY_DEP) os.MkdirAll(cbin, 0777) os.MkdirAll(empty_dep, 0777) if _, err := ln_sf(cbin, filepath.Join(b.code_root, "bin")); err != nil { panic(fmt.Errorf("It looks like you have an existing 'bin' directory. " + "Please remove it before using begot.")) } ln_sf(b.code_root, filepath.Join(b.code_wk, "src")) old_links := make(map[string]bool) filepath.Walk(depsrc, func(path string, fi os.FileInfo, err error) error { if err != nil { return err } if fi.Mode()&os.ModeType == os.ModeSymlink { old_links[path] = true } return nil }) for _, dep := range b.deps { path := filepath.Join(depsrc, dep.name) target := filepath.Join(b._repo_dir(dep.Git_url), dep.Subpath) if created, err := ln_sf(target, path); err != nil { panic(err) } else if created { // If we've created or changed this symlink, any pkg files that go may // have compiled from it should be invalidated. // Note: This makes some assumptions about go's build layout. It should // be safe enough, though it may be simpler to just blow away everything // if any dep symlinks change. pkgs, _ := filepath.Glob(filepath.Join(b.dep_wk, "pkg", "*", dep.name+".*")) for _, pkg := range pkgs { os.RemoveAll(pkg) } } delete(old_links, path) } // Remove unexpected links. for old_link := range old_links { os.RemoveAll(old_link) } // Try to remove all directories; ignore ENOTEMPTY errors. var dirs []string filepath.Walk(depsrc, func(path string, fi os.FileInfo, err error) error { if err != nil { return err } if fi.IsDir() { dirs = append(dirs, path) } return nil }) for i := len(dirs) - 1; i >= 0; i-- { if err := syscall.Rmdir(dirs[i]); err != nil && err != syscall.ENOTEMPTY { panic(err) } } // Set up empty dep. // // The go tool tries to be helpful by not rebuilding modified code if that // code is in a workspace and no packages from that workspace are mentioned // on the command line. See cmd/go/pkg.go:isStale around line 680. // // We are explicitly managing all of the workspaces in our GOPATH and do // indeed want to rebuild everything when dependencies change. 
That is
// required by the goal of reproducible builds: the alternative would mean
// what you get for this build depends on the state of a previous build.
//
// The go tool doesn't provide any way of disabling this "helpful"
// functionality. The simplest workaround is to always mention a package from
// the dependency workspace on the command line. Hence, we add an empty
// package.
  empty_go := filepath.Join(empty_dep, "empty.go")
  if fi, err := os.Stat(empty_go); err != nil || !fi.Mode().IsRegular() {
    os.MkdirAll(filepath.Dir(empty_go), 0777)
    if err := ioutil.WriteFile(empty_go, []byte(fmt.Sprintf("package %s\n", EMPTY_DEP)), 0666); err != nil {
      panic(err)
    }
  }

  // Overwrite any existing GOPATH.
  if argv0, err := exec.LookPath(args[0]); err != nil {
    panic(err)
  } else {
    os.Setenv("GOPATH", fmt.Sprintf("%s:%s", b.code_wk, b.dep_wk))
    os.Chdir(b.code_root)
    err := syscall.Exec(argv0, args, os.Environ())
    panic(fmt.Errorf("exec failed: %s", err))
  }
}

func (b *Builder) _reset_to_tags() {
  defer func() {
    if recover() != nil {
      panic(fmt.Errorf("Begotten.lock refers to a missing local commit. " +
        "Please run 'begot fetch' first."))
    }
  }()
  for url, ref := range b._all_repos() {
    wd := b._repo_dir(url)
    if fi, err := os.Stat(wd); err != nil || !fi.Mode().IsDir() {
      panic("not directory")
    }
    cc(wd, "git", "reset", "-q", "--hard", "tags/"+b._tag_hash(ref))
  }
}

func (b *Builder) clean() {
  os.RemoveAll(b.dep_wk)
  os.RemoveAll(b.code_wk)
  os.Remove(filepath.Join(b.code_root, "bin"))
}

func get_gopath(env *Env) string {
  // This duplicates logic in Builder, but we want to just get the GOPATH
  // without parsing anything.
  for {
    if _, err := os.Stat(BEGOTTEN); err == nil {
      break
    }
    if wd, err := os.Getwd(); err != nil {
      panic(err)
    } else if wd == "/" {
      panic(fmt.Errorf("Couldn't find %s file", BEGOTTEN))
    }
    if err := os.Chdir(".."); err != nil {
      panic(err)
    }
  }
  hsh := sha1str(realpath("."))[:8]
  code_wk := filepath.Join(env.CodeWorkspaceDir, hsh)
  dep_wk := filepath.Join(env.DepWorkspaceDir, hsh)
  return code_wk + ":" + dep_wk
}

var _cache_lock *os.File

func lock_cache(env *Env) {
  os.MkdirAll(env.BegotCache, 0777)
  // Assign the package-level _cache_lock (plain `=`, not `:=`, which would
  // shadow it with a local) so the file stays open for the lifetime of this
  // process and anything exec'd by this process.
  var err error
  _cache_lock, err = os.OpenFile(env.CacheLock, os.O_CREATE|os.O_RDWR, 0666)
  if err != nil {
    panic(err)
  }
  err = syscall.Flock(int(_cache_lock.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)
  if err != nil {
    panic(fmt.Errorf("Can't lock %q", env.BegotCache))
  }
}

func print_help(ret int) {
  fmt.Fprintln(os.Stderr, "FIXME")
  os.Exit(ret)
}

func main() {
  env := EnvNew()
  defer func() {
    if err := recover(); err != nil {
      fmt.Printf("Error: %s\n", err)
      os.Exit(1)
    }
  }()
  lock_cache(env)

  if len(os.Args) < 2 {
    print_help(1)
  }
  switch os.Args[1] {
  case "update":
    BuilderNew(env, ".", false).setup_repos(true, os.Args[2:]).save_lockfile().tag_repos()
  case "just_rewrite":
    BuilderNew(env, ".", false).setup_repos(false, []string{}).save_lockfile().tag_repos()
  case "fetch":
    BuilderNew(env, ".", true).setup_repos(false, []string{}).tag_repos()
  case "build":
    BuilderNew(env, ".", true).run([]string{"go", "install", "./...", EMPTY_DEP})
  case "go":
    BuilderNew(env, ".", true).run(append([]string{"go"}, os.Args[2:]...))
  case "exec":
    BuilderNew(env, ".", true).run(os.Args[2:])
  case "clean":
    BuilderNew(env, ".", false).clean()
  case "gopath":
    fmt.Println(get_gopath(env))
  case "help":
    print_help(0)
  default:
    fmt.Fprintf(os.Stderr, "Unknown subcommand %q\n", os.Args[1])
    print_help(1)
  }
}
{
  bf = new(BegottenFile)
  bf.data.Meta.File_version = -1
  if data, err := ioutil.ReadFile(fn); err != nil {
    panic(err)
  } else if err := yaml.Unmarshal(data, &bf.data); err != nil {
    panic(err)
  }
  ver := bf.data.Meta.File_version
  if ver != -1 && ver != FILE_VERSION {
    panic(fmt.Errorf("Incompatible file version for %v; please run 'begot update'.", ver))
  }
  return
}
identifier_body
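The implicit-naming comment in _lookup_dep_name above is easiest to see in a tiny standalone sketch. This is not part of begot itself, and the import path below is made up; it just shows how every path separator is rewritten so nested packages flatten to single-level symlink names.

package main

import (
  "fmt"
  "regexp"
)

// Mirrors RE_NON_IDENTIFIER_CHAR and IMPLICIT_PREFIX from begot.go.
var nonIdentifier = regexp.MustCompile(`\W`)

const implicitPrefix = "_begot_implicit"

// implicitName flattens an import path the way _lookup_dep_name does:
// 'a' and 'a/b' cannot both become symlinks, so separators are rewritten.
func implicitName(importPath string) string {
  return implicitPrefix + nonIdentifier.ReplaceAllLiteralString(importPath, "_")
}

func main() {
  // Hypothetical import path, for illustration only.
  fmt.Println(implicitName("github.com/user/repo/sub"))
  // Output: _begot_implicitgithub_com_user_repo_sub
}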
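Similarly, the tag-naming scheme described in _tag_hash can be sketched in isolation. This is an illustrative reimplementation under assumed inputs (the ref and the lockfile bytes are invented), not begot's own entry point:

package main

import (
  "crypto/sha1"
  "encoding/hex"
  "fmt"
)

func sha1hex(b []byte) string {
  sum := sha1.Sum(b)
  return hex.EncodeToString(sum[:])
}

func main() {
  // The tag depends on (1) the base ref that was rewritten and (2) the
  // lockfile contents describing how imports were rewritten; changing
  // either produces a different tag, so stale rewrites are never reused.
  ref := "0123456789abcdef0123456789abcdef01234567" // invented resolved ref
  lock := []byte("deps: {}\n")                      // invented Begotten.lock bytes
  fmt.Println("_begot_rewrote_" + sha1hex([]byte(ref+sha1hex(lock))))
}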
com.rs
//! Common utilities //! //! A standard vocabulary used throughout the code. use std::{self, cmp, convert, fmt, hash, iter, marker, num, ops, sync}; use crate::basic::sea::TableIndex; /// A fragment of source code. #[derive(Clone)] pub struct CodeFragment(sync::Arc<Vec<u8>>); impl CodeFragment { /// Creates a new `CodeFragment`. pub fn new(code: Vec<u8>) -> CodeFragment { CodeFragment(sync::Arc::new(code)) } } impl ops::Deref for CodeFragment { type Target = [u8]; fn deref(&self) -> &[u8] { &*self.0 } } /// The core implementation of a u32-based ID. /// /// The ID can be any number in the `[0, u32::MAX - 2]` range: /// - `u32::MAX` is reserved to enable size optimizations (Option). /// - `u32::MAX - 1` is reserved to denote Default constructed IDs. /// /// IDs built on top of `CoreId` may reserve further numbers for their own ends. #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct CoreId(num::NonZeroU32); impl CoreId { /// Creates a new instance. /// /// # Panics /// /// Panics if the integer provided is `u32::MAX`. pub fn new(id: u32) -> CoreId { if id == std::u32::MAX { panic!("Unsuitable ID: {}", id); } unsafe { CoreId(num::NonZeroU32::new_unchecked(id + 1)) } } /// Get the raw ID. pub fn raw(&self) -> u32 { self.0.get() - 1 } } impl fmt::Debug for CoreId { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "{}", self.raw()) } } impl Default for CoreId { fn default() -> CoreId { unsafe { CoreId(num::NonZeroU32::new_unchecked(std::u32::MAX)) } } } impl fmt::Display for CoreId { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "{}", self.raw()) } } impl convert::From<CoreId> for u32 { fn from(core_id: CoreId) -> u32 { core_id.raw() } } /// An Id implementation based on CoreId. /// /// It contains a default empty state, to represent empty streams. // #[manual(Clone, Copy, Debug, Default, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct Id<T: ?Sized>(CoreId, marker::PhantomData<*const T>); impl<T: ?Sized> Id<T> { /// Creates a new instance. pub fn new(id: u32) -> Self { Id(CoreId::new(id), marker::PhantomData) } /// Creates an empty instance. pub fn empty() -> Self { Self::new(std::u32::MAX - 2) } /// Returns whether the corresponding list is empty. pub fn is_empty(&self) -> bool { *self == Self::empty() } /// Returns the inner ID. pub fn value(&self) -> u32 { self.0.raw() } } impl<T: ?Sized> Clone for Id<T> { fn clone(&self) -> Self { *self } } impl<T: ?Sized> Copy for Id<T> {} impl<T: ?Sized> fmt::Debug for Id<T> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { const MODULE_OFFSET: usize = 1usize << 30; const REPOSITORY_OFFSET: usize = 1usize << 31; // More compact representation for `{:#?}`. // // FIXME(matthieum): consider adding `std::intrinsics::type_name<T>()` // once it stabilizes. 
if *self == Default::default() { write!(f, "Id(default)") } else if *self == Self::empty() { write!(f, "Id(empty)") } else { match self.index() { index if index < MODULE_OFFSET => write!(f, "Id({})", index), index if index < REPOSITORY_OFFSET => write!(f, "Id(M-{})", index - MODULE_OFFSET), index => write!(f, "Id(R-{})", index - REPOSITORY_OFFSET), } } } } impl<T: ?Sized> Default for Id<T> { fn default() -> Self { Id(Default::default(), marker::PhantomData) } } impl<T: ?Sized> cmp::Eq for Id<T> {} impl<T: ?Sized> hash::Hash for Id<T> { fn hash<H: hash::Hasher>(&self, state: &mut H) { self.0.hash(state); } } impl<T: ?Sized> cmp::Ord for Id<T> { fn cmp(&self, other: &Self) -> cmp::Ordering { self.0.cmp(&other.0) } } impl<T: ?Sized> cmp::PartialEq for Id<T> { fn eq(&self, other: &Self) -> bool { self.0.eq(&other.0) } } impl<T: ?Sized> cmp::PartialOrd for Id<T> { fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { self.0.partial_cmp(&other.0) } } impl<T: ?Sized> TableIndex for Id<T> { fn from_index(index: usize) -> Self { Id::new(index as u32) } fn index(&self) -> usize { self.value() as usize } } /// IdIterator. /// /// An Iterator over consecutive IDs. // #[manual(Clone, Copy, Debug, Default, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct IdIterator<T: ?Sized> { start: u32, end: u32, _marker: marker::PhantomData<*const T>, } impl<T: ?Sized> IdIterator<T> { /// Creates an instance. pub fn new(start: u32, end: u32) -> Self { IdIterator { start, end, _marker: marker::PhantomData } } } impl<T: ?Sized> Clone for IdIterator<T> { fn clone(&self) -> Self { *self } } impl<T: ?Sized> Copy for IdIterator<T> {} impl<T: ?Sized> fmt::Debug for IdIterator<T> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { // FIXME(matthieum): consider adding `std::intrinsics::type_name<T>()` // once it stabilizes. 
write!(f, "IdIterator({}, {})", self.start, self.end) } } impl<T: ?Sized> Default for IdIterator<T> { fn default() -> Self { IdIterator::new(0, 0) } } impl<T: ?Sized> cmp::Eq for IdIterator<T> {} impl<T: ?Sized> hash::Hash for IdIterator<T> { fn hash<H: hash::Hasher>(&self, state: &mut H) { self.start.hash(state); self.end.hash(state); } } impl<T: ?Sized> iter::Iterator for IdIterator<T> { type Item = Id<T>; fn next(&mut self) -> Option<Id<T>> { if self.start < self.end { let result = Id::new(self.start); self.start += 1; Some(result) } else { None } } fn size_hint(&self) -> (usize, Option<usize>) { let difference = self.len(); (difference, Some(difference)) } fn count(self) -> usize { self.len() } fn last(self) -> Option<Id<T>> { if self.start < self.end { Some(Id::new(self.end - 1)) } else { None } } fn nth(&mut self, n: usize) -> Option<Id<T>> { let result = self.start.saturating_add(n as u32); if result < self.end { self.start = result + 1; Some(Id::new(result)) } else { self.start = self.end; None } } fn max(self) -> Option<Id<T>> { self.last() } fn min(mut self) -> Option<Id<T>> { self.next() } } impl<T: ?Sized> iter::DoubleEndedIterator for IdIterator<T> { fn next_back(&mut self) -> Option<Id<T>> { if self.start < self.end { self.end -= 1; Some(Id::new(self.end)) } else { None } } } impl<T: ?Sized> iter::ExactSizeIterator for IdIterator<T> { fn len(&self) -> usize { self.end.saturating_sub(self.start) as usize } } impl<T: ?Sized> cmp::Ord for IdIterator<T> { fn cmp(&self, other: &Self) -> cmp::Ordering { (self.start, self.end).cmp(&(other.start, other.end)) } } impl<T: ?Sized> cmp::PartialEq for IdIterator<T> { fn eq(&self, other: &Self) -> bool { (self.start, self.end).eq(&(other.start, other.end)) } } impl<T: ?Sized> cmp::PartialOrd for IdIterator<T> { fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { (self.start, self.end).partial_cmp(&(other.start, other.end)) } } /// A Range represents a start and end position in a buffer. /// /// Note: the `Range` does not know which buffer it indexes in. /// /// Note: a `Range` cannot index past 4GB. #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct Range { offset: u32, length: u32, } impl Range { /// Creates a new `Range` from a start position and length. /// /// In Debug, it is checked that the end position will not exceed 4GB. pub fn new(offset: usize, length: usize) -> Range { debug_assert!(offset <= std::u32::MAX as usize); debug_assert!(length <= std::u32::MAX as usize); debug_assert!(offset <= (std::u32::MAX as usize - length)); Range { offset: offset as u32, length: length as u32 } } /// Creates a new `Range` from a start and end position. /// /// As the name implies, this creates a half-open range, similar to `start..end`. pub fn half_open(start: u32, end: u32) -> Range { debug_assert!(start <= end); Range { offset: start, length: end - start } } /// Returns the start position of the range. pub fn offset(self) -> usize { self.offset as usize } /// Returns the end position of the range (excluded). pub fn end_offset(self) -> usize { self.offset() + self.length() } /// Returns the length of the range. pub fn length(self) -> usize { self.length as usize } /// Shifts range to the left. pub fn shift_left(self, n: usize) -> Range { self.shift_to(self.offset() - n) } /// Shifts range to the right. pub fn shift_right(self, n: usize) -> Range { self.shift_to(self.offset() + n) }
} /// Skips n from the left. pub fn skip_left(self, n: usize) -> Range { Range { offset: self.offset + (n as u32), length: self.length - (n as u32), } } /// Skips n from the right. pub fn skip_right(self, n: usize) -> Range { Range { offset: self.offset, length: self.length - (n as u32), } } /// Extend one range with another, the resulting range spans both ranges, /// and in the case they were discontiguous also spans the interval. pub fn extend(self, other: Range) -> Range { if self.offset > other.offset { other.extend(self) } else if self.end_offset() >= other.end_offset() { self } else { Range { offset: self.offset, length: (other.end_offset() - self.offset()) as u32 } } } } impl fmt::Debug for Range { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "{}@{}", self.length, self.offset) } } impl Default for Range { fn default() -> Range { Range::new(0, 0) } } impl fmt::Display for Range { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "{}@{}", self.length, self.offset) } } impl ops::Index<Range> for [u8] { type Output = [u8]; fn index(&self, index: Range) -> &[u8] { &self[index.offset()..index.end_offset()] } } /// A Slice of bytes, printed more pleasantly #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct Slice<'a>(pub &'a [u8]); impl<'a> Slice<'a> { /// Returns true if empty, false otherwise. pub fn is_empty(&self) -> bool { self.0.is_empty() } /// Returns the length of the slice. pub fn len(&self) -> usize { self.0.len() } /// Returns the byte at the indicated position, or None if it is invalid. pub fn get(&self, pos: usize) -> Option<&u8> { self.0.get(pos) } } impl<'a> fmt::Debug for Slice<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "{}", self) } } impl<'a> fmt::Display for Slice<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { let mut start = 0; while start < self.0.len() { let end = self.0[start..].iter().position(|&b| b < 32 || b > 126) .unwrap_or(self.len()); f.write_str( std::str::from_utf8(&self.0[start..end]).expect("Valid UTF-8") )?; start = end; let end = self.0[start..].iter().position(|&b| b >= 32 && b <= 126) .unwrap_or(self.len()); for &byte in &self.0[start..end] { write!(f, "{{0x{:X}}}", byte)?; } start = end; } Ok(()) } } /// Span pub trait Span { /// Returns the Range spanned by the element. fn span(&self) -> Range; } /// A Store trait, to abstract over the actual storage of individual elements. pub trait Store<T, I = Id<T>> { /// Returns the number of items. fn len(&self) -> usize; /// Returns a copy of the item. fn get(&self, id: I) -> T; /// Returns the range of the item. fn get_range(&self, id: I) -> Range; /// Pushes an item. fn push(&mut self, item: T, range: Range) -> I; } /// A MultiStore trait, to abstract over the actual storage of slices. pub trait MultiStore<T, I = Id<[T]>> { /// Returns the slice of items. fn get_slice(&self, id: I) -> &[T]; // TODO(matthieum): A more efficient interface would take IntoIterator<Item = T> /// Pushes a slice of element. 
fn push_slice(&mut self, items: &[T]) -> I; } // // Tests // #[cfg(test)] mod tests { use super::{CoreId, Range}; #[test] fn core_id_roundtrip() { for i in 0..10 { assert_eq!(i, CoreId::new(i).raw()); } } #[test] fn core_id_default() { let core: CoreId = Default::default(); assert_eq!(std::u32::MAX - 1, core.raw()); } #[test] #[should_panic] fn core_id_reserved_size_optimization() { CoreId::new(std::u32::MAX); } #[test] fn range_extend_contiguous() { let result = Range::new(3, 4).extend(Range::new(7, 2)); assert_eq!(result, Range::new(3, 6)); } #[test] fn range_extend_separated() { let result = Range::new(3, 4).extend(Range::new(11, 3)); assert_eq!(result, Range::new(3, 11)); } #[test] fn range_extend_partially_overlapping() { let result = Range::new(3, 4).extend(Range::new(5, 3)); assert_eq!(result, Range::new(3, 5)); } #[test] fn range_extend_totally_overlapping() { let result = Range::new(3, 4).extend(Range::new(5, 2)); assert_eq!(result, Range::new(3, 4)); } #[test] fn range_extend_reversed() { let result = Range::new(5, 3).extend(Range::new(3, 4)); assert_eq!(result, Range::new(3, 5)); } }
/// Shifts range to specified offset. pub fn shift_to(self, offset: usize) -> Range { Range { offset: offset as u32, ..self }
random_line_split
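The `shift_to` middle above, together with the surrounding `Range` methods, is easiest to sanity-check with a few assertions. The sketch below assumes the com.rs listing is available as a module named `com` (the module name is an assumption made for this example); the expected values follow directly from the method bodies and the `range_extend_*` tests in the listing.

```rust
// Usage sketch for Range from the com.rs listing; the `com` module name
// is an assumption made for this example.
use com::Range;

fn main() {
    let r = Range::new(3, 4);                        // offset 3, length 4 => bytes 3..7
    assert_eq!(r.end_offset(), 7);
    assert_eq!(r.shift_to(10), Range::new(10, 4));   // same length, new offset
    assert_eq!(r.shift_right(2), Range::new(5, 4));  // shift_to(offset + 2)
    assert_eq!(r.skip_left(1), Range::new(4, 3));    // drop one byte at the front
    // extend spans both ranges, including any gap between them.
    assert_eq!(r.extend(Range::new(11, 3)), Range::new(3, 11));
    // Range also indexes [u8] directly via the ops::Index impl.
    let bytes = b"hello, world";
    assert_eq!(&bytes[..][Range::new(7, 5)], &b"world"[..]);
}
```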
com.rs
//! Common utilities //! //! A standard vocabulary used throughout the code. use std::{self, cmp, convert, fmt, hash, iter, marker, num, ops, sync}; use crate::basic::sea::TableIndex; /// A fragment of source code. #[derive(Clone)] pub struct CodeFragment(sync::Arc<Vec<u8>>); impl CodeFragment { /// Creates a new `CodeFragment`. pub fn new(code: Vec<u8>) -> CodeFragment { CodeFragment(sync::Arc::new(code)) } } impl ops::Deref for CodeFragment { type Target = [u8]; fn deref(&self) -> &[u8] { &*self.0 } } /// The core implementation of a u32-based ID. /// /// The ID can be any number in the `[0, u32::MAX - 2]` range: /// - `u32::MAX` is reserved to enable size optimizations (Option). /// - `u32::MAX - 1` is reserved to denote Default constructed IDs. /// /// IDs built on top of `CoreId` may reserve further numbers for their own ends. #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct CoreId(num::NonZeroU32); impl CoreId { /// Creates a new instance. /// /// # Panics /// /// Panics if the integer provided is `u32::MAX`. pub fn new(id: u32) -> CoreId { if id == std::u32::MAX { panic!("Unsuitable ID: {}", id); } unsafe { CoreId(num::NonZeroU32::new_unchecked(id + 1)) } } /// Get the raw ID. pub fn
(&self) -> u32 { self.0.get() - 1 } } impl fmt::Debug for CoreId { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "{}", self.raw()) } } impl Default for CoreId { fn default() -> CoreId { unsafe { CoreId(num::NonZeroU32::new_unchecked(std::u32::MAX)) } } } impl fmt::Display for CoreId { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "{}", self.raw()) } } impl convert::From<CoreId> for u32 { fn from(core_id: CoreId) -> u32 { core_id.raw() } } /// An Id implementation based on CoreId. /// /// It contains a default empty state, to represent empty streams. // #[manual(Clone, Copy, Debug, Default, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct Id<T: ?Sized>(CoreId, marker::PhantomData<*const T>); impl<T: ?Sized> Id<T> { /// Creates a new instance. pub fn new(id: u32) -> Self { Id(CoreId::new(id), marker::PhantomData) } /// Creates an empty instance. pub fn empty() -> Self { Self::new(std::u32::MAX - 2) } /// Returns whether the corresponding list is empty. pub fn is_empty(&self) -> bool { *self == Self::empty() } /// Returns the inner ID. pub fn value(&self) -> u32 { self.0.raw() } } impl<T: ?Sized> Clone for Id<T> { fn clone(&self) -> Self { *self } } impl<T: ?Sized> Copy for Id<T> {} impl<T: ?Sized> fmt::Debug for Id<T> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { const MODULE_OFFSET: usize = 1usize << 30; const REPOSITORY_OFFSET: usize = 1usize << 31; // More compact representation for `{:#?}`. // // FIXME(matthieum): consider adding `std::intrinsics::type_name<T>()` // once it stabilizes. if *self == Default::default() { write!(f, "Id(default)") } else if *self == Self::empty() { write!(f, "Id(empty)") } else { match self.index() { index if index < MODULE_OFFSET => write!(f, "Id({})", index), index if index < REPOSITORY_OFFSET => write!(f, "Id(M-{})", index - MODULE_OFFSET), index => write!(f, "Id(R-{})", index - REPOSITORY_OFFSET), } } } } impl<T: ?Sized> Default for Id<T> { fn default() -> Self { Id(Default::default(), marker::PhantomData) } } impl<T: ?Sized> cmp::Eq for Id<T> {} impl<T: ?Sized> hash::Hash for Id<T> { fn hash<H: hash::Hasher>(&self, state: &mut H) { self.0.hash(state); } } impl<T: ?Sized> cmp::Ord for Id<T> { fn cmp(&self, other: &Self) -> cmp::Ordering { self.0.cmp(&other.0) } } impl<T: ?Sized> cmp::PartialEq for Id<T> { fn eq(&self, other: &Self) -> bool { self.0.eq(&other.0) } } impl<T: ?Sized> cmp::PartialOrd for Id<T> { fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { self.0.partial_cmp(&other.0) } } impl<T: ?Sized> TableIndex for Id<T> { fn from_index(index: usize) -> Self { Id::new(index as u32) } fn index(&self) -> usize { self.value() as usize } } /// IdIterator. /// /// An Iterator over consecutive IDs. // #[manual(Clone, Copy, Debug, Default, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct IdIterator<T: ?Sized> { start: u32, end: u32, _marker: marker::PhantomData<*const T>, } impl<T: ?Sized> IdIterator<T> { /// Creates an instance. pub fn new(start: u32, end: u32) -> Self { IdIterator { start, end, _marker: marker::PhantomData } } } impl<T: ?Sized> Clone for IdIterator<T> { fn clone(&self) -> Self { *self } } impl<T: ?Sized> Copy for IdIterator<T> {} impl<T: ?Sized> fmt::Debug for IdIterator<T> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { // FIXME(matthieum): consider adding `std::intrinsics::type_name<T>()` // once it stabilizes. 
write!(f, "IdIterator({}, {})", self.start, self.end) } } impl<T: ?Sized> Default for IdIterator<T> { fn default() -> Self { IdIterator::new(0, 0) } } impl<T: ?Sized> cmp::Eq for IdIterator<T> {} impl<T: ?Sized> hash::Hash for IdIterator<T> { fn hash<H: hash::Hasher>(&self, state: &mut H) { self.start.hash(state); self.end.hash(state); } } impl<T: ?Sized> iter::Iterator for IdIterator<T> { type Item = Id<T>; fn next(&mut self) -> Option<Id<T>> { if self.start < self.end { let result = Id::new(self.start); self.start += 1; Some(result) } else { None } } fn size_hint(&self) -> (usize, Option<usize>) { let difference = self.len(); (difference, Some(difference)) } fn count(self) -> usize { self.len() } fn last(self) -> Option<Id<T>> { if self.start < self.end { Some(Id::new(self.end - 1)) } else { None } } fn nth(&mut self, n: usize) -> Option<Id<T>> { let result = self.start.saturating_add(n as u32); if result < self.end { self.start = result + 1; Some(Id::new(result)) } else { self.start = self.end; None } } fn max(self) -> Option<Id<T>> { self.last() } fn min(mut self) -> Option<Id<T>> { self.next() } } impl<T: ?Sized> iter::DoubleEndedIterator for IdIterator<T> { fn next_back(&mut self) -> Option<Id<T>> { if self.start < self.end { self.end -= 1; Some(Id::new(self.end)) } else { None } } } impl<T: ?Sized> iter::ExactSizeIterator for IdIterator<T> { fn len(&self) -> usize { self.end.saturating_sub(self.start) as usize } } impl<T: ?Sized> cmp::Ord for IdIterator<T> { fn cmp(&self, other: &Self) -> cmp::Ordering { (self.start, self.end).cmp(&(other.start, other.end)) } } impl<T: ?Sized> cmp::PartialEq for IdIterator<T> { fn eq(&self, other: &Self) -> bool { (self.start, self.end).eq(&(other.start, other.end)) } } impl<T: ?Sized> cmp::PartialOrd for IdIterator<T> { fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { (self.start, self.end).partial_cmp(&(other.start, other.end)) } } /// A Range represents a start and end position in a buffer. /// /// Note: the `Range` does not know which buffer it indexes in. /// /// Note: a `Range` cannot index past 4GB. #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct Range { offset: u32, length: u32, } impl Range { /// Creates a new `Range` from a start position and length. /// /// In Debug, it is checked that the end position will not exceed 4GB. pub fn new(offset: usize, length: usize) -> Range { debug_assert!(offset <= std::u32::MAX as usize); debug_assert!(length <= std::u32::MAX as usize); debug_assert!(offset <= (std::u32::MAX as usize - length)); Range { offset: offset as u32, length: length as u32 } } /// Creates a new `Range` from a start and end position. /// /// As the name implies, this creates a half-open range, similar to `start..end`. pub fn half_open(start: u32, end: u32) -> Range { debug_assert!(start <= end); Range { offset: start, length: end - start } } /// Returns the start position of the range. pub fn offset(self) -> usize { self.offset as usize } /// Returns the end position of the range (excluded). pub fn end_offset(self) -> usize { self.offset() + self.length() } /// Returns the length of the range. pub fn length(self) -> usize { self.length as usize } /// Shifts range to the left. pub fn shift_left(self, n: usize) -> Range { self.shift_to(self.offset() - n) } /// Shifts range to the right. pub fn shift_right(self, n: usize) -> Range { self.shift_to(self.offset() + n) } /// Shifts range to specified offset. 
pub fn shift_to(self, offset: usize) -> Range { Range { offset: offset as u32, ..self } } /// Skips n from the left. pub fn skip_left(self, n: usize) -> Range { Range { offset: self.offset + (n as u32), length: self.length - (n as u32), } } /// Skips n from the right. pub fn skip_right(self, n: usize) -> Range { Range { offset: self.offset, length: self.length - (n as u32), } } /// Extend one range with another, the resulting range spans both ranges, /// and in the case they were discontiguous also spans the interval. pub fn extend(self, other: Range) -> Range { if self.offset > other.offset { other.extend(self) } else if self.end_offset() >= other.end_offset() { self } else { Range { offset: self.offset, length: (other.end_offset() - self.offset()) as u32 } } } } impl fmt::Debug for Range { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "{}@{}", self.length, self.offset) } } impl Default for Range { fn default() -> Range { Range::new(0, 0) } } impl fmt::Display for Range { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "{}@{}", self.length, self.offset) } } impl ops::Index<Range> for [u8] { type Output = [u8]; fn index(&self, index: Range) -> &[u8] { &self[index.offset()..index.end_offset()] } } /// A Slice of bytes, printed more pleasantly #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct Slice<'a>(pub &'a [u8]); impl<'a> Slice<'a> { /// Returns true if empty, false otherwise. pub fn is_empty(&self) -> bool { self.0.is_empty() } /// Returns the length of the slice. pub fn len(&self) -> usize { self.0.len() } /// Returns the byte at the indicated position, or None if it is invalid. pub fn get(&self, pos: usize) -> Option<&u8> { self.0.get(pos) } } impl<'a> fmt::Debug for Slice<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "{}", self) } } impl<'a> fmt::Display for Slice<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { let mut start = 0; while start < self.0.len() { let end = self.0[start..].iter().position(|&b| b < 32 || b > 126) .unwrap_or(self.len()); f.write_str( std::str::from_utf8(&self.0[start..end]).expect("Valid UTF-8") )?; start = end; let end = self.0[start..].iter().position(|&b| b >= 32 && b <= 126) .unwrap_or(self.len()); for &byte in &self.0[start..end] { write!(f, "{{0x{:X}}}", byte)?; } start = end; } Ok(()) } } /// Span pub trait Span { /// Returns the Range spanned by the element. fn span(&self) -> Range; } /// A Store trait, to abstract over the actual storage of individual elements. pub trait Store<T, I = Id<T>> { /// Returns the number of items. fn len(&self) -> usize; /// Returns a copy of the item. fn get(&self, id: I) -> T; /// Returns the range of the item. fn get_range(&self, id: I) -> Range; /// Pushes an item. fn push(&mut self, item: T, range: Range) -> I; } /// A MultiStore trait, to abstract over the actual storage of slices. pub trait MultiStore<T, I = Id<[T]>> { /// Returns the slice of items. fn get_slice(&self, id: I) -> &[T]; // TODO(matthieum): A more efficient interface would take IntoIterator<Item = T> /// Pushes a slice of element. 
fn push_slice(&mut self, items: &[T]) -> I; } // // Tests // #[cfg(test)] mod tests { use super::{CoreId, Range}; #[test] fn core_id_roundtrip() { for i in 0..10 { assert_eq!(i, CoreId::new(i).raw()); } } #[test] fn core_id_default() { let core: CoreId = Default::default(); assert_eq!(std::u32::MAX - 1, core.raw()); } #[test] #[should_panic] fn core_id_reserved_size_optimization() { CoreId::new(std::u32::MAX); } #[test] fn range_extend_contiguous() { let result = Range::new(3, 4).extend(Range::new(7, 2)); assert_eq!(result, Range::new(3, 6)); } #[test] fn range_extend_separated() { let result = Range::new(3, 4).extend(Range::new(11, 3)); assert_eq!(result, Range::new(3, 11)); } #[test] fn range_extend_partially_overlapping() { let result = Range::new(3, 4).extend(Range::new(5, 3)); assert_eq!(result, Range::new(3, 5)); } #[test] fn range_extend_totally_overlapping() { let result = Range::new(3, 4).extend(Range::new(5, 2)); assert_eq!(result, Range::new(3, 4)); } #[test] fn range_extend_reversed() { let result = Range::new(5, 3).extend(Range::new(3, 4)); assert_eq!(result, Range::new(3, 5)); } }
raw
identifier_name
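The `raw` accessor filled in above is one half of the `CoreId` niche trick documented in the listing: storing `id + 1` in a `NonZeroU32` keeps zero free, so `Option<CoreId>` costs no extra space over a bare `u32`. A minimal, self-contained sketch of that idea (a safe `NonZeroU32::new` stands in for the listing's `new_unchecked`):

```rust
use std::num::NonZeroU32;

// Minimal sketch of the CoreId niche optimization from the com.rs listing:
// storing `id + 1` in a NonZeroU32 keeps 0 free, so Option<CoreId> is the
// same size as a bare u32.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct CoreId(NonZeroU32);

impl CoreId {
    fn new(id: u32) -> CoreId {
        assert!(id != u32::MAX, "Unsuitable ID: {}", id);
        // `id + 1` cannot overflow or be zero once u32::MAX is rejected.
        CoreId(NonZeroU32::new(id + 1).expect("id + 1 is never zero here"))
    }
    fn raw(self) -> u32 { self.0.get() - 1 }
}

fn main() {
    assert_eq!(CoreId::new(42).raw(), 42);
    // The niche left by NonZeroU32 lets Option<CoreId> fit in 4 bytes,
    // which is the size optimization the original doc comment reserves
    // u32::MAX for.
    assert_eq!(std::mem::size_of::<Option<CoreId>>(), std::mem::size_of::<u32>());
    println!("niche optimization holds");
}
```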
com.rs
//! Common utilities //! //! A standard vocabulary used throughout the code. use std::{self, cmp, convert, fmt, hash, iter, marker, num, ops, sync}; use crate::basic::sea::TableIndex; /// A fragment of source code. #[derive(Clone)] pub struct CodeFragment(sync::Arc<Vec<u8>>); impl CodeFragment { /// Creates a new `CodeFragment`. pub fn new(code: Vec<u8>) -> CodeFragment { CodeFragment(sync::Arc::new(code)) } } impl ops::Deref for CodeFragment { type Target = [u8]; fn deref(&self) -> &[u8] { &*self.0 } } /// The core implementation of a u32-based ID. /// /// The ID can be any number in the `[0, u32::MAX - 2]` range: /// - `u32::MAX` is reserved to enable size optimizations (Option). /// - `u32::MAX - 1` is reserved to denote Default constructed IDs. /// /// IDs built on top of `CoreId` may reserve further numbers for their own ends. #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct CoreId(num::NonZeroU32); impl CoreId { /// Creates a new instance. /// /// # Panics /// /// Panics if the integer provided is `u32::MAX`. pub fn new(id: u32) -> CoreId { if id == std::u32::MAX { panic!("Unsuitable ID: {}", id); } unsafe { CoreId(num::NonZeroU32::new_unchecked(id + 1)) } } /// Get the raw ID. pub fn raw(&self) -> u32 { self.0.get() - 1 } } impl fmt::Debug for CoreId { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "{}", self.raw()) } } impl Default for CoreId { fn default() -> CoreId { unsafe { CoreId(num::NonZeroU32::new_unchecked(std::u32::MAX)) } } } impl fmt::Display for CoreId { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "{}", self.raw()) } } impl convert::From<CoreId> for u32 { fn from(core_id: CoreId) -> u32 { core_id.raw() } } /// An Id implementation based on CoreId. /// /// It contains a default empty state, to represent empty streams. // #[manual(Clone, Copy, Debug, Default, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct Id<T: ?Sized>(CoreId, marker::PhantomData<*const T>); impl<T: ?Sized> Id<T> { /// Creates a new instance. pub fn new(id: u32) -> Self { Id(CoreId::new(id), marker::PhantomData) } /// Creates an empty instance. pub fn empty() -> Self { Self::new(std::u32::MAX - 2) } /// Returns whether the corresponding list is empty. pub fn is_empty(&self) -> bool { *self == Self::empty() } /// Returns the inner ID. pub fn value(&self) -> u32 { self.0.raw() } } impl<T: ?Sized> Clone for Id<T> { fn clone(&self) -> Self { *self } } impl<T: ?Sized> Copy for Id<T> {} impl<T: ?Sized> fmt::Debug for Id<T> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { const MODULE_OFFSET: usize = 1usize << 30; const REPOSITORY_OFFSET: usize = 1usize << 31; // More compact representation for `{:#?}`. // // FIXME(matthieum): consider adding `std::intrinsics::type_name<T>()` // once it stabilizes. 
if *self == Default::default() { write!(f, "Id(default)") } else if *self == Self::empty() { write!(f, "Id(empty)") } else { match self.index() { index if index < MODULE_OFFSET => write!(f, "Id({})", index), index if index < REPOSITORY_OFFSET => write!(f, "Id(M-{})", index - MODULE_OFFSET), index => write!(f, "Id(R-{})", index - REPOSITORY_OFFSET), } } } } impl<T: ?Sized> Default for Id<T> { fn default() -> Self { Id(Default::default(), marker::PhantomData) } } impl<T: ?Sized> cmp::Eq for Id<T> {} impl<T: ?Sized> hash::Hash for Id<T> { fn hash<H: hash::Hasher>(&self, state: &mut H) { self.0.hash(state); } } impl<T: ?Sized> cmp::Ord for Id<T> { fn cmp(&self, other: &Self) -> cmp::Ordering { self.0.cmp(&other.0) } } impl<T: ?Sized> cmp::PartialEq for Id<T> { fn eq(&self, other: &Self) -> bool { self.0.eq(&other.0) } } impl<T: ?Sized> cmp::PartialOrd for Id<T> { fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { self.0.partial_cmp(&other.0) } } impl<T: ?Sized> TableIndex for Id<T> { fn from_index(index: usize) -> Self { Id::new(index as u32) } fn index(&self) -> usize { self.value() as usize } } /// IdIterator. /// /// An Iterator over consecutive IDs. // #[manual(Clone, Copy, Debug, Default, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct IdIterator<T: ?Sized> { start: u32, end: u32, _marker: marker::PhantomData<*const T>, } impl<T: ?Sized> IdIterator<T> { /// Creates an instance. pub fn new(start: u32, end: u32) -> Self { IdIterator { start, end, _marker: marker::PhantomData } } } impl<T: ?Sized> Clone for IdIterator<T> { fn clone(&self) -> Self { *self } } impl<T: ?Sized> Copy for IdIterator<T> {} impl<T: ?Sized> fmt::Debug for IdIterator<T> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { // FIXME(matthieum): consider adding `std::intrinsics::type_name<T>()` // once it stabilizes. 
write!(f, "IdIterator({}, {})", self.start, self.end) } } impl<T: ?Sized> Default for IdIterator<T> { fn default() -> Self { IdIterator::new(0, 0) } } impl<T: ?Sized> cmp::Eq for IdIterator<T> {} impl<T: ?Sized> hash::Hash for IdIterator<T> { fn hash<H: hash::Hasher>(&self, state: &mut H) { self.start.hash(state); self.end.hash(state); } } impl<T: ?Sized> iter::Iterator for IdIterator<T> { type Item = Id<T>; fn next(&mut self) -> Option<Id<T>> { if self.start < self.end { let result = Id::new(self.start); self.start += 1; Some(result) } else { None } } fn size_hint(&self) -> (usize, Option<usize>) { let difference = self.len(); (difference, Some(difference)) } fn count(self) -> usize { self.len() } fn last(self) -> Option<Id<T>> { if self.start < self.end { Some(Id::new(self.end - 1)) } else { None } } fn nth(&mut self, n: usize) -> Option<Id<T>> { let result = self.start.saturating_add(n as u32); if result < self.end { self.start = result + 1; Some(Id::new(result)) } else { self.start = self.end; None } } fn max(self) -> Option<Id<T>> { self.last() } fn min(mut self) -> Option<Id<T>> { self.next() } } impl<T: ?Sized> iter::DoubleEndedIterator for IdIterator<T> { fn next_back(&mut self) -> Option<Id<T>> { if self.start < self.end { self.end -= 1; Some(Id::new(self.end)) } else { None } } } impl<T: ?Sized> iter::ExactSizeIterator for IdIterator<T> { fn len(&self) -> usize { self.end.saturating_sub(self.start) as usize } } impl<T: ?Sized> cmp::Ord for IdIterator<T> { fn cmp(&self, other: &Self) -> cmp::Ordering { (self.start, self.end).cmp(&(other.start, other.end)) } } impl<T: ?Sized> cmp::PartialEq for IdIterator<T> { fn eq(&self, other: &Self) -> bool { (self.start, self.end).eq(&(other.start, other.end)) } } impl<T: ?Sized> cmp::PartialOrd for IdIterator<T> { fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { (self.start, self.end).partial_cmp(&(other.start, other.end)) } } /// A Range represents a start and end position in a buffer. /// /// Note: the `Range` does not know which buffer it indexes in. /// /// Note: a `Range` cannot index past 4GB. #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct Range { offset: u32, length: u32, } impl Range { /// Creates a new `Range` from a start position and length. /// /// In Debug, it is checked that the end position will not exceed 4GB. pub fn new(offset: usize, length: usize) -> Range { debug_assert!(offset <= std::u32::MAX as usize); debug_assert!(length <= std::u32::MAX as usize); debug_assert!(offset <= (std::u32::MAX as usize - length)); Range { offset: offset as u32, length: length as u32 } } /// Creates a new `Range` from a start and end position. /// /// As the name implies, this creates a half-open range, similar to `start..end`. pub fn half_open(start: u32, end: u32) -> Range { debug_assert!(start <= end); Range { offset: start, length: end - start } } /// Returns the start position of the range. pub fn offset(self) -> usize { self.offset as usize } /// Returns the end position of the range (excluded). pub fn end_offset(self) -> usize { self.offset() + self.length() } /// Returns the length of the range. pub fn length(self) -> usize { self.length as usize } /// Shifts range to the left. pub fn shift_left(self, n: usize) -> Range { self.shift_to(self.offset() - n) } /// Shifts range to the right. pub fn shift_right(self, n: usize) -> Range { self.shift_to(self.offset() + n) } /// Shifts range to specified offset. 
pub fn shift_to(self, offset: usize) -> Range { Range { offset: offset as u32, ..self } } /// Skips n from the left. pub fn skip_left(self, n: usize) -> Range { Range { offset: self.offset + (n as u32), length: self.length - (n as u32), } } /// Skips n from the right. pub fn skip_right(self, n: usize) -> Range { Range { offset: self.offset, length: self.length - (n as u32), } } /// Extend one range with another, the resulting range spans both ranges, /// and in the case they were discontiguous also spans the interval. pub fn extend(self, other: Range) -> Range { if self.offset > other.offset { other.extend(self) } else if self.end_offset() >= other.end_offset() { self } else { Range { offset: self.offset, length: (other.end_offset() - self.offset()) as u32 } } } } impl fmt::Debug for Range { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "{}@{}", self.length, self.offset) } } impl Default for Range { fn default() -> Range { Range::new(0, 0) } } impl fmt::Display for Range { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "{}@{}", self.length, self.offset) } } impl ops::Index<Range> for [u8] { type Output = [u8]; fn index(&self, index: Range) -> &[u8] { &self[index.offset()..index.end_offset()] } } /// A Slice of bytes, printed more pleasantly #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct Slice<'a>(pub &'a [u8]); impl<'a> Slice<'a> { /// Returns true if empty, false otherwise. pub fn is_empty(&self) -> bool { self.0.is_empty() } /// Returns the length of the slice. pub fn len(&self) -> usize { self.0.len() } /// Returns the byte at the indicated position, or None if it is invalid. pub fn get(&self, pos: usize) -> Option<&u8> { self.0.get(pos) } } impl<'a> fmt::Debug for Slice<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "{}", self) } } impl<'a> fmt::Display for Slice<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { let mut start = 0; while start < self.0.len() { let end = self.0[start..].iter().position(|&b| b < 32 || b > 126) .unwrap_or(self.len()); f.write_str( std::str::from_utf8(&self.0[start..end]).expect("Valid UTF-8") )?; start = end; let end = self.0[start..].iter().position(|&b| b >= 32 && b <= 126) .unwrap_or(self.len()); for &byte in &self.0[start..end] { write!(f, "{{0x{:X}}}", byte)?; } start = end; } Ok(()) } } /// Span pub trait Span { /// Returns the Range spanned by the element. fn span(&self) -> Range; } /// A Store trait, to abstract over the actual storage of individual elements. pub trait Store<T, I = Id<T>> { /// Returns the number of items. fn len(&self) -> usize; /// Returns a copy of the item. fn get(&self, id: I) -> T; /// Returns the range of the item. fn get_range(&self, id: I) -> Range; /// Pushes an item. fn push(&mut self, item: T, range: Range) -> I; } /// A MultiStore trait, to abstract over the actual storage of slices. pub trait MultiStore<T, I = Id<[T]>> { /// Returns the slice of items. fn get_slice(&self, id: I) -> &[T]; // TODO(matthieum): A more efficient interface would take IntoIterator<Item = T> /// Pushes a slice of element. 
fn push_slice(&mut self, items: &[T]) -> I; } // // Tests // #[cfg(test)] mod tests { use super::{CoreId, Range}; #[test] fn core_id_roundtrip() { for i in 0..10 { assert_eq!(i, CoreId::new(i).raw()); } } #[test] fn core_id_default() { let core: CoreId = Default::default(); assert_eq!(std::u32::MAX - 1, core.raw()); } #[test] #[should_panic] fn core_id_reserved_size_optimization() { CoreId::new(std::u32::MAX); } #[test] fn range_extend_contiguous() { let result = Range::new(3, 4).extend(Range::new(7, 2)); assert_eq!(result, Range::new(3, 6)); } #[test] fn range_extend_separated() { let result = Range::new(3, 4).extend(Range::new(11, 3)); assert_eq!(result, Range::new(3, 11)); } #[test] fn range_extend_partially_overlapping() { let result = Range::new(3, 4).extend(Range::new(5, 3)); assert_eq!(result, Range::new(3, 5)); } #[test] fn range_extend_totally_overlapping()
#[test] fn range_extend_reversed() { let result = Range::new(5, 3).extend(Range::new(3, 4)); assert_eq!(result, Range::new(3, 5)); } }
{ let result = Range::new(3, 4).extend(Range::new(5, 2)); assert_eq!(result, Range::new(3, 4)); }
identifier_body
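Beyond the `Range` tests whose body was reconstructed above, the `IdIterator` in the listing has hand-written `nth`, `next_back`, and `len` implementations whose behavior is worth pinning down with an example. This assumes the `Id` and `IdIterator` types from the com.rs listing are in scope; the expected values follow from the implementations shown.

```rust
// Assumes Id and IdIterator from the com.rs listing are in scope.
fn main() {
    let mut it: IdIterator<str> = IdIterator::new(3, 7); // yields Ids 3, 4, 5, 6
    assert_eq!(it.len(), 4);                              // ExactSizeIterator
    assert_eq!(it.next().map(|id| id.value()), Some(3));
    assert_eq!(it.next_back().map(|id| id.value()), Some(6)); // DoubleEndedIterator
    assert_eq!(it.clone().count(), 2);                    // {4, 5} remain
    assert_eq!(it.nth(1).map(|id| id.value()), Some(5));  // skips 4, consumes 5
    assert_eq!(it.next(), None);
}
```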
com.rs
//! Common utilities //! //! A standard vocabulary used throughout the code. use std::{self, cmp, convert, fmt, hash, iter, marker, num, ops, sync}; use crate::basic::sea::TableIndex; /// A fragment of source code. #[derive(Clone)] pub struct CodeFragment(sync::Arc<Vec<u8>>); impl CodeFragment { /// Creates a new `CodeFragment`. pub fn new(code: Vec<u8>) -> CodeFragment { CodeFragment(sync::Arc::new(code)) } } impl ops::Deref for CodeFragment { type Target = [u8]; fn deref(&self) -> &[u8] { &*self.0 } } /// The core implementation of a u32-based ID. /// /// The ID can be any number in the `[0, u32::MAX - 2]` range: /// - `u32::MAX` is reserved to enable size optimizations (Option). /// - `u32::MAX - 1` is reserved to denote Default constructed IDs. /// /// IDs built on top of `CoreId` may reserve further numbers for their own ends. #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct CoreId(num::NonZeroU32); impl CoreId { /// Creates a new instance. /// /// # Panics /// /// Panics if the integer provided is `u32::MAX`. pub fn new(id: u32) -> CoreId { if id == std::u32::MAX { panic!("Unsuitable ID: {}", id); } unsafe { CoreId(num::NonZeroU32::new_unchecked(id + 1)) } } /// Get the raw ID. pub fn raw(&self) -> u32 { self.0.get() - 1 } } impl fmt::Debug for CoreId { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "{}", self.raw()) } } impl Default for CoreId { fn default() -> CoreId { unsafe { CoreId(num::NonZeroU32::new_unchecked(std::u32::MAX)) } } } impl fmt::Display for CoreId { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "{}", self.raw()) } } impl convert::From<CoreId> for u32 { fn from(core_id: CoreId) -> u32 { core_id.raw() } } /// An Id implementation based on CoreId. /// /// It contains a default empty state, to represent empty streams. // #[manual(Clone, Copy, Debug, Default, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct Id<T: ?Sized>(CoreId, marker::PhantomData<*const T>); impl<T: ?Sized> Id<T> { /// Creates a new instance. pub fn new(id: u32) -> Self { Id(CoreId::new(id), marker::PhantomData) } /// Creates an empty instance. pub fn empty() -> Self { Self::new(std::u32::MAX - 2) } /// Returns whether the corresponding list is empty. pub fn is_empty(&self) -> bool { *self == Self::empty() } /// Returns the inner ID. pub fn value(&self) -> u32 { self.0.raw() } } impl<T: ?Sized> Clone for Id<T> { fn clone(&self) -> Self { *self } } impl<T: ?Sized> Copy for Id<T> {} impl<T: ?Sized> fmt::Debug for Id<T> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { const MODULE_OFFSET: usize = 1usize << 30; const REPOSITORY_OFFSET: usize = 1usize << 31; // More compact representation for `{:#?}`. // // FIXME(matthieum): consider adding `std::intrinsics::type_name<T>()` // once it stabilizes. 
if *self == Default::default() { write!(f, "Id(default)") } else if *self == Self::empty() { write!(f, "Id(empty)") } else { match self.index() { index if index < MODULE_OFFSET => write!(f, "Id({})", index), index if index < REPOSITORY_OFFSET => write!(f, "Id(M-{})", index - MODULE_OFFSET), index => write!(f, "Id(R-{})", index - REPOSITORY_OFFSET), } } } } impl<T: ?Sized> Default for Id<T> { fn default() -> Self { Id(Default::default(), marker::PhantomData) } } impl<T: ?Sized> cmp::Eq for Id<T> {} impl<T: ?Sized> hash::Hash for Id<T> { fn hash<H: hash::Hasher>(&self, state: &mut H) { self.0.hash(state); } } impl<T: ?Sized> cmp::Ord for Id<T> { fn cmp(&self, other: &Self) -> cmp::Ordering { self.0.cmp(&other.0) } } impl<T: ?Sized> cmp::PartialEq for Id<T> { fn eq(&self, other: &Self) -> bool { self.0.eq(&other.0) } } impl<T: ?Sized> cmp::PartialOrd for Id<T> { fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { self.0.partial_cmp(&other.0) } } impl<T: ?Sized> TableIndex for Id<T> { fn from_index(index: usize) -> Self { Id::new(index as u32) } fn index(&self) -> usize { self.value() as usize } } /// IdIterator. /// /// An Iterator over consecutive IDs. // #[manual(Clone, Copy, Debug, Default, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct IdIterator<T: ?Sized> { start: u32, end: u32, _marker: marker::PhantomData<*const T>, } impl<T: ?Sized> IdIterator<T> { /// Creates an instance. pub fn new(start: u32, end: u32) -> Self { IdIterator { start, end, _marker: marker::PhantomData } } } impl<T: ?Sized> Clone for IdIterator<T> { fn clone(&self) -> Self { *self } } impl<T: ?Sized> Copy for IdIterator<T> {} impl<T: ?Sized> fmt::Debug for IdIterator<T> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { // FIXME(matthieum): consider adding `std::intrinsics::type_name<T>()` // once it stabilizes. 
write!(f, "IdIterator({}, {})", self.start, self.end) } } impl<T: ?Sized> Default for IdIterator<T> { fn default() -> Self { IdIterator::new(0, 0) } } impl<T: ?Sized> cmp::Eq for IdIterator<T> {} impl<T: ?Sized> hash::Hash for IdIterator<T> { fn hash<H: hash::Hasher>(&self, state: &mut H) { self.start.hash(state); self.end.hash(state); } } impl<T: ?Sized> iter::Iterator for IdIterator<T> { type Item = Id<T>; fn next(&mut self) -> Option<Id<T>> { if self.start < self.end { let result = Id::new(self.start); self.start += 1; Some(result) } else { None } } fn size_hint(&self) -> (usize, Option<usize>) { let difference = self.len(); (difference, Some(difference)) } fn count(self) -> usize { self.len() } fn last(self) -> Option<Id<T>> { if self.start < self.end { Some(Id::new(self.end - 1)) } else { None } } fn nth(&mut self, n: usize) -> Option<Id<T>> { let result = self.start.saturating_add(n as u32); if result < self.end { self.start = result + 1; Some(Id::new(result)) } else { self.start = self.end; None } } fn max(self) -> Option<Id<T>> { self.last() } fn min(mut self) -> Option<Id<T>> { self.next() } } impl<T: ?Sized> iter::DoubleEndedIterator for IdIterator<T> { fn next_back(&mut self) -> Option<Id<T>> { if self.start < self.end { self.end -= 1; Some(Id::new(self.end)) } else { None } } } impl<T: ?Sized> iter::ExactSizeIterator for IdIterator<T> { fn len(&self) -> usize { self.end.saturating_sub(self.start) as usize } } impl<T: ?Sized> cmp::Ord for IdIterator<T> { fn cmp(&self, other: &Self) -> cmp::Ordering { (self.start, self.end).cmp(&(other.start, other.end)) } } impl<T: ?Sized> cmp::PartialEq for IdIterator<T> { fn eq(&self, other: &Self) -> bool { (self.start, self.end).eq(&(other.start, other.end)) } } impl<T: ?Sized> cmp::PartialOrd for IdIterator<T> { fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { (self.start, self.end).partial_cmp(&(other.start, other.end)) } } /// A Range represents a start and end position in a buffer. /// /// Note: the `Range` does not know which buffer it indexes in. /// /// Note: a `Range` cannot index past 4GB. #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct Range { offset: u32, length: u32, } impl Range { /// Creates a new `Range` from a start position and length. /// /// In Debug, it is checked that the end position will not exceed 4GB. pub fn new(offset: usize, length: usize) -> Range { debug_assert!(offset <= std::u32::MAX as usize); debug_assert!(length <= std::u32::MAX as usize); debug_assert!(offset <= (std::u32::MAX as usize - length)); Range { offset: offset as u32, length: length as u32 } } /// Creates a new `Range` from a start and end position. /// /// As the name implies, this creates a half-open range, similar to `start..end`. pub fn half_open(start: u32, end: u32) -> Range { debug_assert!(start <= end); Range { offset: start, length: end - start } } /// Returns the start position of the range. pub fn offset(self) -> usize { self.offset as usize } /// Returns the end position of the range (excluded). pub fn end_offset(self) -> usize { self.offset() + self.length() } /// Returns the length of the range. pub fn length(self) -> usize { self.length as usize } /// Shifts range to the left. pub fn shift_left(self, n: usize) -> Range { self.shift_to(self.offset() - n) } /// Shifts range to the right. pub fn shift_right(self, n: usize) -> Range { self.shift_to(self.offset() + n) } /// Shifts range to specified offset. 
pub fn shift_to(self, offset: usize) -> Range { Range { offset: offset as u32, ..self } } /// Skips n from the left. pub fn skip_left(self, n: usize) -> Range { Range { offset: self.offset + (n as u32), length: self.length - (n as u32), } } /// Skips n from the right. pub fn skip_right(self, n: usize) -> Range { Range { offset: self.offset, length: self.length - (n as u32), } } /// Extend one range with another, the resulting range spans both ranges, /// and in the case they were discontiguous also spans the interval. pub fn extend(self, other: Range) -> Range { if self.offset > other.offset { other.extend(self) } else if self.end_offset() >= other.end_offset()
else { Range { offset: self.offset, length: (other.end_offset() - self.offset()) as u32 } } } } impl fmt::Debug for Range { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "{}@{}", self.length, self.offset) } } impl Default for Range { fn default() -> Range { Range::new(0, 0) } } impl fmt::Display for Range { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "{}@{}", self.length, self.offset) } } impl ops::Index<Range> for [u8] { type Output = [u8]; fn index(&self, index: Range) -> &[u8] { &self[index.offset()..index.end_offset()] } } /// A Slice of bytes, printed more pleasantly #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct Slice<'a>(pub &'a [u8]); impl<'a> Slice<'a> { /// Returns true if empty, false otherwise. pub fn is_empty(&self) -> bool { self.0.is_empty() } /// Returns the length of the slice. pub fn len(&self) -> usize { self.0.len() } /// Returns the byte at the indicated position, or None if it is invalid. pub fn get(&self, pos: usize) -> Option<&u8> { self.0.get(pos) } } impl<'a> fmt::Debug for Slice<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "{}", self) } } impl<'a> fmt::Display for Slice<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { let mut start = 0; while start < self.0.len() { let end = self.0[start..].iter().position(|&b| b < 32 || b > 126) .unwrap_or(self.len()); f.write_str( std::str::from_utf8(&self.0[start..end]).expect("Valid UTF-8") )?; start = end; let end = self.0[start..].iter().position(|&b| b >= 32 && b <= 126) .unwrap_or(self.len()); for &byte in &self.0[start..end] { write!(f, "{{0x{:X}}}", byte)?; } start = end; } Ok(()) } } /// Span pub trait Span { /// Returns the Range spanned by the element. fn span(&self) -> Range; } /// A Store trait, to abstract over the actual storage of individual elements. pub trait Store<T, I = Id<T>> { /// Returns the number of items. fn len(&self) -> usize; /// Returns a copy of the item. fn get(&self, id: I) -> T; /// Returns the range of the item. fn get_range(&self, id: I) -> Range; /// Pushes an item. fn push(&mut self, item: T, range: Range) -> I; } /// A MultiStore trait, to abstract over the actual storage of slices. pub trait MultiStore<T, I = Id<[T]>> { /// Returns the slice of items. fn get_slice(&self, id: I) -> &[T]; // TODO(matthieum): A more efficient interface would take IntoIterator<Item = T> /// Pushes a slice of element. 
fn push_slice(&mut self, items: &[T]) -> I; } // // Tests // #[cfg(test)] mod tests { use super::{CoreId, Range}; #[test] fn core_id_roundtrip() { for i in 0..10 { assert_eq!(i, CoreId::new(i).raw()); } } #[test] fn core_id_default() { let core: CoreId = Default::default(); assert_eq!(std::u32::MAX - 1, core.raw()); } #[test] #[should_panic] fn core_id_reserved_size_optimization() { CoreId::new(std::u32::MAX); } #[test] fn range_extend_contiguous() { let result = Range::new(3, 4).extend(Range::new(7, 2)); assert_eq!(result, Range::new(3, 6)); } #[test] fn range_extend_separated() { let result = Range::new(3, 4).extend(Range::new(11, 3)); assert_eq!(result, Range::new(3, 11)); } #[test] fn range_extend_partially_overlapping() { let result = Range::new(3, 4).extend(Range::new(5, 3)); assert_eq!(result, Range::new(3, 5)); } #[test] fn range_extend_totally_overlapping() { let result = Range::new(3, 4).extend(Range::new(5, 2)); assert_eq!(result, Range::new(3, 4)); } #[test] fn range_extend_reversed() { let result = Range::new(5, 3).extend(Range::new(3, 4)); assert_eq!(result, Range::new(3, 5)); } }
{ self }
conditional_block
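One caveat worth flagging in the listing: in `Slice`'s `Display` impl, both `position` calls run on the subslice `&self.0[start..]`, but their results are used as absolute indices into the full slice (and `unwrap_or(self.len())` mixes in an absolute length). Once `start > 0`, any non-printable byte therefore mis-slices. Below is a corrected, self-contained sketch of the same escaping idea, with the subslice-relative index offset by `start`:

```rust
use std::fmt;

// Corrected, self-contained sketch of the Slice Display logic from the
// com.rs listing. `position` on `&bytes[start..]` returns an index
// relative to the subslice, so it must be offset by `start` before
// indexing the full slice; the listing omits that offset.
struct Slice<'a>(&'a [u8]);

impl<'a> fmt::Display for Slice<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let bytes = self.0;
        let mut start = 0;
        while start < bytes.len() {
            // Printable ASCII run, written as-is.
            let end = bytes[start..].iter()
                .position(|&b| b < 32 || b > 126)
                .map_or(bytes.len(), |p| start + p);
            f.write_str(std::str::from_utf8(&bytes[start..end]).expect("ASCII is valid UTF-8"))?;
            start = end;
            // Non-printable run, escaped as {0xNN}.
            let end = bytes[start..].iter()
                .position(|&b| (32..=126).contains(&b))
                .map_or(bytes.len(), |p| start + p);
            for &byte in &bytes[start..end] {
                write!(f, "{{0x{:X}}}", byte)?;
            }
            start = end;
        }
        Ok(())
    }
}

fn main() {
    assert_eq!(Slice(b"abc\x01def").to_string(), "abc{0x1}def");
}
```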
circuit.go
package circuit import ( "context" "expvar" "sync" "time" "github.com/cep21/circuit/v3/faststats" ) // Circuit is a circuit breaker pattern implementation that can accept commands and open/close on failures type Circuit struct { // circuitStats CmdMetricCollector RunMetricsCollection FallbackMetricCollector FallbackMetricsCollection CircuitMetricsCollector MetricsCollection // This is used to help run `Go` calls in the background goroutineWrapper goroutineWrapper name string // The passed in config is not atomic and thread safe. We reference thread safe values during circuit operations // with atomicCircuitConfig. Those are, also, the only values that can actually be changed while a circuit is // running. notThreadSafeConfig Config // The mutex supports setting and reading the command properties, but is not locked when we reference the config // while live: we use the threadSafeConfig below notThreadSafeConfigMu sync.Mutex threadSafeConfig atomicCircuitConfig // Tracks if the circuit has been shut open or closed isOpen faststats.AtomicBoolean // Tracks how many commands are currently running concurrentCommands faststats.AtomicInt64 // Tracks how many fallbacks are currently running concurrentFallbacks faststats.AtomicInt64 // ClosedToOpen controls when to open a closed circuit ClosedToOpen ClosedToOpen // openToClosed controls when to close an open circuit OpenToClose OpenToClosed timeNow func() time.Time } // NewCircuitFromConfig creates an inline circuit. If you want to group all your circuits together, you should probably // just use Manager struct instead. func NewCircuitFromConfig(name string, config Config) *Circuit { config.Merge(defaultCommandProperties) ret := &Circuit{ name: name, notThreadSafeConfig: config, } ret.SetConfigNotThreadSafe(config) return ret } // ConcurrentCommands returns how many commands are currently running func (c *Circuit) ConcurrentCommands() int64 { return c.concurrentCommands.Get() } // ConcurrentFallbacks returns how many fallbacks are currently running func (c *Circuit) ConcurrentFallbacks() int64 { return c.concurrentFallbacks.Get() } // SetConfigThreadSafe changes the current configuration of this circuit. Note that many config parameters, specifically those // around creating stat tracking buckets, are not modifiable during runtime for efficiency reasons. Those buckets // will stay the same. func (c *Circuit) SetConfigThreadSafe(config Config) { c.notThreadSafeConfigMu.Lock() defer c.notThreadSafeConfigMu.Unlock() c.notThreadSafeConfig = config c.threadSafeConfig.reset(c.notThreadSafeConfig) if cfg, ok := c.OpenToClose.(Configurable); ok { cfg.SetConfigThreadSafe(config) } if cfg, ok := c.ClosedToOpen.(Configurable); ok { cfg.SetConfigThreadSafe(config) } } // Config returns the circuit's configuration. Modifications to this configuration are not reflected by the circuit. // In other words, this creates a copy. func (c *Circuit) Config() Config { c.notThreadSafeConfigMu.Lock() defer c.notThreadSafeConfigMu.Unlock() return c.notThreadSafeConfig } // SetConfigNotThreadSafe is only useful during construction before a circuit is being used. It is not thread safe, // but will modify all the circuit's internal structs to match what the config wants. It also doe *NOT* use the // default configuration parameters. 
func (c *Circuit) SetConfigNotThreadSafe(config Config) { c.notThreadSafeConfigMu.Lock() // Set, but do not reference this config inside this function, since that would not be thread safe (no mu protection) c.notThreadSafeConfig = config c.notThreadSafeConfigMu.Unlock() c.goroutineWrapper.lostErrors = config.General.GoLostErrors c.timeNow = config.General.TimeKeeper.Now c.OpenToClose = config.General.OpenToClosedFactory() c.ClosedToOpen = config.General.ClosedToOpenFactory() if cfg, ok := c.OpenToClose.(Configurable); ok { cfg.SetConfigNotThreadSafe(config) } if cfg, ok := c.ClosedToOpen.(Configurable); ok { cfg.SetConfigNotThreadSafe(config) } c.CmdMetricCollector = append( make([]RunMetrics, 0, len(config.Metrics.Run)+2), c.OpenToClose, c.ClosedToOpen) c.CmdMetricCollector = append(c.CmdMetricCollector, config.Metrics.Run...) c.FallbackMetricCollector = append( make([]FallbackMetrics, 0, len(config.Metrics.Fallback)+2), config.Metrics.Fallback...) c.CircuitMetricsCollector = append( make([]Metrics, 0, len(config.Metrics.Circuit)+2), c.OpenToClose, c.ClosedToOpen) c.CircuitMetricsCollector = append(c.CircuitMetricsCollector, config.Metrics.Circuit...) c.SetConfigThreadSafe(config) } func (c *Circuit) now() time.Time { return c.timeNow() } // Var exports that help diagnose the circuit func (c *Circuit) Var() expvar.Var { return expvar.Func(func() interface{} { if c == nil { return nil } ret := map[string]interface{}{ "config": c.Config(), "is_open": c.IsOpen(), "name": c.Name(), "run_metrics": expvarToVal(c.CmdMetricCollector.Var()), "concurrent_commands": c.ConcurrentCommands(), "concurrent_fallbacks": c.ConcurrentFallbacks(), "closer": c.OpenToClose, "opener": c.ClosedToOpen, "fallback_metrics": expvarToVal(c.FallbackMetricCollector.Var()), } return ret }) } // Name of this circuit func (c *Circuit) Name() string { if c == nil { return "" } return c.name } // IsOpen returns true if the circuit should be considered 'open' (ie not allowing runFunc calls) func (c *Circuit) IsOpen() bool { if c == nil { return false } if c.threadSafeConfig.CircuitBreaker.ForceOpen.Get() { return true } if c.threadSafeConfig.CircuitBreaker.ForcedClosed.Get() { return false } return c.isOpen.Get() } // CloseCircuit closes an open circuit. Usually because we think it's healthy again. Be aware, if the circuit isn't actually // healthy, it will just open back up again. func (c *Circuit) CloseCircuit() { c.close(c.now(), true) } // OpenCircuit will open a closed circuit. The circuit will then try to repair itself func (c *Circuit) OpenCircuit() { c.openCircuit(time.Now()) } // OpenCircuit opens a circuit, without checking error thresholds or request volume thresholds. The circuit will, after // some delay, try to close again. func (c *Circuit) openCircuit(now time.Time) { if c.threadSafeConfig.CircuitBreaker.ForcedClosed.Get() { // Don't open circuits that are forced closed return } if c.IsOpen() { // Don't bother opening a circuit that is already open return } c.CircuitMetricsCollector.Opened(now) c.isOpen.Set(true) } // Go executes `Execute`, but uses spawned goroutines to end early if the context is canceled. Use this if you don't trust // the runFunc to end correctly if context fails. This is a design mirroed in the go-hystrix library, but be warned it // is very dangerous and could leave orphaned goroutines hanging around forever doing who knows what. 
func (c *Circuit) Go(ctx context.Context, runFunc func(context.Context) error, fallbackFunc func(context.Context, error) error) error { if c == nil { var wrapper goroutineWrapper return c.Execute(ctx, wrapper.run(runFunc), wrapper.fallback(fallbackFunc)) } return c.Execute(ctx, c.goroutineWrapper.run(runFunc), c.goroutineWrapper.fallback(fallbackFunc)) } // Run will execute the circuit without a fallback. It is the equivalent of calling Execute with a nil fallback function func (c *Circuit) Run(ctx context.Context, runFunc func(context.Context) error) error { return c.Execute(ctx, runFunc, nil) } // Execute the circuit. Prefer this over Go. Similar to http://netflix.github.io/Hystrix/javadoc/com/netflix/hystrix/HystrixCommand.html#execute-- // The returned error will either be the result of runFunc, the result of fallbackFunc, or an internal library error. // Internal library errors will match the interface Error and you can use type casting to check this. func (c *Circuit) Execute(ctx context.Context, runFunc func(context.Context) error, fallbackFunc func(context.Context, error) error) error { if c.isEmptyOrNil() || c.threadSafeConfig.CircuitBreaker.Disabled.Get() { return runFunc(ctx) } // Try to run the command in the context of the circuit err := c.run(ctx, runFunc) if err == nil { return nil } // A bad request should not trigger fallback logic. The user just gave bad input. // The list of conditions that trigger fallbacks is documented at // https://github.com/Netflix/Hystrix/wiki/Metrics-and-Monitoring#command-execution-event-types-comnetflixhystrixhystrixeventtype if IsBadRequest(err) { return err } return c.fallback(ctx, err, fallbackFunc) } // --------- only private functions below here func (c *Circuit) throttleConcurrentCommands(currentCommandCount int64) error { if c.threadSafeConfig.Execution.MaxConcurrentRequests.Get() >= 0 && currentCommandCount > c.threadSafeConfig.Execution.MaxConcurrentRequests.Get() { return errThrottledConcurrentCommands
	}
	return nil
}

// isEmptyOrNil returns true if the circuit is nil or if the circuit was created from an empty circuit. The empty
// circuit setup is mostly a guess (checking OpenToClose). This allows us to give circuits reasonable behavior
// in the nil/empty case.
func (c *Circuit) isEmptyOrNil() bool {
	return c == nil || c.OpenToClose == nil
}

// run is the equivalent of Java Manager's http://netflix.github.io/Hystrix/javadoc/com/netflix/hystrix/HystrixCommand.html#run()
func (c *Circuit) run(ctx context.Context, runFunc func(context.Context) error) (retErr error) {
	if runFunc == nil {
		return nil
	}
	var expectedDoneBy time.Time
	startTime := c.now()
	originalContext := ctx
	if !c.allowNewRun(startTime) {
		// Rather than make this inline, return a global reference (for memory optimization sake).
		c.CmdMetricCollector.ErrShortCircuit(startTime)
		return errCircuitOpen
	}
	if c.ClosedToOpen.Prevent(startTime) {
		return errCircuitOpen
	}
	currentCommandCount := c.concurrentCommands.Add(1)
	defer c.concurrentCommands.Add(-1)
	if err := c.throttleConcurrentCommands(currentCommandCount); err != nil {
		c.CmdMetricCollector.ErrConcurrencyLimitReject(startTime)
		return err
	}

	// Set a timeout on the command if we have one
	if c.threadSafeConfig.Execution.ExecutionTimeout.Get() > 0 {
		var timeoutCancel func()
		expectedDoneBy = startTime.Add(c.threadSafeConfig.Execution.ExecutionTimeout.Duration())
		ctx, timeoutCancel = context.WithDeadline(ctx, expectedDoneBy)
		defer timeoutCancel()
	}

	ret := runFunc(ctx)
	endTime := c.now()
	totalCmdTime := endTime.Sub(startTime)
	runFuncDoneTime := c.now()

	// See bad request documentation at https://github.com/Netflix/Hystrix/wiki/How-To-Use#error-propagation
	// This request had invalid input, but shouldn't be marked as an 'error' for the circuit.
	// From the documentation:
	// -------
	// The HystrixBadRequestException is intended for use cases such as reporting illegal arguments or non-system
	// failures that should not count against the failure metrics and should not trigger fallback logic.
	if c.checkErrBadRequest(ret, runFuncDoneTime, totalCmdTime) {
		return ret
	}

	// Even if there is no error (or if there is an error), if the request took too long it is always an error for the
	// circuit. Note that ret *MAY* actually be nil. In that case, we still want to return nil.
	if c.checkErrTimeout(expectedDoneBy, runFuncDoneTime, totalCmdTime) {
		// Note: ret could possibly be nil. We will still return nil, but the circuit will consider it a failure.
		return ret
	}

	// The runFunc failed, but someone asked the original context to end. This probably isn't a failure of the
	// circuit: someone just wanted `Execute` to end early, so don't track it as a failure.
	if c.checkErrInterrupt(originalContext, ret, runFuncDoneTime, totalCmdTime) {
		return ret
	}

	if c.checkErrFailure(ret, runFuncDoneTime, totalCmdTime) {
		return ret
	}

	// The circuit works. Close it!
	// Note: Execute this *after* you check for timeouts so we can still track circuit timeouts that happen to also
	// return a valid value later.
	c.checkSuccess(runFuncDoneTime, totalCmdTime)
	return nil
}

func (c *Circuit) checkSuccess(runFuncDoneTime time.Time, totalCmdTime time.Duration) {
	c.CmdMetricCollector.Success(runFuncDoneTime, totalCmdTime)
	if c.IsOpen() {
		c.close(runFuncDoneTime, false)
	}
}

// checkErrInterrupt returns true if this is considered an interrupt error: interrupt errors do not open the circuit.
// Normally, if the parent context is canceled before a timeout is reached, we don't consider the circuit
// unhealthy. But when ExecutionConfig.IgnoreInterrupts is set to true, we try to classify originalContext.Err()
// with the help of the ExecutionConfig.IsErrInterrupt function. When this function returns true, we do not open the circuit.
func (c *Circuit) checkErrInterrupt(originalContext context.Context, ret error, runFuncDoneTime time.Time, totalCmdTime time.Duration) bool {
	// We need to see an error in both the original context and the return value to consider this an "interrupt" caused
	// error.
	if ret == nil || originalContext.Err() == nil {
		return false
	}

	isErrInterrupt := c.notThreadSafeConfig.Execution.IsErrInterrupt
	if isErrInterrupt == nil {
		isErrInterrupt = func(_ error) bool {
			// By default, we consider any error from the original context an interrupt-causing error
			return true
		}
	}

	if !c.threadSafeConfig.GoSpecific.IgnoreInterrupts.Get() && isErrInterrupt(originalContext.Err()) {
		c.CmdMetricCollector.ErrInterrupt(runFuncDoneTime, totalCmdTime)
		return true
	}

	return false
}

func (c *Circuit) checkErrBadRequest(ret error, runFuncDoneTime time.Time, totalCmdTime time.Duration) bool {
	if IsBadRequest(ret) {
		c.CmdMetricCollector.ErrBadRequest(runFuncDoneTime, totalCmdTime)
		return true
	}
	return false
}

func (c *Circuit) checkErrFailure(ret error, runFuncDoneTime time.Time, totalCmdTime time.Duration) bool {
	if ret != nil {
		c.CmdMetricCollector.ErrFailure(runFuncDoneTime, totalCmdTime)
		if !c.IsOpen() {
			c.attemptToOpen(runFuncDoneTime)
		}
		return true
	}
	return false
}

func (c *Circuit) checkErrTimeout(expectedDoneBy time.Time, runFuncDoneTime time.Time, totalCmdTime time.Duration) bool {
	// I don't use the deadline from the context because it could be a smaller timeout from the parent context
	if !expectedDoneBy.IsZero() && expectedDoneBy.Before(runFuncDoneTime) {
		c.CmdMetricCollector.ErrTimeout(runFuncDoneTime, totalCmdTime)
		if !c.IsOpen() {
			c.attemptToOpen(runFuncDoneTime)
		}
		return true
	}
	return false
}

// fallback does fallback logic. Equivalent of
// http://netflix.github.io/Hystrix/javadoc/com/netflix/hystrix/HystrixCommand.html#getFallback
func (c *Circuit) fallback(ctx context.Context, err error, fallbackFunc func(context.Context, error) error) error {
	// Use the fallback command if available
	if fallbackFunc == nil || c.threadSafeConfig.Fallback.Disabled.Get() {
		return err
	}

	// Throttle concurrent fallback calls
	currentFallbackCount := c.concurrentFallbacks.Add(1)
	defer c.concurrentFallbacks.Add(-1)
	if c.threadSafeConfig.Fallback.MaxConcurrentRequests.Get() >= 0 && currentFallbackCount > c.threadSafeConfig.Fallback.MaxConcurrentRequests.Get() {
		c.FallbackMetricCollector.ErrConcurrencyLimitReject(c.now())
		return &circuitError{concurrencyLimitReached: true, msg: "throttling concurrency to fallbacks"}
	}

	startTime := c.now()
	retErr := fallbackFunc(ctx, err)
	totalCmdTime := c.now().Sub(startTime)
	if retErr != nil {
		c.FallbackMetricCollector.ErrFailure(startTime, totalCmdTime)
		return retErr
	}
	c.FallbackMetricCollector.Success(startTime, totalCmdTime)
	return nil
}

// allowNewRun checks if the circuit is allowing new run commands. This happens if the circuit is closed, or
// if it is open, but we want to explore to see if we should close it again.
func (c *Circuit) allowNewRun(now time.Time) bool {
	if !c.IsOpen() {
		return true
	}
	if c.OpenToClose.Allow(now) {
		return true
	}
	return false
}

// close closes an open circuit. Usually because we think it's healthy again.
func (c *Circuit) close(now time.Time, forceClosed bool) {
	if !c.IsOpen() {
		// Not open. Don't need to close it
		return
	}
	if c.threadSafeConfig.CircuitBreaker.ForceOpen.Get() {
		return
	}
	if forceClosed || c.OpenToClose.ShouldClose(now) {
		c.CircuitMetricsCollector.Closed(now)
		c.isOpen.Set(false)
	}
}

// attemptToOpen tries to open an unhealthy circuit. Usually because we think run is having problems, and we want
// to give run a rest for a bit.
//
// It is called "attemptToOpen" because the circuit may not actually open (for example, if there aren't enough requests).
func (c *Circuit) attemptToOpen(now time.Time) {
	if c.threadSafeConfig.CircuitBreaker.ForcedClosed.Get() {
		// Don't open circuits that are forced closed
		return
	}
	if c.IsOpen() {
		// Don't bother opening a circuit that is already open.
		// This check isn't needed (it is also checked inside openCircuit below), but it is an optimization to avoid
		// the below logic when the circuit is in a bad state and would otherwise try to close itself repeatedly.
		return
	}

	if c.ClosedToOpen.ShouldOpen(now) {
		c.openCircuit(now)
	}
}
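For orientation, here is a minimal usage sketch of the circuit implemented above. It is not part of the original file: the import path is inferred from the faststats import in this package, and the failing remote call is invented.

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/cep21/circuit/v3"
)

func main() {
	// Build a circuit with default behavior; NewCircuitFromConfig merges in the defaults.
	c := circuit.NewCircuitFromConfig("example-circuit", circuit.Config{})

	// Execute runs the protected function; on failure (or an open circuit) the fallback runs.
	err := c.Execute(context.Background(), func(ctx context.Context) error {
		return errors.New("remote call failed") // invented failure
	}, func(ctx context.Context, err error) error {
		fmt.Println("falling back after:", err)
		return nil // a nil fallback error makes Execute return nil
	})
	fmt.Println("Execute returned:", err)
}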
aku-utils.go
package main

import (
	"bufio"
	"crypto/tls"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"strconv"
	"strings"
	"time"

	aerospike "github.com/aerospike/aerospike-client-go"
	"go.uber.org/zap"
)

// readCertFile reads a certificate file and aborts on any error.
// Returns the file content as a byte array.
func readCertFile(filename string) []byte {
	dataBytes, err := ioutil.ReadFile(filename)
	if err != nil {
		zap.S().Fatalf("Failed to read certificate or key file `%s` : `%s`", filename, err)
	}
	return dataBytes
}

// initAerospikeClient initializes an Aerospike client.
func initAerospikeClient(host string, username string, password string) (*aerospike.Client, error) {
	clientPolicy := aerospike.NewClientPolicy()
	tlsConfig := initTLSConfig()
	if securityEnabled == "true" {
		clientPolicy.User = username
		clientPolicy.Password = password
		if authMode == "external" {
			clientPolicy.AuthMode = aerospike.AuthModeExternal
		}
	}
	clientPolicy.Timeout = 5 * time.Second
	clientPolicy.TlsConfig = tlsConfig

	port := servicePlainPort
	tlsName := ""
	if clientPolicy.TlsConfig != nil {
		port = serviceTLSPort
		tlsName = serviceTLSName
	}

	portInt, _ := strconv.Atoi(port)
	server := aerospike.NewHost(host, portInt)
	server.TLSName = tlsName

	zap.S().Debugf("Connecting to aerospike node %s:%d.", host, portInt)
	client, err := aerospike.NewClientWithPolicyAndHost(clientPolicy, server)
	if err != nil {
		return nil, err
	}
	return client, nil
}

// initAerospikeConnection creates a connection to an Aerospike node.
func initAerospikeConnection(host string, username string, password string) (*aerospike.Connection, error) {
	clientPolicy := aerospike.NewClientPolicy()
	tlsConfig := initTLSConfig()
	if securityEnabled == "true" {
		clientPolicy.User = username
		clientPolicy.Password = password
		if authMode == "external" {
			clientPolicy.AuthMode = aerospike.AuthModeExternal
		}
	}

	// only one connection
	clientPolicy.ConnectionQueueSize = 1
	clientPolicy.Timeout = 5 * time.Second
	clientPolicy.TlsConfig = tlsConfig

	port := servicePlainPort
	tlsName := ""
	if clientPolicy.TlsConfig != nil {
		port = serviceTLSPort
		tlsName = serviceTLSName
	}

	portInt, _ := strconv.Atoi(port)
	server := aerospike.NewHost(host, portInt)
	server.TLSName = tlsName

	zap.S().Debugf("Connecting to aerospike node %s:%d.", host, portInt)
	connection, err := aerospike.NewConnection(clientPolicy, server)
	if err != nil {
		return nil, err
	}
	if clientPolicy.RequiresAuthentication() {
		if err := connection.Login(clientPolicy); err != nil {
			return nil, err
		}
	}
	return connection, nil
}

// initTLSConfig initializes the TLS config.
func initTLSConfig() *tls.Config {
	var tlsConfig *tls.Config
	if serviceTLSEnabled == "true" {
		serverPool, err := x509.SystemCertPool()
		if serverPool == nil || err != nil {
			zap.S().Debugf("Adding system certificates to the cert pool failed: %s.", err)
			serverPool = x509.NewCertPool()
		}

		if len(serviceCAFile) > 0 {
			path, err := getCertFilePath(aerospikeConfigVolumePath, serviceCAFile, serviceTLSName+"-service-cacert.pem")
			if err != nil {
				zap.S().Fatalf("Unable to get certificate file path: %v.", err)
			}
			// Try to load system CA certs and add them to the system cert pool
			caCert := readCertFile(path)
			zap.S().Debugf("Adding server certificate `%s` to the pool.", path)
			serverPool.AppendCertsFromPEM(caCert)
		}

		var clientPool []tls.Certificate
		if len(serviceCertFile) > 0 || len(serviceKeyFile) > 0 {
			certPath, err := getCertFilePath(aerospikeConfigVolumePath, serviceCertFile, serviceTLSName+"-service-cert.pem")
			if err != nil {
				zap.S().Fatalf("Unable to get certificate file path: %v.", err)
			}
			keyPath, err := getCertFilePath(aerospikeConfigVolumePath, serviceKeyFile, serviceTLSName+"-service-key.pem")
			if err != nil {
				zap.S().Fatalf("Unable to get key file path: %v.", err)
			}

			// Read cert and key files
			certFileBytes := readCertFile(certPath)
			keyFileBytes := readCertFile(keyPath)

			// Decode PEM data
			keyBlock, _ := pem.Decode(keyFileBytes)
			certBlock, _ := pem.Decode(certFileBytes)
			if keyBlock == nil || certBlock == nil {
				zap.S().Fatalf("Unable to decode PEM data for `%s` or `%s`.", keyPath, certPath)
			}

			// Encode PEM data
			keyPEM := pem.EncodeToMemory(keyBlock)
			certPEM := pem.EncodeToMemory(certBlock)
			if keyPEM == nil || certPEM == nil {
				zap.S().Fatalf("Unable to encode PEM data for `%s` or `%s`.", keyPath, certPath)
			}

			cert, err := tls.X509KeyPair(certPEM, keyPEM)
			if err != nil {
				zap.S().Fatalf("Unable to add client certificate `%s` and key file `%s` to the pool: `%s`.", certPath, keyPath, err)
			}
			zap.S().Debugf("Adding client certificate `%s` to the pool.", certPath)
			clientPool = append(clientPool, cert)
		}

		tlsConfig = &tls.Config{
			Certificates:             clientPool,
			RootCAs:                  serverPool,
			InsecureSkipVerify:       false,
			PreferServerCipherSuites: true,
		}
		tlsConfig.BuildNameToCertificate()
	}
	return tlsConfig
}

// getCertFilePath resolves a certificate file path.
func getCertFilePath(configMountPoint string, certFile string, fileName string) (string, error) {
	if certFile == "" {
		return "", fmt.Errorf("certificate file name empty")
	}

	parsedCertFile := strings.Split(certFile, ":")
	switch len(parsedCertFile) {
	case 1:
		return certFile, nil
	case 2:
		switch parsedCertFile[0] {
		case "file":
			return parsedCertFile[1], nil
		case "b64enc":
			return configMountPoint + "/certs/" + fileName, nil
		default:
			return "", fmt.Errorf("invalid option while parsing cert file: %s", parsedCertFile[0])
		}
	}

	// Should not reach here
	return "", fmt.Errorf("unable to parse cert file: %s", certFile)
}

// initVars updates global variables from ENV variable inputs.
func initVars() {
	zap.S().Info("Initializing variables.")
	podIP, ok := os.LookupEnv("MY_POD_IP")
	if ok {
		myPodIP = podIP
	}

	secEnabled, ok := os.LookupEnv("SECURITY_ENABLED")
	if ok {
		securityEnabled = secEnabled
	}

	helmusr, ok := os.LookupEnv("HELM_USERNAME")
	if ok {
		helmUsername = helmusr
	}

	helmpass, ok := os.LookupEnv("HELM_PASSWORD")
	if ok {
		helmPassword = helmpass
	}

	adminusr, ok := os.LookupEnv("ADMIN_USERNAME")
	if ok {
		adminUsername = adminusr
	}

	adminpass, ok := os.LookupEnv("ADMIN_PASSWORD")
	if ok {
		adminPassword = adminpass
	}

	auth, ok := os.LookupEnv("AUTH_MODE")
	if ok {
		authMode = auth
	}

	tlsEnabled, ok := os.LookupEnv("SERVICE_TLS_ENABLED")
	if ok {
		serviceTLSEnabled = tlsEnabled
	}

	tlsCAFile, ok := os.LookupEnv("SERVICE_CA_FILE")
	if ok {
		serviceCAFile = tlsCAFile
	}

	tlsCertFile, ok := os.LookupEnv("SERVICE_CERT_FILE")
	if ok {
		serviceCertFile = tlsCertFile
	}

	tlsKeyFile, ok := os.LookupEnv("SERVICE_KEY_FILE")
	if ok {
		serviceKeyFile = tlsKeyFile
	}

	tlsName, ok := os.LookupEnv("SERVICE_TLS_NAME")
	if ok {
		serviceTLSName = tlsName
	}

	tlsMutualAuth, ok := os.LookupEnv("SERVICE_MUTUAL_AUTH")
	if ok {
		serviceMutualAuth = tlsMutualAuth
	}

	tlsPort, ok := os.LookupEnv("SERVICE_TLS_PORT")
	if ok {
		serviceTLSPort = tlsPort
	}

	plainPort, ok := os.LookupEnv("SERVICE_PLAIN_PORT")
	if ok {
		servicePlainPort = plainPort
	}
}

// InfoParser provides a reader for the Aerospike cluster's response for any of the metrics.
type InfoParser struct {
	*bufio.Reader
}

// NewInfoParser provides an instance of the InfoParser.
func NewInfoParser(s string) *InfoParser {
	return &InfoParser{bufio.NewReader(strings.NewReader(s))}
}

// PeekAndExpect checks if the expected value is present without advancing the reader.
func (ip *InfoParser) PeekAndExpect(s string) error {
	bytes, err := ip.Peek(len(s))
	if err != nil {
		return err
	}

	v := string(bytes)
	if v != s {
		return fmt.Errorf("InfoParser: Wrong value. Peek expected %s, but found %s", s, v)
	}
	return nil
}

// Expect validates the expected value against the one returned by the InfoParser.
// This advances the reader by the length of the input string.
func (ip *InfoParser) Expect(s string) error {
	bytes := make([]byte, len(s))
	if _, err := ip.Read(bytes); err != nil {
		return err
	}

	if string(bytes) != s {
		return fmt.Errorf("InfoParser: Wrong value. Expected %s, found %s", s, string(bytes))
	}
	return nil
}

// ReadUntil reads bytes from the InfoParser, handling some edge cases.
func (ip *InfoParser) ReadUntil(delim byte) (string, error) {
	v, err := ip.ReadBytes(delim)
	switch len(v) {
	case 0:
		return string(v), err
	case 1:
		if v[0] == delim {
			return "", err
		}
		return string(v), err
	}
	return string(v[:len(v)-1]), err
}

// getOpsPerSecLegacy gets ops/sec.
// Format (with and without latency data):
// {test}-read:10:17:37-GMT,ops/sec,>1ms,>8ms,>64ms;10:17:47,29648.2,3.44,0.08,0.00;
// error-no-data-yet-or-back-too-small;
// or,
// {test}-write:;
func getOpsPerSecLegacy(s string) (opsPerSec float64, err error) {
	ip := NewInfoParser(s)
	for {
		if err := ip.Expect("{"); err != nil {
			// it's an error string, read to the next section
			if _, err := ip.ReadUntil(';'); err != nil {
				break
			}
			continue
		}

		// namespace name
		_, err := ip.ReadUntil('}')
		if err != nil {
			break
		}

		if err := ip.Expect("-"); err != nil {
			break
		}

		// operation (read, write etc.)
		_, err = ip.ReadUntil(':')
		if err != nil {
			break
		}

		// Might be an empty output if there's no latency data (in 5.1), so continue to the next section
		if err := ip.PeekAndExpect(";"); err == nil {
			if err := ip.Expect(";"); err != nil {
				break
			}
			continue
		}

		// Ignore timestamp
		_, err = ip.ReadUntil(',')
		if err != nil {
			break
		}

		// Ignore labels
		_, err = ip.ReadUntil(';')
		if err != nil {
			break
		}

		// Ignore timestamp
		_, err = ip.ReadUntil(',')
		if err != nil {
			break
		}

		// Read bucket values
		bucketValuesStr, err := ip.ReadUntil(';')
		if err != nil && err != io.EOF {
			break
		}
		bucketValues := strings.Split(bucketValuesStr, ",")
		val, err := strconv.ParseFloat(bucketValues[0], 64)
		if err != nil {
			break
		}
		opsPerSec += val
	}
	return opsPerSec, nil
}

// getOpsPerSecNew gets ops/sec.
// Format (with and without latency data):
// {test}-write:msec,4234.9,28.75,7.40,1.63,0.26,0.03,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00;
// {test}-read:;
func getOpsPerSecNew(s string) (opsPerSec float64, err error) {
	ip := NewInfoParser(s)
	for {
		if err = ip.Expect("{"); err != nil {
			if _, err = ip.ReadUntil(';'); err != nil {
				break
			}
			continue
		}

		// namespace name
		_, err = ip.ReadUntil('}')
		if err != nil {
			break
		}

		if err = ip.Expect("-"); err != nil {
			break
		}

		// operation (read, write etc.)
		_, err = ip.ReadUntil(':')
		if err != nil {
			break
		}

		// Might be an empty output due to no latency data available, so continue to the next section
		if err = ip.PeekAndExpect(";"); err == nil {
			if err = ip.Expect(";"); err != nil {
				break
			}
			continue
		}

		// time unit - msec or usec
		_, err = ip.ReadUntil(',')
		if err != nil {
			break
		}

		// Read bucket values
		bucketValuesStr, err := ip.ReadUntil(';')
		if err != nil && err != io.EOF {
			break
		}
		bucketValues := strings.Split(bucketValuesStr, ",")
		val, err := strconv.ParseFloat(bucketValues[0], 64)
		if err != nil {
			break
		}
		opsPerSec += val
	}
	return opsPerSec, nil
}

// parseStats parses a key=value stats string, separated by sep, into a map.
func parseStats(s, sep string) map[string]string {
	stats := make(map[string]string, strings.Count(s, sep)+1)
	s2 := strings.Split(s, sep)
	for _, s := range s2 {
		list := strings.SplitN(s, "=", 2)
		switch len(list) {
		case 0, 1:
		case 2:
			stats[list[0]] = list[1]
		default:
			stats[list[0]] = strings.Join(list[1:], "=")
		}
	}
	return stats
}
{ clientPolicy.AuthMode = aerospike.AuthModeExternal }
conditional_block
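The `conditional_block` middle above is the AUTH_MODE toggle shared by both init functions. A minimal standalone sketch of that selection, assuming the same aerospike-client-go constants this file already imports (the `authModeFromEnv` helper name is hypothetical):

```go
package main

import (
	"fmt"
	"os"

	aerospike "github.com/aerospike/aerospike-client-go"
)

// authModeFromEnv mirrors the conditional block above: AUTH_MODE=external
// selects LDAP-style external authentication; any other value keeps the
// client's default internal mode.
func authModeFromEnv() aerospike.AuthMode {
	if os.Getenv("AUTH_MODE") == "external" {
		return aerospike.AuthModeExternal
	}
	return aerospike.AuthModeInternal
}

func main() {
	policy := aerospike.NewClientPolicy()
	policy.AuthMode = authModeFromEnv()
	fmt.Printf("auth mode: %v\n", policy.AuthMode)
}
```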
aku-utils.go
package main import ( "bufio" "crypto/tls" "crypto/x509" "encoding/pem" "fmt" "io" "io/ioutil" "os" "strconv" "strings" "time" aerospike "github.com/aerospike/aerospike-client-go" "go.uber.org/zap" ) // Read certificate file and abort if any errors // Returns file content as byte array func readCertFile(filename string) []byte { dataBytes, err := ioutil.ReadFile(filename) if err != nil { zap.S().Fatalf("Failed to read certificate or key file `%s` : `%s`", filename, err) } return dataBytes } // Initialize Aerospike client func initAerospikeClient(host string, username string, password string) (*aerospike.Client, error) { clientPolicy := aerospike.NewClientPolicy() tlsConfig := initTLSConfig() if securityEnabled == "true" { clientPolicy.User = username clientPolicy.Password = password if authMode == "external" { clientPolicy.AuthMode = aerospike.AuthModeExternal } } clientPolicy.Timeout = 5 * time.Second clientPolicy.TlsConfig = tlsConfig port := servicePlainPort tlsName := "" if clientPolicy.TlsConfig != nil { port = serviceTLSPort tlsName = serviceTLSName } portInt, _ := strconv.Atoi(port) server := aerospike.NewHost(host, portInt) server.TLSName = tlsName zap.S().Debugf("Connecting to aerospike node %s:%d.", host, portInt) client, err := aerospike.NewClientWithPolicyAndHost(clientPolicy, server) if err != nil { return nil, err } return client, nil } // Create a connection to Aerospike node func initAerospikeConnection(host string, username string, password string) (*aerospike.Connection, error) { clientPolicy := aerospike.NewClientPolicy() tlsConfig := initTLSConfig() if securityEnabled == "true" { clientPolicy.User = username clientPolicy.Password = password if authMode == "external" { clientPolicy.AuthMode = aerospike.AuthModeExternal } } // only one connection clientPolicy.ConnectionQueueSize = 1 clientPolicy.Timeout = 5 * time.Second clientPolicy.TlsConfig = tlsConfig port := servicePlainPort tlsName := "" if clientPolicy.TlsConfig != nil { port = serviceTLSPort tlsName = serviceTLSName } portInt, _ := strconv.Atoi(port) server := aerospike.NewHost(host, portInt) server.TLSName = tlsName zap.S().Debugf("Connecting to aerospike node %s:%d.", host, portInt) connection, err := aerospike.NewConnection(clientPolicy, server) if err != nil { return nil, err } if clientPolicy.RequiresAuthentication() { if err := connection.Login(clientPolicy); err != nil { return nil, err } } return connection, nil } // Initialize TLS config func initTLSConfig() *tls.Config
// Get certificate file path func getCertFilePath(configMountPoint string, certFile string, fileName string) (string, error) { if certFile == "" { return "", fmt.Errorf("certificate file name empty") } parsedCertFile := strings.Split(certFile, ":") switch len(parsedCertFile) { case 1: return certFile, nil case 2: switch parsedCertFile[0] { case "file": return parsedCertFile[1], nil case "b64enc": return configMountPoint + "/certs/" + fileName, nil default: return "", fmt.Errorf("Invalid option while parsing cert file: %s", parsedCertFile[0]) } } // Should not reach here return "", fmt.Errorf("Unable to parse cert file: %s", certFile) } // Update global variables from ENV variable inputs func initVars() { zap.S().Info("Initializing variables.") podIP, ok := os.LookupEnv("MY_POD_IP") if ok { myPodIP = podIP } secEnabled, ok := os.LookupEnv("SECURITY_ENABLED") if ok { securityEnabled = secEnabled } helmusr, ok := os.LookupEnv("HELM_USERNAME") if ok { helmUsername = helmusr } helmpass, ok := os.LookupEnv("HELM_PASSWORD") if ok { helmPassword = helmpass } adminusr, ok := os.LookupEnv("ADMIN_USERNAME") if ok { adminUsername = adminusr } adminpass, ok := os.LookupEnv("ADMIN_PASSWORD") if ok { adminPassword = adminpass } auth, ok := os.LookupEnv("AUTH_MODE") if ok { authMode = auth } tlsEnabled, ok := os.LookupEnv("SERVICE_TLS_ENABLED") if ok { serviceTLSEnabled = tlsEnabled } tlsCAFile, ok := os.LookupEnv("SERVICE_CA_FILE") if ok { serviceCAFile = tlsCAFile } tlsCertFile, ok := os.LookupEnv("SERVICE_CERT_FILE") if ok { serviceCertFile = tlsCertFile } tlsKeyFile, ok := os.LookupEnv("SERVICE_KEY_FILE") if ok { serviceKeyFile = tlsKeyFile } tlsName, ok := os.LookupEnv("SERVICE_TLS_NAME") if ok { serviceTLSName = tlsName } tlsMutualAuth, ok := os.LookupEnv("SERVICE_MUTUAL_AUTH") if ok { serviceMutualAuth = tlsMutualAuth } tlsPort, ok := os.LookupEnv("SERVICE_TLS_PORT") if ok { serviceTLSPort = tlsPort } plainPort, ok := os.LookupEnv("SERVICE_PLAIN_PORT") if ok { servicePlainPort = plainPort } } // InfoParser provides a reader for Aerospike cluster's response for any of the metric type InfoParser struct { *bufio.Reader } // NewInfoParser provides an instance of the InfoParser func NewInfoParser(s string) *InfoParser { return &InfoParser{bufio.NewReader(strings.NewReader(s))} } // PeekAndExpect checks if the expected value is present without advancing the reader func (ip *InfoParser) PeekAndExpect(s string) error { bytes, err := ip.Peek(len(s)) if err != nil { return err } v := string(bytes) if v != s { return fmt.Errorf("InfoParser: Wrong value. Peek expected %s, but found %s", s, v) } return nil } // Expect validates the expected value against the one returned by the InfoParser // This advances the reader by length of the input string. func (ip *InfoParser) Expect(s string) error { bytes := make([]byte, len(s)) v, err := ip.Read(bytes) if err != nil { return err } if string(bytes) != s { return fmt.Errorf("InfoParser: Wrong value. 
Expected %s, found %d", s, v) } return nil } // ReadUntil reads bytes from the InfoParser by handeling some edge-cases func (ip *InfoParser) ReadUntil(delim byte) (string, error) { v, err := ip.ReadBytes(delim) switch len(v) { case 0: return string(v), err case 1: if v[0] == delim { return "", err } return string(v), err } return string(v[:len(v)-1]), err } // Get ops/sec // Format (with and without latency data) // {test}-read:10:17:37-GMT,ops/sec,>1ms,>8ms,>64ms;10:17:47,29648.2,3.44,0.08,0.00; // error-no-data-yet-or-back-too-small; // or, // {test}-write:; func getOpsPerSecLegacy(s string) (opsPerSec float64, err error) { ip := NewInfoParser(s) for { if err := ip.Expect("{"); err != nil { // it's an error string, read to next section if _, err := ip.ReadUntil(';'); err != nil { break } continue } // namespace name _, err := ip.ReadUntil('}') if err != nil { break } if err := ip.Expect("-"); err != nil { break } // operation (read, write etc.) _, err = ip.ReadUntil(':') if err != nil { break } // Might be an empty output if there's no latency data (in 5.1), so continue to next section if err := ip.PeekAndExpect(";"); err == nil { if err := ip.Expect(";"); err != nil { break } continue } // Ignore timestamp _, err = ip.ReadUntil(',') if err != nil { break } // Ignore labels _, err = ip.ReadUntil(';') if err != nil { break } // Ignore timestamp _, err = ip.ReadUntil(',') if err != nil { break } // Read bucket values bucketValuesStr, err := ip.ReadUntil(';') if err != nil && err != io.EOF { break } bucketValues := strings.Split(bucketValuesStr, ",") val, err := strconv.ParseFloat(bucketValues[0], 64) if err != nil { break } opsPerSec += val } return opsPerSec, nil } // Get ops/sec // Format (with and without latency data) // {test}-write:msec,4234.9,28.75,7.40,1.63,0.26,0.03,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00; // {test}-read:; func getOpsPerSecNew(s string) (opsPerSec float64, err error) { ip := NewInfoParser(s) for { if err = ip.Expect("{"); err != nil { if _, err = ip.ReadUntil(';'); err != nil { break } continue } // namespace name _, err = ip.ReadUntil('}') if err != nil { break } if err = ip.Expect("-"); err != nil { break } // operation (read, write etc.) _, err = ip.ReadUntil(':') if err != nil { break } // Might be an empty output due to no latency data available, so continue to next section if err = ip.PeekAndExpect(";"); err == nil { if err = ip.Expect(";"); err != nil { break } continue } // time unit - msec or usec _, err = ip.ReadUntil(',') if err != nil { break } // Read bucket values bucketValuesStr, err := ip.ReadUntil(';') if err != nil && err != io.EOF { break } bucketValues := strings.Split(bucketValuesStr, ",") val, err := strconv.ParseFloat(bucketValues[0], 64) if err != nil { break } opsPerSec += val } return opsPerSec, nil } func parseStats(s, sep string) map[string]string { stats := make(map[string]string, strings.Count(s, sep)+1) s2 := strings.Split(s, sep) for _, s := range s2 { list := strings.SplitN(s, "=", 2) switch len(list) { case 0, 1: case 2: stats[list[0]] = list[1] default: stats[list[0]] = strings.Join(list[1:], "=") } } return stats }
{ var tlsConfig *tls.Config if serviceTLSEnabled == "true" { serverPool, err := x509.SystemCertPool() if serverPool == nil || err != nil { zap.S().Debugf("Adding system certificates to the cert pool failed: %s.", err) serverPool = x509.NewCertPool() } if len(serviceCAFile) > 0 { path, err := getCertFilePath(aerospikeConfigVolumePath, serviceCAFile, serviceTLSName+"-service-cacert.pem") if err != nil { zap.S().Fatal("Unable to get certificate file path: %v.", err) } // Try to load system CA certs and add them to the system cert pool caCert := readCertFile(path) zap.S().Debugf("Adding server certificate `%s` to the pool.", path) serverPool.AppendCertsFromPEM(caCert) } var clientPool []tls.Certificate if len(serviceCertFile) > 0 || len(serviceKeyFile) > 0 { certPath, err := getCertFilePath(aerospikeConfigVolumePath, serviceCertFile, serviceTLSName+"-service-cert.pem") if err != nil { zap.S().Fatal("Unable to get certificate file path: %v.", err) } keyPath, err := getCertFilePath(aerospikeConfigVolumePath, serviceKeyFile, serviceTLSName+"-service-key.pem") if err != nil { zap.S().Fatal("Unable to get key file path: %v.", err) } // Read Cert and Key files certFileBytes := readCertFile(certPath) keyFileBytes := readCertFile(keyPath) // Decode PEM data keyBlock, _ := pem.Decode(keyFileBytes) certBlock, _ := pem.Decode(certFileBytes) if keyBlock == nil || certBlock == nil { zap.S().Fatalf("Unable to decode PEM data for `%s` or `%s`.", keyPath, certPath) } // Encode PEM data keyPEM := pem.EncodeToMemory(keyBlock) certPEM := pem.EncodeToMemory(certBlock) if keyPEM == nil || certPEM == nil { zap.S().Fatalf("Unable to encode PEM data for `%s` or `%s`.", keyPath, certPath) } cert, err := tls.X509KeyPair(certPEM, keyPEM) if err != nil { zap.S().Fatalf("Unable to add client certificate `%s` and key file `%s` to the pool: `%s`.", certPath, keyPath, err) } zap.S().Debugf("Adding client certificate `%s` to the pool.", certPath) clientPool = append(clientPool, cert) } tlsConfig = &tls.Config{ Certificates: clientPool, RootCAs: serverPool, InsecureSkipVerify: false, PreferServerCipherSuites: true, } tlsConfig.BuildNameToCertificate() } return tlsConfig }
identifier_body
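The `identifier_body` above is the body of `initTLSConfig`. For orientation, here is a condensed standalone sketch of the same setup; it uses `tls.LoadX509KeyPair`, which performs the read/PEM-decode round-trip the original does by hand with `pem.Decode`/`pem.EncodeToMemory` (the file paths are hypothetical):

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"
)

// buildTLSConfig is a condensed sketch of initTLSConfig: trust the system
// roots plus one CA file, and present a single client certificate.
func buildTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) {
	roots, err := x509.SystemCertPool()
	if err != nil || roots == nil {
		roots = x509.NewCertPool() // fall back to an empty pool
	}
	if ca, err := ioutil.ReadFile(caPath); err == nil {
		roots.AppendCertsFromPEM(ca)
	}
	// LoadX509KeyPair reads and PEM-decodes both files in one call.
	cert, err := tls.LoadX509KeyPair(certPath, keyPath)
	if err != nil {
		return nil, err
	}
	return &tls.Config{
		Certificates: []tls.Certificate{cert},
		RootCAs:      roots,
	}, nil
}

func main() {
	cfg, err := buildTLSConfig("ca.pem", "tls.crt", "tls.key")
	if err != nil {
		log.Fatalf("TLS setup failed: %v", err)
	}
	log.Printf("loaded %d client certificate(s)", len(cfg.Certificates))
}
```

Note that `BuildNameToCertificate` and `PreferServerCipherSuites`, both used in the original, are deprecated no-ops in recent Go releases, so the sketch omits them.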
aku-utils.go
package main import ( "bufio" "crypto/tls" "crypto/x509" "encoding/pem" "fmt" "io" "io/ioutil" "os" "strconv" "strings" "time" aerospike "github.com/aerospike/aerospike-client-go" "go.uber.org/zap" ) // Read certificate file and abort if any errors // Returns file content as byte array func readCertFile(filename string) []byte { dataBytes, err := ioutil.ReadFile(filename) if err != nil { zap.S().Fatalf("Failed to read certificate or key file `%s` : `%s`", filename, err) } return dataBytes } // Initialize Aerospike client func initAerospikeClient(host string, username string, password string) (*aerospike.Client, error) { clientPolicy := aerospike.NewClientPolicy() tlsConfig := initTLSConfig() if securityEnabled == "true" { clientPolicy.User = username clientPolicy.Password = password if authMode == "external" { clientPolicy.AuthMode = aerospike.AuthModeExternal } } clientPolicy.Timeout = 5 * time.Second clientPolicy.TlsConfig = tlsConfig port := servicePlainPort tlsName := "" if clientPolicy.TlsConfig != nil { port = serviceTLSPort tlsName = serviceTLSName } portInt, _ := strconv.Atoi(port) server := aerospike.NewHost(host, portInt) server.TLSName = tlsName zap.S().Debugf("Connecting to aerospike node %s:%d.", host, portInt) client, err := aerospike.NewClientWithPolicyAndHost(clientPolicy, server) if err != nil { return nil, err } return client, nil } // Create a connection to Aerospike node func initAerospikeConnection(host string, username string, password string) (*aerospike.Connection, error) { clientPolicy := aerospike.NewClientPolicy() tlsConfig := initTLSConfig() if securityEnabled == "true" { clientPolicy.User = username clientPolicy.Password = password if authMode == "external" { clientPolicy.AuthMode = aerospike.AuthModeExternal } } // only one connection clientPolicy.ConnectionQueueSize = 1 clientPolicy.Timeout = 5 * time.Second clientPolicy.TlsConfig = tlsConfig port := servicePlainPort tlsName := "" if clientPolicy.TlsConfig != nil { port = serviceTLSPort tlsName = serviceTLSName } portInt, _ := strconv.Atoi(port) server := aerospike.NewHost(host, portInt) server.TLSName = tlsName zap.S().Debugf("Connecting to aerospike node %s:%d.", host, portInt) connection, err := aerospike.NewConnection(clientPolicy, server) if err != nil { return nil, err } if clientPolicy.RequiresAuthentication() { if err := connection.Login(clientPolicy); err != nil { return nil, err } } return connection, nil } // Initialize TLS config func initTLSConfig() *tls.Config { var tlsConfig *tls.Config if serviceTLSEnabled == "true" { serverPool, err := x509.SystemCertPool() if serverPool == nil || err != nil { zap.S().Debugf("Adding system certificates to the cert pool failed: %s.", err) serverPool = x509.NewCertPool() } if len(serviceCAFile) > 0 { path, err := getCertFilePath(aerospikeConfigVolumePath, serviceCAFile, serviceTLSName+"-service-cacert.pem") if err != nil { zap.S().Fatal("Unable to get certificate file path: %v.", err) } // Try to load system CA certs and add them to the system cert pool caCert := readCertFile(path) zap.S().Debugf("Adding server certificate `%s` to the pool.", path) serverPool.AppendCertsFromPEM(caCert) } var clientPool []tls.Certificate if len(serviceCertFile) > 0 || len(serviceKeyFile) > 0 { certPath, err := getCertFilePath(aerospikeConfigVolumePath, serviceCertFile, serviceTLSName+"-service-cert.pem") if err != nil { zap.S().Fatal("Unable to get certificate file path: %v.", err) } keyPath, err := getCertFilePath(aerospikeConfigVolumePath, serviceKeyFile, 
serviceTLSName+"-service-key.pem") if err != nil { zap.S().Fatal("Unable to get key file path: %v.", err) } // Read Cert and Key files certFileBytes := readCertFile(certPath) keyFileBytes := readCertFile(keyPath) // Decode PEM data keyBlock, _ := pem.Decode(keyFileBytes) certBlock, _ := pem.Decode(certFileBytes) if keyBlock == nil || certBlock == nil { zap.S().Fatalf("Unable to decode PEM data for `%s` or `%s`.", keyPath, certPath) } // Encode PEM data keyPEM := pem.EncodeToMemory(keyBlock) certPEM := pem.EncodeToMemory(certBlock) if keyPEM == nil || certPEM == nil { zap.S().Fatalf("Unable to encode PEM data for `%s` or `%s`.", keyPath, certPath) } cert, err := tls.X509KeyPair(certPEM, keyPEM) if err != nil { zap.S().Fatalf("Unable to add client certificate `%s` and key file `%s` to the pool: `%s`.", certPath, keyPath, err) } zap.S().Debugf("Adding client certificate `%s` to the pool.", certPath) clientPool = append(clientPool, cert) } tlsConfig = &tls.Config{ Certificates: clientPool, RootCAs: serverPool, InsecureSkipVerify: false, PreferServerCipherSuites: true, } tlsConfig.BuildNameToCertificate() } return tlsConfig } // Get certificate file path func getCertFilePath(configMountPoint string, certFile string, fileName string) (string, error) { if certFile == "" { return "", fmt.Errorf("certificate file name empty") } parsedCertFile := strings.Split(certFile, ":") switch len(parsedCertFile) { case 1: return certFile, nil case 2: switch parsedCertFile[0] { case "file": return parsedCertFile[1], nil case "b64enc": return configMountPoint + "/certs/" + fileName, nil default: return "", fmt.Errorf("Invalid option while parsing cert file: %s", parsedCertFile[0]) } } // Should not reach here
// Update global variables from ENV variable inputs func initVars() { zap.S().Info("Initializing variables.") podIP, ok := os.LookupEnv("MY_POD_IP") if ok { myPodIP = podIP } secEnabled, ok := os.LookupEnv("SECURITY_ENABLED") if ok { securityEnabled = secEnabled } helmusr, ok := os.LookupEnv("HELM_USERNAME") if ok { helmUsername = helmusr } helmpass, ok := os.LookupEnv("HELM_PASSWORD") if ok { helmPassword = helmpass } adminusr, ok := os.LookupEnv("ADMIN_USERNAME") if ok { adminUsername = adminusr } adminpass, ok := os.LookupEnv("ADMIN_PASSWORD") if ok { adminPassword = adminpass } auth, ok := os.LookupEnv("AUTH_MODE") if ok { authMode = auth } tlsEnabled, ok := os.LookupEnv("SERVICE_TLS_ENABLED") if ok { serviceTLSEnabled = tlsEnabled } tlsCAFile, ok := os.LookupEnv("SERVICE_CA_FILE") if ok { serviceCAFile = tlsCAFile } tlsCertFile, ok := os.LookupEnv("SERVICE_CERT_FILE") if ok { serviceCertFile = tlsCertFile } tlsKeyFile, ok := os.LookupEnv("SERVICE_KEY_FILE") if ok { serviceKeyFile = tlsKeyFile } tlsName, ok := os.LookupEnv("SERVICE_TLS_NAME") if ok { serviceTLSName = tlsName } tlsMutualAuth, ok := os.LookupEnv("SERVICE_MUTUAL_AUTH") if ok { serviceMutualAuth = tlsMutualAuth } tlsPort, ok := os.LookupEnv("SERVICE_TLS_PORT") if ok { serviceTLSPort = tlsPort } plainPort, ok := os.LookupEnv("SERVICE_PLAIN_PORT") if ok { servicePlainPort = plainPort } } // InfoParser provides a reader for Aerospike cluster's response for any of the metric type InfoParser struct { *bufio.Reader } // NewInfoParser provides an instance of the InfoParser func NewInfoParser(s string) *InfoParser { return &InfoParser{bufio.NewReader(strings.NewReader(s))} } // PeekAndExpect checks if the expected value is present without advancing the reader func (ip *InfoParser) PeekAndExpect(s string) error { bytes, err := ip.Peek(len(s)) if err != nil { return err } v := string(bytes) if v != s { return fmt.Errorf("InfoParser: Wrong value. Peek expected %s, but found %s", s, v) } return nil } // Expect validates the expected value against the one returned by the InfoParser // This advances the reader by length of the input string. func (ip *InfoParser) Expect(s string) error { bytes := make([]byte, len(s)) v, err := ip.Read(bytes) if err != nil { return err } if string(bytes) != s { return fmt.Errorf("InfoParser: Wrong value. Expected %s, found %d", s, v) } return nil } // ReadUntil reads bytes from the InfoParser by handeling some edge-cases func (ip *InfoParser) ReadUntil(delim byte) (string, error) { v, err := ip.ReadBytes(delim) switch len(v) { case 0: return string(v), err case 1: if v[0] == delim { return "", err } return string(v), err } return string(v[:len(v)-1]), err } // Get ops/sec // Format (with and without latency data) // {test}-read:10:17:37-GMT,ops/sec,>1ms,>8ms,>64ms;10:17:47,29648.2,3.44,0.08,0.00; // error-no-data-yet-or-back-too-small; // or, // {test}-write:; func getOpsPerSecLegacy(s string) (opsPerSec float64, err error) { ip := NewInfoParser(s) for { if err := ip.Expect("{"); err != nil { // it's an error string, read to next section if _, err := ip.ReadUntil(';'); err != nil { break } continue } // namespace name _, err := ip.ReadUntil('}') if err != nil { break } if err := ip.Expect("-"); err != nil { break } // operation (read, write etc.) 
_, err = ip.ReadUntil(':') if err != nil { break } // Might be an empty output if there's no latency data (in 5.1), so continue to next section if err := ip.PeekAndExpect(";"); err == nil { if err := ip.Expect(";"); err != nil { break } continue } // Ignore timestamp _, err = ip.ReadUntil(',') if err != nil { break } // Ignore labels _, err = ip.ReadUntil(';') if err != nil { break } // Ignore timestamp _, err = ip.ReadUntil(',') if err != nil { break } // Read bucket values bucketValuesStr, err := ip.ReadUntil(';') if err != nil && err != io.EOF { break } bucketValues := strings.Split(bucketValuesStr, ",") val, err := strconv.ParseFloat(bucketValues[0], 64) if err != nil { break } opsPerSec += val } return opsPerSec, nil } // Get ops/sec // Format (with and without latency data) // {test}-write:msec,4234.9,28.75,7.40,1.63,0.26,0.03,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00; // {test}-read:; func getOpsPerSecNew(s string) (opsPerSec float64, err error) { ip := NewInfoParser(s) for { if err = ip.Expect("{"); err != nil { if _, err = ip.ReadUntil(';'); err != nil { break } continue } // namespace name _, err = ip.ReadUntil('}') if err != nil { break } if err = ip.Expect("-"); err != nil { break } // operation (read, write etc.) _, err = ip.ReadUntil(':') if err != nil { break } // Might be an empty output due to no latency data available, so continue to next section if err = ip.PeekAndExpect(";"); err == nil { if err = ip.Expect(";"); err != nil { break } continue } // time unit - msec or usec _, err = ip.ReadUntil(',') if err != nil { break } // Read bucket values bucketValuesStr, err := ip.ReadUntil(';') if err != nil && err != io.EOF { break } bucketValues := strings.Split(bucketValuesStr, ",") val, err := strconv.ParseFloat(bucketValues[0], 64) if err != nil { break } opsPerSec += val } return opsPerSec, nil } func parseStats(s, sep string) map[string]string { stats := make(map[string]string, strings.Count(s, sep)+1) s2 := strings.Split(s, sep) for _, s := range s2 { list := strings.SplitN(s, "=", 2) switch len(list) { case 0, 1: case 2: stats[list[0]] = list[1] default: stats[list[0]] = strings.Join(list[1:], "=") } } return stats }
return "", fmt.Errorf("Unable to parse cert file: %s", certFile) }
random_line_split
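The `random_line_split` above falls inside `getCertFilePath`, whose contract is easy to miss in the flattened text: a bare path passes through unchanged, a `file:` prefix strips the scheme, and `b64enc:` redirects to the already-decoded file under the config mount. A usage sketch, assuming `getCertFilePath` as defined in this file (all paths are hypothetical):

```go
// Assumes getCertFilePath from aku-utils.go above.
func exampleCertPaths() {
	// Bare path: returned unchanged.
	p, _ := getCertFilePath("/etc/aerospike", "/pki/ca.pem", "svc-cacert.pem")
	fmt.Println(p) // /pki/ca.pem

	// "file:" prefix: everything after the colon is used as-is.
	p, _ = getCertFilePath("/etc/aerospike", "file:/pki/ca.pem", "svc-cacert.pem")
	fmt.Println(p) // /pki/ca.pem

	// "b64enc:" prefix: the payload was already decoded to a well-known
	// location under the config mount, so that synthesized path is returned.
	p, _ = getCertFilePath("/etc/aerospike", "b64enc:LS0tLS1CRUdJTg==", "svc-cacert.pem")
	fmt.Println(p) // /etc/aerospike/certs/svc-cacert.pem
}
```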
aku-utils.go
package main import ( "bufio" "crypto/tls" "crypto/x509" "encoding/pem" "fmt" "io" "io/ioutil" "os" "strconv" "strings" "time" aerospike "github.com/aerospike/aerospike-client-go" "go.uber.org/zap" ) // Read certificate file and abort if any errors // Returns file content as byte array func readCertFile(filename string) []byte { dataBytes, err := ioutil.ReadFile(filename) if err != nil { zap.S().Fatalf("Failed to read certificate or key file `%s` : `%s`", filename, err) } return dataBytes } // Initialize Aerospike client func initAerospikeClient(host string, username string, password string) (*aerospike.Client, error) { clientPolicy := aerospike.NewClientPolicy() tlsConfig := initTLSConfig() if securityEnabled == "true" { clientPolicy.User = username clientPolicy.Password = password if authMode == "external" { clientPolicy.AuthMode = aerospike.AuthModeExternal } } clientPolicy.Timeout = 5 * time.Second clientPolicy.TlsConfig = tlsConfig port := servicePlainPort tlsName := "" if clientPolicy.TlsConfig != nil { port = serviceTLSPort tlsName = serviceTLSName } portInt, _ := strconv.Atoi(port) server := aerospike.NewHost(host, portInt) server.TLSName = tlsName zap.S().Debugf("Connecting to aerospike node %s:%d.", host, portInt) client, err := aerospike.NewClientWithPolicyAndHost(clientPolicy, server) if err != nil { return nil, err } return client, nil } // Create a connection to Aerospike node func initAerospikeConnection(host string, username string, password string) (*aerospike.Connection, error) { clientPolicy := aerospike.NewClientPolicy() tlsConfig := initTLSConfig() if securityEnabled == "true" { clientPolicy.User = username clientPolicy.Password = password if authMode == "external" { clientPolicy.AuthMode = aerospike.AuthModeExternal } } // only one connection clientPolicy.ConnectionQueueSize = 1 clientPolicy.Timeout = 5 * time.Second clientPolicy.TlsConfig = tlsConfig port := servicePlainPort tlsName := "" if clientPolicy.TlsConfig != nil { port = serviceTLSPort tlsName = serviceTLSName } portInt, _ := strconv.Atoi(port) server := aerospike.NewHost(host, portInt) server.TLSName = tlsName zap.S().Debugf("Connecting to aerospike node %s:%d.", host, portInt) connection, err := aerospike.NewConnection(clientPolicy, server) if err != nil { return nil, err } if clientPolicy.RequiresAuthentication() { if err := connection.Login(clientPolicy); err != nil { return nil, err } } return connection, nil } // Initialize TLS config func initTLSConfig() *tls.Config { var tlsConfig *tls.Config if serviceTLSEnabled == "true" { serverPool, err := x509.SystemCertPool() if serverPool == nil || err != nil { zap.S().Debugf("Adding system certificates to the cert pool failed: %s.", err) serverPool = x509.NewCertPool() } if len(serviceCAFile) > 0 { path, err := getCertFilePath(aerospikeConfigVolumePath, serviceCAFile, serviceTLSName+"-service-cacert.pem") if err != nil { zap.S().Fatal("Unable to get certificate file path: %v.", err) } // Try to load system CA certs and add them to the system cert pool caCert := readCertFile(path) zap.S().Debugf("Adding server certificate `%s` to the pool.", path) serverPool.AppendCertsFromPEM(caCert) } var clientPool []tls.Certificate if len(serviceCertFile) > 0 || len(serviceKeyFile) > 0 { certPath, err := getCertFilePath(aerospikeConfigVolumePath, serviceCertFile, serviceTLSName+"-service-cert.pem") if err != nil { zap.S().Fatal("Unable to get certificate file path: %v.", err) } keyPath, err := getCertFilePath(aerospikeConfigVolumePath, serviceKeyFile, 
serviceTLSName+"-service-key.pem") if err != nil { zap.S().Fatal("Unable to get key file path: %v.", err) } // Read Cert and Key files certFileBytes := readCertFile(certPath) keyFileBytes := readCertFile(keyPath) // Decode PEM data keyBlock, _ := pem.Decode(keyFileBytes) certBlock, _ := pem.Decode(certFileBytes) if keyBlock == nil || certBlock == nil { zap.S().Fatalf("Unable to decode PEM data for `%s` or `%s`.", keyPath, certPath) } // Encode PEM data keyPEM := pem.EncodeToMemory(keyBlock) certPEM := pem.EncodeToMemory(certBlock) if keyPEM == nil || certPEM == nil { zap.S().Fatalf("Unable to encode PEM data for `%s` or `%s`.", keyPath, certPath) } cert, err := tls.X509KeyPair(certPEM, keyPEM) if err != nil { zap.S().Fatalf("Unable to add client certificate `%s` and key file `%s` to the pool: `%s`.", certPath, keyPath, err) } zap.S().Debugf("Adding client certificate `%s` to the pool.", certPath) clientPool = append(clientPool, cert) } tlsConfig = &tls.Config{ Certificates: clientPool, RootCAs: serverPool, InsecureSkipVerify: false, PreferServerCipherSuites: true, } tlsConfig.BuildNameToCertificate() } return tlsConfig } // Get certificate file path func
(configMountPoint string, certFile string, fileName string) (string, error) { if certFile == "" { return "", fmt.Errorf("certificate file name empty") } parsedCertFile := strings.Split(certFile, ":") switch len(parsedCertFile) { case 1: return certFile, nil case 2: switch parsedCertFile[0] { case "file": return parsedCertFile[1], nil case "b64enc": return configMountPoint + "/certs/" + fileName, nil default: return "", fmt.Errorf("Invalid option while parsing cert file: %s", parsedCertFile[0]) } } // Should not reach here return "", fmt.Errorf("Unable to parse cert file: %s", certFile) } // Update global variables from ENV variable inputs func initVars() { zap.S().Info("Initializing variables.") podIP, ok := os.LookupEnv("MY_POD_IP") if ok { myPodIP = podIP } secEnabled, ok := os.LookupEnv("SECURITY_ENABLED") if ok { securityEnabled = secEnabled } helmusr, ok := os.LookupEnv("HELM_USERNAME") if ok { helmUsername = helmusr } helmpass, ok := os.LookupEnv("HELM_PASSWORD") if ok { helmPassword = helmpass } adminusr, ok := os.LookupEnv("ADMIN_USERNAME") if ok { adminUsername = adminusr } adminpass, ok := os.LookupEnv("ADMIN_PASSWORD") if ok { adminPassword = adminpass } auth, ok := os.LookupEnv("AUTH_MODE") if ok { authMode = auth } tlsEnabled, ok := os.LookupEnv("SERVICE_TLS_ENABLED") if ok { serviceTLSEnabled = tlsEnabled } tlsCAFile, ok := os.LookupEnv("SERVICE_CA_FILE") if ok { serviceCAFile = tlsCAFile } tlsCertFile, ok := os.LookupEnv("SERVICE_CERT_FILE") if ok { serviceCertFile = tlsCertFile } tlsKeyFile, ok := os.LookupEnv("SERVICE_KEY_FILE") if ok { serviceKeyFile = tlsKeyFile } tlsName, ok := os.LookupEnv("SERVICE_TLS_NAME") if ok { serviceTLSName = tlsName } tlsMutualAuth, ok := os.LookupEnv("SERVICE_MUTUAL_AUTH") if ok { serviceMutualAuth = tlsMutualAuth } tlsPort, ok := os.LookupEnv("SERVICE_TLS_PORT") if ok { serviceTLSPort = tlsPort } plainPort, ok := os.LookupEnv("SERVICE_PLAIN_PORT") if ok { servicePlainPort = plainPort } } // InfoParser provides a reader for Aerospike cluster's response for any of the metric type InfoParser struct { *bufio.Reader } // NewInfoParser provides an instance of the InfoParser func NewInfoParser(s string) *InfoParser { return &InfoParser{bufio.NewReader(strings.NewReader(s))} } // PeekAndExpect checks if the expected value is present without advancing the reader func (ip *InfoParser) PeekAndExpect(s string) error { bytes, err := ip.Peek(len(s)) if err != nil { return err } v := string(bytes) if v != s { return fmt.Errorf("InfoParser: Wrong value. Peek expected %s, but found %s", s, v) } return nil } // Expect validates the expected value against the one returned by the InfoParser // This advances the reader by length of the input string. func (ip *InfoParser) Expect(s string) error { bytes := make([]byte, len(s)) v, err := ip.Read(bytes) if err != nil { return err } if string(bytes) != s { return fmt.Errorf("InfoParser: Wrong value. 
Expected %s, found %d", s, v) } return nil } // ReadUntil reads bytes from the InfoParser by handeling some edge-cases func (ip *InfoParser) ReadUntil(delim byte) (string, error) { v, err := ip.ReadBytes(delim) switch len(v) { case 0: return string(v), err case 1: if v[0] == delim { return "", err } return string(v), err } return string(v[:len(v)-1]), err } // Get ops/sec // Format (with and without latency data) // {test}-read:10:17:37-GMT,ops/sec,>1ms,>8ms,>64ms;10:17:47,29648.2,3.44,0.08,0.00; // error-no-data-yet-or-back-too-small; // or, // {test}-write:; func getOpsPerSecLegacy(s string) (opsPerSec float64, err error) { ip := NewInfoParser(s) for { if err := ip.Expect("{"); err != nil { // it's an error string, read to next section if _, err := ip.ReadUntil(';'); err != nil { break } continue } // namespace name _, err := ip.ReadUntil('}') if err != nil { break } if err := ip.Expect("-"); err != nil { break } // operation (read, write etc.) _, err = ip.ReadUntil(':') if err != nil { break } // Might be an empty output if there's no latency data (in 5.1), so continue to next section if err := ip.PeekAndExpect(";"); err == nil { if err := ip.Expect(";"); err != nil { break } continue } // Ignore timestamp _, err = ip.ReadUntil(',') if err != nil { break } // Ignore labels _, err = ip.ReadUntil(';') if err != nil { break } // Ignore timestamp _, err = ip.ReadUntil(',') if err != nil { break } // Read bucket values bucketValuesStr, err := ip.ReadUntil(';') if err != nil && err != io.EOF { break } bucketValues := strings.Split(bucketValuesStr, ",") val, err := strconv.ParseFloat(bucketValues[0], 64) if err != nil { break } opsPerSec += val } return opsPerSec, nil } // Get ops/sec // Format (with and without latency data) // {test}-write:msec,4234.9,28.75,7.40,1.63,0.26,0.03,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00; // {test}-read:; func getOpsPerSecNew(s string) (opsPerSec float64, err error) { ip := NewInfoParser(s) for { if err = ip.Expect("{"); err != nil { if _, err = ip.ReadUntil(';'); err != nil { break } continue } // namespace name _, err = ip.ReadUntil('}') if err != nil { break } if err = ip.Expect("-"); err != nil { break } // operation (read, write etc.) _, err = ip.ReadUntil(':') if err != nil { break } // Might be an empty output due to no latency data available, so continue to next section if err = ip.PeekAndExpect(";"); err == nil { if err = ip.Expect(";"); err != nil { break } continue } // time unit - msec or usec _, err = ip.ReadUntil(',') if err != nil { break } // Read bucket values bucketValuesStr, err := ip.ReadUntil(';') if err != nil && err != io.EOF { break } bucketValues := strings.Split(bucketValuesStr, ",") val, err := strconv.ParseFloat(bucketValues[0], 64) if err != nil { break } opsPerSec += val } return opsPerSec, nil } func parseStats(s, sep string) map[string]string { stats := make(map[string]string, strings.Count(s, sep)+1) s2 := strings.Split(s, sep) for _, s := range s2 { list := strings.SplitN(s, "=", 2) switch len(list) { case 0, 1: case 2: stats[list[0]] = list[1] default: stats[list[0]] = strings.Join(list[1:], "=") } } return stats }
getCertFilePath
identifier_name
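The latency-format comments above are terse, so a worked input helps. This sketch assumes `getOpsPerSecLegacy` as defined in this file; the string is the documented pre-5.1 format, and only the first value after the second timestamp (the ops/sec column) of each namespace section is accumulated:

```go
// Assumes getOpsPerSecLegacy from aku-utils.go above.
func exampleLegacyLatency() {
	s := "{test}-read:10:17:37-GMT,ops/sec,>1ms,>8ms,>64ms;" +
		"10:17:47,29648.2,3.44,0.08,0.00;" +
		"error-no-data-yet-or-back-too-small;" + // no leading '{': skipped as an error section
		"{test}-write:;" // empty section: skipped via PeekAndExpect(";")
	ops, err := getOpsPerSecLegacy(s)
	fmt.Println(ops, err) // 29648.2 <nil>
}
```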
physics_hooks.rs
use crate::dynamics::{RigidBodyHandle, RigidBodySet}; use crate::geometry::{ColliderHandle, ColliderSet, ContactManifold, SolverContact, SolverFlags}; use crate::math::{Real, Vector}; use na::ComplexField; /// Context given to custom collision filters to filter-out collisions. pub struct PairFilterContext<'a> { /// The set of rigid-bodies. pub bodies: &'a RigidBodySet, /// The set of colliders. pub colliders: &'a ColliderSet, /// The handle of the first collider involved in the potential collision. pub collider1: ColliderHandle, /// The handle of the first collider involved in the potential collision. pub collider2: ColliderHandle, /// The handle of the first body involved in the potential collision. pub rigid_body1: Option<RigidBodyHandle>, /// The handle of the first body involved in the potential collision. pub rigid_body2: Option<RigidBodyHandle>, } /// Context given to custom contact modifiers to modify the contacts seen by the constraints solver. pub struct ContactModificationContext<'a> { /// The set of rigid-bodies. pub bodies: &'a RigidBodySet, /// The set of colliders. pub colliders: &'a ColliderSet, /// The handle of the first collider involved in the potential collision. pub collider1: ColliderHandle, /// The handle of the first collider involved in the potential collision. pub collider2: ColliderHandle, /// The handle of the first body involved in the potential collision. pub rigid_body1: Option<RigidBodyHandle>, /// The handle of the first body involved in the potential collision. pub rigid_body2: Option<RigidBodyHandle>, /// The contact manifold. pub manifold: &'a ContactManifold, /// The solver contacts that can be modified. pub solver_contacts: &'a mut Vec<SolverContact>, /// The contact normal that can be modified. pub normal: &'a mut Vector<Real>, /// User-defined data attached to the manifold. // NOTE: we keep this a &'a mut u32 to emphasize the // fact that this can be modified. pub user_data: &'a mut u32, } impl<'a> ContactModificationContext<'a> { /// Helper function to update `self` to emulate a oneway-platform. /// /// The "oneway" behavior will only allow contacts between two colliders /// if the local contact normal of the first collider involved in the contact /// is almost aligned with the provided `allowed_local_n1` direction. /// /// To make this method work properly it must be called as part of the /// `PhysicsHooks::modify_solver_contacts` method at each timestep, for each /// contact manifold involving a one-way platform. The `self.user_data` field /// must not be modified from the outside of this method. pub fn update_as_oneway_platform( &mut self, allowed_local_n1: &Vector<Real>, allowed_angle: Real, ) { const CONTACT_CONFIGURATION_UNKNOWN: u32 = 0; const CONTACT_CURRENTLY_ALLOWED: u32 = 1; const CONTACT_CURRENTLY_FORBIDDEN: u32 = 2; let cang = ComplexField::cos(allowed_angle); // Test the allowed normal with the local-space contact normal that // points towards the exterior of context.collider1. let contact_is_ok = self.manifold.local_n1.dot(&allowed_local_n1) >= cang; match *self.user_data { CONTACT_CONFIGURATION_UNKNOWN => { if contact_is_ok { // The contact is close enough to the allowed normal. *self.user_data = CONTACT_CURRENTLY_ALLOWED; } else { // The contact normal isn't close enough to the allowed // normal, so remove all the contacts and mark further contacts // as forbidden. self.solver_contacts.clear(); // NOTE: in some very rare cases `local_n1` will be // zero if the objects are exactly touching at one point. 
// So in this case we can't really conclude. // If the norm is non-zero, then we can tell we need to forbid // further contacts. Otherwise we have to wait for the next frame. if self.manifold.local_n1.norm_squared() > 0.1 { *self.user_data = CONTACT_CURRENTLY_FORBIDDEN; } } } CONTACT_CURRENTLY_FORBIDDEN => { // Contacts are forbidden so we need to continue forbidding contacts // until all the contacts are non-penetrating again. In that case, if // the contacts are OK wrt. the contact normal, then we can mark them as allowed. if contact_is_ok && self.solver_contacts.iter().all(|c| c.dist > 0.0) { *self.user_data = CONTACT_CURRENTLY_ALLOWED; } else { // Discard all the contacts. self.solver_contacts.clear(); } } CONTACT_CURRENTLY_ALLOWED => { // We allow all the contacts right now. The configuration becomes // uncertain again when the contact manifold no longer contains any contact. if self.solver_contacts.is_empty() { *self.user_data = CONTACT_CONFIGURATION_UNKNOWN; } } _ => unreachable!(), } } } bitflags::bitflags! { #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] /// Flags affecting the behavior of the constraints solver for a given contact manifold. pub struct ActiveHooks: u32 { /// If set, Rapier will call `PhysicsHooks::filter_contact_pair` whenever relevant. const FILTER_CONTACT_PAIRS = 0b0001; /// If set, Rapier will call `PhysicsHooks::filter_intersection_pair` whenever relevant. const FILTER_INTERSECTION_PAIR = 0b0010; /// If set, Rapier will call `PhysicsHooks::modify_solver_contacts` whenever relevant. const MODIFY_SOLVER_CONTACTS = 0b0100; } } impl Default for ActiveHooks { fn default() -> Self { ActiveHooks::empty() } } // TODO: right now, the wasm version doesn't have the Send+Sync bounds. // This is because these bounds are very difficult to fulfill if we want to // call JS closures. Also, parallelism cannot be enabled for wasm targets, so // not having Send+Sync isn't a problem. /// User-defined functions called by the physics engine during one timestep in order to customize its behavior. #[cfg(target_arch = "wasm32")] pub trait PhysicsHooks { /// Applies the contact pair filter. fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> { None } /// Applies the intersection pair filter. fn filter_intersection_pair(&self, _context: &PairFilterContext) -> bool { false } /// Modifies the set of contacts seen by the constraints solver. fn modify_solver_contacts(&self, _context: &mut ContactModificationContext) {} } /// User-defined functions called by the physics engine during one timestep in order to customize its behavior. #[cfg(not(target_arch = "wasm32"))] pub trait PhysicsHooks: Send + Sync { /// Applies the contact pair filter. /// /// Note that this method will only be called if at least one of the colliders /// involved in the contact contains the `ActiveHooks::FILTER_CONTACT_PAIRS` flag /// in its physics hooks flags. /// /// User-defined filter for potential contact pairs detected by the broad-phase. /// This can be used to apply custom logic in order to decide whether two colliders /// should have their contact computed by the narrow-phase, and if these contacts /// should be solved by the constraints solver. /// /// Note that using a contact pair filter will replace the default contact filtering /// which consists of preventing contact computation between two non-dynamic bodies. /// /// This filtering method is called after taking into account the colliders' collision groups.
/// /// If this returns `None`, then the narrow-phase will ignore this contact pair and /// not compute any contact manifolds for it. /// If this returns `Some`, then the narrow-phase will compute contact manifolds for /// this pair of colliders, and configure them with the returned solver flags. For /// example, if this returns `Some(SolverFlags::COMPUTE_IMPULSES)` then the contacts /// will be taken into account by the constraints solver. If this returns /// `Some(SolverFlags::empty())` then the constraints solver will ignore these /// contacts. fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> { Some(SolverFlags::COMPUTE_IMPULSES) } /// Applies the intersection pair filter. /// /// Note that this method will only be called if at least one of the colliders /// involved in the contact contains the `ActiveHooks::FILTER_INTERSECTION_PAIR` flag /// in its physics hooks flags. /// /// User-defined filter for potential intersection pairs detected by the broad-phase.
/// This can be used to apply custom logic in order to decide whether two colliders /// should have their intersection computed by the narrow-phase. /// /// Note that using an intersection pair filter will replace the default intersection filtering /// which consists of preventing intersection computation between two non-dynamic bodies. /// /// This filtering method is called after taking into account the colliders' collision groups. /// /// If this returns `false`, then the narrow-phase will ignore this pair and /// not compute any intersection information for it. /// If this returns `true`, then the narrow-phase will compute intersection /// information for this pair. fn filter_intersection_pair(&self, _context: &PairFilterContext) -> bool { true } /// Modifies the set of contacts seen by the constraints solver. /// /// Note that this method will only be called if at least one of the colliders /// involved in the contact contains the `ActiveHooks::MODIFY_SOLVER_CONTACTS` flag /// in its physics hooks flags. /// /// By default, the content of `solver_contacts` is computed from `manifold.points`. /// This method will be called on each contact manifold that has the flag `SolverFlags::modify_solver_contacts` set. /// This method can be used to modify the set of solver contacts seen by the constraints solver: contacts /// can be removed and modified. /// /// Note that if all the contacts have to be ignored by the constraint solver, you may simply /// do `context.solver_contacts.clear()`. /// /// Modifying the solver contacts allows you to achieve various effects, including: /// - Simulating conveyor belts by setting the `surface_velocity` of a solver contact. /// - Simulating shapes with multiple materials by modifying the friction and restitution /// coefficients depending on the features in contact. /// - Simulating one-way platforms depending on the contact normal. /// /// Each contact manifold is given a `u32` of user-defined data that is persistent between /// timesteps (as long as the contact manifold exists). This user-defined data is initialized /// as 0 and can be modified in `context.user_data`. /// /// The world-space contact normal can be modified in `context.normal`. fn modify_solver_contacts(&self, _context: &mut ContactModificationContext) {} } impl PhysicsHooks for () { fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> { Some(SolverFlags::default()) } fn filter_intersection_pair(&self, _: &PairFilterContext) -> bool { true } fn modify_solver_contacts(&self, _: &mut ContactModificationContext) {} }
///
random_line_split
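The `update_as_oneway_platform` helper documented above is designed to be driven from `modify_solver_contacts`. A minimal sketch of wiring it up, assuming the `rapier3d` prelude re-exports the types defined in this module; the platform's collider must also enable `ActiveHooks::MODIFY_SOLVER_CONTACTS`, or the hook never runs:

```rust
use rapier3d::prelude::*;

struct OneWayPlatformHooks;

impl PhysicsHooks for OneWayPlatformHooks {
    fn modify_solver_contacts(&self, context: &mut ContactModificationContext) {
        // Keep contacts only when the platform is hit along its local +Y
        // axis, within roughly 30 degrees (the angle is in radians).
        context.update_as_oneway_platform(&Vector::y(), 0.52);
    }
}
```

The persistent `u32` in `context.user_data` is what lets the helper remember, across timesteps, whether contacts are currently allowed or forbidden.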
physics_hooks.rs
use crate::dynamics::{RigidBodyHandle, RigidBodySet}; use crate::geometry::{ColliderHandle, ColliderSet, ContactManifold, SolverContact, SolverFlags}; use crate::math::{Real, Vector}; use na::ComplexField; /// Context given to custom collision filters to filter-out collisions. pub struct PairFilterContext<'a> { /// The set of rigid-bodies. pub bodies: &'a RigidBodySet, /// The set of colliders. pub colliders: &'a ColliderSet, /// The handle of the first collider involved in the potential collision. pub collider1: ColliderHandle, /// The handle of the first collider involved in the potential collision. pub collider2: ColliderHandle, /// The handle of the first body involved in the potential collision. pub rigid_body1: Option<RigidBodyHandle>, /// The handle of the first body involved in the potential collision. pub rigid_body2: Option<RigidBodyHandle>, } /// Context given to custom contact modifiers to modify the contacts seen by the constraints solver. pub struct ContactModificationContext<'a> { /// The set of rigid-bodies. pub bodies: &'a RigidBodySet, /// The set of colliders. pub colliders: &'a ColliderSet, /// The handle of the first collider involved in the potential collision. pub collider1: ColliderHandle, /// The handle of the first collider involved in the potential collision. pub collider2: ColliderHandle, /// The handle of the first body involved in the potential collision. pub rigid_body1: Option<RigidBodyHandle>, /// The handle of the first body involved in the potential collision. pub rigid_body2: Option<RigidBodyHandle>, /// The contact manifold. pub manifold: &'a ContactManifold, /// The solver contacts that can be modified. pub solver_contacts: &'a mut Vec<SolverContact>, /// The contact normal that can be modified. pub normal: &'a mut Vector<Real>, /// User-defined data attached to the manifold. // NOTE: we keep this a &'a mut u32 to emphasize the // fact that this can be modified. pub user_data: &'a mut u32, } impl<'a> ContactModificationContext<'a> { /// Helper function to update `self` to emulate a oneway-platform. /// /// The "oneway" behavior will only allow contacts between two colliders /// if the local contact normal of the first collider involved in the contact /// is almost aligned with the provided `allowed_local_n1` direction. /// /// To make this method work properly it must be called as part of the /// `PhysicsHooks::modify_solver_contacts` method at each timestep, for each /// contact manifold involving a one-way platform. The `self.user_data` field /// must not be modified from the outside of this method. pub fn update_as_oneway_platform( &mut self, allowed_local_n1: &Vector<Real>, allowed_angle: Real, )
} bitflags::bitflags! { #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] /// Flags affecting the behavior of the constraints solver for a given contact manifold. pub struct ActiveHooks: u32 { /// If set, Rapier will call `PhysicsHooks::filter_contact_pair` whenever relevant. const FILTER_CONTACT_PAIRS = 0b0001; /// If set, Rapier will call `PhysicsHooks::filter_intersection_pair` whenever relevant. const FILTER_INTERSECTION_PAIR = 0b0010; /// If set, Rapier will call `PhysicsHooks::modify_solver_contacts` whenever relevant. const MODIFY_SOLVER_CONTACTS = 0b0100; } } impl Default for ActiveHooks { fn default() -> Self { ActiveHooks::empty() } } // TODO: right now, the wasm version doesn't have the Send+Sync bounds. // This is because these bounds are very difficult to fulfill if we want to // call JS closures. Also, parallelism cannot be enabled for wasm targets, so // not having Send+Sync isn't a problem. /// User-defined functions called by the physics engine during one timestep in order to customize its behavior. #[cfg(target_arch = "wasm32")] pub trait PhysicsHooks { /// Applies the contact pair filter. fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> { None } /// Applies the intersection pair filter. fn filter_intersection_pair(&self, _context: &PairFilterContext) -> bool { false } /// Modifies the set of contacts seen by the constraints solver. fn modify_solver_contacts(&self, _context: &mut ContactModificationContext) {} } /// User-defined functions called by the physics engine during one timestep in order to customize its behavior. #[cfg(not(target_arch = "wasm32"))] pub trait PhysicsHooks: Send + Sync { /// Applies the contact pair filter. /// /// Note that this method will only be called if at least one of the colliders /// involved in the contact contains the `ActiveHooks::FILTER_CONTACT_PAIRS` flag /// in its physics hooks flags. /// /// User-defined filter for potential contact pairs detected by the broad-phase. /// This can be used to apply custom logic in order to decide whether two colliders /// should have their contact computed by the narrow-phase, and if these contacts /// should be solved by the constraints solver. /// /// Note that using a contact pair filter will replace the default contact filtering /// which consists of preventing contact computation between two non-dynamic bodies. /// /// This filtering method is called after taking into account the colliders' collision groups. /// /// If this returns `None`, then the narrow-phase will ignore this contact pair and /// not compute any contact manifolds for it. /// If this returns `Some`, then the narrow-phase will compute contact manifolds for /// this pair of colliders, and configure them with the returned solver flags. For /// example, if this returns `Some(SolverFlags::COMPUTE_IMPULSES)` then the contacts /// will be taken into account by the constraints solver. If this returns /// `Some(SolverFlags::empty())` then the constraints solver will ignore these /// contacts. fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> { Some(SolverFlags::COMPUTE_IMPULSES) } /// Applies the intersection pair filter. /// /// Note that this method will only be called if at least one of the colliders /// involved in the contact contains the `ActiveHooks::FILTER_INTERSECTION_PAIR` flag /// in its physics hooks flags. /// /// User-defined filter for potential intersection pairs detected by the broad-phase.
/// /// This can be used to apply custom logic in order to decide whether two colliders /// should have their intersection computed by the narrow-phase. /// /// Note that using an intersection pair filter will replace the default intersection filtering /// which consists of preventing intersection computation between two non-dynamic bodies. /// /// This filtering method is called after taking into account the colliders' collision groups. /// /// If this returns `false`, then the narrow-phase will ignore this pair and /// not compute any intersection information for it. /// If this returns `true`, then the narrow-phase will compute intersection /// information for this pair. fn filter_intersection_pair(&self, _context: &PairFilterContext) -> bool { true } /// Modifies the set of contacts seen by the constraints solver. /// /// Note that this method will only be called if at least one of the colliders /// involved in the contact contains the `ActiveHooks::MODIFY_SOLVER_CONTACTS` flag /// in its physics hooks flags. /// /// By default, the content of `solver_contacts` is computed from `manifold.points`. /// This method will be called on each contact manifold that has the flag `SolverFlags::modify_solver_contacts` set. /// This method can be used to modify the set of solver contacts seen by the constraints solver: contacts /// can be removed and modified. /// /// Note that if all the contacts have to be ignored by the constraint solver, you may simply /// do `context.solver_contacts.clear()`. /// /// Modifying the solver contacts allows you to achieve various effects, including: /// - Simulating conveyor belts by setting the `surface_velocity` of a solver contact. /// - Simulating shapes with multiple materials by modifying the friction and restitution /// coefficients depending on the features in contact. /// - Simulating one-way platforms depending on the contact normal. /// /// Each contact manifold is given a `u32` of user-defined data that is persistent between /// timesteps (as long as the contact manifold exists). This user-defined data is initialized /// as 0 and can be modified in `context.user_data`. /// /// The world-space contact normal can be modified in `context.normal`. fn modify_solver_contacts(&self, _context: &mut ContactModificationContext) {} } impl PhysicsHooks for () { fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> { Some(SolverFlags::default()) } fn filter_intersection_pair(&self, _: &PairFilterContext) -> bool { true } fn modify_solver_contacts(&self, _: &mut ContactModificationContext) {} }
{ const CONTACT_CONFIGURATION_UNKNOWN: u32 = 0; const CONTACT_CURRENTLY_ALLOWED: u32 = 1; const CONTACT_CURRENTLY_FORBIDDEN: u32 = 2; let cang = ComplexField::cos(allowed_angle); // Test the allowed normal with the local-space contact normal that // points towards the exterior of context.collider1. let contact_is_ok = self.manifold.local_n1.dot(&allowed_local_n1) >= cang; match *self.user_data { CONTACT_CONFIGURATION_UNKNOWN => { if contact_is_ok { // The contact is close enough to the allowed normal. *self.user_data = CONTACT_CURRENTLY_ALLOWED; } else { // The contact normal isn't close enough to the allowed // normal, so remove all the contacts and mark further contacts // as forbidden. self.solver_contacts.clear(); // NOTE: in some very rare cases `local_n1` will be // zero if the objects are exactly touching at one point. // So in this case we can't really conclude. // If the norm is non-zero, then we can tell we need to forbid // further contacts. Otherwise we have to wait for the next frame. if self.manifold.local_n1.norm_squared() > 0.1 { *self.user_data = CONTACT_CURRENTLY_FORBIDDEN; } } } CONTACT_CURRENTLY_FORBIDDEN => { // Contacts are forbidden so we need to continue forbidding contacts // until all the contacts are non-penetrating again. In that case, if // the contacts are OK wrt. the contact normal, then we can mark them as allowed. if contact_is_ok && self.solver_contacts.iter().all(|c| c.dist > 0.0) { *self.user_data = CONTACT_CURRENTLY_ALLOWED; } else { // Discard all the contacts. self.solver_contacts.clear(); } } CONTACT_CURRENTLY_ALLOWED => { // We allow all the contacts right now. The configuration becomes // uncertain again when the contact manifold no longer contains any contact. if self.solver_contacts.is_empty() { *self.user_data = CONTACT_CONFIGURATION_UNKNOWN; } } _ => unreachable!(), } }
identifier_body
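`filter_contact_pair` is the other commonly customized hook. A sketch that suppresses contacts between colliders tagged with the same `user_data` value, again assuming the `rapier3d` prelude; the team-tagging convention is hypothetical, and at least one collider of each pair must enable `ActiveHooks::FILTER_CONTACT_PAIRS`:

```rust
use rapier3d::prelude::*;

struct SameTeamFilter;

impl PhysicsHooks for SameTeamFilter {
    fn filter_contact_pair(&self, context: &PairFilterContext) -> Option<SolverFlags> {
        let c1 = &context.colliders[context.collider1];
        let c2 = &context.colliders[context.collider2];
        if c1.user_data == c2.user_data {
            // Same team: skip narrow-phase contact generation entirely.
            None
        } else {
            // Different teams: compute contacts and solve them normally.
            Some(SolverFlags::COMPUTE_IMPULSES)
        }
    }
}
```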
physics_hooks.rs
use crate::dynamics::{RigidBodyHandle, RigidBodySet}; use crate::geometry::{ColliderHandle, ColliderSet, ContactManifold, SolverContact, SolverFlags}; use crate::math::{Real, Vector}; use na::ComplexField; /// Context given to custom collision filters to filter-out collisions. pub struct PairFilterContext<'a> { /// The set of rigid-bodies. pub bodies: &'a RigidBodySet, /// The set of colliders. pub colliders: &'a ColliderSet, /// The handle of the first collider involved in the potential collision. pub collider1: ColliderHandle, /// The handle of the first collider involved in the potential collision. pub collider2: ColliderHandle, /// The handle of the first body involved in the potential collision. pub rigid_body1: Option<RigidBodyHandle>, /// The handle of the first body involved in the potential collision. pub rigid_body2: Option<RigidBodyHandle>, } /// Context given to custom contact modifiers to modify the contacts seen by the constraints solver. pub struct ContactModificationContext<'a> { /// The set of rigid-bodies. pub bodies: &'a RigidBodySet, /// The set of colliders. pub colliders: &'a ColliderSet, /// The handle of the first collider involved in the potential collision. pub collider1: ColliderHandle, /// The handle of the first collider involved in the potential collision. pub collider2: ColliderHandle, /// The handle of the first body involved in the potential collision. pub rigid_body1: Option<RigidBodyHandle>, /// The handle of the first body involved in the potential collision. pub rigid_body2: Option<RigidBodyHandle>, /// The contact manifold. pub manifold: &'a ContactManifold, /// The solver contacts that can be modified. pub solver_contacts: &'a mut Vec<SolverContact>, /// The contact normal that can be modified. pub normal: &'a mut Vector<Real>, /// User-defined data attached to the manifold. // NOTE: we keep this a &'a mut u32 to emphasize the // fact that this can be modified. pub user_data: &'a mut u32, } impl<'a> ContactModificationContext<'a> { /// Helper function to update `self` to emulate a oneway-platform. /// /// The "oneway" behavior will only allow contacts between two colliders /// if the local contact normal of the first collider involved in the contact /// is almost aligned with the provided `allowed_local_n1` direction. /// /// To make this method work properly it must be called as part of the /// `PhysicsHooks::modify_solver_contacts` method at each timestep, for each /// contact manifold involving a one-way platform. The `self.user_data` field /// must not be modified from the outside of this method. pub fn update_as_oneway_platform( &mut self, allowed_local_n1: &Vector<Real>, allowed_angle: Real, ) { const CONTACT_CONFIGURATION_UNKNOWN: u32 = 0; const CONTACT_CURRENTLY_ALLOWED: u32 = 1; const CONTACT_CURRENTLY_FORBIDDEN: u32 = 2; let cang = ComplexField::cos(allowed_angle); // Test the allowed normal with the local-space contact normal that // points towards the exterior of context.collider1. let contact_is_ok = self.manifold.local_n1.dot(&allowed_local_n1) >= cang; match *self.user_data { CONTACT_CONFIGURATION_UNKNOWN => { if contact_is_ok { // The contact is close enough to the allowed normal. *self.user_data = CONTACT_CURRENTLY_ALLOWED; } else { // The contact normal isn't close enough to the allowed // normal, so remove all the contacts and mark further contacts // as forbidden. self.solver_contacts.clear(); // NOTE: in some very rare cases `local_n1` will be // zero if the objects are exactly touching at one point. 
// So in this case we can't really conclude. // If the norm is non-zero, then we can tell we need to forbid // further contacts. Otherwise we have to wait for the next frame. if self.manifold.local_n1.norm_squared() > 0.1 { *self.user_data = CONTACT_CURRENTLY_FORBIDDEN; } } } CONTACT_CURRENTLY_FORBIDDEN => { // Contacts are forbidden so we need to continue forbidding contacts // until all the contacts are non-penetrating again. In that case, if // the contacts are OK wrt. the contact normal, then we can mark them as allowed. if contact_is_ok && self.solver_contacts.iter().all(|c| c.dist > 0.0) { *self.user_data = CONTACT_CURRENTLY_ALLOWED; } else
} CONTACT_CURRENTLY_ALLOWED => { // We allow all the contacts right now. The configuration becomes // uncertain again when the contact manifold no longer contains any contact. if self.solver_contacts.is_empty() { *self.user_data = CONTACT_CONFIGURATION_UNKNOWN; } } _ => unreachable!(), } } } bitflags::bitflags! { #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] /// Flags affecting the behavior of the constraints solver for a given contact manifold. pub struct ActiveHooks: u32 { /// If set, Rapier will call `PhysicsHooks::filter_contact_pair` whenever relevant. const FILTER_CONTACT_PAIRS = 0b0001; /// If set, Rapier will call `PhysicsHooks::filter_intersection_pair` whenever relevant. const FILTER_INTERSECTION_PAIR = 0b0010; /// If set, Rapier will call `PhysicsHooks::modify_solver_contacts` whenever relevant. const MODIFY_SOLVER_CONTACTS = 0b0100; } } impl Default for ActiveHooks { fn default() -> Self { ActiveHooks::empty() } } // TODO: right now, the wasm version doesn't have the Send+Sync bounds. // This is because these bounds are very difficult to fulfill if we want to // call JS closures. Also, parallelism cannot be enabled for wasm targets, so // not having Send+Sync isn't a problem. /// User-defined functions called by the physics engine during one timestep in order to customize its behavior. #[cfg(target_arch = "wasm32")] pub trait PhysicsHooks { /// Applies the contact pair filter. fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> { None } /// Applies the intersection pair filter. fn filter_intersection_pair(&self, _context: &PairFilterContext) -> bool { false } /// Modifies the set of contacts seen by the constraints solver. fn modify_solver_contacts(&self, _context: &mut ContactModificationContext) {} } /// User-defined functions called by the physics engine during one timestep in order to customize its behavior. #[cfg(not(target_arch = "wasm32"))] pub trait PhysicsHooks: Send + Sync { /// Applies the contact pair filter. /// /// Note that this method will only be called if at least one of the colliders /// involved in the contact contains the `ActiveHooks::FILTER_CONTACT_PAIRS` flag /// in its physics hooks flags. /// /// User-defined filter for potential contact pairs detected by the broad-phase. /// This can be used to apply custom logic in order to decide whether two colliders /// should have their contacts computed by the narrow-phase, and if these contacts /// should be solved by the constraints solver. /// /// Note that using a contact pair filter will replace the default contact filtering /// which consists of preventing contact computation between two non-dynamic bodies. /// /// This filtering method is called after taking into account the colliders' collision groups. /// /// If this returns `None`, then the narrow-phase will ignore this contact pair and /// not compute any contact manifolds for it. /// If this returns `Some`, then the narrow-phase will compute contact manifolds for /// this pair of colliders, and configure them with the returned solver flags. For /// example, if this returns `Some(SolverFlags::COMPUTE_IMPULSES)` then the contacts /// will be taken into account by the constraints solver. If this returns /// `Some(SolverFlags::empty())` then the constraints solver will ignore these /// contacts. fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> { Some(SolverFlags::COMPUTE_IMPULSES) } /// Applies the intersection pair filter. 
/// /// Note that this method will only be called if at least one of the colliders /// involved in the contact contains the `ActiveHooks::FILTER_INTERSECTION_PAIR` flag /// in its physics hooks flags. /// /// User-defined filter for potential intersection pairs detected by the broad-phase. /// /// This can be used to apply custom logic in order to decide whether two colliders /// should have their intersection computed by the narrow-phase. /// /// Note that using an intersection pair filter will replace the default intersection filtering /// which consists of preventing intersection computation between two non-dynamic bodies. /// /// This filtering method is called after taking into account the colliders' collision groups. /// /// If this returns `false`, then the narrow-phase will ignore this pair and /// not compute any intersection information for it. /// If this returns `true`, then the narrow-phase will compute intersection /// information for this pair. fn filter_intersection_pair(&self, _context: &PairFilterContext) -> bool { true } /// Modifies the set of contacts seen by the constraints solver. /// /// Note that this method will only be called if at least one of the colliders /// involved in the contact contains the `ActiveHooks::MODIFY_SOLVER_CONTACTS` flag /// in its physics hooks flags. /// /// By default, the content of `solver_contacts` is computed from `manifold.points`. /// This method will be called on each contact manifold which has the `ActiveHooks::MODIFY_SOLVER_CONTACTS` flag set. /// This method can be used to modify the set of solver contacts seen by the constraints solver: contacts /// can be removed and modified. /// /// Note that if all the contacts have to be ignored by the constraint solver, you may simply /// do `context.solver_contacts.clear()`. /// /// Modifying the solver contacts allows you to achieve various effects, including: /// - Simulating conveyor belts by setting the `surface_velocity` of a solver contact. /// - Simulating shapes with multiple materials by modifying the friction and restitution /// coefficients depending on the features in contact. /// - Simulating one-way platforms depending on the contact normal. /// /// Each contact manifold is given a `u32` of user-defined data that is persistent between /// timesteps (as long as the contact manifold exists). This user-defined data is initialized /// to 0 and can be modified in `context.user_data`. /// /// The world-space contact normal can be modified in `context.normal`. fn modify_solver_contacts(&self, _context: &mut ContactModificationContext) {} } impl PhysicsHooks for () { fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> { Some(SolverFlags::default()) } fn filter_intersection_pair(&self, _: &PairFilterContext) -> bool { true } fn modify_solver_contacts(&self, _: &mut ContactModificationContext) {} }
{ // Discard all the contacts. self.solver_contacts.clear(); }
conditional_block
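The row above documents `ContactModificationContext::update_as_oneway_platform`, which is meant to be driven from a `PhysicsHooks::modify_solver_contacts` implementation at every timestep. Below is a minimal sketch of that wiring using only items defined in this file; the `OneWayPlatformHook` type, its `platform` field, the `Vector::y()` allowed direction, and the 0.52 rad (roughly 30 degrees) tolerance are all hypothetical choices, and the platform collider is assumed to have `ActiveHooks::MODIFY_SOLVER_CONTACTS` enabled so the hook actually runs.

// Hypothetical hook that turns one designated collider into a one-way platform.
struct OneWayPlatformHook {
    // Handle of the collider acting as the platform, stored at setup time.
    platform: ColliderHandle,
}

impl PhysicsHooks for OneWayPlatformHook {
    fn modify_solver_contacts(&self, context: &mut ContactModificationContext) {
        // `update_as_oneway_platform` tests the local normal of the *first*
        // collider, so this sketch only handles the platform being `collider1`;
        // a robust version would also handle `collider2` with a flipped
        // allowed direction.
        if context.collider1 == self.platform {
            // Keep contacts only while the platform's local up axis and the
            // contact normal stay within roughly 30 degrees of each other.
            context.update_as_oneway_platform(&Vector::y(), 0.52);
        }
    }
}

The persistent `context.user_data` word is what lets the helper's three-state machine (unknown / allowed / forbidden) survive across timesteps, as its doc comment requires.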
physics_hooks.rs
use crate::dynamics::{RigidBodyHandle, RigidBodySet}; use crate::geometry::{ColliderHandle, ColliderSet, ContactManifold, SolverContact, SolverFlags}; use crate::math::{Real, Vector}; use na::ComplexField; /// Context given to custom collision filters to filter out collisions. pub struct PairFilterContext<'a> { /// The set of rigid-bodies. pub bodies: &'a RigidBodySet, /// The set of colliders. pub colliders: &'a ColliderSet, /// The handle of the first collider involved in the potential collision. pub collider1: ColliderHandle, /// The handle of the second collider involved in the potential collision. pub collider2: ColliderHandle, /// The handle of the first body involved in the potential collision. pub rigid_body1: Option<RigidBodyHandle>, /// The handle of the second body involved in the potential collision. pub rigid_body2: Option<RigidBodyHandle>, } /// Context given to custom contact modifiers to modify the contacts seen by the constraints solver. pub struct ContactModificationContext<'a> { /// The set of rigid-bodies. pub bodies: &'a RigidBodySet, /// The set of colliders. pub colliders: &'a ColliderSet, /// The handle of the first collider involved in the potential collision. pub collider1: ColliderHandle, /// The handle of the second collider involved in the potential collision. pub collider2: ColliderHandle, /// The handle of the first body involved in the potential collision. pub rigid_body1: Option<RigidBodyHandle>, /// The handle of the second body involved in the potential collision. pub rigid_body2: Option<RigidBodyHandle>, /// The contact manifold. pub manifold: &'a ContactManifold, /// The solver contacts that can be modified. pub solver_contacts: &'a mut Vec<SolverContact>, /// The contact normal that can be modified. pub normal: &'a mut Vector<Real>, /// User-defined data attached to the manifold. // NOTE: we keep this a &'a mut u32 to emphasize the // fact that this can be modified. pub user_data: &'a mut u32, } impl<'a> ContactModificationContext<'a> { /// Helper function to update `self` to emulate a one-way platform. /// /// The "one-way" behavior will only allow contacts between two colliders /// if the local contact normal of the first collider involved in the contact /// is almost aligned with the provided `allowed_local_n1` direction. /// /// To make this method work properly it must be called as part of the /// `PhysicsHooks::modify_solver_contacts` method at each timestep, for each /// contact manifold involving a one-way platform. The `self.user_data` field /// must not be modified from the outside of this method. pub fn update_as_oneway_platform( &mut self, allowed_local_n1: &Vector<Real>, allowed_angle: Real, ) { const CONTACT_CONFIGURATION_UNKNOWN: u32 = 0; const CONTACT_CURRENTLY_ALLOWED: u32 = 1; const CONTACT_CURRENTLY_FORBIDDEN: u32 = 2; let cang = ComplexField::cos(allowed_angle); // Test the allowed normal with the local-space contact normal that // points towards the exterior of context.collider1. let contact_is_ok = self.manifold.local_n1.dot(&allowed_local_n1) >= cang; match *self.user_data { CONTACT_CONFIGURATION_UNKNOWN => { if contact_is_ok { // The contact is close enough to the allowed normal. *self.user_data = CONTACT_CURRENTLY_ALLOWED; } else { // The contact normal isn't close enough to the allowed // normal, so remove all the contacts and mark further contacts // as forbidden. self.solver_contacts.clear(); // NOTE: in some very rare cases `local_n1` will be // zero if the objects are exactly touching at one point. 
// So in this case we can't really conclude. // If the norm is non-zero, then we can tell we need to forbid // further contacts. Otherwise we have to wait for the next frame. if self.manifold.local_n1.norm_squared() > 0.1 { *self.user_data = CONTACT_CURRENTLY_FORBIDDEN; } } } CONTACT_CURRENTLY_FORBIDDEN => { // Contacts are forbidden so we need to continue forbidding contacts // until all the contacts are non-penetrating again. In that case, if // the contacts are OK wrt. the contact normal, then we can mark them as allowed. if contact_is_ok && self.solver_contacts.iter().all(|c| c.dist > 0.0) { *self.user_data = CONTACT_CURRENTLY_ALLOWED; } else { // Discard all the contacts. self.solver_contacts.clear(); } } CONTACT_CURRENTLY_ALLOWED => { // We allow all the contacts right now. The configuration becomes // uncertain again when the contact manifold no longer contains any contact. if self.solver_contacts.is_empty() { *self.user_data = CONTACT_CONFIGURATION_UNKNOWN; } } _ => unreachable!(), } } } bitflags::bitflags! { #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] /// Flags affecting the behavior of the constraints solver for a given contact manifold. pub struct ActiveHooks: u32 { /// If set, Rapier will call `PhysicsHooks::filter_contact_pair` whenever relevant. const FILTER_CONTACT_PAIRS = 0b0001; /// If set, Rapier will call `PhysicsHooks::filter_intersection_pair` whenever relevant. const FILTER_INTERSECTION_PAIR = 0b0010; /// If set, Rapier will call `PhysicsHooks::modify_solver_contacts` whenever relevant. const MODIFY_SOLVER_CONTACTS = 0b0100; } } impl Default for ActiveHooks { fn default() -> Self { ActiveHooks::empty() } } // TODO: right now, the wasm version doesn't have the Send+Sync bounds. // This is because these bounds are very difficult to fulfill if we want to // call JS closures. Also, parallelism cannot be enabled for wasm targets, so // not having Send+Sync isn't a problem. /// User-defined functions called by the physics engine during one timestep in order to customize its behavior. #[cfg(target_arch = "wasm32")] pub trait PhysicsHooks { /// Applies the contact pair filter. fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> { None } /// Applies the intersection pair filter. fn filter_intersection_pair(&self, _context: &PairFilterContext) -> bool { false } /// Modifies the set of contacts seen by the constraints solver. fn modify_solver_contacts(&self, _context: &mut ContactModificationContext) {} } /// User-defined functions called by the physics engine during one timestep in order to customize its behavior. #[cfg(not(target_arch = "wasm32"))] pub trait PhysicsHooks: Send + Sync { /// Applies the contact pair filter. /// /// Note that this method will only be called if at least one of the colliders /// involved in the contact contains the `ActiveHooks::FILTER_CONTACT_PAIRS` flag /// in its physics hooks flags. /// /// User-defined filter for potential contact pairs detected by the broad-phase. /// This can be used to apply custom logic in order to decide whether two colliders /// should have their contacts computed by the narrow-phase, and if these contacts /// should be solved by the constraints solver. /// /// Note that using a contact pair filter will replace the default contact filtering /// which consists of preventing contact computation between two non-dynamic bodies. /// /// This filtering method is called after taking into account the colliders' collision groups. 
/// /// If this returns `None`, then the narrow-phase will ignore this contact pair and /// not compute any contact manifolds for it. /// If this returns `Some`, then the narrow-phase will compute contact manifolds for /// this pair of colliders, and configure them with the returned solver flags. For /// example, if this returns `Some(SolverFlags::COMPUTE_IMPULSES)` then the contacts /// will be taken into account by the constraints solver. If this returns /// `Some(SolverFlags::empty())` then the constraints solver will ignore these /// contacts. fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> { Some(SolverFlags::COMPUTE_IMPULSES) } /// Applies the intersection pair filter. /// /// Note that this method will only be called if at least one of the colliders /// involved in the contact contains the `ActiveHooks::FILTER_INTERSECTION_PAIR` flag /// in its physics hooks flags. /// /// User-defined filter for potential intersection pairs detected by the broad-phase. /// /// This can be used to apply custom logic in order to decide whether two colliders /// should have their intersection computed by the narrow-phase. /// /// Note that using an intersection pair filter will replace the default intersection filtering /// which consists of preventing intersection computation between two non-dynamic bodies. /// /// This filtering method is called after taking into account the colliders' collision groups. /// /// If this returns `false`, then the narrow-phase will ignore this pair and /// not compute any intersection information for it. /// If this returns `true`, then the narrow-phase will compute intersection /// information for this pair. fn filter_intersection_pair(&self, _context: &PairFilterContext) -> bool { true } /// Modifies the set of contacts seen by the constraints solver. /// /// Note that this method will only be called if at least one of the colliders /// involved in the contact contains the `ActiveHooks::MODIFY_SOLVER_CONTACTS` flag /// in its physics hooks flags. /// /// By default, the content of `solver_contacts` is computed from `manifold.points`. /// This method will be called on each contact manifold which has the `ActiveHooks::MODIFY_SOLVER_CONTACTS` flag set. /// This method can be used to modify the set of solver contacts seen by the constraints solver: contacts /// can be removed and modified. /// /// Note that if all the contacts have to be ignored by the constraint solver, you may simply /// do `context.solver_contacts.clear()`. /// /// Modifying the solver contacts allows you to achieve various effects, including: /// - Simulating conveyor belts by setting the `surface_velocity` of a solver contact. /// - Simulating shapes with multiple materials by modifying the friction and restitution /// coefficients depending on the features in contact. /// - Simulating one-way platforms depending on the contact normal. /// /// Each contact manifold is given a `u32` of user-defined data that is persistent between /// timesteps (as long as the contact manifold exists). This user-defined data is initialized /// to 0 and can be modified in `context.user_data`. /// /// The world-space contact normal can be modified in `context.normal`. fn modify_solver_contacts(&self, _context: &mut ContactModificationContext) {} } impl PhysicsHooks for () { fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> { Some(SolverFlags::default()) } fn
(&self, _: &PairFilterContext) -> bool { true } fn modify_solver_contacts(&self, _: &mut ContactModificationContext) {} }
filter_intersection_pair
identifier_name
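The `filter_contact_pair` documentation above spells out a three-way contract: `None` skips the pair entirely, `Some(SolverFlags::COMPUTE_IMPULSES)` computes and solves the contacts, and `Some(SolverFlags::empty())` computes contacts the solver then ignores. As a concrete sketch of that contract, here is a hypothetical filter that skips narrow-phase work for collider pairs attached to the same rigid-body; the `SameBodyFilter` name is illustrative, and the colliders would also need `ActiveHooks::FILTER_CONTACT_PAIRS` set for the hook to run at all.

struct SameBodyFilter;

impl PhysicsHooks for SameBodyFilter {
    fn filter_contact_pair(&self, context: &PairFilterContext) -> Option<SolverFlags> {
        match (context.rigid_body1, context.rigid_body2) {
            // Same parent body: returning `None` makes the narrow-phase skip
            // this pair entirely, so no contact manifolds are computed.
            (Some(b1), Some(b2)) if b1 == b2 => None,
            // Every other pair keeps the default behavior: compute the
            // contacts and let the constraints solver apply impulses.
            _ => Some(SolverFlags::COMPUTE_IMPULSES),
        }
    }
}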
envmon_sensor_info.pb.go
/* Copyright 2019 Cisco Systems Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by protoc-gen-go. DO NOT EDIT. // source: envmon_sensor_info.proto package cisco_ios_xr_invmgr_oper_inventory_racks_rack_entity_slot_tsi1s_tsi1_tsi2s_tsi2_tsi3s_tsi3_tsi4s_tsi4_tsi5s_tsi5_tsi6s_tsi6_attributes_env_sensor_info import ( fmt "fmt" proto "github.com/golang/protobuf/proto" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type EnvmonSensorInfo_KEYS struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Name_1 string `protobuf:"bytes,2,opt,name=name_1,json=name1,proto3" json:"name_1,omitempty"` Name_2 string `protobuf:"bytes,3,opt,name=name_2,json=name2,proto3" json:"name_2,omitempty"` Name_3 string `protobuf:"bytes,4,opt,name=name_3,json=name3,proto3" json:"name_3,omitempty"` Name_4 string `protobuf:"bytes,5,opt,name=name_4,json=name4,proto3" json:"name_4,omitempty"` Name_5 string `protobuf:"bytes,6,opt,name=name_5,json=name5,proto3" json:"name_5,omitempty"` Name_6 string `protobuf:"bytes,7,opt,name=name_6,json=name6,proto3" json:"name_6,omitempty"` Name_7 string `protobuf:"bytes,8,opt,name=name_7,json=name7,proto3" json:"name_7,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *EnvmonSensorInfo_KEYS) Reset() { *m = EnvmonSensorInfo_KEYS{} } func (m *EnvmonSensorInfo_KEYS) String() string { return proto.CompactTextString(m) } func (*EnvmonSensorInfo_KEYS) ProtoMessage() {} func (*EnvmonSensorInfo_KEYS) Descriptor() ([]byte, []int) { return fileDescriptor_bc03e94ffc42a321, []int{0} } func (m *EnvmonSensorInfo_KEYS) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EnvmonSensorInfo_KEYS.Unmarshal(m, b) } func (m *EnvmonSensorInfo_KEYS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_EnvmonSensorInfo_KEYS.Marshal(b, m, deterministic) } func (m *EnvmonSensorInfo_KEYS) XXX_Merge(src proto.Message) { xxx_messageInfo_EnvmonSensorInfo_KEYS.Merge(m, src) } func (m *EnvmonSensorInfo_KEYS) XXX_Size() int { return xxx_messageInfo_EnvmonSensorInfo_KEYS.Size(m) } func (m *EnvmonSensorInfo_KEYS) XXX_DiscardUnknown() { xxx_messageInfo_EnvmonSensorInfo_KEYS.DiscardUnknown(m) } var xxx_messageInfo_EnvmonSensorInfo_KEYS proto.InternalMessageInfo func (m *EnvmonSensorInfo_KEYS) GetName() string { if m != nil { return m.Name } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_1() string { if m != nil { return m.Name_1 } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_2() string { if m != nil { return m.Name_2 } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_3() string { if m 
!= nil { return m.Name_3 } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_4() string { if m != nil { return m.Name_4 } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_5() string { if m != nil { return m.Name_5 } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_6() string { if m != nil { return m.Name_6 } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_7() string { if m != nil
return "" } type EnvmonSensorInfo struct { FieldValidityBitmap string `protobuf:"bytes,50,opt,name=field_validity_bitmap,json=fieldValidityBitmap,proto3" json:"field_validity_bitmap,omitempty"` DeviceDescription string `protobuf:"bytes,51,opt,name=device_description,json=deviceDescription,proto3" json:"device_description,omitempty"` Units string `protobuf:"bytes,52,opt,name=units,proto3" json:"units,omitempty"` DeviceId uint32 `protobuf:"varint,53,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"` Value uint32 `protobuf:"varint,54,opt,name=value,proto3" json:"value,omitempty"` AlarmType uint32 `protobuf:"varint,55,opt,name=alarm_type,json=alarmType,proto3" json:"alarm_type,omitempty"` DataType uint32 `protobuf:"varint,56,opt,name=data_type,json=dataType,proto3" json:"data_type,omitempty"` Scale uint32 `protobuf:"varint,57,opt,name=scale,proto3" json:"scale,omitempty"` Precision uint32 `protobuf:"varint,58,opt,name=precision,proto3" json:"precision,omitempty"` Status uint32 `protobuf:"varint,59,opt,name=status,proto3" json:"status,omitempty"` AgeTimeStamp uint32 `protobuf:"varint,60,opt,name=age_time_stamp,json=ageTimeStamp,proto3" json:"age_time_stamp,omitempty"` UpdateRate uint32 `protobuf:"varint,61,opt,name=update_rate,json=updateRate,proto3" json:"update_rate,omitempty"` Average int32 `protobuf:"zigzag32,62,opt,name=average,proto3" json:"average,omitempty"` Minimum int32 `protobuf:"zigzag32,63,opt,name=minimum,proto3" json:"minimum,omitempty"` Maximum int32 `protobuf:"zigzag32,64,opt,name=maximum,proto3" json:"maximum,omitempty"` Interval int32 `protobuf:"zigzag32,65,opt,name=interval,proto3" json:"interval,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *EnvmonSensorInfo) Reset() { *m = EnvmonSensorInfo{} } func (m *EnvmonSensorInfo) String() string { return proto.CompactTextString(m) } func (*EnvmonSensorInfo) ProtoMessage() {} func (*EnvmonSensorInfo) Descriptor() ([]byte, []int) { return fileDescriptor_bc03e94ffc42a321, []int{1} } func (m *EnvmonSensorInfo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EnvmonSensorInfo.Unmarshal(m, b) } func (m *EnvmonSensorInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_EnvmonSensorInfo.Marshal(b, m, deterministic) } func (m *EnvmonSensorInfo) XXX_Merge(src proto.Message) { xxx_messageInfo_EnvmonSensorInfo.Merge(m, src) } func (m *EnvmonSensorInfo) XXX_Size() int { return xxx_messageInfo_EnvmonSensorInfo.Size(m) } func (m *EnvmonSensorInfo) XXX_DiscardUnknown() { xxx_messageInfo_EnvmonSensorInfo.DiscardUnknown(m) } var xxx_messageInfo_EnvmonSensorInfo proto.InternalMessageInfo func (m *EnvmonSensorInfo) GetFieldValidityBitmap() string { if m != nil { return m.FieldValidityBitmap } return "" } func (m *EnvmonSensorInfo) GetDeviceDescription() string { if m != nil { return m.DeviceDescription } return "" } func (m *EnvmonSensorInfo) GetUnits() string { if m != nil { return m.Units } return "" } func (m *EnvmonSensorInfo) GetDeviceId() uint32 { if m != nil { return m.DeviceId } return 0 } func (m *EnvmonSensorInfo) GetValue() uint32 { if m != nil { return m.Value } return 0 } func (m *EnvmonSensorInfo) GetAlarmType() uint32 { if m != nil { return m.AlarmType } return 0 } func (m *EnvmonSensorInfo) GetDataType() uint32 { if m != nil { return m.DataType } return 0 } func (m *EnvmonSensorInfo) GetScale() uint32 { if m != nil { return m.Scale } return 0 } func (m *EnvmonSensorInfo) GetPrecision() uint32 { if 
m != nil { return m.Precision } return 0 } func (m *EnvmonSensorInfo) GetStatus() uint32 { if m != nil { return m.Status } return 0 } func (m *EnvmonSensorInfo) GetAgeTimeStamp() uint32 { if m != nil { return m.AgeTimeStamp } return 0 } func (m *EnvmonSensorInfo) GetUpdateRate() uint32 { if m != nil { return m.UpdateRate } return 0 } func (m *EnvmonSensorInfo) GetAverage() int32 { if m != nil { return m.Average } return 0 } func (m *EnvmonSensorInfo) GetMinimum() int32 { if m != nil { return m.Minimum } return 0 } func (m *EnvmonSensorInfo) GetMaximum() int32 { if m != nil { return m.Maximum } return 0 } func (m *EnvmonSensorInfo) GetInterval() int32 { if m != nil { return m.Interval } return 0 } func init() { proto.RegisterType((*EnvmonSensorInfo_KEYS)(nil), "cisco_ios_xr_invmgr_oper.inventory.racks.rack.entity.slot.tsi1s.tsi1.tsi2s.tsi2.tsi3s.tsi3.tsi4s.tsi4.tsi5s.tsi5.tsi6s.tsi6.attributes.env_sensor_info.envmon_sensor_info_KEYS") proto.RegisterType((*EnvmonSensorInfo)(nil), "cisco_ios_xr_invmgr_oper.inventory.racks.rack.entity.slot.tsi1s.tsi1.tsi2s.tsi2.tsi3s.tsi3.tsi4s.tsi4.tsi5s.tsi5.tsi6s.tsi6.attributes.env_sensor_info.envmon_sensor_info") } func init() { proto.RegisterFile("envmon_sensor_info.proto", fileDescriptor_bc03e94ffc42a321) } var fileDescriptor_bc03e94ffc42a321 = []byte{ // 506 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x93, 0xcd, 0x6e, 0xd3, 0x4c, 0x14, 0x86, 0xe5, 0xaf, 0x4d, 0xda, 0xcc, 0x07, 0x48, 0x1d, 0x5a, 0x18, 0xf1, 0x23, 0xaa, 0x8a, 0x45, 0x37, 0x58, 0xaa, 0x9d, 0xa4, 0xfc, 0xff, 0x09, 0x16, 0x88, 0x5d, 0x5a, 0x21, 0xb1, 0x1a, 0x9d, 0xd8, 0xa7, 0xd1, 0x08, 0x7b, 0x6c, 0xcd, 0x1c, 0x5b, 0xcd, 0x8d, 0x70, 0x59, 0xdc, 0x0f, 0x3b, 0x34, 0xc7, 0xc6, 0x46, 0xea, 0xe6, 0xcd, 0x79, 0x9f, 0x27, 0x73, 0x34, 0xb3, 0xb0, 0x50, 0x68, 0xdb, 0xb2, 0xb2, 0xda, 0xa3, 0xf5, 0x95, 0xd3, 0xc6, 0x5e, 0x55, 0x71, 0xed, 0x2a, 0xaa, 0xe4, 0xcf, 0x28, 0x33, 0x3e, 0xab, 0xb4, 0xa9, 0xbc, 0xbe, 0x0e, 0xa6, 0x2d, 0x37, 0x4e, 0x57, 0x35, 0xba, 0xd8, 0xd8, 0x16, 0x2d, 0x55, 0x6e, 0x1b, 0x3b, 0xc8, 0x7e, 0x78, 0xce, 0x18, 0x2d, 0x19, 0xda, 0xc6, 0xbe, 0xa8, 0x28, 0x26, 0x6f, 0xce, 0x3c, 0x67, 0x88, 0x84, 0xc7, 0x24, 0x44, 0xca, 0x63, 0x1a, 0x62, 0xce, 0xe3, 0x3c, 0xc4, 0x82, 0xc7, 0x45, 0x88, 0x25, 0x8f, 0xcb, 0x18, 0x88, 0x9c, 0x59, 0x37, 0x84, 0x3e, 0x46, 0xdb, 0xfe, 0x7b, 0xbd, 0x93, 0x5f, 0x91, 0xb8, 0x7f, 0xf3, 0xd6, 0xfa, 0xeb, 0xe7, 0xef, 0x17, 0x52, 0x8a, 0x5d, 0x0b, 0x25, 0xaa, 0xe8, 0x38, 0x3a, 0x9d, 0xad, 0x78, 0x96, 0x47, 0x62, 0x1a, 0x7e, 0xf5, 0x99, 0xfa, 0x8f, 0xe9, 0x24, 0xb4, 0xb3, 0x01, 0x27, 0x6a, 0x67, 0xc4, 0xc9, 0x80, 0x53, 0xb5, 0x3b, 0xe2, 0x74, 0xc0, 0x73, 0x35, 0x19, 0xf1, 0x7c, 0xc0, 0x0b, 0x35, 0x1d, 0xf1, 0x62, 0xc0, 0x4b, 0xb5, 0x37, 0xe2, 0xe5, 0x80, 0xcf, 0xd5, 0xfe, 0x88, 0xcf, 0x4f, 0x7e, 0xef, 0x08, 0x79, 0xf3, 0x41, 0x32, 0x11, 0x47, 0x57, 0x06, 0x8b, 0x5c, 0xb7, 0x50, 0x98, 0xdc, 0xd0, 0x56, 0xaf, 0x0d, 0x95, 0x50, 0xab, 0x84, 0x0f, 0xdf, 0x65, 0xf9, 0xad, 0x77, 0x1f, 0x59, 0xc9, 0x67, 0x42, 0xe6, 0xd8, 0x9a, 0x0c, 0x75, 0x8e, 0x3e, 0x73, 0xa6, 0x26, 0x53, 0x59, 0x95, 0xf2, 0x81, 0x83, 0xce, 0x7c, 0x1a, 0x85, 0x3c, 0x14, 0x93, 0xc6, 0x1a, 0xf2, 0x6a, 0xde, 0xdd, 0x87, 0x8b, 0x7c, 0x28, 0x66, 0xfd, 0x12, 0x93, 0xab, 0xc5, 0x71, 0x74, 0x7a, 0x7b, 0xb5, 0xdf, 0x81, 0x2f, 0x79, 0x38, 0xd2, 0x42, 0xd1, 0xa0, 0x5a, 0xb2, 0xe8, 0x8a, 0x7c, 0x2c, 0x04, 0x14, 0xe0, 0x4a, 0x4d, 0xdb, 0x1a, 0xd5, 0x39, 0xab, 0x19, 0x93, 0xcb, 0x6d, 0x8d, 0xbc, 0x11, 0x08, 0x3a, 0xfb, 0xbc, 0xdf, 0x08, 0x04, 0x2c, 0x0f, 
0xc5, 0xc4, 0x67, 0x50, 0xa0, 0x7a, 0xd1, 0x6d, 0xe4, 0x22, 0x1f, 0x89, 0x59, 0xed, 0x30, 0x33, 0x3e, 0x3c, 0xe0, 0x65, 0xb7, 0x70, 0x00, 0xf2, 0x9e, 0x98, 0x7a, 0x02, 0x6a, 0xbc, 0x7a, 0xc5, 0xaa, 0x6f, 0xf2, 0xa9, 0xb8, 0x03, 0x1b, 0xd4, 0x64, 0x4a, 0xd4, 0x9e, 0xa0, 0xac, 0xd5, 0x6b, 0xf6, 0xb7, 0x60, 0x83, 0x97, 0xa6, 0xc4, 0x8b, 0xc0, 0xe4, 0x13, 0xf1, 0x7f, 0x53, 0xe7, 0x40, 0xa8, 0x1d, 0x10, 0xaa, 0x37, 0xfc, 0x17, 0xd1, 0xa1, 0x15, 0x10, 0x4a, 0x25, 0xf6, 0xa0, 0x45, 0x07, 0x1b, 0x54, 0x6f, 0x8f, 0xa3, 0xd3, 0x83, 0xd5, 0xdf, 0x1a, 0x4c, 0x69, 0xac, 0x29, 0x9b, 0x52, 0xbd, 0xeb, 0x4c, 0x5f, 0xd9, 0xc0, 0x35, 0x9b, 0xf7, 0xbd, 0xe9, 0xaa, 0x7c, 0x20, 0xf6, 0x8d, 0x25, 0x74, 0x2d, 0x14, 0xea, 0x03, 0xab, 0xa1, 0xaf, 0xa7, 0xfc, 0xb1, 0xa5, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x6c, 0x6f, 0x66, 0x72, 0x88, 0x03, 0x00, 0x00, }
{ return m.Name_7 }
conditional_block
envmon_sensor_info.pb.go
/* Copyright 2019 Cisco Systems Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by protoc-gen-go. DO NOT EDIT. // source: envmon_sensor_info.proto package cisco_ios_xr_invmgr_oper_inventory_racks_rack_entity_slot_tsi1s_tsi1_tsi2s_tsi2_tsi3s_tsi3_tsi4s_tsi4_tsi5s_tsi5_tsi6s_tsi6_attributes_env_sensor_info import ( fmt "fmt" proto "github.com/golang/protobuf/proto" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type EnvmonSensorInfo_KEYS struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Name_1 string `protobuf:"bytes,2,opt,name=name_1,json=name1,proto3" json:"name_1,omitempty"` Name_2 string `protobuf:"bytes,3,opt,name=name_2,json=name2,proto3" json:"name_2,omitempty"` Name_3 string `protobuf:"bytes,4,opt,name=name_3,json=name3,proto3" json:"name_3,omitempty"` Name_4 string `protobuf:"bytes,5,opt,name=name_4,json=name4,proto3" json:"name_4,omitempty"` Name_5 string `protobuf:"bytes,6,opt,name=name_5,json=name5,proto3" json:"name_5,omitempty"` Name_6 string `protobuf:"bytes,7,opt,name=name_6,json=name6,proto3" json:"name_6,omitempty"` Name_7 string `protobuf:"bytes,8,opt,name=name_7,json=name7,proto3" json:"name_7,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *EnvmonSensorInfo_KEYS) Reset() { *m = EnvmonSensorInfo_KEYS{} } func (m *EnvmonSensorInfo_KEYS) String() string { return proto.CompactTextString(m) } func (*EnvmonSensorInfo_KEYS) ProtoMessage() {} func (*EnvmonSensorInfo_KEYS) Descriptor() ([]byte, []int) { return fileDescriptor_bc03e94ffc42a321, []int{0} } func (m *EnvmonSensorInfo_KEYS) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EnvmonSensorInfo_KEYS.Unmarshal(m, b) } func (m *EnvmonSensorInfo_KEYS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_EnvmonSensorInfo_KEYS.Marshal(b, m, deterministic) } func (m *EnvmonSensorInfo_KEYS) XXX_Merge(src proto.Message) { xxx_messageInfo_EnvmonSensorInfo_KEYS.Merge(m, src) } func (m *EnvmonSensorInfo_KEYS) XXX_Size() int { return xxx_messageInfo_EnvmonSensorInfo_KEYS.Size(m) } func (m *EnvmonSensorInfo_KEYS) XXX_DiscardUnknown() { xxx_messageInfo_EnvmonSensorInfo_KEYS.DiscardUnknown(m) } var xxx_messageInfo_EnvmonSensorInfo_KEYS proto.InternalMessageInfo func (m *EnvmonSensorInfo_KEYS) GetName() string { if m != nil { return m.Name } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_1() string { if m != nil { return m.Name_1 } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_2() string { if m != nil { return m.Name_2 } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_3() string { if m 
!= nil { return m.Name_3 } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_4() string { if m != nil { return m.Name_4 } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_5() string { if m != nil { return m.Name_5 } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_6() string { if m != nil { return m.Name_6 } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_7() string { if m != nil { return m.Name_7 } return "" } type EnvmonSensorInfo struct { FieldValidityBitmap string `protobuf:"bytes,50,opt,name=field_validity_bitmap,json=fieldValidityBitmap,proto3" json:"field_validity_bitmap,omitempty"` DeviceDescription string `protobuf:"bytes,51,opt,name=device_description,json=deviceDescription,proto3" json:"device_description,omitempty"` Units string `protobuf:"bytes,52,opt,name=units,proto3" json:"units,omitempty"` DeviceId uint32 `protobuf:"varint,53,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"` Value uint32 `protobuf:"varint,54,opt,name=value,proto3" json:"value,omitempty"` AlarmType uint32 `protobuf:"varint,55,opt,name=alarm_type,json=alarmType,proto3" json:"alarm_type,omitempty"` DataType uint32 `protobuf:"varint,56,opt,name=data_type,json=dataType,proto3" json:"data_type,omitempty"` Scale uint32 `protobuf:"varint,57,opt,name=scale,proto3" json:"scale,omitempty"` Precision uint32 `protobuf:"varint,58,opt,name=precision,proto3" json:"precision,omitempty"` Status uint32 `protobuf:"varint,59,opt,name=status,proto3" json:"status,omitempty"` AgeTimeStamp uint32 `protobuf:"varint,60,opt,name=age_time_stamp,json=ageTimeStamp,proto3" json:"age_time_stamp,omitempty"` UpdateRate uint32 `protobuf:"varint,61,opt,name=update_rate,json=updateRate,proto3" json:"update_rate,omitempty"` Average int32 `protobuf:"zigzag32,62,opt,name=average,proto3" json:"average,omitempty"` Minimum int32 `protobuf:"zigzag32,63,opt,name=minimum,proto3" json:"minimum,omitempty"` Maximum int32 `protobuf:"zigzag32,64,opt,name=maximum,proto3" json:"maximum,omitempty"` Interval int32 `protobuf:"zigzag32,65,opt,name=interval,proto3" json:"interval,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *EnvmonSensorInfo) Reset() { *m = EnvmonSensorInfo{} } func (m *EnvmonSensorInfo) String() string { return proto.CompactTextString(m) } func (*EnvmonSensorInfo) ProtoMessage() {} func (*EnvmonSensorInfo) Descriptor() ([]byte, []int) { return fileDescriptor_bc03e94ffc42a321, []int{1} } func (m *EnvmonSensorInfo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EnvmonSensorInfo.Unmarshal(m, b) } func (m *EnvmonSensorInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_EnvmonSensorInfo.Marshal(b, m, deterministic) } func (m *EnvmonSensorInfo) XXX_Merge(src proto.Message) { xxx_messageInfo_EnvmonSensorInfo.Merge(m, src) } func (m *EnvmonSensorInfo) XXX_Size() int { return xxx_messageInfo_EnvmonSensorInfo.Size(m) } func (m *EnvmonSensorInfo) XXX_DiscardUnknown() { xxx_messageInfo_EnvmonSensorInfo.DiscardUnknown(m) } var xxx_messageInfo_EnvmonSensorInfo proto.InternalMessageInfo func (m *EnvmonSensorInfo) GetFieldValidityBitmap() string { if m != nil { return m.FieldValidityBitmap } return "" } func (m *EnvmonSensorInfo) GetDeviceDescription() string { if m != nil { return m.DeviceDescription } return "" } func (m *EnvmonSensorInfo) GetUnits() string { if m != nil { return m.Units } return "" } func (m *EnvmonSensorInfo) GetDeviceId() uint32 { if m != nil { return m.DeviceId } return 0 } func (m 
*EnvmonSensorInfo) GetValue() uint32 { if m != nil { return m.Value } return 0 } func (m *EnvmonSensorInfo) GetAlarmType() uint32 { if m != nil { return m.AlarmType } return 0 } func (m *EnvmonSensorInfo) GetDataType() uint32 { if m != nil { return m.DataType } return 0 } func (m *EnvmonSensorInfo) GetScale() uint32 { if m != nil { return m.Scale } return 0 } func (m *EnvmonSensorInfo) GetPrecision() uint32 { if m != nil { return m.Precision } return 0 } func (m *EnvmonSensorInfo) GetStatus() uint32 { if m != nil { return m.Status } return 0 } func (m *EnvmonSensorInfo) GetAgeTimeStamp() uint32 { if m != nil { return m.AgeTimeStamp } return 0 } func (m *EnvmonSensorInfo) GetUpdateRate() uint32 { if m != nil { return m.UpdateRate } return 0 } func (m *EnvmonSensorInfo) GetAverage() int32 { if m != nil { return m.Average } return 0 } func (m *EnvmonSensorInfo) GetMinimum() int32 { if m != nil { return m.Minimum } return 0 } func (m *EnvmonSensorInfo) GetMaximum() int32 { if m != nil { return m.Maximum } return 0 } func (m *EnvmonSensorInfo) GetInterval() int32 { if m != nil { return m.Interval } return 0 } func init() { proto.RegisterType((*EnvmonSensorInfo_KEYS)(nil), "cisco_ios_xr_invmgr_oper.inventory.racks.rack.entity.slot.tsi1s.tsi1.tsi2s.tsi2.tsi3s.tsi3.tsi4s.tsi4.tsi5s.tsi5.tsi6s.tsi6.attributes.env_sensor_info.envmon_sensor_info_KEYS") proto.RegisterType((*EnvmonSensorInfo)(nil), "cisco_ios_xr_invmgr_oper.inventory.racks.rack.entity.slot.tsi1s.tsi1.tsi2s.tsi2.tsi3s.tsi3.tsi4s.tsi4.tsi5s.tsi5.tsi6s.tsi6.attributes.env_sensor_info.envmon_sensor_info") } func init() { proto.RegisterFile("envmon_sensor_info.proto", fileDescriptor_bc03e94ffc42a321) } var fileDescriptor_bc03e94ffc42a321 = []byte{ // 506 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x93, 0xcd, 0x6e, 0xd3, 0x4c, 0x14, 0x86, 0xe5, 0xaf, 0x4d, 0xda, 0xcc, 0x07, 0x48, 0x1d, 0x5a, 0x18, 0xf1, 0x23, 0xaa, 0x8a, 0x45, 0x37, 0x58, 0xaa, 0x9d, 0xa4, 0xfc, 0xff, 0x09, 0x16, 0x88, 0x5d, 0x5a, 0x21, 0xb1, 0x1a, 0x9d, 0xd8, 0xa7, 0xd1, 0x08, 0x7b, 0x6c, 0xcd, 0x1c, 0x5b, 0xcd, 0x8d, 0x70, 0x59, 0xdc, 0x0f, 0x3b, 0x34, 0xc7, 0xc6, 0x46, 0xea, 0xe6, 0xcd, 0x79, 0x9f, 0x27, 0x73, 0x34, 0xb3, 0xb0, 0x50, 0x68, 0xdb, 0xb2, 0xb2, 0xda, 0xa3, 0xf5, 0x95, 0xd3, 0xc6, 0x5e, 0x55, 0x71, 0xed, 0x2a, 0xaa,
0x88, 0x9c, 0x59, 0x37, 0x84, 0x3e, 0x46, 0xdb, 0xfe, 0x7b, 0xbd, 0x93, 0x5f, 0x91, 0xb8, 0x7f, 0xf3, 0xd6, 0xfa, 0xeb, 0xe7, 0xef, 0x17, 0x52, 0x8a, 0x5d, 0x0b, 0x25, 0xaa, 0xe8, 0x38, 0x3a, 0x9d, 0xad, 0x78, 0x96, 0x47, 0x62, 0x1a, 0x7e, 0xf5, 0x99, 0xfa, 0x8f, 0xe9, 0x24, 0xb4, 0xb3, 0x01, 0x27, 0x6a, 0x67, 0xc4, 0xc9, 0x80, 0x53, 0xb5, 0x3b, 0xe2, 0x74, 0xc0, 0x73, 0x35, 0x19, 0xf1, 0x7c, 0xc0, 0x0b, 0x35, 0x1d, 0xf1, 0x62, 0xc0, 0x4b, 0xb5, 0x37, 0xe2, 0xe5, 0x80, 0xcf, 0xd5, 0xfe, 0x88, 0xcf, 0x4f, 0x7e, 0xef, 0x08, 0x79, 0xf3, 0x41, 0x32, 0x11, 0x47, 0x57, 0x06, 0x8b, 0x5c, 0xb7, 0x50, 0x98, 0xdc, 0xd0, 0x56, 0xaf, 0x0d, 0x95, 0x50, 0xab, 0x84, 0x0f, 0xdf, 0x65, 0xf9, 0xad, 0x77, 0x1f, 0x59, 0xc9, 0x67, 0x42, 0xe6, 0xd8, 0x9a, 0x0c, 0x75, 0x8e, 0x3e, 0x73, 0xa6, 0x26, 0x53, 0x59, 0x95, 0xf2, 0x81, 0x83, 0xce, 0x7c, 0x1a, 0x85, 0x3c, 0x14, 0x93, 0xc6, 0x1a, 0xf2, 0x6a, 0xde, 0xdd, 0x87, 0x8b, 0x7c, 0x28, 0x66, 0xfd, 0x12, 0x93, 0xab, 0xc5, 0x71, 0x74, 0x7a, 0x7b, 0xb5, 0xdf, 0x81, 0x2f, 0x79, 0x38, 0xd2, 0x42, 0xd1, 0xa0, 0x5a, 0xb2, 0xe8, 0x8a, 0x7c, 0x2c, 0x04, 0x14, 0xe0, 0x4a, 0x4d, 0xdb, 0x1a, 0xd5, 0x39, 0xab, 0x19, 0x93, 0xcb, 0x6d, 0x8d, 0xbc, 0x11, 0x08, 0x3a, 0xfb, 0xbc, 0xdf, 0x08, 0x04, 0x2c, 0x0f, 0xc5, 0xc4, 0x67, 0x50, 0xa0, 0x7a, 0xd1, 0x6d, 0xe4, 0x22, 0x1f, 0x89, 0x59, 0xed, 0x30, 0x33, 0x3e, 0x3c, 0xe0, 0x65, 0xb7, 0x70, 0x00, 0xf2, 0x9e, 0x98, 0x7a, 0x02, 0x6a, 0xbc, 0x7a, 0xc5, 0xaa, 0x6f, 0xf2, 0xa9, 0xb8, 0x03, 0x1b, 0xd4, 0x64, 0x4a, 0xd4, 0x9e, 0xa0, 0xac, 0xd5, 0x6b, 0xf6, 0xb7, 0x60, 0x83, 0x97, 0xa6, 0xc4, 0x8b, 0xc0, 0xe4, 0x13, 0xf1, 0x7f, 0x53, 0xe7, 0x40, 0xa8, 0x1d, 0x10, 0xaa, 0x37, 0xfc, 0x17, 0xd1, 0xa1, 0x15, 0x10, 0x4a, 0x25, 0xf6, 0xa0, 0x45, 0x07, 0x1b, 0x54, 0x6f, 0x8f, 0xa3, 0xd3, 0x83, 0xd5, 0xdf, 0x1a, 0x4c, 0x69, 0xac, 0x29, 0x9b, 0x52, 0xbd, 0xeb, 0x4c, 0x5f, 0xd9, 0xc0, 0x35, 0x9b, 0xf7, 0xbd, 0xe9, 0xaa, 0x7c, 0x20, 0xf6, 0x8d, 0x25, 0x74, 0x2d, 0x14, 0xea, 0x03, 0xab, 0xa1, 0xaf, 0xa7, 0xfc, 0xb1, 0xa5, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x6c, 0x6f, 0x66, 0x72, 0x88, 0x03, 0x00, 0x00, }
0xe4, 0xcf, 0x28, 0x33, 0x3e, 0xab, 0xb4, 0xa9, 0xbc, 0xbe, 0x0e, 0xa6, 0x2d, 0x37, 0x4e, 0x57, 0x35, 0xba, 0xd8, 0xd8, 0x16, 0x2d, 0x55, 0x6e, 0x1b, 0x3b, 0xc8, 0x7e, 0x78, 0xce, 0x18, 0x2d, 0x19, 0xda, 0xc6, 0xbe, 0xa8, 0x28, 0x26, 0x6f, 0xce, 0x3c, 0x67, 0x88, 0x84, 0xc7, 0x24, 0x44, 0xca, 0x63, 0x1a, 0x62, 0xce, 0xe3, 0x3c, 0xc4, 0x82, 0xc7, 0x45, 0x88, 0x25, 0x8f, 0xcb, 0x18,
random_line_split
envmon_sensor_info.pb.go
/* Copyright 2019 Cisco Systems Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by protoc-gen-go. DO NOT EDIT. // source: envmon_sensor_info.proto package cisco_ios_xr_invmgr_oper_inventory_racks_rack_entity_slot_tsi1s_tsi1_tsi2s_tsi2_tsi3s_tsi3_tsi4s_tsi4_tsi5s_tsi5_tsi6s_tsi6_attributes_env_sensor_info import ( fmt "fmt" proto "github.com/golang/protobuf/proto" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type EnvmonSensorInfo_KEYS struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Name_1 string `protobuf:"bytes,2,opt,name=name_1,json=name1,proto3" json:"name_1,omitempty"` Name_2 string `protobuf:"bytes,3,opt,name=name_2,json=name2,proto3" json:"name_2,omitempty"` Name_3 string `protobuf:"bytes,4,opt,name=name_3,json=name3,proto3" json:"name_3,omitempty"` Name_4 string `protobuf:"bytes,5,opt,name=name_4,json=name4,proto3" json:"name_4,omitempty"` Name_5 string `protobuf:"bytes,6,opt,name=name_5,json=name5,proto3" json:"name_5,omitempty"` Name_6 string `protobuf:"bytes,7,opt,name=name_6,json=name6,proto3" json:"name_6,omitempty"` Name_7 string `protobuf:"bytes,8,opt,name=name_7,json=name7,proto3" json:"name_7,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *EnvmonSensorInfo_KEYS) Reset() { *m = EnvmonSensorInfo_KEYS{} } func (m *EnvmonSensorInfo_KEYS) String() string { return proto.CompactTextString(m) } func (*EnvmonSensorInfo_KEYS) ProtoMessage() {} func (*EnvmonSensorInfo_KEYS) Descriptor() ([]byte, []int) { return fileDescriptor_bc03e94ffc42a321, []int{0} } func (m *EnvmonSensorInfo_KEYS) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EnvmonSensorInfo_KEYS.Unmarshal(m, b) } func (m *EnvmonSensorInfo_KEYS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_EnvmonSensorInfo_KEYS.Marshal(b, m, deterministic) } func (m *EnvmonSensorInfo_KEYS)
(src proto.Message) { xxx_messageInfo_EnvmonSensorInfo_KEYS.Merge(m, src) } func (m *EnvmonSensorInfo_KEYS) XXX_Size() int { return xxx_messageInfo_EnvmonSensorInfo_KEYS.Size(m) } func (m *EnvmonSensorInfo_KEYS) XXX_DiscardUnknown() { xxx_messageInfo_EnvmonSensorInfo_KEYS.DiscardUnknown(m) } var xxx_messageInfo_EnvmonSensorInfo_KEYS proto.InternalMessageInfo func (m *EnvmonSensorInfo_KEYS) GetName() string { if m != nil { return m.Name } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_1() string { if m != nil { return m.Name_1 } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_2() string { if m != nil { return m.Name_2 } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_3() string { if m != nil { return m.Name_3 } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_4() string { if m != nil { return m.Name_4 } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_5() string { if m != nil { return m.Name_5 } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_6() string { if m != nil { return m.Name_6 } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_7() string { if m != nil { return m.Name_7 } return "" } type EnvmonSensorInfo struct { FieldValidityBitmap string `protobuf:"bytes,50,opt,name=field_validity_bitmap,json=fieldValidityBitmap,proto3" json:"field_validity_bitmap,omitempty"` DeviceDescription string `protobuf:"bytes,51,opt,name=device_description,json=deviceDescription,proto3" json:"device_description,omitempty"` Units string `protobuf:"bytes,52,opt,name=units,proto3" json:"units,omitempty"` DeviceId uint32 `protobuf:"varint,53,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"` Value uint32 `protobuf:"varint,54,opt,name=value,proto3" json:"value,omitempty"` AlarmType uint32 `protobuf:"varint,55,opt,name=alarm_type,json=alarmType,proto3" json:"alarm_type,omitempty"` DataType uint32 `protobuf:"varint,56,opt,name=data_type,json=dataType,proto3" json:"data_type,omitempty"` Scale uint32 `protobuf:"varint,57,opt,name=scale,proto3" json:"scale,omitempty"` Precision uint32 `protobuf:"varint,58,opt,name=precision,proto3" json:"precision,omitempty"` Status uint32 `protobuf:"varint,59,opt,name=status,proto3" json:"status,omitempty"` AgeTimeStamp uint32 `protobuf:"varint,60,opt,name=age_time_stamp,json=ageTimeStamp,proto3" json:"age_time_stamp,omitempty"` UpdateRate uint32 `protobuf:"varint,61,opt,name=update_rate,json=updateRate,proto3" json:"update_rate,omitempty"` Average int32 `protobuf:"zigzag32,62,opt,name=average,proto3" json:"average,omitempty"` Minimum int32 `protobuf:"zigzag32,63,opt,name=minimum,proto3" json:"minimum,omitempty"` Maximum int32 `protobuf:"zigzag32,64,opt,name=maximum,proto3" json:"maximum,omitempty"` Interval int32 `protobuf:"zigzag32,65,opt,name=interval,proto3" json:"interval,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *EnvmonSensorInfo) Reset() { *m = EnvmonSensorInfo{} } func (m *EnvmonSensorInfo) String() string { return proto.CompactTextString(m) } func (*EnvmonSensorInfo) ProtoMessage() {} func (*EnvmonSensorInfo) Descriptor() ([]byte, []int) { return fileDescriptor_bc03e94ffc42a321, []int{1} } func (m *EnvmonSensorInfo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EnvmonSensorInfo.Unmarshal(m, b) } func (m *EnvmonSensorInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_EnvmonSensorInfo.Marshal(b, m, deterministic) } func (m *EnvmonSensorInfo) XXX_Merge(src proto.Message) { 
xxx_messageInfo_EnvmonSensorInfo.Merge(m, src) } func (m *EnvmonSensorInfo) XXX_Size() int { return xxx_messageInfo_EnvmonSensorInfo.Size(m) } func (m *EnvmonSensorInfo) XXX_DiscardUnknown() { xxx_messageInfo_EnvmonSensorInfo.DiscardUnknown(m) } var xxx_messageInfo_EnvmonSensorInfo proto.InternalMessageInfo func (m *EnvmonSensorInfo) GetFieldValidityBitmap() string { if m != nil { return m.FieldValidityBitmap } return "" } func (m *EnvmonSensorInfo) GetDeviceDescription() string { if m != nil { return m.DeviceDescription } return "" } func (m *EnvmonSensorInfo) GetUnits() string { if m != nil { return m.Units } return "" } func (m *EnvmonSensorInfo) GetDeviceId() uint32 { if m != nil { return m.DeviceId } return 0 } func (m *EnvmonSensorInfo) GetValue() uint32 { if m != nil { return m.Value } return 0 } func (m *EnvmonSensorInfo) GetAlarmType() uint32 { if m != nil { return m.AlarmType } return 0 } func (m *EnvmonSensorInfo) GetDataType() uint32 { if m != nil { return m.DataType } return 0 } func (m *EnvmonSensorInfo) GetScale() uint32 { if m != nil { return m.Scale } return 0 } func (m *EnvmonSensorInfo) GetPrecision() uint32 { if m != nil { return m.Precision } return 0 } func (m *EnvmonSensorInfo) GetStatus() uint32 { if m != nil { return m.Status } return 0 } func (m *EnvmonSensorInfo) GetAgeTimeStamp() uint32 { if m != nil { return m.AgeTimeStamp } return 0 } func (m *EnvmonSensorInfo) GetUpdateRate() uint32 { if m != nil { return m.UpdateRate } return 0 } func (m *EnvmonSensorInfo) GetAverage() int32 { if m != nil { return m.Average } return 0 } func (m *EnvmonSensorInfo) GetMinimum() int32 { if m != nil { return m.Minimum } return 0 } func (m *EnvmonSensorInfo) GetMaximum() int32 { if m != nil { return m.Maximum } return 0 } func (m *EnvmonSensorInfo) GetInterval() int32 { if m != nil { return m.Interval } return 0 } func init() { proto.RegisterType((*EnvmonSensorInfo_KEYS)(nil), "cisco_ios_xr_invmgr_oper.inventory.racks.rack.entity.slot.tsi1s.tsi1.tsi2s.tsi2.tsi3s.tsi3.tsi4s.tsi4.tsi5s.tsi5.tsi6s.tsi6.attributes.env_sensor_info.envmon_sensor_info_KEYS") proto.RegisterType((*EnvmonSensorInfo)(nil), "cisco_ios_xr_invmgr_oper.inventory.racks.rack.entity.slot.tsi1s.tsi1.tsi2s.tsi2.tsi3s.tsi3.tsi4s.tsi4.tsi5s.tsi5.tsi6s.tsi6.attributes.env_sensor_info.envmon_sensor_info") } func init() { proto.RegisterFile("envmon_sensor_info.proto", fileDescriptor_bc03e94ffc42a321) } var fileDescriptor_bc03e94ffc42a321 = []byte{ // 506 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x93, 0xcd, 0x6e, 0xd3, 0x4c, 0x14, 0x86, 0xe5, 0xaf, 0x4d, 0xda, 0xcc, 0x07, 0x48, 0x1d, 0x5a, 0x18, 0xf1, 0x23, 0xaa, 0x8a, 0x45, 0x37, 0x58, 0xaa, 0x9d, 0xa4, 0xfc, 0xff, 0x09, 0x16, 0x88, 0x5d, 0x5a, 0x21, 0xb1, 0x1a, 0x9d, 0xd8, 0xa7, 0xd1, 0x08, 0x7b, 0x6c, 0xcd, 0x1c, 0x5b, 0xcd, 0x8d, 0x70, 0x59, 0xdc, 0x0f, 0x3b, 0x34, 0xc7, 0xc6, 0x46, 0xea, 0xe6, 0xcd, 0x79, 0x9f, 0x27, 0x73, 0x34, 0xb3, 0xb0, 0x50, 0x68, 0xdb, 0xb2, 0xb2, 0xda, 0xa3, 0xf5, 0x95, 0xd3, 0xc6, 0x5e, 0x55, 0x71, 0xed, 0x2a, 0xaa, 0xe4, 0xcf, 0x28, 0x33, 0x3e, 0xab, 0xb4, 0xa9, 0xbc, 0xbe, 0x0e, 0xa6, 0x2d, 0x37, 0x4e, 0x57, 0x35, 0xba, 0xd8, 0xd8, 0x16, 0x2d, 0x55, 0x6e, 0x1b, 0x3b, 0xc8, 0x7e, 0x78, 0xce, 0x18, 0x2d, 0x19, 0xda, 0xc6, 0xbe, 0xa8, 0x28, 0x26, 0x6f, 0xce, 0x3c, 0x67, 0x88, 0x84, 0xc7, 0x24, 0x44, 0xca, 0x63, 0x1a, 0x62, 0xce, 0xe3, 0x3c, 0xc4, 0x82, 0xc7, 0x45, 0x88, 0x25, 0x8f, 0xcb, 0x18, 0x88, 0x9c, 0x59, 0x37, 0x84, 0x3e, 0x46, 0xdb, 0xfe, 0x7b, 0xbd, 0x93, 0x5f, 0x91, 
0xb8, 0x7f, 0xf3, 0xd6, 0xfa, 0xeb, 0xe7, 0xef, 0x17, 0x52, 0x8a, 0x5d, 0x0b, 0x25, 0xaa, 0xe8, 0x38, 0x3a, 0x9d, 0xad, 0x78, 0x96, 0x47, 0x62, 0x1a, 0x7e, 0xf5, 0x99, 0xfa, 0x8f, 0xe9, 0x24, 0xb4, 0xb3, 0x01, 0x27, 0x6a, 0x67, 0xc4, 0xc9, 0x80, 0x53, 0xb5, 0x3b, 0xe2, 0x74, 0xc0, 0x73, 0x35, 0x19, 0xf1, 0x7c, 0xc0, 0x0b, 0x35, 0x1d, 0xf1, 0x62, 0xc0, 0x4b, 0xb5, 0x37, 0xe2, 0xe5, 0x80, 0xcf, 0xd5, 0xfe, 0x88, 0xcf, 0x4f, 0x7e, 0xef, 0x08, 0x79, 0xf3, 0x41, 0x32, 0x11, 0x47, 0x57, 0x06, 0x8b, 0x5c, 0xb7, 0x50, 0x98, 0xdc, 0xd0, 0x56, 0xaf, 0x0d, 0x95, 0x50, 0xab, 0x84, 0x0f, 0xdf, 0x65, 0xf9, 0xad, 0x77, 0x1f, 0x59, 0xc9, 0x67, 0x42, 0xe6, 0xd8, 0x9a, 0x0c, 0x75, 0x8e, 0x3e, 0x73, 0xa6, 0x26, 0x53, 0x59, 0x95, 0xf2, 0x81, 0x83, 0xce, 0x7c, 0x1a, 0x85, 0x3c, 0x14, 0x93, 0xc6, 0x1a, 0xf2, 0x6a, 0xde, 0xdd, 0x87, 0x8b, 0x7c, 0x28, 0x66, 0xfd, 0x12, 0x93, 0xab, 0xc5, 0x71, 0x74, 0x7a, 0x7b, 0xb5, 0xdf, 0x81, 0x2f, 0x79, 0x38, 0xd2, 0x42, 0xd1, 0xa0, 0x5a, 0xb2, 0xe8, 0x8a, 0x7c, 0x2c, 0x04, 0x14, 0xe0, 0x4a, 0x4d, 0xdb, 0x1a, 0xd5, 0x39, 0xab, 0x19, 0x93, 0xcb, 0x6d, 0x8d, 0xbc, 0x11, 0x08, 0x3a, 0xfb, 0xbc, 0xdf, 0x08, 0x04, 0x2c, 0x0f, 0xc5, 0xc4, 0x67, 0x50, 0xa0, 0x7a, 0xd1, 0x6d, 0xe4, 0x22, 0x1f, 0x89, 0x59, 0xed, 0x30, 0x33, 0x3e, 0x3c, 0xe0, 0x65, 0xb7, 0x70, 0x00, 0xf2, 0x9e, 0x98, 0x7a, 0x02, 0x6a, 0xbc, 0x7a, 0xc5, 0xaa, 0x6f, 0xf2, 0xa9, 0xb8, 0x03, 0x1b, 0xd4, 0x64, 0x4a, 0xd4, 0x9e, 0xa0, 0xac, 0xd5, 0x6b, 0xf6, 0xb7, 0x60, 0x83, 0x97, 0xa6, 0xc4, 0x8b, 0xc0, 0xe4, 0x13, 0xf1, 0x7f, 0x53, 0xe7, 0x40, 0xa8, 0x1d, 0x10, 0xaa, 0x37, 0xfc, 0x17, 0xd1, 0xa1, 0x15, 0x10, 0x4a, 0x25, 0xf6, 0xa0, 0x45, 0x07, 0x1b, 0x54, 0x6f, 0x8f, 0xa3, 0xd3, 0x83, 0xd5, 0xdf, 0x1a, 0x4c, 0x69, 0xac, 0x29, 0x9b, 0x52, 0xbd, 0xeb, 0x4c, 0x5f, 0xd9, 0xc0, 0x35, 0x9b, 0xf7, 0xbd, 0xe9, 0xaa, 0x7c, 0x20, 0xf6, 0x8d, 0x25, 0x74, 0x2d, 0x14, 0xea, 0x03, 0xab, 0xa1, 0xaf, 0xa7, 0xfc, 0xb1, 0xa5, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x6c, 0x6f, 0x66, 0x72, 0x88, 0x03, 0x00, 0x00, }
XXX_Merge
identifier_name
envmon_sensor_info.pb.go
/* Copyright 2019 Cisco Systems Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by protoc-gen-go. DO NOT EDIT. // source: envmon_sensor_info.proto package cisco_ios_xr_invmgr_oper_inventory_racks_rack_entity_slot_tsi1s_tsi1_tsi2s_tsi2_tsi3s_tsi3_tsi4s_tsi4_tsi5s_tsi5_tsi6s_tsi6_attributes_env_sensor_info import ( fmt "fmt" proto "github.com/golang/protobuf/proto" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type EnvmonSensorInfo_KEYS struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Name_1 string `protobuf:"bytes,2,opt,name=name_1,json=name1,proto3" json:"name_1,omitempty"` Name_2 string `protobuf:"bytes,3,opt,name=name_2,json=name2,proto3" json:"name_2,omitempty"` Name_3 string `protobuf:"bytes,4,opt,name=name_3,json=name3,proto3" json:"name_3,omitempty"` Name_4 string `protobuf:"bytes,5,opt,name=name_4,json=name4,proto3" json:"name_4,omitempty"` Name_5 string `protobuf:"bytes,6,opt,name=name_5,json=name5,proto3" json:"name_5,omitempty"` Name_6 string `protobuf:"bytes,7,opt,name=name_6,json=name6,proto3" json:"name_6,omitempty"` Name_7 string `protobuf:"bytes,8,opt,name=name_7,json=name7,proto3" json:"name_7,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *EnvmonSensorInfo_KEYS) Reset() { *m = EnvmonSensorInfo_KEYS{} } func (m *EnvmonSensorInfo_KEYS) String() string { return proto.CompactTextString(m) } func (*EnvmonSensorInfo_KEYS) ProtoMessage() {} func (*EnvmonSensorInfo_KEYS) Descriptor() ([]byte, []int) { return fileDescriptor_bc03e94ffc42a321, []int{0} } func (m *EnvmonSensorInfo_KEYS) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EnvmonSensorInfo_KEYS.Unmarshal(m, b) } func (m *EnvmonSensorInfo_KEYS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_EnvmonSensorInfo_KEYS.Marshal(b, m, deterministic) } func (m *EnvmonSensorInfo_KEYS) XXX_Merge(src proto.Message) { xxx_messageInfo_EnvmonSensorInfo_KEYS.Merge(m, src) } func (m *EnvmonSensorInfo_KEYS) XXX_Size() int { return xxx_messageInfo_EnvmonSensorInfo_KEYS.Size(m) } func (m *EnvmonSensorInfo_KEYS) XXX_DiscardUnknown() { xxx_messageInfo_EnvmonSensorInfo_KEYS.DiscardUnknown(m) } var xxx_messageInfo_EnvmonSensorInfo_KEYS proto.InternalMessageInfo func (m *EnvmonSensorInfo_KEYS) GetName() string { if m != nil { return m.Name } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_1() string { if m != nil { return m.Name_1 } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_2() string { if m != nil { return m.Name_2 } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_3() string
func (m *EnvmonSensorInfo_KEYS) GetName_4() string { if m != nil { return m.Name_4 } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_5() string { if m != nil { return m.Name_5 } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_6() string { if m != nil { return m.Name_6 } return "" } func (m *EnvmonSensorInfo_KEYS) GetName_7() string { if m != nil { return m.Name_7 } return "" } type EnvmonSensorInfo struct { FieldValidityBitmap string `protobuf:"bytes,50,opt,name=field_validity_bitmap,json=fieldValidityBitmap,proto3" json:"field_validity_bitmap,omitempty"` DeviceDescription string `protobuf:"bytes,51,opt,name=device_description,json=deviceDescription,proto3" json:"device_description,omitempty"` Units string `protobuf:"bytes,52,opt,name=units,proto3" json:"units,omitempty"` DeviceId uint32 `protobuf:"varint,53,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"` Value uint32 `protobuf:"varint,54,opt,name=value,proto3" json:"value,omitempty"` AlarmType uint32 `protobuf:"varint,55,opt,name=alarm_type,json=alarmType,proto3" json:"alarm_type,omitempty"` DataType uint32 `protobuf:"varint,56,opt,name=data_type,json=dataType,proto3" json:"data_type,omitempty"` Scale uint32 `protobuf:"varint,57,opt,name=scale,proto3" json:"scale,omitempty"` Precision uint32 `protobuf:"varint,58,opt,name=precision,proto3" json:"precision,omitempty"` Status uint32 `protobuf:"varint,59,opt,name=status,proto3" json:"status,omitempty"` AgeTimeStamp uint32 `protobuf:"varint,60,opt,name=age_time_stamp,json=ageTimeStamp,proto3" json:"age_time_stamp,omitempty"` UpdateRate uint32 `protobuf:"varint,61,opt,name=update_rate,json=updateRate,proto3" json:"update_rate,omitempty"` Average int32 `protobuf:"zigzag32,62,opt,name=average,proto3" json:"average,omitempty"` Minimum int32 `protobuf:"zigzag32,63,opt,name=minimum,proto3" json:"minimum,omitempty"` Maximum int32 `protobuf:"zigzag32,64,opt,name=maximum,proto3" json:"maximum,omitempty"` Interval int32 `protobuf:"zigzag32,65,opt,name=interval,proto3" json:"interval,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *EnvmonSensorInfo) Reset() { *m = EnvmonSensorInfo{} } func (m *EnvmonSensorInfo) String() string { return proto.CompactTextString(m) } func (*EnvmonSensorInfo) ProtoMessage() {} func (*EnvmonSensorInfo) Descriptor() ([]byte, []int) { return fileDescriptor_bc03e94ffc42a321, []int{1} } func (m *EnvmonSensorInfo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EnvmonSensorInfo.Unmarshal(m, b) } func (m *EnvmonSensorInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_EnvmonSensorInfo.Marshal(b, m, deterministic) } func (m *EnvmonSensorInfo) XXX_Merge(src proto.Message) { xxx_messageInfo_EnvmonSensorInfo.Merge(m, src) } func (m *EnvmonSensorInfo) XXX_Size() int { return xxx_messageInfo_EnvmonSensorInfo.Size(m) } func (m *EnvmonSensorInfo) XXX_DiscardUnknown() { xxx_messageInfo_EnvmonSensorInfo.DiscardUnknown(m) } var xxx_messageInfo_EnvmonSensorInfo proto.InternalMessageInfo func (m *EnvmonSensorInfo) GetFieldValidityBitmap() string { if m != nil { return m.FieldValidityBitmap } return "" } func (m *EnvmonSensorInfo) GetDeviceDescription() string { if m != nil { return m.DeviceDescription } return "" } func (m *EnvmonSensorInfo) GetUnits() string { if m != nil { return m.Units } return "" } func (m *EnvmonSensorInfo) GetDeviceId() uint32 { if m != nil { return m.DeviceId } return 0 } func (m *EnvmonSensorInfo) GetValue() uint32 { 
if m != nil { return m.Value } return 0 } func (m *EnvmonSensorInfo) GetAlarmType() uint32 { if m != nil { return m.AlarmType } return 0 } func (m *EnvmonSensorInfo) GetDataType() uint32 { if m != nil { return m.DataType } return 0 } func (m *EnvmonSensorInfo) GetScale() uint32 { if m != nil { return m.Scale } return 0 } func (m *EnvmonSensorInfo) GetPrecision() uint32 { if m != nil { return m.Precision } return 0 } func (m *EnvmonSensorInfo) GetStatus() uint32 { if m != nil { return m.Status } return 0 } func (m *EnvmonSensorInfo) GetAgeTimeStamp() uint32 { if m != nil { return m.AgeTimeStamp } return 0 } func (m *EnvmonSensorInfo) GetUpdateRate() uint32 { if m != nil { return m.UpdateRate } return 0 } func (m *EnvmonSensorInfo) GetAverage() int32 { if m != nil { return m.Average } return 0 } func (m *EnvmonSensorInfo) GetMinimum() int32 { if m != nil { return m.Minimum } return 0 } func (m *EnvmonSensorInfo) GetMaximum() int32 { if m != nil { return m.Maximum } return 0 } func (m *EnvmonSensorInfo) GetInterval() int32 { if m != nil { return m.Interval } return 0 } func init() { proto.RegisterType((*EnvmonSensorInfo_KEYS)(nil), "cisco_ios_xr_invmgr_oper.inventory.racks.rack.entity.slot.tsi1s.tsi1.tsi2s.tsi2.tsi3s.tsi3.tsi4s.tsi4.tsi5s.tsi5.tsi6s.tsi6.attributes.env_sensor_info.envmon_sensor_info_KEYS") proto.RegisterType((*EnvmonSensorInfo)(nil), "cisco_ios_xr_invmgr_oper.inventory.racks.rack.entity.slot.tsi1s.tsi1.tsi2s.tsi2.tsi3s.tsi3.tsi4s.tsi4.tsi5s.tsi5.tsi6s.tsi6.attributes.env_sensor_info.envmon_sensor_info") } func init() { proto.RegisterFile("envmon_sensor_info.proto", fileDescriptor_bc03e94ffc42a321) } var fileDescriptor_bc03e94ffc42a321 = []byte{ // 506 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x93, 0xcd, 0x6e, 0xd3, 0x4c, 0x14, 0x86, 0xe5, 0xaf, 0x4d, 0xda, 0xcc, 0x07, 0x48, 0x1d, 0x5a, 0x18, 0xf1, 0x23, 0xaa, 0x8a, 0x45, 0x37, 0x58, 0xaa, 0x9d, 0xa4, 0xfc, 0xff, 0x09, 0x16, 0x88, 0x5d, 0x5a, 0x21, 0xb1, 0x1a, 0x9d, 0xd8, 0xa7, 0xd1, 0x08, 0x7b, 0x6c, 0xcd, 0x1c, 0x5b, 0xcd, 0x8d, 0x70, 0x59, 0xdc, 0x0f, 0x3b, 0x34, 0xc7, 0xc6, 0x46, 0xea, 0xe6, 0xcd, 0x79, 0x9f, 0x27, 0x73, 0x34, 0xb3, 0xb0, 0x50, 0x68, 0xdb, 0xb2, 0xb2, 0xda, 0xa3, 0xf5, 0x95, 0xd3, 0xc6, 0x5e, 0x55, 0x71, 0xed, 0x2a, 0xaa, 0xe4, 0xcf, 0x28, 0x33, 0x3e, 0xab, 0xb4, 0xa9, 0xbc, 0xbe, 0x0e, 0xa6, 0x2d, 0x37, 0x4e, 0x57, 0x35, 0xba, 0xd8, 0xd8, 0x16, 0x2d, 0x55, 0x6e, 0x1b, 0x3b, 0xc8, 0x7e, 0x78, 0xce, 0x18, 0x2d, 0x19, 0xda, 0xc6, 0xbe, 0xa8, 0x28, 0x26, 0x6f, 0xce, 0x3c, 0x67, 0x88, 0x84, 0xc7, 0x24, 0x44, 0xca, 0x63, 0x1a, 0x62, 0xce, 0xe3, 0x3c, 0xc4, 0x82, 0xc7, 0x45, 0x88, 0x25, 0x8f, 0xcb, 0x18, 0x88, 0x9c, 0x59, 0x37, 0x84, 0x3e, 0x46, 0xdb, 0xfe, 0x7b, 0xbd, 0x93, 0x5f, 0x91, 0xb8, 0x7f, 0xf3, 0xd6, 0xfa, 0xeb, 0xe7, 0xef, 0x17, 0x52, 0x8a, 0x5d, 0x0b, 0x25, 0xaa, 0xe8, 0x38, 0x3a, 0x9d, 0xad, 0x78, 0x96, 0x47, 0x62, 0x1a, 0x7e, 0xf5, 0x99, 0xfa, 0x8f, 0xe9, 0x24, 0xb4, 0xb3, 0x01, 0x27, 0x6a, 0x67, 0xc4, 0xc9, 0x80, 0x53, 0xb5, 0x3b, 0xe2, 0x74, 0xc0, 0x73, 0x35, 0x19, 0xf1, 0x7c, 0xc0, 0x0b, 0x35, 0x1d, 0xf1, 0x62, 0xc0, 0x4b, 0xb5, 0x37, 0xe2, 0xe5, 0x80, 0xcf, 0xd5, 0xfe, 0x88, 0xcf, 0x4f, 0x7e, 0xef, 0x08, 0x79, 0xf3, 0x41, 0x32, 0x11, 0x47, 0x57, 0x06, 0x8b, 0x5c, 0xb7, 0x50, 0x98, 0xdc, 0xd0, 0x56, 0xaf, 0x0d, 0x95, 0x50, 0xab, 0x84, 0x0f, 0xdf, 0x65, 0xf9, 0xad, 0x77, 0x1f, 0x59, 0xc9, 0x67, 0x42, 0xe6, 0xd8, 0x9a, 0x0c, 0x75, 0x8e, 0x3e, 0x73, 0xa6, 0x26, 0x53, 0x59, 0x95, 0xf2, 0x81, 0x83, 0xce, 0x7c, 0x1a, 0x85, 0x3c, 0x14, 
0x93, 0xc6, 0x1a, 0xf2, 0x6a, 0xde, 0xdd, 0x87, 0x8b, 0x7c, 0x28, 0x66, 0xfd, 0x12, 0x93, 0xab, 0xc5, 0x71, 0x74, 0x7a, 0x7b, 0xb5, 0xdf, 0x81, 0x2f, 0x79, 0x38, 0xd2, 0x42, 0xd1, 0xa0, 0x5a, 0xb2, 0xe8, 0x8a, 0x7c, 0x2c, 0x04, 0x14, 0xe0, 0x4a, 0x4d, 0xdb, 0x1a, 0xd5, 0x39, 0xab, 0x19, 0x93, 0xcb, 0x6d, 0x8d, 0xbc, 0x11, 0x08, 0x3a, 0xfb, 0xbc, 0xdf, 0x08, 0x04, 0x2c, 0x0f, 0xc5, 0xc4, 0x67, 0x50, 0xa0, 0x7a, 0xd1, 0x6d, 0xe4, 0x22, 0x1f, 0x89, 0x59, 0xed, 0x30, 0x33, 0x3e, 0x3c, 0xe0, 0x65, 0xb7, 0x70, 0x00, 0xf2, 0x9e, 0x98, 0x7a, 0x02, 0x6a, 0xbc, 0x7a, 0xc5, 0xaa, 0x6f, 0xf2, 0xa9, 0xb8, 0x03, 0x1b, 0xd4, 0x64, 0x4a, 0xd4, 0x9e, 0xa0, 0xac, 0xd5, 0x6b, 0xf6, 0xb7, 0x60, 0x83, 0x97, 0xa6, 0xc4, 0x8b, 0xc0, 0xe4, 0x13, 0xf1, 0x7f, 0x53, 0xe7, 0x40, 0xa8, 0x1d, 0x10, 0xaa, 0x37, 0xfc, 0x17, 0xd1, 0xa1, 0x15, 0x10, 0x4a, 0x25, 0xf6, 0xa0, 0x45, 0x07, 0x1b, 0x54, 0x6f, 0x8f, 0xa3, 0xd3, 0x83, 0xd5, 0xdf, 0x1a, 0x4c, 0x69, 0xac, 0x29, 0x9b, 0x52, 0xbd, 0xeb, 0x4c, 0x5f, 0xd9, 0xc0, 0x35, 0x9b, 0xf7, 0xbd, 0xe9, 0xaa, 0x7c, 0x20, 0xf6, 0x8d, 0x25, 0x74, 0x2d, 0x14, 0xea, 0x03, 0xab, 0xa1, 0xaf, 0xa7, 0xfc, 0xb1, 0xa5, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x6c, 0x6f, 0x66, 0x72, 0x88, 0x03, 0x00, 0x00, }
{ if m != nil { return m.Name_3 } return "" }
identifier_body
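A note on the protobuf:"bytes,1,opt,name=name" struct tags in the generated Go above: the first number is the field number declared in envmon_sensor_info.proto, and the wire type follows from the field's kind (strings are length-delimited, wire type 2). A minimal sketch of how a field's wire-format key byte is derived; the helper name is hypothetical, and Python is used here purely for illustration:

def tag_byte(field_number, wire_type):
    # protobuf encodes each field key as (field_number << 3) | wire_type
    return (field_number << 3) | wire_type

assert tag_byte(1, 2) == 0x0A  # the 'name' field (bytes,1,...) of EnvmonSensorInfo_KEYS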
battleship-final.py
from random import randrange import random
# input: boat, taken_positions # this func checks if the boat is outside the playground or if the boat's position is already in taken_positions # return: boat. boat will be returned as [-1] or as its specific position boat.sort() for i in range(len(boat)): if boat[i] in taken_positions: #this condition checks if the block boat[i] is already in the list taken_positions boat = [-1] break elif boat[i] > 99 or boat[i] < 0: #this condition checks border 1 and 3 boat = [-1] break elif boat[i] % 10 == 9 and i < len(boat) - 1: #this condition checks border 2 and 4 if boat[i + 1] % 10 == 0: boat = [-1] break if i != 0: # this condition checks if there is any hole in the boat if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10: boat = [-1] break return boat def check_shot(shot, ships, hit, miss, comp, sinked_boats): # input: shot, all the boats (ships), hit, miss, comp, sinked_boats # this func initially assumes that the shot is missed (cond = 0) # given a shot, this func uses a for-loop that goes through all ships to see if the shot hits one of the ships # if yes, remove the block of the boat that is hit by the shot # append the shot to hit or comp. If comp, sinked_boats += 1 # if not, append the shot to miss # return: all the boats (ships), hit, miss, comp, cond, sinked_boats cond = 0 # miss for i in range(len(ships)): if shot in ships[i]: ships[i].remove(shot) if len(ships[i]) > 0: hit.append(shot) cond = 1 # hit else: comp.append(shot) cond = 2 # comp sinked_boats += 1 if cond == 0: # miss miss.append(shot) return ships, hit, miss, comp, cond, sinked_boats def create_playground(hit, miss, comp): # input: hit, miss, comp # this func creates the playground with the status of each block # print the playground print(" battleship") print(" 0 1 2 3 4 5 6 7 8 9") block = 0 #this variable keeps track of the spot of the block for i in range(10): #create each row row = "" for j in range(10): #create each spot on the specific row character = "_ " if block in miss: character = "x " elif block in hit: character = "o " elif block in comp: character = "Q " row += character block += 1 #the block var increments by 1 after each character is added to row print(i, " ", row) print("") def check_empty(ships): # input: ships # [] = False, [has elements] = True # this func checks each ship in the 2D list ships # if a ship is empty, return True, and vice versa # if all ships are empty, return True, else return False # return True or False return all([not elem for elem in ships]) """ user - 2 funcs: """ def create_ships_u(taken_positions, num_boats): # input: num_boats # this func has a loop that makes all boats, # which calls the get_ship(len_of_boat, taken_positions) that creates a single boat # return: ships, a 2D list of length len(num_boats) that contains the positions of all boats ships = [] #this is a 2D list containing the positions of all boats for len_of_boat in num_boats: ship, taken_positions = get_ship(len_of_boat, taken_positions) ships.append(ship) return ships, taken_positions def create_playground_u(taken_positions): print(" battleships ") print(" 0 1 2 3 4 5 6 7 8 9") place = 0 for x in range(10): row = "" for y in range(10): ch = " _ " if place in taken_positions: ch = " o " row = row + ch place = place + 1 print(x," ",row) def get_ship(len_of_boat, taken_positions): # input: len_of_boat, taken_positions # this func gets the boat's position from the user's input # this func checks both the type of the input (is it an int) and if the boat is inside the playground, not in taken_positions, and in the correct order # return a
valid ship while True: ship = [] print("enter your ship of length", len_of_boat) for i in range(len_of_boat): while True: try: boat_num = input("please enter a number: ") ship.append(int(boat_num)) except ValueError: # better try again... Return to the start of the loop print("wrong type of input") continue else: # it is a correct input, and we're ready to exit the loop break ship = check_ok(ship, taken_positions) if -1 not in ship: # check if a ship is valid. If yes, add the ship to taken_positions and break taken_positions += ship break else: print("invalid number - please enter again") return ship, taken_positions def get_shot_user(guesses): # input: guesses is the combined list of hit, miss, comp # this func asks the user to enter the shot, then checks the validity of the shot # return: the valid shot while True: try: shot = int(input("Enter your shot: ")) if shot < 0 or shot > 99: print("out of range - please enter again") elif shot in guesses: print("already guessed - please enter again") else: return shot except: print("incorrect - please enter integer only") """ computer - 1 funcs: """ def create_ships_c(taken_positions, num_boats): # input: num_boats # this func has a loop that makes all boats, # which calls the create_boat() that creates a single boat # return: ships, a 2D list of length len(num_boats) that contains the positions of all boats ships = [] #this is a 2D list containing the positions of all boats for len_of_boat in num_boats: boat_position = [-1] #the initial position of every boat is [-1] while -1 in boat_position: boat_start = randrange(100) #boat starting point (0-99) boat_direction = randrange(1, 5) #{1: "up", 2: "right", 3: "down", 4: "left"} boat_position = create_boat(len_of_boat, boat_start, boat_direction, taken_positions) #return the position of boat #a new boat is created after finishing the while loop ships.append(boat_position) taken_positions += boat_position #add all positions of the newly created boat to the list taken_positions return ships, taken_positions def create_boat(len_of_boat, boat_start, boat_direction, taken_positions): # input: len_of_boat, boat_start, boat_direction, taken_positions # this func initializes boat = [] # with len_of_boat, boat_start, boat_direction, this func creates the position of the boat # calls check_ok(boat, taken_positions) to see if the boat is outside the playground or if its position is already in taken_positions # return: boat.
boat will be returned as [-1] or as its specific position boat = [] if boat_direction == 1: for i in range(len_of_boat): boat.append(boat_start - i * 10) # already have the position of boat after this line boat = check_ok(boat, taken_positions) elif boat_direction == 2: for i in range(len_of_boat): boat.append(boat_start + i) boat = check_ok(boat, taken_positions) elif boat_direction == 3: for i in range(len_of_boat): boat.append(boat_start + i * 10) boat = check_ok(boat, taken_positions) elif boat_direction == 4: for i in range(len_of_boat): boat.append(boat_start - i) boat = check_ok(boat, taken_positions) return boat def get_shot_comp(guesses, tactics): # input: guesses (all moves), tactics (which is the list of all valid possible moves for the shot) # in the first move, tactics = [] # this func checks if len(tactics) > 0 # if yes, pick shot = tactics[0] # if no, pick shot = randrange(100) # this func checks that shot is not in guesses (which is the list of all moves) # if so, guesses.append(shot), and break # return: the valid shot, guesses while True: try: if len(tactics) > 0: shot = tactics[0] else: shot = randrange(100) if shot not in guesses: guesses.append(shot) break except: print("incorrect - please enter integer only") return shot, guesses def calculate_tactics(shot, tactics, guesses, hit): # input: shot, tactics, guesses, hit # this function takes the new shot, and changes the tactics list accordingly # the list temp holds the possible positions that the next shot can be # if the shot hits the first time, len(tactics) = 0. Then, temp is the list containing the 4 blocks around the shot # else, the list temp will be created based on the last 2 shots # candidate is the list of valid possible shots that is created from temp # shuffle the order of elements inside candidate # return: candidate (candidate is tactics) temp = [] if len(tactics) < 1: # got 1 hit the first time temp = [shot - 1, shot + 1, shot - 10, shot + 10] # temporary places that the next shot could be else: # got at least 2 hits # checks to see if the 4 spots around are in hit if shot - 1 in hit: # east temp = [shot + 1] for num in [2, 3, 4, 5, 6, 7, 8]: if shot - num not in hit: temp.append(shot - num) break elif shot + 1 in hit: # west temp = [shot - 1] for num in [2, 3, 4, 5, 6, 7, 8]: if shot + num not in hit: temp.append(shot + num) break elif shot - 10 in hit: # south temp = [shot + 10] for num in [20, 30, 40, 50, 60, 70, 80]: if shot - num not in hit: temp.append(shot - num) break elif shot + 10 in hit: # north. Ex: first shot is 50, next shot is 40 temp = [shot - 10] for num in [20, 30, 40, 50, 60, 70, 80]: if shot + num not in hit: temp.append(shot + num) break candidate = [] # list of valid places that the next shot could be for i in range(len(temp)): if temp[i] not in guesses and temp[i] < 100 and temp[i] > -1: #checks the validity of places in temp candidate.append(temp[i]) random.shuffle(candidate) # shuffle the element order of the list candidate return candidate """ main program: """ num_boats = [5, 4, 3, 3, 2, 2] # this list contains all boats. Each boat is represented by its length # before game # computer - 1 hit1 = [] miss1 = [] comp1 = [] guesses1 = [] cond1 = 0 tactics1 = [] # list of possible moves after a boat is hit.
After a boat is sunk, tactics resets to [] taken_positions1 = [] sinked_boats1 = 0 # user - 2 hit2 = [] miss2 = [] comp2 = [] guesses2 = [] cond2 = 0 tactics2 = [] taken_positions2 = [] sinked_boats2 = 0 # computer creates ships for player 1 ships1, taken_positions1 = create_ships_c(taken_positions1, num_boats) # user creates boat for player 2 - show board ships2, taken_positions2 = create_ships_u(taken_positions2, num_boats) create_playground_u(taken_positions2) # loop where the user and the computer take turns to shoot, repeating until there is a winner: turns = 0 while True: turns += 1 # USER SHOOTS: the suffix 1 is used because the user is attacking the computer's data guesses1 = hit1 + miss1 + comp1 shot1 = get_shot_user(guesses1) ships1, hit1, miss1, comp1, cond1, sinked_boats1 = check_shot(shot1, ships1, hit1, miss1, comp1, sinked_boats1) create_playground(hit1, miss1, comp1) # check if all of the computer ships are empty: if check_empty(ships1): print("end of game - user wins in", turns) break # COMPUTER SHOOTS: guesses2 = hit2 + miss2 + comp2 shot2, guesses2 = get_shot_comp(guesses2, tactics2) ships2, hit2, miss2, comp2, cond2, sinked_boats2 = check_shot(shot2, ships2, hit2, miss2, comp2, sinked_boats2) create_playground(hit2, miss2, comp2) if cond2 == 1: # got 1 hit tactics2 = calculate_tactics(shot2, tactics2, guesses2, hit2) elif cond2 == 2: # comp, and sunk the boat # reset tactics = [] tactics2 = [] elif len(tactics2) > 0: #len(tactics) > 0 means that there are still possible moves # got 1 hit, then miss # remove the latest shot from tactics tactics2.pop(0) # in case all 3 statements above are False, which means there is no hit in the first place, tactics is still [] # check if all of the user's ships are empty: if check_empty(ships2): print("end of game - computer wins in", turns) break # after both the user and computer shoot, start a new loop:
""" both user and computer funcs: """ def check_ok(boat, taken_positions):
random_line_split
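The border arithmetic in check_ok above is easiest to see with concrete positions: the board is 0-99, position = row * 10 + column, so column = position % 10, and a boat that crosses the right edge shows up as a % 10 == 9 square followed by a % 10 == 0 square. A minimal sketch, assuming the check_ok defined in this file is in scope:

assert check_ok([7, 8, 9], []) == [7, 8, 9]    # stays on row 0: accepted
assert check_ok([8, 9, 10], []) == [-1]        # wraps from row 0 into row 1: rejected
assert check_ok([20, 30, 40], [30]) == [-1]    # overlaps an already-taken square: rejected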
battleship-final.py
from random import randrange import random """ both user and computer funcs: """ def check_ok(boat, taken_positions): # input: boat, taken_positions # this func checks if the boat is outside the playground or if the boat's position is already in taken_positions # return: boat. boat will be returned as [-1] or as its specific position boat.sort() for i in range(len(boat)): if boat[i] in taken_positions: #this condition checks if the block boat[i] is already in the list taken_positions boat = [-1] break elif boat[i] > 99 or boat[i] < 0: #this condition checks border 1 and 3 boat = [-1] break elif boat[i] % 10 == 9 and i < len(boat) - 1: #this condition checks border 2 and 4 if boat[i + 1] % 10 == 0: boat = [-1] break if i != 0: # this condition checks if there is any hole in the boat if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10: boat = [-1] break return boat def check_shot(shot, ships, hit, miss, comp, sinked_boats): # input: shot, all the boats (ships), hit, miss, comp, sinked_boats # this func initially assumes that the shot is missed (cond = 0) # given a shot, this func uses a for-loop that goes through all ships to see if the shot hits one of the ships # if yes, remove the block of the boat that is hit by the shot # append the shot to hit or comp. If comp, sinked_boats += 1 # if not, append the shot to miss # return: all the boats (ships), hit, miss, comp, cond, sinked_boats cond = 0 # miss for i in range(len(ships)): if shot in ships[i]: ships[i].remove(shot) if len(ships[i]) > 0: hit.append(shot) cond = 1 # hit else: comp.append(shot) cond = 2 # comp sinked_boats += 1 if cond == 0: # miss miss.append(shot) return ships, hit, miss, comp, cond, sinked_boats def create_playground(hit, miss, comp): # input: hit, miss, comp # this func creates the playground with the status of each block # print the playground print(" battleship") print(" 0 1 2 3 4 5 6 7 8 9") block = 0 #this variable keeps track of the spot of the block for i in range(10): #create each row row = "" for j in range(10): #create each spot on the specific row character = "_ " if block in miss: character = "x " elif block in hit: character = "o " elif block in comp: character = "Q " row += character block += 1 #the block var increments by 1 after each character is added to row print(i, " ", row) print("") def check_empty(ships): # input: ships # [] = False, [has elements] = True # this func checks each ship in the 2D list ships # if a ship is empty, return True, and vice versa # if all ships are empty, return True, else return False # return True or False return all([not elem for elem in ships]) """ user - 2 funcs: """ def create_ships_u(taken_positions, num_boats): # input: num_boats # this func has a loop that makes all boats, # which calls the get_ship(len_of_boat, taken_positions) that creates a single boat # return: ships, a 2D list of length len(num_boats) that contains the positions of all boats ships = [] #this is a 2D list containing the positions of all boats for len_of_boat in num_boats: ship, taken_positions = get_ship(len_of_boat, taken_positions) ships.append(ship) return ships, taken_positions def create_playground_u(taken_positions): print(" battleships ") print(" 0 1 2 3 4 5 6 7 8 9") place = 0 for x in range(10): row = "" for y in range(10): ch = " _ " if place in taken_positions: ch = " o " row = row + ch place = place + 1 print(x," ",row) def get_ship(len_of_boat, taken_positions): # input: len_of_boat, taken_positions # this func gets the boat's position from the user's input # this func checks
both the type of the input (is it an int) and if the boat is inside the playground, not in taken_positions, and in the correct order # return a valid ship while True: ship = [] print("enter your ship of length", len_of_boat) for i in range(len_of_boat): while True: try: boat_num = input("please enter a number: ") ship.append(int(boat_num)) except ValueError: # better try again... Return to the start of the loop print("wrong type of input") continue else: # it is a correct input, and we're ready to exit the loop break ship = check_ok(ship, taken_positions) if -1 not in ship: # check if a ship is valid. If yes, add the ship to taken_positions and break taken_positions += ship break else: print("invalid number - please enter again") return ship, taken_positions def
(guesses): # input: guesses is the combined list of hit, miss, comp # this func asks the user to enter the shot, then checks the validity of the shot # return: the valid shot while True: try: shot = int(input("Enter your shot: ")) if shot < 0 or shot > 99: print("out of range - please enter again") elif shot in guesses: print("already guessed - please enter again") else: return shot except: print("incorrect - please enter integer only") """ computer - 1 funcs: """ def create_ships_c(taken_positions, num_boats): # input: num_boats # this func has a loop that makes all boats, # which calls the create_boat() that creates a single boat # return: ships, a 2D list of length len(num_boats) that contains the positions of all boats ships = [] #this is a 2D list containing the positions of all boats for len_of_boat in num_boats: boat_position = [-1] #the initial position of every boat is [-1] while -1 in boat_position: boat_start = randrange(100) #boat starting point (0-99) boat_direction = randrange(1, 5) #{1: "up", 2: "right", 3: "down", 4: "left"} boat_position = create_boat(len_of_boat, boat_start, boat_direction, taken_positions) #return the position of boat #a new boat is created after finishing the while loop ships.append(boat_position) taken_positions += boat_position #add all positions of the newly created boat to the list taken_positions return ships, taken_positions def create_boat(len_of_boat, boat_start, boat_direction, taken_positions): # input: len_of_boat, boat_start, boat_direction, taken_positions # this func initializes boat = [] # with len_of_boat, boat_start, boat_direction, this func creates the position of the boat # calls check_ok(boat, taken_positions) to see if the boat is outside the playground or if its position is already in taken_positions # return: boat. boat will be returned as [-1] or as its specific position boat = [] if boat_direction == 1: for i in range(len_of_boat): boat.append(boat_start - i * 10) # already have the position of boat after this line boat = check_ok(boat, taken_positions) elif boat_direction == 2: for i in range(len_of_boat): boat.append(boat_start + i) boat = check_ok(boat, taken_positions) elif boat_direction == 3: for i in range(len_of_boat): boat.append(boat_start + i * 10) boat = check_ok(boat, taken_positions) elif boat_direction == 4: for i in range(len_of_boat): boat.append(boat_start - i) boat = check_ok(boat, taken_positions) return boat def get_shot_comp(guesses, tactics): # input: guesses (all moves), tactics (which is the list of all valid possible moves for the shot) # in the first move, tactics = [] # this func checks if len(tactics) > 0 # if yes, pick shot = tactics[0] # if no, pick shot = randrange(100) # this func checks that shot is not in guesses (which is the list of all moves) # if so, guesses.append(shot), and break # return: the valid shot, guesses while True: try: if len(tactics) > 0: shot = tactics[0] else: shot = randrange(100) if shot not in guesses: guesses.append(shot) break except: print("incorrect - please enter integer only") return shot, guesses def calculate_tactics(shot, tactics, guesses, hit): # input: shot, tactics, guesses, hit # this function takes the new shot, and changes the tactics list accordingly # the list temp holds the possible positions that the next shot can be # if the shot hits the first time, len(tactics) = 0.
Then, temp is the list containing the 4 blocks around the shot # else, the list temp will be created based on the last 2 shots # candidate is the list of valid possible shots that is created from temp # shuffle the order of elements inside candidate # return: candidate (candidate is tactics) temp = [] if len(tactics) < 1: # got 1 hit the first time temp = [shot - 1, shot + 1, shot - 10, shot + 10] # temporary places that the next shot could be else: # got at least 2 hits # checks to see if the 4 spots around are in hit if shot - 1 in hit: # east temp = [shot + 1] for num in [2, 3, 4, 5, 6, 7, 8]: if shot - num not in hit: temp.append(shot - num) break elif shot + 1 in hit: # west temp = [shot - 1] for num in [2, 3, 4, 5, 6, 7, 8]: if shot + num not in hit: temp.append(shot + num) break elif shot - 10 in hit: # south temp = [shot + 10] for num in [20, 30, 40, 50, 60, 70, 80]: if shot - num not in hit: temp.append(shot - num) break elif shot + 10 in hit: # north. Ex: first shot is 50, next shot is 40 temp = [shot - 10] for num in [20, 30, 40, 50, 60, 70, 80]: if shot + num not in hit: temp.append(shot + num) break candidate = [] # list of valid places that the next shot could be for i in range(len(temp)): if temp[i] not in guesses and temp[i] < 100 and temp[i] > -1: #checks the validity of places in temp candidate.append(temp[i]) random.shuffle(candidate) # shuffle the element order of the list candidate return candidate """ main program: """ num_boats = [5, 4, 3, 3, 2, 2] # this list contains all boats. Each boat is represented by its length # before game # computer - 1 hit1 = [] miss1 = [] comp1 = [] guesses1 = [] cond1 = 0 tactics1 = [] # list of possible moves after a boat is hit. After a boat is sunk, tactics resets to [] taken_positions1 = [] sinked_boats1 = 0 # user - 2 hit2 = [] miss2 = [] comp2 = [] guesses2 = [] cond2 = 0 tactics2 = [] taken_positions2 = [] sinked_boats2 = 0 # computer creates ships for player 1 ships1, taken_positions1 = create_ships_c(taken_positions1, num_boats) # user creates boat for player 2 - show board ships2, taken_positions2 = create_ships_u(taken_positions2, num_boats) create_playground_u(taken_positions2) # loop where the user and the computer take turns to shoot, repeating until there is a winner: turns = 0 while True: turns += 1 # USER SHOOTS: the suffix 1 is used because the user is attacking the computer's data guesses1 = hit1 + miss1 + comp1 shot1 = get_shot_user(guesses1) ships1, hit1, miss1, comp1, cond1, sinked_boats1 = check_shot(shot1, ships1, hit1, miss1, comp1, sinked_boats1) create_playground(hit1, miss1, comp1) # check if all of the computer ships are empty: if check_empty(ships1): print("end of game - user wins in", turns) break # COMPUTER SHOOTS: guesses2 = hit2 + miss2 + comp2 shot2, guesses2 = get_shot_comp(guesses2, tactics2) ships2, hit2, miss2, comp2, cond2, sinked_boats2 = check_shot(shot2, ships2, hit2, miss2, comp2, sinked_boats2) create_playground(hit2, miss2, comp2) if cond2 == 1: # got 1 hit tactics2 = calculate_tactics(shot2, tactics2, guesses2, hit2) elif cond2 == 2: # comp, and sunk the boat # reset tactics = [] tactics2 = [] elif len(tactics2) > 0: #len(tactics) > 0 means that there are still possible moves # got 1 hit, then miss # remove the latest shot from tactics tactics2.pop(0) # in case all 3 statements above are False, which means there is no hit in the first place, tactics is still [] # check if all of the user's ships are empty: if check_empty(ships2): print("end of game - computer wins in", turns) break # after both the user and computer shoot,
start a new loop:
get_shot_user
identifier_name
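The hunt/target behaviour that calculate_tactics implements can be pinned down with a worked example, assuming the functions defined in this file are in scope: after a first hit the four neighbours become candidates, and after a second hit in a line the search extends along that line.

candidates = calculate_tactics(45, [], [45], [45])
assert sorted(candidates) == [35, 44, 46, 55]   # first hit at 45: probe the 4 neighbours

candidates = calculate_tactics(46, [44, 35, 55], [45, 46], [45, 46])
assert sorted(candidates) == [44, 47]           # hits at 45 and 46: extend along the row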
battleship-final.py
from random import randrange import random """ both user and computer funcs: """ def check_ok(boat, taken_positions): # input: boat, taken_positions # this func checks if the boat is outside the playground or if the boat's position is already in taken_positions # return: boat. boat will be returned as [-1] or as its specific position boat.sort() for i in range(len(boat)): if boat[i] in taken_positions: #this condition checks if the block boat[i] is already in the list taken_positions boat = [-1] break elif boat[i] > 99 or boat[i] < 0: #this condition checks border 1 and 3 boat = [-1] break elif boat[i] % 10 == 9 and i < len(boat) - 1: #this condition checks border 2 and 4 if boat[i + 1] % 10 == 0: boat = [-1] break if i != 0: # this condition checks if there is any hole in the boat if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10: boat = [-1] break return boat def check_shot(shot, ships, hit, miss, comp, sinked_boats): # input: shot, all the boats (ships), hit, miss, comp, sinked_boats # this func initially assumes that the shot is missed (cond = 0) # given a shot, this func uses a for-loop that goes through all ships to see if the shot hits one of the ships # if yes, remove the block of the boat that is hit by the shot # append the shot to hit or comp. If comp, sinked_boats += 1 # if not, append the shot to miss # return: all the boats (ships), hit, miss, comp, cond, sinked_boats cond = 0 # miss for i in range(len(ships)): if shot in ships[i]: ships[i].remove(shot) if len(ships[i]) > 0: hit.append(shot) cond = 1 # hit else: comp.append(shot) cond = 2 # comp sinked_boats += 1 if cond == 0: # miss miss.append(shot) return ships, hit, miss, comp, cond, sinked_boats def create_playground(hit, miss, comp): # input: hit, miss, comp # this func creates the playground with the status of each block # print the playground print(" battleship") print(" 0 1 2 3 4 5 6 7 8 9") block = 0 #this variable keeps track of the spot of the block for i in range(10): #create each row row = "" for j in range(10): #create each spot on the specific row character = "_ " if block in miss: character = "x " elif block in hit: character = "o " elif block in comp: character = "Q " row += character block += 1 #the block var increments by 1 after each character is added to row print(i, " ", row) print("") def check_empty(ships): # input: ships # [] = False, [has elements] = True # this func checks each ship in the 2D list ships # if a ship is empty, return True, and vice versa # if all ships are empty, return True, else return False # return True or False return all([not elem for elem in ships]) """ user - 2 funcs: """ def create_ships_u(taken_positions, num_boats): # input: num_boats # this func has a loop that makes all boats, # which calls the get_ship(len_of_boat, taken_positions) that creates a single boat # return: ships, a 2D list of length len(num_boats) that contains the positions of all boats ships = [] #this is a 2D list containing the positions of all boats for len_of_boat in num_boats: ship, taken_positions = get_ship(len_of_boat, taken_positions) ships.append(ship) return ships, taken_positions def create_playground_u(taken_positions): print(" battleships ") print(" 0 1 2 3 4 5 6 7 8 9") place = 0 for x in range(10): row = "" for y in range(10): ch = " _ " if place in taken_positions: ch = " o " row = row + ch place = place + 1 print(x," ",row) def get_ship(len_of_boat, taken_positions): # input: len_of_boat, taken_positions # this func gets the boat's position from the user's input # this func checks
both the type of the input (is it an int) and if the boat is inside the playground, not in taken_positions, and in the correct order # return a valid ship while True: ship = [] print("enter your ship of length", len_of_boat) for i in range(len_of_boat): while True: try: boat_num = input("please enter a number: ") ship.append(int(boat_num)) except ValueError: # better try again... Return to the start of the loop print("wrong type of input") continue else: # it is a correct input, and we're ready to exit the loop break ship = check_ok(ship, taken_positions) if -1 not in ship: # check if a ship is valid. If yes, add the ship to taken_positions and break taken_positions += ship break else: print("invalid number - please enter again") return ship, taken_positions def get_shot_user(guesses): # input: guesses is the combined list of hit, miss, comp # this func asks the user to enter the shot, then checks the validity of the shot # return: the valid shot while True: try: shot = int(input("Enter your shot: ")) if shot < 0 or shot > 99: print("out of range - please enter again") elif shot in guesses: print("already guessed - please enter again") else: return shot except: print("incorrect - please enter integer only") """ computer - 1 funcs: """ def create_ships_c(taken_positions, num_boats): # input: num_boats # this func has a loop that makes all boats, # which calls the create_boat() that creates a single boat # return: ships, a 2D list of length len(num_boats) that contains the positions of all boats
def create_boat(len_of_boat, boat_start, boat_direction, taken_positions): # input: len_of_boat, boat_start, boat_direction, taken_positions # this func initializes boat = [] # with len_of_boat, boat_start, boat_direction, this func creates the position of the boat # calls check_ok(boat, taken_positions) to see if the boat is outside the playground or if its position is already in taken_positions # return: boat. boat will be returned as [-1] or as its specific position boat = [] if boat_direction == 1: for i in range(len_of_boat): boat.append(boat_start - i * 10) # already have the position of boat after this line boat = check_ok(boat, taken_positions) elif boat_direction == 2: for i in range(len_of_boat): boat.append(boat_start + i) boat = check_ok(boat, taken_positions) elif boat_direction == 3: for i in range(len_of_boat): boat.append(boat_start + i * 10) boat = check_ok(boat, taken_positions) elif boat_direction == 4: for i in range(len_of_boat): boat.append(boat_start - i) boat = check_ok(boat, taken_positions) return boat def get_shot_comp(guesses, tactics): # input: guesses (all moves), tactics (which is the list of all valid possible moves for the shot) # in the first move, tactics = [] # this func checks if len(tactics) > 0 # if yes, pick shot = tactics[0] # if no, pick shot = randrange(100) # this func checks that shot is not in guesses (which is the list of all moves) # if so, guesses.append(shot), and break # return: the valid shot, guesses while True: try: if len(tactics) > 0: shot = tactics[0] else: shot = randrange(100) if shot not in guesses: guesses.append(shot) break except: print("incorrect - please enter integer only") return shot, guesses def calculate_tactics(shot, tactics, guesses, hit): # input: shot, tactics, guesses, hit # this function takes the new shot, and changes the tactics list accordingly # the list temp holds the possible positions that the next shot can be # if the shot hits the first time, len(tactics) = 0. Then, temp is the list containing the 4 blocks around the shot # else, the list temp will be created based on the last 2 shots # candidate is the list of valid possible shots that is created from temp # shuffle the order of elements inside candidate # return: candidate (candidate is tactics) temp = [] if len(tactics) < 1: # got 1 hit the first time temp = [shot - 1, shot + 1, shot - 10, shot + 10] # temporary places that the next shot could be else: # got at least 2 hits # checks to see if the 4 spots around are in hit if shot - 1 in hit: # east temp = [shot + 1] for num in [2, 3, 4, 5, 6, 7, 8]: if shot - num not in hit: temp.append(shot - num) break elif shot + 1 in hit: # west temp = [shot - 1] for num in [2, 3, 4, 5, 6, 7, 8]: if shot + num not in hit: temp.append(shot + num) break elif shot - 10 in hit: # south temp = [shot + 10] for num in [20, 30, 40, 50, 60, 70, 80]: if shot - num not in hit: temp.append(shot - num) break elif shot + 10 in hit: # north. Ex: first shot is 50, next shot is 40 temp = [shot - 10] for num in [20, 30, 40, 50, 60, 70, 80]: if shot + num not in hit: temp.append(shot + num) break candidate = [] # list of valid places that the next shot could be for i in range(len(temp)): if temp[i] not in guesses and temp[i] < 100 and temp[i] > -1: #checks the validity of places in temp candidate.append(temp[i]) random.shuffle(candidate) # shuffle the element order of the list candidate return candidate """ main program: """ num_boats = [5, 4, 3, 3, 2, 2] # this list contains all boats.
Each boat is represented by its length # before game # computer - 1 hit1 = [] miss1 = [] comp1 = [] guesses1 = [] cond1 = 0 tactics1 = [] # list of possible moves after a boat is hit. After a boat is sunk, tactics resets to [] taken_positions1 = [] sinked_boats1 = 0 # user - 2 hit2 = [] miss2 = [] comp2 = [] guesses2 = [] cond2 = 0 tactics2 = [] taken_positions2 = [] sinked_boats2 = 0 # computer creates ships for player 1 ships1, taken_positions1 = create_ships_c(taken_positions1, num_boats) # user creates boat for player 2 - show board ships2, taken_positions2 = create_ships_u(taken_positions2, num_boats) create_playground_u(taken_positions2) # loop where the user and the computer take turns to shoot, repeating until there is a winner: turns = 0 while True: turns += 1 # USER SHOOTS: the suffix 1 is used because the user is attacking the computer's data guesses1 = hit1 + miss1 + comp1 shot1 = get_shot_user(guesses1) ships1, hit1, miss1, comp1, cond1, sinked_boats1 = check_shot(shot1, ships1, hit1, miss1, comp1, sinked_boats1) create_playground(hit1, miss1, comp1) # check if all of the computer ships are empty: if check_empty(ships1): print("end of game - user wins in", turns) break # COMPUTER SHOOTS: guesses2 = hit2 + miss2 + comp2 shot2, guesses2 = get_shot_comp(guesses2, tactics2) ships2, hit2, miss2, comp2, cond2, sinked_boats2 = check_shot(shot2, ships2, hit2, miss2, comp2, sinked_boats2) create_playground(hit2, miss2, comp2) if cond2 == 1: # got 1 hit tactics2 = calculate_tactics(shot2, tactics2, guesses2, hit2) elif cond2 == 2: # comp, and sunk the boat # reset tactics = [] tactics2 = [] elif len(tactics2) > 0: #len(tactics) > 0 means that there are still possible moves # got 1 hit, then miss # remove the latest shot from tactics tactics2.pop(0) # in case all 3 statements above are False, which means there is no hit in the first place, tactics is still [] # check if all of the user's ships are empty: if check_empty(ships2): print("end of game - computer wins in", turns) break # after both the user and computer shoot, start a new loop:
ships = [] #this is a 2D list containing the positions of all boats for len_of_boat in num_boats: boat_position = [-1] #the initial position of every boat is [-1] while -1 in boat_position: boat_start = randrange(100) #boat starting point (0-99) boat_direction = randrange(1, 5) #{1: "up", 2: "right", 3: "down", 4: "left"} boat_position = create_boat(len_of_boat, boat_start, boat_direction, taken_positions) #return the position of boat #a new boat is created after finishing the while loop ships.append(boat_position) taken_positions += boat_position #add all positions of the newly created boat to the list taken_positions return ships, taken_positions
identifier_body
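A quick illustration of create_boat's direction encoding on the 0-99 grid (1 = up, 2 = right, 3 = down, 4 = left), assuming the functions defined in this file are in scope; boats that run off the board come back as [-1] via check_ok:

assert create_boat(3, 47, 2, []) == [47, 48, 49]   # right: step +1
assert create_boat(3, 47, 3, []) == [47, 57, 67]   # down: step +10
assert create_boat(3, 5, 1, []) == [-1]            # up from row 0 runs off the board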
battleship-final.py
from random import randrange import random """ both user and computer funcs: """ def check_ok(boat, taken_positions): # input: boat, taken_positions # this func checks if the boat is outside the playground or if the boat's position is already in taken_positions # return: boat. boat will be returned as [-1] or as its specific position boat.sort() for i in range(len(boat)): if boat[i] in taken_positions: #this condition checks if the block boat[i] is already in the list taken_positions boat = [-1] break elif boat[i] > 99 or boat[i] < 0: #this condition checks border 1 and 3 boat = [-1] break elif boat[i] % 10 == 9 and i < len(boat) - 1: #this condition checks border 2 and 4 if boat[i + 1] % 10 == 0: boat = [-1] break if i != 0: # this condition checks if there is any hole in the boat if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10: boat = [-1] break return boat def check_shot(shot, ships, hit, miss, comp, sinked_boats): # input: shot, all the boats (ships), hit, miss, comp, sinked_boats # this func initially assumes that the shot is missed (cond = 0) # given a shot, this func uses a for-loop that goes through all ships to see if the shot hits one of the ships # if yes, remove the block of the boat that is hit by the shot # append the shot to hit or comp. If comp, sinked_boats += 1 # if not, append the shot to miss # return: all the boats (ships), hit, miss, comp, cond, sinked_boats cond = 0 # miss for i in range(len(ships)): if shot in ships[i]: ships[i].remove(shot) if len(ships[i]) > 0: hit.append(shot) cond = 1 # hit else: comp.append(shot) cond = 2 # comp sinked_boats += 1 if cond == 0: # miss miss.append(shot) return ships, hit, miss, comp, cond, sinked_boats def create_playground(hit, miss, comp): # input: hit, miss, comp # this func creates the playground with the status of each block # print the playground print(" battleship") print(" 0 1 2 3 4 5 6 7 8 9") block = 0 #this variable keeps track of the spot of the block for i in range(10): #create each row row = "" for j in range(10): #create each spot on the specific row character = "_ " if block in miss: character = "x " elif block in hit: character = "o " elif block in comp: character = "Q " row += character block += 1 #the block var increments by 1 after each character is added to row print(i, " ", row) print("") def check_empty(ships): # input: ships # [] = False, [has elements] = True # this func checks each ship in the 2D list ships # if a ship is empty, return True, and vice versa # if all ships are empty, return True, else return False # return True or False return all([not elem for elem in ships]) """ user - 2 funcs: """ def create_ships_u(taken_positions, num_boats): # input: num_boats # this func has a loop that makes all boats, # which calls the get_ship(len_of_boat, taken_positions) that creates a single boat # return: ships, a 2D list of length len(num_boats) that contains the positions of all boats ships = [] #this is a 2D list containing the positions of all boats for len_of_boat in num_boats: ship, taken_positions = get_ship(len_of_boat, taken_positions) ships.append(ship) return ships, taken_positions def create_playground_u(taken_positions): print(" battleships ") print(" 0 1 2 3 4 5 6 7 8 9") place = 0 for x in range(10): row = "" for y in range(10): ch = " _ " if place in taken_positions: ch = " o " row = row + ch place = place + 1 print(x," ",row) def get_ship(len_of_boat, taken_positions): # input: len_of_boat, taken_positions # this func gets the boat's position from the user's input # this func checks
both the type of the input (is it an int) and if the boat is inside the playground, not in taken_positions, and in the correct order # return a valid ship while True: ship = [] print("enter your ship of length", len_of_boat) for i in range(len_of_boat): while True: try: boat_num = input("please enter a number: ") ship.append(int(boat_num)) except ValueError: # better try again... Return to the start of the loop print("wrong type of input") continue else: # it is a correct input, and we're ready to exit the loop break ship = check_ok(ship, taken_positions) if -1 not in ship: # check if a ship is valid. If yes, add the ship to taken_positions and break taken_positions += ship break else: print("invalid number - please enter again") return ship, taken_positions def get_shot_user(guesses): # input: guesses is the combined list of hit, miss, comp # this func asks the user to enter the shot, then checks the validity of the shot # return: the valid shot while True: try: shot = int(input("Enter your shot: ")) if shot < 0 or shot > 99: print("out of range - please enter again") elif shot in guesses: print("already guessed - please enter again") else: return shot except: print("incorrect - please enter integer only") """ computer - 1 funcs: """ def create_ships_c(taken_positions, num_boats): # input: num_boats # this func has a loop that makes all boats, # which calls the create_boat() that creates a single boat # return: ships, a 2D list of length len(num_boats) that contains the positions of all boats ships = [] #this is a 2D list containing the positions of all boats for len_of_boat in num_boats: boat_position = [-1] #the initial position of every boat is [-1] while -1 in boat_position: boat_start = randrange(100) #boat starting point (0-99) boat_direction = randrange(1, 5) #{1: "up", 2: "right", 3: "down", 4: "left"} boat_position = create_boat(len_of_boat, boat_start, boat_direction, taken_positions) #return the position of boat #a new boat is created after finishing the while loop ships.append(boat_position) taken_positions += boat_position #add all positions of the newly created boat to the list taken_positions return ships, taken_positions def create_boat(len_of_boat, boat_start, boat_direction, taken_positions): # input: len_of_boat, boat_start, boat_direction, taken_positions # this func initializes boat = [] # with len_of_boat, boat_start, boat_direction, this func creates the position of the boat # calls check_ok(boat, taken_positions) to see if the boat is outside the playground or if its position is already in taken_positions # return: boat. boat will be returned as [-1] or as its specific position boat = [] if boat_direction == 1: for i in range(len_of_boat): boat.append(boat_start - i * 10) # already have the position of boat after this line boat = check_ok(boat, taken_positions) elif boat_direction == 2: for i in range(len_of_boat): boat.append(boat_start + i) boat = check_ok(boat, taken_positions) elif boat_direction == 3: for i in range(len_of_boat): boat.append(boat_start + i * 10) boat = check_ok(boat, taken_positions) elif boat_direction == 4:
return boat def get_shot_comp(guesses, tactics): # input: guesses (all moves), tactics (which is the list of all valid possible moves for the shot) # in the first move, tactics = [] # this func checks if len(tactics) > 0 # if yes, pick shot = tactics[0] # if no, pick shot = randrange(100) # this func checks that shot is not in guesses (which is the list of all moves) # if so, guesses.append(shot), and break # return: the valid shot, guesses while True: try: if len(tactics) > 0: shot = tactics[0] else: shot = randrange(100) if shot not in guesses: guesses.append(shot) break except: print("incorrect - please enter integer only") return shot, guesses def calculate_tactics(shot, tactics, guesses, hit): # input: shot, tactics, guesses, hit # this function takes the new shot, and changes the tactics list accordingly # the list temp holds the possible positions that the next shot can be # if the shot hits the first time, len(tactics) = 0. Then, temp is the list containing the 4 blocks around the shot # else, the list temp will be created based on the last 2 shots # candidate is the list of valid possible shots that is created from temp # shuffle the order of elements inside candidate # return: candidate (candidate is tactics) temp = [] if len(tactics) < 1: # got 1 hit the first time temp = [shot - 1, shot + 1, shot - 10, shot + 10] # temporary places that the next shot could be else: # got at least 2 hits # checks to see if the 4 spots around are in hit if shot - 1 in hit: # east temp = [shot + 1] for num in [2, 3, 4, 5, 6, 7, 8]: if shot - num not in hit: temp.append(shot - num) break elif shot + 1 in hit: # west temp = [shot - 1] for num in [2, 3, 4, 5, 6, 7, 8]: if shot + num not in hit: temp.append(shot + num) break elif shot - 10 in hit: # south temp = [shot + 10] for num in [20, 30, 40, 50, 60, 70, 80]: if shot - num not in hit: temp.append(shot - num) break elif shot + 10 in hit: # north. Ex: first shot is 50, next shot is 40 temp = [shot - 10] for num in [20, 30, 40, 50, 60, 70, 80]: if shot + num not in hit: temp.append(shot + num) break candidate = [] # list of valid places that the next shot could be for i in range(len(temp)): if temp[i] not in guesses and temp[i] < 100 and temp[i] > -1: #checks the validity of places in temp candidate.append(temp[i]) random.shuffle(candidate) # shuffle the element order of the list candidate return candidate """ main program: """ num_boats = [5, 4, 3, 3, 2, 2] # this list contains all boats. Each boat is represented by its length # before game # computer - 1 hit1 = [] miss1 = [] comp1 = [] guesses1 = [] cond1 = 0 tactics1 = [] # list of possible moves after a boat is hit.
After a boat is sunk, tactics resets to [] taken_positions1 = [] sinked_boats1 = 0 # user - 2 hit2 = [] miss2 = [] comp2 = [] guesses2 = [] cond2 = 0 tactics2 = [] taken_positions2 = [] sinked_boats2 = 0 # computer creates ships for player 1 ships1, taken_positions1 = create_ships_c(taken_positions1, num_boats) # user creates boat for player 2 - show board ships2, taken_positions2 = create_ships_u(taken_positions2, num_boats) create_playground_u(taken_positions2) # loop where the user and the computer take turns to shoot, repeating until there is a winner: turns = 0 while True: turns += 1 # USER SHOOTS: the suffix 1 is used because the user is attacking the computer's data guesses1 = hit1 + miss1 + comp1 shot1 = get_shot_user(guesses1) ships1, hit1, miss1, comp1, cond1, sinked_boats1 = check_shot(shot1, ships1, hit1, miss1, comp1, sinked_boats1) create_playground(hit1, miss1, comp1) # check if all of the computer ships are empty: if check_empty(ships1): print("end of game - user wins in", turns) break # COMPUTER SHOOTS: guesses2 = hit2 + miss2 + comp2 shot2, guesses2 = get_shot_comp(guesses2, tactics2) ships2, hit2, miss2, comp2, cond2, sinked_boats2 = check_shot(shot2, ships2, hit2, miss2, comp2, sinked_boats2) create_playground(hit2, miss2, comp2) if cond2 == 1: # got 1 hit tactics2 = calculate_tactics(shot2, tactics2, guesses2, hit2) elif cond2 == 2: # comp, and sunk the boat # reset tactics = [] tactics2 = [] elif len(tactics2) > 0: #len(tactics) > 0 means that there are still possible moves # got 1 hit, then miss # remove the latest shot from tactics tactics2.pop(0) # in case all 3 statements above are False, which means there is no hit in the first place, tactics is still [] # check if all of the user's ships are empty: if check_empty(ships2): print("end of game - computer wins in", turns) break # after both the user and computer shoot, start a new loop:
for i in range(len_of_boat): boat.append(boat_start - i) boat = check_ok(boat, taken_positions)
conditional_block
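Taken together, each record in this file is a fill-in-the-middle (FIM) example over battleship-final.py: the prefix and suffix are the visible context, the middle is the masked span, and fim_type names how the span was chosen (identifier_name, identifier_body, conditional_block, random_line_split). A minimal sketch of the invariant these records satisfy, with hypothetical variable names standing in for a record's fields:

def reconstruct(prefix, middle, suffix):
    # concatenating the three fields reproduces the original source file
    return prefix + middle + suffix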
ei_formulario.js
ei_formulario.prototype = new ei(); ei_formulario.prototype.constructor = ei_formulario; /** * @class A simple form presents a grid of editable fields. * Each of these fields is called a Form Element (ef). * @see ef * @constructor * @phpdoc classes/toba_ei_formulario.html toba_ei_formulario */ function
(id, instancia, rango_tabs, input_submit, maestros, esclavos, invalidos) { this._id = id; this._instancia = instancia; //Name of the object instance; allows tying the object to the DOM tree this._rango_tabs = rango_tabs; this._input_submit = input_submit; //Field that gets set on the form's submit this.controlador = null; //Reference to the containing CI this._efs = {}; //List of contained objeto_ef this._efs_procesar = {}; //IDs of the efs that have processing attached this._silencioso = false; //Silence confirmations and alerts? Useful for testing this._evento_implicito = null; //There is no preset event this._expandido = false; //The form starts out unexpanded this._maestros = maestros; this._esclavos = esclavos; this._invalidos = invalidos; this._estado_inicial = {}; this._con_examen_cambios = false; this._cambios_excluir_efs = []; this._tmp_valores_esclavos = {}; //temporary list of values to keep until the cascade returns } /** * @private * @param {ef} ef object that represents the ef * @param {string} identificador Id of the ef */ ei_formulario.prototype.agregar_ef = function (ef, identificador) { if (ef) { this._efs[identificador] = ef; } }; /** *@private *@param {ef} objeto_ef Object that represents the ef */ ei_formulario.prototype.instancia_ef = function (objeto_ef) { var id = objeto_ef.get_id(); return this._instancia + ".ef('"+ id + "')"; }; ei_formulario.prototype.iniciar = function () { var id_ef; for (id_ef in this._efs) { this._efs[id_ef].iniciar(id_ef, this); this._estado_inicial[id_ef] = this._efs[id_ef].get_estado(); this._efs[id_ef].cuando_cambia_valor(this._instancia + '.validar_ef("' + id_ef + '", true)'); if (this._invalidos[id_ef]) { this._efs[id_ef].resaltar(this._invalidos[id_ef]); } } if (this._con_examen_cambios) { this._examinar_cambios(); } this.agregar_procesamientos(); this.refrescar_procesamientos(true); this.reset_evento(); if (this.configurar) { this.configurar(); } }; //---Queries /** * Accesses the instance of a specific ef * @param {string} id id of the ef * @type ef * @see ef */ ei_formulario.prototype.ef = function(id) { return this._efs[id]; }; /** * Returns an associative object id_ef => ef, to be used in a loop, e.g. * for (id_ef in this.efs()) { * this.ef(id_ef).metodo() * } * @type Object * @see ef */ ei_formulario.prototype.efs = function() { return this._efs; }; /** * Returns the current state of the efs as an associative object id_ef=>value * @type Object */ ei_formulario.prototype.get_datos = function() { var datos = {}; for (var id_ef in this._efs) { datos[id_ef] = this._efs[id_ef].get_estado(); } return datos; }; //---Submit ei_formulario.prototype.submit = function() { var id_ef; if (this.controlador && !this.controlador.en_submit()) { return this.controlador.submit(); } if (this._evento && this.debe_disparar_evento()) { //Send the submit notice to the efs for (id_ef in this._efs) { this._efs[id_ef].submit(); } //Flag the event execution so the PHP class recognizes it document.getElementById(this._input_submit).value = this._evento.id; } }; //Checks whether it is possible to submit all the associated objects ei_formulario.prototype.puede_submit = function() { if(this._evento) //If there is an event set... { //- 1 - The validations must be performed if(! this.validar() ) { this.reset_evento(); return false; } if (!
ei.prototype.puede_submit.call(this)) { return false; } } return true; }; ei_formulario.prototype.debe_disparar_evento = function() { var debe = true, id_ef; if (this._evento_condicionado_a_datos && this._evento.es_implicito) { var cambios = false; for (id_ef in this._efs) { cambios = (cambios || this.hay_cambios(id_ef)); } debe = cambios; } return debe; }; //---- Cascadas /** * Esquema de Cascadas:<br> * Un ef indica que su valor cambio y por lo tanto sus esclavos deben refrescarse * @param {string} id_ef Identificador del ef maestro que sufrio una modificación */ ei_formulario.prototype.cascadas_cambio_maestro = function(id_ef, fila) { if (this._esclavos[id_ef]) { this.evt__cascadas_inicio(this.ef(id_ef)); //--Se recorren los esclavos del master modificado for (var i=0; i < this._esclavos[id_ef].length; i++) { this.cascadas_preparar_esclavo(this._esclavos[id_ef][i], fila); } } }; /** * Esquema de Cascadas:<br> * Determina si los maestros de un ef esclavo tienen sus valores cargados * @param {string} id_esclavo Identificador del ef esclavo * @type boolean */ ei_formulario.prototype.cascadas_maestros_preparados = function(id_esclavo, fila) { for (var i=0; i< this._maestros[id_esclavo].length; i++) { var ef = this.ef(this._maestros[id_esclavo][i]); if (ef && typeof fila != 'undefined') { ef.ir_a_fila(fila); } if (ef && ! ef.tiene_estado()) { return false; } } return true; }; /** * Esquema de Cascadas:<br> * Un ef esclavo esta listo para refrescar su valor en base a sus maestros, * para esto en este metodo se recolecta los valores de sus maestros y se dispara * la comunicación con el servidor * @param {string} id_esclavo Identificador del ef esclavo que se refrescara */ ei_formulario.prototype.cascadas_preparar_esclavo = function (id_esclavo, fila) { //Primero se resetea por si la consulta nunca retorna this.cascadas_en_espera(id_esclavo); //---Todos los maestros tienen estado? var con_estado = true; var valores = ''; for (var i=0; i< this._maestros[id_esclavo].length; i++) { var id_maestro = this._maestros[id_esclavo][i]; var ef = this.ef(id_maestro); if (ef && ef.tiene_estado()) { var valor = (typeof fila == 'undefined') ? this.ef(id_maestro).get_estado() : this.ef(id_maestro).ir_a_fila(fila).get_estado(); valores += id_maestro + '-;-' + valor + '-|-'; } else if (ef) { //-- Evita caso del oculto con_estado = false; break; } } //--- Si estan todos los maestros puedo ir al server a preguntar el valor de este if (con_estado) { if (this.ef(id_esclavo)._cascadas_ajax) { //Caso normal this.cascadas_comunicar(id_esclavo, valores, fila); } else { //Caso combo_editable this.ef(id_esclavo).set_solo_lectura(false); } } }; /** * Esquema de Cascadas:<br> * Retorna el estado actual de los maestros directos de un esclavo * @param {string} id_esclavo Identificador del ef esclavo que se refrescara */ ei_formulario.prototype.get_valores_maestros = function (id_esclavo, fila) { var maestros = {}; for (var i=0; i< this._maestros[id_esclavo].length; i++) { var id_maestro = this._maestros[id_esclavo][i]; var ef = (typeof fila == 'undefined') ? this.ef(id_maestro): this.ef(id_maestro).ir_a_fila(fila); if (ef && ef.tiene_estado()) { maestros[id_maestro] = (typeof fila == 'undefined') ? this.ef(id_maestro).get_estado() : this.ef(id_maestro).ir_a_fila(fila).get_estado(); } } return maestros; }; /** * @private * @param {string} id_ef Id. 
del ef */ ei_formulario.prototype.cascadas_en_espera = function(id_ef) { if (this.ef(id_ef).tiene_estado() && this.ef(id_ef).mantiene_valor_cascada()) { //Guardo el estado actual por si acaso vuelve en la respuesta this._tmp_valores_esclavos[id_ef] = this.ef(id_ef).get_estado(); } //Se resetea y desactiva al ef y todos sus esclavos this.ef(id_ef).borrar_opciones(); this.ef(id_ef).desactivar(); if (this._esclavos[id_ef]) { for (var i=0; i< this._esclavos[id_ef].length; i++) { this.cascadas_en_espera(this._esclavos[id_ef][i]); } } }; /** * Esquema de Cascadas:<br> * Se comunica al servidor que debe refrescar el valor de un ef en base a valores especificos de sus efs maestros * Este método dispara la llamada asincronica al servidor * @see #cascadas_respuesta * @param {string} id_ef Id. del ef a refrescar (un ef esclavo) * @param {string valores Lista plana de valores. Formato: ef1-;-valor1-|-ef2-;-valor2-|- etc. */ ei_formulario.prototype.cascadas_comunicar = function(id_ef, valores, fila) { //Empaqueto toda la informacion que tengo que mandar. var parametros = {'cascadas-ef': id_ef, 'cascadas-maestros' : valores}; if (typeof fila != 'undefined') { parametros['cascadas-fila'] = fila; } var callback = { success: this.cascadas_respuesta, failure: toba.error_comunicacion, argument: id_ef, scope: this }; var vinculo = vinculador.get_url(null, null, 'cascadas_efs', parametros, [this._id]); var con = conexion.asyncRequest('GET', vinculo, callback, null); }; /** * Esquema de Cascadas:<br> * Respuesta del servidor ante el pedido de refresco de un ef puntual * @param {Object} respuesta La respuesta es un objeto asociativo con claves responseText que contiene el nuevo valor del ef */ ei_formulario.prototype.cascadas_respuesta = function(respuesta) { if (respuesta.responseText === '') { var error = 'Error en la respuesta de la cascada, para más información consulte el log'; notificacion.limpiar(); notificacion.agregar(error); notificacion.mostrar(); } else { try { var datos_rs = JSON.parse(respuesta.responseText); var datos_asociativo; if ('Array' == getObjectClass(datos_rs)) { datos_asociativo = []; for (var ind = 0; ind < datos_rs.length ; ind++) { datos_asociativo[datos_rs[ind][0]] = datos_rs[ind][1]; } //Se le pasa el formato RS para que no se rompa el ordenamiento, para el resto se usa el asociativo por BC this.ef(respuesta.argument).set_opciones_rs(datos_rs); } else { datos_asociativo = datos_rs; this.ef(respuesta.argument).set_opciones(datos_asociativo); } if(this.ef(respuesta.argument).mantiene_valor_cascada() && isset(this._tmp_valores_esclavos[respuesta.argument])) { var valor_viejo = this._tmp_valores_esclavos[respuesta.argument]; if (isset(datos_asociativo[valor_viejo])) { this.ef(respuesta.argument).set_estado(valor_viejo); } } this.evt__cascadas_fin(this.ef(respuesta.argument), datos_asociativo); } catch (e) { var componente = "<textarea id='displayMore' class='ef-input-solo-lectura' cols='30' rows='35' readonly='true' style='display:none;'>" + respuesta.responseText + '</textarea>'; var error = 'Error en la respueta.<br>' + 'Error JS:<br>' + e + '<br>Mensaje Server:<br>' + "<a href='#' onclick='toggle_nodo(document.getElementById(\"displayMore\"));'>Mas</a><br>" + componente; notificacion.limpiar(); notificacion.agregar(error); notificacion.mostrar(); } } }; /** * Esquema de Cascadas:<br> * Ventana de ejecución anterior al pedido de respuesta de la cascada * Extender para agregar un comportamiento anterior a la respuesta * @param {ef} ef_maestro Instancia del ef maestro que inicia la 
cascada * @ventana */ ei_formulario.prototype.evt__cascadas_inicio = function(ef_maestro) { }; /** * Esquema de Cascadas:<br> * Ventana de ejecución posterior a la respuesta de una cascada. * Extender para agregar un comportamiento post-respuesta * @param {ef} ef_esclavo Instancia del ef esclavo destino de la cascada * @param {Object} datos Datos de respuesta usados en la cascada * @ventana */ ei_formulario.prototype.evt__cascadas_fin = function(ef_esclavo, datos) { }; //----Validación /** * Realiza la validación de este componente * Para agregar validaciones particulares globales al formulario, definir el metodo <em>evt__validar_datos</em>.<br> * Para validar efs especificos, definir el método <em>evt__idef__validar</em> */ ei_formulario.prototype.validar = function() { var ok = true, id_ef; var validacion_particular = 'evt__validar_datos'; if(this._evento && this._evento.validar) { if (existe_funcion(this, validacion_particular)) { ok = this[validacion_particular](); } for (id_ef in this._efs) { ok = this.validar_ef(id_ef) && ok; } } else { this.resetear_errores(); } if (!ok) { this.reset_evento(); } return ok; }; /** *@private *@param {string} id_ef Id del ef a validar *@param {boolean} es_online */ ei_formulario.prototype.validar_ef = function(id_ef, es_online) { var ef = this._efs[id_ef]; var validacion_particular = 'evt__' + id_ef + '__validar'; var ok = ef.validar(); if (existe_funcion(this, validacion_particular)) { ok = this[validacion_particular]() && ok; } this.set_ef_valido(ef, ok, es_online); if (es_online && this._con_examen_cambios) { this._examinar_cambios(id_ef); } return ok; }; ei_formulario.prototype._examinar_cambios = function (ef_actual) { var hay_cambio = this.hay_cambios(ef_actual); if (this.evt__procesar_cambios) { this.evt__procesar_cambios(hay_cambio); } }; ei_formulario.prototype.set_procesar_cambios = function(examinar, boton_destino, excluir_efs) { this._con_examen_cambios = examinar; if (! isset(excluir_efs)) { excluir_efs = []; } this._cambios_excluir_efs = excluir_efs; if (boton_destino) { this._boton_procesar_cambios = boton_destino; this.evt__procesar_cambios = this._procesar_cambios; //Se brinda una implementacion por defecto } }; ei_formulario.prototype._procesar_cambios = function(existen_cambios) { if (existen_cambios) { this.activar_boton(this._boton_procesar_cambios); } else { this.desactivar_boton(this._boton_procesar_cambios); } }; /** * Determina si algún ef del formulario se modifico * Opcionalmente resalta o no un ef puntual * @param {string} ef_actual Id del ef a verificar si tuvo cambios */ ei_formulario.prototype.hay_cambios = function(ef_actual) { var hay_cambio = false, id_ef; for (id_ef in this._efs) { if (! in_array(id_ef, this._cambios_excluir_efs)) { var es_igual = this._es_estado_igual(this._estado_inicial[id_ef], this._efs[id_ef].get_estado()); if (! es_igual) { hay_cambio = true; if (id_ef == ef_actual) { this._efs[id_ef].resaltar_cambio(true); } } else { if (id_ef == ef_actual) { this._efs[id_ef].resaltar_cambio(false); } } } } return hay_cambio; }; ei_formulario.prototype._es_estado_igual = function(inicial, actual) { var es_igual; if (typeof actual == 'object' && isset(actual)) { es_igual = comparar_arreglos(inicial, actual); } else { es_igual = (inicial === actual); } return es_igual; }; /** * Informa que un ef cumple o no una validación especifica. * En caso de que no sea valido el estado del ef se informa al usuario * Si es valido se quita el estado de invalido (la cruz al lado del campo). 
* @param {ef} ef Ef en cuestión * @param {boolean} es_valido * @param {boolean} solo_online En caso que no sea valido sólo muestra la cruz al lado del campo y no un mensaje explícito */ ei_formulario.prototype.set_ef_valido = function(ef, es_valido, solo_online) { if (!es_valido) { if (! this._silencioso) { ef.resaltar(ef.get_error()); } if (typeof solo_online == 'undefined' || !solo_online) { notificacion.agregar(ef.get_error(), 'error', ef._etiqueta); } ef.resetear_error(); } else { ef.no_resaltar(); } }; ei_formulario.prototype.resetear_errores = function() { if (! this._silencioso) { for (var id_ef in this._efs) { if (! this._silencioso) { this._efs[id_ef].no_resaltar(); } } } }; //---Procesamiento /** *@private *@param {string} id_ef Id del ef a procesar *@param {boolean} es_inicial Indica si se lanza el procesamiento por primera vez o no */ ei_formulario.prototype.procesar = function (id_ef, es_inicial) { if (this.hay_procesamiento_particular_ef(id_ef)) { return this['evt__' + id_ef + '__procesar'](es_inicial); //Procesamiento particular, no hay proceso por defecto } }; /** * Hace reflexion sobre la clase en busqueda de extensiones * @private */ ei_formulario.prototype.agregar_procesamientos = function() { var id_ef; for (id_ef in this._efs) { if (this.hay_procesamiento_particular_ef(id_ef)) { this.agregar_procesamiento(id_ef); } } }; /** * @private * @param {string} id_ef Id del ef a procesar */ ei_formulario.prototype.agregar_procesamiento = function (id_ef) { if (this._efs[id_ef]) { this._efs_procesar[id_ef] = true; var callback = this._instancia + '.procesar("' + id_ef + '")'; this._efs[id_ef].cuando_cambia_valor(callback); } }; /** * @private * @param {string} id_ef Id del ef a procesar */ ei_formulario.prototype.hay_procesamiento_particular_ef = function(id_ef) { return existe_funcion(this, 'evt__' + id_ef + '__procesar'); }; //---Cambios graficos /** * Invierte la expansión del formulario * Cuando el formulario se encuentra contraido los efs marcados como colapsados en el editor no se muestran * Este metodo no tiene relacion con el colapsar/descolapsar que se encargan de colapsar el componente como un todo */ ei_formulario.prototype.cambiar_expansion = function() { this._expandido = ! this._expandido; for (var id_ef in this._efs) { this._efs[id_ef].cambiar_expansion(this._expandido); } var img = document.getElementById(this._instancia + '_cambiar_expansion'); img.src = (this._expandido) ? toba.imagen('contraer') : toba.imagen('expandir'); }; //---Refresco Grafico /** * Fuerza un refuerzo grafico del componente */ ei_formulario.prototype.refrescar_todo = function () { this.refrescar_procesamientos(); }; /** *@private *@param {boolean} es_inicial Indica si el procesamiento se lanza por primera vez */ ei_formulario.prototype.refrescar_procesamientos = function (es_inicial) { for (var id_ef in this._efs) { if (this._efs_procesar[id_ef]) { this.procesar(id_ef, es_inicial); } } }; toba.confirmar_inclusion('componentes/ei_formulario');
ei_formulario
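//--------------------------------------------------------------------
//Usage sketch (an assumption, not part of the original file): how page
//JS might hook into the validation scheme above. validar() looks up
//'evt__validar_datos' and validar_ef() looks up 'evt__<id_ef>__validar'
//via existe_funcion(), so defining them on the instance is enough; both
//run with 'this' bound to the form. 'form_persona', 'fecha_desde' and
//'fecha_hasta' are hypothetical names chosen for illustration.
//--------------------------------------------------------------------
//Form-wide validation, run once before the per-ef validations
form_persona.evt__validar_datos = function () {
	var datos = this.get_datos();
	//Both efs are assumed to hold comparable date strings
	return datos['fecha_desde'] <= datos['fecha_hasta'];
};

//Per-ef validation for the hypothetical ef 'fecha_hasta'
form_persona.evt__fecha_hasta__validar = function () {
	return this.ef('fecha_hasta').tiene_estado();
};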
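//--------------------------------------------------------------------
//Cascade-hook sketch (an assumption, not part of the original file):
//evt__cascadas_inicio fires in cascadas_cambio_maestro before the
//slaves are reset and queried; evt__cascadas_fin fires in
//cascadas_respuesta once a slave's options were loaded. Redefining
//them on the instance gives a simple "loading" cue. 'aviso_carga' is
//a hypothetical indicator node assumed to exist in the page.
//--------------------------------------------------------------------
form_persona.evt__cascadas_inicio = function (ef_maestro) {
	document.getElementById('aviso_carga').style.display = '';
};

form_persona.evt__cascadas_fin = function (ef_esclavo, datos) {
	//Note: fired once per refreshed slave, not once per master change
	document.getElementById('aviso_carga').style.display = 'none';
};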
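//--------------------------------------------------------------------
//Change-tracking sketch (an assumption, not part of the original
//file): set_procesar_cambios(true, ...) makes every online validation
//re-run hay_cambios(), and passing a button id installs the default
//_procesar_cambios implementation, which activates or deactivates
//that button. 'guardar' and 'usuario' are hypothetical names.
//--------------------------------------------------------------------
form_persona.set_procesar_cambios(true, 'guardar', ['usuario']);
//The 'usuario' ef is excluded from the comparison against
//_estado_inicial; the 'guardar' button now activates only while some
//other ef differs from its initial state.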
{ this._id = id; this._instancia = instancia; // Name of the object instance; allows associating the object with the DOM tree this._rango_tabs = rango_tabs; this._input_submit = input_submit; // Field that is set on the form submit this.controlador = null; // Reference to the containing CI this._efs = {}; // List of contained objeto_ef this._efs_procesar = {}; // IDs of the efs that have processing this._silencioso = false; // Silence confirmations and alerts? Useful for testing this._evento_implicito = null; // No preset event this._expandido = false; // The form starts unexpanded this._maestros = maestros; this._esclavos = esclavos; this._invalidos = invalidos; this._estado_inicial = {}; this._con_examen_cambios = false; this._cambios_excluir_efs = []; this._tmp_valores_esclavos = {}; // temporary list of values to keep until the cascade returns }
identifier_body
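The record above ends with the fim_type label `identifier_body`: the held-out middle is the body of the `ei_formulario` constructor, and the model must infill it between the prefix and the suffix. A rough sketch of how such a record could be reassembled into a training string (the field names follow this dataset's columns; the sentinel tokens are illustrative placeholders, not the tokens of any particular model):

def render_fim(record: dict) -> str:
    # Render one record in prefix-suffix-middle (PSM) order.
    return ("<fim_prefix>" + record["prefix"]
            + "<fim_suffix>" + record["suffix"]
            + "<fim_middle>" + record["middle"])

example = {
    "prefix": "function ei_formulario(id, instancia) ",
    "suffix": " ei_formulario.prototype.validar = function() { /* ... */ };",
    "middle": "{ this._id = id; this._instancia = instancia; }",
    "fim_type": "identifier_body",
}
print(render_fim(example))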
webots_launcher.py
#!/usr/bin/env python # Copyright 1996-2023 Cyberbotics Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This launcher simply starts Webots.""" import os import re import shutil import subprocess import sys import tempfile from pathlib import Path from launch.actions import ExecuteProcess from launch_ros.actions import Node from launch.launch_context import LaunchContext from launch.substitution import Substitution from launch.substitutions import TextSubstitution from launch.substitutions.path_join_substitution import PathJoinSubstitution from ament_index_python.packages import get_package_share_directory, get_package_prefix from webots_ros2_driver.utils import (get_webots_home, handle_webots_installation, is_wsl, has_shared_folder, container_shared_folder, controller_url_prefix) class _ConditionalSubstitution(Substitution): def __init__(self, *, condition, false_value='', true_value=''): self.__condition = condition if isinstance(condition, Substitution) else TextSubstitution(text=str(condition)) self.__false_value = false_value if isinstance(false_value, Substitution) else TextSubstitution(text=false_value) self.__true_value = true_value if isinstance(true_value, Substitution) else TextSubstitution(text=true_value) def perform(self, context): if context.perform_substitution(self.__condition).lower() in ['false', '0', '']: return context.perform_substitution(self.__false_value) return context.perform_substitution(self.__true_value) class WebotsLauncher(ExecuteProcess): def __init__(self, output='screen', world=None, gui=True, mode='realtime', stream=False, ros2_supervisor=False, port='1234', **kwargs): if sys.platform == 'win32': print('WARNING: Native webots_ros2 compatibility with Windows is deprecated and will be removed soon. 
Please use a ' 'WSL (Windows Subsystem for Linux) environment instead.', file=sys.stderr) print('WARNING: Check https://github.com/cyberbotics/webots_ros2/wiki/Complete-Installation-Guide for more ' 'information.', file=sys.stderr) self.__is_wsl = is_wsl() self.__has_shared_folder = has_shared_folder() self.__is_supervisor = ros2_supervisor if self.__is_supervisor: self._supervisor = Ros2SupervisorLauncher(port=port) # Find Webots executable if not self.__has_shared_folder: webots_path = get_webots_home(show_warning=True) if webots_path is None: handle_webots_installation() webots_path = get_webots_home() if self.__is_wsl: webots_path = os.path.join(webots_path, 'msys64', 'mingw64', 'bin', 'webots.exe') else: webots_path = os.path.join(webots_path, 'webots') else: webots_path = '' mode = mode if isinstance(mode, Substitution) else TextSubstitution(text=mode) self.__world_copy = tempfile.NamedTemporaryFile(mode='w+', suffix='_world_with_URDF_robot.wbt', delete=False) self.__world = world if not isinstance(world, Substitution): world = TextSubstitution(text=self.__world_copy.name) if self.__is_wsl: wsl_tmp_path = subprocess.check_output(['wslpath', '-w', self.__world_copy.name]).strip().decode('utf-8') world = TextSubstitution(text=wsl_tmp_path) no_rendering = _ConditionalSubstitution(condition=gui, false_value='--no-rendering') stdout = _ConditionalSubstitution(condition=gui, false_value='--stdout') stderr = _ConditionalSubstitution(condition=gui, false_value='--stderr') minimize = _ConditionalSubstitution(condition=gui, false_value='--minimize') if isinstance(stream, bool): stream_argument = _ConditionalSubstitution(condition=stream, true_value='--stream') else: stream_argument = "--stream=" + stream port_argument = '--port=' + port xvfb_run_prefix = [] if 'WEBOTS_OFFSCREEN' in os.environ: xvfb_run_prefix.append('xvfb-run') xvfb_run_prefix.append('--auto-servernum') no_rendering = '--no-rendering' # Initialize command to start Webots remotely through TCP if self.__has_shared_folder: webots_tcp_client = (os.path.join(get_package_share_directory('webots_ros2_driver'), 'scripts', 'webots_tcp_client.py')) super().__init__( output=output, cmd=[ 'python3', webots_tcp_client, stream_argument, port_argument, no_rendering, stdout, stderr, minimize, '--batch', ['--mode=', mode], os.path.basename(self.__world_copy.name), ], name='webots_tcp_client', **kwargs ) # Initialize command to start Webots locally else: # no_rendering, stdout, stderr, minimize super().__init__( output=output, cmd=xvfb_run_prefix + [ webots_path, stream_argument, port_argument, no_rendering, stdout, stderr, minimize, world, '--batch', ['--mode=', mode], ], name='webots', **kwargs ) def execute(self, context: LaunchContext): # User can give a PathJoinSubstitution world or an absolute path world if isinstance(self.__world, PathJoinSubstitution): world_path = self.__world.perform(context) context.launch_configurations['world'] = self.__world_copy.name else: world_path = self.__world shutil.copy2(world_path, self.__world_copy.name) # look for a wbproj file and copy if available wbproj_path = Path(world_path).with_name('.' + Path(world_path).stem + '.wbproj') if wbproj_path.exists(): wbproj_copy_path = Path(self.__world_copy.name).with_name('.' 
+ Path(self.__world_copy.name).stem + '.wbproj') shutil.copy2(wbproj_path, wbproj_copy_path) # copy sumo network file if it exists sumonet_path = Path(world_path).with_name(Path(world_path).stem + '_net') if sumonet_path.exists(): sumonet_copy_path = Path(self.__world_copy.name).with_name(Path(self.__world_copy.name).stem + '_net') shutil.copytree(sumonet_path, sumonet_copy_path) # Update relative paths in the world with open(self.__world_copy.name, 'r') as file: content = file.read() for match in re.finditer('\"((?:[^\"]*)\\.(?:jpe?g|png|hdr|obj|stl|dae|wav|mp3|proto))\"', content): url_path = match.group(1) # Absolute path or Webots relative path or Web paths if os.path.isabs(url_path) or url_path.startswith('webots://') or url_path.startswith('http://') \ or url_path.startswith('https://'): continue new_url_path = os.path.split(world_path)[0] + '/' + url_path if self.__has_shared_folder: # Copy asset to shared folder shutil.copy(new_url_path, os.path.join(container_shared_folder(), os.path.basename(new_url_path))) new_url_path = './' + os.path.basename(new_url_path) if self.__is_wsl: command = ['wslpath', '-w', new_url_path] new_url_path = subprocess.check_output(command).strip().decode('utf-8').replace('\\', '/') new_url_path = '"' + new_url_path + '"' url_path = '"' + url_path + '"' content = content.replace(url_path, new_url_path) with open(self.__world_copy.name, 'w') as file: file.write(content)
# Add the Ros2Supervisor if self.__is_supervisor: indent = ' ' world_file = open(self.__world_copy.name, 'a') world_file.write('Robot {\n') world_file.write(indent + 'name "Ros2Supervisor"\n') world_file.write(indent + 'controller "<extern>"\n') world_file.write(indent + 'supervisor TRUE\n') world_file.write('}\n') world_file.close() # Copy world file to shared folder if self.__has_shared_folder: shared_world_file = os.path.join(container_shared_folder(), os.path.basename(self.__world_copy.name)) shutil.copy(self.__world_copy.name, shared_world_file) if wbproj_path.exists(): shared_wbproj_copy_path = Path(shared_world_file).with_name('.' + Path(shared_world_file).stem + '.wbproj') shutil.copy(wbproj_path, shared_wbproj_copy_path) # Execute process return super().execute(context) def _shutdown_process(self, context, *, send_sigint): # Remove copy of the world and the corresponding ".wbproj" file if self.__world_copy: self.__world_copy.close() if os.path.isfile(self.__world_copy.name): os.unlink(self.__world_copy.name) path, file = os.path.split(self.__world_copy.name) world_copy_secondary_file = os.path.join(path, '.' + file[:-1] + 'proj') if os.path.isfile(world_copy_secondary_file): os.unlink(world_copy_secondary_file) # Clean the content of the shared directory for next run if self.__has_shared_folder: for filename in os.listdir(container_shared_folder()): file_path = os.path.join(container_shared_folder(), filename) try: if os.path.isfile(file_path): os.unlink(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as error: print(f'Failed to delete {file_path}. Reason: {error}.') return super()._shutdown_process(context, send_sigint=send_sigint) class Ros2SupervisorLauncher(Node): def __init__(self, output='screen', respawn=True, port='1234', **kwargs): # Launch the Ros2Supervisor node super().__init__( package='webots_ros2_driver', executable='ros2_supervisor.py', namespace='Ros2Supervisor', remappings=[('/Ros2Supervisor/clock', '/clock')], output=output, # Set WEBOTS_HOME to the webots_ros2_driver installation folder # to load the correct libController libraries from the Python API additional_env={'WEBOTS_CONTROLLER_URL': controller_url_prefix(port) + 'Ros2Supervisor', 'WEBOTS_HOME': get_package_prefix('webots_ros2_driver')}, respawn=respawn, **kwargs )
random_line_split
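This `webots_launcher.py` record is labeled `random_line_split`, so the cut falls on an arbitrary source line rather than on a syntactic unit. The `_ConditionalSubstitution` helper in its prefix is the piece doing real work: it resolves a launch condition to one of two strings, which is how `gui=False` turns into the `--no-rendering`, `--stdout`, `--stderr`, and `--minimize` flags. A minimal plain-Python sketch of the same selection logic, outside the ROS 2 launch machinery:

def conditional_flag(condition, false_value='', true_value=''):
    # Mirrors _ConditionalSubstitution.perform: 'false', '0', and ''
    # (case-insensitive) select false_value, anything else true_value.
    if str(condition).lower() in ['false', '0', '']:
        return false_value
    return true_value

assert conditional_flag(False, false_value='--no-rendering') == '--no-rendering'
assert conditional_flag(True, false_value='--no-rendering') == ''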
webots_launcher.py
#!/usr/bin/env python # Copyright 1996-2023 Cyberbotics Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This launcher simply starts Webots.""" import os import re import shutil import subprocess import sys import tempfile from pathlib import Path from launch.actions import ExecuteProcess from launch_ros.actions import Node from launch.launch_context import LaunchContext from launch.substitution import Substitution from launch.substitutions import TextSubstitution from launch.substitutions.path_join_substitution import PathJoinSubstitution from ament_index_python.packages import get_package_share_directory, get_package_prefix from webots_ros2_driver.utils import (get_webots_home, handle_webots_installation, is_wsl, has_shared_folder, container_shared_folder, controller_url_prefix) class _ConditionalSubstitution(Substitution): def __init__(self, *, condition, false_value='', true_value=''): self.__condition = condition if isinstance(condition, Substitution) else TextSubstitution(text=str(condition)) self.__false_value = false_value if isinstance(false_value, Substitution) else TextSubstitution(text=false_value) self.__true_value = true_value if isinstance(true_value, Substitution) else TextSubstitution(text=true_value) def perform(self, context): if context.perform_substitution(self.__condition).lower() in ['false', '0', '']: return context.perform_substitution(self.__false_value) return context.perform_substitution(self.__true_value) class WebotsLauncher(ExecuteProcess): def __init__(self, output='screen', world=None, gui=True, mode='realtime', stream=False, ros2_supervisor=False, port='1234', **kwargs): if sys.platform == 'win32': print('WARNING: Native webots_ros2 compatibility with Windows is deprecated and will be removed soon. 
Please use a ' 'WSL (Windows Subsystem for Linux) environment instead.', file=sys.stderr) print('WARNING: Check https://github.com/cyberbotics/webots_ros2/wiki/Complete-Installation-Guide for more ' 'information.', file=sys.stderr) self.__is_wsl = is_wsl() self.__has_shared_folder = has_shared_folder() self.__is_supervisor = ros2_supervisor if self.__is_supervisor: self._supervisor = Ros2SupervisorLauncher(port=port) # Find Webots executable if not self.__has_shared_folder: webots_path = get_webots_home(show_warning=True) if webots_path is None: handle_webots_installation() webots_path = get_webots_home() if self.__is_wsl: webots_path = os.path.join(webots_path, 'msys64', 'mingw64', 'bin', 'webots.exe') else: webots_path = os.path.join(webots_path, 'webots') else: webots_path = '' mode = mode if isinstance(mode, Substitution) else TextSubstitution(text=mode) self.__world_copy = tempfile.NamedTemporaryFile(mode='w+', suffix='_world_with_URDF_robot.wbt', delete=False) self.__world = world if not isinstance(world, Substitution): world = TextSubstitution(text=self.__world_copy.name) if self.__is_wsl: wsl_tmp_path = subprocess.check_output(['wslpath', '-w', self.__world_copy.name]).strip().decode('utf-8') world = TextSubstitution(text=wsl_tmp_path) no_rendering = _ConditionalSubstitution(condition=gui, false_value='--no-rendering') stdout = _ConditionalSubstitution(condition=gui, false_value='--stdout') stderr = _ConditionalSubstitution(condition=gui, false_value='--stderr') minimize = _ConditionalSubstitution(condition=gui, false_value='--minimize') if isinstance(stream, bool): stream_argument = _ConditionalSubstitution(condition=stream, true_value='--stream') else: stream_argument = "--stream=" + stream port_argument = '--port=' + port xvfb_run_prefix = [] if 'WEBOTS_OFFSCREEN' in os.environ: xvfb_run_prefix.append('xvfb-run') xvfb_run_prefix.append('--auto-servernum') no_rendering = '--no-rendering' # Initialize command to start Webots remotely through TCP if self.__has_shared_folder: webots_tcp_client = (os.path.join(get_package_share_directory('webots_ros2_driver'), 'scripts', 'webots_tcp_client.py')) super().__init__( output=output, cmd=[ 'python3', webots_tcp_client, stream_argument, port_argument, no_rendering, stdout, stderr, minimize, '--batch', ['--mode=', mode], os.path.basename(self.__world_copy.name), ], name='webots_tcp_client', **kwargs ) # Initialize command to start Webots locally else: # no_rendering, stdout, stderr, minimize super().__init__( output=output, cmd=xvfb_run_prefix + [ webots_path, stream_argument, port_argument, no_rendering, stdout, stderr, minimize, world, '--batch', ['--mode=', mode], ], name='webots', **kwargs ) def execute(self, context: LaunchContext): # User can give a PathJoinSubstitution world or an absolute path world if isinstance(self.__world, PathJoinSubstitution): world_path = self.__world.perform(context) context.launch_configurations['world'] = self.__world_copy.name else: world_path = self.__world shutil.copy2(world_path, self.__world_copy.name) # look for a wbproj file and copy if available wbproj_path = Path(world_path).with_name('.' + Path(world_path).stem + '.wbproj') if wbproj_path.exists(): wbproj_copy_path = Path(self.__world_copy.name).with_name('.' 
+ Path(self.__world_copy.name).stem + '.wbproj') shutil.copy2(wbproj_path, wbproj_copy_path) # copy sumo network file if it exists sumonet_path = Path(world_path).with_name(Path(world_path).stem + '_net') if sumonet_path.exists(): sumonet_copy_path = Path(self.__world_copy.name).with_name(Path(self.__world_copy.name).stem + '_net') shutil.copytree(sumonet_path, sumonet_copy_path) # Update relative paths in the world with open(self.__world_copy.name, 'r') as file: content = file.read() for match in re.finditer('\"((?:[^\"]*)\\.(?:jpe?g|png|hdr|obj|stl|dae|wav|mp3|proto))\"', content): url_path = match.group(1) # Absolute path or Webots relative path or Web paths if os.path.isabs(url_path) or url_path.startswith('webots://') or url_path.startswith('http://') \ or url_path.startswith('https://'): continue new_url_path = os.path.split(world_path)[0] + '/' + url_path if self.__has_shared_folder: # Copy asset to shared folder shutil.copy(new_url_path, os.path.join(container_shared_folder(), os.path.basename(new_url_path))) new_url_path = './' + os.path.basename(new_url_path) if self.__is_wsl: command = ['wslpath', '-w', new_url_path] new_url_path = subprocess.check_output(command).strip().decode('utf-8').replace('\\', '/') new_url_path = '"' + new_url_path + '"' url_path = '"' + url_path + '"' content = content.replace(url_path, new_url_path) with open(self.__world_copy.name, 'w') as file: file.write(content) # Add the Ros2Supervisor if self.__is_supervisor: indent = ' ' world_file = open(self.__world_copy.name, 'a') world_file.write('Robot {\n') world_file.write(indent + 'name "Ros2Supervisor"\n') world_file.write(indent + 'controller "<extern>"\n') world_file.write(indent + 'supervisor TRUE\n') world_file.write('}\n') world_file.close() # Copy world file to shared folder if self.__has_shared_folder: shared_world_file = os.path.join(container_shared_folder(), os.path.basename(self.__world_copy.name)) shutil.copy(self.__world_copy.name, shared_world_file) if wbproj_path.exists(): shared_wbproj_copy_path = Path(shared_world_file).with_name('.' + Path(shared_world_file).stem + '.wbproj') shutil.copy(wbproj_path, shared_wbproj_copy_path) # Execute process return super().execute(context) def _shutdown_process(self, context, *, send_sigint): # Remove copy of the world and the corresponding ".wbproj" file if self.__world_copy: self.__world_copy.close() if os.path.isfile(self.__world_copy.name): os.unlink(self.__world_copy.name) path, file = os.path.split(self.__world_copy.name) world_copy_secondary_file = os.path.join(path, '.' + file[:-1] + 'proj') if os.path.isfile(world_copy_secondary_file): os.unlink(world_copy_secondary_file) # Clean the content of the shared directory for next run if self.__has_shared_folder: for filename in os.listdir(container_shared_folder()): file_path = os.path.join(container_shared_folder(), filename) try: if os.path.isfile(file_path):
elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as error: print(f'Failed to delete {file_path}. Reason: {error}.') return super()._shutdown_process(context, send_sigint=send_sigint) class Ros2SupervisorLauncher(Node): def __init__(self, output='screen', respawn=True, port='1234', **kwargs): # Launch the Ros2Supervisor node super().__init__( package='webots_ros2_driver', executable='ros2_supervisor.py', namespace='Ros2Supervisor', remappings=[('/Ros2Supervisor/clock', '/clock')], output=output, # Set WEBOTS_HOME to the webots_ros2_driver installation folder # to load the correct libController libraries from the Python API additional_env={'WEBOTS_CONTROLLER_URL': controller_url_prefix(port) + 'Ros2Supervisor', 'WEBOTS_HOME': get_package_prefix('webots_ros2_driver')}, respawn=respawn, **kwargs )
os.unlink(file_path)
conditional_block
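Here the fim_type is `conditional_block` and the held-out middle is the single statement `os.unlink(file_path)`, i.e. the file branch of the cleanup loop in `_shutdown_process`. Reduced to a standalone sketch (the directory argument is illustrative):

import os
import shutil

def clean_directory(shared_dir):
    # Remove every file and subdirectory in shared_dir, reporting failures,
    # as in the shared-folder cleanup above.
    for filename in os.listdir(shared_dir):
        file_path = os.path.join(shared_dir, filename)
        try:
            if os.path.isfile(file_path):
                os.unlink(file_path)  # the infilled conditional block
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as error:
            print(f'Failed to delete {file_path}. Reason: {error}.')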
webots_launcher.py
#!/usr/bin/env python # Copyright 1996-2023 Cyberbotics Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This launcher simply starts Webots.""" import os import re import shutil import subprocess import sys import tempfile from pathlib import Path from launch.actions import ExecuteProcess from launch_ros.actions import Node from launch.launch_context import LaunchContext from launch.substitution import Substitution from launch.substitutions import TextSubstitution from launch.substitutions.path_join_substitution import PathJoinSubstitution from ament_index_python.packages import get_package_share_directory, get_package_prefix from webots_ros2_driver.utils import (get_webots_home, handle_webots_installation, is_wsl, has_shared_folder, container_shared_folder, controller_url_prefix) class _ConditionalSubstitution(Substitution): def
(self, *, condition, false_value='', true_value=''): self.__condition = condition if isinstance(condition, Substitution) else TextSubstitution(text=str(condition)) self.__false_value = false_value if isinstance(false_value, Substitution) else TextSubstitution(text=false_value) self.__true_value = true_value if isinstance(true_value, Substitution) else TextSubstitution(text=true_value) def perform(self, context): if context.perform_substitution(self.__condition).lower() in ['false', '0', '']: return context.perform_substitution(self.__false_value) return context.perform_substitution(self.__true_value) class WebotsLauncher(ExecuteProcess): def __init__(self, output='screen', world=None, gui=True, mode='realtime', stream=False, ros2_supervisor=False, port='1234', **kwargs): if sys.platform == 'win32': print('WARNING: Native webots_ros2 compatibility with Windows is deprecated and will be removed soon. Please use a ' 'WSL (Windows Subsystem for Linux) environment instead.', file=sys.stderr) print('WARNING: Check https://github.com/cyberbotics/webots_ros2/wiki/Complete-Installation-Guide for more ' 'information.', file=sys.stderr) self.__is_wsl = is_wsl() self.__has_shared_folder = has_shared_folder() self.__is_supervisor = ros2_supervisor if self.__is_supervisor: self._supervisor = Ros2SupervisorLauncher(port=port) # Find Webots executable if not self.__has_shared_folder: webots_path = get_webots_home(show_warning=True) if webots_path is None: handle_webots_installation() webots_path = get_webots_home() if self.__is_wsl: webots_path = os.path.join(webots_path, 'msys64', 'mingw64', 'bin', 'webots.exe') else: webots_path = os.path.join(webots_path, 'webots') else: webots_path = '' mode = mode if isinstance(mode, Substitution) else TextSubstitution(text=mode) self.__world_copy = tempfile.NamedTemporaryFile(mode='w+', suffix='_world_with_URDF_robot.wbt', delete=False) self.__world = world if not isinstance(world, Substitution): world = TextSubstitution(text=self.__world_copy.name) if self.__is_wsl: wsl_tmp_path = subprocess.check_output(['wslpath', '-w', self.__world_copy.name]).strip().decode('utf-8') world = TextSubstitution(text=wsl_tmp_path) no_rendering = _ConditionalSubstitution(condition=gui, false_value='--no-rendering') stdout = _ConditionalSubstitution(condition=gui, false_value='--stdout') stderr = _ConditionalSubstitution(condition=gui, false_value='--stderr') minimize = _ConditionalSubstitution(condition=gui, false_value='--minimize') if isinstance(stream, bool): stream_argument = _ConditionalSubstitution(condition=stream, true_value='--stream') else: stream_argument = "--stream=" + stream port_argument = '--port=' + port xvfb_run_prefix = [] if 'WEBOTS_OFFSCREEN' in os.environ: xvfb_run_prefix.append('xvfb-run') xvfb_run_prefix.append('--auto-servernum') no_rendering = '--no-rendering' # Initialize command to start Webots remotely through TCP if self.__has_shared_folder: webots_tcp_client = (os.path.join(get_package_share_directory('webots_ros2_driver'), 'scripts', 'webots_tcp_client.py')) super().__init__( output=output, cmd=[ 'python3', webots_tcp_client, stream_argument, port_argument, no_rendering, stdout, stderr, minimize, '--batch', ['--mode=', mode], os.path.basename(self.__world_copy.name), ], name='webots_tcp_client', **kwargs ) # Initialize command to start Webots locally else: # no_rendering, stdout, stderr, minimize super().__init__( output=output, cmd=xvfb_run_prefix + [ webots_path, stream_argument, port_argument, no_rendering, stdout, stderr, minimize, world, 
'--batch', ['--mode=', mode], ], name='webots', **kwargs ) def execute(self, context: LaunchContext): # User can give a PathJoinSubstitution world or an absolute path world if isinstance(self.__world, PathJoinSubstitution): world_path = self.__world.perform(context) context.launch_configurations['world'] = self.__world_copy.name else: world_path = self.__world shutil.copy2(world_path, self.__world_copy.name) # look for a wbproj file and copy if available wbproj_path = Path(world_path).with_name('.' + Path(world_path).stem + '.wbproj') if wbproj_path.exists(): wbproj_copy_path = Path(self.__world_copy.name).with_name('.' + Path(self.__world_copy.name).stem + '.wbproj') shutil.copy2(wbproj_path, wbproj_copy_path) # copy sumo network file if it exists sumonet_path = Path(world_path).with_name(Path(world_path).stem + '_net') if sumonet_path.exists(): sumonet_copy_path = Path(self.__world_copy.name).with_name(Path(self.__world_copy.name).stem + '_net') shutil.copytree(sumonet_path, sumonet_copy_path) # Update relative paths in the world with open(self.__world_copy.name, 'r') as file: content = file.read() for match in re.finditer('\"((?:[^\"]*)\\.(?:jpe?g|png|hdr|obj|stl|dae|wav|mp3|proto))\"', content): url_path = match.group(1) # Absolute path or Webots relative path or Web paths if os.path.isabs(url_path) or url_path.startswith('webots://') or url_path.startswith('http://') \ or url_path.startswith('https://'): continue new_url_path = os.path.split(world_path)[0] + '/' + url_path if self.__has_shared_folder: # Copy asset to shared folder shutil.copy(new_url_path, os.path.join(container_shared_folder(), os.path.basename(new_url_path))) new_url_path = './' + os.path.basename(new_url_path) if self.__is_wsl: command = ['wslpath', '-w', new_url_path] new_url_path = subprocess.check_output(command).strip().decode('utf-8').replace('\\', '/') new_url_path = '"' + new_url_path + '"' url_path = '"' + url_path + '"' content = content.replace(url_path, new_url_path) with open(self.__world_copy.name, 'w') as file: file.write(content) # Add the Ros2Supervisor if self.__is_supervisor: indent = ' ' world_file = open(self.__world_copy.name, 'a') world_file.write('Robot {\n') world_file.write(indent + 'name "Ros2Supervisor"\n') world_file.write(indent + 'controller "<extern>"\n') world_file.write(indent + 'supervisor TRUE\n') world_file.write('}\n') world_file.close() # Copy world file to shared folder if self.__has_shared_folder: shared_world_file = os.path.join(container_shared_folder(), os.path.basename(self.__world_copy.name)) shutil.copy(self.__world_copy.name, shared_world_file) if wbproj_path.exists(): shared_wbproj_copy_path = Path(shared_world_file).with_name('.' + Path(shared_world_file).stem + '.wbproj') shutil.copy(wbproj_path, shared_wbproj_copy_path) # Execute process return super().execute(context) def _shutdown_process(self, context, *, send_sigint): # Remove copy of the world and the corresponding ".wbproj" file if self.__world_copy: self.__world_copy.close() if os.path.isfile(self.__world_copy.name): os.unlink(self.__world_copy.name) path, file = os.path.split(self.__world_copy.name) world_copy_secondary_file = os.path.join(path, '.' 
+ file[:-1] + 'proj') if os.path.isfile(world_copy_secondary_file): os.unlink(world_copy_secondary_file) # Clean the content of the shared directory for next run if self.__has_shared_folder: for filename in os.listdir(container_shared_folder()): file_path = os.path.join(container_shared_folder(), filename) try: if os.path.isfile(file_path): os.unlink(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as error: print(f'Failed to delete {file_path}. Reason: {error}.') return super()._shutdown_process(context, send_sigint=send_sigint) class Ros2SupervisorLauncher(Node): def __init__(self, output='screen', respawn=True, port='1234', **kwargs): # Launch the Ros2Supervisor node super().__init__( package='webots_ros2_driver', executable='ros2_supervisor.py', namespace='Ros2Supervisor', remappings=[('/Ros2Supervisor/clock', '/clock')], output=output, # Set WEBOTS_HOME to the webots_ros2_driver installation folder # to load the correct libController libraries from the Python API additional_env={'WEBOTS_CONTROLLER_URL': controller_url_prefix(port) + 'Ros2Supervisor', 'WEBOTS_HOME': get_package_prefix('webots_ros2_driver')}, respawn=respawn, **kwargs )
__init__
identifier_name
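For an `identifier_name` record the target is a single token; here the middle is the literal name `__init__`, cut out right after `def` in the prefix. A small sketch of how such a target can be located in source text (the regex-based helper is illustrative, not necessarily how the dataset was built):

import re

def first_def_name(source):
    # Return the name of the first function definition in `source`.
    match = re.search(r'\bdef\s+([A-Za-z_]\w*)\s*\(', source)
    if match is None:
        raise ValueError('no function definition found')
    return match.group(1)

snippet = 'class _ConditionalSubstitution(Substitution):\n    def __init__(self, *, condition):\n        pass\n'
print(first_def_name(snippet))  # -> __init__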
webots_launcher.py
#!/usr/bin/env python # Copyright 1996-2023 Cyberbotics Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This launcher simply starts Webots.""" import os import re import shutil import subprocess import sys import tempfile from pathlib import Path from launch.actions import ExecuteProcess from launch_ros.actions import Node from launch.launch_context import LaunchContext from launch.substitution import Substitution from launch.substitutions import TextSubstitution from launch.substitutions.path_join_substitution import PathJoinSubstitution from ament_index_python.packages import get_package_share_directory, get_package_prefix from webots_ros2_driver.utils import (get_webots_home, handle_webots_installation, is_wsl, has_shared_folder, container_shared_folder, controller_url_prefix) class _ConditionalSubstitution(Substitution):
class WebotsLauncher(ExecuteProcess): def __init__(self, output='screen', world=None, gui=True, mode='realtime', stream=False, ros2_supervisor=False, port='1234', **kwargs): if sys.platform == 'win32': print('WARNING: Native webots_ros2 compatibility with Windows is deprecated and will be removed soon. Please use a ' 'WSL (Windows Subsystem for Linux) environment instead.', file=sys.stderr) print('WARNING: Check https://github.com/cyberbotics/webots_ros2/wiki/Complete-Installation-Guide for more ' 'information.', file=sys.stderr) self.__is_wsl = is_wsl() self.__has_shared_folder = has_shared_folder() self.__is_supervisor = ros2_supervisor if self.__is_supervisor: self._supervisor = Ros2SupervisorLauncher(port=port) # Find Webots executable if not self.__has_shared_folder: webots_path = get_webots_home(show_warning=True) if webots_path is None: handle_webots_installation() webots_path = get_webots_home() if self.__is_wsl: webots_path = os.path.join(webots_path, 'msys64', 'mingw64', 'bin', 'webots.exe') else: webots_path = os.path.join(webots_path, 'webots') else: webots_path = '' mode = mode if isinstance(mode, Substitution) else TextSubstitution(text=mode) self.__world_copy = tempfile.NamedTemporaryFile(mode='w+', suffix='_world_with_URDF_robot.wbt', delete=False) self.__world = world if not isinstance(world, Substitution): world = TextSubstitution(text=self.__world_copy.name) if self.__is_wsl: wsl_tmp_path = subprocess.check_output(['wslpath', '-w', self.__world_copy.name]).strip().decode('utf-8') world = TextSubstitution(text=wsl_tmp_path) no_rendering = _ConditionalSubstitution(condition=gui, false_value='--no-rendering') stdout = _ConditionalSubstitution(condition=gui, false_value='--stdout') stderr = _ConditionalSubstitution(condition=gui, false_value='--stderr') minimize = _ConditionalSubstitution(condition=gui, false_value='--minimize') if isinstance(stream, bool): stream_argument = _ConditionalSubstitution(condition=stream, true_value='--stream') else: stream_argument = "--stream=" + stream port_argument = '--port=' + port xvfb_run_prefix = [] if 'WEBOTS_OFFSCREEN' in os.environ: xvfb_run_prefix.append('xvfb-run') xvfb_run_prefix.append('--auto-servernum') no_rendering = '--no-rendering' # Initialize command to start Webots remotely through TCP if self.__has_shared_folder: webots_tcp_client = (os.path.join(get_package_share_directory('webots_ros2_driver'), 'scripts', 'webots_tcp_client.py')) super().__init__( output=output, cmd=[ 'python3', webots_tcp_client, stream_argument, port_argument, no_rendering, stdout, stderr, minimize, '--batch', ['--mode=', mode], os.path.basename(self.__world_copy.name), ], name='webots_tcp_client', **kwargs ) # Initialize command to start Webots locally else: # no_rendering, stdout, stderr, minimize super().__init__( output=output, cmd=xvfb_run_prefix + [ webots_path, stream_argument, port_argument, no_rendering, stdout, stderr, minimize, world, '--batch', ['--mode=', mode], ], name='webots', **kwargs ) def execute(self, context: LaunchContext): # User can give a PathJoinSubstitution world or an absolute path world if isinstance(self.__world, PathJoinSubstitution): world_path = self.__world.perform(context) context.launch_configurations['world'] = self.__world_copy.name else: world_path = self.__world shutil.copy2(world_path, self.__world_copy.name) # look for a wbproj file and copy if available wbproj_path = Path(world_path).with_name('.' 
+ Path(world_path).stem + '.wbproj') if wbproj_path.exists(): wbproj_copy_path = Path(self.__world_copy.name).with_name('.' + Path(self.__world_copy.name).stem + '.wbproj') shutil.copy2(wbproj_path, wbproj_copy_path) # copy sumo network file if it exists sumonet_path = Path(world_path).with_name(Path(world_path).stem + '_net') if sumonet_path.exists(): sumonet_copy_path = Path(self.__world_copy.name).with_name(Path(self.__world_copy.name).stem + '_net') shutil.copytree(sumonet_path, sumonet_copy_path) # Update relative paths in the world with open(self.__world_copy.name, 'r') as file: content = file.read() for match in re.finditer('\"((?:[^\"]*)\\.(?:jpe?g|png|hdr|obj|stl|dae|wav|mp3|proto))\"', content): url_path = match.group(1) # Absolute path or Webots relative path or Web paths if os.path.isabs(url_path) or url_path.startswith('webots://') or url_path.startswith('http://') \ or url_path.startswith('https://'): continue new_url_path = os.path.split(world_path)[0] + '/' + url_path if self.__has_shared_folder: # Copy asset to shared folder shutil.copy(new_url_path, os.path.join(container_shared_folder(), os.path.basename(new_url_path))) new_url_path = './' + os.path.basename(new_url_path) if self.__is_wsl: command = ['wslpath', '-w', new_url_path] new_url_path = subprocess.check_output(command).strip().decode('utf-8').replace('\\', '/') new_url_path = '"' + new_url_path + '"' url_path = '"' + url_path + '"' content = content.replace(url_path, new_url_path) with open(self.__world_copy.name, 'w') as file: file.write(content) # Add the Ros2Supervisor if self.__is_supervisor: indent = ' ' world_file = open(self.__world_copy.name, 'a') world_file.write('Robot {\n') world_file.write(indent + 'name "Ros2Supervisor"\n') world_file.write(indent + 'controller "<extern>"\n') world_file.write(indent + 'supervisor TRUE\n') world_file.write('}\n') world_file.close() # Copy world file to shared folder if self.__has_shared_folder: shared_world_file = os.path.join(container_shared_folder(), os.path.basename(self.__world_copy.name)) shutil.copy(self.__world_copy.name, shared_world_file) if wbproj_path.exists(): shared_wbproj_copy_path = Path(shared_world_file).with_name('.' + Path(shared_world_file).stem + '.wbproj') shutil.copy(wbproj_path, shared_wbproj_copy_path) # Execute process return super().execute(context) def _shutdown_process(self, context, *, send_sigint): # Remove copy of the world and the corresponding ".wbproj" file if self.__world_copy: self.__world_copy.close() if os.path.isfile(self.__world_copy.name): os.unlink(self.__world_copy.name) path, file = os.path.split(self.__world_copy.name) world_copy_secondary_file = os.path.join(path, '.' + file[:-1] + 'proj') if os.path.isfile(world_copy_secondary_file): os.unlink(world_copy_secondary_file) # Clean the content of the shared directory for next run if self.__has_shared_folder: for filename in os.listdir(container_shared_folder()): file_path = os.path.join(container_shared_folder(), filename) try: if os.path.isfile(file_path): os.unlink(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as error: print(f'Failed to delete {file_path}. 
Reason: {error}.') return super()._shutdown_process(context, send_sigint=send_sigint) class Ros2SupervisorLauncher(Node): def __init__(self, output='screen', respawn=True, port='1234', **kwargs): # Launch the Ros2Supervisor node super().__init__( package='webots_ros2_driver', executable='ros2_supervisor.py', namespace='Ros2Supervisor', remappings=[('/Ros2Supervisor/clock', '/clock')], output=output, # Set WEBOTS_HOME to the webots_ros2_driver installation folder # to load the correct libController libraries from the Python API additional_env={'WEBOTS_CONTROLLER_URL': controller_url_prefix(port) + 'Ros2Supervisor', 'WEBOTS_HOME': get_package_prefix('webots_ros2_driver')}, respawn=respawn, **kwargs )
def __init__(self, *, condition, false_value='', true_value=''): self.__condition = condition if isinstance(condition, Substitution) else TextSubstitution(text=str(condition)) self.__false_value = false_value if isinstance(false_value, Substitution) else TextSubstitution(text=false_value) self.__true_value = true_value if isinstance(true_value, Substitution) else TextSubstitution(text=true_value) def perform(self, context): if context.perform_substitution(self.__condition).lower() in ['false', '0', '']: return context.perform_substitution(self.__false_value) return context.perform_substitution(self.__true_value)
identifier_body
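This last `webots_launcher.py` record holds out the whole body of `_ConditionalSubstitution` (both `__init__` and `perform`). For context, a minimal launch file exercising the `WebotsLauncher` defined above might look as follows; the world filename is a placeholder, and only constructor arguments visible in this file are used:

from launch import LaunchDescription
from webots_ros2_driver.webots_launcher import WebotsLauncher

def generate_launch_description():
    # 'my_world.wbt' is a placeholder world; ros2_supervisor=True makes the
    # launcher create the companion Ros2SupervisorLauncher node.
    webots = WebotsLauncher(world='my_world.wbt', gui=True, mode='realtime',
                            ros2_supervisor=True)
    return LaunchDescription([webots, webots._supervisor])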
bikeshare_2.py
import time import pandas as pd import numpy as np CITY_DATA = { 'chicago': 'chicago.csv', 'new york': 'new_york_city.csv', 'washington': 'washington.csv' } # my_list cities = ["chicago", "new york", "washington"] filters = ["month", "day", "both", "none"] months = ["all", "january", "february", "march", "april", "may","june"] days = ["all", "monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"] # question question_1 = "Would you like to see data for Chicago, New York, or Washington?\n" question_2 = "Would you like to filter the data by month, day, both or not at all? Type none for no time filter\n" question_3 = "Which month - January, February, March, April, May, or June?\n" question_4 = "Which day - Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, or Sunday?\n" def handle_invalid_inputs(question,my_list): """ Gets the user's answer to a question (question), checks that it belongs to the list of expected answers (my_list), and handles invalid inputs from the user. Args: (str) question - the question for which we want to get and test the user's input. (list) my_list - the list of answers that we expect. Returns: (str) final_answer - a string containing a valid input typed by the user. """ final_answer = None while final_answer not in my_list: final_answer = input(question).lower() return final_answer def get_month(): """ Gets the month chosen by the user when filter_choosed equals "month". Returns: month - name of the month """ return handle_invalid_inputs(question_3, months) def get_day(): """ Gets the day chosen by the user when filter_choosed equals "day". Returns: day - string containing the name of the day """ return handle_invalid_inputs(question_4, days) def get_both(): """ Gets the month and day chosen by the user when filter_choosed equals "both". Returns: (str) get_month() (str) get_day() """ return get_month(), get_day() def get_filters(): """ Asks user to specify a city, month, and day to analyze. Returns: (str) city - name of the city to analyze (str) month - name of the month to filter by, or "all" to apply no month filter (str) day - name of the day of week to filter by, or "all" to apply no day filter (str) filter_choosed - name of the chosen filter """ print('Hello! Let\'s explore some US bikeshare data!') # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs city = handle_invalid_inputs(question_1, cities) # get the user's choice of filter (month, day, both, or not at all (none)) filter_choosed = handle_invalid_inputs(question_2, filters) # if filter_choosed == "month" if filter_choosed == "month": # get user input for month (all, january, february, ... , june) month = get_month() day = "all" # if filter_choosed == "day" if filter_choosed == "day": # get user input for day of week (all, monday, tuesday, ... sunday) day = get_day() month = "all" # if filter_choosed == "both" if filter_choosed == "both": # get user input for day of week and month month, day = get_both() # if filter_choosed == "none" if filter_choosed == "none": month = "all" day = "all" print('-'*40) return city, month, day, filter_choosed def load_data(city, month, day): """ Loads data for the specified city and filters by month and day if applicable. Args: (str) city - name of the city to analyze (str) month - name of the month to filter by, or "all" to apply no month filter
# load data file into a dataframe df = pd.read_csv(CITY_DATA[city]) # convert the Start Time column to datetime df['Start Time'] = pd.to_datetime(df['Start Time']) # extract month, day of week and hour from Start Time to create new columns df['month'] = df['Start Time'].dt.month df['day_of_week'] = df['Start Time'].dt.day_name() df['hour'] = df['Start Time'].dt.hour # filter by month if applicable if month != 'all': # use the index of the months list to get the corresponding int months = ["january", "february", "march", "april", "may", "june"] month = months.index(month) + 1 # filter by month to create the new dataframe df = df[df['month'] == month] # filter by day of week if applicable if day != 'all': # filter by day of week to create the new dataframe df = df[df['day_of_week'] == day.title()] return df def popular_counts_column(column): """ Calculates statistics (the most popular entry of a column and its number of occurrences) on the most frequent times of travel. Args: (pd.Series) column - column of a DataFrame Returns: popular_anything - string containing the popular entry counts_anything - int containing the number of occurrences of that popular entry """ popular_anything = column.mode()[0] counts_anything = column.value_counts()[popular_anything] return popular_anything, counts_anything def time_stats(df, filter_choosed): """Displays statistics on the most frequent times of travel.""" print('\nCalculating The Most Frequent Times of Travel...\n') start_time = time.time() # display the most common month and its number of occurrences popular_month, counts_month = popular_counts_column(df['month']) print('The Most Popular month:{}, Counts:{},'.format(popular_month, counts_month), end = ' ') # display the most common day of week and its number of occurrences popular_day, counts_day = popular_counts_column(df['day_of_week']) print('The Most Popular day:{}, Counts:{},'.format(popular_day, counts_day), end = ' ') # display the most common start hour and its number of occurrences popular_hour, counts_hour = popular_counts_column(df['hour']) print('The Most Popular hour:{}, Counts:{}, Filter:{}\n'.format(popular_hour, counts_hour, filter_choosed)) print("\nThis took %s seconds." % (time.time() - start_time)) print('-'*40) def station_stats(df, filter_choosed): """Displays statistics on the most popular stations and trip.""" print('\nCalculating The Most Popular Stations and Trip...\n') start_time = time.time() # display most commonly used start station popular_start, counts_start = popular_counts_column(df['Start Station']) print('Start Station:{}, Counts:{},'.format(popular_start, counts_start), end = ' ') # display most commonly used end station popular_end, counts_end = popular_counts_column(df['End Station']) print('End Station:{}, Counts:{},'.format(popular_end, counts_end), end = ' ') # display most frequent combination of start station and end station trip popular_start_end, counts_start_end = popular_counts_column(df['Start Station'] + '-' + df['End Station']) print("Popular Trip:('{}'-'{}'), Counts:{}, Filter:{}\n".format(popular_start_end.split('-')[0],popular_start_end.split('-')[1], counts_start_end, filter_choosed)) print("\nThis took %s seconds."
% (time.time() - start_time)) print('-'*40) def trip_duration_stats(df, filter_choosed): """Displays statistics on the total and average trip duration.""" print('\nCalculating Trip Duration...\n') start_time = time.time() # display total travel time total_travel_time = df['Trip Duration'].sum() travel_number = df['Trip Duration'].size print('Total Duration:{}, Count:{},'.format(total_travel_time, travel_number), end = ' ') # display mean travel time mean_travel_time = df['Trip Duration'].mean() print('Avg Duration:{}, Filter:{}\n'.format(mean_travel_time, filter_choosed)) print("\nThis took %s seconds." % (time.time() - start_time)) print('-'*40) def user_stats(df, city, filter_choosed): """Displays statistics on bikeshare users.""" print('\nCalculating User Stats...\n') start_time = time.time() # Display counts of user types print('Statistics for User Types ...... \n') user_types_dict = dict(df['User Type'].value_counts()) for key, value in user_types_dict.items(): print('{}:{}'.format(key,value), end = ' ') print('filter:', filter_choosed) # Display counts of gender print('\nStatistics for gender ...... \n') if city != 'washington': gender_dict = dict(df['Gender'].value_counts()) for key, value in gender_dict.items(): print('{}:{}'.format(key,value), end = ' ') print(' filter:', filter_choosed) else: print('No data about gender') # Display earliest, most recent, and most common year of birth print('\nStatistics for year of birth ...... \n') if city != 'washington': earliest_year = df['Birth Year'].min() most_recent_year = df['Birth Year'].max() popular_year = df['Birth Year'].mode()[0] print('Earliest Year:{}, Most Recent Year:{}, Most Popular Year:{}, filter:{}'.format(earliest_year, most_recent_year, popular_year, filter_choosed)) else: print('No data about year of birth') print("\nThis took %s seconds." % (time.time() - start_time)) print('-'*40) def individual_trip_data(df): """Displays individual trip data of each user.""" data = df.to_dict('records') i = 0 j = 5 length = len(data) while True: see_trip = input('\nWould you like to see individual trip data? Type yes or no.\n') if see_trip.lower() != 'yes': break else: if i < length: for k in range(i, min(j, length)): print(data[k]) i = j j += 5 def main(): while True: city, month, day, filter_choosed = get_filters() df = load_data(city, month, day) time_stats(df, filter_choosed) station_stats(df, filter_choosed) trip_duration_stats(df, filter_choosed) user_stats(df, city, filter_choosed) individual_trip_data(df) restart = input('\nWould you like to restart? Enter yes or no.\n') if restart.lower() != 'yes': break if __name__ == "__main__": main()
(str) day - name of the day of week to filter by, or "all" to apply no day filter Returns: df - Pandas DataFrame containing city data filtered by month and day """
random_line_split
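The bikeshare record above splits `load_data`'s docstring mid-way (`random_line_split` again). Its `popular_counts_column` helper is the core statistic used throughout: the most frequent entry of a column plus its count, via `Series.mode` and `Series.value_counts`. A self-contained sketch with made-up data:

import pandas as pd

def popular_counts_column(column):
    # Most frequent entry of a column and how often it occurs,
    # as in the bikeshare script above.
    popular = column.mode()[0]
    counts = column.value_counts()[popular]
    return popular, counts

hours = pd.Series([8, 17, 17, 9, 17, 8])  # toy 'hour' column
print(popular_counts_column(hours))  # -> (17, 3)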
bikeshare_2.py
import time import pandas as pd import numpy as np CITY_DATA = { 'chicago': 'chicago.csv', 'new york': 'new_york_city.csv', 'washington': 'washington.csv' } # my_list cities = ["chicago", "new york", "washington"] filters = ["month", "day", "both", "none"] months = ["all", "january", "february", "march", "april", "may","june"] days = ["all", "monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"] # question question_1 = "Would you like to see data for Chicago, New York, or Washington?\n" question_2 = "Would you like to filter the data by month, day, both or not at all? Type none for no time filter\n" question_3 = "Which month - January, February, March, April, May, or June?\n" question_4 = "Which day - Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, or Sunday?\n" def handle_invalid_inputs(question,my_list): """ Gets the user's answer to a question (question), checks that it belongs to the list of expected answers (my_list), and handles invalid inputs from the user. Args: (str) question - the question for which we want to get and test the user's input. (list) my_list - the list of answers that we expect. Returns: (str) final_answer - a string containing a valid input typed by the user. """ final_answer = None while final_answer not in my_list: final_answer = input(question).lower() return final_answer def get_month(): """ Gets the month chosen by the user when filter_choosed equals "month". Returns: month - name of the month """ return handle_invalid_inputs(question_3, months) def get_day(): """ Gets the day chosen by the user when filter_choosed equals "day". Returns: day - string containing the name of the day """ return handle_invalid_inputs(question_4, days) def get_both(): """ Gets the month and day chosen by the user when filter_choosed equals "both". Returns: (str) get_month() (str) get_day() """ return get_month(), get_day() def get_filters(): """ Asks user to specify a city, month, and day to analyze. Returns: (str) city - name of the city to analyze (str) month - name of the month to filter by, or "all" to apply no month filter (str) day - name of the day of week to filter by, or "all" to apply no day filter (str) filter_choosed - name of the chosen filter """ print('Hello! Let\'s explore some US bikeshare data!') # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs city = handle_invalid_inputs(question_1, cities) # get the user's choice of filter (month, day, both, or not at all (none)) filter_choosed = handle_invalid_inputs(question_2, filters) # if filter_choosed == "month" if filter_choosed == "month": # get user input for month (all, january, february, ... , june) month = get_month() day = "all" # if filter_choosed == "day" if filter_choosed == "day": # get user input for day of week (all, monday, tuesday, ... sunday)
# if filter_choosed == "both" if filter_choosed == "both": # get user input for day of week and month month, day = get_both() # if filter_choosed == none if filter_choosed == "none": month = "all" day = "all" print('-'*40) return city, month, day, filter_choosed def load_data(city, month, day): """ Loads data for the specified city and filter_chooseds by month and day if applicable. Args: (str) city - name of the city to analyze (str) month - name of the month to filter_choosed by, or "all" to apply no month filter_choosed (str) day - name of the day of week to filter_choosed by, or "all" to apply no day filter_choosed Returns: df - Pandas DataFrame containing city data filter_chooseded by month and day """ # load data file into a dataframe df = pd.read_csv(CITY_DATA[city]) # convert the Start Time column to datetime df['Start Time'] = pd.to_datetime(df['Start Time']) # extract month, day of week and hour from Start Time to create new columns df['month'] = df['Start Time'].dt.month df['day_of_week'] = df['Start Time'].dt.weekday_name df['hour'] = df['Start Time'].dt.hour # filter_choosed by month if applicable if month != 'all': # use the index of the months list to get the corresponding int months = ["january", "february", "march", "april", "may", "june"] month = months.index(month) + 1 # filter_choosed by month to create the new dataframe df = df[df['month'] == month] # filter_choosed by day of week if applicable if day != 'all': # filter_choosed by day of week to create the new dataframe df = df[df['day_of_week'] == day.title()] return df def popular_counts_column(column): """ calculate statistics(popular entry of that column and his occurrence) on the most frequent times of travel. Args: (pd.Series) column - column of a DataFrame Returns: popular_anything - string containing the popular entry counts_anything - int containing number of occurence of that popular entry """ popular_anything = column.mode()[0] counts_anything = column.value_counts()[popular_anything] return popular_anything, counts_anything def time_stats(df, filter_choosed): """Displays statistics on the most frequent times of travel.""" print('\nCalculating The Most Frequent Times of Travel...\n') start_time = time.time() # display the most common month and number of occurrence popular_month, counts_month = popular_counts_column(df['month']) print('The Most Popular month:{}, Counts:{},'.format(popular_month, counts_month), end = ' ') # display the most common day of week and number of occurence popular_day, counts_day = popular_counts_column(df['day_of_week']) print('The Most Popular day:{}, Counts:{},'.format(popular_day, counts_day), end = ' ') # display the most common start hour and number of occurrence popular_hour, counts_hour = popular_counts_column(df['hour']) print('The Most Popular hour:{}, Counts:{}, Filter:{}\n'.format(popular_hour, counts_hour, filter_choosed)) print("\nThis took %s seconds." 
% (time.time() - start_time)) print('-'*40) def station_stats(df, filter_choosed): """Displays statistics on the most popular stations and trip.""" print('\nCalculating The Most Popular Stations and Trip...\n') start_time = time.time() # display most commonly used start station popular_start, counts_start = popular_counts_column(df['Start Station']) print('Start Station:{}, Counts:{},'.format(popular_start, counts_start), end = ' ') # display most commonly used end station popular_end, counts_end = popular_counts_column(df['End Station']) print('End Station:{}, Counts:{},'.format(popular_end, counts_end), end = ' ') # display most frequent combination of start station and end station trip popular_start_end, counts_start_end = popular_counts_column(df['Start Station'] + '-' + df['End Station']) print("Popular Trip:('{}'-'{}'), Counts:{}, Filter:{}\n".format(popular_start_end.split('-')[0],popular_start_end.split('-')[1], counts_start_end, filter_choosed)) print("\nThis took %s seconds." % (time.time() - start_time)) print('-'*40) def trip_duration_stats(df, filter_choosed): """Displays statistics on the total and average trip duration.""" print('\nCalculating Trip Duration...\n') start_time = time.time() # display total travel time total_travel_time = df['Trip Duration'].sum() travel_number = df['Trip Duration'].size print('Total Duration:{}, Count:{},'.format(total_travel_time, travel_number), end = ' ') # display mean travel time mean_travel_time = df['Trip Duration'].mean() print('Avg Duration:{}, Filter:{}\n'.format(mean_travel_time, filter_choosed)) print("\nThis took %s seconds." % (time.time() - start_time)) print('-'*40) def user_stats(df, city, filter_choosed): """Displays statistics on bikeshare users.""" print('\nCalculating User Stats...\n') start_time = time.time() # Display counts of user types print('Statistics for User Types ...... \n') user_types_dict = dict(df['User Type'].value_counts()) for key, value in user_types_dict.items(): print('{}:{}'.format(key,value), end = ' ') print('filter:', filter_choosed) # Display counts of gender print('\nStatistics for gender ...... \n') if city != 'washington': gender_dict = dict(df['Gender'].value_counts()) for key, value in gender_dict.items(): print('{}:{}'.format(key,value), end = ' ') print(' filter:', filter_choosed) else: print('No data about gender') # Display earliest, most recent, and most common year of birth print('\nStatistics for year of birth ...... \n') if city != 'washington': earliest_year = df['Birth Year'].min() most_recent_year = df['Birth Year'].max() popular_year = df['Birth Year'].mode()[0] print('Earliest Year:{}, Most Recent Year:{}, Most Popular Year:{}, filter:{}'.format(earliest_year, most_recent_year, popular_year, filter_choosed)) else: print('No data about year of birth') print("\nThis took %s seconds." % (time.time() - start_time)) print('-'*40) def individual_trip_data(df): """Displays individual trip data of each user.""" data = df.to_dict('records') i = 0 j = 5 length = len(data) while True: see_trip = input('\nWould you like to see individual trip data? 
Type yes or no.\n') if see_trip.lower() != 'yes': break elif i < length: # show the next batch of up to five trips without repeating earlier ones for k in range(i, min(j, length)): print(data[k]) i = j j += 5 else: break def main(): while True: city, month, day, filter_choosed = get_filters() df = load_data(city, month, day) time_stats(df, filter_choosed) station_stats(df, filter_choosed) trip_duration_stats(df, filter_choosed) user_stats(df, city, filter_choosed) individual_trip_data(df) restart = input('\nWould you like to restart? Enter yes or no.\n') if restart.lower() != 'yes': break if __name__ == "__main__": main()
day = get_day() month = "all"
conditional_block
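A compatibility note on the load_data suffix above: Series.dt.weekday_name, which it uses to derive the day_of_week column, was deprecated in pandas 0.23 and removed in pandas 1.0. A one-line sketch of the modern equivalent (not part of the source file):

import pandas as pd

s = pd.to_datetime(pd.Series(["2017-06-23", "2017-01-02"]))
print(s.dt.day_name())  # on pandas >= 0.23: Friday, Monday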
bikeshare_2.py
import time import pandas as pd import numpy as np CITY_DATA = { 'chicago': 'chicago.csv', 'new york': 'new_york_city.csv', 'washington': 'washington.csv' } # my_list cities = ["chicago", "new york", "washington"] filters = ["month", "day", "both", "none"] months = ["all", "january", "february", "march", "april", "may","june"] days = ["all", "monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"] # question question_1 = "Would you like to see data for Chicago, New York, or Washington?\n" question_2 = "Would you like to filter the data by month, day, both or not at all? Type none for no time filter\n" question_3 = "Which month - January, February, March, April, May, or June?\n" question_4 = "Which day - Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, or Sunday?\n" def handle_invalid_inputs(question,my_list): """ Gets, tests if the input of a question(question) belongs to a list(my_list) that we attend and handle invalid inputs of the user. Args: (str) question - the question for what we want to get and test the input of the user. (list) my_list - the list of answer that we wish to have. Returns: (str) final_answer - a string containing a good input typed by the user. """ final_answer = None while final_answer not in my_list: final_answer = input(question).lower() return final_answer def get_month(): """ Gets the input month choosed by the user in case where filter_choosed equal to "month". Returns: month - name of the month """ return handle_invalid_inputs(question_3, months) def get_day(): """ Gets the input day choosed by the user in case where filter_choosed equal to "day". Returns: day - string contening the name of the day """ return handle_invalid_inputs(question_4, days) def get_both(): """ Gets the input month and day choosed by the user in case where filter_choosed equal to "both". Returns: (str) get_month() (str) get_day() """ return get_month(), get_day() def get_filters(): """ Asks user to specify a city, month, and day to analyze. Returns: (str) city - name of the city to analyze (str) month - name of the month to filter_choosed by, or "all" to apply no month filter_choosed (str) day - name of the day of week to filter_choosed by, or "all" to apply no day filter_choosed (str) filter_choosed - name of the the choosed filter_choosed """ print('Hello! Let\'s explore some US bikeshare data!') # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs city = handle_invalid_inputs(question_1, cities) # get the user input of the filter_choosed (month, day, both, or not at all(none)) filter_choosed = handle_invalid_inputs(question_2, filters) # if filter_choosed == "month" if filter_choosed == "month": # get user input for month (all, january, february, ... , june) month = get_month() day = "all" # if filter_choosed == "day" if filter_choosed == "day": # get user input for day of week (all, monday, tuesday, ... sunday) day = get_day() month = "all" # if filter_choosed == "both" if filter_choosed == "both": # get user input for day of week and month month, day = get_both() # if filter_choosed == none if filter_choosed == "none": month = "all" day = "all" print('-'*40) return city, month, day, filter_choosed def load_data(city, month, day): """ Loads data for the specified city and filter_chooseds by month and day if applicable. 
Args: (str) city - name of the city to analyze (str) month - name of the month to filter_choosed by, or "all" to apply no month filter_choosed (str) day - name of the day of week to filter_choosed by, or "all" to apply no day filter_choosed Returns: df - Pandas DataFrame containing city data filter_chooseded by month and day """ # load data file into a dataframe df = pd.read_csv(CITY_DATA[city]) # convert the Start Time column to datetime df['Start Time'] = pd.to_datetime(df['Start Time']) # extract month, day of week and hour from Start Time to create new columns df['month'] = df['Start Time'].dt.month df['day_of_week'] = df['Start Time'].dt.weekday_name df['hour'] = df['Start Time'].dt.hour # filter_choosed by month if applicable if month != 'all': # use the index of the months list to get the corresponding int months = ["january", "february", "march", "april", "may", "june"] month = months.index(month) + 1 # filter_choosed by month to create the new dataframe df = df[df['month'] == month] # filter_choosed by day of week if applicable if day != 'all': # filter_choosed by day of week to create the new dataframe df = df[df['day_of_week'] == day.title()] return df def popular_counts_column(column): """ calculate statistics(popular entry of that column and his occurrence) on the most frequent times of travel. Args: (pd.Series) column - column of a DataFrame Returns: popular_anything - string containing the popular entry counts_anything - int containing number of occurence of that popular entry """ popular_anything = column.mode()[0] counts_anything = column.value_counts()[popular_anything] return popular_anything, counts_anything def time_stats(df, filter_choosed): """Displays statistics on the most frequent times of travel.""" print('\nCalculating The Most Frequent Times of Travel...\n') start_time = time.time() # display the most common month and number of occurrence popular_month, counts_month = popular_counts_column(df['month']) print('The Most Popular month:{}, Counts:{},'.format(popular_month, counts_month), end = ' ') # display the most common day of week and number of occurence popular_day, counts_day = popular_counts_column(df['day_of_week']) print('The Most Popular day:{}, Counts:{},'.format(popular_day, counts_day), end = ' ') # display the most common start hour and number of occurrence popular_hour, counts_hour = popular_counts_column(df['hour']) print('The Most Popular hour:{}, Counts:{}, Filter:{}\n'.format(popular_hour, counts_hour, filter_choosed)) print("\nThis took %s seconds." 
% (time.time() - start_time)) print('-'*40) def station_stats(df, filter_choosed): """Displays statistics on the most popular stations and trip.""" print('\nCalculating The Most Popular Stations and Trip...\n') start_time = time.time() # display most commonly used start station popular_start, counts_start = popular_counts_column(df['Start Station']) print('Start Station:{}, Counts:{},'.format(popular_start, counts_start), end = ' ') # display most commonly used end station popular_end, counts_end = popular_counts_column(df['End Station']) print('End Station:{}, Counts:{},'.format(popular_end, counts_end, filter_choosed), end = ' ') # display most frequent combination of start station and end station trip popular_start_end, counts_start_end = popular_counts_column(df['Start Station'] + '-' + df['End Station']) print("Popular Trip:('{}'-'{}'), Counts:{}, Filter:{}\n".format(popular_start_end.split('-')[0],popular_start_end.split('-')[1], counts_start_end, filter_choosed)) print("\nThis took %s seconds." % (time.time() - start_time)) print('-'*40) def trip_duration_stats(df, filter_choosed): """Displays statistics on the total and average trip duration.""" print('\nCalculating Trip Duration...\n') start_time = time.time() # display total travel time total_travel_time = df['Trip Duration'].sum() travel_number = df['Trip Duration'].size print('Total Duration:{}, Count:{},'.format(total_travel_time, travel_number), end = ' ') # display mean travel time mean_travel_time = df['Trip Duration'].mean() print('Avg Duration:{}, Filter:{}\n'.format(mean_travel_time, filter_choosed)) print("\nThis took %s seconds." % (time.time() - start_time)) print('-'*40) def user_stats(df, city, filter_choosed): """Displays statistics on bikeshare users.""" print('\nCalculating User Stats...\n') start_time = time.time() # Display counts of user types print('Statistics for User Types ...... \n') user_types_dict = dict(df['User Type'].value_counts()) for key, value in user_types_dict.items(): print('{}:{}'.format(key,value), end = ' ') print('filter:', filter_choosed) # Display counts of gender print('\nStatistics for gender ...... \n') if city != 'washington': gender_dict = dict(df['Gender'].value_counts()) for key, value in gender_dict.items(): print('{}:{}'.format(key,value), end = ' ') print(' filter:', filter_choosed) else: print('No data about gender') # Display earliest, most recent, and most common year of birth print('\nStatistics for year of birth ...... \n') if city != 'washington': earliest_year = df['Birth Year'].min() most_recent_year = df['Birth Year'].max() popular_year = df['Birth Year'].mode()[0] print('Earliest Year:{}, Most Recent Year:{}, Most Popular Year:{}, filter:{}'.format(earliest_year, most_recent_year, popular_year, filter_choosed)) else: print('No data about birth of year') print("\nThis took %s seconds." % (time.time() - start_time)) print('-'*40) def individual_trip_data(df):
def main(): while True: city, month, day, filter_choosed = get_filters() df = load_data(city, month, day) time_stats(df, filter_choosed) station_stats(df, filter_choosed) trip_duration_stats(df, filter_choosed) user_stats(df, city, filter_choosed) individual_trip_data(df) restart = input('\nWould you like to restart? Enter yes or no.\n') if restart.lower() != 'yes': break if __name__ == "__main__": main()
"""Displays individual trip data of each user.""" data = df.to_dict('records') i = 0 j = 5 length = len(data) while True: see_trip = input('\nWould you like to individual trip data? Type yes or no.\n') if see_trip.lower() != 'yes': break else: if i < j and i < length: for i in range(j): print(data[i]) i = j j += 5
identifier_body
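The identifier_body middle above is the batched-display loop of individual_trip_data. A small self-contained sketch of the same five-at-a-time pagination pattern on toy data (names here are illustrative, not from the source):

records = [{"id": n} for n in range(12)]  # stand-in for df.to_dict('records')
start = 0
while start < len(records):
    if input("Show next 5 records? (yes/no) ").lower() != "yes":
        break
    for rec in records[start:start + 5]:  # slicing never over-runs the list
        print(rec)
    start += 5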
bikeshare_2.py
import time import pandas as pd import numpy as np CITY_DATA = { 'chicago': 'chicago.csv', 'new york': 'new_york_city.csv', 'washington': 'washington.csv' } # my_list cities = ["chicago", "new york", "washington"] filters = ["month", "day", "both", "none"] months = ["all", "january", "february", "march", "april", "may","june"] days = ["all", "monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"] # question question_1 = "Would you like to see data for Chicago, New York, or Washington?\n" question_2 = "Would you like to filter the data by month, day, both or not at all? Type none for no time filter\n" question_3 = "Which month - January, February, March, April, May, or June?\n" question_4 = "Which day - Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, or Sunday?\n" def handle_invalid_inputs(question,my_list): """ Gets, tests if the input of a question(question) belongs to a list(my_list) that we attend and handle invalid inputs of the user. Args: (str) question - the question for what we want to get and test the input of the user. (list) my_list - the list of answer that we wish to have. Returns: (str) final_answer - a string containing a good input typed by the user. """ final_answer = None while final_answer not in my_list: final_answer = input(question).lower() return final_answer def get_month(): """ Gets the input month choosed by the user in case where filter_choosed equal to "month". Returns: month - name of the month """ return handle_invalid_inputs(question_3, months) def get_day(): """ Gets the input day choosed by the user in case where filter_choosed equal to "day". Returns: day - string contening the name of the day """ return handle_invalid_inputs(question_4, days) def get_both(): """ Gets the input month and day choosed by the user in case where filter_choosed equal to "both". Returns: (str) get_month() (str) get_day() """ return get_month(), get_day() def get_filters(): """ Asks user to specify a city, month, and day to analyze. Returns: (str) city - name of the city to analyze (str) month - name of the month to filter_choosed by, or "all" to apply no month filter_choosed (str) day - name of the day of week to filter_choosed by, or "all" to apply no day filter_choosed (str) filter_choosed - name of the the choosed filter_choosed """ print('Hello! Let\'s explore some US bikeshare data!') # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs city = handle_invalid_inputs(question_1, cities) # get the user input of the filter_choosed (month, day, both, or not at all(none)) filter_choosed = handle_invalid_inputs(question_2, filters) # if filter_choosed == "month" if filter_choosed == "month": # get user input for month (all, january, february, ... , june) month = get_month() day = "all" # if filter_choosed == "day" if filter_choosed == "day": # get user input for day of week (all, monday, tuesday, ... sunday) day = get_day() month = "all" # if filter_choosed == "both" if filter_choosed == "both": # get user input for day of week and month month, day = get_both() # if filter_choosed == none if filter_choosed == "none": month = "all" day = "all" print('-'*40) return city, month, day, filter_choosed def load_data(city, month, day): """ Loads data for the specified city and filter_chooseds by month and day if applicable. 
Args: (str) city - name of the city to analyze (str) month - name of the month to filter_choosed by, or "all" to apply no month filter_choosed (str) day - name of the day of week to filter_choosed by, or "all" to apply no day filter_choosed Returns: df - Pandas DataFrame containing city data filter_chooseded by month and day """ # load data file into a dataframe df = pd.read_csv(CITY_DATA[city]) # convert the Start Time column to datetime df['Start Time'] = pd.to_datetime(df['Start Time']) # extract month, day of week and hour from Start Time to create new columns df['month'] = df['Start Time'].dt.month df['day_of_week'] = df['Start Time'].dt.weekday_name df['hour'] = df['Start Time'].dt.hour # filter_choosed by month if applicable if month != 'all': # use the index of the months list to get the corresponding int months = ["january", "february", "march", "april", "may", "june"] month = months.index(month) + 1 # filter_choosed by month to create the new dataframe df = df[df['month'] == month] # filter_choosed by day of week if applicable if day != 'all': # filter_choosed by day of week to create the new dataframe df = df[df['day_of_week'] == day.title()] return df def
(column): """ calculate statistics(popular entry of that column and his occurrence) on the most frequent times of travel. Args: (pd.Series) column - column of a DataFrame Returns: popular_anything - string containing the popular entry counts_anything - int containing number of occurence of that popular entry """ popular_anything = column.mode()[0] counts_anything = column.value_counts()[popular_anything] return popular_anything, counts_anything def time_stats(df, filter_choosed): """Displays statistics on the most frequent times of travel.""" print('\nCalculating The Most Frequent Times of Travel...\n') start_time = time.time() # display the most common month and number of occurrence popular_month, counts_month = popular_counts_column(df['month']) print('The Most Popular month:{}, Counts:{},'.format(popular_month, counts_month), end = ' ') # display the most common day of week and number of occurence popular_day, counts_day = popular_counts_column(df['day_of_week']) print('The Most Popular day:{}, Counts:{},'.format(popular_day, counts_day), end = ' ') # display the most common start hour and number of occurrence popular_hour, counts_hour = popular_counts_column(df['hour']) print('The Most Popular hour:{}, Counts:{}, Filter:{}\n'.format(popular_hour, counts_hour, filter_choosed)) print("\nThis took %s seconds." % (time.time() - start_time)) print('-'*40) def station_stats(df, filter_choosed): """Displays statistics on the most popular stations and trip.""" print('\nCalculating The Most Popular Stations and Trip...\n') start_time = time.time() # display most commonly used start station popular_start, counts_start = popular_counts_column(df['Start Station']) print('Start Station:{}, Counts:{},'.format(popular_start, counts_start), end = ' ') # display most commonly used end station popular_end, counts_end = popular_counts_column(df['End Station']) print('End Station:{}, Counts:{},'.format(popular_end, counts_end, filter_choosed), end = ' ') # display most frequent combination of start station and end station trip popular_start_end, counts_start_end = popular_counts_column(df['Start Station'] + '-' + df['End Station']) print("Popular Trip:('{}'-'{}'), Counts:{}, Filter:{}\n".format(popular_start_end.split('-')[0],popular_start_end.split('-')[1], counts_start_end, filter_choosed)) print("\nThis took %s seconds." % (time.time() - start_time)) print('-'*40) def trip_duration_stats(df, filter_choosed): """Displays statistics on the total and average trip duration.""" print('\nCalculating Trip Duration...\n') start_time = time.time() # display total travel time total_travel_time = df['Trip Duration'].sum() travel_number = df['Trip Duration'].size print('Total Duration:{}, Count:{},'.format(total_travel_time, travel_number), end = ' ') # display mean travel time mean_travel_time = df['Trip Duration'].mean() print('Avg Duration:{}, Filter:{}\n'.format(mean_travel_time, filter_choosed)) print("\nThis took %s seconds." % (time.time() - start_time)) print('-'*40) def user_stats(df, city, filter_choosed): """Displays statistics on bikeshare users.""" print('\nCalculating User Stats...\n') start_time = time.time() # Display counts of user types print('Statistics for User Types ...... \n') user_types_dict = dict(df['User Type'].value_counts()) for key, value in user_types_dict.items(): print('{}:{}'.format(key,value), end = ' ') print('filter:', filter_choosed) # Display counts of gender print('\nStatistics for gender ...... 
\n') if city != 'washington': gender_dict = dict(df['Gender'].value_counts()) for key, value in gender_dict.items(): print('{}:{}'.format(key,value), end = ' ') print(' filter:', filter_choosed) else: print('No data about gender') # Display earliest, most recent, and most common year of birth print('\nStatistics for year of birth ...... \n') if city != 'washington': earliest_year = df['Birth Year'].min() most_recent_year = df['Birth Year'].max() popular_year = df['Birth Year'].mode()[0] print('Earliest Year:{}, Most Recent Year:{}, Most Popular Year:{}, filter:{}'.format(earliest_year, most_recent_year, popular_year, filter_choosed)) else: print('No data about year of birth') print("\nThis took %s seconds." % (time.time() - start_time)) print('-'*40) def individual_trip_data(df): """Displays individual trip data of each user.""" data = df.to_dict('records') i = 0 j = 5 length = len(data) while True: see_trip = input('\nWould you like to see individual trip data? Type yes or no.\n') if see_trip.lower() != 'yes': break elif i < length: # show the next batch of up to five trips without repeating earlier ones for k in range(i, min(j, length)): print(data[k]) i = j j += 5 else: break def main(): while True: city, month, day, filter_choosed = get_filters() df = load_data(city, month, day) time_stats(df, filter_choosed) station_stats(df, filter_choosed) trip_duration_stats(df, filter_choosed) user_stats(df, city, filter_choosed) individual_trip_data(df) restart = input('\nWould you like to restart? Enter yes or no.\n') if restart.lower() != 'yes': break if __name__ == "__main__": main()
popular_counts_column
identifier_name
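The identifier_name target here is popular_counts_column, whose body hinges on two pandas calls. A quick illustration of both on a toy Series:

import pandas as pd

col = pd.Series(["a", "b", "a", "c", "a"])
popular = col.mode()[0]              # most frequent value -> 'a'
count = col.value_counts()[popular]  # its number of occurrences -> 3
print(popular, count)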
main.rs
use colored::*; use dialoguer::Confirm; use hyper::{http, server::conn::AddrStream, Body, Request, Response}; use indoc::printdoc; use ipnetwork::IpNetwork; use parking_lot::{Mutex, RwLock}; use rusqlite::Connection; use serde::{Deserialize, Serialize}; use shared::{ AddCidrOpts, AddPeerOpts, DeleteCidrOpts, IoErrorContext, NetworkOpt, RenamePeerOpts, INNERNET_PUBKEY_HEADER, }; use std::{ collections::{HashMap, VecDeque}, convert::TryInto, env, fs::File, io::prelude::*, net::{IpAddr, SocketAddr, TcpListener}, ops::Deref, path::{Path, PathBuf}, sync::Arc, time::Duration, }; use structopt::{clap::AppSettings, StructOpt}; use subtle::ConstantTimeEq; use wgctrl::{Backend, Device, DeviceUpdate, InterfaceName, Key, PeerConfigBuilder}; pub mod api; pub mod db; pub mod error; #[cfg(test)] mod test; pub mod util; mod initialize; use db::{DatabaseCidr, DatabasePeer}; pub use error::ServerError; use initialize::InitializeOpts; use shared::{prompts, wg, CidrTree, Error, Interface, SERVER_CONFIG_DIR, SERVER_DATABASE_DIR}; pub use shared::{Association, AssociationContents}; pub const VERSION: &str = env!("CARGO_PKG_VERSION"); #[derive(Debug, StructOpt)] #[structopt(name = "innernet-server", about, global_settings(&[AppSettings::ColoredHelp, AppSettings::DeriveDisplayOrder, AppSettings::VersionlessSubcommands, AppSettings::UnifiedHelpMessage]))] struct Opt { #[structopt(subcommand)] command: Command, #[structopt(flatten)] network: NetworkOpt, } #[derive(Debug, StructOpt)] enum Command { /// Create a new network. #[structopt(alias = "init")] New { #[structopt(flatten)] opts: InitializeOpts, }, /// Permanently uninstall a created network, rendering it unusable. Use with care. Uninstall { interface: Interface }, /// Serve the coordinating server for an existing network. Serve { interface: Interface, #[structopt(flatten)] network: NetworkOpt, }, /// Add a peer to an existing network. AddPeer { interface: Interface, #[structopt(flatten)] args: AddPeerOpts, }, /// Rename an existing peer. RenamePeer { interface: Interface, #[structopt(flatten)] args: RenamePeerOpts, }, /// Add a new CIDR to an existing network. AddCidr { interface: Interface, #[structopt(flatten)] args: AddCidrOpts, }, /// Delete a CIDR. 
DeleteCidr { interface: Interface, #[structopt(flatten)] args: DeleteCidrOpts, }, /// Generate shell completion scripts Completions { #[structopt(possible_values = &structopt::clap::Shell::variants(), case_insensitive = true)] shell: structopt::clap::Shell, }, } pub type Db = Arc<Mutex<Connection>>; pub type Endpoints = Arc<RwLock<HashMap<String, SocketAddr>>>; #[derive(Clone)] pub struct Context { pub db: Db, pub endpoints: Arc<RwLock<HashMap<String, SocketAddr>>>, pub interface: InterfaceName, pub backend: Backend, pub public_key: Key, } pub struct Session { pub context: Context, pub peer: DatabasePeer, } impl Session { pub fn admin_capable(&self) -> bool { self.peer.is_admin && self.user_capable() } pub fn user_capable(&self) -> bool { !self.peer.is_disabled && self.peer.is_redeemed } pub fn redeemable(&self) -> bool { !self.peer.is_disabled && !self.peer.is_redeemed } } #[derive(Deserialize, Serialize, Debug)] #[serde(rename_all = "kebab-case")] pub struct ConfigFile { /// The server's WireGuard key pub private_key: String, /// The listen port of the server pub listen_port: u16, /// The internal WireGuard IP address assigned to the server pub address: IpAddr, /// The CIDR prefix of the WireGuard network pub network_cidr_prefix: u8, } impl ConfigFile { pub fn write_to_path<P: AsRef<Path>>(&self, path: P) -> Result<(), Error> { let mut invitation_file = File::create(&path).with_path(&path)?; shared::chmod(&invitation_file, 0o600)?; invitation_file .write_all(toml::to_string(self).unwrap().as_bytes()) .with_path(path)?; Ok(()) } pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self, Error> { let path = path.as_ref(); let file = File::open(path).with_path(path)?; if shared::chmod(&file, 0o600)? { println!( "{} updated permissions for {} to 0600.", "[!]".yellow(), path.display() ); } Ok(toml::from_slice(&std::fs::read(&path).with_path(path)?)?) } } #[derive(Clone, Debug, Default)] pub struct ServerConfig { wg_manage_dir_override: Option<PathBuf>, wg_dir_override: Option<PathBuf>, } impl ServerConfig { fn database_dir(&self) -> &Path { self.wg_manage_dir_override .as_deref() .unwrap_or(*SERVER_DATABASE_DIR) } fn database_path(&self, interface: &InterfaceName) -> PathBuf { PathBuf::new() .join(self.database_dir()) .join(interface.to_string()) .with_extension("db") } fn config_dir(&self) -> &Path { self.wg_dir_override .as_deref() .unwrap_or(*SERVER_CONFIG_DIR) } fn config_path(&self, interface: &InterfaceName) -> PathBuf { PathBuf::new() .join(self.config_dir()) .join(interface.to_string()) .with_extension("conf") } } #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { if env::var_os("RUST_LOG").is_none() { // Set some default log settings. env::set_var("RUST_LOG", "warn,warp=info,wg_manage_server=info"); } pretty_env_logger::init(); let opt = Opt::from_args(); if unsafe { libc::getuid() } != 0 && !matches!(opt.command, Command::Completions { .. 
}) { return Err("innernet-server must run as root.".into()); } let conf = ServerConfig::default(); match opt.command { Command::New { opts } => { if let Err(e) = initialize::init_wizard(&conf, opts) { eprintln!("{}: {}.", "creation failed".red(), e); std::process::exit(1); } }, Command::Uninstall { interface } => uninstall(&interface, &conf, opt.network)?, Command::Serve { interface, network: routing, } => serve(*interface, &conf, routing).await?, Command::AddPeer { interface, args } => add_peer(&interface, &conf, args, opt.network)?, Command::RenamePeer { interface, args } => rename_peer(&interface, &conf, args)?, Command::AddCidr { interface, args } => add_cidr(&interface, &conf, args)?, Command::DeleteCidr { interface, args } => delete_cidr(&interface, &conf, args)?, Command::Completions { shell } => { Opt::clap().gen_completions_to("innernet-server", shell, &mut std::io::stdout()); std::process::exit(0); }, } Ok(()) } fn open_database_connection( interface: &InterfaceName, conf: &ServerConfig, ) -> Result<rusqlite::Connection, Box<dyn std::error::Error>> { let database_path = conf.database_path(&interface); if !Path::new(&database_path).exists() { return Err(format!( "no database file found at {}", database_path.to_string_lossy() ) .into()); } let conn = Connection::open(&database_path)?; // Foreign key constraints aren't on in SQLite by default. Enable. conn.pragma_update(None, "foreign_keys", &1)?; db::auto_migrate(&conn)?; Ok(conn) } fn add_peer( interface: &InterfaceName, conf: &ServerConfig, opts: AddPeerOpts, network: NetworkOpt, ) -> Result<(), Error> { let config = ConfigFile::from_file(conf.config_path(interface))?; let conn = open_database_connection(interface, conf)?; let peers = DatabasePeer::list(&conn)? .into_iter() .map(|dp| dp.inner) .collect::<Vec<_>>(); let cidrs = DatabaseCidr::list(&conn)?; let cidr_tree = CidrTree::new(&cidrs[..]); if let Some((peer_request, keypair)) = shared::prompts::add_peer(&peers, &cidr_tree, &opts)? { let peer = DatabasePeer::create(&conn, peer_request)?; if cfg!(not(test)) && Device::get(interface, network.backend).is_ok() { // Update the current WireGuard interface with the new peers. DeviceUpdate::new() .add_peer((&*peer).into()) .apply(interface, network.backend) .map_err(|_| ServerError::WireGuard)?; println!("adding to WireGuard interface: {}", &*peer); } let server_peer = DatabasePeer::get(&conn, 1)?; prompts::save_peer_invitation( interface, &peer, &*server_peer, &cidr_tree, keypair, &SocketAddr::new(config.address, config.listen_port), &opts.save_config, )?; } else { println!("exited without creating peer."); } Ok(()) } fn rename_peer( interface: &InterfaceName, conf: &ServerConfig, opts: RenamePeerOpts, ) -> Result<(), Error> { let conn = open_database_connection(interface, conf)?; let peers = DatabasePeer::list(&conn)? .into_iter() .map(|dp| dp.inner) .collect::<Vec<_>>(); if let Some((peer_request, old_name)) = shared::prompts::rename_peer(&peers, &opts)? { let mut db_peer = DatabasePeer::list(&conn)? .into_iter() .find(|p| p.name == old_name) .ok_or( "Peer not found.")?; let _peer = db_peer.update(&conn, peer_request)?; } else { println!("exited without creating peer."); } Ok(()) } fn add_cidr( interface: &InterfaceName, conf: &ServerConfig, opts: AddCidrOpts, ) -> Result<(), Error> { let conn = open_database_connection(interface, conf)?; let cidrs = DatabaseCidr::list(&conn)?; if let Some(cidr_request) = shared::prompts::add_cidr(&cidrs, &opts)? 
{ let cidr = DatabaseCidr::create(&conn, cidr_request)?; printdoc!( " CIDR \"{cidr_name}\" added. Right now, peers within {cidr_name} can only see peers in the same CIDR, and in the special \"innernet-server\" CIDR that includes the innernet server peer. You'll need to add more associations for peers in different CIDRs to communicate. ", cidr_name = cidr.name.bold() ); } else { println!("exited without creating CIDR."); } Ok(()) } fn delete_cidr( interface: &InterfaceName, conf: &ServerConfig, args: DeleteCidrOpts, ) -> Result<(), Error> { println!("Fetching eligible CIDRs"); let conn = open_database_connection(interface, conf)?; let cidrs = DatabaseCidr::list(&conn)?; let peers = DatabasePeer::list(&conn)? .into_iter() .map(|dp| dp.inner) .collect::<Vec<_>>(); let cidr_id = prompts::delete_cidr(&cidrs, &peers, &args)?; println!("Deleting CIDR..."); let _ = DatabaseCidr::delete(&conn, cidr_id)?; println!("CIDR deleted."); Ok(()) } fn uninstall( interface: &InterfaceName, conf: &ServerConfig, network: NetworkOpt, ) -> Result<(), Error> { if Confirm::with_theme(&*prompts::THEME) .with_prompt(&format!( "Permanently delete network \"{}\"?", interface.as_str_lossy().yellow() )) .default(false) .interact()?
Ok(()) } fn spawn_endpoint_refresher(interface: InterfaceName, network: NetworkOpt) -> Endpoints { let endpoints = Arc::new(RwLock::new(HashMap::new())); tokio::task::spawn({ let endpoints = endpoints.clone(); async move { let mut interval = tokio::time::interval(Duration::from_secs(10)); loop { interval.tick().await; if let Ok(info) = Device::get(&interface, network.backend) { for peer in info.peers { if let Some(endpoint) = peer.config.endpoint { endpoints .write() .insert(peer.config.public_key.to_base64(), endpoint); } } } } } }); endpoints } fn spawn_expired_invite_sweeper(db: Db) { tokio::task::spawn(async move { let mut interval = tokio::time::interval(Duration::from_secs(10)); loop { interval.tick().await; match DatabasePeer::delete_expired_invites(&db.lock()) { Ok(deleted) if deleted > 0 => { log::info!("Deleted {} expired peer invitations.", deleted) }, Err(e) => log::error!("Failed to delete expired peer invitations: {}", e), _ => {}, } } }); } async fn serve( interface: InterfaceName, conf: &ServerConfig, network: NetworkOpt, ) -> Result<(), Error> { let config = ConfigFile::from_file(conf.config_path(&interface))?; let conn = open_database_connection(&interface, conf)?; let peers = DatabasePeer::list(&conn)?; let peer_configs = peers .iter() .map(|peer| peer.deref().into()) .collect::<Vec<PeerConfigBuilder>>(); log::info!("bringing up interface."); wg::up( &interface, &config.private_key, IpNetwork::new(config.address, config.network_cidr_prefix)?, Some(config.listen_port), None, network, )?; DeviceUpdate::new() .add_peers(&peer_configs) .apply(&interface, network.backend)?; log::info!("{} peers added to wireguard interface.", peers.len()); let public_key = wgctrl::Key::from_base64(&config.private_key)?.generate_public(); let db = Arc::new(Mutex::new(conn)); let endpoints = spawn_endpoint_refresher(interface, network); spawn_expired_invite_sweeper(db.clone()); let context = Context { db, endpoints, interface, public_key, backend: network.backend, }; log::info!("innernet-server {} starting.", VERSION); let listener = get_listener((config.address, config.listen_port).into(), &interface)?; let make_svc = hyper::service::make_service_fn(move |socket: &AddrStream| { let remote_addr = socket.remote_addr(); let context = context.clone(); async move { Ok::<_, http::Error>(hyper::service::service_fn(move |req: Request<Body>| { log::debug!("{} - {} {}", &remote_addr, req.method(), req.uri()); hyper_service(req, context.clone(), remote_addr) })) } }); let server = hyper::Server::from_tcp(listener)?.serve(make_svc); server.await?; Ok(()) } /// This function differs per OS, because different operating systems have /// opposing characteristics when binding to a specific IP address. /// On Linux, binding to a specific local IP address does *not* bind it to /// that IP's interface, allowing for spoofing attacks. /// /// See https://github.com/tonarino/innernet/issues/26 for more details. #[cfg(target_os = "linux")] fn get_listener(addr: SocketAddr, interface: &InterfaceName) -> Result<TcpListener, Error> { let listener = TcpListener::bind(&addr)?; listener.set_nonblocking(true)?; let sock = socket2::Socket::from(listener); sock.bind_device(Some(interface.as_str_lossy().as_bytes()))?; Ok(sock.into()) } /// BSD-likes do seem to bind to an interface when binding to an IP, /// according to the internet, but we may want to explicitly use /// IP_BOUND_IF in the future regardless. This isn't currently in /// the socket2 crate however, so we aren't currently using it. 
/// /// See https://github.com/tonarino/innernet/issues/26 for more details. #[cfg(not(target_os = "linux"))] fn get_listener(addr: SocketAddr, _interface: &InterfaceName) -> Result<TcpListener, Error> { let listener = TcpListener::bind(&addr)?; listener.set_nonblocking(true)?; Ok(listener) } pub(crate) async fn hyper_service( req: Request<Body>, context: Context, remote_addr: SocketAddr, ) -> Result<Response<Body>, http::Error> { // Break the path into components. let components: VecDeque<_> = req .uri() .path() .trim_start_matches('/') .split('/') .map(String::from) .collect(); routes(req, context, remote_addr, components) .await .or_else(TryInto::try_into) } async fn routes( req: Request<Body>, context: Context, remote_addr: SocketAddr, mut components: VecDeque<String>, ) -> Result<Response<Body>, ServerError> { // Must be "/v1/[something]" if components.pop_front().as_deref() != Some("v1") { Err(ServerError::NotFound) } else { let session = get_session(&req, context, remote_addr.ip())?; let component = components.pop_front(); match component.as_deref() { Some("user") => api::user::routes(req, components, session).await, Some("admin") => api::admin::routes(req, components, session).await, _ => Err(ServerError::NotFound), } } } fn get_session( req: &Request<Body>, context: Context, addr: IpAddr, ) -> Result<Session, ServerError> { let pubkey = req .headers() .get(INNERNET_PUBKEY_HEADER) .ok_or(ServerError::Unauthorized)?; let pubkey = pubkey.to_str().map_err(|_| ServerError::Unauthorized)?; let pubkey = Key::from_base64(&pubkey).map_err(|_| ServerError::Unauthorized)?; if pubkey.0.ct_eq(&context.public_key.0).into() { let peer = DatabasePeer::get_from_ip(&context.db.lock(), addr).map_err(|e| match e { rusqlite::Error::QueryReturnedNoRows => ServerError::Unauthorized, e => ServerError::Database(e), })?; if !peer.is_disabled { return Ok(Session { context, peer }); } } Err(ServerError::Unauthorized) } #[cfg(test)] mod tests { use super::*; use crate::test; use anyhow::Result; use hyper::StatusCode; use std::path::Path; #[test] fn test_init_wizard() -> Result<(), Error> { // This runs init_wizard(). let server = test::Server::new()?; assert!(Path::new(&server.wg_conf_path()).exists()); Ok(()) } #[tokio::test] async fn test_with_session_disguised_with_headers() -> Result<(), Error> { let server = test::Server::new()?; let req = Request::builder() .uri(format!("http://{}/v1/admin/peers", test::WG_MANAGE_PEER_IP)) .header("Forwarded", format!("for={}", test::ADMIN_PEER_IP)) .header("X-Forwarded-For", test::ADMIN_PEER_IP) .header("X-Real-IP", test::ADMIN_PEER_IP) .body(Body::empty()) .unwrap(); // Request from an unknown IP, trying to disguise as an admin using HTTP headers. let res = server.raw_request("10.80.80.80", req).await; // addr::remote() filter only look at remote_addr from TCP socket. // HTTP headers are not considered. This also means that innernet // server would not function behind an HTTP proxy. assert_eq!(res.status(), StatusCode::UNAUTHORIZED); Ok(()) } #[tokio::test] async fn test_incorrect_public_key() -> Result<(), Error> { let server = test::Server::new()?; let key = Key::generate_private().generate_public(); // Request from an unknown IP, trying to disguise as an admin using HTTP headers. 
let req = Request::builder() .uri(format!("http://{}/v1/admin/peers", test::WG_MANAGE_PEER_IP)) .header(shared::INNERNET_PUBKEY_HEADER, key.to_base64()) .body(Body::empty()) .unwrap(); let res = server.raw_request("10.80.80.80", req).await; // addr::remote() filter only look at remote_addr from TCP socket. // HTTP headers are not considered. This also means that innernet // server would not function behind an HTTP proxy. assert_eq!(res.status(), StatusCode::UNAUTHORIZED); Ok(()) } #[tokio::test] async fn test_unparseable_public_key() -> Result<(), Error> { let server = test::Server::new()?; let req = Request::builder() .uri(format!("http://{}/v1/admin/peers", test::WG_MANAGE_PEER_IP)) .header(shared::INNERNET_PUBKEY_HEADER, "!!!") .body(Body::empty()) .unwrap(); let res = server.raw_request("10.80.80.80", req).await; // addr::remote() filter only look at remote_addr from TCP socket. // HTTP headers are not considered. This also means that innernet // server would not function behind an HTTP proxy. assert_eq!(res.status(), StatusCode::UNAUTHORIZED); Ok(()) } }
{ println!("{} bringing down interface (if up).", "[*]".dimmed()); wg::down(interface, network.backend).ok(); let config = conf.config_path(interface); let data = conf.database_path(interface); std::fs::remove_file(&config) .with_path(&config) .map_err(|e| println!("[!] {}", e.to_string().yellow())) .ok(); std::fs::remove_file(&data) .with_path(&data) .map_err(|e| println!("[!] {}", e.to_string().yellow())) .ok(); println!( "{} network {} is uninstalled.", "[*]".dimmed(), interface.as_str_lossy().yellow() ); }
conditional_block
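The conditional_block middle above is uninstall's confirmed tear-down, where each file removal logs a warning but never aborts the rest of the cleanup. The same best-effort pattern, sketched in Python for contrast (the paths are hypothetical stand-ins):

import os

for path in ("/tmp/example-net.conf", "/tmp/example-net.db"):  # hypothetical paths
    try:
        os.remove(path)
    except OSError as e:
        print(f"[!] {e}")  # warn and continue, mirroring .map_err(...).ok() in the Rust above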
main.rs
use colored::*; use dialoguer::Confirm; use hyper::{http, server::conn::AddrStream, Body, Request, Response}; use indoc::printdoc; use ipnetwork::IpNetwork; use parking_lot::{Mutex, RwLock}; use rusqlite::Connection; use serde::{Deserialize, Serialize}; use shared::{ AddCidrOpts, AddPeerOpts, DeleteCidrOpts, IoErrorContext, NetworkOpt, RenamePeerOpts, INNERNET_PUBKEY_HEADER, }; use std::{ collections::{HashMap, VecDeque}, convert::TryInto, env, fs::File, io::prelude::*, net::{IpAddr, SocketAddr, TcpListener}, ops::Deref, path::{Path, PathBuf}, sync::Arc, time::Duration, }; use structopt::{clap::AppSettings, StructOpt}; use subtle::ConstantTimeEq; use wgctrl::{Backend, Device, DeviceUpdate, InterfaceName, Key, PeerConfigBuilder}; pub mod api; pub mod db; pub mod error; #[cfg(test)] mod test; pub mod util; mod initialize; use db::{DatabaseCidr, DatabasePeer}; pub use error::ServerError; use initialize::InitializeOpts; use shared::{prompts, wg, CidrTree, Error, Interface, SERVER_CONFIG_DIR, SERVER_DATABASE_DIR}; pub use shared::{Association, AssociationContents}; pub const VERSION: &str = env!("CARGO_PKG_VERSION"); #[derive(Debug, StructOpt)] #[structopt(name = "innernet-server", about, global_settings(&[AppSettings::ColoredHelp, AppSettings::DeriveDisplayOrder, AppSettings::VersionlessSubcommands, AppSettings::UnifiedHelpMessage]))] struct Opt { #[structopt(subcommand)] command: Command, #[structopt(flatten)] network: NetworkOpt, } #[derive(Debug, StructOpt)] enum Command { /// Create a new network. #[structopt(alias = "init")] New { #[structopt(flatten)] opts: InitializeOpts, }, /// Permanently uninstall a created network, rendering it unusable. Use with care. Uninstall { interface: Interface }, /// Serve the coordinating server for an existing network. Serve { interface: Interface, #[structopt(flatten)] network: NetworkOpt, }, /// Add a peer to an existing network. AddPeer { interface: Interface, #[structopt(flatten)] args: AddPeerOpts, }, /// Rename an existing peer. RenamePeer { interface: Interface, #[structopt(flatten)] args: RenamePeerOpts, }, /// Add a new CIDR to an existing network. AddCidr { interface: Interface, #[structopt(flatten)] args: AddCidrOpts, }, /// Delete a CIDR. 
DeleteCidr { interface: Interface, #[structopt(flatten)] args: DeleteCidrOpts, }, /// Generate shell completion scripts Completions { #[structopt(possible_values = &structopt::clap::Shell::variants(), case_insensitive = true)] shell: structopt::clap::Shell, }, } pub type Db = Arc<Mutex<Connection>>; pub type Endpoints = Arc<RwLock<HashMap<String, SocketAddr>>>; #[derive(Clone)] pub struct Context { pub db: Db, pub endpoints: Arc<RwLock<HashMap<String, SocketAddr>>>, pub interface: InterfaceName, pub backend: Backend, pub public_key: Key, } pub struct Session { pub context: Context, pub peer: DatabasePeer, } impl Session { pub fn admin_capable(&self) -> bool { self.peer.is_admin && self.user_capable() } pub fn user_capable(&self) -> bool { !self.peer.is_disabled && self.peer.is_redeemed } pub fn redeemable(&self) -> bool { !self.peer.is_disabled && !self.peer.is_redeemed } } #[derive(Deserialize, Serialize, Debug)] #[serde(rename_all = "kebab-case")] pub struct ConfigFile { /// The server's WireGuard key pub private_key: String, /// The listen port of the server pub listen_port: u16, /// The internal WireGuard IP address assigned to the server pub address: IpAddr, /// The CIDR prefix of the WireGuard network pub network_cidr_prefix: u8, } impl ConfigFile { pub fn write_to_path<P: AsRef<Path>>(&self, path: P) -> Result<(), Error>
pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self, Error> { let path = path.as_ref(); let file = File::open(path).with_path(path)?; if shared::chmod(&file, 0o600)? { println!( "{} updated permissions for {} to 0600.", "[!]".yellow(), path.display() ); } Ok(toml::from_slice(&std::fs::read(&path).with_path(path)?)?) } } #[derive(Clone, Debug, Default)] pub struct ServerConfig { wg_manage_dir_override: Option<PathBuf>, wg_dir_override: Option<PathBuf>, } impl ServerConfig { fn database_dir(&self) -> &Path { self.wg_manage_dir_override .as_deref() .unwrap_or(*SERVER_DATABASE_DIR) } fn database_path(&self, interface: &InterfaceName) -> PathBuf { PathBuf::new() .join(self.database_dir()) .join(interface.to_string()) .with_extension("db") } fn config_dir(&self) -> &Path { self.wg_dir_override .as_deref() .unwrap_or(*SERVER_CONFIG_DIR) } fn config_path(&self, interface: &InterfaceName) -> PathBuf { PathBuf::new() .join(self.config_dir()) .join(interface.to_string()) .with_extension("conf") } } #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { if env::var_os("RUST_LOG").is_none() { // Set some default log settings. env::set_var("RUST_LOG", "warn,warp=info,wg_manage_server=info"); } pretty_env_logger::init(); let opt = Opt::from_args(); if unsafe { libc::getuid() } != 0 && !matches!(opt.command, Command::Completions { .. }) { return Err("innernet-server must run as root.".into()); } let conf = ServerConfig::default(); match opt.command { Command::New { opts } => { if let Err(e) = initialize::init_wizard(&conf, opts) { eprintln!("{}: {}.", "creation failed".red(), e); std::process::exit(1); } }, Command::Uninstall { interface } => uninstall(&interface, &conf, opt.network)?, Command::Serve { interface, network: routing, } => serve(*interface, &conf, routing).await?, Command::AddPeer { interface, args } => add_peer(&interface, &conf, args, opt.network)?, Command::RenamePeer { interface, args } => rename_peer(&interface, &conf, args)?, Command::AddCidr { interface, args } => add_cidr(&interface, &conf, args)?, Command::DeleteCidr { interface, args } => delete_cidr(&interface, &conf, args)?, Command::Completions { shell } => { Opt::clap().gen_completions_to("innernet-server", shell, &mut std::io::stdout()); std::process::exit(0); }, } Ok(()) } fn open_database_connection( interface: &InterfaceName, conf: &ServerConfig, ) -> Result<rusqlite::Connection, Box<dyn std::error::Error>> { let database_path = conf.database_path(&interface); if !Path::new(&database_path).exists() { return Err(format!( "no database file found at {}", database_path.to_string_lossy() ) .into()); } let conn = Connection::open(&database_path)?; // Foreign key constraints aren't on in SQLite by default. Enable. conn.pragma_update(None, "foreign_keys", &1)?; db::auto_migrate(&conn)?; Ok(conn) } fn add_peer( interface: &InterfaceName, conf: &ServerConfig, opts: AddPeerOpts, network: NetworkOpt, ) -> Result<(), Error> { let config = ConfigFile::from_file(conf.config_path(interface))?; let conn = open_database_connection(interface, conf)?; let peers = DatabasePeer::list(&conn)? .into_iter() .map(|dp| dp.inner) .collect::<Vec<_>>(); let cidrs = DatabaseCidr::list(&conn)?; let cidr_tree = CidrTree::new(&cidrs[..]); if let Some((peer_request, keypair)) = shared::prompts::add_peer(&peers, &cidr_tree, &opts)? { let peer = DatabasePeer::create(&conn, peer_request)?; if cfg!(not(test)) && Device::get(interface, network.backend).is_ok() { // Update the current WireGuard interface with the new peers. 
DeviceUpdate::new() .add_peer((&*peer).into()) .apply(interface, network.backend) .map_err(|_| ServerError::WireGuard)?; println!("adding to WireGuard interface: {}", &*peer); } let server_peer = DatabasePeer::get(&conn, 1)?; prompts::save_peer_invitation( interface, &peer, &*server_peer, &cidr_tree, keypair, &SocketAddr::new(config.address, config.listen_port), &opts.save_config, )?; } else { println!("exited without creating peer."); } Ok(()) } fn rename_peer( interface: &InterfaceName, conf: &ServerConfig, opts: RenamePeerOpts, ) -> Result<(), Error> { let conn = open_database_connection(interface, conf)?; let peers = DatabasePeer::list(&conn)? .into_iter() .map(|dp| dp.inner) .collect::<Vec<_>>(); if let Some((peer_request, old_name)) = shared::prompts::rename_peer(&peers, &opts)? { let mut db_peer = DatabasePeer::list(&conn)? .into_iter() .find(|p| p.name == old_name) .ok_or( "Peer not found.")?; let _peer = db_peer.update(&conn, peer_request)?; } else { println!("exited without creating peer."); } Ok(()) } fn add_cidr( interface: &InterfaceName, conf: &ServerConfig, opts: AddCidrOpts, ) -> Result<(), Error> { let conn = open_database_connection(interface, conf)?; let cidrs = DatabaseCidr::list(&conn)?; if let Some(cidr_request) = shared::prompts::add_cidr(&cidrs, &opts)? { let cidr = DatabaseCidr::create(&conn, cidr_request)?; printdoc!( " CIDR \"{cidr_name}\" added. Right now, peers within {cidr_name} can only see peers in the same CIDR, and in the special \"innernet-server\" CIDR that includes the innernet server peer. You'll need to add more associations for peers in diffent CIDRs to communicate. ", cidr_name = cidr.name.bold() ); } else { println!("exited without creating CIDR."); } Ok(()) } fn delete_cidr( interface: &InterfaceName, conf: &ServerConfig, args: DeleteCidrOpts, ) -> Result<(), Error> { println!("Fetching eligible CIDRs"); let conn = open_database_connection(interface, conf)?; let cidrs = DatabaseCidr::list(&conn)?; let peers = DatabasePeer::list(&conn)? .into_iter() .map(|dp| dp.inner) .collect::<Vec<_>>(); let cidr_id = prompts::delete_cidr(&cidrs, &peers, &args)?; println!("Deleting CIDR..."); let _ = DatabaseCidr::delete(&conn, cidr_id)?; println!("CIDR deleted."); Ok(()) } fn uninstall( interface: &InterfaceName, conf: &ServerConfig, network: NetworkOpt, ) -> Result<(), Error> { if Confirm::with_theme(&*prompts::THEME) .with_prompt(&format!( "Permanently delete network \"{}\"?", interface.as_str_lossy().yellow() )) .default(false) .interact()? { println!("{} bringing down interface (if up).", "[*]".dimmed()); wg::down(interface, network.backend).ok(); let config = conf.config_path(interface); let data = conf.database_path(interface); std::fs::remove_file(&config) .with_path(&config) .map_err(|e| println!("[!] {}", e.to_string().yellow())) .ok(); std::fs::remove_file(&data) .with_path(&data) .map_err(|e| println!("[!] 
{}", e.to_string().yellow())) .ok(); println!( "{} network {} is uninstalled.", "[*]".dimmed(), interface.as_str_lossy().yellow() ); } Ok(()) } fn spawn_endpoint_refresher(interface: InterfaceName, network: NetworkOpt) -> Endpoints { let endpoints = Arc::new(RwLock::new(HashMap::new())); tokio::task::spawn({ let endpoints = endpoints.clone(); async move { let mut interval = tokio::time::interval(Duration::from_secs(10)); loop { interval.tick().await; if let Ok(info) = Device::get(&interface, network.backend) { for peer in info.peers { if let Some(endpoint) = peer.config.endpoint { endpoints .write() .insert(peer.config.public_key.to_base64(), endpoint); } } } } } }); endpoints } fn spawn_expired_invite_sweeper(db: Db) { tokio::task::spawn(async move { let mut interval = tokio::time::interval(Duration::from_secs(10)); loop { interval.tick().await; match DatabasePeer::delete_expired_invites(&db.lock()) { Ok(deleted) if deleted > 0 => { log::info!("Deleted {} expired peer invitations.", deleted) }, Err(e) => log::error!("Failed to delete expired peer invitations: {}", e), _ => {}, } } }); } async fn serve( interface: InterfaceName, conf: &ServerConfig, network: NetworkOpt, ) -> Result<(), Error> { let config = ConfigFile::from_file(conf.config_path(&interface))?; let conn = open_database_connection(&interface, conf)?; let peers = DatabasePeer::list(&conn)?; let peer_configs = peers .iter() .map(|peer| peer.deref().into()) .collect::<Vec<PeerConfigBuilder>>(); log::info!("bringing up interface."); wg::up( &interface, &config.private_key, IpNetwork::new(config.address, config.network_cidr_prefix)?, Some(config.listen_port), None, network, )?; DeviceUpdate::new() .add_peers(&peer_configs) .apply(&interface, network.backend)?; log::info!("{} peers added to wireguard interface.", peers.len()); let public_key = wgctrl::Key::from_base64(&config.private_key)?.generate_public(); let db = Arc::new(Mutex::new(conn)); let endpoints = spawn_endpoint_refresher(interface, network); spawn_expired_invite_sweeper(db.clone()); let context = Context { db, endpoints, interface, public_key, backend: network.backend, }; log::info!("innernet-server {} starting.", VERSION); let listener = get_listener((config.address, config.listen_port).into(), &interface)?; let make_svc = hyper::service::make_service_fn(move |socket: &AddrStream| { let remote_addr = socket.remote_addr(); let context = context.clone(); async move { Ok::<_, http::Error>(hyper::service::service_fn(move |req: Request<Body>| { log::debug!("{} - {} {}", &remote_addr, req.method(), req.uri()); hyper_service(req, context.clone(), remote_addr) })) } }); let server = hyper::Server::from_tcp(listener)?.serve(make_svc); server.await?; Ok(()) } /// This function differs per OS, because different operating systems have /// opposing characteristics when binding to a specific IP address. /// On Linux, binding to a specific local IP address does *not* bind it to /// that IP's interface, allowing for spoofing attacks. /// /// See https://github.com/tonarino/innernet/issues/26 for more details. 
#[cfg(target_os = "linux")] fn get_listener(addr: SocketAddr, interface: &InterfaceName) -> Result<TcpListener, Error> { let listener = TcpListener::bind(&addr)?; listener.set_nonblocking(true)?; let sock = socket2::Socket::from(listener); sock.bind_device(Some(interface.as_str_lossy().as_bytes()))?; Ok(sock.into()) } /// BSD-likes do seem to bind to an interface when binding to an IP, /// according to the internet, but we may want to explicitly use /// IP_BOUND_IF in the future regardless. This isn't currently in /// the socket2 crate however, so we aren't currently using it. /// /// See https://github.com/tonarino/innernet/issues/26 for more details. #[cfg(not(target_os = "linux"))] fn get_listener(addr: SocketAddr, _interface: &InterfaceName) -> Result<TcpListener, Error> { let listener = TcpListener::bind(&addr)?; listener.set_nonblocking(true)?; Ok(listener) } pub(crate) async fn hyper_service( req: Request<Body>, context: Context, remote_addr: SocketAddr, ) -> Result<Response<Body>, http::Error> { // Break the path into components. let components: VecDeque<_> = req .uri() .path() .trim_start_matches('/') .split('/') .map(String::from) .collect(); routes(req, context, remote_addr, components) .await .or_else(TryInto::try_into) } async fn routes( req: Request<Body>, context: Context, remote_addr: SocketAddr, mut components: VecDeque<String>, ) -> Result<Response<Body>, ServerError> { // Must be "/v1/[something]" if components.pop_front().as_deref() != Some("v1") { Err(ServerError::NotFound) } else { let session = get_session(&req, context, remote_addr.ip())?; let component = components.pop_front(); match component.as_deref() { Some("user") => api::user::routes(req, components, session).await, Some("admin") => api::admin::routes(req, components, session).await, _ => Err(ServerError::NotFound), } } } fn get_session( req: &Request<Body>, context: Context, addr: IpAddr, ) -> Result<Session, ServerError> { let pubkey = req .headers() .get(INNERNET_PUBKEY_HEADER) .ok_or(ServerError::Unauthorized)?; let pubkey = pubkey.to_str().map_err(|_| ServerError::Unauthorized)?; let pubkey = Key::from_base64(&pubkey).map_err(|_| ServerError::Unauthorized)?; if pubkey.0.ct_eq(&context.public_key.0).into() { let peer = DatabasePeer::get_from_ip(&context.db.lock(), addr).map_err(|e| match e { rusqlite::Error::QueryReturnedNoRows => ServerError::Unauthorized, e => ServerError::Database(e), })?; if !peer.is_disabled { return Ok(Session { context, peer }); } } Err(ServerError::Unauthorized) } #[cfg(test)] mod tests { use super::*; use crate::test; use anyhow::Result; use hyper::StatusCode; use std::path::Path; #[test] fn test_init_wizard() -> Result<(), Error> { // This runs init_wizard(). let server = test::Server::new()?; assert!(Path::new(&server.wg_conf_path()).exists()); Ok(()) } #[tokio::test] async fn test_with_session_disguised_with_headers() -> Result<(), Error> { let server = test::Server::new()?; let req = Request::builder() .uri(format!("http://{}/v1/admin/peers", test::WG_MANAGE_PEER_IP)) .header("Forwarded", format!("for={}", test::ADMIN_PEER_IP)) .header("X-Forwarded-For", test::ADMIN_PEER_IP) .header("X-Real-IP", test::ADMIN_PEER_IP) .body(Body::empty()) .unwrap(); // Request from an unknown IP, trying to disguise as an admin using HTTP headers. let res = server.raw_request("10.80.80.80", req).await; // addr::remote() filter only look at remote_addr from TCP socket. // HTTP headers are not considered. This also means that innernet // server would not function behind an HTTP proxy. 
assert_eq!(res.status(), StatusCode::UNAUTHORIZED); Ok(()) } #[tokio::test] async fn test_incorrect_public_key() -> Result<(), Error> { let server = test::Server::new()?; let key = Key::generate_private().generate_public(); // Request from an unknown IP with a well-formed but incorrect public key. let req = Request::builder() .uri(format!("http://{}/v1/admin/peers", test::WG_MANAGE_PEER_IP)) .header(shared::INNERNET_PUBKEY_HEADER, key.to_base64()) .body(Body::empty()) .unwrap(); let res = server.raw_request("10.80.80.80", req).await; // addr::remote() filter only looks at remote_addr from the TCP socket. // HTTP headers are not considered. This also means that innernet // server would not function behind an HTTP proxy. assert_eq!(res.status(), StatusCode::UNAUTHORIZED); Ok(()) } #[tokio::test] async fn test_unparseable_public_key() -> Result<(), Error> { let server = test::Server::new()?; let req = Request::builder() .uri(format!("http://{}/v1/admin/peers", test::WG_MANAGE_PEER_IP)) .header(shared::INNERNET_PUBKEY_HEADER, "!!!") .body(Body::empty()) .unwrap(); let res = server.raw_request("10.80.80.80", req).await; // addr::remote() filter only looks at remote_addr from the TCP socket. // HTTP headers are not considered. This also means that innernet // server would not function behind an HTTP proxy. assert_eq!(res.status(), StatusCode::UNAUTHORIZED); Ok(()) } }
{ let mut invitation_file = File::create(&path).with_path(&path)?; shared::chmod(&invitation_file, 0o600)?; invitation_file .write_all(toml::to_string(self).unwrap().as_bytes()) .with_path(path)?; Ok(()) }
identifier_body
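The `identifier_body` record above masks the body of `ConfigFile::write_to_path`, and the `middle` column restores it: create the file, restrict it to mode 0600, then write the TOML-serialized config. Below is a minimal std-only sketch of the same create → chmod → write pattern, assuming a Unix target; `write_secret_config` and its arguments are illustrative names, and innernet's `shared::chmod` / `.with_path(...)` helpers are replaced with plain std calls.

// Sketch of the pattern in the `middle` above (Unix-only).
use std::fs::{self, File, Permissions};
use std::io::Write;
use std::os::unix::fs::PermissionsExt;

fn write_secret_config(path: &str, contents: &str) -> std::io::Result<()> {
    let mut file = File::create(path)?;
    // Restrict the file to owner read/write before any secrets are written.
    file.set_permissions(Permissions::from_mode(0o600))?;
    file.write_all(contents.as_bytes())
}

fn main() -> std::io::Result<()> {
    write_secret_config("/tmp/example.conf", "private-key = \"...\"\n")?;
    let mode = fs::metadata("/tmp/example.conf")?.permissions().mode();
    println!("mode: {:o}", mode & 0o777); // expect 600
    Ok(())
}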
main.rs
use colored::*; use dialoguer::Confirm; use hyper::{http, server::conn::AddrStream, Body, Request, Response}; use indoc::printdoc; use ipnetwork::IpNetwork; use parking_lot::{Mutex, RwLock}; use rusqlite::Connection; use serde::{Deserialize, Serialize}; use shared::{ AddCidrOpts, AddPeerOpts, DeleteCidrOpts, IoErrorContext, NetworkOpt, RenamePeerOpts, INNERNET_PUBKEY_HEADER, }; use std::{ collections::{HashMap, VecDeque}, convert::TryInto, env, fs::File, io::prelude::*, net::{IpAddr, SocketAddr, TcpListener}, ops::Deref, path::{Path, PathBuf}, sync::Arc, time::Duration, }; use structopt::{clap::AppSettings, StructOpt}; use subtle::ConstantTimeEq; use wgctrl::{Backend, Device, DeviceUpdate, InterfaceName, Key, PeerConfigBuilder}; pub mod api; pub mod db; pub mod error; #[cfg(test)] mod test; pub mod util; mod initialize; use db::{DatabaseCidr, DatabasePeer}; pub use error::ServerError; use initialize::InitializeOpts; use shared::{prompts, wg, CidrTree, Error, Interface, SERVER_CONFIG_DIR, SERVER_DATABASE_DIR}; pub use shared::{Association, AssociationContents}; pub const VERSION: &str = env!("CARGO_PKG_VERSION"); #[derive(Debug, StructOpt)] #[structopt(name = "innernet-server", about, global_settings(&[AppSettings::ColoredHelp, AppSettings::DeriveDisplayOrder, AppSettings::VersionlessSubcommands, AppSettings::UnifiedHelpMessage]))] struct Opt { #[structopt(subcommand)] command: Command, #[structopt(flatten)] network: NetworkOpt, } #[derive(Debug, StructOpt)] enum Command { /// Create a new network. #[structopt(alias = "init")] New { #[structopt(flatten)] opts: InitializeOpts, }, /// Permanently uninstall a created network, rendering it unusable. Use with care. Uninstall { interface: Interface }, /// Serve the coordinating server for an existing network. Serve { interface: Interface, #[structopt(flatten)] network: NetworkOpt, }, /// Add a peer to an existing network. AddPeer { interface: Interface, #[structopt(flatten)] args: AddPeerOpts, }, /// Rename an existing peer. RenamePeer { interface: Interface, #[structopt(flatten)] args: RenamePeerOpts, }, /// Add a new CIDR to an existing network. AddCidr { interface: Interface, #[structopt(flatten)] args: AddCidrOpts, }, /// Delete a CIDR. 
DeleteCidr { interface: Interface, #[structopt(flatten)] args: DeleteCidrOpts, }, /// Generate shell completion scripts Completions { #[structopt(possible_values = &structopt::clap::Shell::variants(), case_insensitive = true)] shell: structopt::clap::Shell, }, } pub type Db = Arc<Mutex<Connection>>; pub type Endpoints = Arc<RwLock<HashMap<String, SocketAddr>>>; #[derive(Clone)] pub struct Context { pub db: Db, pub endpoints: Arc<RwLock<HashMap<String, SocketAddr>>>, pub interface: InterfaceName, pub backend: Backend, pub public_key: Key, } pub struct Session { pub context: Context, pub peer: DatabasePeer, } impl Session { pub fn admin_capable(&self) -> bool { self.peer.is_admin && self.user_capable() } pub fn user_capable(&self) -> bool { !self.peer.is_disabled && self.peer.is_redeemed } pub fn redeemable(&self) -> bool { !self.peer.is_disabled && !self.peer.is_redeemed } } #[derive(Deserialize, Serialize, Debug)] #[serde(rename_all = "kebab-case")] pub struct ConfigFile { /// The server's WireGuard key pub private_key: String, /// The listen port of the server pub listen_port: u16, /// The internal WireGuard IP address assigned to the server pub address: IpAddr, /// The CIDR prefix of the WireGuard network pub network_cidr_prefix: u8, } impl ConfigFile { pub fn write_to_path<P: AsRef<Path>>(&self, path: P) -> Result<(), Error> { let mut invitation_file = File::create(&path).with_path(&path)?; shared::chmod(&invitation_file, 0o600)?; invitation_file .write_all(toml::to_string(self).unwrap().as_bytes()) .with_path(path)?; Ok(()) } pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self, Error> { let path = path.as_ref(); let file = File::open(path).with_path(path)?; if shared::chmod(&file, 0o600)? { println!( "{} updated permissions for {} to 0600.", "[!]".yellow(), path.display() ); } Ok(toml::from_slice(&std::fs::read(&path).with_path(path)?)?) } } #[derive(Clone, Debug, Default)] pub struct ServerConfig { wg_manage_dir_override: Option<PathBuf>, wg_dir_override: Option<PathBuf>, } impl ServerConfig { fn database_dir(&self) -> &Path { self.wg_manage_dir_override .as_deref() .unwrap_or(*SERVER_DATABASE_DIR) } fn database_path(&self, interface: &InterfaceName) -> PathBuf { PathBuf::new() .join(self.database_dir()) .join(interface.to_string()) .with_extension("db") } fn config_dir(&self) -> &Path { self.wg_dir_override .as_deref() .unwrap_or(*SERVER_CONFIG_DIR) } fn config_path(&self, interface: &InterfaceName) -> PathBuf { PathBuf::new() .join(self.config_dir()) .join(interface.to_string()) .with_extension("conf") } } #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { if env::var_os("RUST_LOG").is_none() { // Set some default log settings. env::set_var("RUST_LOG", "warn,warp=info,wg_manage_server=info"); } pretty_env_logger::init(); let opt = Opt::from_args(); if unsafe { libc::getuid() } != 0 && !matches!(opt.command, Command::Completions { .. 
}) { return Err("innernet-server must run as root.".into()); } let conf = ServerConfig::default(); match opt.command { Command::New { opts } => { if let Err(e) = initialize::init_wizard(&conf, opts) { eprintln!("{}: {}.", "creation failed".red(), e); std::process::exit(1); } }, Command::Uninstall { interface } => uninstall(&interface, &conf, opt.network)?, Command::Serve { interface, network: routing, } => serve(*interface, &conf, routing).await?, Command::AddPeer { interface, args } => add_peer(&interface, &conf, args, opt.network)?, Command::RenamePeer { interface, args } => rename_peer(&interface, &conf, args)?, Command::AddCidr { interface, args } => add_cidr(&interface, &conf, args)?, Command::DeleteCidr { interface, args } => delete_cidr(&interface, &conf, args)?, Command::Completions { shell } => { Opt::clap().gen_completions_to("innernet-server", shell, &mut std::io::stdout()); std::process::exit(0); }, } Ok(()) } fn open_database_connection( interface: &InterfaceName, conf: &ServerConfig, ) -> Result<rusqlite::Connection, Box<dyn std::error::Error>> { let database_path = conf.database_path(&interface); if !Path::new(&database_path).exists() { return Err(format!( "no database file found at {}", database_path.to_string_lossy() ) .into()); } let conn = Connection::open(&database_path)?; // Foreign key constraints aren't on in SQLite by default. Enable. conn.pragma_update(None, "foreign_keys", &1)?; db::auto_migrate(&conn)?; Ok(conn) } fn add_peer( interface: &InterfaceName, conf: &ServerConfig, opts: AddPeerOpts, network: NetworkOpt, ) -> Result<(), Error> { let config = ConfigFile::from_file(conf.config_path(interface))?; let conn = open_database_connection(interface, conf)?; let peers = DatabasePeer::list(&conn)? .into_iter() .map(|dp| dp.inner) .collect::<Vec<_>>(); let cidrs = DatabaseCidr::list(&conn)?; let cidr_tree = CidrTree::new(&cidrs[..]); if let Some((peer_request, keypair)) = shared::prompts::add_peer(&peers, &cidr_tree, &opts)? { let peer = DatabasePeer::create(&conn, peer_request)?; if cfg!(not(test)) && Device::get(interface, network.backend).is_ok() { // Update the current WireGuard interface with the new peers. DeviceUpdate::new() .add_peer((&*peer).into()) .apply(interface, network.backend) .map_err(|_| ServerError::WireGuard)?; println!("adding to WireGuard interface: {}", &*peer); } let server_peer = DatabasePeer::get(&conn, 1)?; prompts::save_peer_invitation( interface, &peer, &*server_peer, &cidr_tree, keypair, &SocketAddr::new(config.address, config.listen_port), &opts.save_config, )?; } else { println!("exited without creating peer."); } Ok(()) } fn rename_peer( interface: &InterfaceName, conf: &ServerConfig, opts: RenamePeerOpts, ) -> Result<(), Error> { let conn = open_database_connection(interface, conf)?; let peers = DatabasePeer::list(&conn)? .into_iter() .map(|dp| dp.inner) .collect::<Vec<_>>(); if let Some((peer_request, old_name)) = shared::prompts::rename_peer(&peers, &opts)? { let mut db_peer = DatabasePeer::list(&conn)? .into_iter() .find(|p| p.name == old_name) .ok_or( "Peer not found.")?; let _peer = db_peer.update(&conn, peer_request)?; } else { println!("exited without creating peer."); } Ok(()) } fn add_cidr( interface: &InterfaceName, conf: &ServerConfig, opts: AddCidrOpts, ) -> Result<(), Error> { let conn = open_database_connection(interface, conf)?; let cidrs = DatabaseCidr::list(&conn)?; if let Some(cidr_request) = shared::prompts::add_cidr(&cidrs, &opts)? 
{ let cidr = DatabaseCidr::create(&conn, cidr_request)?; printdoc!( " CIDR \"{cidr_name}\" added. Right now, peers within {cidr_name} can only see peers in the same CIDR, and in the special \"innernet-server\" CIDR that includes the innernet server peer. You'll need to add more associations for peers in diffent CIDRs to communicate. ", cidr_name = cidr.name.bold() ); } else { println!("exited without creating CIDR."); } Ok(()) } fn delete_cidr( interface: &InterfaceName, conf: &ServerConfig, args: DeleteCidrOpts, ) -> Result<(), Error> { println!("Fetching eligible CIDRs"); let conn = open_database_connection(interface, conf)?; let cidrs = DatabaseCidr::list(&conn)?; let peers = DatabasePeer::list(&conn)? .into_iter() .map(|dp| dp.inner) .collect::<Vec<_>>(); let cidr_id = prompts::delete_cidr(&cidrs, &peers, &args)?; println!("Deleting CIDR..."); let _ = DatabaseCidr::delete(&conn, cidr_id)?; println!("CIDR deleted."); Ok(()) } fn uninstall( interface: &InterfaceName, conf: &ServerConfig, network: NetworkOpt, ) -> Result<(), Error> { if Confirm::with_theme(&*prompts::THEME) .with_prompt(&format!( "Permanently delete network \"{}\"?", interface.as_str_lossy().yellow() )) .default(false) .interact()? { println!("{} bringing down interface (if up).", "[*]".dimmed()); wg::down(interface, network.backend).ok(); let config = conf.config_path(interface); let data = conf.database_path(interface); std::fs::remove_file(&config) .with_path(&config) .map_err(|e| println!("[!] {}", e.to_string().yellow())) .ok(); std::fs::remove_file(&data) .with_path(&data) .map_err(|e| println!("[!] {}", e.to_string().yellow())) .ok(); println!( "{} network {} is uninstalled.", "[*]".dimmed(), interface.as_str_lossy().yellow() ); } Ok(()) } fn spawn_endpoint_refresher(interface: InterfaceName, network: NetworkOpt) -> Endpoints { let endpoints = Arc::new(RwLock::new(HashMap::new())); tokio::task::spawn({ let endpoints = endpoints.clone(); async move { let mut interval = tokio::time::interval(Duration::from_secs(10)); loop { interval.tick().await; if let Ok(info) = Device::get(&interface, network.backend) { for peer in info.peers { if let Some(endpoint) = peer.config.endpoint { endpoints .write() .insert(peer.config.public_key.to_base64(), endpoint); } } } } } }); endpoints } fn spawn_expired_invite_sweeper(db: Db) { tokio::task::spawn(async move { let mut interval = tokio::time::interval(Duration::from_secs(10)); loop { interval.tick().await; match DatabasePeer::delete_expired_invites(&db.lock()) { Ok(deleted) if deleted > 0 => { log::info!("Deleted {} expired peer invitations.", deleted) }, Err(e) => log::error!("Failed to delete expired peer invitations: {}", e), _ => {}, } } }); } async fn serve( interface: InterfaceName, conf: &ServerConfig, network: NetworkOpt, ) -> Result<(), Error> { let config = ConfigFile::from_file(conf.config_path(&interface))?; let conn = open_database_connection(&interface, conf)?; let peers = DatabasePeer::list(&conn)?; let peer_configs = peers .iter() .map(|peer| peer.deref().into()) .collect::<Vec<PeerConfigBuilder>>(); log::info!("bringing up interface."); wg::up( &interface, &config.private_key, IpNetwork::new(config.address, config.network_cidr_prefix)?, Some(config.listen_port), None, network, )?; DeviceUpdate::new() .add_peers(&peer_configs) .apply(&interface, network.backend)?; log::info!("{} peers added to wireguard interface.", peers.len()); let public_key = wgctrl::Key::from_base64(&config.private_key)?.generate_public(); let db = Arc::new(Mutex::new(conn)); let endpoints = 
spawn_endpoint_refresher(interface, network); spawn_expired_invite_sweeper(db.clone()); let context = Context { db, endpoints, interface, public_key, backend: network.backend, }; log::info!("innernet-server {} starting.", VERSION); let listener = get_listener((config.address, config.listen_port).into(), &interface)?; let make_svc = hyper::service::make_service_fn(move |socket: &AddrStream| { let remote_addr = socket.remote_addr(); let context = context.clone(); async move { Ok::<_, http::Error>(hyper::service::service_fn(move |req: Request<Body>| { log::debug!("{} - {} {}", &remote_addr, req.method(), req.uri()); hyper_service(req, context.clone(), remote_addr) })) } }); let server = hyper::Server::from_tcp(listener)?.serve(make_svc); server.await?; Ok(()) } /// This function differs per OS, because different operating systems have /// opposing characteristics when binding to a specific IP address. /// On Linux, binding to a specific local IP address does *not* bind it to /// that IP's interface, allowing for spoofing attacks. /// /// See https://github.com/tonarino/innernet/issues/26 for more details. #[cfg(target_os = "linux")] fn get_listener(addr: SocketAddr, interface: &InterfaceName) -> Result<TcpListener, Error> { let listener = TcpListener::bind(&addr)?; listener.set_nonblocking(true)?; let sock = socket2::Socket::from(listener); sock.bind_device(Some(interface.as_str_lossy().as_bytes()))?; Ok(sock.into()) } /// BSD-likes do seem to bind to an interface when binding to an IP, /// according to the internet, but we may want to explicitly use /// IP_BOUND_IF in the future regardless. This isn't currently in /// the socket2 crate however, so we aren't currently using it. /// /// See https://github.com/tonarino/innernet/issues/26 for more details. #[cfg(not(target_os = "linux"))] fn get_listener(addr: SocketAddr, _interface: &InterfaceName) -> Result<TcpListener, Error> { let listener = TcpListener::bind(&addr)?; listener.set_nonblocking(true)?; Ok(listener) } pub(crate) async fn hyper_service( req: Request<Body>, context: Context, remote_addr: SocketAddr, ) -> Result<Response<Body>, http::Error> { // Break the path into components. 
let components: VecDeque<_> = req .uri() .path() .trim_start_matches('/') .split('/') .map(String::from) .collect(); routes(req, context, remote_addr, components) .await .or_else(TryInto::try_into) } async fn routes( req: Request<Body>, context: Context, remote_addr: SocketAddr, mut components: VecDeque<String>, ) -> Result<Response<Body>, ServerError> { // Must be "/v1/[something]" if components.pop_front().as_deref() != Some("v1") { Err(ServerError::NotFound) } else { let session = get_session(&req, context, remote_addr.ip())?; let component = components.pop_front(); match component.as_deref() { Some("user") => api::user::routes(req, components, session).await, Some("admin") => api::admin::routes(req, components, session).await, _ => Err(ServerError::NotFound), } } } fn get_session( req: &Request<Body>, context: Context, addr: IpAddr, ) -> Result<Session, ServerError> { let pubkey = req .headers() .get(INNERNET_PUBKEY_HEADER) .ok_or(ServerError::Unauthorized)?; let pubkey = pubkey.to_str().map_err(|_| ServerError::Unauthorized)?; let pubkey = Key::from_base64(&pubkey).map_err(|_| ServerError::Unauthorized)?; if pubkey.0.ct_eq(&context.public_key.0).into() { let peer = DatabasePeer::get_from_ip(&context.db.lock(), addr).map_err(|e| match e { rusqlite::Error::QueryReturnedNoRows => ServerError::Unauthorized, e => ServerError::Database(e), })?; if !peer.is_disabled { return Ok(Session { context, peer }); } } Err(ServerError::Unauthorized) } #[cfg(test)] mod tests { use super::*; use crate::test; use anyhow::Result; use hyper::StatusCode; use std::path::Path; #[test] fn test_init_wizard() -> Result<(), Error> { // This runs init_wizard(). let server = test::Server::new()?; assert!(Path::new(&server.wg_conf_path()).exists()); Ok(()) } #[tokio::test] async fn test_with_session_disguised_with_headers() -> Result<(), Error> { let server = test::Server::new()?; let req = Request::builder() .uri(format!("http://{}/v1/admin/peers", test::WG_MANAGE_PEER_IP)) .header("Forwarded", format!("for={}", test::ADMIN_PEER_IP)) .header("X-Forwarded-For", test::ADMIN_PEER_IP) .header("X-Real-IP", test::ADMIN_PEER_IP) .body(Body::empty()) .unwrap(); // Request from an unknown IP, trying to disguise as an admin using HTTP headers. let res = server.raw_request("10.80.80.80", req).await; // addr::remote() filter only look at remote_addr from TCP socket. // HTTP headers are not considered. This also means that innernet // server would not function behind an HTTP proxy. assert_eq!(res.status(), StatusCode::UNAUTHORIZED); Ok(()) } #[tokio::test] async fn test_incorrect_public_key() -> Result<(), Error> { let server = test::Server::new()?; let key = Key::generate_private().generate_public(); // Request from an unknown IP, trying to disguise as an admin using HTTP headers. let req = Request::builder() .uri(format!("http://{}/v1/admin/peers", test::WG_MANAGE_PEER_IP)) .header(shared::INNERNET_PUBKEY_HEADER, key.to_base64()) .body(Body::empty()) .unwrap(); let res = server.raw_request("10.80.80.80", req).await; // addr::remote() filter only look at remote_addr from TCP socket. // HTTP headers are not considered. This also means that innernet // server would not function behind an HTTP proxy. assert_eq!(res.status(), StatusCode::UNAUTHORIZED); Ok(()) }
let req = Request::builder() .uri(format!("http://{}/v1/admin/peers", test::WG_MANAGE_PEER_IP)) .header(shared::INNERNET_PUBKEY_HEADER, "!!!") .body(Body::empty()) .unwrap(); let res = server.raw_request("10.80.80.80", req).await; // addr::remote() filter only looks at remote_addr from the TCP socket. // HTTP headers are not considered. This also means that innernet // server would not function behind an HTTP proxy. assert_eq!(res.status(), StatusCode::UNAUTHORIZED); Ok(()) } }
#[tokio::test] async fn test_unparseable_public_key() -> Result<(), Error> { let server = test::Server::new()?;
random_line_split
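In this `random_line_split` record the cut falls mid test module: the `middle` supplies the `#[tokio::test]` header and server setup for `test_unparseable_public_key`, and the suffix resumes at the request builder. Below is a standalone sketch of the request-building pattern the spliced test uses, assuming hyper 0.14; the header name is a hypothetical stand-in for the real `shared::INNERNET_PUBKEY_HEADER` constant.

use hyper::{Body, Request};

const PUBKEY_HEADER: &str = "x-example-pubkey"; // hypothetical name

fn main() {
    // "!!!" is deliberately not valid base64, mirroring the
    // unparseable-key case: the server must reject it as unauthorized.
    let req: Request<Body> = Request::builder()
        .uri("http://10.42.0.1/v1/admin/peers")
        .header(PUBKEY_HEADER, "!!!")
        .body(Body::empty())
        .unwrap();
    println!("{} {} -> {:?}", req.method(), req.uri(), req.headers().get(PUBKEY_HEADER));
}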
main.rs
use colored::*; use dialoguer::Confirm; use hyper::{http, server::conn::AddrStream, Body, Request, Response}; use indoc::printdoc; use ipnetwork::IpNetwork; use parking_lot::{Mutex, RwLock}; use rusqlite::Connection; use serde::{Deserialize, Serialize}; use shared::{ AddCidrOpts, AddPeerOpts, DeleteCidrOpts, IoErrorContext, NetworkOpt, RenamePeerOpts, INNERNET_PUBKEY_HEADER, }; use std::{ collections::{HashMap, VecDeque}, convert::TryInto, env, fs::File, io::prelude::*, net::{IpAddr, SocketAddr, TcpListener}, ops::Deref, path::{Path, PathBuf}, sync::Arc, time::Duration, }; use structopt::{clap::AppSettings, StructOpt}; use subtle::ConstantTimeEq; use wgctrl::{Backend, Device, DeviceUpdate, InterfaceName, Key, PeerConfigBuilder}; pub mod api; pub mod db; pub mod error; #[cfg(test)] mod test; pub mod util; mod initialize; use db::{DatabaseCidr, DatabasePeer}; pub use error::ServerError; use initialize::InitializeOpts; use shared::{prompts, wg, CidrTree, Error, Interface, SERVER_CONFIG_DIR, SERVER_DATABASE_DIR}; pub use shared::{Association, AssociationContents}; pub const VERSION: &str = env!("CARGO_PKG_VERSION"); #[derive(Debug, StructOpt)] #[structopt(name = "innernet-server", about, global_settings(&[AppSettings::ColoredHelp, AppSettings::DeriveDisplayOrder, AppSettings::VersionlessSubcommands, AppSettings::UnifiedHelpMessage]))] struct Opt { #[structopt(subcommand)] command: Command, #[structopt(flatten)] network: NetworkOpt, } #[derive(Debug, StructOpt)] enum Command { /// Create a new network. #[structopt(alias = "init")] New { #[structopt(flatten)] opts: InitializeOpts, }, /// Permanently uninstall a created network, rendering it unusable. Use with care. Uninstall { interface: Interface }, /// Serve the coordinating server for an existing network. Serve { interface: Interface, #[structopt(flatten)] network: NetworkOpt, }, /// Add a peer to an existing network. AddPeer { interface: Interface, #[structopt(flatten)] args: AddPeerOpts, }, /// Rename an existing peer. RenamePeer { interface: Interface, #[structopt(flatten)] args: RenamePeerOpts, }, /// Add a new CIDR to an existing network. AddCidr { interface: Interface, #[structopt(flatten)] args: AddCidrOpts, }, /// Delete a CIDR. DeleteCidr { interface: Interface, #[structopt(flatten)] args: DeleteCidrOpts, }, /// Generate shell completion scripts Completions { #[structopt(possible_values = &structopt::clap::Shell::variants(), case_insensitive = true)] shell: structopt::clap::Shell, }, } pub type Db = Arc<Mutex<Connection>>; pub type Endpoints = Arc<RwLock<HashMap<String, SocketAddr>>>; #[derive(Clone)] pub struct Context { pub db: Db, pub endpoints: Arc<RwLock<HashMap<String, SocketAddr>>>, pub interface: InterfaceName, pub backend: Backend, pub public_key: Key, } pub struct Session { pub context: Context, pub peer: DatabasePeer, } impl Session { pub fn
(&self) -> bool { self.peer.is_admin && self.user_capable() } pub fn user_capable(&self) -> bool { !self.peer.is_disabled && self.peer.is_redeemed } pub fn redeemable(&self) -> bool { !self.peer.is_disabled && !self.peer.is_redeemed } } #[derive(Deserialize, Serialize, Debug)] #[serde(rename_all = "kebab-case")] pub struct ConfigFile { /// The server's WireGuard key pub private_key: String, /// The listen port of the server pub listen_port: u16, /// The internal WireGuard IP address assigned to the server pub address: IpAddr, /// The CIDR prefix of the WireGuard network pub network_cidr_prefix: u8, } impl ConfigFile { pub fn write_to_path<P: AsRef<Path>>(&self, path: P) -> Result<(), Error> { let mut invitation_file = File::create(&path).with_path(&path)?; shared::chmod(&invitation_file, 0o600)?; invitation_file .write_all(toml::to_string(self).unwrap().as_bytes()) .with_path(path)?; Ok(()) } pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self, Error> { let path = path.as_ref(); let file = File::open(path).with_path(path)?; if shared::chmod(&file, 0o600)? { println!( "{} updated permissions for {} to 0600.", "[!]".yellow(), path.display() ); } Ok(toml::from_slice(&std::fs::read(&path).with_path(path)?)?) } } #[derive(Clone, Debug, Default)] pub struct ServerConfig { wg_manage_dir_override: Option<PathBuf>, wg_dir_override: Option<PathBuf>, } impl ServerConfig { fn database_dir(&self) -> &Path { self.wg_manage_dir_override .as_deref() .unwrap_or(*SERVER_DATABASE_DIR) } fn database_path(&self, interface: &InterfaceName) -> PathBuf { PathBuf::new() .join(self.database_dir()) .join(interface.to_string()) .with_extension("db") } fn config_dir(&self) -> &Path { self.wg_dir_override .as_deref() .unwrap_or(*SERVER_CONFIG_DIR) } fn config_path(&self, interface: &InterfaceName) -> PathBuf { PathBuf::new() .join(self.config_dir()) .join(interface.to_string()) .with_extension("conf") } } #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { if env::var_os("RUST_LOG").is_none() { // Set some default log settings. env::set_var("RUST_LOG", "warn,warp=info,wg_manage_server=info"); } pretty_env_logger::init(); let opt = Opt::from_args(); if unsafe { libc::getuid() } != 0 && !matches!(opt.command, Command::Completions { .. 
}) { return Err("innernet-server must run as root.".into()); } let conf = ServerConfig::default(); match opt.command { Command::New { opts } => { if let Err(e) = initialize::init_wizard(&conf, opts) { eprintln!("{}: {}.", "creation failed".red(), e); std::process::exit(1); } }, Command::Uninstall { interface } => uninstall(&interface, &conf, opt.network)?, Command::Serve { interface, network: routing, } => serve(*interface, &conf, routing).await?, Command::AddPeer { interface, args } => add_peer(&interface, &conf, args, opt.network)?, Command::RenamePeer { interface, args } => rename_peer(&interface, &conf, args)?, Command::AddCidr { interface, args } => add_cidr(&interface, &conf, args)?, Command::DeleteCidr { interface, args } => delete_cidr(&interface, &conf, args)?, Command::Completions { shell } => { Opt::clap().gen_completions_to("innernet-server", shell, &mut std::io::stdout()); std::process::exit(0); }, } Ok(()) } fn open_database_connection( interface: &InterfaceName, conf: &ServerConfig, ) -> Result<rusqlite::Connection, Box<dyn std::error::Error>> { let database_path = conf.database_path(&interface); if !Path::new(&database_path).exists() { return Err(format!( "no database file found at {}", database_path.to_string_lossy() ) .into()); } let conn = Connection::open(&database_path)?; // Foreign key constraints aren't on in SQLite by default. Enable. conn.pragma_update(None, "foreign_keys", &1)?; db::auto_migrate(&conn)?; Ok(conn) } fn add_peer( interface: &InterfaceName, conf: &ServerConfig, opts: AddPeerOpts, network: NetworkOpt, ) -> Result<(), Error> { let config = ConfigFile::from_file(conf.config_path(interface))?; let conn = open_database_connection(interface, conf)?; let peers = DatabasePeer::list(&conn)? .into_iter() .map(|dp| dp.inner) .collect::<Vec<_>>(); let cidrs = DatabaseCidr::list(&conn)?; let cidr_tree = CidrTree::new(&cidrs[..]); if let Some((peer_request, keypair)) = shared::prompts::add_peer(&peers, &cidr_tree, &opts)? { let peer = DatabasePeer::create(&conn, peer_request)?; if cfg!(not(test)) && Device::get(interface, network.backend).is_ok() { // Update the current WireGuard interface with the new peers. DeviceUpdate::new() .add_peer((&*peer).into()) .apply(interface, network.backend) .map_err(|_| ServerError::WireGuard)?; println!("adding to WireGuard interface: {}", &*peer); } let server_peer = DatabasePeer::get(&conn, 1)?; prompts::save_peer_invitation( interface, &peer, &*server_peer, &cidr_tree, keypair, &SocketAddr::new(config.address, config.listen_port), &opts.save_config, )?; } else { println!("exited without creating peer."); } Ok(()) } fn rename_peer( interface: &InterfaceName, conf: &ServerConfig, opts: RenamePeerOpts, ) -> Result<(), Error> { let conn = open_database_connection(interface, conf)?; let peers = DatabasePeer::list(&conn)? .into_iter() .map(|dp| dp.inner) .collect::<Vec<_>>(); if let Some((peer_request, old_name)) = shared::prompts::rename_peer(&peers, &opts)? { let mut db_peer = DatabasePeer::list(&conn)? .into_iter() .find(|p| p.name == old_name) .ok_or( "Peer not found.")?; let _peer = db_peer.update(&conn, peer_request)?; } else { println!("exited without creating peer."); } Ok(()) } fn add_cidr( interface: &InterfaceName, conf: &ServerConfig, opts: AddCidrOpts, ) -> Result<(), Error> { let conn = open_database_connection(interface, conf)?; let cidrs = DatabaseCidr::list(&conn)?; if let Some(cidr_request) = shared::prompts::add_cidr(&cidrs, &opts)? 
{ let cidr = DatabaseCidr::create(&conn, cidr_request)?; printdoc!( " CIDR \"{cidr_name}\" added. Right now, peers within {cidr_name} can only see peers in the same CIDR, and in the special \"innernet-server\" CIDR that includes the innernet server peer. You'll need to add more associations for peers in diffent CIDRs to communicate. ", cidr_name = cidr.name.bold() ); } else { println!("exited without creating CIDR."); } Ok(()) } fn delete_cidr( interface: &InterfaceName, conf: &ServerConfig, args: DeleteCidrOpts, ) -> Result<(), Error> { println!("Fetching eligible CIDRs"); let conn = open_database_connection(interface, conf)?; let cidrs = DatabaseCidr::list(&conn)?; let peers = DatabasePeer::list(&conn)? .into_iter() .map(|dp| dp.inner) .collect::<Vec<_>>(); let cidr_id = prompts::delete_cidr(&cidrs, &peers, &args)?; println!("Deleting CIDR..."); let _ = DatabaseCidr::delete(&conn, cidr_id)?; println!("CIDR deleted."); Ok(()) } fn uninstall( interface: &InterfaceName, conf: &ServerConfig, network: NetworkOpt, ) -> Result<(), Error> { if Confirm::with_theme(&*prompts::THEME) .with_prompt(&format!( "Permanently delete network \"{}\"?", interface.as_str_lossy().yellow() )) .default(false) .interact()? { println!("{} bringing down interface (if up).", "[*]".dimmed()); wg::down(interface, network.backend).ok(); let config = conf.config_path(interface); let data = conf.database_path(interface); std::fs::remove_file(&config) .with_path(&config) .map_err(|e| println!("[!] {}", e.to_string().yellow())) .ok(); std::fs::remove_file(&data) .with_path(&data) .map_err(|e| println!("[!] {}", e.to_string().yellow())) .ok(); println!( "{} network {} is uninstalled.", "[*]".dimmed(), interface.as_str_lossy().yellow() ); } Ok(()) } fn spawn_endpoint_refresher(interface: InterfaceName, network: NetworkOpt) -> Endpoints { let endpoints = Arc::new(RwLock::new(HashMap::new())); tokio::task::spawn({ let endpoints = endpoints.clone(); async move { let mut interval = tokio::time::interval(Duration::from_secs(10)); loop { interval.tick().await; if let Ok(info) = Device::get(&interface, network.backend) { for peer in info.peers { if let Some(endpoint) = peer.config.endpoint { endpoints .write() .insert(peer.config.public_key.to_base64(), endpoint); } } } } } }); endpoints } fn spawn_expired_invite_sweeper(db: Db) { tokio::task::spawn(async move { let mut interval = tokio::time::interval(Duration::from_secs(10)); loop { interval.tick().await; match DatabasePeer::delete_expired_invites(&db.lock()) { Ok(deleted) if deleted > 0 => { log::info!("Deleted {} expired peer invitations.", deleted) }, Err(e) => log::error!("Failed to delete expired peer invitations: {}", e), _ => {}, } } }); } async fn serve( interface: InterfaceName, conf: &ServerConfig, network: NetworkOpt, ) -> Result<(), Error> { let config = ConfigFile::from_file(conf.config_path(&interface))?; let conn = open_database_connection(&interface, conf)?; let peers = DatabasePeer::list(&conn)?; let peer_configs = peers .iter() .map(|peer| peer.deref().into()) .collect::<Vec<PeerConfigBuilder>>(); log::info!("bringing up interface."); wg::up( &interface, &config.private_key, IpNetwork::new(config.address, config.network_cidr_prefix)?, Some(config.listen_port), None, network, )?; DeviceUpdate::new() .add_peers(&peer_configs) .apply(&interface, network.backend)?; log::info!("{} peers added to wireguard interface.", peers.len()); let public_key = wgctrl::Key::from_base64(&config.private_key)?.generate_public(); let db = Arc::new(Mutex::new(conn)); let endpoints = 
spawn_endpoint_refresher(interface, network); spawn_expired_invite_sweeper(db.clone()); let context = Context { db, endpoints, interface, public_key, backend: network.backend, }; log::info!("innernet-server {} starting.", VERSION); let listener = get_listener((config.address, config.listen_port).into(), &interface)?; let make_svc = hyper::service::make_service_fn(move |socket: &AddrStream| { let remote_addr = socket.remote_addr(); let context = context.clone(); async move { Ok::<_, http::Error>(hyper::service::service_fn(move |req: Request<Body>| { log::debug!("{} - {} {}", &remote_addr, req.method(), req.uri()); hyper_service(req, context.clone(), remote_addr) })) } }); let server = hyper::Server::from_tcp(listener)?.serve(make_svc); server.await?; Ok(()) } /// This function differs per OS, because different operating systems have /// opposing characteristics when binding to a specific IP address. /// On Linux, binding to a specific local IP address does *not* bind it to /// that IP's interface, allowing for spoofing attacks. /// /// See https://github.com/tonarino/innernet/issues/26 for more details. #[cfg(target_os = "linux")] fn get_listener(addr: SocketAddr, interface: &InterfaceName) -> Result<TcpListener, Error> { let listener = TcpListener::bind(&addr)?; listener.set_nonblocking(true)?; let sock = socket2::Socket::from(listener); sock.bind_device(Some(interface.as_str_lossy().as_bytes()))?; Ok(sock.into()) } /// BSD-likes do seem to bind to an interface when binding to an IP, /// according to the internet, but we may want to explicitly use /// IP_BOUND_IF in the future regardless. This isn't currently in /// the socket2 crate however, so we aren't currently using it. /// /// See https://github.com/tonarino/innernet/issues/26 for more details. #[cfg(not(target_os = "linux"))] fn get_listener(addr: SocketAddr, _interface: &InterfaceName) -> Result<TcpListener, Error> { let listener = TcpListener::bind(&addr)?; listener.set_nonblocking(true)?; Ok(listener) } pub(crate) async fn hyper_service( req: Request<Body>, context: Context, remote_addr: SocketAddr, ) -> Result<Response<Body>, http::Error> { // Break the path into components. 
let components: VecDeque<_> = req .uri() .path() .trim_start_matches('/') .split('/') .map(String::from) .collect(); routes(req, context, remote_addr, components) .await .or_else(TryInto::try_into) } async fn routes( req: Request<Body>, context: Context, remote_addr: SocketAddr, mut components: VecDeque<String>, ) -> Result<Response<Body>, ServerError> { // Must be "/v1/[something]" if components.pop_front().as_deref() != Some("v1") { Err(ServerError::NotFound) } else { let session = get_session(&req, context, remote_addr.ip())?; let component = components.pop_front(); match component.as_deref() { Some("user") => api::user::routes(req, components, session).await, Some("admin") => api::admin::routes(req, components, session).await, _ => Err(ServerError::NotFound), } } } fn get_session( req: &Request<Body>, context: Context, addr: IpAddr, ) -> Result<Session, ServerError> { let pubkey = req .headers() .get(INNERNET_PUBKEY_HEADER) .ok_or(ServerError::Unauthorized)?; let pubkey = pubkey.to_str().map_err(|_| ServerError::Unauthorized)?; let pubkey = Key::from_base64(&pubkey).map_err(|_| ServerError::Unauthorized)?; if pubkey.0.ct_eq(&context.public_key.0).into() { let peer = DatabasePeer::get_from_ip(&context.db.lock(), addr).map_err(|e| match e { rusqlite::Error::QueryReturnedNoRows => ServerError::Unauthorized, e => ServerError::Database(e), })?; if !peer.is_disabled { return Ok(Session { context, peer }); } } Err(ServerError::Unauthorized) } #[cfg(test)] mod tests { use super::*; use crate::test; use anyhow::Result; use hyper::StatusCode; use std::path::Path; #[test] fn test_init_wizard() -> Result<(), Error> { // This runs init_wizard(). let server = test::Server::new()?; assert!(Path::new(&server.wg_conf_path()).exists()); Ok(()) } #[tokio::test] async fn test_with_session_disguised_with_headers() -> Result<(), Error> { let server = test::Server::new()?; let req = Request::builder() .uri(format!("http://{}/v1/admin/peers", test::WG_MANAGE_PEER_IP)) .header("Forwarded", format!("for={}", test::ADMIN_PEER_IP)) .header("X-Forwarded-For", test::ADMIN_PEER_IP) .header("X-Real-IP", test::ADMIN_PEER_IP) .body(Body::empty()) .unwrap(); // Request from an unknown IP, trying to disguise as an admin using HTTP headers. let res = server.raw_request("10.80.80.80", req).await; // addr::remote() filter only look at remote_addr from TCP socket. // HTTP headers are not considered. This also means that innernet // server would not function behind an HTTP proxy. assert_eq!(res.status(), StatusCode::UNAUTHORIZED); Ok(()) } #[tokio::test] async fn test_incorrect_public_key() -> Result<(), Error> { let server = test::Server::new()?; let key = Key::generate_private().generate_public(); // Request from an unknown IP, trying to disguise as an admin using HTTP headers. let req = Request::builder() .uri(format!("http://{}/v1/admin/peers", test::WG_MANAGE_PEER_IP)) .header(shared::INNERNET_PUBKEY_HEADER, key.to_base64()) .body(Body::empty()) .unwrap(); let res = server.raw_request("10.80.80.80", req).await; // addr::remote() filter only look at remote_addr from TCP socket. // HTTP headers are not considered. This also means that innernet // server would not function behind an HTTP proxy. 
assert_eq!(res.status(), StatusCode::UNAUTHORIZED); Ok(()) } #[tokio::test] async fn test_unparseable_public_key() -> Result<(), Error> { let server = test::Server::new()?; let req = Request::builder() .uri(format!("http://{}/v1/admin/peers", test::WG_MANAGE_PEER_IP)) .header(shared::INNERNET_PUBKEY_HEADER, "!!!") .body(Body::empty()) .unwrap(); let res = server.raw_request("10.80.80.80", req).await; // addr::remote() filter only looks at remote_addr from the TCP socket. // HTTP headers are not considered. This also means that innernet // server would not function behind an HTTP proxy. assert_eq!(res.status(), StatusCode::UNAUTHORIZED); Ok(()) } }
admin_capable
identifier_name
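Here the masked span is a single identifier: the prefix stops at `pub fn` and resumes at `(&self) -> bool`, and the `middle` (`admin_capable`) names the method. Below is a self-contained reconstruction of the three `Session` capability predicates, with plain bool fields standing in for innernet's `DatabasePeer`.

// Reassembled capability checks once `admin_capable` is spliced in.
struct Peer {
    is_admin: bool,
    is_disabled: bool,
    is_redeemed: bool,
}

struct Session {
    peer: Peer,
}

impl Session {
    fn admin_capable(&self) -> bool {
        // Admins must also satisfy the ordinary user checks.
        self.peer.is_admin && self.user_capable()
    }
    fn user_capable(&self) -> bool {
        !self.peer.is_disabled && self.peer.is_redeemed
    }
    fn redeemable(&self) -> bool {
        // An invitation is redeemable only once, and only while enabled.
        !self.peer.is_disabled && !self.peer.is_redeemed
    }
}

fn main() {
    let s = Session {
        peer: Peer { is_admin: true, is_disabled: false, is_redeemed: true },
    };
    assert!(s.admin_capable() && s.user_capable() && !s.redeemable());
    println!("capability checks hold");
}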
population.rs
// Copyright (c) 2017 Ashley Jeffs // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. use unit::Unit; use crossbeam::scope; use rand::{SeedableRng, StdRng}; use rand::distributions::{IndependentSample, Range}; use std::mem; use std::sync::{Arc, Mutex, Condvar}; use std::cmp::Ordering; use std::sync::mpsc::sync_channel; /// Wraps a unit within a struct that lazily evaluates its fitness to avoid /// duplicate work. struct LazyUnit<T: Unit> { unit: T, lazy_fitness: Option<f64>, } impl<T: Unit> LazyUnit<T> { fn from(unit: T) -> Self { LazyUnit { unit: unit, lazy_fitness: None, } } fn fitness(&mut self) -> f64 { match self.lazy_fitness { Some(x) => x, None => { let fitness = self.unit.fitness(); self.lazy_fitness = Some(fitness); fitness } } } } /// Population is an abstraction that represents a collection of units. Each /// unit is a combination of variables, which produces an overall fitness. Units /// mate with other units to produce mutated offspring combining traits from /// both units. /// /// The population is responsible for iterating new generations of units by /// mating fit units and killing unfit units. pub struct Population<T: Unit> { units: Vec<T>, seed: usize, breed_factor: f64, survival_factor: f64, max_size: usize, } impl<T: Unit> Population<T> { /// Creates a new population, starts off with an empty population. If you /// wish to start with a preset population of units you can call /// `set_population` before calling epochs. pub fn new(init_pop: Vec<T>) -> Self { Population { units: init_pop, seed: 1, breed_factor: 0.5, survival_factor: 0.5, max_size: 100, } } //-------------------------------------------------------------------------- /// Sets the random seed of the population. pub fn set_rand_seed(&mut self, seed: usize) -> &mut Self { self.seed = seed; self } /// Sets the maximum size of the population. If already populated with more /// than this amount a random section of the population is killed. pub fn set_size(&mut self, size: usize) -> &mut Self { self.units.truncate(size); self.max_size = size; self } /// Sets the breed_factor (0 < b <= 1) of the genetic algorithm, which is /// the percentage of the population that will be able to breed per epoch. /// Units that are more fit are preferred for breeding, and so a high /// breed_factor results in more poorly performing units being able to /// breed, which will slow the algorithm down but allow it to escape local /// peaks. 
pub fn set_breed_factor(&mut self, breed_factor: f64) -> &mut Self { assert!(breed_factor > 0.0 && breed_factor <= 1.0); self.breed_factor = breed_factor; self } /// Sets the survival_factor (0 <= b <= 1) of the genetic algorithm, which /// is the percentage of the breeding population that will survive each /// epoch. Units that are more fit are preferred for survival, and so a high /// survival rate results in more poorly performing units being carried into /// the next epoch. /// /// Note that this value is a percentage of the breeding population. So if /// your breeding factor is 0.5, and your survival factor is 0.9, the /// percentage of units that will survive the next epoch is: /// /// 0.5 * 0.9 * 100 = 45% /// pub fn set_survival_factor(&mut self, survival_factor: f64) -> &mut Self { assert!(survival_factor >= 0.0 && survival_factor <= 1.0); self.survival_factor = survival_factor; self } //-------------------------------------------------------------------------- /// An epoch that allows units to breed and mutate without harsh culling. /// It's important to sometimes allow 'weak' units to produce generations /// that might escape local peaks in certain dimensions. fn epoch(&self, units: &mut Vec<LazyUnit<T>>, mut rng: StdRng) -> StdRng { assert!(units.len() > 0); // breed_factor dictates how large a percentage of the population will be // able to breed. let breed_up_to = (self.breed_factor * (units.len() as f64)) as usize; let mut breeders: Vec<LazyUnit<T>> = Vec::new(); while let Some(unit) = units.pop() { breeders.push(unit); if breeders.len() == breed_up_to { break; } } units.clear(); // The strongest fraction (survival_factor) of our breeders will survive // each epoch. Always at least one. let surviving_parents = (breeders.len() as f64 * self.survival_factor).ceil() as usize; let pcnt_range = Range::new(0, breeders.len()); for i in 0..self.max_size - surviving_parents { let rs = pcnt_range.ind_sample(&mut rng); units.push(LazyUnit::from( breeders[i % breeders.len()].unit.breed_with( &breeders[rs].unit, ), )); } // Move our survivors into the new generation. units.append(&mut breeders.drain(0..surviving_parents).collect()); rng } /// Runs a number of epochs where fitness is calculated across n parallel /// processes. This is useful when the fitness calculation is an expensive /// operation. pub fn epochs_parallel(&mut self, n_epochs: u32, n_processes: u32) -> &mut Self { scope(|scope| { let cvar_pair = Arc::new((Mutex::new(0), Condvar::new())); let (tx, rx) = sync_channel(0);
for _ in 0..n_processes { let cvar_pair_clone = cvar_pair.clone(); let processed_stack_clone = processed_stack.clone(); let process_queue_clone = process_queue.clone(); scope.spawn(move || { let &(ref lock, ref cvar) = &*cvar_pair_clone; loop { let mut l_unit: LazyUnit<T> = match process_queue_clone.lock().ok().unwrap().recv() { Ok(u) => u, Err(_) => return, }; l_unit.fitness(); processed_stack_clone.lock().ok().unwrap().push(l_unit); { let mut processed = lock.lock().unwrap(); *processed += 1; cvar.notify_all(); } } }); } let &(ref lock, ref cvar) = &*cvar_pair; let mut active_stack = Vec::new(); while let Some(unit) = self.units.pop() { active_stack.push(LazyUnit::from(unit)); } let seed: &[_] = &[self.seed]; let mut rng: StdRng = SeedableRng::from_seed(seed); for i in 0..(n_epochs + 1) { let jobs_total = active_stack.len(); while let Some(unit) = active_stack.pop() { tx.send(unit).unwrap(); } let mut jobs_processed = lock.lock().unwrap(); while *jobs_processed != jobs_total { jobs_processed = cvar.wait(jobs_processed).unwrap(); } *jobs_processed = 0; // Swap the full processed_stack with the active stack. mem::swap(&mut active_stack, &mut processed_stack.lock().ok().unwrap()); // We want to sort such that highest fitness units are at the // end. active_stack.sort_by(|a, b| { a.lazy_fitness .unwrap_or(0.0) .partial_cmp(&b.lazy_fitness.unwrap_or(0.0)) .unwrap_or(Ordering::Equal) }); // If we have the perfect solution then break early. if active_stack.last().unwrap().lazy_fitness.unwrap_or(0.0) == 1.0 { break; } if i != n_epochs { rng = self.epoch(&mut active_stack, rng); } } // Reverse the order of units such that the first unit is the // strongest candidate. while let Some(unit) = active_stack.pop() { self.units.push(unit.unit); } }); self } /// Runs a number of epochs on a single process. pub fn epochs(&mut self, n_epochs: u32) -> &mut Self { let mut processed_stack = Vec::new(); let mut active_stack = Vec::new(); while let Some(unit) = self.units.pop() { active_stack.push(LazyUnit::from(unit)); } let seed: &[_] = &[self.seed]; let mut rng: StdRng = SeedableRng::from_seed(seed); for i in 0..(n_epochs + 1) { while let Some(mut unit) = active_stack.pop() { unit.fitness(); processed_stack.push(unit); } // Swap the full processed_stack with the active stack. mem::swap(&mut active_stack, &mut processed_stack); // We want to sort such that highest fitness units are at the // end. active_stack.sort_by(|a, b| { a.lazy_fitness .unwrap_or(0.0) .partial_cmp(&b.lazy_fitness.unwrap_or(0.0)) .unwrap_or(Ordering::Equal) }); // If we have the perfect solution then break early. if active_stack.last().unwrap().lazy_fitness.unwrap_or(0.0) == 1.0 { break; } if i != n_epochs { rng = self.epoch(&mut active_stack, rng); } } // Reverse the order of units such that the first unit is the // strongest candidate. while let Some(unit) = active_stack.pop() { self.units.push(unit.unit); } self } //-------------------------------------------------------------------------- /// Returns the full population of units, ordered such that the first /// element is the strongest candidate. This collection can be used to /// create a new population. pub fn finish(&mut self) -> Vec<T> { let mut empty_units: Vec<T> = Vec::new(); mem::swap(&mut empty_units, &mut self.units); empty_units } }
let process_queue = Arc::new(Mutex::new(rx)); let processed_stack = Arc::new(Mutex::new(Vec::new()));
random_line_split
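The `middle` in this record is the glue between the channel created at the end of the prefix and the worker loop at the start of the suffix: it wraps the receiver and the results vector in `Arc<Mutex<..>>` so every worker thread can share them. Below is a std-only sketch of that exact pattern; squaring each job stands in for the real `fitness()` call, and the names mirror the record.

use std::sync::mpsc::sync_channel;
use std::sync::{Arc, Mutex};
use std::thread;

fn main() {
    let (tx, rx) = sync_channel::<u32>(0);
    let process_queue = Arc::new(Mutex::new(rx));
    let processed_stack = Arc::new(Mutex::new(Vec::new()));

    let workers: Vec<_> = (0..4)
        .map(|_| {
            let queue = Arc::clone(&process_queue);
            let stack = Arc::clone(&processed_stack);
            thread::spawn(move || loop {
                // recv() returns Err once the sender is dropped,
                // which is the signal for a worker to exit.
                let job = match queue.lock().unwrap().recv() {
                    Ok(j) => j,
                    Err(_) => return,
                };
                stack.lock().unwrap().push(job * job); // stand-in for fitness()
            })
        })
        .collect();

    for n in 1..=8 {
        tx.send(n).unwrap();
    }
    drop(tx); // close the channel so workers exit
    for w in workers {
        w.join().unwrap();
    }
    println!("{:?}", processed_stack.lock().unwrap());
}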
population.rs
// Copyright (c) 2017 Ashley Jeffs // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. use unit::Unit; use crossbeam::scope; use rand::{SeedableRng, StdRng}; use rand::distributions::{IndependentSample, Range}; use std::mem; use std::sync::{Arc, Mutex, Condvar}; use std::cmp::Ordering; use std::sync::mpsc::sync_channel; /// Wraps a unit within a struct that lazily evaluates its fitness to avoid /// duplicate work. struct LazyUnit<T: Unit> { unit: T, lazy_fitness: Option<f64>, } impl<T: Unit> LazyUnit<T> { fn from(unit: T) -> Self { LazyUnit { unit: unit, lazy_fitness: None, } } fn fitness(&mut self) -> f64 { match self.lazy_fitness { Some(x) => x, None => { let fitness = self.unit.fitness(); self.lazy_fitness = Some(fitness); fitness } } } } /// Population is an abstraction that represents a collection of units. Each /// unit is a combination of variables, which produces an overall fitness. Units /// mate with other units to produce mutated offspring combining traits from /// both units. /// /// The population is responsible for iterating new generations of units by /// mating fit units and killing unfit units. pub struct Population<T: Unit> { units: Vec<T>, seed: usize, breed_factor: f64, survival_factor: f64, max_size: usize, } impl<T: Unit> Population<T> { /// Creates a new population, starts off with an empty population. If you /// wish to start with a preset population of units you can call /// `set_population` before calling epochs. pub fn
(init_pop: Vec<T>) -> Self { Population { units: init_pop, seed: 1, breed_factor: 0.5, survival_factor: 0.5, max_size: 100, } } //-------------------------------------------------------------------------- /// Sets the random seed of the population. pub fn set_rand_seed(&mut self, seed: usize) -> &mut Self { self.seed = seed; self } /// Sets the maximum size of the population. If already populated with more /// than this amount a random section of the population is killed. pub fn set_size(&mut self, size: usize) -> &mut Self { self.units.truncate(size); self.max_size = size; self } /// Sets the breed_factor (0 < b <= 1) of the genetic algorithm, which is /// the percentage of the population that will be able to breed per epoch. /// Units that are more fit are preferred for breeding, and so a high /// breed_factor results in more poorly performing units being able to /// breed, which will slow the algorithm down but allow it to escape local /// peaks. pub fn set_breed_factor(&mut self, breed_factor: f64) -> &mut Self { assert!(breed_factor > 0.0 && breed_factor <= 1.0); self.breed_factor = breed_factor; self } /// Sets the survival_factor (0 <= b <= 1) of the genetic algorithm, which /// is the percentage of the breeding population that will survive each /// epoch. Units that are more fit are preferred for survival, and so a high /// survival rate results in more poorly performing units being carried into /// the next epoch. /// /// Note that this value is a percentage of the breeding population. So if /// your breeding factor is 0.5, and your survival factor is 0.9, the /// percentage of units that will survive the next epoch is: /// /// 0.5 * 0.9 * 100 = 45% /// pub fn set_survival_factor(&mut self, survival_factor: f64) -> &mut Self { assert!(survival_factor >= 0.0 && survival_factor <= 1.0); self.survival_factor = survival_factor; self } //-------------------------------------------------------------------------- /// An epoch that allows units to breed and mutate without harsh culling. /// It's important to sometimes allow 'weak' units to produce generations /// that might escape local peaks in certain dimensions. fn epoch(&self, units: &mut Vec<LazyUnit<T>>, mut rng: StdRng) -> StdRng { assert!(units.len() > 0); // breed_factor dicates how large a percentage of the population will be // able to breed. let breed_up_to = (self.breed_factor * (units.len() as f64)) as usize; let mut breeders: Vec<LazyUnit<T>> = Vec::new(); while let Some(unit) = units.pop() { breeders.push(unit); if breeders.len() == breed_up_to { break; } } units.clear(); // The strongest half of our breeders will survive each epoch. Always at // least one. let surviving_parents = (breeders.len() as f64 * self.survival_factor).ceil() as usize; let pcnt_range = Range::new(0, breeders.len()); for i in 0..self.max_size - surviving_parents { let rs = pcnt_range.ind_sample(&mut rng); units.push(LazyUnit::from( breeders[i % breeders.len()].unit.breed_with( &breeders[rs].unit, ), )); } // Move our survivors into the new generation. units.append(&mut breeders.drain(0..surviving_parents).collect()); rng } /// Runs a number of epochs where fitness is calculated across n parallel /// processes. This is useful when the fitness calcuation is an expensive /// operation. 
pub fn epochs_parallel(&mut self, n_epochs: u32, n_processes: u32) -> &mut Self { scope(|scope| { let cvar_pair = Arc::new((Mutex::new(0), Condvar::new())); let (tx, rx) = sync_channel(0); let process_queue = Arc::new(Mutex::new(rx)); let processed_stack = Arc::new(Mutex::new(Vec::new())); for _ in 0..n_processes { let cvar_pair_clone = cvar_pair.clone(); let processed_stack_clone = processed_stack.clone(); let process_queue_clone = process_queue.clone(); scope.spawn(move || { let &(ref lock, ref cvar) = &*cvar_pair_clone; loop { let mut l_unit: LazyUnit<T> = match process_queue_clone.lock().ok().unwrap().recv() { Ok(u) => u, Err(_) => return, }; l_unit.fitness(); processed_stack_clone.lock().ok().unwrap().push(l_unit); { let mut processed = lock.lock().unwrap(); *processed += 1; cvar.notify_all(); } } }); } let &(ref lock, ref cvar) = &*cvar_pair; let mut active_stack = Vec::new(); while let Some(unit) = self.units.pop() { active_stack.push(LazyUnit::from(unit)); } let seed: &[_] = &[self.seed]; let mut rng: StdRng = SeedableRng::from_seed(seed); for i in 0..(n_epochs + 1) { let jobs_total = active_stack.len(); while let Some(unit) = active_stack.pop() { tx.send(unit).unwrap(); } let mut jobs_processed = lock.lock().unwrap(); while *jobs_processed != jobs_total { jobs_processed = cvar.wait(jobs_processed).unwrap(); } *jobs_processed = 0; // Swap the full processed_stack with the active stack. mem::swap(&mut active_stack, &mut processed_stack.lock().ok().unwrap()); // We want to sort such that highest fitness units are at the // end. active_stack.sort_by(|a, b| { a.lazy_fitness .unwrap_or(0.0) .partial_cmp(&b.lazy_fitness.unwrap_or(0.0)) .unwrap_or(Ordering::Equal) }); // If we have the perfect solution then break early. if active_stack.last().unwrap().lazy_fitness.unwrap_or(0.0) == 1.0 { break; } if i != n_epochs { rng = self.epoch(&mut active_stack, rng); } } // Reverse the order of units such that the first unit is the // strongest candidate. while let Some(unit) = active_stack.pop() { self.units.push(unit.unit); } }); self } /// Runs a number of epochs on a single process. pub fn epochs(&mut self, n_epochs: u32) -> &mut Self { let mut processed_stack = Vec::new(); let mut active_stack = Vec::new(); while let Some(unit) = self.units.pop() { active_stack.push(LazyUnit::from(unit)); } let seed: &[_] = &[self.seed]; let mut rng: StdRng = SeedableRng::from_seed(seed); for i in 0..(n_epochs + 1) { while let Some(mut unit) = active_stack.pop() { unit.fitness(); processed_stack.push(unit); } // Swap the full processed_stack with the active stack. mem::swap(&mut active_stack, &mut processed_stack); // We want to sort such that highest fitness units are at the // end. active_stack.sort_by(|a, b| { a.lazy_fitness .unwrap_or(0.0) .partial_cmp(&b.lazy_fitness.unwrap_or(0.0)) .unwrap_or(Ordering::Equal) }); // If we have the perfect solution then break early. if active_stack.last().unwrap().lazy_fitness.unwrap_or(0.0) == 1.0 { break; } if i != n_epochs { rng = self.epoch(&mut active_stack, rng); } } // Reverse the order of units such that the first unit is the // strongest candidate. while let Some(unit) = active_stack.pop() { self.units.push(unit.unit); } self } //-------------------------------------------------------------------------- /// Returns the full population of units, ordered such that the first /// element is the strongest candidate. This collection can be used to /// create a new population. 
pub fn finish(&mut self) -> Vec<T> { let mut empty_units: Vec<T> = Vec::new(); mem::swap(&mut empty_units, &mut self.units); empty_units } }
new
identifier_name
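The Rust record above rebuilds its population each epoch from three knobs: breed_factor picks how many of the fittest units may breed, survival_factor picks how many of those carry over, and max_size fixes the next generation. A minimal Python sketch of just that arithmetic (an illustration of the record's logic, not part of the crate; the truncating int() mirrors Rust's `as usize` cast):

import math

def epoch_counts(pop_size: int, max_size: int, breed_factor: float, survival_factor: float):
    # Mirrors Population::epoch: how many units breed, how many of those
    # survive, and how many offspring are bred to refill the population.
    breed_up_to = int(breed_factor * pop_size)                    # Rust: `as usize` truncates
    surviving_parents = math.ceil(breed_up_to * survival_factor)  # Rust: .ceil() as usize
    offspring = max_size - surviving_parents
    return breed_up_to, surviving_parents, offspring

# The doc comment's worked example: 0.5 * 0.9 * 100 = 45% survive.
print(epoch_counts(100, 100, 0.5, 0.9))  # (50, 45, 55)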
constructor.go
package output import ( "fmt" "strconv" "gopkg.in/yaml.v3" "github.com/benthosdev/benthos/v4/internal/component" "github.com/benthosdev/benthos/v4/internal/component/metrics" "github.com/benthosdev/benthos/v4/internal/component/output" iprocessor "github.com/benthosdev/benthos/v4/internal/component/processor" "github.com/benthosdev/benthos/v4/internal/docs" "github.com/benthosdev/benthos/v4/internal/interop" "github.com/benthosdev/benthos/v4/internal/log" "github.com/benthosdev/benthos/v4/internal/old/processor" "github.com/benthosdev/benthos/v4/internal/pipeline" ) // TypeSpec is a constructor and a usage description for each output type. type TypeSpec struct { constructor ConstructorFunc // Async indicates whether this output benefits from sending multiple // messages asynchronously over the protocol. Async bool // Batches indicates whether this output benefits from batching of messages. Batches bool Status docs.Status Summary string Description string Categories []string Footnotes string Config docs.FieldSpec Examples []docs.AnnotatedExample Version string } // AppendProcessorsFromConfig takes a variant arg of pipeline constructor // functions and returns a new slice of them where the processors of the // provided output configuration will also be initialized. func AppendProcessorsFromConfig(conf Config, mgr interop.Manager, pipelines ...iprocessor.PipelineConstructorFunc) []iprocessor.PipelineConstructorFunc { if len(conf.Processors) > 0 { pipelines = append(pipelines, []iprocessor.PipelineConstructorFunc{func() (iprocessor.Pipeline, error) { processors := make([]iprocessor.V1, len(conf.Processors)) for j, procConf := range conf.Processors { var err error pMgr := mgr.IntoPath("processors", strconv.Itoa(j)) processors[j], err = processor.New(procConf, pMgr) if err != nil
} return pipeline.NewProcessor(processors...), nil }}...) } return pipelines } func fromSimpleConstructor(fn func(Config, interop.Manager, log.Modular, metrics.Type) (output.Streamed, error)) ConstructorFunc { return func( conf Config, mgr interop.Manager, log log.Modular, stats metrics.Type, pipelines ...iprocessor.PipelineConstructorFunc, ) (output.Streamed, error) { output, err := fn(conf, mgr, log, stats) if err != nil { return nil, err } pipelines = AppendProcessorsFromConfig(conf, mgr, pipelines...) return WrapWithPipelines(output, pipelines...) } } // ConstructorFunc is a func signature able to construct an output. type ConstructorFunc func(Config, interop.Manager, log.Modular, metrics.Type, ...iprocessor.PipelineConstructorFunc) (output.Streamed, error) // WalkConstructors iterates each component constructor. func WalkConstructors(fn func(ConstructorFunc, docs.ComponentSpec)) { inferred := docs.ComponentFieldsFromConf(NewConfig()) for k, v := range Constructors { conf := v.Config conf.Children = conf.Children.DefaultAndTypeFrom(inferred[k]) spec := docs.ComponentSpec{ Type: docs.TypeOutput, Name: k, Summary: v.Summary, Description: v.Description, Footnotes: v.Footnotes, Categories: v.Categories, Config: conf, Examples: v.Examples, Status: v.Status, Version: v.Version, } spec.Description = output.Description(v.Async, v.Batches, spec.Description) fn(v.constructor, spec) } } // Constructors is a map of all output types with their specs. var Constructors = map[string]TypeSpec{} //------------------------------------------------------------------------------ // String constants representing each output type. // Deprecated: Do not add new components here. Instead, use the public plugin // APIs. Examples can be found in: ./internal/impl const ( TypeAMQP09 = "amqp_0_9" TypeAMQP1 = "amqp_1" TypeAWSDynamoDB = "aws_dynamodb" TypeAWSKinesis = "aws_kinesis" TypeAWSKinesisFirehose = "aws_kinesis_firehose" TypeAWSS3 = "aws_s3" TypeAWSSNS = "aws_sns" TypeAWSSQS = "aws_sqs" TypeAzureBlobStorage = "azure_blob_storage" TypeAzureQueueStorage = "azure_queue_storage" TypeAzureTableStorage = "azure_table_storage" TypeBroker = "broker" TypeCache = "cache" TypeCassandra = "cassandra" TypeDrop = "drop" TypeDropOn = "drop_on" TypeDynamic = "dynamic" TypeDynamoDB = "dynamodb" TypeElasticsearch = "elasticsearch" TypeFallback = "fallback" TypeFile = "file" TypeGCPCloudStorage = "gcp_cloud_storage" TypeGCPPubSub = "gcp_pubsub" TypeHDFS = "hdfs" TypeHTTPClient = "http_client" TypeHTTPServer = "http_server" TypeInproc = "inproc" TypeKafka = "kafka" TypeMongoDB = "mongodb" TypeMQTT = "mqtt" TypeNanomsg = "nanomsg" TypeNATS = "nats" TypeNATSJetStream = "nats_jetstream" TypeNATSStream = "nats_stream" TypeNSQ = "nsq" TypeRedisHash = "redis_hash" TypeRedisList = "redis_list" TypeRedisPubSub = "redis_pubsub" TypeRedisStreams = "redis_streams" TypeReject = "reject" TypeResource = "resource" TypeRetry = "retry" TypeSFTP = "sftp" TypeSTDOUT = "stdout" TypeSubprocess = "subprocess" TypeSwitch = "switch" TypeSyncResponse = "sync_response" TypeSocket = "socket" TypeWebsocket = "websocket" ) //------------------------------------------------------------------------------ // Config is the all encompassing configuration struct for all output types. // Deprecated: Do not add new components here. Instead, use the public plugin // APIs. 
Examples can be found in: ./internal/impl type Config struct { Label string `json:"label" yaml:"label"` Type string `json:"type" yaml:"type"` AMQP09 AMQPConfig `json:"amqp_0_9" yaml:"amqp_0_9"` AMQP1 AMQP1Config `json:"amqp_1" yaml:"amqp_1"` AWSDynamoDB DynamoDBConfig `json:"aws_dynamodb" yaml:"aws_dynamodb"` AWSKinesis KinesisConfig `json:"aws_kinesis" yaml:"aws_kinesis"` AWSKinesisFirehose KinesisFirehoseConfig `json:"aws_kinesis_firehose" yaml:"aws_kinesis_firehose"` AWSS3 AmazonS3Config `json:"aws_s3" yaml:"aws_s3"` AWSSNS SNSConfig `json:"aws_sns" yaml:"aws_sns"` AWSSQS AmazonSQSConfig `json:"aws_sqs" yaml:"aws_sqs"` AzureBlobStorage AzureBlobStorageConfig `json:"azure_blob_storage" yaml:"azure_blob_storage"` AzureQueueStorage AzureQueueStorageConfig `json:"azure_queue_storage" yaml:"azure_queue_storage"` AzureTableStorage AzureTableStorageConfig `json:"azure_table_storage" yaml:"azure_table_storage"` Broker BrokerConfig `json:"broker" yaml:"broker"` Cache CacheConfig `json:"cache" yaml:"cache"` Cassandra CassandraConfig `json:"cassandra" yaml:"cassandra"` Drop DropConfig `json:"drop" yaml:"drop"` DropOn DropOnConfig `json:"drop_on" yaml:"drop_on"` Dynamic DynamicConfig `json:"dynamic" yaml:"dynamic"` Elasticsearch ElasticsearchConfig `json:"elasticsearch" yaml:"elasticsearch"` Fallback TryConfig `json:"fallback" yaml:"fallback"` File FileConfig `json:"file" yaml:"file"` GCPCloudStorage GCPCloudStorageConfig `json:"gcp_cloud_storage" yaml:"gcp_cloud_storage"` GCPPubSub GCPPubSubConfig `json:"gcp_pubsub" yaml:"gcp_pubsub"` HDFS HDFSConfig `json:"hdfs" yaml:"hdfs"` HTTPClient HTTPClientConfig `json:"http_client" yaml:"http_client"` HTTPServer HTTPServerConfig `json:"http_server" yaml:"http_server"` Inproc string `json:"inproc" yaml:"inproc"` Kafka KafkaConfig `json:"kafka" yaml:"kafka"` MongoDB MongoDBConfig `json:"mongodb" yaml:"mongodb"` MQTT MQTTConfig `json:"mqtt" yaml:"mqtt"` Nanomsg NanomsgConfig `json:"nanomsg" yaml:"nanomsg"` NATS NATSConfig `json:"nats" yaml:"nats"` NATSStream NATSStreamConfig `json:"nats_stream" yaml:"nats_stream"` NSQ NSQConfig `json:"nsq" yaml:"nsq"` Plugin interface{} `json:"plugin,omitempty" yaml:"plugin,omitempty"` RedisHash RedisHashConfig `json:"redis_hash" yaml:"redis_hash"` RedisList RedisListConfig `json:"redis_list" yaml:"redis_list"` RedisPubSub RedisPubSubConfig `json:"redis_pubsub" yaml:"redis_pubsub"` RedisStreams RedisStreamsConfig `json:"redis_streams" yaml:"redis_streams"` Reject string `json:"reject" yaml:"reject"` Resource string `json:"resource" yaml:"resource"` Retry RetryConfig `json:"retry" yaml:"retry"` SFTP SFTPConfig `json:"sftp" yaml:"sftp"` STDOUT STDOUTConfig `json:"stdout" yaml:"stdout"` Subprocess SubprocessConfig `json:"subprocess" yaml:"subprocess"` Switch SwitchConfig `json:"switch" yaml:"switch"` SyncResponse struct{} `json:"sync_response" yaml:"sync_response"` Socket SocketConfig `json:"socket" yaml:"socket"` Websocket WebsocketConfig `json:"websocket" yaml:"websocket"` Processors []processor.Config `json:"processors" yaml:"processors"` } // NewConfig returns a configuration struct fully populated with default values. // Deprecated: Do not add new components here. Instead, use the public plugin // APIs. 
Examples can be found in: ./internal/impl func NewConfig() Config { return Config{ Label: "", Type: "stdout", AMQP09: NewAMQPConfig(), AMQP1: NewAMQP1Config(), AWSDynamoDB: NewDynamoDBConfig(), AWSKinesis: NewKinesisConfig(), AWSKinesisFirehose: NewKinesisFirehoseConfig(), AWSS3: NewAmazonS3Config(), AWSSNS: NewSNSConfig(), AWSSQS: NewAmazonSQSConfig(), AzureBlobStorage: NewAzureBlobStorageConfig(), AzureQueueStorage: NewAzureQueueStorageConfig(), AzureTableStorage: NewAzureTableStorageConfig(), Broker: NewBrokerConfig(), Cache: NewCacheConfig(), Cassandra: NewCassandraConfig(), Drop: NewDropConfig(), DropOn: NewDropOnConfig(), Dynamic: NewDynamicConfig(), Elasticsearch: NewElasticsearchConfig(), Fallback: NewTryConfig(), File: NewFileConfig(), GCPCloudStorage: NewGCPCloudStorageConfig(), GCPPubSub: NewGCPPubSubConfig(), HDFS: NewHDFSConfig(), HTTPClient: NewHTTPClientConfig(), HTTPServer: NewHTTPServerConfig(), Inproc: "", Kafka: NewKafkaConfig(), MQTT: NewMQTTConfig(), MongoDB: NewMongoDBConfig(), Nanomsg: NewNanomsgConfig(), NATS: NewNATSConfig(), NATSStream: NewNATSStreamConfig(), NSQ: NewNSQConfig(), Plugin: nil, RedisHash: NewRedisHashConfig(), RedisList: NewRedisListConfig(), RedisPubSub: NewRedisPubSubConfig(), RedisStreams: NewRedisStreamsConfig(), Reject: "", Resource: "", Retry: NewRetryConfig(), SFTP: NewSFTPConfig(), STDOUT: NewSTDOUTConfig(), Subprocess: NewSubprocessConfig(), Switch: NewSwitchConfig(), SyncResponse: struct{}{}, Socket: NewSocketConfig(), Websocket: NewWebsocketConfig(), Processors: []processor.Config{}, } } //------------------------------------------------------------------------------ // UnmarshalYAML ensures that when parsing configs that are in a map or slice // the default values are still applied. func (conf *Config) UnmarshalYAML(value *yaml.Node) error { type confAlias Config aliased := confAlias(NewConfig()) err := value.Decode(&aliased) if err != nil { return fmt.Errorf("line %v: %v", value.Line, err) } var spec docs.ComponentSpec if aliased.Type, spec, err = docs.GetInferenceCandidateFromYAML(docs.DeprecatedProvider, docs.TypeOutput, value); err != nil { return fmt.Errorf("line %v: %w", value.Line, err) } if spec.Plugin { pluginNode, err := docs.GetPluginConfigYAML(aliased.Type, value) if err != nil { return fmt.Errorf("line %v: %v", value.Line, err) } aliased.Plugin = &pluginNode } else { aliased.Plugin = nil } *conf = Config(aliased) return nil } //------------------------------------------------------------------------------ // New creates an output type based on an output configuration. func New( conf Config, mgr interop.Manager, log log.Modular, stats metrics.Type, pipelines ...iprocessor.PipelineConstructorFunc, ) (output.Streamed, error) { if mgrV2, ok := mgr.(interface { NewOutput(Config, ...iprocessor.PipelineConstructorFunc) (output.Streamed, error) }); ok { return mgrV2.NewOutput(conf, pipelines...) } if c, ok := Constructors[conf.Type]; ok { return c.constructor(conf, mgr, log, stats, pipelines...) } return nil, component.ErrInvalidType("output", conf.Type) }
{ return nil, err }
conditional_block
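Every row of this dump is a fill-in-the-middle example: the middle was cut out of the file named by file_name, and concatenating prefix + middle + suffix must reproduce the original source. A sketch of that invariant, using a toy record shaped like the conditional_block example above:

def reconstruct(record: dict) -> str:
    # The defining property of a FIM record.
    return record["prefix"] + record["middle"] + record["suffix"]

record = {
    "file_name": "constructor.go",
    "prefix": "if err != nil ",
    "middle": "{ return nil, err }",  # the masked conditional_block
    "suffix": "\nreturn pipeline.NewProcessor(processors...), nil",
}
print(reconstruct(record))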
constructor.go
package output import ( "fmt" "strconv" "gopkg.in/yaml.v3" "github.com/benthosdev/benthos/v4/internal/component" "github.com/benthosdev/benthos/v4/internal/component/metrics" "github.com/benthosdev/benthos/v4/internal/component/output" iprocessor "github.com/benthosdev/benthos/v4/internal/component/processor" "github.com/benthosdev/benthos/v4/internal/docs" "github.com/benthosdev/benthos/v4/internal/interop" "github.com/benthosdev/benthos/v4/internal/log" "github.com/benthosdev/benthos/v4/internal/old/processor" "github.com/benthosdev/benthos/v4/internal/pipeline" ) // TypeSpec is a constructor and a usage description for each output type. type TypeSpec struct { constructor ConstructorFunc // Async indicates whether this output benefits from sending multiple // messages asynchronously over the protocol. Async bool // Batches indicates whether this output benefits from batching of messages. Batches bool
Summary string Description string Categories []string Footnotes string Config docs.FieldSpec Examples []docs.AnnotatedExample Version string } // AppendProcessorsFromConfig takes a variant arg of pipeline constructor // functions and returns a new slice of them where the processors of the // provided output configuration will also be initialized. func AppendProcessorsFromConfig(conf Config, mgr interop.Manager, pipelines ...iprocessor.PipelineConstructorFunc) []iprocessor.PipelineConstructorFunc { if len(conf.Processors) > 0 { pipelines = append(pipelines, []iprocessor.PipelineConstructorFunc{func() (iprocessor.Pipeline, error) { processors := make([]iprocessor.V1, len(conf.Processors)) for j, procConf := range conf.Processors { var err error pMgr := mgr.IntoPath("processors", strconv.Itoa(j)) processors[j], err = processor.New(procConf, pMgr) if err != nil { return nil, err } } return pipeline.NewProcessor(processors...), nil }}...) } return pipelines } func fromSimpleConstructor(fn func(Config, interop.Manager, log.Modular, metrics.Type) (output.Streamed, error)) ConstructorFunc { return func( conf Config, mgr interop.Manager, log log.Modular, stats metrics.Type, pipelines ...iprocessor.PipelineConstructorFunc, ) (output.Streamed, error) { output, err := fn(conf, mgr, log, stats) if err != nil { return nil, err } pipelines = AppendProcessorsFromConfig(conf, mgr, pipelines...) return WrapWithPipelines(output, pipelines...) } } // ConstructorFunc is a func signature able to construct an output. type ConstructorFunc func(Config, interop.Manager, log.Modular, metrics.Type, ...iprocessor.PipelineConstructorFunc) (output.Streamed, error) // WalkConstructors iterates each component constructor. func WalkConstructors(fn func(ConstructorFunc, docs.ComponentSpec)) { inferred := docs.ComponentFieldsFromConf(NewConfig()) for k, v := range Constructors { conf := v.Config conf.Children = conf.Children.DefaultAndTypeFrom(inferred[k]) spec := docs.ComponentSpec{ Type: docs.TypeOutput, Name: k, Summary: v.Summary, Description: v.Description, Footnotes: v.Footnotes, Categories: v.Categories, Config: conf, Examples: v.Examples, Status: v.Status, Version: v.Version, } spec.Description = output.Description(v.Async, v.Batches, spec.Description) fn(v.constructor, spec) } } // Constructors is a map of all output types with their specs. var Constructors = map[string]TypeSpec{} //------------------------------------------------------------------------------ // String constants representing each output type. // Deprecated: Do not add new components here. Instead, use the public plugin // APIs. 
Examples can be found in: ./internal/impl const ( TypeAMQP09 = "amqp_0_9" TypeAMQP1 = "amqp_1" TypeAWSDynamoDB = "aws_dynamodb" TypeAWSKinesis = "aws_kinesis" TypeAWSKinesisFirehose = "aws_kinesis_firehose" TypeAWSS3 = "aws_s3" TypeAWSSNS = "aws_sns" TypeAWSSQS = "aws_sqs" TypeAzureBlobStorage = "azure_blob_storage" TypeAzureQueueStorage = "azure_queue_storage" TypeAzureTableStorage = "azure_table_storage" TypeBroker = "broker" TypeCache = "cache" TypeCassandra = "cassandra" TypeDrop = "drop" TypeDropOn = "drop_on" TypeDynamic = "dynamic" TypeDynamoDB = "dynamodb" TypeElasticsearch = "elasticsearch" TypeFallback = "fallback" TypeFile = "file" TypeGCPCloudStorage = "gcp_cloud_storage" TypeGCPPubSub = "gcp_pubsub" TypeHDFS = "hdfs" TypeHTTPClient = "http_client" TypeHTTPServer = "http_server" TypeInproc = "inproc" TypeKafka = "kafka" TypeMongoDB = "mongodb" TypeMQTT = "mqtt" TypeNanomsg = "nanomsg" TypeNATS = "nats" TypeNATSJetStream = "nats_jetstream" TypeNATSStream = "nats_stream" TypeNSQ = "nsq" TypeRedisHash = "redis_hash" TypeRedisList = "redis_list" TypeRedisPubSub = "redis_pubsub" TypeRedisStreams = "redis_streams" TypeReject = "reject" TypeResource = "resource" TypeRetry = "retry" TypeSFTP = "sftp" TypeSTDOUT = "stdout" TypeSubprocess = "subprocess" TypeSwitch = "switch" TypeSyncResponse = "sync_response" TypeSocket = "socket" TypeWebsocket = "websocket" ) //------------------------------------------------------------------------------ // Config is the all encompassing configuration struct for all output types. // Deprecated: Do not add new components here. Instead, use the public plugin // APIs. Examples can be found in: ./internal/impl type Config struct { Label string `json:"label" yaml:"label"` Type string `json:"type" yaml:"type"` AMQP09 AMQPConfig `json:"amqp_0_9" yaml:"amqp_0_9"` AMQP1 AMQP1Config `json:"amqp_1" yaml:"amqp_1"` AWSDynamoDB DynamoDBConfig `json:"aws_dynamodb" yaml:"aws_dynamodb"` AWSKinesis KinesisConfig `json:"aws_kinesis" yaml:"aws_kinesis"` AWSKinesisFirehose KinesisFirehoseConfig `json:"aws_kinesis_firehose" yaml:"aws_kinesis_firehose"` AWSS3 AmazonS3Config `json:"aws_s3" yaml:"aws_s3"` AWSSNS SNSConfig `json:"aws_sns" yaml:"aws_sns"` AWSSQS AmazonSQSConfig `json:"aws_sqs" yaml:"aws_sqs"` AzureBlobStorage AzureBlobStorageConfig `json:"azure_blob_storage" yaml:"azure_blob_storage"` AzureQueueStorage AzureQueueStorageConfig `json:"azure_queue_storage" yaml:"azure_queue_storage"` AzureTableStorage AzureTableStorageConfig `json:"azure_table_storage" yaml:"azure_table_storage"` Broker BrokerConfig `json:"broker" yaml:"broker"` Cache CacheConfig `json:"cache" yaml:"cache"` Cassandra CassandraConfig `json:"cassandra" yaml:"cassandra"` Drop DropConfig `json:"drop" yaml:"drop"` DropOn DropOnConfig `json:"drop_on" yaml:"drop_on"` Dynamic DynamicConfig `json:"dynamic" yaml:"dynamic"` Elasticsearch ElasticsearchConfig `json:"elasticsearch" yaml:"elasticsearch"` Fallback TryConfig `json:"fallback" yaml:"fallback"` File FileConfig `json:"file" yaml:"file"` GCPCloudStorage GCPCloudStorageConfig `json:"gcp_cloud_storage" yaml:"gcp_cloud_storage"` GCPPubSub GCPPubSubConfig `json:"gcp_pubsub" yaml:"gcp_pubsub"` HDFS HDFSConfig `json:"hdfs" yaml:"hdfs"` HTTPClient HTTPClientConfig `json:"http_client" yaml:"http_client"` HTTPServer HTTPServerConfig `json:"http_server" yaml:"http_server"` Inproc string `json:"inproc" yaml:"inproc"` Kafka KafkaConfig `json:"kafka" yaml:"kafka"` MongoDB MongoDBConfig `json:"mongodb" yaml:"mongodb"` MQTT MQTTConfig `json:"mqtt" yaml:"mqtt"` Nanomsg 
NanomsgConfig `json:"nanomsg" yaml:"nanomsg"` NATS NATSConfig `json:"nats" yaml:"nats"` NATSStream NATSStreamConfig `json:"nats_stream" yaml:"nats_stream"` NSQ NSQConfig `json:"nsq" yaml:"nsq"` Plugin interface{} `json:"plugin,omitempty" yaml:"plugin,omitempty"` RedisHash RedisHashConfig `json:"redis_hash" yaml:"redis_hash"` RedisList RedisListConfig `json:"redis_list" yaml:"redis_list"` RedisPubSub RedisPubSubConfig `json:"redis_pubsub" yaml:"redis_pubsub"` RedisStreams RedisStreamsConfig `json:"redis_streams" yaml:"redis_streams"` Reject string `json:"reject" yaml:"reject"` Resource string `json:"resource" yaml:"resource"` Retry RetryConfig `json:"retry" yaml:"retry"` SFTP SFTPConfig `json:"sftp" yaml:"sftp"` STDOUT STDOUTConfig `json:"stdout" yaml:"stdout"` Subprocess SubprocessConfig `json:"subprocess" yaml:"subprocess"` Switch SwitchConfig `json:"switch" yaml:"switch"` SyncResponse struct{} `json:"sync_response" yaml:"sync_response"` Socket SocketConfig `json:"socket" yaml:"socket"` Websocket WebsocketConfig `json:"websocket" yaml:"websocket"` Processors []processor.Config `json:"processors" yaml:"processors"` } // NewConfig returns a configuration struct fully populated with default values. // Deprecated: Do not add new components here. Instead, use the public plugin // APIs. Examples can be found in: ./internal/impl func NewConfig() Config { return Config{ Label: "", Type: "stdout", AMQP09: NewAMQPConfig(), AMQP1: NewAMQP1Config(), AWSDynamoDB: NewDynamoDBConfig(), AWSKinesis: NewKinesisConfig(), AWSKinesisFirehose: NewKinesisFirehoseConfig(), AWSS3: NewAmazonS3Config(), AWSSNS: NewSNSConfig(), AWSSQS: NewAmazonSQSConfig(), AzureBlobStorage: NewAzureBlobStorageConfig(), AzureQueueStorage: NewAzureQueueStorageConfig(), AzureTableStorage: NewAzureTableStorageConfig(), Broker: NewBrokerConfig(), Cache: NewCacheConfig(), Cassandra: NewCassandraConfig(), Drop: NewDropConfig(), DropOn: NewDropOnConfig(), Dynamic: NewDynamicConfig(), Elasticsearch: NewElasticsearchConfig(), Fallback: NewTryConfig(), File: NewFileConfig(), GCPCloudStorage: NewGCPCloudStorageConfig(), GCPPubSub: NewGCPPubSubConfig(), HDFS: NewHDFSConfig(), HTTPClient: NewHTTPClientConfig(), HTTPServer: NewHTTPServerConfig(), Inproc: "", Kafka: NewKafkaConfig(), MQTT: NewMQTTConfig(), MongoDB: NewMongoDBConfig(), Nanomsg: NewNanomsgConfig(), NATS: NewNATSConfig(), NATSStream: NewNATSStreamConfig(), NSQ: NewNSQConfig(), Plugin: nil, RedisHash: NewRedisHashConfig(), RedisList: NewRedisListConfig(), RedisPubSub: NewRedisPubSubConfig(), RedisStreams: NewRedisStreamsConfig(), Reject: "", Resource: "", Retry: NewRetryConfig(), SFTP: NewSFTPConfig(), STDOUT: NewSTDOUTConfig(), Subprocess: NewSubprocessConfig(), Switch: NewSwitchConfig(), SyncResponse: struct{}{}, Socket: NewSocketConfig(), Websocket: NewWebsocketConfig(), Processors: []processor.Config{}, } } //------------------------------------------------------------------------------ // UnmarshalYAML ensures that when parsing configs that are in a map or slice // the default values are still applied. 
func (conf *Config) UnmarshalYAML(value *yaml.Node) error { type confAlias Config aliased := confAlias(NewConfig()) err := value.Decode(&aliased) if err != nil { return fmt.Errorf("line %v: %v", value.Line, err) } var spec docs.ComponentSpec if aliased.Type, spec, err = docs.GetInferenceCandidateFromYAML(docs.DeprecatedProvider, docs.TypeOutput, value); err != nil { return fmt.Errorf("line %v: %w", value.Line, err) } if spec.Plugin { pluginNode, err := docs.GetPluginConfigYAML(aliased.Type, value) if err != nil { return fmt.Errorf("line %v: %v", value.Line, err) } aliased.Plugin = &pluginNode } else { aliased.Plugin = nil } *conf = Config(aliased) return nil } //------------------------------------------------------------------------------ // New creates an output type based on an output configuration. func New( conf Config, mgr interop.Manager, log log.Modular, stats metrics.Type, pipelines ...iprocessor.PipelineConstructorFunc, ) (output.Streamed, error) { if mgrV2, ok := mgr.(interface { NewOutput(Config, ...iprocessor.PipelineConstructorFunc) (output.Streamed, error) }); ok { return mgrV2.NewOutput(conf, pipelines...) } if c, ok := Constructors[conf.Type]; ok { return c.constructor(conf, mgr, log, stats, pipelines...) } return nil, component.ErrInvalidType("output", conf.Type) }
Status docs.Status
random_line_split
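A random_line_split record such as the one above (middle: `Status docs.Status`) simply cuts the file at an arbitrary line. One plausible generator for this split, offered as an assumption since the dataset's actual tooling is not shown:

import random

def split_at_random_line(source: str, rng: random.Random):
    # Pick one line at random; that line becomes the middle to predict.
    lines = source.splitlines(keepends=True)
    i = rng.randrange(len(lines))
    return "".join(lines[:i]), lines[i], "".join(lines[i + 1:])

prefix, middle, suffix = split_at_random_line("a\nb\nc\n", random.Random(0))
assert prefix + middle + suffix == "a\nb\nc\n"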
constructor.go
package output import ( "fmt" "strconv" "gopkg.in/yaml.v3" "github.com/benthosdev/benthos/v4/internal/component" "github.com/benthosdev/benthos/v4/internal/component/metrics" "github.com/benthosdev/benthos/v4/internal/component/output" iprocessor "github.com/benthosdev/benthos/v4/internal/component/processor" "github.com/benthosdev/benthos/v4/internal/docs" "github.com/benthosdev/benthos/v4/internal/interop" "github.com/benthosdev/benthos/v4/internal/log" "github.com/benthosdev/benthos/v4/internal/old/processor" "github.com/benthosdev/benthos/v4/internal/pipeline" ) // TypeSpec is a constructor and a usage description for each output type. type TypeSpec struct { constructor ConstructorFunc // Async indicates whether this output benefits from sending multiple // messages asynchronously over the protocol. Async bool // Batches indicates whether this output benefits from batching of messages. Batches bool Status docs.Status Summary string Description string Categories []string Footnotes string Config docs.FieldSpec Examples []docs.AnnotatedExample Version string } // AppendProcessorsFromConfig takes a variant arg of pipeline constructor // functions and returns a new slice of them where the processors of the // provided output configuration will also be initialized. func AppendProcessorsFromConfig(conf Config, mgr interop.Manager, pipelines ...iprocessor.PipelineConstructorFunc) []iprocessor.PipelineConstructorFunc { if len(conf.Processors) > 0 { pipelines = append(pipelines, []iprocessor.PipelineConstructorFunc{func() (iprocessor.Pipeline, error) { processors := make([]iprocessor.V1, len(conf.Processors)) for j, procConf := range conf.Processors { var err error pMgr := mgr.IntoPath("processors", strconv.Itoa(j)) processors[j], err = processor.New(procConf, pMgr) if err != nil { return nil, err } } return pipeline.NewProcessor(processors...), nil }}...) } return pipelines } func
(fn func(Config, interop.Manager, log.Modular, metrics.Type) (output.Streamed, error)) ConstructorFunc { return func( conf Config, mgr interop.Manager, log log.Modular, stats metrics.Type, pipelines ...iprocessor.PipelineConstructorFunc, ) (output.Streamed, error) { output, err := fn(conf, mgr, log, stats) if err != nil { return nil, err } pipelines = AppendProcessorsFromConfig(conf, mgr, pipelines...) return WrapWithPipelines(output, pipelines...) } } // ConstructorFunc is a func signature able to construct an output. type ConstructorFunc func(Config, interop.Manager, log.Modular, metrics.Type, ...iprocessor.PipelineConstructorFunc) (output.Streamed, error) // WalkConstructors iterates each component constructor. func WalkConstructors(fn func(ConstructorFunc, docs.ComponentSpec)) { inferred := docs.ComponentFieldsFromConf(NewConfig()) for k, v := range Constructors { conf := v.Config conf.Children = conf.Children.DefaultAndTypeFrom(inferred[k]) spec := docs.ComponentSpec{ Type: docs.TypeOutput, Name: k, Summary: v.Summary, Description: v.Description, Footnotes: v.Footnotes, Categories: v.Categories, Config: conf, Examples: v.Examples, Status: v.Status, Version: v.Version, } spec.Description = output.Description(v.Async, v.Batches, spec.Description) fn(v.constructor, spec) } } // Constructors is a map of all output types with their specs. var Constructors = map[string]TypeSpec{} //------------------------------------------------------------------------------ // String constants representing each output type. // Deprecated: Do not add new components here. Instead, use the public plugin // APIs. Examples can be found in: ./internal/impl const ( TypeAMQP09 = "amqp_0_9" TypeAMQP1 = "amqp_1" TypeAWSDynamoDB = "aws_dynamodb" TypeAWSKinesis = "aws_kinesis" TypeAWSKinesisFirehose = "aws_kinesis_firehose" TypeAWSS3 = "aws_s3" TypeAWSSNS = "aws_sns" TypeAWSSQS = "aws_sqs" TypeAzureBlobStorage = "azure_blob_storage" TypeAzureQueueStorage = "azure_queue_storage" TypeAzureTableStorage = "azure_table_storage" TypeBroker = "broker" TypeCache = "cache" TypeCassandra = "cassandra" TypeDrop = "drop" TypeDropOn = "drop_on" TypeDynamic = "dynamic" TypeDynamoDB = "dynamodb" TypeElasticsearch = "elasticsearch" TypeFallback = "fallback" TypeFile = "file" TypeGCPCloudStorage = "gcp_cloud_storage" TypeGCPPubSub = "gcp_pubsub" TypeHDFS = "hdfs" TypeHTTPClient = "http_client" TypeHTTPServer = "http_server" TypeInproc = "inproc" TypeKafka = "kafka" TypeMongoDB = "mongodb" TypeMQTT = "mqtt" TypeNanomsg = "nanomsg" TypeNATS = "nats" TypeNATSJetStream = "nats_jetstream" TypeNATSStream = "nats_stream" TypeNSQ = "nsq" TypeRedisHash = "redis_hash" TypeRedisList = "redis_list" TypeRedisPubSub = "redis_pubsub" TypeRedisStreams = "redis_streams" TypeReject = "reject" TypeResource = "resource" TypeRetry = "retry" TypeSFTP = "sftp" TypeSTDOUT = "stdout" TypeSubprocess = "subprocess" TypeSwitch = "switch" TypeSyncResponse = "sync_response" TypeSocket = "socket" TypeWebsocket = "websocket" ) //------------------------------------------------------------------------------ // Config is the all encompassing configuration struct for all output types. // Deprecated: Do not add new components here. Instead, use the public plugin // APIs. 
Examples can be found in: ./internal/impl type Config struct { Label string `json:"label" yaml:"label"` Type string `json:"type" yaml:"type"` AMQP09 AMQPConfig `json:"amqp_0_9" yaml:"amqp_0_9"` AMQP1 AMQP1Config `json:"amqp_1" yaml:"amqp_1"` AWSDynamoDB DynamoDBConfig `json:"aws_dynamodb" yaml:"aws_dynamodb"` AWSKinesis KinesisConfig `json:"aws_kinesis" yaml:"aws_kinesis"` AWSKinesisFirehose KinesisFirehoseConfig `json:"aws_kinesis_firehose" yaml:"aws_kinesis_firehose"` AWSS3 AmazonS3Config `json:"aws_s3" yaml:"aws_s3"` AWSSNS SNSConfig `json:"aws_sns" yaml:"aws_sns"` AWSSQS AmazonSQSConfig `json:"aws_sqs" yaml:"aws_sqs"` AzureBlobStorage AzureBlobStorageConfig `json:"azure_blob_storage" yaml:"azure_blob_storage"` AzureQueueStorage AzureQueueStorageConfig `json:"azure_queue_storage" yaml:"azure_queue_storage"` AzureTableStorage AzureTableStorageConfig `json:"azure_table_storage" yaml:"azure_table_storage"` Broker BrokerConfig `json:"broker" yaml:"broker"` Cache CacheConfig `json:"cache" yaml:"cache"` Cassandra CassandraConfig `json:"cassandra" yaml:"cassandra"` Drop DropConfig `json:"drop" yaml:"drop"` DropOn DropOnConfig `json:"drop_on" yaml:"drop_on"` Dynamic DynamicConfig `json:"dynamic" yaml:"dynamic"` Elasticsearch ElasticsearchConfig `json:"elasticsearch" yaml:"elasticsearch"` Fallback TryConfig `json:"fallback" yaml:"fallback"` File FileConfig `json:"file" yaml:"file"` GCPCloudStorage GCPCloudStorageConfig `json:"gcp_cloud_storage" yaml:"gcp_cloud_storage"` GCPPubSub GCPPubSubConfig `json:"gcp_pubsub" yaml:"gcp_pubsub"` HDFS HDFSConfig `json:"hdfs" yaml:"hdfs"` HTTPClient HTTPClientConfig `json:"http_client" yaml:"http_client"` HTTPServer HTTPServerConfig `json:"http_server" yaml:"http_server"` Inproc string `json:"inproc" yaml:"inproc"` Kafka KafkaConfig `json:"kafka" yaml:"kafka"` MongoDB MongoDBConfig `json:"mongodb" yaml:"mongodb"` MQTT MQTTConfig `json:"mqtt" yaml:"mqtt"` Nanomsg NanomsgConfig `json:"nanomsg" yaml:"nanomsg"` NATS NATSConfig `json:"nats" yaml:"nats"` NATSStream NATSStreamConfig `json:"nats_stream" yaml:"nats_stream"` NSQ NSQConfig `json:"nsq" yaml:"nsq"` Plugin interface{} `json:"plugin,omitempty" yaml:"plugin,omitempty"` RedisHash RedisHashConfig `json:"redis_hash" yaml:"redis_hash"` RedisList RedisListConfig `json:"redis_list" yaml:"redis_list"` RedisPubSub RedisPubSubConfig `json:"redis_pubsub" yaml:"redis_pubsub"` RedisStreams RedisStreamsConfig `json:"redis_streams" yaml:"redis_streams"` Reject string `json:"reject" yaml:"reject"` Resource string `json:"resource" yaml:"resource"` Retry RetryConfig `json:"retry" yaml:"retry"` SFTP SFTPConfig `json:"sftp" yaml:"sftp"` STDOUT STDOUTConfig `json:"stdout" yaml:"stdout"` Subprocess SubprocessConfig `json:"subprocess" yaml:"subprocess"` Switch SwitchConfig `json:"switch" yaml:"switch"` SyncResponse struct{} `json:"sync_response" yaml:"sync_response"` Socket SocketConfig `json:"socket" yaml:"socket"` Websocket WebsocketConfig `json:"websocket" yaml:"websocket"` Processors []processor.Config `json:"processors" yaml:"processors"` } // NewConfig returns a configuration struct fully populated with default values. // Deprecated: Do not add new components here. Instead, use the public plugin // APIs. 
Examples can be found in: ./internal/impl func NewConfig() Config { return Config{ Label: "", Type: "stdout", AMQP09: NewAMQPConfig(), AMQP1: NewAMQP1Config(), AWSDynamoDB: NewDynamoDBConfig(), AWSKinesis: NewKinesisConfig(), AWSKinesisFirehose: NewKinesisFirehoseConfig(), AWSS3: NewAmazonS3Config(), AWSSNS: NewSNSConfig(), AWSSQS: NewAmazonSQSConfig(), AzureBlobStorage: NewAzureBlobStorageConfig(), AzureQueueStorage: NewAzureQueueStorageConfig(), AzureTableStorage: NewAzureTableStorageConfig(), Broker: NewBrokerConfig(), Cache: NewCacheConfig(), Cassandra: NewCassandraConfig(), Drop: NewDropConfig(), DropOn: NewDropOnConfig(), Dynamic: NewDynamicConfig(), Elasticsearch: NewElasticsearchConfig(), Fallback: NewTryConfig(), File: NewFileConfig(), GCPCloudStorage: NewGCPCloudStorageConfig(), GCPPubSub: NewGCPPubSubConfig(), HDFS: NewHDFSConfig(), HTTPClient: NewHTTPClientConfig(), HTTPServer: NewHTTPServerConfig(), Inproc: "", Kafka: NewKafkaConfig(), MQTT: NewMQTTConfig(), MongoDB: NewMongoDBConfig(), Nanomsg: NewNanomsgConfig(), NATS: NewNATSConfig(), NATSStream: NewNATSStreamConfig(), NSQ: NewNSQConfig(), Plugin: nil, RedisHash: NewRedisHashConfig(), RedisList: NewRedisListConfig(), RedisPubSub: NewRedisPubSubConfig(), RedisStreams: NewRedisStreamsConfig(), Reject: "", Resource: "", Retry: NewRetryConfig(), SFTP: NewSFTPConfig(), STDOUT: NewSTDOUTConfig(), Subprocess: NewSubprocessConfig(), Switch: NewSwitchConfig(), SyncResponse: struct{}{}, Socket: NewSocketConfig(), Websocket: NewWebsocketConfig(), Processors: []processor.Config{}, } } //------------------------------------------------------------------------------ // UnmarshalYAML ensures that when parsing configs that are in a map or slice // the default values are still applied. func (conf *Config) UnmarshalYAML(value *yaml.Node) error { type confAlias Config aliased := confAlias(NewConfig()) err := value.Decode(&aliased) if err != nil { return fmt.Errorf("line %v: %v", value.Line, err) } var spec docs.ComponentSpec if aliased.Type, spec, err = docs.GetInferenceCandidateFromYAML(docs.DeprecatedProvider, docs.TypeOutput, value); err != nil { return fmt.Errorf("line %v: %w", value.Line, err) } if spec.Plugin { pluginNode, err := docs.GetPluginConfigYAML(aliased.Type, value) if err != nil { return fmt.Errorf("line %v: %v", value.Line, err) } aliased.Plugin = &pluginNode } else { aliased.Plugin = nil } *conf = Config(aliased) return nil } //------------------------------------------------------------------------------ // New creates an output type based on an output configuration. func New( conf Config, mgr interop.Manager, log log.Modular, stats metrics.Type, pipelines ...iprocessor.PipelineConstructorFunc, ) (output.Streamed, error) { if mgrV2, ok := mgr.(interface { NewOutput(Config, ...iprocessor.PipelineConstructorFunc) (output.Streamed, error) }); ok { return mgrV2.NewOutput(conf, pipelines...) } if c, ok := Constructors[conf.Type]; ok { return c.constructor(conf, mgr, log, stats, pipelines...) } return nil, component.ErrInvalidType("output", conf.Type) }
fromSimpleConstructor
identifier_name
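identifier_name records mask a single name instead of a line; above, the hidden middle is the Go function name fromSimpleConstructor. A simplified regex-based sketch of producing such a split (a real pipeline would more likely walk a parse tree, so treat this as illustrative):

import re

def mask_first_function_name(source: str):
    # Hide the name of the first plain (non-method) Go function declaration.
    m = re.search(r"\bfunc (\w+)\(", source)
    if m is None:
        return None
    return source[: m.start(1)], m.group(1), source[m.end(1):]

src = "package output\n\nfunc fromSimpleConstructor(x int) int { return x }\n"
prefix, middle, suffix = mask_first_function_name(src)
assert middle == "fromSimpleConstructor"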
constructor.go
package output import ( "fmt" "strconv" "gopkg.in/yaml.v3" "github.com/benthosdev/benthos/v4/internal/component" "github.com/benthosdev/benthos/v4/internal/component/metrics" "github.com/benthosdev/benthos/v4/internal/component/output" iprocessor "github.com/benthosdev/benthos/v4/internal/component/processor" "github.com/benthosdev/benthos/v4/internal/docs" "github.com/benthosdev/benthos/v4/internal/interop" "github.com/benthosdev/benthos/v4/internal/log" "github.com/benthosdev/benthos/v4/internal/old/processor" "github.com/benthosdev/benthos/v4/internal/pipeline" ) // TypeSpec is a constructor and a usage description for each output type. type TypeSpec struct { constructor ConstructorFunc // Async indicates whether this output benefits from sending multiple // messages asynchronously over the protocol. Async bool // Batches indicates whether this output benefits from batching of messages. Batches bool Status docs.Status Summary string Description string Categories []string Footnotes string Config docs.FieldSpec Examples []docs.AnnotatedExample Version string } // AppendProcessorsFromConfig takes a variant arg of pipeline constructor // functions and returns a new slice of them where the processors of the // provided output configuration will also be initialized. func AppendProcessorsFromConfig(conf Config, mgr interop.Manager, pipelines ...iprocessor.PipelineConstructorFunc) []iprocessor.PipelineConstructorFunc { if len(conf.Processors) > 0 { pipelines = append(pipelines, []iprocessor.PipelineConstructorFunc{func() (iprocessor.Pipeline, error) { processors := make([]iprocessor.V1, len(conf.Processors)) for j, procConf := range conf.Processors { var err error pMgr := mgr.IntoPath("processors", strconv.Itoa(j)) processors[j], err = processor.New(procConf, pMgr) if err != nil { return nil, err } } return pipeline.NewProcessor(processors...), nil }}...) } return pipelines } func fromSimpleConstructor(fn func(Config, interop.Manager, log.Modular, metrics.Type) (output.Streamed, error)) ConstructorFunc { return func( conf Config, mgr interop.Manager, log log.Modular, stats metrics.Type, pipelines ...iprocessor.PipelineConstructorFunc, ) (output.Streamed, error) { output, err := fn(conf, mgr, log, stats) if err != nil { return nil, err } pipelines = AppendProcessorsFromConfig(conf, mgr, pipelines...) return WrapWithPipelines(output, pipelines...) } } // ConstructorFunc is a func signature able to construct an output. type ConstructorFunc func(Config, interop.Manager, log.Modular, metrics.Type, ...iprocessor.PipelineConstructorFunc) (output.Streamed, error) // WalkConstructors iterates each component constructor. func WalkConstructors(fn func(ConstructorFunc, docs.ComponentSpec)) { inferred := docs.ComponentFieldsFromConf(NewConfig()) for k, v := range Constructors { conf := v.Config conf.Children = conf.Children.DefaultAndTypeFrom(inferred[k]) spec := docs.ComponentSpec{ Type: docs.TypeOutput, Name: k, Summary: v.Summary, Description: v.Description, Footnotes: v.Footnotes, Categories: v.Categories, Config: conf, Examples: v.Examples, Status: v.Status, Version: v.Version, } spec.Description = output.Description(v.Async, v.Batches, spec.Description) fn(v.constructor, spec) } } // Constructors is a map of all output types with their specs. var Constructors = map[string]TypeSpec{} //------------------------------------------------------------------------------ // String constants representing each output type. // Deprecated: Do not add new components here. Instead, use the public plugin // APIs. 
Examples can be found in: ./internal/impl const ( TypeAMQP09 = "amqp_0_9" TypeAMQP1 = "amqp_1" TypeAWSDynamoDB = "aws_dynamodb" TypeAWSKinesis = "aws_kinesis" TypeAWSKinesisFirehose = "aws_kinesis_firehose" TypeAWSS3 = "aws_s3" TypeAWSSNS = "aws_sns" TypeAWSSQS = "aws_sqs" TypeAzureBlobStorage = "azure_blob_storage" TypeAzureQueueStorage = "azure_queue_storage" TypeAzureTableStorage = "azure_table_storage" TypeBroker = "broker" TypeCache = "cache" TypeCassandra = "cassandra" TypeDrop = "drop" TypeDropOn = "drop_on" TypeDynamic = "dynamic" TypeDynamoDB = "dynamodb" TypeElasticsearch = "elasticsearch" TypeFallback = "fallback" TypeFile = "file" TypeGCPCloudStorage = "gcp_cloud_storage" TypeGCPPubSub = "gcp_pubsub" TypeHDFS = "hdfs" TypeHTTPClient = "http_client" TypeHTTPServer = "http_server" TypeInproc = "inproc" TypeKafka = "kafka" TypeMongoDB = "mongodb" TypeMQTT = "mqtt" TypeNanomsg = "nanomsg" TypeNATS = "nats" TypeNATSJetStream = "nats_jetstream" TypeNATSStream = "nats_stream" TypeNSQ = "nsq" TypeRedisHash = "redis_hash" TypeRedisList = "redis_list" TypeRedisPubSub = "redis_pubsub" TypeRedisStreams = "redis_streams" TypeReject = "reject" TypeResource = "resource" TypeRetry = "retry" TypeSFTP = "sftp" TypeSTDOUT = "stdout" TypeSubprocess = "subprocess" TypeSwitch = "switch" TypeSyncResponse = "sync_response" TypeSocket = "socket" TypeWebsocket = "websocket" ) //------------------------------------------------------------------------------ // Config is the all encompassing configuration struct for all output types. // Deprecated: Do not add new components here. Instead, use the public plugin // APIs. Examples can be found in: ./internal/impl type Config struct { Label string `json:"label" yaml:"label"` Type string `json:"type" yaml:"type"` AMQP09 AMQPConfig `json:"amqp_0_9" yaml:"amqp_0_9"` AMQP1 AMQP1Config `json:"amqp_1" yaml:"amqp_1"` AWSDynamoDB DynamoDBConfig `json:"aws_dynamodb" yaml:"aws_dynamodb"` AWSKinesis KinesisConfig `json:"aws_kinesis" yaml:"aws_kinesis"` AWSKinesisFirehose KinesisFirehoseConfig `json:"aws_kinesis_firehose" yaml:"aws_kinesis_firehose"` AWSS3 AmazonS3Config `json:"aws_s3" yaml:"aws_s3"` AWSSNS SNSConfig `json:"aws_sns" yaml:"aws_sns"` AWSSQS AmazonSQSConfig `json:"aws_sqs" yaml:"aws_sqs"` AzureBlobStorage AzureBlobStorageConfig `json:"azure_blob_storage" yaml:"azure_blob_storage"` AzureQueueStorage AzureQueueStorageConfig `json:"azure_queue_storage" yaml:"azure_queue_storage"` AzureTableStorage AzureTableStorageConfig `json:"azure_table_storage" yaml:"azure_table_storage"` Broker BrokerConfig `json:"broker" yaml:"broker"` Cache CacheConfig `json:"cache" yaml:"cache"` Cassandra CassandraConfig `json:"cassandra" yaml:"cassandra"` Drop DropConfig `json:"drop" yaml:"drop"` DropOn DropOnConfig `json:"drop_on" yaml:"drop_on"` Dynamic DynamicConfig `json:"dynamic" yaml:"dynamic"` Elasticsearch ElasticsearchConfig `json:"elasticsearch" yaml:"elasticsearch"` Fallback TryConfig `json:"fallback" yaml:"fallback"` File FileConfig `json:"file" yaml:"file"` GCPCloudStorage GCPCloudStorageConfig `json:"gcp_cloud_storage" yaml:"gcp_cloud_storage"` GCPPubSub GCPPubSubConfig `json:"gcp_pubsub" yaml:"gcp_pubsub"` HDFS HDFSConfig `json:"hdfs" yaml:"hdfs"` HTTPClient HTTPClientConfig `json:"http_client" yaml:"http_client"` HTTPServer HTTPServerConfig `json:"http_server" yaml:"http_server"` Inproc string `json:"inproc" yaml:"inproc"` Kafka KafkaConfig `json:"kafka" yaml:"kafka"` MongoDB MongoDBConfig `json:"mongodb" yaml:"mongodb"` MQTT MQTTConfig `json:"mqtt" yaml:"mqtt"` Nanomsg 
NanomsgConfig `json:"nanomsg" yaml:"nanomsg"` NATS NATSConfig `json:"nats" yaml:"nats"` NATSStream NATSStreamConfig `json:"nats_stream" yaml:"nats_stream"` NSQ NSQConfig `json:"nsq" yaml:"nsq"` Plugin interface{} `json:"plugin,omitempty" yaml:"plugin,omitempty"` RedisHash RedisHashConfig `json:"redis_hash" yaml:"redis_hash"` RedisList RedisListConfig `json:"redis_list" yaml:"redis_list"` RedisPubSub RedisPubSubConfig `json:"redis_pubsub" yaml:"redis_pubsub"` RedisStreams RedisStreamsConfig `json:"redis_streams" yaml:"redis_streams"` Reject string `json:"reject" yaml:"reject"` Resource string `json:"resource" yaml:"resource"` Retry RetryConfig `json:"retry" yaml:"retry"` SFTP SFTPConfig `json:"sftp" yaml:"sftp"` STDOUT STDOUTConfig `json:"stdout" yaml:"stdout"` Subprocess SubprocessConfig `json:"subprocess" yaml:"subprocess"` Switch SwitchConfig `json:"switch" yaml:"switch"` SyncResponse struct{} `json:"sync_response" yaml:"sync_response"` Socket SocketConfig `json:"socket" yaml:"socket"` Websocket WebsocketConfig `json:"websocket" yaml:"websocket"` Processors []processor.Config `json:"processors" yaml:"processors"` } // NewConfig returns a configuration struct fully populated with default values. // Deprecated: Do not add new components here. Instead, use the public plugin // APIs. Examples can be found in: ./internal/impl func NewConfig() Config { return Config{ Label: "", Type: "stdout", AMQP09: NewAMQPConfig(), AMQP1: NewAMQP1Config(), AWSDynamoDB: NewDynamoDBConfig(), AWSKinesis: NewKinesisConfig(), AWSKinesisFirehose: NewKinesisFirehoseConfig(), AWSS3: NewAmazonS3Config(), AWSSNS: NewSNSConfig(), AWSSQS: NewAmazonSQSConfig(), AzureBlobStorage: NewAzureBlobStorageConfig(), AzureQueueStorage: NewAzureQueueStorageConfig(), AzureTableStorage: NewAzureTableStorageConfig(), Broker: NewBrokerConfig(), Cache: NewCacheConfig(), Cassandra: NewCassandraConfig(), Drop: NewDropConfig(), DropOn: NewDropOnConfig(), Dynamic: NewDynamicConfig(), Elasticsearch: NewElasticsearchConfig(), Fallback: NewTryConfig(), File: NewFileConfig(), GCPCloudStorage: NewGCPCloudStorageConfig(), GCPPubSub: NewGCPPubSubConfig(), HDFS: NewHDFSConfig(), HTTPClient: NewHTTPClientConfig(), HTTPServer: NewHTTPServerConfig(), Inproc: "", Kafka: NewKafkaConfig(), MQTT: NewMQTTConfig(), MongoDB: NewMongoDBConfig(), Nanomsg: NewNanomsgConfig(), NATS: NewNATSConfig(), NATSStream: NewNATSStreamConfig(), NSQ: NewNSQConfig(), Plugin: nil, RedisHash: NewRedisHashConfig(), RedisList: NewRedisListConfig(), RedisPubSub: NewRedisPubSubConfig(), RedisStreams: NewRedisStreamsConfig(), Reject: "", Resource: "", Retry: NewRetryConfig(), SFTP: NewSFTPConfig(), STDOUT: NewSTDOUTConfig(), Subprocess: NewSubprocessConfig(), Switch: NewSwitchConfig(), SyncResponse: struct{}{}, Socket: NewSocketConfig(), Websocket: NewWebsocketConfig(), Processors: []processor.Config{}, } } //------------------------------------------------------------------------------ // UnmarshalYAML ensures that when parsing configs that are in a map or slice // the default values are still applied. 
func (conf *Config) UnmarshalYAML(value *yaml.Node) error { type confAlias Config aliased := confAlias(NewConfig()) err := value.Decode(&aliased) if err != nil { return fmt.Errorf("line %v: %v", value.Line, err) } var spec docs.ComponentSpec if aliased.Type, spec, err = docs.GetInferenceCandidateFromYAML(docs.DeprecatedProvider, docs.TypeOutput, value); err != nil { return fmt.Errorf("line %v: %w", value.Line, err) } if spec.Plugin { pluginNode, err := docs.GetPluginConfigYAML(aliased.Type, value) if err != nil { return fmt.Errorf("line %v: %v", value.Line, err) } aliased.Plugin = &pluginNode } else { aliased.Plugin = nil } *conf = Config(aliased) return nil } //------------------------------------------------------------------------------ // New creates an output type based on an output configuration. func New( conf Config, mgr interop.Manager, log log.Modular, stats metrics.Type, pipelines ...iprocessor.PipelineConstructorFunc, ) (output.Streamed, error)
{ if mgrV2, ok := mgr.(interface { NewOutput(Config, ...iprocessor.PipelineConstructorFunc) (output.Streamed, error) }); ok { return mgrV2.NewOutput(conf, pipelines...) } if c, ok := Constructors[conf.Type]; ok { return c.constructor(conf, mgr, log, stats, pipelines...) } return nil, component.ErrInvalidType("output", conf.Type) }
identifier_body
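Beyond the split itself, the identifier_body record above contains a pattern worth noting: Config.UnmarshalYAML decodes into confAlias(NewConfig()), a type alias pre-filled with defaults, so any field the YAML omits keeps its default instead of zeroing out. The same idea in Python, with illustrative field names rather than Benthos's full schema:

from dataclasses import dataclass, replace

@dataclass
class OutputConfig:
    # Stand-ins for the defaults that NewConfig() supplies.
    label: str = ""
    type: str = "stdout"

def decode(partial: dict) -> OutputConfig:
    # Overlay only the user-supplied keys on a fully defaulted value,
    # the analog of yaml-decoding into confAlias(NewConfig()).
    return replace(OutputConfig(), **partial)

print(decode({"type": "kafka"}))  # label keeps its "" default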
assistant.js
 var sexArray = []; var docTypesArray = []; var academicLevelsArray = []; var cropper; $(document).ready(function () { $("#main-menu-assistant").addClass("text-primary"); formValidator.Validate("#create-assistant-form", createAssistant); //formValidator.Validate("#edit-availability-form", editAvailability); loadAssistants(); $(document).on("change", "#create-assistant-img-file", function () { loadAssistantSelectedImage(this); }); $(document).on("click", ".create-assistant-link", function (ev) { ev.preventDefault(); openAssistanFormModal(); }); $(document).on("click", ".edit-assistant-link", function (ev) { ev.preventDefault(); openAssistanFormModal($(this).data("assistant")); }); $(document).on("hidden.bs.modal", "#create-assistant-modal", function () { if (typeof cropper !== 'undefined' && cropper !== null) cropper.destroy(); $("#create-assistant-img").prop("src", USER_IMAGE_PLACEHOLDER); formValidator.ResetForm("#create-assistant-form"); }); $(document).on("keyup", "#searchAssistantInput", function () { $(".assistant-list-thumb").addClass("d-none").removeClass("d-flex"); $(".assistant-list-thumb:contains('" + $(this).val() + "')").removeClass("d-none").addClass("d-flex"); }); }); var openAssistanFormModal = function (assistant = false) { var title = assistant ? "<i class='iconsminds-file-edit with-rotate-icon'></i> Editar Asistente" : "<i class='iconsminds-add-user'></i> Crear Nuevo Asistente"; var label = assistant ? "Editar" : "Crear"; $("#create-assistant-modal-title").html(title); $("#create-assistant-modal-button").html(label); if (assistant) setEditAssistantFormValues(assistant); $("#create-assistant-modal").modal(); }; var setEditAssistantFormValues = function (assistant) { $.each(assistant, function (key, value) { $("#create-assistant-form [name='" + key + "']").val(value); $("#create-assistant-form input[type='checkbox'][name='" + key + "']").prop("checked", value == 1); $("#create-assistant-form textarea[name='" + key + "']").html(value); }); $("#create-assistant-img").attr("src", assistant.basicDataPhoto); } var loadAssistantSelectedImage = async function (input) { if (typeof cropper !== 'undefined' && cropper !== null) cropper.destroy(); var loadedImage = await getInputImageData(input); $("#create-assistant-img").prop("src", loadedImage); var image = document.querySelector('#create-assistant-img'); cropper = new Cropper(image, { aspectRatio: 1, movable: true, dragMode: "move", viewMode: 2 }); } var loadAssistants = async function () { let assistants = await webClient.RequestAsync("assistant/getAssistants", "", webClient.ContentType.DEFAULT); if (assistants.status === REQUEST_STATUS.ERROR) return; if (typeof assistants.data === "undefined" || assistants.data === '' || assistants.data.length === 0) return; showAssistants(assistants.data); }; var showAssistants = function (assistants) { var assistantsHtml = ""; $.each(assistants, function (index, assistant) { assistantsHtml += "<div class='card mb-3 assistant-list-thumb'>" + "<div class='d-flex flex-row'>" + getAssistantThumbImageHtml(assistant) + getAssistantBasicDataHtml(assistant) + "<div class='d-flex p-4'>" + "<button class='btn btn-outline-theme-3 icon-button rotate-icon-click collapsed align-self-center'" + " type='button' data-toggle='collapse' data-target='#assistant-data-collapse-container-" + index + "' aria-expanded='false' aria-controls='q2'>" + "<i class='simple-icon-arrow-down with-rotate-icon'></i>" + "</button>" + "</div>" + "</div>" + getAssistantCollapsedDataHtml(index, assistant) + "</div>"; }); 
$("#assistants-list-container").html(assistantsHtml); initAssistantsCalendars(); }; var initAssistantsCalendars = function () { $.each($(".assistant-availability-calendar"), function (index, calendar) { var assistant = $(calendar).data("assistant"); $(calendar).fullCalendar ({ header: { left: 'prev,next today', center: 'title', right: 'month,agendaWeek,agendaDay,listWeek' }, defaultDate: moment().format("YYYY-MM-DD"), navLinks: true, eventLimit: true, events: assistant.Events, dayClick: function (date, jsEvent, view) { console.log("DAY SELECTED ==> ", date, jsEvent, view); $("#edit-availability-date-title").html(moment(date).format("MMM DD YYYY")); $("#assistant-availability-name").html(assistant.Name); $("#edit-availability-form [name='assistantId']").val(assistant.Id); $("#edit-availability-form [name='AvailabilityDate']").val(moment(date).format("YYYY-MM-DD")); $("#edit-availability-modal").modal(); } }); }); }; var getAssistantThumbImageHtml = function (assistant) { var imageHtml = "<div class='border-right list-thumbnail card-img-left h-auto d-none d-lg-block' " + "style='background: url(" + assistant.basicDataPhoto + ") center no-repeat; background-size: cover; width: 8%;'></div>" + "<div class='border-right list-thumbnail card-img-left w-20 h-auto d-lg-none' " + "style='background: url(" + assistant.basicDataPhoto + ") center no-repeat; background-size: cover;'></div>"; return imageHtml; }; var getAssistantBasicDataHtml = function (assistant) { var assistantName = assistant.basicDataFirstName + " " + assistant.basicDataLastName; var basicDataHtml = "<div class='d-flex flex-grow-1 min-width-zero'>" + "<div class='card-body align-self-center d-flex flex-column flex-lg-row justify-content-between min-width-zero align-items-lg-center'>" + "<a href='' class='w-20 w-sm-100'><p class='list-item-heading mb-0 truncate'>" + assistantName + "</p></a>" + "<p class='mb-0 text-muted w-15 w-sm-100'>" + "<span class='glyph-icon iconsminds-id-card align-text-top' style='font-size: 25px;'></span> " + "<span class='align-middle'>" + assistant.basicDataDocNumber + "</span>" + "</p>" + "<p class='mb-0 text-muted w-15 w-sm-100'>" + "<span class='glyph-icon iconsminds-smartphone-4 align-text-top' style='font-size: 25px;'></span> " + "<span class='align-middle'>" + assistant.personalDataCellphone + "</span>" + "</p>" + "<div class='mb-2 d-md-none'></div>" + "</div>" + "</div>"; //+ getAssistantStatusSelectHtml() return basicDataHtml; }; var getAssistantStatusSelectHtml = function () { var assistantStatusSelect = "<div class='w-15 w-sm-100 form-group m-0'>" + "<select id='inputState' class='form-control'>" + "<option value='1'>Activo</option>" + "</select >" + "</div > "; return assistantStatusSelect; }; var getAssistantCollapsedDataHtml = function (index, assistant) { var assistantAddress = assistant.personalDataAddress + " " + assistant.personalDataAddressComplement + " " + assistant.personalDataAddressLocality; var assistantName = assistant.basicDataFirstName + " " + assistant.basicDataLastName; var assistantCalendarData = { Name: assistantName, Id: assistant.assistantId, Events: assistant.Availability }; var assistatntDataHtml = "<div class='collapse p-3 border-top' id='assistant-data-collapse-container-" + index + "'>" + "<div class='row'>" + "<div class='col-sm-5'>" + "<div class='card'>" + "<div class='card-body'>" + "<h4>Información personal</h4>" + "<a class='edit-assistant-link position-absolute border p-2 rounded-lg' href='' style='top: 15px; right: 15px;' data-assistant='" + 
JSON.stringify(assistant) + "'>" + "<i class='iconsminds-file-edit with-rotate-icon'></i>Editar"
+ "<div class='overflow-auto'><table class='table table-sm table-striped'>" + "<tr><th>Dirección:</th><td>" + assistantAddress + "<td></tr>" + "<tr><th>Email:</th><td>" + assistant.personalDataEmailAddress + "<td></tr>" + "<tr><th>Teléfono:</th><td>" + assistant.personalDataTelephone + "<td></tr>" + "<tr><th>Sexo:</th><td>" + assistant.sex + "<td></tr>" + "</table></div>" + "<h4>Información profesional</h4>" + "<div class='overflow-auto'><table class='table table-sm table-striped'>" + "<tr><th>Estudios:</th><td>" + assistant.School + "</td></tr>" + "<tr><th>Título:</th><td>" + assistant.professionalJobTitle + "</td></tr>" + "<tr><th>Compañía:</th><td>" + assistant.CompanyName + "</td></tr>" + "<tr><th>Fecha ingreso:</th><td>" + assistant.companyBeginDate + "</td></tr>" + "<tr><th>Aspiración salarial:</th><td>" + assistant.professionalSalaryAspiration + "</td></tr>" + "<tr><th>Resumen profesional</th><td>" + assistant.professionalResume + "</td></tr>" + "<tr><th>Valoración del jefe</th><td>" + assistant.bossObservation + "</td></tr>" + getAvailableSkillStatus("Pago quincenal", assistant.paymentFifteen) + getAvailableSkillStatus("Técnica de movilización", assistant.experienceMovility) + "</table></div>" + "<h4>Habilidades</h4>" + "<div class='overflow-auto'><table class='table table-sm table-striped m-0'>" + getAvailableSkillStatus("Cateterismo Vesical", assistant.experienceCateter) + getAvailableSkillStatus("Traqueotomía", assistant.experienceTraqueo) + getAvailableSkillStatus("Medicamentos Intravenosos", assistant.experienceIntraVain) + "</table></div>" + "</div>" + "</div>" + "</div>" + "<div class='col-sm-7'>" + "<h4>Disponibilidad</h4>" + "<div class='card'>" + "<div class='card-body'><div class='assistant-availability-calendar' data-assistant='" + JSON.stringify(assistantCalendarData) + "'></div></div>" + "</div>" + "</div>" + "</div>" + "</div>"; return assistatntDataHtml; }; var getAvailableSkillStatus = function (skillName, skillValue) { var skillStatus = skillValue === "1" ? "<span class='glyph-icon simple-icon-check text-primary' style='font-size: 22px;'></span>" : "<span class='glyph-icon simple-icon-close text-danger' style='font-size: 22px;'></span>"; var skill = "<tr><th>" + skillName + "</th><td>" + skillStatus + "</td></tr>"; return skill; } var createAssistant = async function () { var vhInputFile = $('#curriculum-vitae-input-file').get(0); var assistantFormData = $("#create-assistant-form").serialize(); showLoading(); if (typeof cropper !== "undefined") assistantFormData += "&basicDataPhoto=" + cropper.getCroppedCanvas().toDataURL(); if (vhInputFile.files.length > 0) { var professionalHV = await getInputImageData(vhInputFile); assistantFormData += "&professionalHVUrl=" + professionalHV; } var result = await webClient.RequestAsync("Assistant/createAssistant", assistantFormData, webClient.ContentType.DEFAULT); if (result.status === REQUEST_STATUS.ERROR) { hideLoading(); return; } hideLoading(); if (result.status === REQUEST_STATUS.SUCCESS) { $("#create-assistant-modal").modal("hide"); await loadAssistants(); } };
+ "</a>"
random_line_split
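The assistant.js record's middle is the bare fragment `+ "</a>"`, so the split is only coherent because prefix and suffix leave exactly that gap inside a string concatenation. A crude sanity check one could run over any reconstructed record (a heuristic only: it scans (), [] and {} and does not skip string contents, so it flags obviously bad splits rather than proving good ones):

def delimiters_balanced(code: str) -> bool:
    # Scan bracket pairs over the raw text.
    pairs = {")": "(", "]": "[", "}": "{"}
    stack = []
    for ch in code:
        if ch in "([{":
            stack.append(ch)
        elif ch in pairs:
            if not stack or stack.pop() != pairs[ch]:
                return False
    return not stack

assert delimiters_balanced("var f = function (a) { return a[0]; };")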
infer_lst.py
# ------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
import argparse
import datetime
import json
import os
import random
import time
from pathlib import Path

from PIL import Image, ImageFont, ImageDraw, ImageEnhance
import torchvision.transforms as T
import numpy as np
import torch
from torch.utils.data import DataLoader

import datasets
import util.misc as utils
from util import box_ops
import datasets.samplers as samplers
from datasets import build_dataset, get_coco_api_from_dataset
from engine import evaluate, train_one_epoch
from models import build_model


def get_args_parser():
    parser = argparse.ArgumentParser('Deformable DETR Detector', add_help=False)
    parser.add_argument('--lr', default=2e-4, type=float)
    parser.add_argument('--lr_backbone_names', default=["backbone.0"], type=str, nargs='+')
    parser.add_argument('--lr_backbone', default=2e-5, type=float)
    parser.add_argument('--lr_linear_proj_names', default=['reference_points', 'sampling_offsets'], type=str, nargs='+')
    parser.add_argument('--lr_linear_proj_mult', default=0.1, type=float)
    parser.add_argument('--batch_size', default=2, type=int)
    parser.add_argument('--weight_decay', default=1e-4, type=float)
    parser.add_argument('--epochs', default=50, type=int)
    parser.add_argument('--lr_drop', default=40, type=int)
    parser.add_argument('--lr_drop_epochs', default=None, type=int, nargs='+')
    parser.add_argument('--clip_max_norm', default=0.1, type=float,
                        help='gradient clipping max norm')
    parser.add_argument('--sgd', action='store_true')

    # Variants of Deformable DETR
    parser.add_argument('--with_box_refine', default=False, action='store_true')
    parser.add_argument('--two_stage', default=False, action='store_true')

    # Model parameters
    parser.add_argument('--frozen_weights', type=str, default=None,
                        help="Path to the pretrained model. If set, only the mask head will be trained")

    # * Backbone
    parser.add_argument('--backbone', default='resnet50', type=str,
                        help="Name of the convolutional backbone to use")
    parser.add_argument('--dilation', action='store_true',
                        help="If true, we replace stride with dilation in the last convolutional block (DC5)")
    parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),
                        help="Type of positional embedding to use on top of the image features")
    parser.add_argument('--position_embedding_scale', default=2 * np.pi, type=float,
                        help="position / size * scale")
    parser.add_argument('--num_feature_levels', default=4, type=int, help='number of feature levels')

    # * Transformer
    parser.add_argument('--enc_layers', default=6, type=int,
                        help="Number of encoding layers in the transformer")
    parser.add_argument('--dec_layers', default=6, type=int,
                        help="Number of decoding layers in the transformer")
    parser.add_argument('--dim_feedforward', default=1024, type=int,
                        help="Intermediate size of the feedforward layers in the transformer blocks")
    parser.add_argument('--hidden_dim', default=256, type=int,
                        help="Size of the embeddings (dimension of the transformer)")
    parser.add_argument('--dropout', default=0.1, type=float,
                        help="Dropout applied in the transformer")
    parser.add_argument('--nheads', default=8, type=int,
                        help="Number of attention heads inside the transformer's attentions")
    parser.add_argument('--num_queries', default=300, type=int,
                        help="Number of query slots")
    parser.add_argument('--dec_n_points', default=4, type=int)
    parser.add_argument('--enc_n_points', default=4, type=int)

    # * Segmentation
    parser.add_argument('--masks', action='store_true',
                        help="Train segmentation head if the flag is provided")

    # Loss
    parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',
                        help="Disables auxiliary decoding losses (loss at each layer)")

    # * Matcher
    parser.add_argument('--set_cost_class', default=2, type=float,
                        help="Class coefficient in the matching cost")
    parser.add_argument('--set_cost_bbox', default=5, type=float,
                        help="L1 box coefficient in the matching cost")
    parser.add_argument('--set_cost_giou', default=2, type=float,
                        help="giou box coefficient in the matching cost")

    # * Loss coefficients
    parser.add_argument('--mask_loss_coef', default=1, type=float)
    parser.add_argument('--dice_loss_coef', default=1, type=float)
    parser.add_argument('--cls_loss_coef', default=2, type=float)
    parser.add_argument('--bbox_loss_coef', default=5, type=float)
    parser.add_argument('--giou_loss_coef', default=2, type=float)
    parser.add_argument('--focal_alpha', default=0.25, type=float)

    # Dataset parameters
    parser.add_argument('--dataset_file', default='ICDAR2013')
    parser.add_argument('--coco_path', default='./data/coco', type=str)
    parser.add_argument('--coco_panoptic_path', type=str)
    parser.add_argument('--remove_difficult', action='store_true')

    parser.add_argument('--output_dir', default='',
                        help='path where to save, empty for no saving')
    parser.add_argument('--device', default='cuda',
                        help='device to use for training / testing')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    parser.add_argument('--imgs_dir', type=str, help='input images folder for inference')
    parser.add_argument('--eval', action='store_true')
    parser.add_argument('--num_workers', default=2, type=int)
    parser.add_argument('--cache_mode', default=False, action='store_true',
                        help='whether to cache images on memory')
    return parser


# standard PyTorch mean-std input image normalization
transform = T.Compose([
    T.Resize(800),
    T.ToTensor(),
    T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

label_names = ['table', 'figure', 'natural_image', 'logo', 'signature']
colors = ['red', 'blue', 'green', 'yellow', 'black']


def main(args):
    utils.init_distributed_mode(args)
    print("git:\n {}\n".format(utils.get_sha()))

    if args.frozen_weights is not None:
        assert args.masks, "Frozen training is meant for segmentation only"
    print(args)

    device = torch.device(args.device)

    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)

    model, criterion, postprocessors = build_model(args)
    model.to(device)

    checkpoint = torch.load(args.resume, map_location='cpu')
    model.load_state_dict(checkpoint['model'], strict=False)
    if torch.cuda.is_available():
        model.cuda()
    model.eval()

    for img_file in os.listdir(args.imgs_dir):
        t0 = time.time()
        img_path = os.path.join(args.imgs_dir, img_file)
        out_imgName = './visualize/' + 'out_' + img_file[:-4] + '.png'
        im = Image.open(img_path)

        # mean-std normalize the input image (batch-size: 1)
        img = transform(im).unsqueeze(0)
        img = img.cuda()

        # propagate through the model
        outputs = model(img)
        out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']

        # keep the 100 highest-scoring (query, class) pairs
        prob = out_logits.sigmoid()
        topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1)
        scores = topk_values
        topk_boxes = topk_indexes // out_logits.shape[2]   # query index
        labels = topk_indexes % out_logits.shape[2]        # class index
        boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
        boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))

        keep = scores[0] > 0.2
        boxes = boxes[0, keep]
        labels = labels[0, keep]

        # and from relative [0, 1] to absolute [0, height] coordinates;
        # PIL's Image.size is (width, height), target_sizes follows the (h, w) convention
        im_w, im_h = im.size
        target_sizes = torch.tensor([[im_h, im_w]])
        target_sizes = target_sizes.cuda()
        img_h, img_w = target_sizes.unbind(1)
        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
        boxes = boxes * scale_fct[:, None, :]
        print(time.time() - t0)

        # plot results
        source_img = Image.open(img_path).convert("RGBA")
        fnt = ImageFont.truetype("/content/content/Deformable-DETR/font/Aaargh.ttf", 18)
        draw = ImageDraw.Draw(source_img)
        label_list = labels.tolist()
        i = 0
        for xmin, ymin, xmax, ymax in boxes[0].tolist():
            draw.rectangle(((xmin, ymin), (xmax, ymax)), outline=colors[label_list[i] - 1])
            # draw the class name just above the box when there is room for it
            if ymin - 18 >= 0:
                ymin = ymin - 18
            draw.text((xmin, ymin), label_names[label_list[i] - 1], anchor='md', font=fnt,
                      fill=colors[label_list[i] - 1])
            i += 1

        source_img.save(out_imgName, "png")

        # note: 'scores' is still the unfiltered top-100 tensor here, while
        # labels/boxes were filtered with the 0.2 threshold above
        results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]
        print("Outputs", results)


if __name__ == '__main__':
    parser = argparse.ArgumentParser('Deformable DETR training and evaluation script',
                                     parents=[get_args_parser()])
    args = parser.parse_args()
    if args.output_dir:
        Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    main(args)
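For reference, a typical invocation of this script might look as follows; the checkpoint and folder names are illustrative placeholders. Note that the script as written assumes a CUDA device, and that the hard-coded ./visualize output folder must already exist, since the script saves there without creating it:

    mkdir -p visualize
    python infer_lst.py --resume ./checkpoint.pth --imgs_dir ./test_images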