file_name
large_stringlengths 4
140
| prefix
large_stringlengths 0
12.1k
| suffix
large_stringlengths 0
12k
| middle
large_stringlengths 0
7.51k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
app_2_wl.py | ovid19](http://www.prevcovid19.com/#/teste)")
st.sidebar.markdown("[Tweetru ministre gui eub walu wergu yaram ](https://twitter.com/MinisteredelaS1)")
st.sidebar.markdown("[Booleb xéeti mbir ak màndargaay jumtukaayu ](https://github.com/maelfabien/COVID-19-Senegal)")
st.sidebar.markdown("---")
st.sidebar.header("Jokko ak wa ministere")
st.sidebar.markdown("Ministre gui eub walu wergu yaram ak boolem boko / Fann Residence")
st.sidebar.markdown("Rue Aimé Césaire, Dakar, Senegal")
st.sidebar.markdown("+221 800 00 50 50 - [email protected]")
st.sidebar.markdown("---")
st.sidebar.markdown("Ñi ka derale moye [Maël Fabien](https://maelfabien.github.io/) ak [Dakar Institute of Technology](https://dit.sn/)")
# I. Dataframe
df = pd.read_csv("COVID_Dakar.csv", sep=";")
df['Date'] = pd.to_datetime(df['Date'], dayfirst=True)
#st.write(df)
evol_cases = df[['Date', 'Positif', 'Negatif', 'Décédé', 'Guéri']].groupby("Date").sum().cumsum()
st.subheader("Ci tënkk")
total_positif = evol_cases.tail(1)['Positif'][0]
total_negatif = evol_cases.tail(1)['Negatif'][0]
total_decede = evol_cases.tail(1)['Décédé'][0]
total_geuri = evol_cases.tail(1)['Guéri'][0]
st.markdown("Limu ñi feebar: <span style='font-size:1.5em;'>%s</span>"%(total_positif - total_geuri), unsafe_allow_html=True)
st.markdown("Limu ñi faatu: <span style='font-size:1.5em;'>%s</span>"%(total_decede), unsafe_allow_html=True)
st.markdown("Limu ñi wer: <span style='font-size:1.5em;'>%s</span>"%(total_geuri), unsafe_allow_html=True)
st.markdown("dayob ñi wer : <span style='font-size:1.5em;'>%s</span>"%(np.round(total_geuri / total_positif, 3) * 100), unsafe_allow_html=True)
st.markdown("dàyob yoqute ñi feebar bis bu ay : <span style='font-size:1.5em;'>%s</span>"%(np.round(pd.DataFrame(np.sqrt(evol_cases['Positif'].pct_change(periods=2)+1)-1).tail(1)['Positif'][0] * 100, 2)), unsafe_allow_html=True)
st.markdown("Mboolem ñi ame Koronaa: <span style='font-size:1.5em;'>%s</span>"%(total_positif), unsafe_allow_html=True)
st.markdown("Mboolem ñi ñu saytu te ñu mùcc ci feebar bi: <span style='font-size:1.5em;'>%s</span>"%(total_negatif), unsafe_allow_html=True)
st.markdown("Mboolem ñi ñu saytu: <span style='font-size:1.5em;'>%s</span>"%(total_positif + total_negatif), unsafe_allow_html=True)
st.markdown("dayob ñi ame feebar bi ci ñi ñu saytu: <span style='font-size:1.5em;'>%s</span>"%(np.round(total_positif / (total_positif + total_negatif), 3) * 100), unsafe_allow_html=True)
# II. Map
st.markdown("---")
st.subheader("ñi ame feebar bi fu ñu féete")
shapefile = 'app/ne_110m_admin_0_countries.shp'
#Read shapefile using Geopandas
gdf = gpd.read_file(shapefile)[['ADMIN', 'ADM0_A3', 'geometry']]
gdf.columns = ['country', 'country_code', 'geometry']
gdf = gdf[gdf['country']=="Senegal"]
grid_crs=gdf.crs
gdf_json = json.loads(gdf.to_json())
grid = json.dumps(gdf_json)
cities = pd.read_csv("city_coordinates.csv", index_col=0)
def find_lat(x):
try:
return float(citie | ['Ville'] == x]['Latitude'])
except TypeError:
return None
def find_long(x):
try:
return float(cities[cities['Ville'] == x]['Longitude'])
except TypeError:
return None
summary = df[['Positif', 'Ville']].groupby("Ville").sum().reset_index()
summary['latitude'] = summary['Ville'].apply(lambda x: find_lat(x))
summary['longitude'] = summary['Ville'].apply(lambda x: find_long(x))
geosource = GeoJSONDataSource(geojson = grid)
pointsource = ColumnDataSource(summary)
hover = HoverTool(
tooltips = [('Ville', '@Ville'), ('Limu ñi ame Koronaa ', '@Positif')]
)
#Create figure object.
p = figure(plot_height = 550 , plot_width = 700, tools=[hover, 'pan', 'wheel_zoom'])
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
p.xaxis.visible = False
p.yaxis.visible = False
p.outline_line_color = None
patch = p.patches('xs','ys', source = geosource, fill_color = '#fff7bc',
line_color = 'black', line_width = 0.35, fill_alpha = 1,
hover_fill_color="#fec44f")
#Add patch renderer to figure.
patch = p.patches('xs','ys', source = geosource, fill_color = 'lightgrey',
line_color = 'black', line_width = 0.25, fill_alpha = 1)
p.circle('longitude','latitude',source=pointsource, size=15)
st.bokeh_chart(p)
# III. Map
st.markdown("---")
st.subheader(" Yoqute limu ñi ame Koronaa ci Senegal")
highlight = alt.selection(type='single', on='mouseover',
fields=['Positif'], nearest=True)
chart = alt.Chart(evol_cases.reset_index()).mark_line(point=True, strokeWidth=5).encode(
x='Date:T',
y='Positif:Q',
tooltip='Positif:Q'
).add_selection(
highlight
).properties(height=400, width=700)
st.write(chart.interactive())
st.markdown("---")
st.subheader("Mingalé rewu Pays-Bas")
st.write("Senegaal rewle bigua xamanetané limu way-dëkké dafa méggo ak rewu Pays-bas (Fukk ak jurrom benn million), ba taxna ab mégele meuna dox di diganté ñaari dëkk yoyé. Doneté yoqute Jangorëy Koronaa gui ci rewum Senegaal la geune yéxé ci sinu dioni yalla taye, luñu setlu ci ni Jangoro gui di doxé diarna bayi xel wayé itameu lathe na niou xalate ci.Fi gua xamené mome leu rewu Senegaal tolu ci Jangorëy Koronaa dafa mengo ci fukki fan ak juroom ci guinaw fi rew mi di Pays-Bas Tolone,wayé xayma gogu boye seteu juroom ñaari faney le guir rew pays-bas té Senegaal fukki fan ak juroom ñeet. Lim yi aju ci rewu Pays-Bas ñuguike jeulé ci Wikipedia: https://en.wikipedia.org/wiki/2020_coronavirus_pandemic_in_the_Netherlands")
df_nl = pd.read_csv("df_nl.csv")
plt.figure(figsize=(16,10))
plt.plot(df_nl['Netherlands'], linestyle="--", linewidth=5, label="Pays-Bas")
plt.plot(df_nl['Senegal'],label="Sénégal", linewidth=5)
plt.figtext(.5,.9,'Evolution des cas au Sénégal et aux Pays-Bas', fontsize=30, ha='center')
plt.legend()
st.pyplot(plt)
# IV. Contamination
st.markdown("---")
st.subheader("Tassarok Jangorogui")
st.write("Ñugui xamé ñeneu ñu jeulé Jangoroji ci ñu jugué bimeu rew, ci niit ñu feebar yigua xamené ño waleu ñeni niit.Limu ñigua xamné ño ameu Jangoroji té jeuléko ci biir rew | s[cities | identifier_name |
app_2_wl.py | ovid19](http://www.prevcovid19.com/#/teste)")
st.sidebar.markdown("[Tweetru ministre gui eub walu wergu yaram ](https://twitter.com/MinisteredelaS1)")
st.sidebar.markdown("[Booleb xéeti mbir ak màndargaay jumtukaayu ](https://github.com/maelfabien/COVID-19-Senegal)")
st.sidebar.markdown("---")
st.sidebar.header("Jokko ak wa ministere")
st.sidebar.markdown("Ministre gui eub walu wergu yaram ak boolem boko / Fann Residence")
st.sidebar.markdown("Rue Aimé Césaire, Dakar, Senegal")
st.sidebar.markdown("+221 800 00 50 50 - [email protected]")
st.sidebar.markdown("---")
st.sidebar.markdown("Ñi ka derale moye [Maël Fabien](https://maelfabien.github.io/) ak [Dakar Institute of Technology](https://dit.sn/)")
# I. Dataframe
df = pd.read_csv("COVID_Dakar.csv", sep=";")
df['Date'] = pd.to_datetime(df['Date'], dayfirst=True)
#st.write(df)
evol_cases = df[['Date', 'Positif', 'Negatif', 'Décédé', 'Guéri']].groupby("Date").sum().cumsum()
st.subheader("Ci tënkk")
total_positif = evol_cases.tail(1)['Positif'][0]
total_negatif = evol_cases.tail(1)['Negatif'][0]
total_decede = evol_cases.tail(1)['Décédé'][0]
total_geuri = evol_cases.tail(1)['Guéri'][0]
st.markdown("Limu ñi feebar: <span style='font-size:1.5em;'>%s</span>"%(total_positif - total_geuri), unsafe_allow_html=True)
st.markdown("Limu ñi faatu: <span style='font-size:1.5em;'>%s</span>"%(total_decede), unsafe_allow_html=True)
st.markdown("Limu ñi wer: <span style='font-size:1.5em;'>%s</span>"%(total_geuri), unsafe_allow_html=True)
st.markdown("dayob ñi wer : <span style='font-size:1.5em;'>%s</span>"%(np.round(total_geuri / total_positif, 3) * 100), unsafe_allow_html=True)
st.markdown("dàyob yoqute ñi feebar bis bu ay : <span style='font-size:1.5em;'>%s</span>"%(np.round(pd.DataFrame(np.sqrt(evol_cases['Positif'].pct_change(periods=2)+1)-1).tail(1)['Positif'][0] * 100, 2)), unsafe_allow_html=True)
st.markdown("Mboolem ñi ame Koronaa: <span style='font-size:1.5em;'>%s</span>"%(total_positif), unsafe_allow_html=True)
st.markdown("Mboolem ñi ñu saytu te ñu mùcc ci feebar bi: <span style='font-size:1.5em;'>%s</span>"%(total_negatif), unsafe_allow_html=True)
st.markdown("Mboolem ñi ñu saytu: <span style='font-size:1.5em;'>%s</span>"%(total_positif + total_negatif), unsafe_allow_html=True)
st.markdown("dayob ñi ame feebar bi ci ñi ñu saytu: <span style='font-size:1.5em;'>%s</span>"%(np.round(total_positif / (total_positif + total_negatif), 3) * 100), unsafe_allow_html=True)
# II. Map
st.markdown("---")
st.subheader("ñi ame feebar bi fu ñu féete")
shapefile = 'app/ne_110m_admin_0_countries.shp'
#Read shapefile using Geopandas
gdf = gpd.read_file(shapefile)[['ADMIN', 'ADM0_A3', 'geometry']]
gdf.columns = ['country', 'country_code', 'geometry']
gdf = gdf[gdf['country']=="Senegal"]
grid_crs=gdf.crs
gdf_json = json.loads(gdf.to_json())
grid = json.dumps(gdf_json)
cities = pd.read_csv("city_coordinates.csv", index_col=0)
def find_lat(x):
try:
return float(cities[cities['Ville'] == x]['Latitude'])
except TypeError:
return None
def find_long(x):
try:
return float(cities[cities['Ville'] | lle").sum().reset_index()
summary['latitude'] = summary['Ville'].apply(lambda x: find_lat(x))
summary['longitude'] = summary['Ville'].apply(lambda x: find_long(x))
geosource = GeoJSONDataSource(geojson = grid)
pointsource = ColumnDataSource(summary)
hover = HoverTool(
tooltips = [('Ville', '@Ville'), ('Limu ñi ame Koronaa ', '@Positif')]
)
#Create figure object.
p = figure(plot_height = 550 , plot_width = 700, tools=[hover, 'pan', 'wheel_zoom'])
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
p.xaxis.visible = False
p.yaxis.visible = False
p.outline_line_color = None
patch = p.patches('xs','ys', source = geosource, fill_color = '#fff7bc',
line_color = 'black', line_width = 0.35, fill_alpha = 1,
hover_fill_color="#fec44f")
#Add patch renderer to figure.
patch = p.patches('xs','ys', source = geosource, fill_color = 'lightgrey',
line_color = 'black', line_width = 0.25, fill_alpha = 1)
p.circle('longitude','latitude',source=pointsource, size=15)
st.bokeh_chart(p)
# III. Map
st.markdown("---")
st.subheader(" Yoqute limu ñi ame Koronaa ci Senegal")
highlight = alt.selection(type='single', on='mouseover',
fields=['Positif'], nearest=True)
chart = alt.Chart(evol_cases.reset_index()).mark_line(point=True, strokeWidth=5).encode(
x='Date:T',
y='Positif:Q',
tooltip='Positif:Q'
).add_selection(
highlight
).properties(height=400, width=700)
st.write(chart.interactive())
st.markdown("---")
st.subheader("Mingalé rewu Pays-Bas")
st.write("Senegaal rewle bigua xamanetané limu way-dëkké dafa méggo ak rewu Pays-bas (Fukk ak jurrom benn million), ba taxna ab mégele meuna dox di diganté ñaari dëkk yoyé. Doneté yoqute Jangorëy Koronaa gui ci rewum Senegaal la geune yéxé ci sinu dioni yalla taye, luñu setlu ci ni Jangoro gui di doxé diarna bayi xel wayé itameu lathe na niou xalate ci.Fi gua xamené mome leu rewu Senegaal tolu ci Jangorëy Koronaa dafa mengo ci fukki fan ak juroom ci guinaw fi rew mi di Pays-Bas Tolone,wayé xayma gogu boye seteu juroom ñaari faney le guir rew pays-bas té Senegaal fukki fan ak juroom ñeet. Lim yi aju ci rewu Pays-Bas ñuguike jeulé ci Wikipedia: https://en.wikipedia.org/wiki/2020_coronavirus_pandemic_in_the_Netherlands")
df_nl = pd.read_csv("df_nl.csv")
plt.figure(figsize=(16,10))
plt.plot(df_nl['Netherlands'], linestyle="--", linewidth=5, label="Pays-Bas")
plt.plot(df_nl['Senegal'],label="Sénégal", linewidth=5)
plt.figtext(.5,.9,'Evolution des cas au Sénégal et aux Pays-Bas', fontsize=30, ha='center')
plt.legend()
st.pyplot(plt)
# IV. Contamination
st.markdown("---")
st.subheader("Tassarok Jangorogui")
st.write("Ñugui xamé ñeneu ñu jeulé Jangoroji ci ñu jugué bimeu rew, ci niit ñu feebar yigua xamené ño waleu ñeni niit.Limu ñigua xamné ño ameu Jangoroji té jeuléko ci biir rewmi | == x]['Longitude'])
except TypeError:
return None
summary = df[['Positif', 'Ville']].groupby("Vi | identifier_body |
PerformanceTester.py |
def GenerateJobs(self):
utc = set()
for exec in self.executable:
for x in self.cx:
for y in self.cy:
for z in self.cz:
utc.add(numpy.prod([x,y,z]))
for iteration in self.iterations:
if numpy.prod([x,y,z]) > iteration[0] and numpy.prod([x,y,z]) <= iteration[1]:
for d in self.domains:
self.Jobs.append(Job(d, [x,y,z], iteration[2], output_suffix=self.output_suffix, executable=exec, job_exec=exec.replace("./","").replace(".out","")))
# if numpy.prod([x,y,z]) >= 64 and numpy.prod([x,y,z]) <= 1600:
# elif numpy.prod([x,y,z]) < 64:
# for d in self.domains:
# self.Jobs.append(Job(d, [x,y,z], 250))
self.Jobs.sort()
self.uniq_total_cpus = list(utc)
self.uniq_total_cpus.sort()
def GenerateJobsTotalCPU(self, totalCPU, max_x=2**10, max_y=2**10, max_z=2**10):
utc = set()
cpu_configs = []
totalCPU_divisors = divisors(totalCPU)
for exec in self.executable:
for x in totalCPU_divisors:
for y in totalCPU_divisors:
for z in totalCPU_divisors:
code = "{0}_{1}_{2}".format(x,y,z)
if numpy.prod([x,y,z]) == totalCPU:
if x <= max_x and y <= max_y and z <= max_z:
if code not in cpu_configs:
cpu_configs.append(code)
utc.add(numpy.prod([x,y,z]))
for iteration in self.iterations:
if numpy.prod([x,y,z]) > iteration[0] and numpy.prod([x,y,z]) <= iteration[1]:
for d in self.domains:
self.Jobs.append(Job(d, [x,y,z], iteration[2], output_suffix=self.output_suffix, executable=exec, job_exec=exec.replace("./","").replace(".out","")))
self.Jobs.sort()
self.uniq_total_cpus = list(utc)
self.uniq_total_cpus.sort()
def MakeSubmits(self):
for J in self.Jobs:
J.MakeSubmit(self.template)
print('Prepared submit for job', J.job_name)
def MakeGroupSubmits(self):
utc = set()
for J in self.Jobs:
utc.add(J.total_cpu)
utc = list(utc)
utc.sort()
print(utc)
for tc in utc:
first = True
for J in self.Jobs:
if J.total_cpu == tc:
if first:
J.MakeSubmit(self.template, part="all", mode="w", alternative_name="E.group_{0:05d}".format(tc))
self.group_submit_files.append("E.group_{0:05d}.submit.sh".format(tc))
first = False
else:
J.MakeSubmit(self.template, part="mpirun", mode="a", alternative_name="E.group_{0:05d}".format(tc))
def SubmitAll(self):
for J in self.Jobs:
J.Submit()
def SubmitGroupAll(self):
for f in self.group_submit_files:
run("sbatch {0}".format(f))
def ReadJobTimers(self):
for J in self.Jobs:
J.ReadTimer(self.timer)
def ReadGroupJobTimers(self):
printc("Processing outfiles... ", end="")
files = set()
for iexec in range(len(self.executable)):
exec = self.executable[iexec]
execn= self.executable_name[iexec]
# print(exec)
utc = set()
for J in self.Jobs:
utc.add(J.total_cpu)
utc = list(utc)
utc.sort()
# print(utc)
for tc in utc:
outfile = "E.group_{0:05d}.{1}".format(tc, self.output_suffix)
# files.add(outfile)
run('cat {0} | grep -E "^E\.|{1}" > {0}.clean'.format(outfile, self.timer), quiet=True)
files.add("{0}.clean".format(outfile))
with open("{0}.clean".format(outfile), 'r') as f:
fname = ''
for line in f:
#print(line.strip(),line[0:1] )
if "E.{0}".format(execn) in line:
fname = line.strip() + "." + self.output_suffix
files.add(fname)
elif self.timer in line:
if len(fname) > 0:
with open(fname, "w") as fw:
fw.write(line)
fname=""
printc("\tdone", color='green')
printc("Reading timers... ", end="")
for J in self.Jobs:
J.ReadTimer(self.timer)
printc("\tdone", color='green')
printc("Cleaning up files... ", end="")
for file in files:
run("rm -f {0}".format(file), quiet=True);
printc("\tdone", color='green')
def ProcessStats(self):
if enable_plotting:
fig = plt.figure(figsize=[16,8])
ax = fig.add_subplot(111)
ax.set_xscale("log", nonpositive='clip')
ax.set_yscale("log", nonpositive='clip')
for iexec in range(len(self.executable)):
exec = self.executable[iexec]
execn= self.executable_name[iexec]
for di in range(len(self.domains)):
d = self.domains[di]
min_times = numpy.empty(len(self.uniq_total_cpus))
min_times[:] = numpy.NAN
printc('\nProcessing Domain {0} for {1}'.format(str(d),execn), 'blue')
for ci in range(len(self.uniq_total_cpus)):
c = self.uniq_total_cpus[ci]
JobsOK = []
JobsNK = []
printc('\tProcessing cpu config {0}'.format(str(c)), 'violet')
T = []
for J in self.Jobs:
if J.total_cpu == c and J.domain_size == d and J.executable==exec:
if type(J.timers_results[self.timer]) is list:
JobsOK.append(J)
else:
JobsNK.append(J)
JobsOK.sort(key=lambda x: float(x.timers_results[self.timer][4])/float(x.timers_results[self.timer][3]))
if len(JobsOK) > 0:
min_times[ci] = float(JobsOK[0].timers_results[self.timer][4])/float(JobsOK[0].timers_results[self.timer][3])
for J in JobsOK:
# printc('\t\tOK:', 'green', end=" ")
tpts = float(J.timers_results[self.timer][4])/float(J.timers_results[self.timer][3])
tptss = '{0:7.4f} s'.format(tpts)
tptsp = '{0:5.2f} x'.format(((tpts/min_times[ci])))
tptspc = '{0:10.7f} s'.format(tpts*J.total_cpu)
# print(J.job_name,'\t', tpts)
ds = '{0} x {1} x {2}'.format(J.domain_size[0],J.domain_size[1],J.domain_size[2])
T.append(['OK', execn, ds, J.cpus[0], J.cpus[1], J.cpus[2], J.timesteps, tptss, tptsp,tptspc])
for J in JobsNK:
# printc('\t\tFAIL:', 'red', end=" ")
# print(J.job_name,'\t', J.timers_results)
ds = '{0} x {1} x {2}'.format(J.domain_size[0],J.domain_size[1],J.domain_size[2])
T.append(['FAIL', execn, ds, J.cpus[0], J.cpus[1], J.cpus[2], '-', '-', '-','-'])
header=['result', 'executable', 'domain size', 'cpu x', 'cpu y', 'cpu z', 'timesteps', 'time / iter', 'to fastest', 'time / iter / core']
if len(T) > 0:
print('\t\t'+tabulate(T, headers=header, tablefmt="fancy_grid").replace('\n','\n\t\t'))
if enable_plotting:
Q, W = -1, -1
for q in range(len(self.uniq_total_cpus)):
print (len(self.uniq_total_cpus))
if min_times[q] > 0:
for w in range(len(self.uniq_total_cpus)-1,0,-1):
if min_times[w] > 0:
if Q==-1 and W==-1:
Q = q
W = w + 1
# print("Q, W", q, w)
x1, y1 = self.uniq_total_cpus[ | self.iterations.append([cpu_from, cpu_to, iterations]) | identifier_body |
|
PerformanceTester.py | < 64:
# for d in self.domains:
# self.Jobs.append(Job(d, [x,y,z], 250))
self.Jobs.sort()
self.uniq_total_cpus = list(utc)
self.uniq_total_cpus.sort()
def GenerateJobsTotalCPU(self, totalCPU, max_x=2**10, max_y=2**10, max_z=2**10):
utc = set()
cpu_configs = []
totalCPU_divisors = divisors(totalCPU)
for exec in self.executable:
for x in totalCPU_divisors:
for y in totalCPU_divisors:
for z in totalCPU_divisors:
code = "{0}_{1}_{2}".format(x,y,z)
if numpy.prod([x,y,z]) == totalCPU:
if x <= max_x and y <= max_y and z <= max_z:
if code not in cpu_configs:
cpu_configs.append(code)
utc.add(numpy.prod([x,y,z]))
for iteration in self.iterations:
|
self.Jobs.sort()
self.uniq_total_cpus = list(utc)
self.uniq_total_cpus.sort()
def MakeSubmits(self):
for J in self.Jobs:
J.MakeSubmit(self.template)
print('Prepared submit for job', J.job_name)
def MakeGroupSubmits(self):
utc = set()
for J in self.Jobs:
utc.add(J.total_cpu)
utc = list(utc)
utc.sort()
print(utc)
for tc in utc:
first = True
for J in self.Jobs:
if J.total_cpu == tc:
if first:
J.MakeSubmit(self.template, part="all", mode="w", alternative_name="E.group_{0:05d}".format(tc))
self.group_submit_files.append("E.group_{0:05d}.submit.sh".format(tc))
first = False
else:
J.MakeSubmit(self.template, part="mpirun", mode="a", alternative_name="E.group_{0:05d}".format(tc))
def SubmitAll(self):
for J in self.Jobs:
J.Submit()
def SubmitGroupAll(self):
for f in self.group_submit_files:
run("sbatch {0}".format(f))
def ReadJobTimers(self):
for J in self.Jobs:
J.ReadTimer(self.timer)
def ReadGroupJobTimers(self):
printc("Processing outfiles... ", end="")
files = set()
for iexec in range(len(self.executable)):
exec = self.executable[iexec]
execn= self.executable_name[iexec]
# print(exec)
utc = set()
for J in self.Jobs:
utc.add(J.total_cpu)
utc = list(utc)
utc.sort()
# print(utc)
for tc in utc:
outfile = "E.group_{0:05d}.{1}".format(tc, self.output_suffix)
# files.add(outfile)
run('cat {0} | grep -E "^E\.|{1}" > {0}.clean'.format(outfile, self.timer), quiet=True)
files.add("{0}.clean".format(outfile))
with open("{0}.clean".format(outfile), 'r') as f:
fname = ''
for line in f:
#print(line.strip(),line[0:1] )
if "E.{0}".format(execn) in line:
fname = line.strip() + "." + self.output_suffix
files.add(fname)
elif self.timer in line:
if len(fname) > 0:
with open(fname, "w") as fw:
fw.write(line)
fname=""
printc("\tdone", color='green')
printc("Reading timers... ", end="")
for J in self.Jobs:
J.ReadTimer(self.timer)
printc("\tdone", color='green')
printc("Cleaning up files... ", end="")
for file in files:
run("rm -f {0}".format(file), quiet=True);
printc("\tdone", color='green')
def ProcessStats(self):
if enable_plotting:
fig = plt.figure(figsize=[16,8])
ax = fig.add_subplot(111)
ax.set_xscale("log", nonpositive='clip')
ax.set_yscale("log", nonpositive='clip')
for iexec in range(len(self.executable)):
exec = self.executable[iexec]
execn= self.executable_name[iexec]
for di in range(len(self.domains)):
d = self.domains[di]
min_times = numpy.empty(len(self.uniq_total_cpus))
min_times[:] = numpy.NAN
printc('\nProcessing Domain {0} for {1}'.format(str(d),execn), 'blue')
for ci in range(len(self.uniq_total_cpus)):
c = self.uniq_total_cpus[ci]
JobsOK = []
JobsNK = []
printc('\tProcessing cpu config {0}'.format(str(c)), 'violet')
T = []
for J in self.Jobs:
if J.total_cpu == c and J.domain_size == d and J.executable==exec:
if type(J.timers_results[self.timer]) is list:
JobsOK.append(J)
else:
JobsNK.append(J)
JobsOK.sort(key=lambda x: float(x.timers_results[self.timer][4])/float(x.timers_results[self.timer][3]))
if len(JobsOK) > 0:
min_times[ci] = float(JobsOK[0].timers_results[self.timer][4])/float(JobsOK[0].timers_results[self.timer][3])
for J in JobsOK:
# printc('\t\tOK:', 'green', end=" ")
tpts = float(J.timers_results[self.timer][4])/float(J.timers_results[self.timer][3])
tptss = '{0:7.4f} s'.format(tpts)
tptsp = '{0:5.2f} x'.format(((tpts/min_times[ci])))
tptspc = '{0:10.7f} s'.format(tpts*J.total_cpu)
# print(J.job_name,'\t', tpts)
ds = '{0} x {1} x {2}'.format(J.domain_size[0],J.domain_size[1],J.domain_size[2])
T.append(['OK', execn, ds, J.cpus[0], J.cpus[1], J.cpus[2], J.timesteps, tptss, tptsp,tptspc])
for J in JobsNK:
# printc('\t\tFAIL:', 'red', end=" ")
# print(J.job_name,'\t', J.timers_results)
ds = '{0} x {1} x {2}'.format(J.domain_size[0],J.domain_size[1],J.domain_size[2])
T.append(['FAIL', execn, ds, J.cpus[0], J.cpus[1], J.cpus[2], '-', '-', '-','-'])
header=['result', 'executable', 'domain size', 'cpu x', 'cpu y', 'cpu z', 'timesteps', 'time / iter', 'to fastest', 'time / iter / core']
if len(T) > 0:
print('\t\t'+tabulate(T, headers=header, tablefmt="fancy_grid").replace('\n','\n\t\t'))
if enable_plotting:
Q, W = -1, -1
for q in range(len(self.uniq_total_cpus)):
print (len(self.uniq_total_cpus))
if min_times[q] > 0:
for w in range(len(self.uniq_total_cpus)-1,0,-1):
if min_times[w] > 0:
if Q==-1 and W==-1:
Q = q
W = w + 1
# print("Q, W", q, w)
x1, y1 = self.uniq_total_cpus[Q:W],min_times[Q:W]
x2 = numpy.log(numpy.array(x1))
y2 = numpy.log(numpy.array(y1))
print (x1,y1)
if enable_scipy:
slope, intercept, r_value, p_value, std_err = stats.linregress(x2,y2)
#print("LS", slope, intercept, r_value, p_value, std_err)
#plt.plot([x1[0], x1[-1]*16], [numpy.exp(x2[0]*slope+intercept), numpy.exp(numpy.log(x1[-1]*16)*slope+intercept)],'-.', color=self.domains_color[di])
plt.plot([1, 1024*32], [numpy.exp(numpy.log(1)*slope+intercept), numpy.exp(numpy.log(1024*3 | if numpy.prod([x,y,z]) > iteration[0] and numpy.prod([x,y,z]) <= iteration[1]:
for d in self.domains:
self.Jobs.append(Job(d, [x,y,z], iteration[2], output_suffix=self.output_suffix, executable=exec, job_exec=exec.replace("./","").replace(".out",""))) | conditional_block |
PerformanceTester.py | < 64:
# for d in self.domains:
# self.Jobs.append(Job(d, [x,y,z], 250))
self.Jobs.sort()
self.uniq_total_cpus = list(utc)
self.uniq_total_cpus.sort()
def GenerateJobsTotalCPU(self, totalCPU, max_x=2**10, max_y=2**10, max_z=2**10):
utc = set()
cpu_configs = []
totalCPU_divisors = divisors(totalCPU)
for exec in self.executable:
for x in totalCPU_divisors:
for y in totalCPU_divisors:
for z in totalCPU_divisors:
code = "{0}_{1}_{2}".format(x,y,z)
if numpy.prod([x,y,z]) == totalCPU:
if x <= max_x and y <= max_y and z <= max_z:
if code not in cpu_configs:
cpu_configs.append(code)
utc.add(numpy.prod([x,y,z]))
for iteration in self.iterations:
if numpy.prod([x,y,z]) > iteration[0] and numpy.prod([x,y,z]) <= iteration[1]:
for d in self.domains:
self.Jobs.append(Job(d, [x,y,z], iteration[2], output_suffix=self.output_suffix, executable=exec, job_exec=exec.replace("./","").replace(".out","")))
self.Jobs.sort()
self.uniq_total_cpus = list(utc)
self.uniq_total_cpus.sort()
def | (self):
for J in self.Jobs:
J.MakeSubmit(self.template)
print('Prepared submit for job', J.job_name)
def MakeGroupSubmits(self):
utc = set()
for J in self.Jobs:
utc.add(J.total_cpu)
utc = list(utc)
utc.sort()
print(utc)
for tc in utc:
first = True
for J in self.Jobs:
if J.total_cpu == tc:
if first:
J.MakeSubmit(self.template, part="all", mode="w", alternative_name="E.group_{0:05d}".format(tc))
self.group_submit_files.append("E.group_{0:05d}.submit.sh".format(tc))
first = False
else:
J.MakeSubmit(self.template, part="mpirun", mode="a", alternative_name="E.group_{0:05d}".format(tc))
def SubmitAll(self):
for J in self.Jobs:
J.Submit()
def SubmitGroupAll(self):
for f in self.group_submit_files:
run("sbatch {0}".format(f))
def ReadJobTimers(self):
for J in self.Jobs:
J.ReadTimer(self.timer)
def ReadGroupJobTimers(self):
printc("Processing outfiles... ", end="")
files = set()
for iexec in range(len(self.executable)):
exec = self.executable[iexec]
execn= self.executable_name[iexec]
# print(exec)
utc = set()
for J in self.Jobs:
utc.add(J.total_cpu)
utc = list(utc)
utc.sort()
# print(utc)
for tc in utc:
outfile = "E.group_{0:05d}.{1}".format(tc, self.output_suffix)
# files.add(outfile)
run('cat {0} | grep -E "^E\.|{1}" > {0}.clean'.format(outfile, self.timer), quiet=True)
files.add("{0}.clean".format(outfile))
with open("{0}.clean".format(outfile), 'r') as f:
fname = ''
for line in f:
#print(line.strip(),line[0:1] )
if "E.{0}".format(execn) in line:
fname = line.strip() + "." + self.output_suffix
files.add(fname)
elif self.timer in line:
if len(fname) > 0:
with open(fname, "w") as fw:
fw.write(line)
fname=""
printc("\tdone", color='green')
printc("Reading timers... ", end="")
for J in self.Jobs:
J.ReadTimer(self.timer)
printc("\tdone", color='green')
printc("Cleaning up files... ", end="")
for file in files:
run("rm -f {0}".format(file), quiet=True);
printc("\tdone", color='green')
def ProcessStats(self):
if enable_plotting:
fig = plt.figure(figsize=[16,8])
ax = fig.add_subplot(111)
ax.set_xscale("log", nonpositive='clip')
ax.set_yscale("log", nonpositive='clip')
for iexec in range(len(self.executable)):
exec = self.executable[iexec]
execn= self.executable_name[iexec]
for di in range(len(self.domains)):
d = self.domains[di]
min_times = numpy.empty(len(self.uniq_total_cpus))
min_times[:] = numpy.NAN
printc('\nProcessing Domain {0} for {1}'.format(str(d),execn), 'blue')
for ci in range(len(self.uniq_total_cpus)):
c = self.uniq_total_cpus[ci]
JobsOK = []
JobsNK = []
printc('\tProcessing cpu config {0}'.format(str(c)), 'violet')
T = []
for J in self.Jobs:
if J.total_cpu == c and J.domain_size == d and J.executable==exec:
if type(J.timers_results[self.timer]) is list:
JobsOK.append(J)
else:
JobsNK.append(J)
JobsOK.sort(key=lambda x: float(x.timers_results[self.timer][4])/float(x.timers_results[self.timer][3]))
if len(JobsOK) > 0:
min_times[ci] = float(JobsOK[0].timers_results[self.timer][4])/float(JobsOK[0].timers_results[self.timer][3])
for J in JobsOK:
# printc('\t\tOK:', 'green', end=" ")
tpts = float(J.timers_results[self.timer][4])/float(J.timers_results[self.timer][3])
tptss = '{0:7.4f} s'.format(tpts)
tptsp = '{0:5.2f} x'.format(((tpts/min_times[ci])))
tptspc = '{0:10.7f} s'.format(tpts*J.total_cpu)
# print(J.job_name,'\t', tpts)
ds = '{0} x {1} x {2}'.format(J.domain_size[0],J.domain_size[1],J.domain_size[2])
T.append(['OK', execn, ds, J.cpus[0], J.cpus[1], J.cpus[2], J.timesteps, tptss, tptsp,tptspc])
for J in JobsNK:
# printc('\t\tFAIL:', 'red', end=" ")
# print(J.job_name,'\t', J.timers_results)
ds = '{0} x {1} x {2}'.format(J.domain_size[0],J.domain_size[1],J.domain_size[2])
T.append(['FAIL', execn, ds, J.cpus[0], J.cpus[1], J.cpus[2], '-', '-', '-','-'])
header=['result', 'executable', 'domain size', 'cpu x', 'cpu y', 'cpu z', 'timesteps', 'time / iter', 'to fastest', 'time / iter / core']
if len(T) > 0:
print('\t\t'+tabulate(T, headers=header, tablefmt="fancy_grid").replace('\n','\n\t\t'))
if enable_plotting:
Q, W = -1, -1
for q in range(len(self.uniq_total_cpus)):
print (len(self.uniq_total_cpus))
if min_times[q] > 0:
for w in range(len(self.uniq_total_cpus)-1,0,-1):
if min_times[w] > 0:
if Q==-1 and W==-1:
Q = q
W = w + 1
# print("Q, W", q, w)
x1, y1 = self.uniq_total_cpus[Q:W],min_times[Q:W]
x2 = numpy.log(numpy.array(x1))
y2 = numpy.log(numpy.array(y1))
print (x1,y1)
if enable_scipy:
slope, intercept, r_value, p_value, std_err = stats.linregress(x2,y2)
#print("LS", slope, intercept, r_value, p_value, std_err)
#plt.plot([x1[0], x1[-1]*16], [numpy.exp(x2[0]*slope+intercept), numpy.exp(numpy.log(x1[-1]*16)*slope+intercept)],'-.', color=self.domains_color[di])
plt.plot([1, 1024*32], [numpy.exp(numpy.log(1)*slope+intercept), numpy.exp(numpy.log(1024*3 | MakeSubmits | identifier_name |
PerformanceTester.py | < 64:
# for d in self.domains:
# self.Jobs.append(Job(d, [x,y,z], 250))
self.Jobs.sort()
self.uniq_total_cpus = list(utc)
self.uniq_total_cpus.sort()
def GenerateJobsTotalCPU(self, totalCPU, max_x=2**10, max_y=2**10, max_z=2**10):
utc = set()
cpu_configs = []
totalCPU_divisors = divisors(totalCPU)
for exec in self.executable:
for x in totalCPU_divisors:
for y in totalCPU_divisors:
for z in totalCPU_divisors:
code = "{0}_{1}_{2}".format(x,y,z)
if numpy.prod([x,y,z]) == totalCPU:
if x <= max_x and y <= max_y and z <= max_z:
if code not in cpu_configs:
cpu_configs.append(code)
utc.add(numpy.prod([x,y,z]))
for iteration in self.iterations:
if numpy.prod([x,y,z]) > iteration[0] and numpy.prod([x,y,z]) <= iteration[1]:
for d in self.domains:
self.Jobs.append(Job(d, [x,y,z], iteration[2], output_suffix=self.output_suffix, executable=exec, job_exec=exec.replace("./","").replace(".out","")))
self.Jobs.sort()
self.uniq_total_cpus = list(utc)
self.uniq_total_cpus.sort()
def MakeSubmits(self):
for J in self.Jobs:
J.MakeSubmit(self.template)
print('Prepared submit for job', J.job_name)
def MakeGroupSubmits(self):
utc = set()
for J in self.Jobs:
utc.add(J.total_cpu)
utc = list(utc)
utc.sort()
print(utc)
for tc in utc:
first = True
for J in self.Jobs:
if J.total_cpu == tc:
if first:
J.MakeSubmit(self.template, part="all", mode="w", alternative_name="E.group_{0:05d}".format(tc))
self.group_submit_files.append("E.group_{0:05d}.submit.sh".format(tc))
first = False
else:
J.MakeSubmit(self.template, part="mpirun", mode="a", alternative_name="E.group_{0:05d}".format(tc))
def SubmitAll(self):
for J in self.Jobs:
J.Submit()
def SubmitGroupAll(self):
for f in self.group_submit_files:
run("sbatch {0}".format(f))
def ReadJobTimers(self):
for J in self.Jobs:
J.ReadTimer(self.timer)
def ReadGroupJobTimers(self):
printc("Processing outfiles... ", end="")
files = set()
for iexec in range(len(self.executable)):
exec = self.executable[iexec]
execn= self.executable_name[iexec]
# print(exec)
utc = set()
for J in self.Jobs:
utc.add(J.total_cpu) | utc = list(utc)
utc.sort()
# print(utc)
for tc in utc:
outfile = "E.group_{0:05d}.{1}".format(tc, self.output_suffix)
# files.add(outfile)
run('cat {0} | grep -E "^E\.|{1}" > {0}.clean'.format(outfile, self.timer), quiet=True)
files.add("{0}.clean".format(outfile))
with open("{0}.clean".format(outfile), 'r') as f:
fname = ''
for line in f:
#print(line.strip(),line[0:1] )
if "E.{0}".format(execn) in line:
fname = line.strip() + "." + self.output_suffix
files.add(fname)
elif self.timer in line:
if len(fname) > 0:
with open(fname, "w") as fw:
fw.write(line)
fname=""
printc("\tdone", color='green')
printc("Reading timers... ", end="")
for J in self.Jobs:
J.ReadTimer(self.timer)
printc("\tdone", color='green')
printc("Cleaning up files... ", end="")
for file in files:
run("rm -f {0}".format(file), quiet=True);
printc("\tdone", color='green')
def ProcessStats(self):
if enable_plotting:
fig = plt.figure(figsize=[16,8])
ax = fig.add_subplot(111)
ax.set_xscale("log", nonpositive='clip')
ax.set_yscale("log", nonpositive='clip')
for iexec in range(len(self.executable)):
exec = self.executable[iexec]
execn= self.executable_name[iexec]
for di in range(len(self.domains)):
d = self.domains[di]
min_times = numpy.empty(len(self.uniq_total_cpus))
min_times[:] = numpy.NAN
printc('\nProcessing Domain {0} for {1}'.format(str(d),execn), 'blue')
for ci in range(len(self.uniq_total_cpus)):
c = self.uniq_total_cpus[ci]
JobsOK = []
JobsNK = []
printc('\tProcessing cpu config {0}'.format(str(c)), 'violet')
T = []
for J in self.Jobs:
if J.total_cpu == c and J.domain_size == d and J.executable==exec:
if type(J.timers_results[self.timer]) is list:
JobsOK.append(J)
else:
JobsNK.append(J)
JobsOK.sort(key=lambda x: float(x.timers_results[self.timer][4])/float(x.timers_results[self.timer][3]))
if len(JobsOK) > 0:
min_times[ci] = float(JobsOK[0].timers_results[self.timer][4])/float(JobsOK[0].timers_results[self.timer][3])
for J in JobsOK:
# printc('\t\tOK:', 'green', end=" ")
tpts = float(J.timers_results[self.timer][4])/float(J.timers_results[self.timer][3])
tptss = '{0:7.4f} s'.format(tpts)
tptsp = '{0:5.2f} x'.format(((tpts/min_times[ci])))
tptspc = '{0:10.7f} s'.format(tpts*J.total_cpu)
# print(J.job_name,'\t', tpts)
ds = '{0} x {1} x {2}'.format(J.domain_size[0],J.domain_size[1],J.domain_size[2])
T.append(['OK', execn, ds, J.cpus[0], J.cpus[1], J.cpus[2], J.timesteps, tptss, tptsp,tptspc])
for J in JobsNK:
# printc('\t\tFAIL:', 'red', end=" ")
# print(J.job_name,'\t', J.timers_results)
ds = '{0} x {1} x {2}'.format(J.domain_size[0],J.domain_size[1],J.domain_size[2])
T.append(['FAIL', execn, ds, J.cpus[0], J.cpus[1], J.cpus[2], '-', '-', '-','-'])
header=['result', 'executable', 'domain size', 'cpu x', 'cpu y', 'cpu z', 'timesteps', 'time / iter', 'to fastest', 'time / iter / core']
if len(T) > 0:
print('\t\t'+tabulate(T, headers=header, tablefmt="fancy_grid").replace('\n','\n\t\t'))
if enable_plotting:
Q, W = -1, -1
for q in range(len(self.uniq_total_cpus)):
print (len(self.uniq_total_cpus))
if min_times[q] > 0:
for w in range(len(self.uniq_total_cpus)-1,0,-1):
if min_times[w] > 0:
if Q==-1 and W==-1:
Q = q
W = w + 1
# print("Q, W", q, w)
x1, y1 = self.uniq_total_cpus[Q:W],min_times[Q:W]
x2 = numpy.log(numpy.array(x1))
y2 = numpy.log(numpy.array(y1))
print (x1,y1)
if enable_scipy:
slope, intercept, r_value, p_value, std_err = stats.linregress(x2,y2)
#print("LS", slope, intercept, r_value, p_value, std_err)
#plt.plot([x1[0], x1[-1]*16], [numpy.exp(x2[0]*slope+intercept), numpy.exp(numpy.log(x1[-1]*16)*slope+intercept)],'-.', color=self.domains_color[di])
plt.plot([1, 1024*32], [numpy.exp(numpy.log(1)*slope+intercept), numpy.exp(numpy.log(1024*32 | random_line_split |
|
task_5.py | _t4 = -1
servo_handle_x_t3 = -1
servo_handle_y_t3 = -1
servo_handle_x_t2 = -1
servo_handle_y_t2 = -1
servo_handle_x_t1 = -1
servo_handle_y_t1 = -1
handle_list = {}
try:
with open('ball_details.json') as file:
ball_details = json.load(file)
except ImportError:
print('\n[ERROR] ball_details.json file is not present in the current directory.')
print('Your current directory is: ', os.getcwd())
print('Make sure ball_details.json is present in this current directory.\n')
map_start = {
"T4":[(0,5)],
"T3":[(4,9)],
"T2":[(0,4)],
"T1":[(5,0)]
} # do mapping of start and end point on the basis of color and json file.
map_end = {
"T4":[(5,9), (9,4), (4,0)],
"T3":[(9,5), (5,0), (0,4)],
"T2":[(4,9), (9,5), (5,0)],
"T1":[(0,4), (4,9), (9,5)]
}
t4_path = None #path to table req
aux_path = None #path to req cb
path_map = { #pixel path to each exit point on the table
"T1":[],
"T2":[],
"T3":[],
"T4":[]
}
path_box_map = { #box coordinates path to draw path on the tables
"T1":[],
"T2":[],
"T3":[],
"T4":[]
}
maze_map ={
}
collection_box = None #integer variable to store the number of the collection box
client_id = -1
############################################################
##############################################################
# NOTE: YOU ARE NOT ALLOWED TO MAKE ANY CHANGE TO THIS FUNCTION
#
# Function Name: send_color_and_collection_box_identified
# Inputs: ball_color and collection_box_name
# Outputs: None
# Purpose: 1. This function should only be called when the task is being evaluated using
# test executable.
# 2. The format to send the data is as follows:
# 'color::collection_box_name'
def send_color_and_collection_box_identified(ball_color, collection_box_name):
global client_id
color_and_cb = [ball_color + '::' + collection_box_name]
inputBuffer = bytearray()
return_code, retInts, retFloats, retStrings, retBuffer = sim.simxCallScriptFunction(client_id,'evaluation_screen_respondable_1',
sim.sim_scripttype_childscript,'color_and_cb_identification',[],[],color_and_cb,inputBuffer,sim.simx_opmode_blocking)
################# ADD UTILITY FUNCTIONS HERE #################
## You can define any utility functions for your code. ##
## Please add proper comments to ensure that your code is ##
## readable and easy to understand. ##
##############################################################
''' Function name: color_get
Inputs: Image from vision sensor
Outputs: Color of the ball detected in the image
Usage: Takes in the image from the vision sensors and returns the color of the ball detected in the image
Example call: color_get(image_from_vision_sensor)
'''
def color_get(img_file_path):
if(img_file_path is None):
return
#Read the image
if type(img_file_path) == type(str()):
img_file_path = cv2.imread(img_file_path)
else:
img_file_path= img_file_path
#cv2.imwrite("colorefromrailing.png",img_file_path)
imageFrame = cv2.GaussianBlur(img_file_path,(5,5),cv2.BORDER_TRANSPARENT)
hsvFrame = cv2.cvtColor(imageFrame, cv2.COLOR_BGR2HSV)
#To create a mask for red colour
red_lower = np.array([0, 50, 50])
red_upper = np.array([10, 255, 255])
red_mask = cv2.inRange(hsvFrame, red_lower, red_upper)
kernal = np.ones((5, 5))
red_gray=cv2.threshold(red_mask, 245,225, cv2.THRESH_BINARY)[1]
gray_blur_red= cv2.Canny(red_gray,100,255)
#Create a mask for blue colour
blue_lower = np.array([94, 20, 0], np.uint8)
blue_upper = np.array([140,255 ,255], np.uint8)
blue_mask = cv2.inRange(hsvFrame, blue_lower, blue_upper)
kernal = np.ones((5, 5))
blue_mask = cv2.dilate(blue_mask, kernal)
blue_gray=cv2.threshold(blue_mask, 245,225, cv2.THRESH_TRUNC)[1]
gray_blur_blue= cv2.Canny(blue_gray,100,255)
#Create a mask for green colour
green_lower = np.array([25, 52, 72], np.uint8)
green_upper = np.array([102, 255, 255], np.uint8)
green_mask = cv2.inRange(hsvFrame, green_lower, green_upper)
kernal = np.ones((5, 5))
green_mask = cv2.dilate(green_mask, kernal)
green_gray=cv2.threshold(green_mask, 250,255, cv2.THRESH_BINARY)[1]
gray_blur_green = cv2.Canny(green_gray,100,255)
#find contours on blue mask
cnts= cv2.findContours(gray_blur_blue, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if type(cnts[-1]) !=type(None):
if len(cnts) == 2:
|
elif len(cnts) == 3:
cnts = cnts[1]
if (len(cnts)):
return 'blue'
#Find red contours in the image
cnts= cv2.findContours(gray_blur_red, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if type(cnts[-1]) !=type(None) :
if len(cnts) == 2:
cnts = cnts[0]
elif len(cnts) == 3:
cnts = cnts[1]
if (len(cnts)):
return 'red'
# Find green contours in the image
cnts= cv2.findContours(gray_blur_green, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if type(cnts[-1]) !=type(None) :
if len(cnts) == 2:
cnts = cnts[0]
elif len(cnts) == 3:
cnts = cnts[1]
if(len(cnts)):
return 'green'
''' Function name: traverse_ball
Usage: traverses the ball from one point to another
Inputs: servo handles(x and y), vision sensor to be read and pixel path which the ball has to follow
Outputs: None
Example Call : traverse_ball(servohandle_x_t4, servo_handle_y_t4, visionsensor_4, t4_path)
'''
def traverse_ball(servohandle_x,servohandle_y,vision_sensor_handle,pixel_path):
global client_id
rt_code, prev_time = sim.simxGetStringSignal(client_id,'time',sim.simx_opmode_streaming)
current_time = ''
while(len(current_time) == 0 ):
rt_code,current_time =sim.simxGetStringSignal(client_id,'time',sim.simx_opmode_buffer)
j = 0
k= 0
for i in pixel_path:
i.reverse()
task_3.change_setpoint(i)
while(1):
j+=1
k+=1
vision_sensor_image, image_resolution, return_code = task_2a.get_vision_sensor_image(client_id,vision_sensor_handle)
transformed_image = task_2a.transform_vision_sensor_image(vision_sensor_image,image_resolution)
warped_img = task_1b.applyPerspectiveTransform(transformed_image)
shapes = task_1a_part1.scan_image(warped_img)
if(shapes):
warped_img = cv2.cvtColor(warped_img,cv2.COLOR_GRAY2RGB)
warped_img = cv2.circle(warped_img,(shapes['Circle'][1],shapes['Circle'][2]),5,(0,255,0),2)
warped_img = cv2.circle(warped_img,(i[0],i[1]),5,(255,0,0),2)
if(abs(shapes['Circle'][1]-i[0]) <= 30 and abs(shapes['Circle'][2]-i[1]) <= 30):
break
else:
task_3.control_logic(client_id,shapes['Circle'][1],shapes['Circle'][2],servohandle_x,servohandle_y)
return 1
''' Function | cnts = cnts[0] | conditional_block |
task_5.py | blue_upper = np.array([140,255 ,255], np.uint8)
blue_mask = cv2.inRange(hsvFrame, blue_lower, blue_upper)
kernal = np.ones((5, 5))
blue_mask = cv2.dilate(blue_mask, kernal)
blue_gray=cv2.threshold(blue_mask, 245,225, cv2.THRESH_TRUNC)[1]
gray_blur_blue= cv2.Canny(blue_gray,100,255)
#Create a mask for green colour
green_lower = np.array([25, 52, 72], np.uint8)
green_upper = np.array([102, 255, 255], np.uint8)
green_mask = cv2.inRange(hsvFrame, green_lower, green_upper)
kernal = np.ones((5, 5))
green_mask = cv2.dilate(green_mask, kernal)
green_gray=cv2.threshold(green_mask, 250,255, cv2.THRESH_BINARY)[1]
gray_blur_green = cv2.Canny(green_gray,100,255)
#find contours on blue mask
cnts= cv2.findContours(gray_blur_blue, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if type(cnts[-1]) !=type(None):
if len(cnts) == 2:
cnts = cnts[0]
elif len(cnts) == 3:
cnts = cnts[1]
if (len(cnts)):
return 'blue'
#Find red contours in the image
cnts= cv2.findContours(gray_blur_red, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if type(cnts[-1]) !=type(None) :
if len(cnts) == 2:
cnts = cnts[0]
elif len(cnts) == 3:
cnts = cnts[1]
if (len(cnts)):
return 'red'
# Find green contours in the image
cnts= cv2.findContours(gray_blur_green, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if type(cnts[-1]) !=type(None) :
if len(cnts) == 2:
cnts = cnts[0]
elif len(cnts) == 3:
cnts = cnts[1]
if(len(cnts)):
return 'green'
''' Function name: traverse_ball
Usage: traverses the ball from one point to another
Inputs: servo handles(x and y), vision sensor to be read and pixel path which the ball has to follow
Outputs: None
Example Call : traverse_ball(servohandle_x_t4, servo_handle_y_t4, visionsensor_4, t4_path)
'''
def traverse_ball(servohandle_x,servohandle_y,vision_sensor_handle,pixel_path):
global client_id
rt_code, prev_time = sim.simxGetStringSignal(client_id,'time',sim.simx_opmode_streaming)
current_time = ''
while(len(current_time) == 0 ):
rt_code,current_time =sim.simxGetStringSignal(client_id,'time',sim.simx_opmode_buffer)
j = 0
k= 0
for i in pixel_path:
i.reverse()
task_3.change_setpoint(i)
while(1):
j+=1
k+=1
vision_sensor_image, image_resolution, return_code = task_2a.get_vision_sensor_image(client_id,vision_sensor_handle)
transformed_image = task_2a.transform_vision_sensor_image(vision_sensor_image,image_resolution)
warped_img = task_1b.applyPerspectiveTransform(transformed_image)
shapes = task_1a_part1.scan_image(warped_img)
if(shapes):
warped_img = cv2.cvtColor(warped_img,cv2.COLOR_GRAY2RGB)
warped_img = cv2.circle(warped_img,(shapes['Circle'][1],shapes['Circle'][2]),5,(0,255,0),2)
warped_img = cv2.circle(warped_img,(i[0],i[1]),5,(255,0,0),2)
if(abs(shapes['Circle'][1]-i[0]) <= 30 and abs(shapes['Circle'][2]-i[1]) <= 30):
break
else:
task_3.control_logic(client_id,shapes['Circle'][1],shapes['Circle'][2],servohandle_x,servohandle_y)
return 1
''' Function name: send_data_to_draw_path
Usage: Draws path on the table in Coppleiasim scene
Inputs: table no and the box path to be drawn
Outputs: None
Example call: send_data_to_draw_path('T4', pixel_path_list)
'''
def send_data_to_draw_path(table,path):
global client_id
############## IF REQUIRED, CHANGE THE CODE FROM HERE ##############
coppelia_sim_coord_path = []
table_name = "top_plate_respondable_t" + str(table) + "_1"
for coord in path:
for element in coord:
coppelia_sim_coord_path.append(((10*element) - 45)/100)
inputBuffer = bytearray()
return_code, retInts, retFloats, retStrings, retBuffer = sim.simxCallScriptFunction(client_id, \
table_name, sim.sim_scripttype_customizationscript, 'drawPath', [], \
coppelia_sim_coord_path, [], inputBuffer, sim.simx_opmode_oneshot)
''' Function name: make_connection
Usage: Establishes connection with the Coppleiasim server and populates the global variable handle list with the updated values of servo handle and vision sensors
Inputs: None
Outputs: None
Example call: make_connection()
'''
def make_connection():
global client_id,handle_list
global vision_sensor_5,vision_sensor_4,vision_sensor_3,vision_sensor_2,vision_sensor_1,servo_handle_x_t1,servo_handle_y_t1,servo_handle_x_t4,servo_handle_y_t4
return_code,servo_handle_x_t1 = sim.simxGetObjectHandle(client_id,"revolute_joint_ss_t1_1",sim.simx_opmode_blocking)
return_code,servo_handle_y_t1 = sim.simxGetObjectHandle(client_id,"revolute_joint_ss_t1_2",sim.simx_opmode_blocking)
return_code,servo_handle_x_t4 = sim.simxGetObjectHandle(client_id,"revolute_joint_ss_t4_1",sim.simx_opmode_blocking)
return_code,servo_handle_y_t4 = sim.simxGetObjectHandle(client_id,"revolute_joint_ss_t4_2",sim.simx_opmode_blocking)
return_code,vision_sensor_1 = sim.simxGetObjectHandle(client_id,"vision_sensor_1",sim.simx_opmode_blocking)
#return_code,vision_sensor_2 = sim.simxGetObjectHandle(client_id,"vision_sensor_2",sim.simx_opmode_blocking)
#return_code,vision_sensor_3 = sim.simxGetObjectHandle(client_id,"vision_sensor_3",sim.simx_opmode_blocking)
return_code,vision_sensor_4 = sim.simxGetObjectHandle(client_id,"vision_sensor_4",sim.simx_opmode_blocking)
return_code,vision_sensor_5 = sim.simxGetObjectHandle(client_id,"vision_sensor_5",sim.simx_opmode_blocking)
handle_list = {'T4' : [servo_handle_x_t4,servo_handle_y_t4,vision_sensor_4],
'T3' : [],
'T2' : [],
'T1' : [servo_handle_x_t1,servo_handle_y_t1,vision_sensor_1]
}
''' Function name: set_path
Usage: sets variables used to make the ball reach to its destination collection box according to the color using ball_details json dictionary.
It calls send_data_to_draw_path to draw the path on the table.
Inputs: color of the detected ball : string
Outputs: None
Example call: set_path('green')
'''
def set_path(color):
global t4_path,aux_path
table, collection_box = ball_details[color][0].split('_')
t4_path=path_map['T4'][int(table[-1])-1]
t4_path_drawn = path_box_map['T4'][int(table[-1])-1]
send_data_to_draw_path(4,t4_path_drawn)
aux_path = path_map[table][int(collection_box[-1])-1]
aux_path_drawn = path_box_map[table][int(collection_box[-1])-1]
send_data_to_draw_path(1,aux_path_drawn)
ball_details[color].pop(0)
''' Function name: complete_all_mapping_path
Usage: Sets all mapping path according to the values of entry and exit points of the table and the maze. It also manipulates the setpoints according to the
required collection box collection box(line no 452-478) to make the ball fall in the collection box.
Inputs: Table number for which the paths have to be set : string
Outputs: None
Example Call: complete_all_mapping_path('T4')
'''
def | complete_all_mapping_path | identifier_name |
|
task_5.py | _t4 = -1
servo_handle_x_t3 = -1
servo_handle_y_t3 = -1
servo_handle_x_t2 = -1
servo_handle_y_t2 = -1
servo_handle_x_t1 = -1
servo_handle_y_t1 = -1
handle_list = {}
try:
with open('ball_details.json') as file:
ball_details = json.load(file)
except ImportError:
print('\n[ERROR] ball_details.json file is not present in the current directory.')
print('Your current directory is: ', os.getcwd())
print('Make sure ball_details.json is present in this current directory.\n')
map_start = {
"T4":[(0,5)],
"T3":[(4,9)],
"T2":[(0,4)],
"T1":[(5,0)]
} # do mapping of start and end point on the basis of color and json file.
map_end = {
"T4":[(5,9), (9,4), (4,0)],
"T3":[(9,5), (5,0), (0,4)],
"T2":[(4,9), (9,5), (5,0)],
"T1":[(0,4), (4,9), (9,5)]
}
t4_path = None #path to table req
aux_path = None #path to req cb
path_map = { #pixel path to each exit point on the table
"T1":[],
"T2":[],
"T3":[],
"T4":[]
}
path_box_map = { #box coordinates path to draw path on the tables
"T1":[],
"T2":[],
"T3":[],
"T4":[]
}
maze_map ={
}
collection_box = None #integer variable to store the number of the collection box
client_id = -1
############################################################
##############################################################
# NOTE: YOU ARE NOT ALLOWED TO MAKE ANY CHANGE TO THIS FUNCTION
#
# Function Name: send_color_and_collection_box_identified
# Inputs: ball_color and collection_box_name
# Outputs: None
# Purpose: 1. This function should only be called when the task is being evaluated using
# test executable.
# 2. The format to send the data is as follows:
# 'color::collection_box_name'
def send_color_and_collection_box_identified(ball_color, collection_box_name):
|
################# ADD UTILITY FUNCTIONS HERE #################
## You can define any utility functions for your code. ##
## Please add proper comments to ensure that your code is ##
## readable and easy to understand. ##
##############################################################
''' Function name: color_get
Inputs: Image from vision sensor
Outputs: Color of the ball detected in the image
Usage: Takes in the image from the vision sensors and returns the color of the ball detected in the image
Example call: color_get(image_from_vision_sensor)
'''
def color_get(img_file_path):
if(img_file_path is None):
return
#Read the image
if type(img_file_path) == type(str()):
img_file_path = cv2.imread(img_file_path)
else:
img_file_path= img_file_path
#cv2.imwrite("colorefromrailing.png",img_file_path)
imageFrame = cv2.GaussianBlur(img_file_path,(5,5),cv2.BORDER_TRANSPARENT)
hsvFrame = cv2.cvtColor(imageFrame, cv2.COLOR_BGR2HSV)
#To create a mask for red colour
red_lower = np.array([0, 50, 50])
red_upper = np.array([10, 255, 255])
red_mask = cv2.inRange(hsvFrame, red_lower, red_upper)
kernal = np.ones((5, 5))
red_gray=cv2.threshold(red_mask, 245,225, cv2.THRESH_BINARY)[1]
gray_blur_red= cv2.Canny(red_gray,100,255)
#Create a mask for blue colour
blue_lower = np.array([94, 20, 0], np.uint8)
blue_upper = np.array([140,255 ,255], np.uint8)
blue_mask = cv2.inRange(hsvFrame, blue_lower, blue_upper)
kernal = np.ones((5, 5))
blue_mask = cv2.dilate(blue_mask, kernal)
blue_gray=cv2.threshold(blue_mask, 245,225, cv2.THRESH_TRUNC)[1]
gray_blur_blue= cv2.Canny(blue_gray,100,255)
#Create a mask for green colour
green_lower = np.array([25, 52, 72], np.uint8)
green_upper = np.array([102, 255, 255], np.uint8)
green_mask = cv2.inRange(hsvFrame, green_lower, green_upper)
kernal = np.ones((5, 5))
green_mask = cv2.dilate(green_mask, kernal)
green_gray=cv2.threshold(green_mask, 250,255, cv2.THRESH_BINARY)[1]
gray_blur_green = cv2.Canny(green_gray,100,255)
#find contours on blue mask
cnts= cv2.findContours(gray_blur_blue, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if type(cnts[-1]) !=type(None):
if len(cnts) == 2:
cnts = cnts[0]
elif len(cnts) == 3:
cnts = cnts[1]
if (len(cnts)):
return 'blue'
#Find red contours in the image
cnts= cv2.findContours(gray_blur_red, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if type(cnts[-1]) !=type(None) :
if len(cnts) == 2:
cnts = cnts[0]
elif len(cnts) == 3:
cnts = cnts[1]
if (len(cnts)):
return 'red'
# Find green contours in the image
cnts= cv2.findContours(gray_blur_green, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if type(cnts[-1]) !=type(None) :
if len(cnts) == 2:
cnts = cnts[0]
elif len(cnts) == 3:
cnts = cnts[1]
if(len(cnts)):
return 'green'
''' Function name: traverse_ball
Usage: traverses the ball from one point to another
Inputs: servo handles(x and y), vision sensor to be read and pixel path which the ball has to follow
Outputs: None
Example Call : traverse_ball(servohandle_x_t4, servo_handle_y_t4, visionsensor_4, t4_path)
'''
def traverse_ball(servohandle_x,servohandle_y,vision_sensor_handle,pixel_path):
global client_id
rt_code, prev_time = sim.simxGetStringSignal(client_id,'time',sim.simx_opmode_streaming)
current_time = ''
while(len(current_time) == 0 ):
rt_code,current_time =sim.simxGetStringSignal(client_id,'time',sim.simx_opmode_buffer)
j = 0
k= 0
for i in pixel_path:
i.reverse()
task_3.change_setpoint(i)
while(1):
j+=1
k+=1
vision_sensor_image, image_resolution, return_code = task_2a.get_vision_sensor_image(client_id,vision_sensor_handle)
transformed_image = task_2a.transform_vision_sensor_image(vision_sensor_image,image_resolution)
warped_img = task_1b.applyPerspectiveTransform(transformed_image)
shapes = task_1a_part1.scan_image(warped_img)
if(shapes):
warped_img = cv2.cvtColor(warped_img,cv2.COLOR_GRAY2RGB)
warped_img = cv2.circle(warped_img,(shapes['Circle'][1],shapes['Circle'][2]),5,(0,255,0),2)
warped_img = cv2.circle(warped_img,(i[0],i[1]),5,(255,0,0),2)
if(abs(shapes['Circle'][1]-i[0]) <= 30 and abs(shapes['Circle'][2]-i[1]) <= 30):
break
else:
task_3.control_logic(client_id,shapes['Circle'][1],shapes['Circle'][2],servohandle_x,servohandle_y)
return 1
''' | global client_id
color_and_cb = [ball_color + '::' + collection_box_name]
inputBuffer = bytearray()
return_code, retInts, retFloats, retStrings, retBuffer = sim.simxCallScriptFunction(client_id,'evaluation_screen_respondable_1',
sim.sim_scripttype_childscript,'color_and_cb_identification',[],[],color_and_cb,inputBuffer,sim.simx_opmode_blocking) | identifier_body |
task_5.py | _t4 = -1
servo_handle_x_t3 = -1
servo_handle_y_t3 = -1
servo_handle_x_t2 = -1
servo_handle_y_t2 = -1
servo_handle_x_t1 = -1
servo_handle_y_t1 = -1
handle_list = {}
try:
with open('ball_details.json') as file:
ball_details = json.load(file)
except ImportError:
print('\n[ERROR] ball_details.json file is not present in the current directory.')
print('Your current directory is: ', os.getcwd())
| "T4":[(0,5)],
"T3":[(4,9)],
"T2":[(0,4)],
"T1":[(5,0)]
} # do mapping of start and end point on the basis of color and json file.
map_end = {
"T4":[(5,9), (9,4), (4,0)],
"T3":[(9,5), (5,0), (0,4)],
"T2":[(4,9), (9,5), (5,0)],
"T1":[(0,4), (4,9), (9,5)]
}
t4_path = None #path to table req
aux_path = None #path to req cb
path_map = { #pixel path to each exit point on the table
"T1":[],
"T2":[],
"T3":[],
"T4":[]
}
path_box_map = { #box coordinates path to draw path on the tables
"T1":[],
"T2":[],
"T3":[],
"T4":[]
}
maze_map ={
}
collection_box = None #integer variable to store the number of the collection box
client_id = -1
############################################################
##############################################################
# NOTE: YOU ARE NOT ALLOWED TO MAKE ANY CHANGE TO THIS FUNCTION
#
# Function Name: send_color_and_collection_box_identified
# Inputs: ball_color and collection_box_name
# Outputs: None
# Purpose: 1. This function should only be called when the task is being evaluated using
# test executable.
# 2. The format to send the data is as follows:
# 'color::collection_box_name'
def send_color_and_collection_box_identified(ball_color, collection_box_name):
global client_id
color_and_cb = [ball_color + '::' + collection_box_name]
inputBuffer = bytearray()
return_code, retInts, retFloats, retStrings, retBuffer = sim.simxCallScriptFunction(client_id,'evaluation_screen_respondable_1',
sim.sim_scripttype_childscript,'color_and_cb_identification',[],[],color_and_cb,inputBuffer,sim.simx_opmode_blocking)
################# ADD UTILITY FUNCTIONS HERE #################
## You can define any utility functions for your code. ##
## Please add proper comments to ensure that your code is ##
## readable and easy to understand. ##
##############################################################
''' Function name: color_get
Inputs: Image from vision sensor
Outputs: Color of the ball detected in the image
Usage: Takes in the image from the vision sensors and returns the color of the ball detected in the image
Example call: color_get(image_from_vision_sensor)
'''
def color_get(img_file_path):
if(img_file_path is None):
return
#Read the image
if type(img_file_path) == type(str()):
img_file_path = cv2.imread(img_file_path)
else:
img_file_path= img_file_path
#cv2.imwrite("colorefromrailing.png",img_file_path)
imageFrame = cv2.GaussianBlur(img_file_path,(5,5),cv2.BORDER_TRANSPARENT)
hsvFrame = cv2.cvtColor(imageFrame, cv2.COLOR_BGR2HSV)
#To create a mask for red colour
red_lower = np.array([0, 50, 50])
red_upper = np.array([10, 255, 255])
red_mask = cv2.inRange(hsvFrame, red_lower, red_upper)
kernal = np.ones((5, 5))
red_gray=cv2.threshold(red_mask, 245,225, cv2.THRESH_BINARY)[1]
gray_blur_red= cv2.Canny(red_gray,100,255)
#Create a mask for blue colour
blue_lower = np.array([94, 20, 0], np.uint8)
blue_upper = np.array([140,255 ,255], np.uint8)
blue_mask = cv2.inRange(hsvFrame, blue_lower, blue_upper)
kernal = np.ones((5, 5))
blue_mask = cv2.dilate(blue_mask, kernal)
blue_gray=cv2.threshold(blue_mask, 245,225, cv2.THRESH_TRUNC)[1]
gray_blur_blue= cv2.Canny(blue_gray,100,255)
#Create a mask for green colour
green_lower = np.array([25, 52, 72], np.uint8)
green_upper = np.array([102, 255, 255], np.uint8)
green_mask = cv2.inRange(hsvFrame, green_lower, green_upper)
kernal = np.ones((5, 5))
green_mask = cv2.dilate(green_mask, kernal)
green_gray=cv2.threshold(green_mask, 250,255, cv2.THRESH_BINARY)[1]
gray_blur_green = cv2.Canny(green_gray,100,255)
#find contours on blue mask
cnts= cv2.findContours(gray_blur_blue, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if type(cnts[-1]) !=type(None):
if len(cnts) == 2:
cnts = cnts[0]
elif len(cnts) == 3:
cnts = cnts[1]
if (len(cnts)):
return 'blue'
#Find red contours in the image
cnts= cv2.findContours(gray_blur_red, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if type(cnts[-1]) !=type(None) :
if len(cnts) == 2:
cnts = cnts[0]
elif len(cnts) == 3:
cnts = cnts[1]
if (len(cnts)):
return 'red'
# Find green contours in the image
cnts= cv2.findContours(gray_blur_green, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if type(cnts[-1]) !=type(None) :
if len(cnts) == 2:
cnts = cnts[0]
elif len(cnts) == 3:
cnts = cnts[1]
if(len(cnts)):
return 'green'
''' Function name: traverse_ball
Usage: traverses the ball from one point to another
Inputs: servo handles(x and y), vision sensor to be read and pixel path which the ball has to follow
Outputs: None
Example Call : traverse_ball(servohandle_x_t4, servo_handle_y_t4, visionsensor_4, t4_path)
'''
def traverse_ball(servohandle_x,servohandle_y,vision_sensor_handle,pixel_path):
global client_id
rt_code, prev_time = sim.simxGetStringSignal(client_id,'time',sim.simx_opmode_streaming)
current_time = ''
while(len(current_time) == 0 ):
rt_code,current_time =sim.simxGetStringSignal(client_id,'time',sim.simx_opmode_buffer)
j = 0
k= 0
for i in pixel_path:
i.reverse()
task_3.change_setpoint(i)
while(1):
j+=1
k+=1
vision_sensor_image, image_resolution, return_code = task_2a.get_vision_sensor_image(client_id,vision_sensor_handle)
transformed_image = task_2a.transform_vision_sensor_image(vision_sensor_image,image_resolution)
warped_img = task_1b.applyPerspectiveTransform(transformed_image)
shapes = task_1a_part1.scan_image(warped_img)
if(shapes):
warped_img = cv2.cvtColor(warped_img,cv2.COLOR_GRAY2RGB)
warped_img = cv2.circle(warped_img,(shapes['Circle'][1],shapes['Circle'][2]),5,(0,255,0),2)
warped_img = cv2.circle(warped_img,(i[0],i[1]),5,(255,0,0),2)
if(abs(shapes['Circle'][1]-i[0]) <= 30 and abs(shapes['Circle'][2]-i[1]) <= 30):
break
else:
task_3.control_logic(client_id,shapes['Circle'][1],shapes['Circle'][2],servohandle_x,servohandle_y)
return 1
''' | print('Make sure ball_details.json is present in this current directory.\n')
map_start = {
| random_line_split |
delete.go | /pkg/admission"
"k8s.io/apiserver/pkg/audit"
"k8s.io/apiserver/pkg/endpoints/handlers/finisher"
requestmetrics "k8s.io/apiserver/pkg/endpoints/handlers/metrics"
"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/apiserver/pkg/util/dryrun"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-base/tracing"
)
// DeleteResource returns a function that will handle a resource deletion
// TODO admission here becomes solely validating admission
func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestScope, admit admission.Interface) http.HandlerFunc {
return func(w http.ResponseWriter, req *http.Request) {
ctx := req.Context()
// For performance tracking purposes.
ctx, span := tracing.Start(ctx, "Delete", traceFields(req)...)
defer span.End(500 * time.Millisecond)
namespace, name, err := scope.Namer.Name(req)
if err != nil {
scope.err(err, w, req)
return
}
// enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided
// timeout inside the parent context is lower than requestTimeoutUpperBound.
ctx, cancel := context.WithTimeout(ctx, requestTimeoutUpperBound)
defer cancel()
ctx = request.WithNamespace(ctx, namespace)
admit = admission.WithAudit(admit)
outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)
if err != nil {
scope.err(err, w, req)
return
}
options := &metav1.DeleteOptions{}
if allowsOptions {
body, err := limitedReadBodyWithRecordMetric(ctx, req, scope.MaxRequestBodyBytes, scope.Resource.GroupResource().String(), requestmetrics.Delete)
if err != nil {
span.AddEvent("limitedReadBody failed", attribute.Int("len", len(body)), attribute.String("err", err.Error()))
scope.err(err, w, req)
return
}
span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body)))
if len(body) > 0 {
s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversionscheme.Codecs)
if err != nil {
scope.err(err, w, req)
return
}
// For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions
// It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions
defaultGVK := scope.MetaGroupVersion.WithKind("DeleteOptions")
obj, gvk, err := metainternalversionscheme.Codecs.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options)
if err != nil {
scope.err(err, w, req)
return
}
if obj != options {
scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), w, req)
return
}
span.AddEvent("Decoded delete options")
objGV := gvk.GroupVersion()
audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, metainternalversionscheme.Codecs)
span.AddEvent("Recorded the audit event")
} else {
if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil {
err = errors.NewBadRequest(err.Error())
scope.err(err, w, req)
return
}
}
}
if errs := validation.ValidateDeleteOptions(options); len(errs) > 0 {
err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", errs)
scope.err(err, w, req)
return
}
options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("DeleteOptions"))
span.AddEvent("About to delete object from database")
wasDeleted := true
userInfo, _ := request.UserFrom(ctx)
staticAdmissionAttrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Delete, options, dryrun.IsDryRun(options.DryRun), userInfo)
result, err := finisher.FinishRequest(ctx, func() (runtime.Object, error) {
obj, deleted, err := r.Delete(ctx, name, rest.AdmissionToValidateObjectDeleteFunc(admit, staticAdmissionAttrs, scope), options)
wasDeleted = deleted
return obj, err
})
if err != nil {
scope.err(err, w, req) | }
span.AddEvent("Object deleted from database")
status := http.StatusOK
// Return http.StatusAccepted if the resource was not deleted immediately and
// user requested cascading deletion by setting OrphanDependents=false.
// Note: We want to do this always if resource was not deleted immediately, but
// that will break existing clients.
// Other cases where resource is not instantly deleted are: namespace deletion
// and pod graceful deletion.
//nolint:staticcheck // SA1019 backwards compatibility
//nolint: staticcheck
if !wasDeleted && options.OrphanDependents != nil && !*options.OrphanDependents {
status = http.StatusAccepted
}
// if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid
// object with the response.
if result == nil {
result = &metav1.Status{
Status: metav1.StatusSuccess,
Code: int32(status),
Details: &metav1.StatusDetails{
Name: name,
Kind: scope.Kind.Kind,
},
}
}
span.AddEvent("About to write a response")
defer span.AddEvent("Writing http response done")
transformResponseObject(ctx, scope, req, w, status, outputMediaType, result)
}
}
// DeleteCollection returns a function that will handle a collection deletion
func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestScope, admit admission.Interface) http.HandlerFunc {
return func(w http.ResponseWriter, req *http.Request) {
ctx := req.Context()
ctx, span := tracing.Start(ctx, "Delete", traceFields(req)...)
defer span.End(500 * time.Millisecond)
namespace, err := scope.Namer.Namespace(req)
if err != nil {
scope.err(err, w, req)
return
}
// DELETECOLLECTION can be a lengthy operation,
// we should not impose any 34s timeout here.
// NOTE: This is similar to LIST which does not enforce a 34s timeout.
ctx = request.WithNamespace(ctx, namespace)
outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)
if err != nil {
scope.err(err, w, req)
return
}
listOptions := metainternalversion.ListOptions{}
if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &listOptions); err != nil {
err = errors.NewBadRequest(err.Error())
scope.err(err, w, req)
return
}
metainternalversion.SetListOptionsDefaults(&listOptions, utilfeature.DefaultFeatureGate.Enabled(features.WatchList))
if errs := metainternalversionvalidation.ValidateListOptions(&listOptions, utilfeature.DefaultFeatureGate.Enabled(features.WatchList)); len(errs) > 0 {
err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "ListOptions"}, "", errs)
scope.err(err, w, req)
return
}
// transform fields
// TODO: DecodeParametersInto should do this.
if listOptions.FieldSelector != nil {
fn := func(label, value string) (newLabel, newValue string, err error) {
return scope.Convertor.ConvertFieldLabel(scope.Kind, label, value)
}
if listOptions.FieldSelector, err = listOptions.FieldSelector.Transform(fn); err != nil {
// TODO: allow bad request to set field causes based on query parameters
err = errors.NewBadRequest(err.Error())
scope.err(err, w, req)
return
}
}
options := &metav1.DeleteOptions{}
if checkBody {
body, err := limitedReadBodyWithRecordMetric(ctx, req, scope.MaxRequestBodyBytes, scope.Resource.GroupResource().String(), requestmetrics.DeleteCollection)
if err != nil {
span.AddEvent("limitedReadBody failed", attribute.Int("len", len(body)), attribute.String("err", err.Error()))
scope.err(err, w, req)
return
}
span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body)))
if len(body) > 0 {
s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversionscheme.Code | return | random_line_split |
delete.go | /pkg/admission"
"k8s.io/apiserver/pkg/audit"
"k8s.io/apiserver/pkg/endpoints/handlers/finisher"
requestmetrics "k8s.io/apiserver/pkg/endpoints/handlers/metrics"
"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/apiserver/pkg/util/dryrun"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-base/tracing"
)
// DeleteResource returns a function that will handle a resource deletion
// TODO admission here becomes solely validating admission
func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestScope, admit admission.Interface) http.HandlerFunc {
return func(w http.ResponseWriter, req *http.Request) {
ctx := req.Context()
// For performance tracking purposes.
ctx, span := tracing.Start(ctx, "Delete", traceFields(req)...)
defer span.End(500 * time.Millisecond)
namespace, name, err := scope.Namer.Name(req)
if err != nil {
scope.err(err, w, req)
return
}
// enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided
// timeout inside the parent context is lower than requestTimeoutUpperBound.
ctx, cancel := context.WithTimeout(ctx, requestTimeoutUpperBound)
defer cancel()
ctx = request.WithNamespace(ctx, namespace)
admit = admission.WithAudit(admit)
outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)
if err != nil {
scope.err(err, w, req)
return
}
options := &metav1.DeleteOptions{}
if allowsOptions {
body, err := limitedReadBodyWithRecordMetric(ctx, req, scope.MaxRequestBodyBytes, scope.Resource.GroupResource().String(), requestmetrics.Delete)
if err != nil {
span.AddEvent("limitedReadBody failed", attribute.Int("len", len(body)), attribute.String("err", err.Error()))
scope.err(err, w, req)
return
}
span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body)))
if len(body) > 0 {
s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversionscheme.Codecs)
if err != nil {
scope.err(err, w, req)
return
}
// For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions
// It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions
defaultGVK := scope.MetaGroupVersion.WithKind("DeleteOptions")
obj, gvk, err := metainternalversionscheme.Codecs.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options)
if err != nil {
scope.err(err, w, req)
return
}
if obj != options {
scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), w, req)
return
}
span.AddEvent("Decoded delete options")
objGV := gvk.GroupVersion()
audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, metainternalversionscheme.Codecs)
span.AddEvent("Recorded the audit event")
} else {
if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil {
err = errors.NewBadRequest(err.Error())
scope.err(err, w, req)
return
}
}
}
if errs := validation.ValidateDeleteOptions(options); len(errs) > 0 {
err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", errs)
scope.err(err, w, req)
return
}
options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("DeleteOptions"))
span.AddEvent("About to delete object from database")
wasDeleted := true
userInfo, _ := request.UserFrom(ctx)
staticAdmissionAttrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Delete, options, dryrun.IsDryRun(options.DryRun), userInfo)
result, err := finisher.FinishRequest(ctx, func() (runtime.Object, error) {
obj, deleted, err := r.Delete(ctx, name, rest.AdmissionToValidateObjectDeleteFunc(admit, staticAdmissionAttrs, scope), options)
wasDeleted = deleted
return obj, err
})
if err != nil {
scope.err(err, w, req)
return
}
span.AddEvent("Object deleted from database")
status := http.StatusOK
// Return http.StatusAccepted if the resource was not deleted immediately and
// user requested cascading deletion by setting OrphanDependents=false.
// Note: We want to do this always if resource was not deleted immediately, but
// that will break existing clients.
// Other cases where resource is not instantly deleted are: namespace deletion
// and pod graceful deletion.
//nolint:staticcheck // SA1019 backwards compatibility
//nolint: staticcheck
if !wasDeleted && options.OrphanDependents != nil && !*options.OrphanDependents {
status = http.StatusAccepted
}
// if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid
// object with the response.
if result == nil {
result = &metav1.Status{
Status: metav1.StatusSuccess,
Code: int32(status),
Details: &metav1.StatusDetails{
Name: name,
Kind: scope.Kind.Kind,
},
}
}
span.AddEvent("About to write a response")
defer span.AddEvent("Writing http response done")
transformResponseObject(ctx, scope, req, w, status, outputMediaType, result)
}
}
// DeleteCollection returns a function that will handle a collection deletion
func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestScope, admit admission.Interface) http.HandlerFunc | return
}
listOptions := metainternalversion.ListOptions{}
if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &listOptions); err != nil {
err = errors.NewBadRequest(err.Error())
scope.err(err, w, req)
return
}
metainternalversion.SetListOptionsDefaults(&listOptions, utilfeature.DefaultFeatureGate.Enabled(features.WatchList))
if errs := metainternalversionvalidation.ValidateListOptions(&listOptions, utilfeature.DefaultFeatureGate.Enabled(features.WatchList)); len(errs) > 0 {
err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "ListOptions"}, "", errs)
scope.err(err, w, req)
return
}
// transform fields
// TODO: DecodeParametersInto should do this.
if listOptions.FieldSelector != nil {
fn := func(label, value string) (newLabel, newValue string, err error) {
return scope.Convertor.ConvertFieldLabel(scope.Kind, label, value)
}
if listOptions.FieldSelector, err = listOptions.FieldSelector.Transform(fn); err != nil {
// TODO: allow bad request to set field causes based on query parameters
err = errors.NewBadRequest(err.Error())
scope.err(err, w, req)
return
}
}
options := &metav1.DeleteOptions{}
if checkBody {
body, err := limitedReadBodyWithRecordMetric(ctx, req, scope.MaxRequestBodyBytes, scope.Resource.GroupResource().String(), requestmetrics.DeleteCollection)
if err != nil {
span.AddEvent("limitedReadBody failed", attribute.Int("len", len(body)), attribute.String("err", err.Error()))
scope.err(err, w, req)
return
}
span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body)))
if len(body) > 0 {
s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversions | {
return func(w http.ResponseWriter, req *http.Request) {
ctx := req.Context()
ctx, span := tracing.Start(ctx, "Delete", traceFields(req)...)
defer span.End(500 * time.Millisecond)
namespace, err := scope.Namer.Namespace(req)
if err != nil {
scope.err(err, w, req)
return
}
// DELETECOLLECTION can be a lengthy operation,
// we should not impose any 34s timeout here.
// NOTE: This is similar to LIST which does not enforce a 34s timeout.
ctx = request.WithNamespace(ctx, namespace)
outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)
if err != nil {
scope.err(err, w, req) | identifier_body |
delete.go | /pkg/admission"
"k8s.io/apiserver/pkg/audit"
"k8s.io/apiserver/pkg/endpoints/handlers/finisher"
requestmetrics "k8s.io/apiserver/pkg/endpoints/handlers/metrics"
"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/apiserver/pkg/util/dryrun"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-base/tracing"
)
// DeleteResource returns a function that will handle a resource deletion
// TODO admission here becomes solely validating admission
func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestScope, admit admission.Interface) http.HandlerFunc {
return func(w http.ResponseWriter, req *http.Request) {
ctx := req.Context()
// For performance tracking purposes.
ctx, span := tracing.Start(ctx, "Delete", traceFields(req)...)
defer span.End(500 * time.Millisecond)
namespace, name, err := scope.Namer.Name(req)
if err != nil {
scope.err(err, w, req)
return
}
// enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided
// timeout inside the parent context is lower than requestTimeoutUpperBound.
ctx, cancel := context.WithTimeout(ctx, requestTimeoutUpperBound)
defer cancel()
ctx = request.WithNamespace(ctx, namespace)
admit = admission.WithAudit(admit)
outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)
if err != nil {
scope.err(err, w, req)
return
}
options := &metav1.DeleteOptions{}
if allowsOptions {
body, err := limitedReadBodyWithRecordMetric(ctx, req, scope.MaxRequestBodyBytes, scope.Resource.GroupResource().String(), requestmetrics.Delete)
if err != nil {
span.AddEvent("limitedReadBody failed", attribute.Int("len", len(body)), attribute.String("err", err.Error()))
scope.err(err, w, req)
return
}
span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body)))
if len(body) > 0 {
s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversionscheme.Codecs)
if err != nil {
scope.err(err, w, req)
return
}
// For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions
// It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions
defaultGVK := scope.MetaGroupVersion.WithKind("DeleteOptions")
obj, gvk, err := metainternalversionscheme.Codecs.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options)
if err != nil {
scope.err(err, w, req)
return
}
if obj != options {
scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), w, req)
return
}
span.AddEvent("Decoded delete options")
objGV := gvk.GroupVersion()
audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, metainternalversionscheme.Codecs)
span.AddEvent("Recorded the audit event")
} else {
if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil {
err = errors.NewBadRequest(err.Error())
scope.err(err, w, req)
return
}
}
}
if errs := validation.ValidateDeleteOptions(options); len(errs) > 0 {
err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", errs)
scope.err(err, w, req)
return
}
options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("DeleteOptions"))
span.AddEvent("About to delete object from database")
wasDeleted := true
userInfo, _ := request.UserFrom(ctx)
staticAdmissionAttrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Delete, options, dryrun.IsDryRun(options.DryRun), userInfo)
result, err := finisher.FinishRequest(ctx, func() (runtime.Object, error) {
obj, deleted, err := r.Delete(ctx, name, rest.AdmissionToValidateObjectDeleteFunc(admit, staticAdmissionAttrs, scope), options)
wasDeleted = deleted
return obj, err
})
if err != nil {
scope.err(err, w, req)
return
}
span.AddEvent("Object deleted from database")
status := http.StatusOK
// Return http.StatusAccepted if the resource was not deleted immediately and
// user requested cascading deletion by setting OrphanDependents=false.
// Note: We want to do this always if resource was not deleted immediately, but
// that will break existing clients.
// Other cases where resource is not instantly deleted are: namespace deletion
// and pod graceful deletion.
//nolint:staticcheck // SA1019 backwards compatibility
//nolint: staticcheck
if !wasDeleted && options.OrphanDependents != nil && !*options.OrphanDependents {
status = http.StatusAccepted
}
// if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid
// object with the response.
if result == nil {
result = &metav1.Status{
Status: metav1.StatusSuccess,
Code: int32(status),
Details: &metav1.StatusDetails{
Name: name,
Kind: scope.Kind.Kind,
},
}
}
span.AddEvent("About to write a response")
defer span.AddEvent("Writing http response done")
transformResponseObject(ctx, scope, req, w, status, outputMediaType, result)
}
}
// DeleteCollection returns a function that will handle a collection deletion
func | (r rest.CollectionDeleter, checkBody bool, scope *RequestScope, admit admission.Interface) http.HandlerFunc {
return func(w http.ResponseWriter, req *http.Request) {
ctx := req.Context()
ctx, span := tracing.Start(ctx, "Delete", traceFields(req)...)
defer span.End(500 * time.Millisecond)
namespace, err := scope.Namer.Namespace(req)
if err != nil {
scope.err(err, w, req)
return
}
// DELETECOLLECTION can be a lengthy operation,
// we should not impose any 34s timeout here.
// NOTE: This is similar to LIST which does not enforce a 34s timeout.
ctx = request.WithNamespace(ctx, namespace)
outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)
if err != nil {
scope.err(err, w, req)
return
}
listOptions := metainternalversion.ListOptions{}
if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &listOptions); err != nil {
err = errors.NewBadRequest(err.Error())
scope.err(err, w, req)
return
}
metainternalversion.SetListOptionsDefaults(&listOptions, utilfeature.DefaultFeatureGate.Enabled(features.WatchList))
if errs := metainternalversionvalidation.ValidateListOptions(&listOptions, utilfeature.DefaultFeatureGate.Enabled(features.WatchList)); len(errs) > 0 {
err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "ListOptions"}, "", errs)
scope.err(err, w, req)
return
}
// transform fields
// TODO: DecodeParametersInto should do this.
if listOptions.FieldSelector != nil {
fn := func(label, value string) (newLabel, newValue string, err error) {
return scope.Convertor.ConvertFieldLabel(scope.Kind, label, value)
}
if listOptions.FieldSelector, err = listOptions.FieldSelector.Transform(fn); err != nil {
// TODO: allow bad request to set field causes based on query parameters
err = errors.NewBadRequest(err.Error())
scope.err(err, w, req)
return
}
}
options := &metav1.DeleteOptions{}
if checkBody {
body, err := limitedReadBodyWithRecordMetric(ctx, req, scope.MaxRequestBodyBytes, scope.Resource.GroupResource().String(), requestmetrics.DeleteCollection)
if err != nil {
span.AddEvent("limitedReadBody failed", attribute.Int("len", len(body)), attribute.String("err", err.Error()))
scope.err(err, w, req)
return
}
span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body)))
if len(body) > 0 {
s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversions | DeleteCollection | identifier_name |
delete.go | /pkg/admission"
"k8s.io/apiserver/pkg/audit"
"k8s.io/apiserver/pkg/endpoints/handlers/finisher"
requestmetrics "k8s.io/apiserver/pkg/endpoints/handlers/metrics"
"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/apiserver/pkg/util/dryrun"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-base/tracing"
)
// DeleteResource returns a function that will handle a resource deletion
// TODO admission here becomes solely validating admission
func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestScope, admit admission.Interface) http.HandlerFunc {
return func(w http.ResponseWriter, req *http.Request) {
ctx := req.Context()
// For performance tracking purposes.
ctx, span := tracing.Start(ctx, "Delete", traceFields(req)...)
defer span.End(500 * time.Millisecond)
namespace, name, err := scope.Namer.Name(req)
if err != nil {
scope.err(err, w, req)
return
}
// enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided
// timeout inside the parent context is lower than requestTimeoutUpperBound.
ctx, cancel := context.WithTimeout(ctx, requestTimeoutUpperBound)
defer cancel()
ctx = request.WithNamespace(ctx, namespace)
admit = admission.WithAudit(admit)
outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)
if err != nil {
scope.err(err, w, req)
return
}
options := &metav1.DeleteOptions{}
if allowsOptions {
body, err := limitedReadBodyWithRecordMetric(ctx, req, scope.MaxRequestBodyBytes, scope.Resource.GroupResource().String(), requestmetrics.Delete)
if err != nil {
span.AddEvent("limitedReadBody failed", attribute.Int("len", len(body)), attribute.String("err", err.Error()))
scope.err(err, w, req)
return
}
span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body)))
if len(body) > 0 | objGV := gvk.GroupVersion()
audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, metainternalversionscheme.Codecs)
span.AddEvent("Recorded the audit event")
}
else {
if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil {
err = errors.NewBadRequest(err.Error())
scope.err(err, w, req)
return
}
}
}
if errs := validation.ValidateDeleteOptions(options); len(errs) > 0 {
err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", errs)
scope.err(err, w, req)
return
}
options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("DeleteOptions"))
span.AddEvent("About to delete object from database")
wasDeleted := true
userInfo, _ := request.UserFrom(ctx)
staticAdmissionAttrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Delete, options, dryrun.IsDryRun(options.DryRun), userInfo)
result, err := finisher.FinishRequest(ctx, func() (runtime.Object, error) {
obj, deleted, err := r.Delete(ctx, name, rest.AdmissionToValidateObjectDeleteFunc(admit, staticAdmissionAttrs, scope), options)
wasDeleted = deleted
return obj, err
})
if err != nil {
scope.err(err, w, req)
return
}
span.AddEvent("Object deleted from database")
status := http.StatusOK
// Return http.StatusAccepted if the resource was not deleted immediately and
// user requested cascading deletion by setting OrphanDependents=false.
// Note: We want to do this always if resource was not deleted immediately, but
// that will break existing clients.
// Other cases where resource is not instantly deleted are: namespace deletion
// and pod graceful deletion.
//nolint:staticcheck // SA1019 backwards compatibility
//nolint: staticcheck
if !wasDeleted && options.OrphanDependents != nil && !*options.OrphanDependents {
status = http.StatusAccepted
}
// if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid
// object with the response.
if result == nil {
result = &metav1.Status{
Status: metav1.StatusSuccess,
Code: int32(status),
Details: &metav1.StatusDetails{
Name: name,
Kind: scope.Kind.Kind,
},
}
}
span.AddEvent("About to write a response")
defer span.AddEvent("Writing http response done")
transformResponseObject(ctx, scope, req, w, status, outputMediaType, result)
}
}
// DeleteCollection returns a function that will handle a collection deletion
func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestScope, admit admission.Interface) http.HandlerFunc {
return func(w http.ResponseWriter, req *http.Request) {
ctx := req.Context()
ctx, span := tracing.Start(ctx, "Delete", traceFields(req)...)
defer span.End(500 * time.Millisecond)
namespace, err := scope.Namer.Namespace(req)
if err != nil {
scope.err(err, w, req)
return
}
// DELETECOLLECTION can be a lengthy operation,
// we should not impose any 34s timeout here.
// NOTE: This is similar to LIST which does not enforce a 34s timeout.
ctx = request.WithNamespace(ctx, namespace)
outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)
if err != nil {
scope.err(err, w, req)
return
}
listOptions := metainternalversion.ListOptions{}
if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &listOptions); err != nil {
err = errors.NewBadRequest(err.Error())
scope.err(err, w, req)
return
}
metainternalversion.SetListOptionsDefaults(&listOptions, utilfeature.DefaultFeatureGate.Enabled(features.WatchList))
if errs := metainternalversionvalidation.ValidateListOptions(&listOptions, utilfeature.DefaultFeatureGate.Enabled(features.WatchList)); len(errs) > 0 {
err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "ListOptions"}, "", errs)
scope.err(err, w, req)
return
}
// transform fields
// TODO: DecodeParametersInto should do this.
if listOptions.FieldSelector != nil {
fn := func(label, value string) (newLabel, newValue string, err error) {
return scope.Convertor.ConvertFieldLabel(scope.Kind, label, value)
}
if listOptions.FieldSelector, err = listOptions.FieldSelector.Transform(fn); err != nil {
// TODO: allow bad request to set field causes based on query parameters
err = errors.NewBadRequest(err.Error())
scope.err(err, w, req)
return
}
}
options := &metav1.DeleteOptions{}
if checkBody {
body, err := limitedReadBodyWithRecordMetric(ctx, req, scope.MaxRequestBodyBytes, scope.Resource.GroupResource().String(), requestmetrics.DeleteCollection)
if err != nil {
span.AddEvent("limitedReadBody failed", attribute.Int("len", len(body)), attribute.String("err", err.Error()))
scope.err(err, w, req)
return
}
span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body)))
if len(body) > 0 {
s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversions | {
s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversionscheme.Codecs)
if err != nil {
scope.err(err, w, req)
return
}
// For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions
// It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions
defaultGVK := scope.MetaGroupVersion.WithKind("DeleteOptions")
obj, gvk, err := metainternalversionscheme.Codecs.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options)
if err != nil {
scope.err(err, w, req)
return
}
if obj != options {
scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), w, req)
return
}
span.AddEvent("Decoded delete options")
| conditional_block |
oldDH.py | elif(node['x']-rad<30):
startx=30+rad+20#aggiungo un margine
if(node['y']+rad>1000):
starty=1000-rad-20#tolgo un margine
elif(node['y']-rad<30):
starty=30+rad+20#aggiungo un margine
#radial base approach, mi muovo su una circonferenza: fa pochi spostamenti buoni
"""angle=2*Math.pi*uniform(0,1)#senza uniform mi muovo sempre a dx, cos 1 sin 0
nx=startx+Math.cos(angle)*rad
ny=starty+Math.sin(angle)*rad"""
#approccio quadrato, mi muovo nell'area di un quadrato
nx=uniform(startx-rad,startx+rad)
ny=uniform(starty-rad,starty+rad)
width=1000
height=800
maxNodeX=width
minNodeX=0
maxNodeY=height
minNodeY=0
log= open('./tmp/info.log',"a+")
log.write("("+str(node['x'])+", "+str(node['y'])+") =>("+str(nx)+", "+str(ny)+")\n")
log.close()
for n in nodes:
if(n['id']==node['id']):
n['x']=nx
n['y']=ny
break
for l in links:
if(l['source']==node['id']):
l['s']['x']=nx
l['s']['y']=ny
if(l['target']==node['id']):
l['t']['x']=nx
l['t']['y']=ny
def moveBack(vc,vx,vy,nodes,links):
for n in nodes:
if(n['id']==vc['id']):
n['x']=vx
n['y']=vy
break
for l in links:
if(l['source']==vc['id']):
l['s']['x']=vx
l['s']['y']=vx
if(l['target']==vc['id']):
l['t']['x']=vx
l['t']['y']=vx
def fixNodePos(nodes):
maxX=0
maxY=0
for v in nodes:
if(v['x']<30):
if(abs(v['x']-30)>maxX):
maxX=abs(v['x']-30)
if(v['y']<30):
if(abs(v['y']-30)>maxY):
maxY=abs(v['y']-30)
for v in nodes:
v['x']=v['x']+100+maxX
v['y']=v['y']+100+maxY
def saveConfiguration(filename,data):
fileContent = Path('parent.html').read_text()
fileContent=fileContent.replace("@@@@@@", filename+".json")
html= open('./html/'+filename+'.html',"w+")
html.write(fileContent)
html.close()
##json
content= open('./tmp/'+filename+'.json',"w+")
content.write(json.dumps(data))
content.close()
def polyn(input):
return 25*input#meglio 30, ma troppo lento per grossi input
def simulatedAnnealing(data):
#exit(str(len(data['nodes'])))
#inizio con posizioni casuali dei nodes
nodes=data['nodes']
links=data['links']
valorizzato=False
if(not valorizzato):
nodes=getStartingPositions(nodes,links)
f= open('originale.json',"w+")
f.write(json.dumps(data))
f.close()
else:
for n in nodes:
for l in links:
if(l['source']==n['id']):
l['s']={'x':n['x'],'y':n['y']}
if(l['target']==n['id']):
l['t']={'x':n['x'],'y':n['y']}
T=100
stages=11
fineTuningStages=4
numNodes=len(nodes)
currentStage=0
fineTuningStage=0
numMoves=polyn(numNodes)
currentMove=0
gamma=0.8
fuoriArea=False
###
vc=chooseANode(numNodes)
prevEnergy,ncp=computeEnergy(nodes,links,'')
if(ncp<=2000000):
saveConfiguration('outer',data)
while(currentStage<=stages and not fuoriArea):
sao_time=time.time()
currentMove=0
#print("currentStage: "+str(currentStage)+"\nT: "+str(T))
while(currentMove<=numMoves and not fuoriArea):
sai_time=time.time()
print("currentStage: "+str(currentStage)+"\nT: "+str(T))
print("currentMove: "+str(currentMove))
#print(vc)
vx=vc['x']
vy=vc['y']
"""for n in nodes:
if(n['x']>1400 or n['x']<0 or n['y']>1200 or n['y']<0):
print("fuori area, vado in fine tuning")
fuoriArea=True
fineTuningStages=fineTuningStages+(stages-currentStage)
print("fineTuningStages: "+str(fineTuningStages))
print("T: "+str(T))
T=100
break"""
move(vc,T,nodes,links)
newEnergy,ncn=computeEnergy(nodes,links,"s"+str(currentStage)+"i"+str(currentMove)+".json")
de=newEnergy-prevEnergy
#accetta la configurazione attuale
#print("de: "+str(de))
if(ncn<=ncp):
ncp=ncn
saveConfiguration('K_s'+str(currentStage)+'i'+str(currentMove),data)
if(de<0):
"""if(fuoriArea):
moveBack(vc,vx,vy,nodes,links)
fineTuningStages=fineTuningStages+(stages-currentStage)
print("fineTuningStages: "+str(fineTuningStages))
print("T: "+str(T))
T=100
break"""
prevEnergy=newEnergy
vc=chooseANode(numNodes)
#if(currentMove<10):
saveConfiguration('s'+str(currentStage)+'i'+str(currentMove),data)
elif(uniform(0,1)<Math.exp(-de/T)):
# print("accetto la configurazione, anche se è peggio")
"""if(fuoriArea):
moveBack(vc,vx,vy,nodes,links)
fineTuningStages=fineTuningStages+(stages-currentStage)
print("fineTuningStages: "+str(fineTuningStages))
print("T: "+str(T))
T=100
break"""
vc=chooseANode(numNodes)
else:
#print("move back")
moveBack(vc,vx,vy,nodes,links)
print("--- %s seconds ---" % (time.time() - sai_time))#0.6s a iterazione=>0.14s
currentMove=currentMove+1
currentStage=currentStage+1
print("--- %s seconds ---" % (time.time() - sao_time))
T=gamma*T
#fine-tuning phase
#exit("temperatura: "+str(T))
currentStage=0
#vc=chooseANode(numNodes)
#prevEnergy=computeFTEnergy(nodes,links)
T=100
prevEnergy,ncp=computeFTEnergy(nodes,links,str(currentStage)+str(currentMove))
while(currentStage<=fineTuningStages):
currentMove=0
while(currentMove<=numMoves):
print("currentStage FT: "+str(currentStage))
print("currentMove 2: "+str(currentMove))
vx=vc['x']
vy=vc['y']
move(vc,T,nodes,links)
newEnergy,ncn=computeFTEnergy(nodes,links,str(currentStage)+str(currentMove))
if(ncn<=ncp):
ncp=ncn
saveConfiguration('K_FT_s'+str(currentStage)+'i'+str(currentMove),data)
de=newEnergy-prevEnergy
#accetta la configurazione attuale
if(de<0):
prevEnergy=newEnergy
saveConfiguration('FT_s'+str(currentStage)+'i'+str(currentMove),data)
vc=chooseANode(numNodes)
else:
moveBack(vc,vx,vy,nodes,links)
currentMove=currentMove+1
currentStage=currentStage+1
simulatedAnnealing(data)
#fixNodePos(nodes)
minimo=50
massimo=0
for n in nodes:
if(n['x']<minimo):
minimo=n['x']
if(n['x']>massimo):
mas | simo=n['x']
di | conditional_block |
|
oldDH.py | tky=k['t']['y']
#print(l['s'])
slx=l['s']['x']
sly=l['s']['y']
tlx=l['t']['x']
tly=l['t']['y']
except:
print(k)
print("-----")
print(l)
exit()
if(intersects(skx,sky,tkx,tky,slx,sly,tlx,tly)):
res=res+1
"""else:
#print("l'arco "+sourceK['name']+", "+targetK['name']+" non interseca "+sourceL['name']+", "+targetL['name'])
if(sourceK['name']=="Ant-Man" and targetK['name']=='Avengers: Endgame' and sourceL['name']=="Black Panther" and targetL['name']=='Ant-Man and the Wasp'):
print("skx: "+str(skx)+"; sky: "+str(sky)+"\ntkx: "+str(tkx)+"; tky: "+str(tky)+"\nslx: "+str(slx)+"; sly: "+str(sly)+"\ntlx: "+str(tlx)+"; tly: "+str(tly))
exit("l'arco "+sourceK['name']+", "+targetK['name']+" non interseca "+sourceL['name']+", "+targetL['name'])
exit("res: "+str(res))"""
return res
def distance_to_line(p0, p1, p2):
x_diff = p2['x'] - p1['x']
y_diff = p2['y'] - p1['y']
num = abs(y_diff*p0['x'] - x_diff*p0['y'] + p2['x']*p1['y'] - p2['y']*p1['x'])
den = Math.sqrt(y_diff**2 + x_diff**2)
return num / den
#questa fase costa tanto 0.32 s circa con il grafo di prova di 52 nodi
# l'ideale è somma len archi ^2 bassa (ma non troppo); somma distanze tra coppie di nodi alta (ma non troppo);
# num incroci bassa
#distanza dai bordi non rispettata, quindi posta come condizione nello spostamento lungo la circonferenza di raggio T
# nel fine tuning considero anche la distanza punto linea tra nodo e archi vicini
def computeEnergy(nodes,links,logInfo):
sac_time=time.time()
width=1000
height=800
lambda1=8
#lambda2=100
lambda3=5
lambda4=1000
repulsiveTot=0
#fromBordersTot=0
edgeLenTot=0
numCrossTot=0
distanze=[]
for u in nodes:
ux=u['x']
uy=u['y']
posi=numpy.array([ux,uy])
"""dr=numpy.linalg.norm(posi-numpy.array([width,uy]))
dl=numpy.linalg.norm(posi-numpy.array([0,uy]))
dt=numpy.linalg.norm(posi-numpy.array([ux,0]))
db=numpy.linalg.norm(posi-numpy.array([ux,height]))
"""
"""rt={'x':width,'y':0}
rb={'x':width,'y':height}
tl={'x':0,'y':0}
tr={'x':width,'y':0}
bl={'x':0,'y':height}
br={'x':width,'y':height}
lt={'x':0,'y':0}
lb={'x':0,'y':height}
dr=distance_to_line(u, rt, rb)
dl=distance_to_line(u, lt, lb)
dt=distance_to_line(u, tl, tr)
db=distance_to_line(u, bl, br)
"""
"""
if(dr==0):
dr=1
if(dl==0):
dl=1
if(dt==0):
dt=1
if(db==0):
db=1
fromBordersTot=fromBordersTot+lambda2*(1/pow(dr,2)+1/pow(dl,2)+1/pow(dt,2)+1/pow(db,2))"""
for v in nodes:
#se sono elementi diversi
if(u['id']!=v['id']):
vx=v['x']
vy=v['y']
posj=numpy.array([vx,vy])
duv=numpy.linalg.norm(posi-posj)
#se sono nello stesso punto, l'inverso della distanza è infinito, quindi ipotizzo un valore molto alto
if(duv==0):
iduv=1000
else:
iduv=1/pow(duv,2)
repulsiveTot=repulsiveTot+lambda1*duv
for l in links:
if(l['source']==u['id'] and l['target']==v['id']):
source=u
target=v
edgeLenTot=edgeLenTot+lambda3*pow(numpy.linalg.norm(posi-posj),2)
break
#questo costa circa 0.28 s
numCrossTot=lambda4*numberOfIntersections(nodes,links)
#print("--- %s Compute energy seconds ---" % (time.time() - sac_time))
#exit("numCrossTot: "+str(numCrossTot))
#exit("repulsiveTot: "+str(repulsiveTot)+"\nfromBordersTot: "+str(fromBordersTot)+"\nedgeLenTot: "+str(edgeLenTot)+"\nnumCrossTot: "+str(numCrossTot))
#print(str(fromBordersTot))
msg="r: "+str(repulsiveTot)+", e: "+str(edgeLenTot)+", c: "+str(numCrossTot)+"\n"+logInfo+"\n"
log= open('./tmp/info.log',"a+")
log.write(msg)
log.close()
tot=repulsiveTot+edgeLenTot+numCrossTot
totMax=1.7976931348623157e+8#va normalizzata in [0,1]
tot=tot/totMax
return tot,numCrossTot
def computeFTEnergy(nodes,links,logInfo):
tot,numCrossTot= computeEnergy(nodes,links,logInfo)
nodeLinkDist=0
gmin=60
lambda5=1
for v in nodes:
for l in links:
for n in nodes:
if(n['id']==l['source']):
source=n
if(n['id']==l['target']):
target=n
dl=lambda5*distance_to_line(v, source, target)
if(dl<gmin):
nodeLinkDist=nodeLinkDist+dl
totMax=1.7976931348623157e+8#va normalizzata in [0,1]
tot=(tot+nodeLinkDist)/totMax
return tot,numCrossTot
def move(node,rad,nodes,links):
#print("sposto "+node['name'])
#print("x: "+str(node['x'])+"y: "+str(node['y']))
startx=node['x']
starty=node['y']
if(node['x']+rad>1400):
startx=1400-rad-20#tolgo un margine
elif(node['x']-rad<30):
startx=30+rad+20#aggiungo un margine
if(node['y']+rad>1000):
starty=1000-rad-20#tolgo un margine
elif(node['y']-rad<30):
starty=30+rad+20#aggiungo un margine
#radial base approach, mi muovo su una circonferenza: fa pochi spostamenti buoni
"""angle=2*Math.pi*uniform(0,1)#senza uniform mi muovo sempre a dx, cos 1 sin 0
nx=startx+Math.cos(angle)*rad
ny=starty+Math.sin(angle)*rad"""
#approccio quadrato, mi muovo nell'area di un quadrato
nx=uniform(startx-rad,startx+rad)
ny=uniform(starty-rad,starty+rad)
width=1000
height=800
max | res=0
for k in links:
#print("ciclo esterno")
for l in links:
#print("ciclo interno")
"""for m in nodes:
if(m['id']==k['source']):
sourceK=m
if(m['id']==k['target']):
targetK=m
if(m['id']==l['source']):
sourceL=m
if(m['id']==l['target']):
targetL=m"""
#print(k['s'])
try:
skx=k['s']['x']
sky=k['s']['y']
#print(k['t'])
tkx=k['t']['x'] | identifier_body |
|
oldDH.py | ['id']==k['source']):
sourceK=m
if(m['id']==k['target']):
targetK=m
if(m['id']==l['source']):
sourceL=m
if(m['id']==l['target']):
targetL=m"""
#print(k['s'])
try:
skx=k['s']['x']
sky=k['s']['y']
#print(k['t'])
tkx=k['t']['x']
tky=k['t']['y']
#print(l['s'])
slx=l['s']['x']
sly=l['s']['y']
tlx=l['t']['x']
tly=l['t']['y']
except:
print(k)
print("-----")
print(l)
exit()
if(intersects(skx,sky,tkx,tky,slx,sly,tlx,tly)):
res=res+1
"""else:
#print("l'arco "+sourceK['name']+", "+targetK['name']+" non interseca "+sourceL['name']+", "+targetL['name'])
if(sourceK['name']=="Ant-Man" and targetK['name']=='Avengers: Endgame' and sourceL['name']=="Black Panther" and targetL['name']=='Ant-Man and the Wasp'):
print("skx: "+str(skx)+"; sky: "+str(sky)+"\ntkx: "+str(tkx)+"; tky: "+str(tky)+"\nslx: "+str(slx)+"; sly: "+str(sly)+"\ntlx: "+str(tlx)+"; tly: "+str(tly))
exit("l'arco "+sourceK['name']+", "+targetK['name']+" non interseca "+sourceL['name']+", "+targetL['name'])
exit("res: "+str(res))"""
return res
def distance_to_line(p0, p1, p2):
x_diff = p2['x'] - p1['x']
y_diff = p2['y'] - p1['y'] | return num / den
#questa fase costa tanto 0.32 s circa con il grafo di prova di 52 nodi
# l'ideale è somma len archi ^2 bassa (ma non troppo); somma distanze tra coppie di nodi alta (ma non troppo);
# num incroci bassa
#distanza dai bordi non rispettata, quindi posta come condizione nello spostamento lungo la circonferenza di raggio T
# nel fine tuning considero anche la distanza punto linea tra nodo e archi vicini
def computeEnergy(nodes,links,logInfo):
sac_time=time.time()
width=1000
height=800
lambda1=8
#lambda2=100
lambda3=5
lambda4=1000
repulsiveTot=0
#fromBordersTot=0
edgeLenTot=0
numCrossTot=0
distanze=[]
for u in nodes:
ux=u['x']
uy=u['y']
posi=numpy.array([ux,uy])
"""dr=numpy.linalg.norm(posi-numpy.array([width,uy]))
dl=numpy.linalg.norm(posi-numpy.array([0,uy]))
dt=numpy.linalg.norm(posi-numpy.array([ux,0]))
db=numpy.linalg.norm(posi-numpy.array([ux,height]))
"""
"""rt={'x':width,'y':0}
rb={'x':width,'y':height}
tl={'x':0,'y':0}
tr={'x':width,'y':0}
bl={'x':0,'y':height}
br={'x':width,'y':height}
lt={'x':0,'y':0}
lb={'x':0,'y':height}
dr=distance_to_line(u, rt, rb)
dl=distance_to_line(u, lt, lb)
dt=distance_to_line(u, tl, tr)
db=distance_to_line(u, bl, br)
"""
"""
if(dr==0):
dr=1
if(dl==0):
dl=1
if(dt==0):
dt=1
if(db==0):
db=1
fromBordersTot=fromBordersTot+lambda2*(1/pow(dr,2)+1/pow(dl,2)+1/pow(dt,2)+1/pow(db,2))"""
for v in nodes:
#se sono elementi diversi
if(u['id']!=v['id']):
vx=v['x']
vy=v['y']
posj=numpy.array([vx,vy])
duv=numpy.linalg.norm(posi-posj)
#se sono nello stesso punto, l'inverso della distanza è infinito, quindi ipotizzo un valore molto alto
if(duv==0):
iduv=1000
else:
iduv=1/pow(duv,2)
repulsiveTot=repulsiveTot+lambda1*duv
for l in links:
if(l['source']==u['id'] and l['target']==v['id']):
source=u
target=v
edgeLenTot=edgeLenTot+lambda3*pow(numpy.linalg.norm(posi-posj),2)
break
#questo costa circa 0.28 s
numCrossTot=lambda4*numberOfIntersections(nodes,links)
#print("--- %s Compute energy seconds ---" % (time.time() - sac_time))
#exit("numCrossTot: "+str(numCrossTot))
#exit("repulsiveTot: "+str(repulsiveTot)+"\nfromBordersTot: "+str(fromBordersTot)+"\nedgeLenTot: "+str(edgeLenTot)+"\nnumCrossTot: "+str(numCrossTot))
#print(str(fromBordersTot))
msg="r: "+str(repulsiveTot)+", e: "+str(edgeLenTot)+", c: "+str(numCrossTot)+"\n"+logInfo+"\n"
log= open('./tmp/info.log',"a+")
log.write(msg)
log.close()
tot=repulsiveTot+edgeLenTot+numCrossTot
totMax=1.7976931348623157e+8#va normalizzata in [0,1]
tot=tot/totMax
return tot,numCrossTot
def computeFTEnergy(nodes,links,logInfo):
tot,numCrossTot= computeEnergy(nodes,links,logInfo)
nodeLinkDist=0
gmin=60
lambda5=1
for v in nodes:
for l in links:
for n in nodes:
if(n['id']==l['source']):
source=n
if(n['id']==l['target']):
target=n
dl=lambda5*distance_to_line(v, source, target)
if(dl<gmin):
nodeLinkDist=nodeLinkDist+dl
totMax=1.7976931348623157e+8#va normalizzata in [0,1]
tot=(tot+nodeLinkDist)/totMax
return tot,numCrossTot
def move(node,rad,nodes,links):
#print("sposto "+node['name'])
#print("x: "+str(node['x'])+"y: "+str(node['y']))
startx=node['x']
starty=node['y']
if(node['x']+rad>1400):
startx=1400-rad-20#tolgo un margine
elif(node['x']-rad<30):
startx=30+rad+20#aggiungo un margine
if(node['y']+rad>1000):
starty=1000-rad-20#tolgo un margine
elif(node['y']-rad<30):
starty=30+rad+20#aggiungo un margine
#radial base approach, mi muovo su una circonferenza: fa pochi spostamenti buoni
"""angle=2*Math.pi*uniform(0,1)#senza uniform mi muovo sempre a dx, cos 1 sin 0
nx=startx+Math.cos(angle)*rad
ny=starty+Math.sin(angle)*rad"""
#approccio quadrato, mi muovo nell'area di un quadrato
nx=uniform(startx-rad,startx+rad)
ny=uniform(starty-rad,starty+rad)
width=1000
height=800
maxNodeX=width
minNodeX=0
maxNodeY=height
minNodeY=0
log= open('./tmp/info.log',"a+")
log.write("("+str(node[' | num = abs(y_diff*p0['x'] - x_diff*p0['y'] + p2['x']*p1['y'] - p2['y']*p1['x'])
den = Math.sqrt(y_diff**2 + x_diff**2) | random_line_split |
oldDH.py | '] - p1['y']
num = abs(y_diff*p0['x'] - x_diff*p0['y'] + p2['x']*p1['y'] - p2['y']*p1['x'])
den = Math.sqrt(y_diff**2 + x_diff**2)
return num / den
#questa fase costa tanto 0.32 s circa con il grafo di prova di 52 nodi
# l'ideale è somma len archi ^2 bassa (ma non troppo); somma distanze tra coppie di nodi alta (ma non troppo);
# num incroci bassa
#distanza dai bordi non rispettata, quindi posta come condizione nello spostamento lungo la circonferenza di raggio T
# nel fine tuning considero anche la distanza punto linea tra nodo e archi vicini
def computeEnergy(nodes,links,logInfo):
sac_time=time.time()
width=1000
height=800
lambda1=8
#lambda2=100
lambda3=5
lambda4=1000
repulsiveTot=0
#fromBordersTot=0
edgeLenTot=0
numCrossTot=0
distanze=[]
for u in nodes:
ux=u['x']
uy=u['y']
posi=numpy.array([ux,uy])
"""dr=numpy.linalg.norm(posi-numpy.array([width,uy]))
dl=numpy.linalg.norm(posi-numpy.array([0,uy]))
dt=numpy.linalg.norm(posi-numpy.array([ux,0]))
db=numpy.linalg.norm(posi-numpy.array([ux,height]))
"""
"""rt={'x':width,'y':0}
rb={'x':width,'y':height}
tl={'x':0,'y':0}
tr={'x':width,'y':0}
bl={'x':0,'y':height}
br={'x':width,'y':height}
lt={'x':0,'y':0}
lb={'x':0,'y':height}
dr=distance_to_line(u, rt, rb)
dl=distance_to_line(u, lt, lb)
dt=distance_to_line(u, tl, tr)
db=distance_to_line(u, bl, br)
"""
"""
if(dr==0):
dr=1
if(dl==0):
dl=1
if(dt==0):
dt=1
if(db==0):
db=1
fromBordersTot=fromBordersTot+lambda2*(1/pow(dr,2)+1/pow(dl,2)+1/pow(dt,2)+1/pow(db,2))"""
for v in nodes:
#se sono elementi diversi
if(u['id']!=v['id']):
vx=v['x']
vy=v['y']
posj=numpy.array([vx,vy])
duv=numpy.linalg.norm(posi-posj)
#se sono nello stesso punto, l'inverso della distanza è infinito, quindi ipotizzo un valore molto alto
if(duv==0):
iduv=1000
else:
iduv=1/pow(duv,2)
repulsiveTot=repulsiveTot+lambda1*duv
for l in links:
if(l['source']==u['id'] and l['target']==v['id']):
source=u
target=v
edgeLenTot=edgeLenTot+lambda3*pow(numpy.linalg.norm(posi-posj),2)
break
#questo costa circa 0.28 s
numCrossTot=lambda4*numberOfIntersections(nodes,links)
#print("--- %s Compute energy seconds ---" % (time.time() - sac_time))
#exit("numCrossTot: "+str(numCrossTot))
#exit("repulsiveTot: "+str(repulsiveTot)+"\nfromBordersTot: "+str(fromBordersTot)+"\nedgeLenTot: "+str(edgeLenTot)+"\nnumCrossTot: "+str(numCrossTot))
#print(str(fromBordersTot))
msg="r: "+str(repulsiveTot)+", e: "+str(edgeLenTot)+", c: "+str(numCrossTot)+"\n"+logInfo+"\n"
log= open('./tmp/info.log',"a+")
log.write(msg)
log.close()
tot=repulsiveTot+edgeLenTot+numCrossTot
totMax=1.7976931348623157e+8#va normalizzata in [0,1]
tot=tot/totMax
return tot,numCrossTot
def computeFTEnergy(nodes,links,logInfo):
tot,numCrossTot= computeEnergy(nodes,links,logInfo)
nodeLinkDist=0
gmin=60
lambda5=1
for v in nodes:
for l in links:
for n in nodes:
if(n['id']==l['source']):
source=n
if(n['id']==l['target']):
target=n
dl=lambda5*distance_to_line(v, source, target)
if(dl<gmin):
nodeLinkDist=nodeLinkDist+dl
totMax=1.7976931348623157e+8#va normalizzata in [0,1]
tot=(tot+nodeLinkDist)/totMax
return tot,numCrossTot
def move(node,rad,nodes,links):
#print("sposto "+node['name'])
#print("x: "+str(node['x'])+"y: "+str(node['y']))
startx=node['x']
starty=node['y']
if(node['x']+rad>1400):
startx=1400-rad-20#tolgo un margine
elif(node['x']-rad<30):
startx=30+rad+20#aggiungo un margine
if(node['y']+rad>1000):
starty=1000-rad-20#tolgo un margine
elif(node['y']-rad<30):
starty=30+rad+20#aggiungo un margine
#radial base approach, mi muovo su una circonferenza: fa pochi spostamenti buoni
"""angle=2*Math.pi*uniform(0,1)#senza uniform mi muovo sempre a dx, cos 1 sin 0
nx=startx+Math.cos(angle)*rad
ny=starty+Math.sin(angle)*rad"""
#approccio quadrato, mi muovo nell'area di un quadrato
nx=uniform(startx-rad,startx+rad)
ny=uniform(starty-rad,starty+rad)
width=1000
height=800
maxNodeX=width
minNodeX=0
maxNodeY=height
minNodeY=0
log= open('./tmp/info.log',"a+")
log.write("("+str(node['x'])+", "+str(node['y'])+") =>("+str(nx)+", "+str(ny)+")\n")
log.close()
for n in nodes:
if(n['id']==node['id']):
n['x']=nx
n['y']=ny
break
for l in links:
if(l['source']==node['id']):
l['s']['x']=nx
l['s']['y']=ny
if(l['target']==node['id']):
l['t']['x']=nx
l['t']['y']=ny
def moveBack(vc,vx,vy,nodes,links):
for n in nodes:
if(n['id']==vc['id']):
n['x']=vx
n['y']=vy
break
for l in links:
if(l['source']==vc['id']):
l['s']['x']=vx
l['s']['y']=vx
if(l['target']==vc['id']):
l['t']['x']=vx
l['t']['y']=vx
def fixNodePos(nodes):
maxX=0
maxY=0
for v in nodes:
if(v['x']<30):
if(abs(v['x']-30)>maxX):
maxX=abs(v['x']-30)
if(v['y']<30):
if(abs(v['y']-30)>maxY):
maxY=abs(v['y']-30)
for v in nodes:
v['x']=v['x']+100+maxX
v['y']=v['y']+100+maxY
def saveConfiguration(filename,data):
fileContent = Path('parent.html').read_text()
fileContent=fileContent.replace("@@@@@@", filename+".json")
html= open('./html/'+filename+'.html',"w+")
html.write(fileContent)
html.close()
##json
content= open('./tmp/'+filename+'.json',"w+")
content.write(json.dumps(data))
content.close()
def po | lyn(i | identifier_name |
|
textContent-es.js | as o procedimientos almacenados (SQL) y en la lógica de negocio en C#.",
"Escribí documentación funcional para nuevos clientes y capacité a nuevos usuarios."
],
"image": "aggity"
},
"luxtripper": {
"id": 4,
"ref": "luxtripper",
"slickitem":"#slick-slide05",
"company": "Luxtripper LTD",
"city": "London",
"website": "http://luxtripper.co.uk",
"role": "Project Manager / Developer",
"dateStart": "Ene 2015 ",
"dateEnd": "Ago 2015",
"techStack": "",
"description": "Startup del sector del turimso de lujo para encontrar destinos a nivel mundial según un algoritmo propio. Implementé la infraestructura mínima (servidores, backups, control de versiones) y los procesos del departamento de software dirigiendo un equipo de cuatro desarrolladores offshore en Pakistán.",
"tasks": [
"Gestioné varios proyectos internos. Desempeñé funciones como analista de negocios para recopilar y escribir los requisitos para los departamentos de ventas y marketing.",
"Diseñé e implementé integraciones con terceras API para reservas de hoteles. Prototipé una función de reserva de vuelos.",
"Se implementó el software de control de versiones TFS y se establecieron procesos sobre el mismo (fusiones de ramas, despliegues, pruebas).",
"Implementación de servidores de producción, backup y test usando Rackspace y la plataforma Azure. Copias de seguridad automatizadas mediante scripts para que las bases de datos, los servidores de imágenes y el código se pudieran guardar en el servicio Amazon S3.",
"Se resolvieron errores de codificación relacionados con SQL y lógica empresarial. Contribución a cambios cosméticos.",
"Implementé planes de prueba para front y back-end con TFS en línea.",
"Migré un blog de WordPress a un servidor IIS interno migrando los datos de MySQL a MS SQL Server."
],
"image": "luxtripper"
},
"membra": {
"id": 5,
"ref": "membra",
"slickitem":"#slick-slide04",
"company": "Membership Engagement Services",
"city": "London",
"website": "http://membra.co.uk",
"role": "Web Developer",
"dateStart": "Ago 2015",
"dateEnd": "Ago 2016",
"techStack": "",
"description": "Se implementaron nuevas características y funcionalidades en un sitio web utilizado por los hospitales de la NHS para recopilar y presentar datos de encuestas a los usuarios (casi no tenían back-end).",
"tasks": [
"Se modificó sitio web para que fuera responsive y así mejorar la compatibilidad entre dispositivos. Se añadió una barra de progreso para mejorar UI/UX entre otras funcionalidades.",
"Implementación de un mantenimiento para todo lo referente a las preguntas/respuestas de encuestas como Ordenar-Modificar-Eliminar (en lugar de hacer cambios directamente en la base de datos).",
"Realice múltiples tareas de bases de datos, incluidas tablas dinámicas, creación y diseño de tablas, procedimientos almacenados y optimización de consultas. Hice un scripting para automatizar la importación de datos que mejoró la velocidad de días a solo seis horas.",
"Crée una herramienta de administración de arquitectura de tres niveles que automatizaba y optimizaba las tareas de los administradores de proyectos. Desarrollada con Bootstrap, Telerik framework, ASP.NET Webforms, C# y MS SQL Server."
],
"image": "mes"
},
"mission": {
"id": 6,
"ref": "mission",
"slickitem":"#slick-slide03",
"company": "Mission Communications",
"city": "London",
"website": "http://mission-communications.net",
"role": "Web Developer",
"dateStart": "Ago 2016",
"dateEnd": "Nov 2017",
"techStack": "",
"description": "En Mission Communications pude trabajar en varios proyectos de desarrollo web para clientes de diversas industrias y en particular hice contribuciones significativas al desarrollo de sightseeingpass.com.",
"tasks": [
"Creé un sitio web de comercio electrónico y contenido de noticias/blogs que se integró con Facebook, Instagram y Twitter. Usé Web Forms, C#, Bootstrap y jQuery.",
"Creé un sitio para un evento de administradores de propiedades privadas. El sitio estaba protegido con contraseña para mostrar detalles sobre propiedades disponibles (imágenes, descripción, planos, etc.). Se implementó una presentación de diapositivas utilizando la biblioteca Slick JavaScript y Typescript. El sitio permitía a los agentes inmobiliarios enviar correos electrónicos individuales o masivos a los registrantes (adjuntando pdfs con la información de la propiedad). Construido en Web Forms, C#, jQuery y Bootstrap.",
"Conversión de sitios web críticos para la empresa en aplicaciones web ASP.NET para mejorar la capacidad de depuración.",
"Se migraron varios sitios web a TFS para permitir a los desarrolladores trabajar localmente, reemplazando una solución FTP problemática.",
"Implementé nuevos puntos finales y corrección de errores en la API de la empresa. La API se desarrolló utilizando la arquitectura MVC con un Entity Framework que apuntaba a una base de datos de MS SQL Server."
],
"sightseeingpass": [
"Añadí una nueva función en el back-end para sightseeingpass.com para administrar todas las imágenes relacionadas con una atracción turística. La función permitía a los usuarios cargar varias imágenes a la vez y ordenarlas mediante drag and drop. Se utilizaron Web Forms, C#, jQuery y SQL stored procedures para operaciones CRUD.",
"Desarrollé una nueva función en sightseeingpass.com para que los usuarios creasen itinerarios personalizados para los recorridos en autobús con la capacidad de seleccionar fechas y atracciones de una lista (utilizando datos de la API de la empresa), creando un mapa de Google con las rutas más eficientes utilizando el diseño aportado por la diseñadora gráfica. Lo hice con web forms (user control), Bootstrap, C#, JQuery, controladores ASP.NET y API internas.",
"Se creó una función utilizando la API de la empresa para comparar datos entre 3 bases de datos con alertas de cualquier diferencia. La solución permitió a los usuarios actualizar datos dispares a través de la base de datos del sitio web con la base de datos del sistema central con un solo clic y permitió que el equipo pudiera verificar bajo demanda la consistencia/coherencia entre los sistemas. Se utilizó MS SQL Server, API interna y C#.",
"Escribí pruebas unitarias para validar la reescritura de URL y la precisión de los precios de venta al público (número de personas, tipo de entrega, tipo de tarjeta, etc.)."
],
"image": "mission"
},
"educo": {
"id": 7,
"ref": "educo",
"slickitem":"#slick-slide02",
"company": "Educo ONG",
"city": "Barcelona",
"website": "https://www.educo.org/",
"role": "Web Developer",
"dateStart": "Mar 2018",
"dateEnd": "Ago 2018",
"techStack": "Kentico CMS, C#, SQL Server, Javascript",
"description": "ONG para velar y salvaguardar los derechos de la infancia. Estuve cubriendo una baja por maternidad ayudando en el incidental del aplicativo.",
"tasks": [
"Creé un componente de crowdfunding en Kentico CMS integrado con los sistemas de pago Redsys y Paypal. Usé componentes C#, Javascript, HTML, CSS y Kentico.",
"Corrección de bugs en el sitio web comercial, también en Kentico CMS.",
"Código Javascript refactorizado de acuerdo con los principios SOLID."
],
"image": "educo"
},
"wivi": {
"id": 8,
"ref": "wivi",
"slickitem":"#slick-slide01",
"company": "Wivi Vision",
"city": "Barcelona",
"website": "https://wivivision.com/",
"role": "Web Developer",
"dateStart": "Oct 2018",
"dateEnd": "Abr 2019",
"techStack": "",
"description": "Debido a un acuerdo de confidencialidad no se me permite mostrar públicamente ninguna descripción de ninguna tarea ni tecnología durante mi tiempo en esta startup.", | "tasks": [],
"image": "wivi" | random_line_split |
|
dwarfdebuginfo.rs | may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::helpers::{get_uid, resolve_specification, DieReference};
use binaryninja::{
binaryview::{BinaryView, BinaryViewBase},
debuginfo::{DebugFunctionInfo, DebugInfo},
rc::*,
templatesimplifier::simplify_str_to_str,
types::{Conf, FunctionParameter, Type},
};
use gimli::{DebuggingInformationEntry, Dwarf, Reader, Unit};
use log::error;
use std::{
collections::{hash_map::Values, HashMap},
ffi::CString,
hash::Hash,
};
pub(crate) type TypeUID = usize;
/////////////////////////
// FunctionInfoBuilder
// TODO : Function local variables
#[derive(PartialEq, Eq, Hash)]
pub struct FunctionInfoBuilder {
pub full_name: Option<CString>,
pub raw_name: Option<CString>,
pub return_type: Option<TypeUID>,
pub address: Option<u64>,
pub parameters: Vec<Option<(CString, TypeUID)>>,
}
impl FunctionInfoBuilder {
pub fn update(
&mut self,
full_name: Option<CString>,
raw_name: Option<CString>,
return_type: Option<TypeUID>,
address: Option<u64>,
parameters: Vec<Option<(CString, TypeUID)>>,
) {
if full_name.is_some() {
self.full_name = full_name;
}
if raw_name.is_some() {
self.raw_name = raw_name;
}
if return_type.is_some() {
self.return_type = return_type;
}
if address.is_some() {
self.address = address;
}
for (i, new_parameter) in parameters.into_iter().enumerate() {
if let Some(old_parameter) = self.parameters.get(i) {
if old_parameter.is_none() {
self.parameters[i] = new_parameter;
}
} else {
self.parameters.push(new_parameter);
}
}
}
}
//////////////////////
// DebugInfoBuilder
// TODO : Don't make this pub...fix the value thing
pub(crate) struct DebugType {
name: CString,
t: Ref<Type>,
commit: bool,
}
// DWARF info is stored and displayed in a tree, but is really a graph
// The purpose of this builder is to help resolve those graph edges by mapping partial function
// info and types to one DIE's UID (T) before adding the completed info to BN's debug info
pub struct DebugInfoBuilder {
functions: Vec<FunctionInfoBuilder>,
types: HashMap<TypeUID, DebugType>,
data_variables: HashMap<u64, (Option<CString>, TypeUID)>,
names: HashMap<TypeUID, CString>,
default_address_size: usize,
}
impl DebugInfoBuilder {
pub fn new(view: &BinaryView) -> Self {
DebugInfoBuilder {
functions: vec![],
types: HashMap::new(),
data_variables: HashMap::new(),
names: HashMap::new(),
default_address_size: view.address_size(),
}
}
pub fn default_address_size(&self) -> usize {
self.default_address_size
}
#[allow(clippy::too_many_arguments)]
pub fn insert_function(
&mut self,
full_name: Option<CString>,
raw_name: Option<CString>,
return_type: Option<TypeUID>,
address: Option<u64>,
parameters: Vec<Option<(CString, TypeUID)>>,
) {
if let Some(function) = self.functions.iter_mut().find(|func| {
(func.raw_name.is_some() && func.raw_name == raw_name)
|| (func.full_name.is_some() && func.full_name == full_name)
}) {
function.update(full_name, raw_name, return_type, address, parameters);
} else {
self.functions.push(FunctionInfoBuilder {
full_name,
raw_name,
return_type,
address,
parameters,
});
}
}
pub fn functions(&self) -> &[FunctionInfoBuilder] {
&self.functions
}
pub(crate) fn types(&self) -> Values<'_, TypeUID, DebugType> {
self.types.values()
}
pub fn add_type(&mut self, type_uid: TypeUID, name: CString, t: Ref<Type>, commit: bool) {
if let Some(DebugType {
name: existing_name,
t: existing_type,
commit: _,
}) = self.types.insert(
type_uid,
DebugType {
name: name.clone(),
t: t.clone(),
commit,
},
) {
if existing_type != t {
error!("DWARF info contains duplicate type definition. Overwriting type `{}` (named `{:?}`) with `{}` (named `{:?}`)",
existing_type,
existing_name,
t,
name
);
}
}
}
pub fn remove_type(&mut self, type_uid: TypeUID) {
self.types.remove(&type_uid);
}
// TODO : Non-copy?
pub fn get_type(&self, type_uid: TypeUID) -> Option<(CString, Ref<Type>)> {
self.types
.get(&type_uid)
.map(|type_ref_ref| (type_ref_ref.name.clone(), type_ref_ref.t.clone()))
}
pub fn contains_type(&self, type_uid: TypeUID) -> bool {
self.types.get(&type_uid).is_some()
}
pub fn add_data_variable(&mut self, address: u64, name: Option<CString>, type_uid: TypeUID) {
if let Some((_existing_name, existing_type_uid)) =
self.data_variables.insert(address, (name, type_uid))
{
let existing_type = self.get_type(existing_type_uid).unwrap().1;
let new_type = self.get_type(type_uid).unwrap().1;
if existing_type_uid != type_uid || existing_type != new_type {
error!("DWARF info contains duplicate data variable definition. Overwriting data variable at 0x{:08x} (`{}`) with `{}`",
address,
self.get_type(existing_type_uid).unwrap().1,
self.get_type(type_uid).unwrap().1
);
}
}
}
pub fn set_name(&mut self, die_uid: TypeUID, name: CString) {
assert!(self.names.insert(die_uid, name).is_none());
}
pub fn get_name<R: Reader<Offset = usize>>(
&self,
dwarf: &Dwarf<R>,
unit: &Unit<R>,
entry: &DebuggingInformationEntry<R>,
) -> Option<CString> {
match resolve_specification(dwarf, unit, entry) {
DieReference::Offset(entry_offset) => self
.names
.get(&get_uid(unit, &unit.entry(entry_offset).unwrap()))
.cloned(),
DieReference::UnitAndOffset((entry_unit, entry_offset)) => self
.names
.get(&get_uid(
&entry_unit,
&entry_unit.entry(entry_offset).unwrap(),
))
.cloned(),
}
}
fn commit_types(&self, debug_info: &mut DebugInfo) {
for debug_type in self.types() {
if debug_type.commit {
debug_info.add_type(debug_type.name.clone(), debug_type.t.as_ref());
}
}
}
// TODO : Consume data?
fn commit_data_variables(&self, debug_info: &mut DebugInfo) {
for (&address, (name, type_uid)) in &self.data_variables {
assert!(debug_info.add_data_variable(
address,
&self.get_type(*type_uid).unwrap().1,
name.clone()
));
}
}
fn commit_functions(&self, debug_info: &mut DebugInfo) {
for function in self.functions() {
let return_type = match function.return_type {
Some(return_type_id) => {
Conf::new(self.get_type(return_type_id).unwrap().1.clone(), 0)
}
_ => Conf::new(binaryninja::types::Type::void(), 0),
};
let parameters: Vec<FunctionParameter<CString>> = function
.parameters
.iter()
.filter_map(|parameter| match parameter {
Some((name, 0)) => |
Some((name, uid)) => Some(FunctionParameter::new(
self.get_type(*uid).unwrap().1,
name.clone(),
None,
)),
_ => None,
})
.collect();
// TODO : Handle
let platform = None;
let variable_parameters = false;
// let calling_convention: Option<Ref<CallingConvention<CoreArchitecture>>> = None;
let function_type =
binaryninja::types::Type::function(&return_type, ¶meters, variable_parameters);
let simplified_full_name = function
.full_name
.as_ref()
.map(|name| simplify_str_to_str(name.as_ref()).as_str().to_owned())
.map(|simp| CString::new(simp).unwrap());
debug_info.add_function(DebugFunctionInfo::new(
simplified_full_name.clone(),
simplified_full_name, | {
Some(FunctionParameter::new(Type::void(), name.clone(), None))
} | conditional_block |
dwarfdebuginfo.rs | may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::helpers::{get_uid, resolve_specification, DieReference};
use binaryninja::{
binaryview::{BinaryView, BinaryViewBase},
debuginfo::{DebugFunctionInfo, DebugInfo},
rc::*,
templatesimplifier::simplify_str_to_str,
types::{Conf, FunctionParameter, Type},
};
use gimli::{DebuggingInformationEntry, Dwarf, Reader, Unit};
use log::error;
use std::{
collections::{hash_map::Values, HashMap},
ffi::CString,
hash::Hash,
};
pub(crate) type TypeUID = usize;
/////////////////////////
// FunctionInfoBuilder
// TODO : Function local variables
#[derive(PartialEq, Eq, Hash)]
pub struct FunctionInfoBuilder {
pub full_name: Option<CString>,
pub raw_name: Option<CString>,
pub return_type: Option<TypeUID>,
pub address: Option<u64>,
pub parameters: Vec<Option<(CString, TypeUID)>>,
}
impl FunctionInfoBuilder {
pub fn update(
&mut self,
full_name: Option<CString>,
raw_name: Option<CString>,
return_type: Option<TypeUID>,
address: Option<u64>,
parameters: Vec<Option<(CString, TypeUID)>>,
) {
if full_name.is_some() {
self.full_name = full_name;
}
if raw_name.is_some() {
self.raw_name = raw_name;
}
if return_type.is_some() {
self.return_type = return_type;
}
if address.is_some() {
self.address = address;
}
for (i, new_parameter) in parameters.into_iter().enumerate() {
if let Some(old_parameter) = self.parameters.get(i) {
if old_parameter.is_none() {
self.parameters[i] = new_parameter;
}
} else {
self.parameters.push(new_parameter);
}
}
}
}
//////////////////////
// DebugInfoBuilder
// TODO : Don't make this pub...fix the value thing
pub(crate) struct DebugType {
name: CString,
t: Ref<Type>,
commit: bool,
}
// DWARF info is stored and displayed in a tree, but is really a graph
// The purpose of this builder is to help resolve those graph edges by mapping partial function
// info and types to one DIE's UID (T) before adding the completed info to BN's debug info
pub struct DebugInfoBuilder {
functions: Vec<FunctionInfoBuilder>,
types: HashMap<TypeUID, DebugType>,
data_variables: HashMap<u64, (Option<CString>, TypeUID)>,
names: HashMap<TypeUID, CString>,
default_address_size: usize,
}
impl DebugInfoBuilder {
pub fn new(view: &BinaryView) -> Self {
DebugInfoBuilder {
functions: vec![],
types: HashMap::new(),
data_variables: HashMap::new(),
names: HashMap::new(),
default_address_size: view.address_size(),
}
}
pub fn default_address_size(&self) -> usize {
self.default_address_size
}
#[allow(clippy::too_many_arguments)]
pub fn insert_function(
&mut self,
full_name: Option<CString>,
raw_name: Option<CString>,
return_type: Option<TypeUID>,
address: Option<u64>,
parameters: Vec<Option<(CString, TypeUID)>>,
) {
if let Some(function) = self.functions.iter_mut().find(|func| {
(func.raw_name.is_some() && func.raw_name == raw_name)
|| (func.full_name.is_some() && func.full_name == full_name)
}) {
function.update(full_name, raw_name, return_type, address, parameters);
} else {
self.functions.push(FunctionInfoBuilder {
full_name,
raw_name,
return_type,
address,
parameters,
});
}
}
pub fn functions(&self) -> &[FunctionInfoBuilder] {
&self.functions
}
pub(crate) fn types(&self) -> Values<'_, TypeUID, DebugType> {
self.types.values()
}
pub fn add_type(&mut self, type_uid: TypeUID, name: CString, t: Ref<Type>, commit: bool) {
if let Some(DebugType {
name: existing_name,
t: existing_type,
commit: _,
}) = self.types.insert(
type_uid,
DebugType {
name: name.clone(),
t: t.clone(),
commit,
},
) {
if existing_type != t {
error!("DWARF info contains duplicate type definition. Overwriting type `{}` (named `{:?}`) with `{}` (named `{:?}`)",
existing_type,
existing_name,
t,
name
);
}
}
}
pub fn remove_type(&mut self, type_uid: TypeUID) {
self.types.remove(&type_uid);
}
// TODO : Non-copy?
pub fn get_type(&self, type_uid: TypeUID) -> Option<(CString, Ref<Type>)> {
self.types
.get(&type_uid)
.map(|type_ref_ref| (type_ref_ref.name.clone(), type_ref_ref.t.clone()))
}
pub fn contains_type(&self, type_uid: TypeUID) -> bool {
self.types.get(&type_uid).is_some()
}
pub fn add_data_variable(&mut self, address: u64, name: Option<CString>, type_uid: TypeUID) |
pub fn set_name(&mut self, die_uid: TypeUID, name: CString) {
assert!(self.names.insert(die_uid, name).is_none());
}
pub fn get_name<R: Reader<Offset = usize>>(
&self,
dwarf: &Dwarf<R>,
unit: &Unit<R>,
entry: &DebuggingInformationEntry<R>,
) -> Option<CString> {
match resolve_specification(dwarf, unit, entry) {
DieReference::Offset(entry_offset) => self
.names
.get(&get_uid(unit, &unit.entry(entry_offset).unwrap()))
.cloned(),
DieReference::UnitAndOffset((entry_unit, entry_offset)) => self
.names
.get(&get_uid(
&entry_unit,
&entry_unit.entry(entry_offset).unwrap(),
))
.cloned(),
}
}
fn commit_types(&self, debug_info: &mut DebugInfo) {
for debug_type in self.types() {
if debug_type.commit {
debug_info.add_type(debug_type.name.clone(), debug_type.t.as_ref());
}
}
}
// TODO : Consume data?
fn commit_data_variables(&self, debug_info: &mut DebugInfo) {
for (&address, (name, type_uid)) in &self.data_variables {
assert!(debug_info.add_data_variable(
address,
&self.get_type(*type_uid).unwrap().1,
name.clone()
));
}
}
fn commit_functions(&self, debug_info: &mut DebugInfo) {
for function in self.functions() {
let return_type = match function.return_type {
Some(return_type_id) => {
Conf::new(self.get_type(return_type_id).unwrap().1.clone(), 0)
}
_ => Conf::new(binaryninja::types::Type::void(), 0),
};
let parameters: Vec<FunctionParameter<CString>> = function
.parameters
.iter()
.filter_map(|parameter| match parameter {
Some((name, 0)) => {
Some(FunctionParameter::new(Type::void(), name.clone(), None))
}
Some((name, uid)) => Some(FunctionParameter::new(
self.get_type(*uid).unwrap().1,
name.clone(),
None,
)),
_ => None,
})
.collect();
// TODO : Handle
let platform = None;
let variable_parameters = false;
// let calling_convention: Option<Ref<CallingConvention<CoreArchitecture>>> = None;
let function_type =
binaryninja::types::Type::function(&return_type, ¶meters, variable_parameters);
let simplified_full_name = function
.full_name
.as_ref()
.map(|name| simplify_str_to_str(name.as_ref()).as_str().to_owned())
.map(|simp| CString::new(simp).unwrap());
debug_info.add_function(DebugFunctionInfo::new(
simplified_full_name.clone(),
simplified_full_name, | {
if let Some((_existing_name, existing_type_uid)) =
self.data_variables.insert(address, (name, type_uid))
{
let existing_type = self.get_type(existing_type_uid).unwrap().1;
let new_type = self.get_type(type_uid).unwrap().1;
if existing_type_uid != type_uid || existing_type != new_type {
error!("DWARF info contains duplicate data variable definition. Overwriting data variable at 0x{:08x} (`{}`) with `{}`",
address,
self.get_type(existing_type_uid).unwrap().1,
self.get_type(type_uid).unwrap().1
);
}
}
} | identifier_body |
dwarfdebuginfo.rs | may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::helpers::{get_uid, resolve_specification, DieReference};
use binaryninja::{
binaryview::{BinaryView, BinaryViewBase},
debuginfo::{DebugFunctionInfo, DebugInfo},
rc::*,
templatesimplifier::simplify_str_to_str,
types::{Conf, FunctionParameter, Type},
};
use gimli::{DebuggingInformationEntry, Dwarf, Reader, Unit};
use log::error;
use std::{
collections::{hash_map::Values, HashMap},
ffi::CString,
hash::Hash,
};
pub(crate) type TypeUID = usize;
/////////////////////////
// FunctionInfoBuilder
// TODO : Function local variables
#[derive(PartialEq, Eq, Hash)]
pub struct FunctionInfoBuilder {
pub full_name: Option<CString>,
pub raw_name: Option<CString>,
pub return_type: Option<TypeUID>,
pub address: Option<u64>,
pub parameters: Vec<Option<(CString, TypeUID)>>,
}
impl FunctionInfoBuilder {
pub fn update(
&mut self,
full_name: Option<CString>,
raw_name: Option<CString>,
return_type: Option<TypeUID>,
address: Option<u64>,
parameters: Vec<Option<(CString, TypeUID)>>,
) {
if full_name.is_some() {
self.full_name = full_name;
}
if raw_name.is_some() {
self.raw_name = raw_name;
}
if return_type.is_some() {
self.return_type = return_type;
}
if address.is_some() {
self.address = address;
}
for (i, new_parameter) in parameters.into_iter().enumerate() {
if let Some(old_parameter) = self.parameters.get(i) {
if old_parameter.is_none() {
self.parameters[i] = new_parameter;
}
} else {
self.parameters.push(new_parameter);
}
}
}
}
//////////////////////
// DebugInfoBuilder
// TODO : Don't make this pub...fix the value thing
pub(crate) struct DebugType {
name: CString,
t: Ref<Type>,
commit: bool,
}
// DWARF info is stored and displayed in a tree, but is really a graph
// The purpose of this builder is to help resolve those graph edges by mapping partial function
// info and types to one DIE's UID (T) before adding the completed info to BN's debug info
pub struct DebugInfoBuilder {
functions: Vec<FunctionInfoBuilder>,
types: HashMap<TypeUID, DebugType>,
data_variables: HashMap<u64, (Option<CString>, TypeUID)>,
names: HashMap<TypeUID, CString>,
default_address_size: usize,
}
impl DebugInfoBuilder {
pub fn | (view: &BinaryView) -> Self {
DebugInfoBuilder {
functions: vec![],
types: HashMap::new(),
data_variables: HashMap::new(),
names: HashMap::new(),
default_address_size: view.address_size(),
}
}
pub fn default_address_size(&self) -> usize {
self.default_address_size
}
#[allow(clippy::too_many_arguments)]
pub fn insert_function(
&mut self,
full_name: Option<CString>,
raw_name: Option<CString>,
return_type: Option<TypeUID>,
address: Option<u64>,
parameters: Vec<Option<(CString, TypeUID)>>,
) {
if let Some(function) = self.functions.iter_mut().find(|func| {
(func.raw_name.is_some() && func.raw_name == raw_name)
|| (func.full_name.is_some() && func.full_name == full_name)
}) {
function.update(full_name, raw_name, return_type, address, parameters);
} else {
self.functions.push(FunctionInfoBuilder {
full_name,
raw_name,
return_type,
address,
parameters,
});
}
}
pub fn functions(&self) -> &[FunctionInfoBuilder] {
&self.functions
}
pub(crate) fn types(&self) -> Values<'_, TypeUID, DebugType> {
self.types.values()
}
pub fn add_type(&mut self, type_uid: TypeUID, name: CString, t: Ref<Type>, commit: bool) {
if let Some(DebugType {
name: existing_name,
t: existing_type,
commit: _,
}) = self.types.insert(
type_uid,
DebugType {
name: name.clone(),
t: t.clone(),
commit,
},
) {
if existing_type != t {
error!("DWARF info contains duplicate type definition. Overwriting type `{}` (named `{:?}`) with `{}` (named `{:?}`)",
existing_type,
existing_name,
t,
name
);
}
}
}
pub fn remove_type(&mut self, type_uid: TypeUID) {
self.types.remove(&type_uid);
}
// TODO : Non-copy?
pub fn get_type(&self, type_uid: TypeUID) -> Option<(CString, Ref<Type>)> {
self.types
.get(&type_uid)
.map(|type_ref_ref| (type_ref_ref.name.clone(), type_ref_ref.t.clone()))
}
pub fn contains_type(&self, type_uid: TypeUID) -> bool {
self.types.get(&type_uid).is_some()
}
pub fn add_data_variable(&mut self, address: u64, name: Option<CString>, type_uid: TypeUID) {
if let Some((_existing_name, existing_type_uid)) =
self.data_variables.insert(address, (name, type_uid))
{
let existing_type = self.get_type(existing_type_uid).unwrap().1;
let new_type = self.get_type(type_uid).unwrap().1;
if existing_type_uid != type_uid || existing_type != new_type {
error!("DWARF info contains duplicate data variable definition. Overwriting data variable at 0x{:08x} (`{}`) with `{}`",
address,
self.get_type(existing_type_uid).unwrap().1,
self.get_type(type_uid).unwrap().1
);
}
}
}
pub fn set_name(&mut self, die_uid: TypeUID, name: CString) {
assert!(self.names.insert(die_uid, name).is_none());
}
pub fn get_name<R: Reader<Offset = usize>>(
&self,
dwarf: &Dwarf<R>,
unit: &Unit<R>,
entry: &DebuggingInformationEntry<R>,
) -> Option<CString> {
match resolve_specification(dwarf, unit, entry) {
DieReference::Offset(entry_offset) => self
.names
.get(&get_uid(unit, &unit.entry(entry_offset).unwrap()))
.cloned(),
DieReference::UnitAndOffset((entry_unit, entry_offset)) => self
.names
.get(&get_uid(
&entry_unit,
&entry_unit.entry(entry_offset).unwrap(),
))
.cloned(),
}
}
fn commit_types(&self, debug_info: &mut DebugInfo) {
for debug_type in self.types() {
if debug_type.commit {
debug_info.add_type(debug_type.name.clone(), debug_type.t.as_ref());
}
}
}
// TODO : Consume data?
fn commit_data_variables(&self, debug_info: &mut DebugInfo) {
for (&address, (name, type_uid)) in &self.data_variables {
assert!(debug_info.add_data_variable(
address,
&self.get_type(*type_uid).unwrap().1,
name.clone()
));
}
}
fn commit_functions(&self, debug_info: &mut DebugInfo) {
for function in self.functions() {
let return_type = match function.return_type {
Some(return_type_id) => {
Conf::new(self.get_type(return_type_id).unwrap().1.clone(), 0)
}
_ => Conf::new(binaryninja::types::Type::void(), 0),
};
let parameters: Vec<FunctionParameter<CString>> = function
.parameters
.iter()
.filter_map(|parameter| match parameter {
Some((name, 0)) => {
Some(FunctionParameter::new(Type::void(), name.clone(), None))
}
Some((name, uid)) => Some(FunctionParameter::new(
self.get_type(*uid).unwrap().1,
name.clone(),
None,
)),
_ => None,
})
.collect();
// TODO : Handle
let platform = None;
let variable_parameters = false;
// let calling_convention: Option<Ref<CallingConvention<CoreArchitecture>>> = None;
let function_type =
binaryninja::types::Type::function(&return_type, ¶meters, variable_parameters);
let simplified_full_name = function
.full_name
.as_ref()
.map(|name| simplify_str_to_str(name.as_ref()).as_str().to_owned())
.map(|simp| CString::new(simp).unwrap());
debug_info.add_function(DebugFunctionInfo::new(
simplified_full_name.clone(),
simplified_full_name, // | new | identifier_name |
dwarfdebuginfo.rs | You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::helpers::{get_uid, resolve_specification, DieReference};
use binaryninja::{
binaryview::{BinaryView, BinaryViewBase},
debuginfo::{DebugFunctionInfo, DebugInfo},
rc::*,
templatesimplifier::simplify_str_to_str,
types::{Conf, FunctionParameter, Type},
};
use gimli::{DebuggingInformationEntry, Dwarf, Reader, Unit};
use log::error;
use std::{
collections::{hash_map::Values, HashMap},
ffi::CString,
hash::Hash,
};
pub(crate) type TypeUID = usize;
/////////////////////////
// FunctionInfoBuilder
// TODO : Function local variables
#[derive(PartialEq, Eq, Hash)]
pub struct FunctionInfoBuilder {
pub full_name: Option<CString>,
pub raw_name: Option<CString>,
pub return_type: Option<TypeUID>,
pub address: Option<u64>,
pub parameters: Vec<Option<(CString, TypeUID)>>,
}
impl FunctionInfoBuilder {
pub fn update(
&mut self,
full_name: Option<CString>,
raw_name: Option<CString>,
return_type: Option<TypeUID>,
address: Option<u64>,
parameters: Vec<Option<(CString, TypeUID)>>,
) {
if full_name.is_some() {
self.full_name = full_name;
}
if raw_name.is_some() {
self.raw_name = raw_name;
}
if return_type.is_some() {
self.return_type = return_type;
}
if address.is_some() {
self.address = address;
}
for (i, new_parameter) in parameters.into_iter().enumerate() {
if let Some(old_parameter) = self.parameters.get(i) {
if old_parameter.is_none() {
self.parameters[i] = new_parameter;
}
} else {
self.parameters.push(new_parameter);
}
}
}
}
//////////////////////
// DebugInfoBuilder
// TODO : Don't make this pub...fix the value thing
pub(crate) struct DebugType {
name: CString,
t: Ref<Type>,
commit: bool,
}
// DWARF info is stored and displayed in a tree, but is really a graph
// The purpose of this builder is to help resolve those graph edges by mapping partial function
// info and types to one DIE's UID (T) before adding the completed info to BN's debug info
pub struct DebugInfoBuilder {
functions: Vec<FunctionInfoBuilder>,
types: HashMap<TypeUID, DebugType>,
data_variables: HashMap<u64, (Option<CString>, TypeUID)>,
names: HashMap<TypeUID, CString>,
default_address_size: usize,
}
impl DebugInfoBuilder {
pub fn new(view: &BinaryView) -> Self {
DebugInfoBuilder {
functions: vec![],
types: HashMap::new(),
data_variables: HashMap::new(),
names: HashMap::new(),
default_address_size: view.address_size(),
}
}
pub fn default_address_size(&self) -> usize {
self.default_address_size
}
#[allow(clippy::too_many_arguments)]
pub fn insert_function( | raw_name: Option<CString>,
return_type: Option<TypeUID>,
address: Option<u64>,
parameters: Vec<Option<(CString, TypeUID)>>,
) {
if let Some(function) = self.functions.iter_mut().find(|func| {
(func.raw_name.is_some() && func.raw_name == raw_name)
|| (func.full_name.is_some() && func.full_name == full_name)
}) {
function.update(full_name, raw_name, return_type, address, parameters);
} else {
self.functions.push(FunctionInfoBuilder {
full_name,
raw_name,
return_type,
address,
parameters,
});
}
}
pub fn functions(&self) -> &[FunctionInfoBuilder] {
&self.functions
}
pub(crate) fn types(&self) -> Values<'_, TypeUID, DebugType> {
self.types.values()
}
pub fn add_type(&mut self, type_uid: TypeUID, name: CString, t: Ref<Type>, commit: bool) {
if let Some(DebugType {
name: existing_name,
t: existing_type,
commit: _,
}) = self.types.insert(
type_uid,
DebugType {
name: name.clone(),
t: t.clone(),
commit,
},
) {
if existing_type != t {
error!("DWARF info contains duplicate type definition. Overwriting type `{}` (named `{:?}`) with `{}` (named `{:?}`)",
existing_type,
existing_name,
t,
name
);
}
}
}
pub fn remove_type(&mut self, type_uid: TypeUID) {
self.types.remove(&type_uid);
}
// TODO : Non-copy?
pub fn get_type(&self, type_uid: TypeUID) -> Option<(CString, Ref<Type>)> {
self.types
.get(&type_uid)
.map(|type_ref_ref| (type_ref_ref.name.clone(), type_ref_ref.t.clone()))
}
pub fn contains_type(&self, type_uid: TypeUID) -> bool {
self.types.get(&type_uid).is_some()
}
pub fn add_data_variable(&mut self, address: u64, name: Option<CString>, type_uid: TypeUID) {
if let Some((_existing_name, existing_type_uid)) =
self.data_variables.insert(address, (name, type_uid))
{
let existing_type = self.get_type(existing_type_uid).unwrap().1;
let new_type = self.get_type(type_uid).unwrap().1;
if existing_type_uid != type_uid || existing_type != new_type {
error!("DWARF info contains duplicate data variable definition. Overwriting data variable at 0x{:08x} (`{}`) with `{}`",
address,
self.get_type(existing_type_uid).unwrap().1,
self.get_type(type_uid).unwrap().1
);
}
}
}
pub fn set_name(&mut self, die_uid: TypeUID, name: CString) {
assert!(self.names.insert(die_uid, name).is_none());
}
pub fn get_name<R: Reader<Offset = usize>>(
&self,
dwarf: &Dwarf<R>,
unit: &Unit<R>,
entry: &DebuggingInformationEntry<R>,
) -> Option<CString> {
match resolve_specification(dwarf, unit, entry) {
DieReference::Offset(entry_offset) => self
.names
.get(&get_uid(unit, &unit.entry(entry_offset).unwrap()))
.cloned(),
DieReference::UnitAndOffset((entry_unit, entry_offset)) => self
.names
.get(&get_uid(
&entry_unit,
&entry_unit.entry(entry_offset).unwrap(),
))
.cloned(),
}
}
fn commit_types(&self, debug_info: &mut DebugInfo) {
for debug_type in self.types() {
if debug_type.commit {
debug_info.add_type(debug_type.name.clone(), debug_type.t.as_ref());
}
}
}
// TODO : Consume data?
fn commit_data_variables(&self, debug_info: &mut DebugInfo) {
for (&address, (name, type_uid)) in &self.data_variables {
assert!(debug_info.add_data_variable(
address,
&self.get_type(*type_uid).unwrap().1,
name.clone()
));
}
}
fn commit_functions(&self, debug_info: &mut DebugInfo) {
for function in self.functions() {
let return_type = match function.return_type {
Some(return_type_id) => {
Conf::new(self.get_type(return_type_id).unwrap().1.clone(), 0)
}
_ => Conf::new(binaryninja::types::Type::void(), 0),
};
let parameters: Vec<FunctionParameter<CString>> = function
.parameters
.iter()
.filter_map(|parameter| match parameter {
Some((name, 0)) => {
Some(FunctionParameter::new(Type::void(), name.clone(), None))
}
Some((name, uid)) => Some(FunctionParameter::new(
self.get_type(*uid).unwrap().1,
name.clone(),
None,
)),
_ => None,
})
.collect();
// TODO : Handle
let platform = None;
let variable_parameters = false;
// let calling_convention: Option<Ref<CallingConvention<CoreArchitecture>>> = None;
let function_type =
binaryninja::types::Type::function(&return_type, ¶meters, variable_parameters);
let simplified_full_name = function
.full_name
.as_ref()
.map(|name| simplify_str_to_str(name.as_ref()).as_str().to_owned())
.map(|simp| CString::new(simp).unwrap());
debug_info.add_function(DebugFunctionInfo::new(
simplified_full_name.clone(),
simplified_full_name, // | &mut self,
full_name: Option<CString>, | random_line_split |
main.go | bb.llx, bb.lly, bb.urx, bb.lly, bb.urx, bb.ury, bb.llx, bb.ury,
)
// Draw trim marks
if !*longTrimMarks {
stream += fmt.Sprintf(` q
0 0 0 rg %f w
%f %f m %f %f l S
%f %f m %f %f l S
%f %f m %f %f l S
%f %f m %f %f l S
%f %f m %f %f l S
%f %f m %f %f l S
%f %f m %f %f l S
%f %f m %f %f l S
Q `,
trimMarkLineWidth,
mb.llx-1, tb.lly, bb.llx, tb.lly,
mb.llx-1, tb.ury, bb.llx, tb.ury,
tb.llx, mb.ury+1, tb.llx, bb.ury,
tb.urx, mb.ury+1, tb.urx, bb.ury,
bb.urx, tb.ury, mb.urx+1, tb.ury,
bb.urx, tb.lly, mb.urx+1, tb.lly,
tb.llx, bb.lly, tb.llx, mb.lly-1,
tb.urx, bb.lly, tb.urx, mb.lly-1,
)
} else {
stream += fmt.Sprintf(` q
0 0 0 rg %f w
%f %f m %f %f l S
%f %f m %f %f l S
%f %f m %f %f l S
%f %f m %f %f l S
Q `,
trimMarkLineWidth,
mb.llx-1, tb.lly, mb.urx+1, tb.lly, // bottom trim line
mb.llx-1, tb.ury, mb.urx+1, tb.ury, // top trim line
tb.llx, mb.lly-1, tb.llx, mb.ury+1, // left trim line
tb.urx, mb.lly-1, tb.urx, mb.ury+1, // right trim line
)
}
// Draw tile ref
vch := float32(vecCharHeight)
stream += fmt.Sprintf(`
q 0 0 0 rg
q 1 0 0 1 %f %f cm %s Q
q 1 0 0 1 %f %f cm %s Q
Q
q
0 0 0 rg %f w 2 J
%f %f m %f %f l S
%f %f m %f %f l S
%f %f m %f %f l %f %f l h f
%f %f m %f %f l %f %f l h f
Q
`,
bb.urx, bb.ury+vch/2, strToVecChars(numToAlpha(p.tileY), -1, 1),
bb.urx+vch/2, bb.ury, strToVecChars(strconv.Itoa(p.tileX+1), 1, -1),
trimMarkLineWidth,
bb.urx+vch/2, bb.ury+vch/2, bb.urx+vch/2, bb.ury+vch*1.5,
bb.urx+vch/2, bb.ury+vch/2, bb.urx+vch*1.5, bb.ury+vch/2,
bb.urx+vch/4, bb.ury+vch*1.5, bb.urx+vch*3/4, bb.ury+vch*1.5, bb.urx+vch/2, bb.ury+vch*2,
bb.urx+vch*1.5, bb.ury+vch/4, bb.urx+vch*1.5, bb.ury+vch*3/4, bb.urx+vch*2, bb.ury+vch/2,
)
// Draw page ref
stream += fmt.Sprintf(` q 0 0 0 rg
q 1 0 0 1 %f %f cm %s Q
q 1 0 0 1 %f %f cm %s Q
Q `,
tb.llx-vch/2, bb.ury+vch/2, strToVecChars(strconv.Itoa(p.number), -1, 1),
bb.llx-vch/2, bb.ury, strToVecChars("PAGE", -1, -1),
)
// Draw page title
stream += fmt.Sprintf(` q 0 0 0 rg q 1 0 0 1 %f %f cm %s Q Q `,
tb.llx+vch/2, bb.lly-vch/2, strToVecChars(*tileTitle, 1, -1),
)
p.contentIds = append(p.contentIds, overlayID)
return fmt.Sprintf("%d 0 obj\n<< /Length %d >> stream\n%sendstream\nendobj\n",
overlayID, len(stream), stream)
}
func process() error {
// Convert to QDF form
data, err := convertToQDF(*inputFile)
if err != nil {
return err
}
// Get the root page tree object id
m := regexp.MustCompile(`(?m)^\s+/Pages\s+(\d+)\s+\d+\s+R`).FindStringSubmatch(data)
if m == nil {
return fmt.Errorf("cannot find root page tree")
}
pageTreeID, _ := strconv.Atoi(m[1])
nextID, err := getNextFreeObjectID(data)
if err != nil {
return err
}
// Convert page size (which includes margins) in mm to
// tile sizes (which excludes margins) in pt for use with PDF
tileW := (tileSize.width * ptsInInch / mmInInch) - (bleedMargin+trimMargin)*2
tileH := (tileSize.height * ptsInInch / mmInInch) - (bleedMargin+trimMargin)*2
pages := getAllPages(data)
// Sort pages by page number if not already sorted
sort.Slice(pages, func(i, j int) bool {
return pages[i].number < pages[j].number
})
var tiles []*page
for _, p := range pages {
ts := cutPageToTiles(p, tileW, tileH, bleedMargin, trimMargin)
for _, t := range ts {
t.parentID = pageTreeID
}
tiles = append(tiles, ts...)
}
{
// Wrap page content with graphics state preserving streams
objs := fmt.Sprintf(
"%d 0 obj\n<< /Length 1 >> stream\nqendstream\nendobj\n%d 0 obj\n<< /Length 1 >> stream\nQendstream\nendobj\n",
nextID, nextID+1)
data = strings.Replace(data, "\nxref\n", "\n"+objs+"\nxref\n", 1)
for _, t := range tiles {
t.contentIds = append([]int{nextID}, t.contentIds...)
t.contentIds = append(t.contentIds, nextID+1)
}
nextID += 2
}
{
// Create overlays and add it to the doc
b := &strings.Builder{}
for _, t := range tiles {
b.WriteString(createOverlayForPage(nextID, t))
nextID++
}
data = strings.Replace(data, "\nxref\n", "\n"+b.String()+"\nxref\n", 1)
}
data = appendPagesToDoc(data, nextID, tiles)
data = replaceAllDocPagesWith(data, tiles, pageTreeID)
// Write data back to temp file
f, err := ioutil.TempFile("", "pdftilecut-im2-")
if err != nil {
return err
}
if !*debugMode {
defer os.Remove(f.Name())
}
if _, err := f.Write([]byte(data)); err != nil {
f.Close()
return err
}
f.Close()
// Fix and write back an optimized PDF
if err := convertToOptimizedPDF(f.Name(), *outputFile); err != nil {
return err
}
return nil
}
// convertToOptimizedPDF converts in PDF to a compressed with
// object streams PDF using QPDF.
func convertToOptimizedPDF(in string, out string) error | {
q, err := qpdf.New()
if err != nil {
return err
}
defer q.Close()
if !*debugMode {
q.SetSuppressWarnings(true)
}
if err := q.ReadFile(in); err != nil {
return err
}
// TODO enable optimization flags
if err := q.InitFileWrite(out); err != nil {
return err
}
q.SetObjectStreamMode(qpdf.ObjectStreamGenerate)
q.SetStreamDataMode(qpdf.StreamDataPreserve)
q.SetCompressStreams(true)
if err := q.Write(); err != nil { | identifier_body |
|
main.go | () bool {
return r.llx <= r.urx && r.lly <= r.ury
}
type page struct {
id int
number int
tileX int
tileY int
mediaBox rect
cropBox rect
bleedBox rect
trimBox rect
contentIds []int
parentID int
raw string
}
var (
boxReTpl = `(?m)^\s+/%s\s*\[\s*(-?[\d.]+)\s+(-?[\d.]+)\s+(-?[\d.]+)\s+(-?[\d.]+)\s*\]`
bleedBoxRe = regexp.MustCompile(fmt.Sprintf(boxReTpl, "BleedBox"))
cropBoxRe = regexp.MustCompile(fmt.Sprintf(boxReTpl, "CropBox"))
mediaBoxRe = regexp.MustCompile(fmt.Sprintf(boxReTpl, "MediaBox"))
trimBoxRe = regexp.MustCompile(fmt.Sprintf(boxReTpl, "TrimBox"))
contentsRe = regexp.MustCompile(`(?m)^\s+/Contents\s+(?:(\d+)|\[([^\]]*))`)
pageObjRmRe = regexp.MustCompile(
`(?m)^\s+/((Bleed|Crop|Media|Trim|Art)Box|Contents|Parent)\s+(\[[^\]]+\]|\d+\s+\d+\s+R)\n`)
)
// marshal serializes the page to string that can be inserted into
// PDF document.
func (p *page) marshal() string {
b := &strings.Builder{}
fmt.Fprintf(b, "\n%d 0 obj\n<<\n", p.id)
fmt.Fprintf(b, " /MediaBox [ %f %f %f %f ]\n", p.mediaBox.llx, p.mediaBox.lly, p.mediaBox.urx, p.mediaBox.ury)
fmt.Fprintf(b, " /CropBox [ %f %f %f %f ]\n", p.cropBox.llx, p.cropBox.lly, p.cropBox.urx, p.cropBox.ury)
fmt.Fprintf(b, " /BleedBox [ %f %f %f %f ]\n", p.bleedBox.llx, p.bleedBox.lly, p.bleedBox.urx, p.bleedBox.ury)
fmt.Fprintf(b, " /TrimBox [ %f %f %f %f ]\n", p.trimBox.llx, p.trimBox.lly, p.trimBox.urx, p.trimBox.ury)
fmt.Fprintf(b, " /Contents [ ")
for _, cid := range p.contentIds {
fmt.Fprintf(b, " %d 0 R ", cid)
}
fmt.Fprintf(b, " ]\n")
fmt.Fprintf(b, " /Parent %d 0 R\n", p.parentID)
b.WriteString(p.raw)
fmt.Fprintf(b, "\n>>\nendobj\n")
return b.String()
}
// extractAttrs extracts interesting attributes of the page into
// struct elements and removes them from raw string of the page.
func (p *page) extractAttrs() error {
atoi := func(s string) int {
i, err := strconv.Atoi(s)
if err != nil {
panic(err)
}
return i
}
atof := func(s string) float32 {
f, err := strconv.ParseFloat(s, 32)
if err != nil {
panic(err)
}
return float32(f)
}
var m []string
m = contentsRe.FindStringSubmatch(p.raw)
if m == nil {
return fmt.Errorf("cannot find Contents for page:\n%s", p.raw)
}
if m[1] != "" {
p.contentIds = []int{atoi(m[1])}
} else {
m := regexp.MustCompile(`(?m)^\s+(\d+)\s+\d+\s+R`).FindAllStringSubmatch(m[2], -1)
p.contentIds = []int{}
for _, r := range m {
p.contentIds = append(p.contentIds, atoi(r[1]))
}
}
m = mediaBoxRe.FindStringSubmatch(p.raw)
if m == nil {
return fmt.Errorf("cannot find MediaBox for page:\n%s", p.raw)
}
p.mediaBox = rect{atof(m[1]), atof(m[2]), atof(m[3]), atof(m[4])}
if !p.mediaBox.isValid() {
return fmt.Errorf("invalid MediaBox for page:\n%s", p.raw)
}
m = cropBoxRe.FindStringSubmatch(p.raw)
if m == nil {
p.cropBox = p.mediaBox
} else {
p.cropBox = rect{atof(m[1]), atof(m[2]), atof(m[3]), atof(m[4])}
}
if !p.cropBox.isValid() {
return fmt.Errorf("invalid CropBox for page:\n%s", p.raw)
}
m = bleedBoxRe.FindStringSubmatch(p.raw)
if m == nil {
p.bleedBox = p.cropBox
} else {
p.bleedBox = rect{atof(m[1]), atof(m[2]), atof(m[3]), atof(m[4])}
}
if !p.bleedBox.isValid() {
return fmt.Errorf("invalid BleedBox for page:\n%s", p.raw)
}
m = trimBoxRe.FindStringSubmatch(p.raw)
if m == nil {
p.trimBox = p.cropBox
} else {
p.trimBox = rect{atof(m[1]), atof(m[2]), atof(m[3]), atof(m[4])}
}
if !p.trimBox.isValid() {
return fmt.Errorf("invalid TrimBox for page:\n%s", p.raw)
}
// Delete all the extracted raw content
p.raw = pageObjRmRe.ReplaceAllString(p.raw, "")
return nil
}
// cutPageToTiles slices the page into tiles of the given size, setting
// appropriate *Box attributes of the tiles. All other page attributes
// are copied from the original page.
func cutPageToTiles(p *page, tileW, tileH, bleedMargin, trimMargin float32) []*page {
// Adjust tileW and tileH such that all tiles end up with the same dimensions
pageWidth := p.trimBox.urx - p.trimBox.llx
pageHeight := p.trimBox.ury - p.trimBox.lly
hTiles := int(math.Ceil(float64(pageWidth / tileW)))
vTiles := int(math.Ceil(float64(pageHeight / tileH)))
tileW = pageWidth / float32(hTiles)
tileH = pageHeight / float32(vTiles)
var tilePages []*page
tgy := 0
for y := 0; y < vTiles; y++ {
lly := p.trimBox.lly + float32(y)*tileH
tgx := 0
for x := 0; x < hTiles; x++ {
llx := p.trimBox.llx + float32(x)*tileW
tile := page{
tileX: tgx,
tileY: tgy,
mediaBox: rect{
llx - trimMargin - bleedMargin,
lly - trimMargin - bleedMargin,
llx + tileW + trimMargin + bleedMargin,
lly + tileH + trimMargin + bleedMargin,
},
bleedBox: rect{llx - trimMargin, lly - trimMargin, llx + tileW + trimMargin, lly + tileH + trimMargin},
trimBox: rect{llx, lly, llx + tileW, lly + tileH},
number: p.number,
contentIds: append([]int{}, p.contentIds...),
raw: p.raw,
}
tile.cropBox = tile.mediaBox
tilePages = append(tilePages, &tile)
tgx++
}
tgy++
}
return tilePages
}
// appendPagesToDoc appends the given pages after all the other objects
// but before the xref block. It also updates the object ids as it goes
// starting with startID.
func appendPagesToDoc(d string, startID int, pages []*page) string {
var b strings.Builder
for pi, p := range pages {
p.id = pi + startID
b.WriteString(p.marshal())
}
return strings.Replace(d, "\nxref\n", "\n"+b.String()+"\n\nxref\n", 1)
}
// replaceAllDocPagesWith updates the first node of the page tree with array
// containing references to the given pages, effectively replacing all
// the existing page trees.
func replaceAllDocPagesWith(d string, pages []*page, pageTreeID int) string {
b := &strings.Builder{}
for _, p := range pages {
fmt.Fprintf(b, "%d 0 R\n", p.id)
}
// Replace the count
r := regexp.MustCompile(fmt.Sprintf(`(?ms)^(%d 0 obj\n.*?^\s+/Count\s+)\d+`, pageTreeID))
d = r.ReplaceAllString(d, fmt.Sprintf(`${1}%d`, len(pages)))
// Replace page references
r = regexp.MustCompile(fmt.Sprintf(`(?ms)^(%d 0 obj\n.*?^\s+/Kids\s+\[)[^\]]*`, pageTreeID))
d = r.ReplaceAllString(d, fmt.Sprintf(`${1 | isValid | identifier_name |
|
main.go | %f m %f %f l S
%f %f m %f %f l S
Q `,
trimMarkLineWidth,
mb.llx-1, tb.lly, mb.urx+1, tb.lly, // bottom trim line
mb.llx-1, tb.ury, mb.urx+1, tb.ury, // top trim line
tb.llx, mb.lly-1, tb.llx, mb.ury+1, // left trim line
tb.urx, mb.lly-1, tb.urx, mb.ury+1, // right trim line
)
}
// Draw tile ref
vch := float32(vecCharHeight)
stream += fmt.Sprintf(`
q 0 0 0 rg
q 1 0 0 1 %f %f cm %s Q
q 1 0 0 1 %f %f cm %s Q
Q
q
0 0 0 rg %f w 2 J
%f %f m %f %f l S
%f %f m %f %f l S
%f %f m %f %f l %f %f l h f
%f %f m %f %f l %f %f l h f
Q
`,
bb.urx, bb.ury+vch/2, strToVecChars(numToAlpha(p.tileY), -1, 1),
bb.urx+vch/2, bb.ury, strToVecChars(strconv.Itoa(p.tileX+1), 1, -1),
trimMarkLineWidth,
bb.urx+vch/2, bb.ury+vch/2, bb.urx+vch/2, bb.ury+vch*1.5,
bb.urx+vch/2, bb.ury+vch/2, bb.urx+vch*1.5, bb.ury+vch/2,
bb.urx+vch/4, bb.ury+vch*1.5, bb.urx+vch*3/4, bb.ury+vch*1.5, bb.urx+vch/2, bb.ury+vch*2,
bb.urx+vch*1.5, bb.ury+vch/4, bb.urx+vch*1.5, bb.ury+vch*3/4, bb.urx+vch*2, bb.ury+vch/2,
)
// Draw page ref
stream += fmt.Sprintf(` q 0 0 0 rg
q 1 0 0 1 %f %f cm %s Q
q 1 0 0 1 %f %f cm %s Q
Q `,
tb.llx-vch/2, bb.ury+vch/2, strToVecChars(strconv.Itoa(p.number), -1, 1),
bb.llx-vch/2, bb.ury, strToVecChars("PAGE", -1, -1),
)
// Draw page title
stream += fmt.Sprintf(` q 0 0 0 rg q 1 0 0 1 %f %f cm %s Q Q `,
tb.llx+vch/2, bb.lly-vch/2, strToVecChars(*tileTitle, 1, -1),
)
p.contentIds = append(p.contentIds, overlayID)
return fmt.Sprintf("%d 0 obj\n<< /Length %d >> stream\n%sendstream\nendobj\n",
overlayID, len(stream), stream)
}
func process() error {
// Convert to QDF form
data, err := convertToQDF(*inputFile)
if err != nil {
return err
}
// Get the root page tree object id
m := regexp.MustCompile(`(?m)^\s+/Pages\s+(\d+)\s+\d+\s+R`).FindStringSubmatch(data)
if m == nil {
return fmt.Errorf("cannot find root page tree")
}
pageTreeID, _ := strconv.Atoi(m[1])
nextID, err := getNextFreeObjectID(data)
if err != nil {
return err
}
// Convert page size (which includes margins) in mm to
// tile sizes (which excludes margins) in pt for use with PDF
tileW := (tileSize.width * ptsInInch / mmInInch) - (bleedMargin+trimMargin)*2
tileH := (tileSize.height * ptsInInch / mmInInch) - (bleedMargin+trimMargin)*2
pages := getAllPages(data)
// Sort pages by page number if not already sorted
sort.Slice(pages, func(i, j int) bool {
return pages[i].number < pages[j].number
})
var tiles []*page
for _, p := range pages {
ts := cutPageToTiles(p, tileW, tileH, bleedMargin, trimMargin)
for _, t := range ts {
t.parentID = pageTreeID
}
tiles = append(tiles, ts...)
}
{
// Wrap page content with graphics state preserving streams
objs := fmt.Sprintf(
"%d 0 obj\n<< /Length 1 >> stream\nqendstream\nendobj\n%d 0 obj\n<< /Length 1 >> stream\nQendstream\nendobj\n",
nextID, nextID+1)
data = strings.Replace(data, "\nxref\n", "\n"+objs+"\nxref\n", 1)
for _, t := range tiles {
t.contentIds = append([]int{nextID}, t.contentIds...)
t.contentIds = append(t.contentIds, nextID+1)
}
nextID += 2
}
{
// Create overlays and add it to the doc
b := &strings.Builder{}
for _, t := range tiles {
b.WriteString(createOverlayForPage(nextID, t))
nextID++
}
data = strings.Replace(data, "\nxref\n", "\n"+b.String()+"\nxref\n", 1)
}
data = appendPagesToDoc(data, nextID, tiles)
data = replaceAllDocPagesWith(data, tiles, pageTreeID)
// Write data back to temp file
f, err := ioutil.TempFile("", "pdftilecut-im2-")
if err != nil {
return err
}
if !*debugMode {
defer os.Remove(f.Name())
}
if _, err := f.Write([]byte(data)); err != nil {
f.Close()
return err
}
f.Close()
// Fix and write back an optimized PDF
if err := convertToOptimizedPDF(f.Name(), *outputFile); err != nil {
return err
}
return nil
}
// convertToOptimizedPDF converts in PDF to a compressed with
// object streams PDF using QPDF.
func convertToOptimizedPDF(in string, out string) error {
q, err := qpdf.New()
if err != nil {
return err
}
defer q.Close()
if !*debugMode {
q.SetSuppressWarnings(true)
}
if err := q.ReadFile(in); err != nil {
return err
}
// TODO enable optimization flags
if err := q.InitFileWrite(out); err != nil {
return err
}
q.SetObjectStreamMode(qpdf.ObjectStreamGenerate)
q.SetStreamDataMode(qpdf.StreamDataPreserve)
q.SetCompressStreams(true)
if err := q.Write(); err != nil {
return err
}
return nil
}
// convertToQDF uses QPDF to convert an input PDF to a normalized
// format that is easy to parse and manipulate.
func convertToQDF(in string) (string, error) {
q, err := qpdf.New()
if err != nil {
return "", err
}
defer q.Close()
if !*debugMode {
q.SetSuppressWarnings(true)
}
if err := q.ReadFile(in); err != nil {
return "", err
}
f, err := ioutil.TempFile("", "pdftilecut-im-")
if err != nil {
return "", nil
}
f.Close()
if !*debugMode {
defer os.Remove(f.Name())
}
if err := q.InitFileWrite(f.Name()); err != nil {
return "", err
}
q.SetQDFMode(true)
q.SetObjectStreamMode(qpdf.ObjectStreamDisable)
q.SetStreamDataMode(qpdf.StreamDataPreserve)
if err := q.Write(); err != nil {
return "", err
}
q.Close() // free up memory as soon as possible
f, err = os.Open(f.Name())
if err != nil {
return "", err
}
defer f.Close()
b, err := ioutil.ReadAll(f)
if err != nil {
return "", err
}
return string(b), nil
}
func main() {
if err := run(); err != nil {
log.Fatal(err)
}
}
func run() error {
flag.Parse()
// Create temp file for input and output if needed
if *inputFile == "-" {
f, err := ioutil.TempFile("", "pdftilecut-in-")
if err != nil {
return err
}
defer os.Remove(f.Name())
if _, err := io.Copy(f, os.Stdin); err != nil | {
return err
} | conditional_block |
|
main.go | S
%f %f m %f %f l S
%f %f m %f %f l S
%f %f m %f %f l S
%f %f m %f %f l S
%f %f m %f %f l S
%f %f m %f %f l S
%f %f m %f %f l S
Q `,
trimMarkLineWidth,
mb.llx-1, tb.lly, bb.llx, tb.lly,
mb.llx-1, tb.ury, bb.llx, tb.ury,
tb.llx, mb.ury+1, tb.llx, bb.ury,
tb.urx, mb.ury+1, tb.urx, bb.ury,
bb.urx, tb.ury, mb.urx+1, tb.ury,
bb.urx, tb.lly, mb.urx+1, tb.lly,
tb.llx, bb.lly, tb.llx, mb.lly-1,
tb.urx, bb.lly, tb.urx, mb.lly-1,
)
} else {
stream += fmt.Sprintf(` q
0 0 0 rg %f w
%f %f m %f %f l S
%f %f m %f %f l S
%f %f m %f %f l S
%f %f m %f %f l S
Q `,
trimMarkLineWidth,
mb.llx-1, tb.lly, mb.urx+1, tb.lly, // bottom trim line
mb.llx-1, tb.ury, mb.urx+1, tb.ury, // top trim line
tb.llx, mb.lly-1, tb.llx, mb.ury+1, // left trim line
tb.urx, mb.lly-1, tb.urx, mb.ury+1, // right trim line
)
}
// Draw tile ref
vch := float32(vecCharHeight)
stream += fmt.Sprintf(`
q 0 0 0 rg
q 1 0 0 1 %f %f cm %s Q
q 1 0 0 1 %f %f cm %s Q
Q
q
0 0 0 rg %f w 2 J
%f %f m %f %f l S
%f %f m %f %f l S
%f %f m %f %f l %f %f l h f
%f %f m %f %f l %f %f l h f
Q
`,
bb.urx, bb.ury+vch/2, strToVecChars(numToAlpha(p.tileY), -1, 1),
bb.urx+vch/2, bb.ury, strToVecChars(strconv.Itoa(p.tileX+1), 1, -1),
trimMarkLineWidth,
bb.urx+vch/2, bb.ury+vch/2, bb.urx+vch/2, bb.ury+vch*1.5,
bb.urx+vch/2, bb.ury+vch/2, bb.urx+vch*1.5, bb.ury+vch/2,
bb.urx+vch/4, bb.ury+vch*1.5, bb.urx+vch*3/4, bb.ury+vch*1.5, bb.urx+vch/2, bb.ury+vch*2,
bb.urx+vch*1.5, bb.ury+vch/4, bb.urx+vch*1.5, bb.ury+vch*3/4, bb.urx+vch*2, bb.ury+vch/2,
)
// Draw page ref
stream += fmt.Sprintf(` q 0 0 0 rg
q 1 0 0 1 %f %f cm %s Q
q 1 0 0 1 %f %f cm %s Q
Q `,
tb.llx-vch/2, bb.ury+vch/2, strToVecChars(strconv.Itoa(p.number), -1, 1),
bb.llx-vch/2, bb.ury, strToVecChars("PAGE", -1, -1),
)
// Draw page title
stream += fmt.Sprintf(` q 0 0 0 rg q 1 0 0 1 %f %f cm %s Q Q `,
tb.llx+vch/2, bb.lly-vch/2, strToVecChars(*tileTitle, 1, -1),
)
p.contentIds = append(p.contentIds, overlayID)
return fmt.Sprintf("%d 0 obj\n<< /Length %d >> stream\n%sendstream\nendobj\n",
overlayID, len(stream), stream)
}
func process() error {
// Convert to QDF form
data, err := convertToQDF(*inputFile)
if err != nil {
return err
}
// Get the root page tree object id
m := regexp.MustCompile(`(?m)^\s+/Pages\s+(\d+)\s+\d+\s+R`).FindStringSubmatch(data)
if m == nil {
return fmt.Errorf("cannot find root page tree")
}
pageTreeID, _ := strconv.Atoi(m[1])
nextID, err := getNextFreeObjectID(data)
if err != nil {
return err
}
// Convert page size (which includes margins) in mm to
// tile sizes (which excludes margins) in pt for use with PDF
tileW := (tileSize.width * ptsInInch / mmInInch) - (bleedMargin+trimMargin)*2
tileH := (tileSize.height * ptsInInch / mmInInch) - (bleedMargin+trimMargin)*2
pages := getAllPages(data)
// Sort pages by page number if not already sorted
sort.Slice(pages, func(i, j int) bool {
return pages[i].number < pages[j].number
})
var tiles []*page
for _, p := range pages {
ts := cutPageToTiles(p, tileW, tileH, bleedMargin, trimMargin)
for _, t := range ts {
t.parentID = pageTreeID
}
tiles = append(tiles, ts...)
}
{
// Wrap page content with graphics state preserving streams
objs := fmt.Sprintf(
"%d 0 obj\n<< /Length 1 >> stream\nqendstream\nendobj\n%d 0 obj\n<< /Length 1 >> stream\nQendstream\nendobj\n",
nextID, nextID+1)
data = strings.Replace(data, "\nxref\n", "\n"+objs+"\nxref\n", 1)
for _, t := range tiles {
t.contentIds = append([]int{nextID}, t.contentIds...)
t.contentIds = append(t.contentIds, nextID+1)
}
nextID += 2
}
{
// Create overlays and add it to the doc
b := &strings.Builder{}
for _, t := range tiles {
b.WriteString(createOverlayForPage(nextID, t))
nextID++
}
data = strings.Replace(data, "\nxref\n", "\n"+b.String()+"\nxref\n", 1)
}
data = appendPagesToDoc(data, nextID, tiles)
data = replaceAllDocPagesWith(data, tiles, pageTreeID)
// Write data back to temp file
f, err := ioutil.TempFile("", "pdftilecut-im2-")
if err != nil {
return err
}
if !*debugMode {
defer os.Remove(f.Name())
}
if _, err := f.Write([]byte(data)); err != nil {
f.Close()
return err
}
f.Close()
// Fix and write back an optimized PDF
if err := convertToOptimizedPDF(f.Name(), *outputFile); err != nil {
return err
}
return nil
}
// convertToOptimizedPDF converts in PDF to a compressed with
// object streams PDF using QPDF.
func convertToOptimizedPDF(in string, out string) error {
q, err := qpdf.New()
if err != nil {
return err
}
defer q.Close()
if !*debugMode {
q.SetSuppressWarnings(true)
}
if err := q.ReadFile(in); err != nil {
return err
}
// TODO enable optimization flags
if err := q.InitFileWrite(out); err != nil {
return err
}
q.SetObjectStreamMode(qpdf.ObjectStreamGenerate)
q.SetStreamDataMode(qpdf.StreamDataPreserve)
q.SetCompressStreams(true)
if err := q.Write(); err != nil {
return err
}
return nil
}
// convertToQDF uses QPDF to convert an input PDF to a normalized
// format that is easy to parse and manipulate.
func convertToQDF(in string) (string, error) {
q, err := qpdf.New()
if err != nil {
return "", err
}
defer q.Close() | if !*debugMode { | random_line_split |
|
app.py |
socketio.emit('patientInfo',res)
@socketio.on('startTest')
def startTest(index):
print('start Test')
info = datas[index]
#print(info)
socketio.emit('startTest',{'testType':info['type'],'narration':info['narration']})
@socketio.on('testFinished')
def testFinished():
socketio.emit('testFinished',True)
@socketio.on('startWords')
def startWords(data):
info = datas[0]
print('start words',data,info)
gsp = gspeech.Gspeech()
stop = False
findwords = []
word = words[data].copy()
print(word)
@socketio.on('stopWords')
def stopWords():
stop = True
while True:
# 음성 인식 될때까지 대기 한다.
stt = gsp.getText()
finded = []
stt = stt.strip()
print(stt)
time.sleep(0.01)
for r in word:
if (r in stt) and not (r in finded):
wordsResult = {'response':{'index':str(word.index(r)+1),
'phase':0,
'result':{'response':r,'score':1}}}
#print(wordsResult)
if not stop:
print('emit result')
socketio.emit('wordsResult',wordsResult)
findwords.append(word.index(r))
finded.append(r)
for x in findwords:
word[x] = '!@'
if (not stt) or stop:
print('stop')
break
print('end startwords')
@socketio.on('startSingleWords')
def startSingleWordsMemory(data):
print('startSingleWordsMemory',data)
gsp = gspeech.Gspeech()
stop = False
findwords = []
word = words['single'].copy()
print(word)
order = 1
@socketio.on('stopSingleWords')
def stopSingleWords():
stop = True
while True:
# 음성 인식 될때까지 대기 한다.
stt = gsp.getText()
finded = []
stt = stt.strip()
print(stt)
time.sleep(0.01)
for r in word:
if (r in stt) and not (r in finded):
wordsResult = {'response':{'index':str(word.index(r)+1),
'phase':data,
'result':{'order':order,'word':r,'score':1}}}
#print(wordsResult)
if not stop:
print('emit result')
socketio.emit('SingleWordsResult',wordsResult)
print('inc order')
order +=1
findwords.append(word.index(r))
finded.append(r)
break
for x in findwords:
word[x] = '!@'
if (not stt) or stop:
print('stop')
break
print('end startSingleWords')
@socketio.on('startSM')
def startSM(data):
index = data['index']
correct = data['correct']
corrects = ['일','이','삼','사','오','육','칠','팔','구','십']
print('start SM')
print(index,correct)
gsp = gspeech.Gspeech()
stop = False
@socketio.on('stopSM')
def stopSM():
stop = True
while True:
# 음성 인식 될때까지 대기 한다.
stt = gsp.getText()
finded = []
stt = stt.strip()
print(stt)
time.sleep(0.01)
if stt in [correct,corrects[int(correct)-1]]:
print('clear')
stop = True
Result = {'response':{'index':index,
'phase':0,
'result':{'response':int(correct) ,'score':1}}}
#print(wordsResult)
print('emit result')
socketio.emit('SMResult',Result)
if (not stt) or stop:
print('stop')
break
print('end SM')
@socketio.on('startSMM')
def startSMM(data):
index = data['index']
correct = data['correct']
print('start SMM')
print(index,correct)
gsp = gspeech.Gspeech()
stop = False
@socketio.on('stopSMM')
def stopSM():
stop = True
while True:
# 음성 인식 될때까지 대기 한다.
stt = gsp.getText()
stt = stt.strip()
print(stt)
time.sleep(0.01)
if stt == '예':
stop = True
Result = {'response':{'index':index,
'phase':0,
'result':{'response':True ,'score':1*(correct == 0)}}}
#print(wordsResult)
print('emit result')
socketio.emit('SMMResult',Result)
elif stt == '아니오' or stt=='아니요':
stop = True
Result = {'response':{'index':index,
'phase':0,
'result':{'response': False,'score':1*(correct == 1)}}}
#print(wordsResult)
print('emit result')
socketio.emit('SMMResult',Result)
if (not stt) or stop:
print('stop')
break
print('end SMM')
@socketio.on('startStickMemory')
def startStickMemory(element):
print(element)
info = datas[9].copy()
index = element['index']
correct = element['content']['correct']
print(info)
socketio.emit('startNarration',{'testType':info['type'],'narration':info['narration'],'questions':element})
gsp = gspeech.Gspeech()
stop = False
@socketio.on('stopStickMemory')
def stopStickMemory():
stop = True
while True:
# 음성 인식 될때까지 대기 한다.
stt = gsp.getText()
stt = stt.strip()
print(stt)
time.sleep(0.01)
if stt == '예':
stop = True
Result = {'response':{'index':index,
'phase':0,
'result':{'response':True ,'score':1*(correct == '0')}}}
#print(wordsResult)
print('emit result')
socketio.emit('stickMemoryResult',Result)
elif stt == '아니오' or stt=='아니요':
stop = True
Result = {'response':{'index':index,
'phase':0,
'result':{'response': False,'score':1*(correct == '1')}}}
#print(wordsResult)
print('emit result')
socketio.emit('stickMemoryResult',Result)
if (not stt) or stop:
print('stop')
gsp.mic.Pause()
break
@socketio.on('startShapeColor')
def startShapeColor(element):
print(element)
@socketio.on('stopShapeColor')
def stopShapeColor():
stop = True
info = datas[13].copy()
index = element['index']
if int(index) < 0 :
index = '0'
word = element['content']['word']
socketio.emit('startNarration',{'testType':info['type'],'narration':info['narration'],'questions':element})
wordlist = ['무', '수박', '귤', '호두', '당근', '깻잎', '연근', '오이', '고추', '땅콩', '말', '토끼', '다람쥐', '금붕어', '돼지', '오리']
corlist = [1,2,1,1,2,2,2,1,1,2,1,2,2,1,2,1]
numberlist = [['1','일'],['2','이']]
cor = numberlist[corlist[int(index)]-1]
gsp = gspeech.Gspeech()
stop = False
while True:
# 음성 인식 될때까지 대기 한다.
stt = gsp.getText()
stt = stt.strip()
print(stt)
time.sleep(0.01)
print('go')
stop = True
Result = {'response':{'index':index if int(index) > 0 else str(int(index)-1),
'phase':0,
'result':{'word': stt,'imageScore':1*((cor[0] in stt) or (cor[1] in stt)),
'nameScore':1*(word in stt)}}}
#print(wordsResult)
print('emit result')
socketio.emit('shapeColorResult',Result)
if (not stt) or stop:
print('stop')
gsp.pauseMic()
break
@socketio.on('startwordFluencyTest')
def startShapeColor(element):
start = time.time()
#print(element)
gsp = gspeech.Gspeech() | random_line_split |
||
app.py | 1. 미어캣 82. 코요테
83. 라마 84. 딱따구리
85. 기러기 86. 비둘기
87. 스컹크 88. 아르마딜로
89. 돌고래 90. 까마귀
91. 매 92. 낙타
93. 여우 94. 사슴
95. 늑대 96. 재규어
97. 알파카 98. 양
99. 다람쥐 100. 담비
'''.split()[1::2]
anilist = list(map(lambda x : x.strip(),anilist))
@app.route("/")
def main():
return ' '
@socketio.on('connect')
def on_connect(client):
print('conn',client)
@socketio.on('disconnect')
def disconnect():
cur = con.cursor(pymysql.cursors.DictCursor)
print('discon',request.sid)
if request.sid in patients:
sql = "DELETE FROM TN_Scheduler WHERE phoneNumber='{}'".format(patients[request.sid])
cur.execute(sql)
con.commit()
return
@socketio.on('patientJoin')
def checkpatient(data):
phone = data['phoneNumber']
cur = con.cursor(pymysql.cursors.DictCur | ql = "SELECT patCd,NAME,BIRTH FROM TN_CM_TRGTER_INFO WHERE TEL_NO_1='{}' and TEL_NO_2='{}' and TEL_NO_3='{}'".format(phone[:3],phone[3:7],phone[7:])
cur.execute(sql)
# 데이타 Fetch
rows = cur.fetchone()
print(rows,request.sid)
if rows :
socketio.emit('patientJoin',True)
sql = "INSERT INTO TN_Scheduler (patCd,NAME,phoneNumber,BIRTH) VALUES (%s,%s,%s,%s)"
val = (rows['patCd'],rows['NAME'],phone,rows['BIRTH'])
cur.execute(sql,val)
con.commit()
patients[request.sid] = phone
else :
socketio.emit('patientJoin',False)
@socketio.on('doctorJoin')
def checkdoctor(data):
print('doctorJoin',request.sid)
cur = con.cursor(pymysql.cursors.DictCursor)
sql = "SELECT USER_ID FROM TN_CM_USER_INFO WHERE LOGIN_ID='{}'".format(data['id'])
cur.execute(sql)
# 데이타 Fetch
rows = cur.fetchone()
userid = rows['USER_ID']
#print(patients[data['phoneNumber']])
if rows:
socketio.emit('doctorJoin',True)
@socketio.on('patientInfo')
def getPatientInfo():
print('patientInfo')
cur = con.cursor(pymysql.cursors.DictCursor)
sql = "SELECT patCd,NAME,phoneNumber,BIRTH FROM TN_Scheduler"
cur.execute(sql)
# 데이타 Fetch
rows = cur.fetchall()
res = json.dumps(rows)
# 전체 rows
socketio.emit('patientInfo',res)
@socketio.on('startTest')
def startTest(index):
print('start Test')
info = datas[index]
#print(info)
socketio.emit('startTest',{'testType':info['type'],'narration':info['narration']})
@socketio.on('testFinished')
def testFinished():
socketio.emit('testFinished',True)
@socketio.on('startWords')
def startWords(data):
info = datas[0]
print('start words',data,info)
gsp = gspeech.Gspeech()
stop = False
findwords = []
word = words[data].copy()
print(word)
@socketio.on('stopWords')
def stopWords():
stop = True
while True:
# 음성 인식 될때까지 대기 한다.
stt = gsp.getText()
finded = []
stt = stt.strip()
print(stt)
time.sleep(0.01)
for r in word:
if (r in stt) and not (r in finded):
wordsResult = {'response':{'index':str(word.index(r)+1),
'phase':0,
'result':{'response':r,'score':1}}}
#print(wordsResult)
if not stop:
print('emit result')
socketio.emit('wordsResult',wordsResult)
findwords.append(word.index(r))
finded.append(r)
for x in findwords:
word[x] = '!@'
if (not stt) or stop:
print('stop')
break
print('end startwords')
@socketio.on('startSingleWords')
def startSingleWordsMemory(data):
print('startSingleWordsMemory',data)
gsp = gspeech.Gspeech()
stop = False
findwords = []
word = words['single'].copy()
print(word)
order = 1
@socketio.on('stopSingleWords')
def stopSingleWords():
stop = True
while True:
# 음성 인식 될때까지 대기 한다.
stt = gsp.getText()
finded = []
stt = stt.strip()
print(stt)
time.sleep(0.01)
for r in word:
if (r in stt) and not (r in finded):
wordsResult = {'response':{'index':str(word.index(r)+1),
'phase':data,
'result':{'order':order,'word':r,'score':1}}}
#print(wordsResult)
if not stop:
print('emit result')
socketio.emit('SingleWordsResult',wordsResult)
print('inc order')
order +=1
findwords.append(word.index(r))
finded.append(r)
break
for x in findwords:
word[x] = '!@'
if (not stt) or stop:
print('stop')
break
print('end startSingleWords')
@socketio.on('startSM')
def startSM(data):
index = data['index']
correct = data['correct']
corrects = ['일','이','삼','사','오','육','칠','팔','구','십']
print('start SM')
print(index,correct)
gsp = gspeech.Gspeech()
stop = False
@socketio.on('stopSM')
def stopSM():
stop = True
while True:
# 음성 인식 될때까지 대기 한다.
stt = gsp.getText()
finded = []
stt = stt.strip()
print(stt)
time.sleep(0.01)
if stt in [correct,corrects[int(correct)-1]]:
print('clear')
stop = True
Result = {'response':{'index':index,
'phase':0,
'result':{'response':int(correct) ,'score':1}}}
#print(wordsResult)
print('emit result')
socketio.emit('SMResult',Result)
if (not stt) or stop:
print('stop')
break
print('end SM')
@socketio.on('startSMM')
def startSMM(data):
index = data['index']
correct = data['correct']
print('start SMM')
print(index,correct)
gsp = gspeech.Gspeech()
stop = False
@socketio.on('stopSMM')
def stopSM():
stop = True
while True:
# 음성 인식 될때까지 대기 한다.
stt = gsp.getText()
stt = stt.strip()
print(stt)
time.sleep(0.01)
if stt == '예':
stop = True
Result = {'response':{'index':index,
'phase':0,
'result':{'response':True ,'score':1*(correct == 0)}}}
#print(wordsResult)
print('emit result')
socketio.emit('SMMResult',Result)
elif stt == '아니오' or stt=='아니요':
stop = True
Result = {'response':{'index':index,
'phase':0,
'result':{'response': False,'score':1*(correct == 1)}}}
#print(wordsResult)
print('emit result')
socketio.emit('SMMResult',Result)
if (not stt) or stop:
print('stop')
break
print('end SMM')
@socketio.on('startStickMemory')
def startStickMemory(element):
print(element)
info = datas[9].copy()
index = element['index']
correct = element['content']['correct']
print(info)
socketio.emit('startNarration',{'testType':info['type'],'narration':info['narration'],'questions':element})
gsp = gspeech.Gspeech()
stop = False
@socketio.on('stopStickMemory')
def stopStickMemory():
stop = True
while True:
# 음성 인식 될때까지 대기 한다.
stt = gsp.getText | sor)
s | identifier_body |
app.py | 81. 미어캣 82. 코요테
83. 라마 84. 딱따구리
85. 기러기 86. 비둘기
87. 스컹크 88. 아르마딜로
89. 돌고래 90. 까마귀
91. 매 92. 낙타
93. 여우 94. 사슴
95. 늑대 96. 재규어
97. 알파카 98. 양
99. 다람쥐 100. 담비
'''.split()[1::2]
anilist = list(map(lambda x : x.strip(),anilist))
@app.route("/")
def main():
return ' '
@socketio.on('connect')
def on_connect(client):
print('conn',client)
@socketio.on('disconnect')
def disconnect():
cur = con.cursor(pymysql.cursors.DictCursor)
print('discon',request.sid)
if request.sid in patients:
sql = "DELETE FROM TN_Scheduler WHERE phoneNumber='{}'".format(patients[request.sid])
cur.execute(sql)
con.commit()
return
@socketio.on('patientJoin')
def checkpatient(data):
phone = data['phoneNumber']
cur = con.cursor(pymysql.cursors.DictCursor)
sql = "SELECT patCd,NAME,BIRTH FROM TN_CM_TRGTER_INFO WHERE TEL_NO_1='{}' and TEL_NO_2='{}' and TEL_NO_3='{}'".format(phone[:3],phone[3:7],phone[7:])
cur.execute(sql)
# 데이타 Fetch
rows = cur.fetchone()
print(rows,request.sid)
if rows :
socketio.emit('patientJoin',True)
sql = "INSERT INTO TN_Scheduler (patCd,NAME,phoneNumber,BIRTH) VALUES (%s,%s,%s,%s)"
val = (rows['patCd'],rows['NAME'],phone,rows['BIRTH'])
cur.execute(sql,val)
con.commit()
patients[request.sid] = phone
else :
socketio.emit('patientJoin',False)
@socketio.on('doctorJoin')
def checkdoctor(data):
print('doctorJoin',request.sid)
cur = con.cursor(pymysql.cursors.DictCursor)
sql = "SELECT USER_ID FROM TN_CM_USER_INFO WHERE LOGIN_ID='{}'".format(da | Info')
cur = con.cursor(pymysql.cursors.DictCursor)
sql = "SELECT patCd,NAME,phoneNumber,BIRTH FROM TN_Scheduler"
cur.execute(sql)
# 데이타 Fetch
rows = cur.fetchall()
res = json.dumps(rows)
# 전체 rows
socketio.emit('patientInfo',res)
@socketio.on('startTest')
def startTest(index):
print('start Test')
info = datas[index]
#print(info)
socketio.emit('startTest',{'testType':info['type'],'narration':info['narration']})
@socketio.on('testFinished')
def testFinished():
socketio.emit('testFinished',True)
@socketio.on('startWords')
def startWords(data):
info = datas[0]
print('start words',data,info)
gsp = gspeech.Gspeech()
stop = False
findwords = []
word = words[data].copy()
print(word)
@socketio.on('stopWords')
def stopWords():
stop = True
while True:
# 음성 인식 될때까지 대기 한다.
stt = gsp.getText()
finded = []
stt = stt.strip()
print(stt)
time.sleep(0.01)
for r in word:
if (r in stt) and not (r in finded):
wordsResult = {'response':{'index':str(word.index(r)+1),
'phase':0,
'result':{'response':r,'score':1}}}
#print(wordsResult)
if not stop:
print('emit result')
socketio.emit('wordsResult',wordsResult)
findwords.append(word.index(r))
finded.append(r)
for x in findwords:
word[x] = '!@'
if (not stt) or stop:
print('stop')
break
print('end startwords')
@socketio.on('startSingleWords')
def startSingleWordsMemory(data):
print('startSingleWordsMemory',data)
gsp = gspeech.Gspeech()
stop = False
findwords = []
word = words['single'].copy()
print(word)
order = 1
@socketio.on('stopSingleWords')
def stopSingleWords():
stop = True
while True:
# 음성 인식 될때까지 대기 한다.
stt = gsp.getText()
finded = []
stt = stt.strip()
print(stt)
time.sleep(0.01)
for r in word:
if (r in stt) and not (r in finded):
wordsResult = {'response':{'index':str(word.index(r)+1),
'phase':data,
'result':{'order':order,'word':r,'score':1}}}
#print(wordsResult)
if not stop:
print('emit result')
socketio.emit('SingleWordsResult',wordsResult)
print('inc order')
order +=1
findwords.append(word.index(r))
finded.append(r)
break
for x in findwords:
word[x] = '!@'
if (not stt) or stop:
print('stop')
break
print('end startSingleWords')
@socketio.on('startSM')
def startSM(data):
index = data['index']
correct = data['correct']
corrects = ['일','이','삼','사','오','육','칠','팔','구','십']
print('start SM')
print(index,correct)
gsp = gspeech.Gspeech()
stop = False
@socketio.on('stopSM')
def stopSM():
stop = True
while True:
# 음성 인식 될때까지 대기 한다.
stt = gsp.getText()
finded = []
stt = stt.strip()
print(stt)
time.sleep(0.01)
if stt in [correct,corrects[int(correct)-1]]:
print('clear')
stop = True
Result = {'response':{'index':index,
'phase':0,
'result':{'response':int(correct) ,'score':1}}}
#print(wordsResult)
print('emit result')
socketio.emit('SMResult',Result)
if (not stt) or stop:
print('stop')
break
print('end SM')
@socketio.on('startSMM')
def startSMM(data):
index = data['index']
correct = data['correct']
print('start SMM')
print(index,correct)
gsp = gspeech.Gspeech()
stop = False
@socketio.on('stopSMM')
def stopSM():
stop = True
while True:
# 음성 인식 될때까지 대기 한다.
stt = gsp.getText()
stt = stt.strip()
print(stt)
time.sleep(0.01)
if stt == '예':
stop = True
Result = {'response':{'index':index,
'phase':0,
'result':{'response':True ,'score':1*(correct == 0)}}}
#print(wordsResult)
print('emit result')
socketio.emit('SMMResult',Result)
elif stt == '아니오' or stt=='아니요':
stop = True
Result = {'response':{'index':index,
'phase':0,
'result':{'response': False,'score':1*(correct == 1)}}}
#print(wordsResult)
print('emit result')
socketio.emit('SMMResult',Result)
if (not stt) or stop:
print('stop')
break
print('end SMM')
@socketio.on('startStickMemory')
def startStickMemory(element):
print(element)
info = datas[9].copy()
index = element['index']
correct = element['content']['correct']
print(info)
socketio.emit('startNarration',{'testType':info['type'],'narration':info['narration'],'questions':element})
gsp = gspeech.Gspeech()
stop = False
@socketio.on('stopStickMemory')
def stopStickMemory():
stop = True
while True:
# 음성 인식 될때까지 대기 한다.
stt = gsp.getText()
| ta['id'])
cur.execute(sql)
# 데이타 Fetch
rows = cur.fetchone()
userid = rows['USER_ID']
#print(patients[data['phoneNumber']])
if rows:
socketio.emit('doctorJoin',True)
@socketio.on('patientInfo')
def getPatientInfo():
print('patient | conditional_block |
app.py | 81. 미어캣 82. 코요테
83. 라마 84. 딱따구리
85. 기러기 86. 비둘기
87. 스컹크 88. 아르마딜로
89. 돌고래 90. 까마귀
91. 매 92. 낙타
93. 여우 94. 사슴
95. 늑대 96. 재규어
97. 알파카 98. 양
99. 다람쥐 100. 담비
'''.split()[1::2]
anilist = list(map(lambda x : x.strip(),anilist))
@app.route("/")
def main():
return ' '
@socketio.on('connect')
def on_connect(client):
print('conn',client)
@socketio.on('disconnect')
def disconnect():
cur = con.cursor(pymysql.cursors.DictCursor)
print('discon',request.sid)
if request.sid in patients:
sql = "DELETE FROM TN_Scheduler WHERE phoneNumber='{}'".format(patients[request.sid])
cur.execute(sql)
con.commit()
return
@socketio.on('patientJoin')
def checkpatient(data):
phone = data['phoneNumber']
cur = con.cursor(pymysql.cursors.DictCursor)
sql = "SELECT patCd,NAME,BIRTH FROM TN_CM_TRGTER_INFO WHERE TEL_NO_1='{}' and TEL_NO_2='{}' and TEL_NO_3='{}'".format(phone[:3],phone[3:7],phone[7:])
cur.execute(sql)
# 데이타 Fetch
rows = cur.fetchone()
print(rows,request.sid)
if rows :
socketio.emit('patientJoin',True)
sql = "INSERT INTO TN_Scheduler (patCd,NAME,phoneNumber,BIRTH) VALUES (%s,%s,%s,%s)"
val = (rows['patCd'],rows['NAME'],phone,rows['BIRTH'])
cur.execute(sql,val)
con.commit()
patients[request.sid] = phone
else :
socketio.emit('patientJoin',False)
@socketio.on('doctorJoin')
def checkdoctor(data):
print('doctorJoin',request.sid)
cur = con.cursor(pymysql.cursors.DictCursor)
sql = "SELECT USER_ID FROM TN_CM_USER_INFO WHERE LOGIN_ID='{}'".format(data['id'])
cur.execute(sql)
# 데이타 Fetch
rows = cur.fetchone()
userid = rows['USER_ID']
#print(patients[data['phoneNumber']])
if rows:
socketio.emit('doctorJoin',True)
@socketio.on('patientInfo')
def getPatientInfo():
print('patientInfo')
cur = con.cursor(pymysql.cursors.DictCursor)
sql = "SELECT patCd,NAME,phoneNumber,BIRTH FROM TN_Scheduler"
cur.execute(sql)
# 데이타 Fetch
rows = cur.fetchall()
res = json.dumps(rows)
# 전체 rows
socketio.emit('patientInfo',res)
@socketio.on('startTest')
def startTest(index):
print('start Test')
info = datas[index]
#print(info)
socketio.emit('startTest',{'testType':info['type'],'narration':info['narration']})
@socketio.on('testFinished')
def testFinished():
socketio.emit('testFinished',True)
@socketio.on('startWords')
def startWords(data):
info = datas[0]
print('start words',data,info)
gsp = gspeech.Gspeech()
stop = False
findwords = []
word = words[data].copy()
print(word)
@socketio.on('stopWords')
def stopWords():
stop = True
while True:
# 음성 인식 될때까지 대기 한다.
stt = gsp.getText()
finded = []
stt = stt.strip()
print(stt)
time.sleep(0.01)
for r in word:
if (r in stt) and not (r in finded):
wordsResult = {'response':{'index':str(word.index(r)+1),
'phase':0,
'result':{'response':r,'score':1}}}
#print(wordsResult)
if not stop:
print('emit result')
socketio.emit('wordsResult',wordsResult)
findwords.append(word.index(r))
finded.append(r)
for x in findwords:
word[x] = '!@'
if (not stt) or stop:
print('stop')
break
print('end startwords')
@socketio.on('startSingleWords')
def startSingleWordsMemory(data):
print('startSingleWordsMemory',data)
gsp = gspeech.Gspeech()
stop = False
findwords = []
word = words['single'].copy()
print(word)
order = 1
@socketio.on('stopSingleWords')
def stopSingleWords():
stop = True
while True:
# 음성 인식 될때까지 대기 한다.
stt = gsp.getText()
finded = []
stt = stt.strip()
print(stt)
time.sleep(0.01)
for r in word:
if (r in stt) and not (r in finded):
wordsResult = {'response':{'index':str(word.index(r)+1),
'phase':data,
'result':{'order':order,'word':r,'score':1}}}
#print(wordsResult)
if not stop:
print('emit result')
| socketio.emit('SingleWordsResult',wordsResult)
print('inc order')
order +=1
findwords.append(word.index(r))
finded.append(r)
break
for x in findwords:
word[x] = '!@'
if (not stt) or stop:
print('stop')
break
print('end startSingleWords')
@socketio.on('startSM')
def startSM(data):
index = data['index']
correct = data['correct']
corrects = ['일','이','삼','사','오','육','칠','팔','구','십']
print('start SM')
print(index,correct)
gsp = gspeech.Gspeech()
stop = False
@socketio.on('stopSM')
def stopSM():
stop = True
while True:
# 음성 인식 될때까지 대기 한다.
stt = gsp.getText()
finded = []
stt = stt.strip()
print(stt)
time.sleep(0.01)
if stt in [correct,corrects[int(correct)-1]]:
print('clear')
stop = True
Result = {'response':{'index':index,
'phase':0,
'result':{'response':int(correct) ,'score':1}}}
#print(wordsResult)
print('emit result')
socketio.emit('SMResult',Result)
if (not stt) or stop:
print('stop')
break
print('end SM')
@socketio.on('startSMM')
def startSMM(data):
index = data['index']
correct = data['correct']
print('start SMM')
print(index,correct)
gsp = gspeech.Gspeech()
stop = False
@socketio.on('stopSMM')
def stopSM():
stop = True
while True:
# 음성 인식 될때까지 대기 한다.
stt = gsp.getText()
stt = stt.strip()
print(stt)
time.sleep(0.01)
if stt == '예':
stop = True
Result = {'response':{'index':index,
'phase':0,
'result':{'response':True ,'score':1*(correct == 0)}}}
#print(wordsResult)
print('emit result')
socketio.emit('SMMResult',Result)
elif stt == '아니오' or stt=='아니요':
stop = True
Result = {'response':{'index':index,
'phase':0,
'result':{'response': False,'score':1*(correct == 1)}}}
#print(wordsResult)
print('emit result')
socketio.emit('SMMResult',Result)
if (not stt) or stop:
print('stop')
break
print('end SMM')
@socketio.on('startStickMemory')
def startStickMemory(element):
print(element)
info = datas[9].copy()
index = element['index']
correct = element['content']['correct']
print(info)
socketio.emit('startNarration',{'testType':info['type'],'narration':info['narration'],'questions':element})
gsp = gspeech.Gspeech()
stop = False
@socketio.on('stopStickMemory')
def stopStickMemory():
stop = True
while True:
# 음성 인식 될때까지 대기 한다.
stt = gsp.getText | identifier_name |
|
GameEngine.ts | input: I
}
export type RunParams<I extends BaseInput> = GameStateRecalculateWithInput<I> | { time: number; dt: number }
interface InputQueueItem<I extends BaseInput> {
input: I
stateId: number
ts: number
}
interface SetInputParams<I extends BaseInput> {
input: I
stateId?: number
ts?: number
}
interface StartGameLoopParams<I extends BaseInput, G extends BaseGameState<I>> {
fps: number
startTime?: number
gameTime?: number
onStateUpdate?: (g: G) => any
}
export class GameEngine<I extends BaseInput, G extends BaseGameState<I>> {
public startTime = 0
private runFn: EngineRunFn<I, G>
private states: G[] = []
private numStatesToKeep = 50
private exitGameLoopFn?: () => void
private inputQueue: InputQueueItem<I>[] = []
private getEngineRunHelpers = (state: G): EngineRunHelpers => {
const seed = [state.gameId, state.id].join("-")
return {
chance: () => new Chance(seed)
}
}
private isGameStateWithRelac = (obj: any): obj is GameStateRecalculateWithInput<I> => {
return obj && typeof obj.stateIdx === "number"
}
private replaceInput = (inputs: I[], updateInput: I) => {
const idx = inputs.findIndex(i => i.playerId === updateInput.playerId)
if (idx !== -1) {
inputs[idx] = updateInput
} else {
inputs.push(updateInput)
}
}
private replaceInputInState = (state: G, input: I) => {
this.replaceInput(state.inputs, input)
}
private processInputQueue = () => {
const { inputQueue } = this
const currentStateId = this.currentStateId()
const indicesToRemove: number[] = []
// first figure if you are in the past, if so, fast forward
const maxQueuedStateId = max(inputQueue.map(q => q.stateId))
if (maxQueuedStateId && maxQueuedStateId > currentStateId) {
const numStatesToFastForward = maxQueuedStateId - currentStateId
console.log("fast forwarding", numStatesToFastForward, "states to catch up")
const currentState = this.states[this.states.length - 1]
const { dt, time } = currentState
times(numStatesToFastForward, i => {
const stateTime = time * (i + 1)
this.run({ time: stateTime, dt })
})
}
for (let i = 0; i < inputQueue.length; i++) {
const queueItem = inputQueue[i]
const { input, stateId } = queueItem
const iii = input as any
const stateIdx = stateId === undefined ? -1 : this.states.findIndex(s => s.id === stateId)
console.log("received msg", this.states.length - 1 - stateIdx, "states in the past")
if (stateIdx === -1) {
console.log(`Set input packed arrived too late. ${stateId} is no longer in the array (processInputQueue)`)
} else {
console.log("handle input queue", stateId, JSON.stringify(iii.axis))
this.run({ stateIdx, input })
indicesToRemove.push(i)
}
}
indicesToRemove.reverse().forEach(i => inputQueue.splice(i, 1))
}
constructor(engineRunFn: EngineRunFn<I, G>, startingState: G) |
run = (params: RunParams<I>) => {
const { states } = this
if (!this.isGameStateWithRelac(params)) {
const { time, dt } = params
const state = cloneDeep(states[states.length - 1])
if (!state) {
throw new Error("GameEngine::run no state")
}
state.id += 1
state.time = time
state.dt = dt
const newState = this.runFn(state, this.getEngineRunHelpers(state))
states.push(newState)
// after we finish, make sure we only keep what we need
this.states = this.states.slice(-this.numStatesToKeep)
} else {
const { input } = params
const idx = params.stateIdx
if (!states[idx]) {
throw new Error("GameEngine::run no state")
}
for (let i = idx; i < states.length; i++) {
if (i === idx) {
// since this "correct" input would affect the next state, we dont
// change this state. just its input
this.replaceInputInState(states[i], input)
} else {
// the state at index i is inaccurate. however, we want to keep the other players' inputs from it
const s = states[i]
this.replaceInput(s.inputs, input)
// clone the previous state, generate new state from it
const toBeNewState = cloneDeep(states[i - 1])
toBeNewState.id = s.id
toBeNewState.time = s.time
toBeNewState.dt = s.dt
states[i] = this.runFn(toBeNewState, this.getEngineRunHelpers(toBeNewState))
// now re-apply the inputs to it so the next state we generate from this updated state is ok
states[i].inputs = s.inputs
}
}
}
}
setInput = (params: SetInputParams<I>) => {
let { ts } = params
const { input, stateId } = params
const { states } = this
// this is local input. no need to put it on the queue
if (stateId === undefined) {
// this is a new input that should be applied on the next run call
// we can effectively do this by replacing the input of the last
// state we have
if (states.length) {
this.replaceInputInState(states[states.length - 1], input)
}
} else {
if (!ts) {
ts = new Date().getTime()
}
// if state id is less than the very first state we have in the array,
// then this means we got this input too late. this means that the input packet
// took too long to get to us and we will be desynced. we need to request new states!
if (stateId < states[0].id) {
console.log(`Set input packed arrived too late. ${stateId} is no longer in the array`)
// TODO wolf, handle this
return
}
// figure out how far back in the past you are. this means you need to catch up
const iii = input as any
const existingIdx = this.inputQueue.findIndex(q => q.stateId === stateId && q.input.playerId === input.playerId)
if (existingIdx === -1) {
this.inputQueue.push({ input, stateId, ts })
console.log("Pushed to queue", stateId, JSON.stringify(iii.axis))
} else {
// replace with more up to date information
this.inputQueue[existingIdx] = { input, stateId, ts }
console.log("replaced queue item", stateId, JSON.stringify(iii.axis))
}
}
}
startGameLoop = (params: StartGameLoopParams<I, G>) => {
const { fps, onStateUpdate } = params
let { gameTime, startTime } = params
if (!startTime) {
startTime = new Date().getTime()
}
if (!gameTime) {
gameTime = 0
}
// kill any current loop if running
this.stopGameLoop()
// the tickTime basically tells us how often a frame is generated
const tickTimeMs = 1000 / fps
const timeTimeSeconds = tickTimeMs / 1000
const looperFn = typeof window === "undefined" ? setImmediate : requestAnimationFrame
this.numStatesToKeep = fps * 5
console.log("num states to keep", this.numStatesToKeep)
this.startTime = startTime
let time = gameTime
let quit = false
let accumulator = 0
let didUpdateState = false
let frameTime = this.startTime
let currentTime = new Date().getTime()
const loop = () => {
if (quit) {
console.log("Finished game loop after", time.valueOf(), "ms")
return
}
// do normal game loop
didUpdateState = false
const now = new Date().getTime()
frameTime = now - currentTime
accumulator += frameTime
currentTime = now
// when the accumulator builds up greater than tickTimeMs, step the simulation forward as many times as needed
while (accumulator >= tickTimeMs) {
didUpdateState = true
time += tickTimeMs
this.run({ time, dt: timeTimeSeconds })
accumulator -= tickTimeMs
}
// handle input queues only on ticks where the state was updated
if (didUpdateState) {
// process the input queue
this.processInputQueue()
// if there's a state update. do that
if | {
this.runFn = engineRunFn
this.states = [startingState]
} | identifier_body |
GameEngine.ts | input: I
}
export type RunParams<I extends BaseInput> = GameStateRecalculateWithInput<I> | { time: number; dt: number }
interface InputQueueItem<I extends BaseInput> {
input: I
stateId: number
ts: number
}
interface SetInputParams<I extends BaseInput> {
input: I
stateId?: number
ts?: number
}
interface StartGameLoopParams<I extends BaseInput, G extends BaseGameState<I>> {
fps: number
startTime?: number
gameTime?: number
onStateUpdate?: (g: G) => any
}
export class GameEngine<I extends BaseInput, G extends BaseGameState<I>> {
public startTime = 0
private runFn: EngineRunFn<I, G>
private states: G[] = []
private numStatesToKeep = 50
private exitGameLoopFn?: () => void
private inputQueue: InputQueueItem<I>[] = []
private getEngineRunHelpers = (state: G): EngineRunHelpers => {
const seed = [state.gameId, state.id].join("-")
return {
chance: () => new Chance(seed)
}
}
private isGameStateWithRelac = (obj: any): obj is GameStateRecalculateWithInput<I> => {
return obj && typeof obj.stateIdx === "number"
}
private replaceInput = (inputs: I[], updateInput: I) => {
const idx = inputs.findIndex(i => i.playerId === updateInput.playerId)
if (idx !== -1) {
inputs[idx] = updateInput
} else {
inputs.push(updateInput)
}
}
private replaceInputInState = (state: G, input: I) => {
this.replaceInput(state.inputs, input)
}
private processInputQueue = () => {
const { inputQueue } = this
const currentStateId = this.currentStateId()
const indicesToRemove: number[] = []
// first figure if you are in the past, if so, fast forward
const maxQueuedStateId = max(inputQueue.map(q => q.stateId))
if (maxQueuedStateId && maxQueuedStateId > currentStateId) {
const numStatesToFastForward = maxQueuedStateId - currentStateId
console.log("fast forwarding", numStatesToFastForward, "states to catch up")
const currentState = this.states[this.states.length - 1]
const { dt, time } = currentState
times(numStatesToFastForward, i => {
const stateTime = time * (i + 1)
this.run({ time: stateTime, dt })
})
}
for (let i = 0; i < inputQueue.length; i++) {
const queueItem = inputQueue[i]
const { input, stateId } = queueItem
const iii = input as any
const stateIdx = stateId === undefined ? -1 : this.states.findIndex(s => s.id === stateId)
console.log("received msg", this.states.length - 1 - stateIdx, "states in the past")
if (stateIdx === -1) {
console.log(`Set input packed arrived too late. ${stateId} is no longer in the array (processInputQueue)`)
} else {
console.log("handle input queue", stateId, JSON.stringify(iii.axis))
this.run({ stateIdx, input })
indicesToRemove.push(i)
}
}
indicesToRemove.reverse().forEach(i => inputQueue.splice(i, 1))
}
| (engineRunFn: EngineRunFn<I, G>, startingState: G) {
this.runFn = engineRunFn
this.states = [startingState]
}
run = (params: RunParams<I>) => {
const { states } = this
if (!this.isGameStateWithRelac(params)) {
const { time, dt } = params
const state = cloneDeep(states[states.length - 1])
if (!state) {
throw new Error("GameEngine::run no state")
}
state.id += 1
state.time = time
state.dt = dt
const newState = this.runFn(state, this.getEngineRunHelpers(state))
states.push(newState)
// after we finish, make sure we only keep what we need
this.states = this.states.slice(-this.numStatesToKeep)
} else {
const { input } = params
const idx = params.stateIdx
if (!states[idx]) {
throw new Error("GameEngine::run no state")
}
for (let i = idx; i < states.length; i++) {
if (i === idx) {
// since this "correct" input would affect the next state, we dont
// change this state. just its input
this.replaceInputInState(states[i], input)
} else {
// the state at index i is inaccurate. however, we want to keep the other players' inputs from it
const s = states[i]
this.replaceInput(s.inputs, input)
// clone the previous state, generate new state from it
const toBeNewState = cloneDeep(states[i - 1])
toBeNewState.id = s.id
toBeNewState.time = s.time
toBeNewState.dt = s.dt
states[i] = this.runFn(toBeNewState, this.getEngineRunHelpers(toBeNewState))
// now re-apply the inputs to it so the next state we generate from this updated state is ok
states[i].inputs = s.inputs
}
}
}
}
setInput = (params: SetInputParams<I>) => {
let { ts } = params
const { input, stateId } = params
const { states } = this
// this is local input. no need to put it on the queue
if (stateId === undefined) {
// this is a new input that should be applied on the next run call
// we can effectively do this by replacing the input of the last
// state we have
if (states.length) {
this.replaceInputInState(states[states.length - 1], input)
}
} else {
if (!ts) {
ts = new Date().getTime()
}
// if state id is less than the very first state we have in the array,
// then this means we got this input too late. this means that the input packet
// took too long to get to us and we will be desynced. we need to request new states!
if (stateId < states[0].id) {
console.log(`Set input packed arrived too late. ${stateId} is no longer in the array`)
// TODO wolf, handle this
return
}
// figure out how far back in the past you are. this means you need to catch up
const iii = input as any
const existingIdx = this.inputQueue.findIndex(q => q.stateId === stateId && q.input.playerId === input.playerId)
if (existingIdx === -1) {
this.inputQueue.push({ input, stateId, ts })
console.log("Pushed to queue", stateId, JSON.stringify(iii.axis))
} else {
// replace with more up to date information
this.inputQueue[existingIdx] = { input, stateId, ts }
console.log("replaced queue item", stateId, JSON.stringify(iii.axis))
}
}
}
startGameLoop = (params: StartGameLoopParams<I, G>) => {
const { fps, onStateUpdate } = params
let { gameTime, startTime } = params
if (!startTime) {
startTime = new Date().getTime()
}
if (!gameTime) {
gameTime = 0
}
// kill any current loop if running
this.stopGameLoop()
// the tickTime basically tells us how often a frame is generated
const tickTimeMs = 1000 / fps
const timeTimeSeconds = tickTimeMs / 1000
const looperFn = typeof window === "undefined" ? setImmediate : requestAnimationFrame
this.numStatesToKeep = fps * 5
console.log("num states to keep", this.numStatesToKeep)
this.startTime = startTime
let time = gameTime
let quit = false
let accumulator = 0
let didUpdateState = false
let frameTime = this.startTime
let currentTime = new Date().getTime()
const loop = () => {
if (quit) {
console.log("Finished game loop after", time.valueOf(), "ms")
return
}
// do normal game loop
didUpdateState = false
const now = new Date().getTime()
frameTime = now - currentTime
accumulator += frameTime
currentTime = now
// when the accumulator builds up greater than tickTimeMs, step the simulation forward as many times as needed
while (accumulator >= tickTimeMs) {
didUpdateState = true
time += tickTimeMs
this.run({ time, dt: timeTimeSeconds })
accumulator -= tickTimeMs
}
// handle input queues only on ticks where the state was updated
if (didUpdateState) {
// process the input queue
this.processInputQueue()
// if there's a state update. do that
if (on | constructor | identifier_name |
GameEngine.ts | input: I
}
export type RunParams<I extends BaseInput> = GameStateRecalculateWithInput<I> | { time: number; dt: number }
interface InputQueueItem<I extends BaseInput> {
input: I
stateId: number
ts: number
}
interface SetInputParams<I extends BaseInput> {
input: I
stateId?: number
ts?: number
}
interface StartGameLoopParams<I extends BaseInput, G extends BaseGameState<I>> {
fps: number
startTime?: number
gameTime?: number
onStateUpdate?: (g: G) => any
}
export class GameEngine<I extends BaseInput, G extends BaseGameState<I>> {
public startTime = 0
private runFn: EngineRunFn<I, G>
private states: G[] = []
private numStatesToKeep = 50
private exitGameLoopFn?: () => void
private inputQueue: InputQueueItem<I>[] = []
private getEngineRunHelpers = (state: G): EngineRunHelpers => {
const seed = [state.gameId, state.id].join("-")
return {
chance: () => new Chance(seed)
}
}
private isGameStateWithRelac = (obj: any): obj is GameStateRecalculateWithInput<I> => {
return obj && typeof obj.stateIdx === "number"
}
private replaceInput = (inputs: I[], updateInput: I) => {
const idx = inputs.findIndex(i => i.playerId === updateInput.playerId)
if (idx !== -1) {
inputs[idx] = updateInput
} else {
inputs.push(updateInput)
}
}
private replaceInputInState = (state: G, input: I) => {
this.replaceInput(state.inputs, input)
}
private processInputQueue = () => {
const { inputQueue } = this
const currentStateId = this.currentStateId()
const indicesToRemove: number[] = []
// first figure if you are in the past, if so, fast forward
const maxQueuedStateId = max(inputQueue.map(q => q.stateId))
if (maxQueuedStateId && maxQueuedStateId > currentStateId) {
const numStatesToFastForward = maxQueuedStateId - currentStateId
console.log("fast forwarding", numStatesToFastForward, "states to catch up")
const currentState = this.states[this.states.length - 1]
const { dt, time } = currentState
times(numStatesToFastForward, i => {
const stateTime = time * (i + 1)
this.run({ time: stateTime, dt })
})
}
for (let i = 0; i < inputQueue.length; i++) {
const queueItem = inputQueue[i]
const { input, stateId } = queueItem
const iii = input as any
const stateIdx = stateId === undefined ? -1 : this.states.findIndex(s => s.id === stateId)
console.log("received msg", this.states.length - 1 - stateIdx, "states in the past")
if (stateIdx === -1) {
console.log(`Set input packed arrived too late. ${stateId} is no longer in the array (processInputQueue)`)
} else {
console.log("handle input queue", stateId, JSON.stringify(iii.axis))
this.run({ stateIdx, input })
indicesToRemove.push(i)
}
}
indicesToRemove.reverse().forEach(i => inputQueue.splice(i, 1))
}
constructor(engineRunFn: EngineRunFn<I, G>, startingState: G) {
this.runFn = engineRunFn
this.states = [startingState]
}
run = (params: RunParams<I>) => {
const { states } = this
if (!this.isGameStateWithRelac(params)) {
const { time, dt } = params
const state = cloneDeep(states[states.length - 1])
if (!state) {
throw new Error("GameEngine::run no state")
}
state.id += 1
state.time = time
state.dt = dt
const newState = this.runFn(state, this.getEngineRunHelpers(state))
states.push(newState)
// after we finish, make sure we only keep what we need
this.states = this.states.slice(-this.numStatesToKeep)
} else {
const { input } = params
const idx = params.stateIdx
if (!states[idx]) {
throw new Error("GameEngine::run no state")
}
for (let i = idx; i < states.length; i++) {
if (i === idx) {
// since this "correct" input would affect the next state, we dont
// change this state. just its input
this.replaceInputInState(states[i], input)
} else {
// the state at index i is inaccurate. however, we want to keep the other players' inputs from it
const s = states[i]
this.replaceInput(s.inputs, input)
// clone the previous state, generate new state from it
const toBeNewState = cloneDeep(states[i - 1])
toBeNewState.id = s.id
toBeNewState.time = s.time
toBeNewState.dt = s.dt
states[i] = this.runFn(toBeNewState, this.getEngineRunHelpers(toBeNewState))
// now re-apply the inputs to it so the next state we generate from this updated state is ok
states[i].inputs = s.inputs
}
}
}
}
setInput = (params: SetInputParams<I>) => {
let { ts } = params
const { input, stateId } = params
const { states } = this
// this is local input. no need to put it on the queue
if (stateId === undefined) {
// this is a new input that should be applied on the next run call
// we can effectively do this by replacing the input of the last
// state we have
if (states.length) {
this.replaceInputInState(states[states.length - 1], input)
}
} else | } else {
// replace with more up to date information
this.inputQueue[existingIdx] = { input, stateId, ts }
console.log("replaced queue item", stateId, JSON.stringify(iii.axis))
}
}
}
startGameLoop = (params: StartGameLoopParams<I, G>) => {
const { fps, onStateUpdate } = params
let { gameTime, startTime } = params
if (!startTime) {
startTime = new Date().getTime()
}
if (!gameTime) {
gameTime = 0
}
// kill any current loop if running
this.stopGameLoop()
// the tickTime basically tells us how often a frame is generated
const tickTimeMs = 1000 / fps
const timeTimeSeconds = tickTimeMs / 1000
const looperFn = typeof window === "undefined" ? setImmediate : requestAnimationFrame
this.numStatesToKeep = fps * 5
console.log("num states to keep", this.numStatesToKeep)
this.startTime = startTime
let time = gameTime
let quit = false
let accumulator = 0
let didUpdateState = false
let frameTime = this.startTime
let currentTime = new Date().getTime()
const loop = () => {
if (quit) {
console.log("Finished game loop after", time.valueOf(), "ms")
return
}
// do normal game loop
didUpdateState = false
const now = new Date().getTime()
frameTime = now - currentTime
accumulator += frameTime
currentTime = now
// when the accumulator builds up greater than tickTimeMs, step the simulation forward as many times as needed
while (accumulator >= tickTimeMs) {
didUpdateState = true
time += tickTimeMs
this.run({ time, dt: timeTimeSeconds })
accumulator -= tickTimeMs
}
// handle input queues only on ticks where the state was updated
if (didUpdateState) {
// process the input queue
this.processInputQueue()
// if there's a state update. do that
if ( | {
if (!ts) {
ts = new Date().getTime()
}
// if state id is less than the very first state we have in the array,
// then this means we got this input too late. this means that the input packet
// took too long to get to us and we will be desynced. we need to request new states!
if (stateId < states[0].id) {
console.log(`Set input packed arrived too late. ${stateId} is no longer in the array`)
// TODO wolf, handle this
return
}
// figure out how far back in the past you are. this means you need to catch up
const iii = input as any
const existingIdx = this.inputQueue.findIndex(q => q.stateId === stateId && q.input.playerId === input.playerId)
if (existingIdx === -1) {
this.inputQueue.push({ input, stateId, ts })
console.log("Pushed to queue", stateId, JSON.stringify(iii.axis)) | conditional_block |
GameEngine.ts | Input> {
input: I
stateId?: number
ts?: number
}
interface StartGameLoopParams<I extends BaseInput, G extends BaseGameState<I>> {
fps: number
startTime?: number
gameTime?: number
onStateUpdate?: (g: G) => any
}
export class GameEngine<I extends BaseInput, G extends BaseGameState<I>> {
public startTime = 0
private runFn: EngineRunFn<I, G>
private states: G[] = []
private numStatesToKeep = 50
private exitGameLoopFn?: () => void
private inputQueue: InputQueueItem<I>[] = []
private getEngineRunHelpers = (state: G): EngineRunHelpers => {
const seed = [state.gameId, state.id].join("-")
return {
chance: () => new Chance(seed)
}
}
private isGameStateWithRelac = (obj: any): obj is GameStateRecalculateWithInput<I> => {
return obj && typeof obj.stateIdx === "number"
}
private replaceInput = (inputs: I[], updateInput: I) => {
const idx = inputs.findIndex(i => i.playerId === updateInput.playerId)
if (idx !== -1) {
inputs[idx] = updateInput
} else {
inputs.push(updateInput)
}
}
private replaceInputInState = (state: G, input: I) => {
this.replaceInput(state.inputs, input)
}
private processInputQueue = () => {
const { inputQueue } = this
const currentStateId = this.currentStateId()
const indicesToRemove: number[] = []
// first figure if you are in the past, if so, fast forward
const maxQueuedStateId = max(inputQueue.map(q => q.stateId))
if (maxQueuedStateId && maxQueuedStateId > currentStateId) {
const numStatesToFastForward = maxQueuedStateId - currentStateId
console.log("fast forwarding", numStatesToFastForward, "states to catch up")
const currentState = this.states[this.states.length - 1]
const { dt, time } = currentState
times(numStatesToFastForward, i => {
const stateTime = time * (i + 1)
this.run({ time: stateTime, dt })
})
}
for (let i = 0; i < inputQueue.length; i++) {
const queueItem = inputQueue[i]
const { input, stateId } = queueItem
const iii = input as any
const stateIdx = stateId === undefined ? -1 : this.states.findIndex(s => s.id === stateId)
console.log("received msg", this.states.length - 1 - stateIdx, "states in the past")
if (stateIdx === -1) {
console.log(`Set input packed arrived too late. ${stateId} is no longer in the array (processInputQueue)`)
} else {
console.log("handle input queue", stateId, JSON.stringify(iii.axis))
this.run({ stateIdx, input })
indicesToRemove.push(i)
}
}
indicesToRemove.reverse().forEach(i => inputQueue.splice(i, 1))
}
constructor(engineRunFn: EngineRunFn<I, G>, startingState: G) {
this.runFn = engineRunFn
this.states = [startingState]
}
run = (params: RunParams<I>) => {
const { states } = this
if (!this.isGameStateWithRelac(params)) {
const { time, dt } = params
const state = cloneDeep(states[states.length - 1])
if (!state) {
throw new Error("GameEngine::run no state")
}
state.id += 1
state.time = time
state.dt = dt
const newState = this.runFn(state, this.getEngineRunHelpers(state))
states.push(newState)
// after we finish, make sure we only keep what we need
this.states = this.states.slice(-this.numStatesToKeep)
} else {
const { input } = params
const idx = params.stateIdx
if (!states[idx]) {
throw new Error("GameEngine::run no state")
}
for (let i = idx; i < states.length; i++) {
if (i === idx) {
// since this "correct" input would affect the next state, we dont
// change this state. just its input
this.replaceInputInState(states[i], input)
} else {
// the state at index i is inaccurate. however, we want to keep the other players' inputs from it
const s = states[i]
this.replaceInput(s.inputs, input)
// clone the previous state, generate new state from it
const toBeNewState = cloneDeep(states[i - 1])
toBeNewState.id = s.id
toBeNewState.time = s.time
toBeNewState.dt = s.dt
states[i] = this.runFn(toBeNewState, this.getEngineRunHelpers(toBeNewState))
// now re-apply the inputs to it so the next state we generate from this updated state is ok
states[i].inputs = s.inputs
}
}
}
}
setInput = (params: SetInputParams<I>) => {
let { ts } = params
const { input, stateId } = params
const { states } = this
// this is local input. no need to put it on the queue
if (stateId === undefined) {
// this is a new input that should be applied on the next run call
// we can effectively do this by replacing the input of the last
// state we have
if (states.length) {
this.replaceInputInState(states[states.length - 1], input)
}
} else {
if (!ts) {
ts = new Date().getTime()
}
// if state id is less than the very first state we have in the array,
// then this means we got this input too late. this means that the input packet
// took too long to get to us and we will be desynced. we need to request new states!
if (stateId < states[0].id) {
console.log(`Set input packed arrived too late. ${stateId} is no longer in the array`)
// TODO wolf, handle this
return
}
// figure out how far back in the past you are. this means you need to catch up
const iii = input as any
const existingIdx = this.inputQueue.findIndex(q => q.stateId === stateId && q.input.playerId === input.playerId)
if (existingIdx === -1) {
this.inputQueue.push({ input, stateId, ts })
console.log("Pushed to queue", stateId, JSON.stringify(iii.axis))
} else {
// replace with more up to date information
this.inputQueue[existingIdx] = { input, stateId, ts }
console.log("replaced queue item", stateId, JSON.stringify(iii.axis))
}
}
}
startGameLoop = (params: StartGameLoopParams<I, G>) => {
const { fps, onStateUpdate } = params
let { gameTime, startTime } = params
if (!startTime) {
startTime = new Date().getTime()
}
if (!gameTime) {
gameTime = 0
}
// kill any current loop if running
this.stopGameLoop()
// the tickTime basically tells us how often a frame is generated
const tickTimeMs = 1000 / fps
const timeTimeSeconds = tickTimeMs / 1000
const looperFn = typeof window === "undefined" ? setImmediate : requestAnimationFrame
this.numStatesToKeep = fps * 5
console.log("num states to keep", this.numStatesToKeep)
this.startTime = startTime
let time = gameTime
let quit = false
let accumulator = 0
let didUpdateState = false
let frameTime = this.startTime
let currentTime = new Date().getTime()
const loop = () => {
if (quit) {
console.log("Finished game loop after", time.valueOf(), "ms")
return
}
// do normal game loop
didUpdateState = false
const now = new Date().getTime()
frameTime = now - currentTime
accumulator += frameTime
currentTime = now
// when the accumulator builds up greater than tickTimeMs, step the simulation forward as many times as needed
while (accumulator >= tickTimeMs) {
didUpdateState = true
time += tickTimeMs
this.run({ time, dt: timeTimeSeconds })
accumulator -= tickTimeMs
}
// handle input queues only on ticks where the state was updated
if (didUpdateState) {
// process the input queue
this.processInputQueue()
// if there's a state update. do that
if (onStateUpdate) {
onStateUpdate(this.currentState())
}
}
looperFn(loop)
}
loop()
this.exitGameLoopFn = () => (quit = true) | }
stopGameLoop = () => {
if (this.exitGameLoopFn) {
this.exitGameLoopFn() | random_line_split |
|
3d_LJ_nList.py | ')
nMax = x.size
fout_xyz.write("{}\n".format(nMax))
fout_xyz.write("comment\n")
for i in range(nMax):
fout_xyz.write("1 {} {} {}\n".format(x[i], y[i], z[i]))
fout_xyz.close()
return
@jit
def computeForces(x,y,z,natoms,sigma,epsilon):
fx[:] = 0.0
fy[:] = 0.0
fz[:] = 0.0
PE = 0.0
virial = 0.0
for i in range(natoms):
for j in range(natoms):
#avoid the self interaction.
if (j != i):
#calculate distance b/w i and j particles.
dx = x[i] - x[j]
dy = y[i] - y[j]
dz = z[i] - z[j]
# minimum image convention.
dx = dx - np.round(dx/lx)*lx
dy = dy - np.round(dy/ly)*ly
dz = dz - np.round(dz/lz)*lz
# distance b/w i and j particles.
dr = np.sqrt(dx**2 + dy**2 + dz**2)
# now calculate the force.
sr6 = (sigma/dr)**6.0
rinv = 1.0/dr
rinv2 = rinv**2.0
comn_frc_term = 48.0*epsilon*sr6*(sr6 - 0.5)*rinv2
fx[i] = fx[i] + comn_frc_term*dx
fy[i] = fy[i] + comn_frc_term*dy
fz[i] = fz[i] + comn_frc_term*dz
# calculate potential energy here.
pot_term = 4.0*epsilon*sr6*(sr6 - 1.0)
PE = PE + pot_term
# calculation of virial.
vir_term = dx*fx[i] + dy*fy[i] + dz*fz[i]
virial = virial + vir_term
PE = PE * 0.5
virial = virial * 0.5
return [fx,fy,fz,PE,virial]
@jit
def VelocityVerlet_step_1(x,y,z,vx,vy,vz,fx,fy,fz,N,dt,mass):
# this does the first step of V-V algorithm.
for i in range(N):
# position update
x[i] = x[i] + vx[i]*dt + 0.5*fx[i]/mass * dt**2.0
y[i] = y[i] + vy[i]*dt + 0.5*fy[i]/mass * dt**2.0
z[i] = z[i] + vz[i]*dt + 0.5*fz[i]/mass * dt**2.0
# velocity update.
vx[i] = vx[i] + fx[i]*dt*0.5
vy[i] = vy[i] + fy[i]*dt*0.5
vz[i] = vz[i] + fz[i]*dt*0.5
return [x,y,z,vx,vy,vz]
@jit
def VelocityVerlet_step_2(vx,vy,vz,fx,fy,fz,N,dt,mass):
# update only velocities. and calculate Kinetic energy.
KE = 0.0
for i in range(N):
vx[i] = vx[i] + fx[i]*dt*0.5
vy[i] = vy[i] + fy[i]*dt*0.5
vz[i] = vz[i] + fz[i]*dt*0.5
KE = KE + (vx[i]**2.0 + vy[i]**2.0 + vz[i]**2.0)*mass*0.5
return [vx,vy,vz,KE]
#======== function which will calculate the neighbor list.
@jit
def get_Neighbor_List(natoms,x,y,z,lx,ly,lz,sigma,rcut,vskin): # Siva, 19 Sept, 2021.
Distances = np.zeros((natoms,natoms))
nCount[:] = 0
nList[:,:] = 0
for i in range(natoms):
Distances[i,i] = lx
for j in range(natoms):
if(j != i):
dx = x[i] - x[j]
dy = y[i] - y[j]
dz = z[i] - z[j]
#minimum image convention.
dx = dx - np.round(dx/lx)*lx
dy = dy - np.round(dy/ly)*ly
dz = dz - np.round(dz/lz)*lz
rij = np.sqrt(dx**2 + dy**2 + dz**2)
Distances[i,j] = rij
#Distances[j,i] = Distances[i,j]
verlet_R = (rcut+vskin)*sigma
if(rij < verlet_R):
nCount[i] = nCount[i]+1
k = nCount[i]
# start_index = i*natoms
nList[i, k-1] = j
else:
continue
return [nCount,nList,Distances]
#======== function which will compute the forces on all the particles,
#======== using the list of neighbors for every particle.
@jit
def | (natoms,x,y,z,nCount,nList,sigma,epsilon,lx,ly,lz,fx,fy,fz): # Siva, 19 Sept, 2021.
fx[:] = 0.0
fy[:] = 0.0
fz[:] = 0.0
PE = 0.0
virial = 0.0
for i in range(natoms):
#
for k in range(nCount[i]):
#starting = i*natoms
j = nList[i, k]
#
if(j != i):
#calculate the distance
dx = x[i]-x[j]
dy = y[i]-y[j]
dz = z[i]-z[j]
#minimum image.
dx = dx - np.round(dx/lx)*lx
dy = dy - np.round(dy/ly)*ly
dz = dz - np.round(dz/lz)*lz
rij = np.sqrt(dx**2.0 + dy**2.0 + dz**2.0)
rij2 = rij**2.0
rcut2 = rcut**2.0
if(rij2 < rcut2):
# need to calculate the force.
rinv = 1.0/rij
rinv2 = rinv**2.0
sr6 = (sigma/rij)**6.0
src6 = (sigma/rcut)**6.0
rcinv = 1.0/rcut
rcinv2 = rcinv**2.0
#
#use LJ potential, with predefined cut-off.
frc_common = 48.0*epsilon*sr6*(sr6 - 0.5)*rinv2
fx[i] = fx[i] + frc_common*dx
fy[i] = fy[i] + frc_common*dy
fz[i] = fz[i] + frc_common*dz
# shifting for the potential force-shifting.
frc_shift = 48.0*epsilon*src6*(src6 - 0.5)*rcinv2
fx_shift = frc_shift*dx
fy_shift = frc_shift*dy
fz_shift = frc_shift*dz
#shift it.
fx[i] = fx[i] - fx_shift
fy[i] = fy[i] - fy_shift
fz[i] = fz[i] - fz_shift
# now calculate PE & virial.
pot_lj = 4.0*epsilon*sr6*(sr6 - 1.0)
pot_rc = 4.0*epsilon*src6*(src6 - 1.0)
pot_fs = -48.0*epsilon*src6*(src6 - 0.5)*rcinv
# add all the components./ shifting.
PE = PE + pot_lj - pot_rc - (rij - rcut)*pot_fs
virial = virial + (dx*fx[i] + dy*fy[i] + dz*fz[i])
else:
continue
PE = PE*0.5
virial = virial*0.5
#
return [fx,fy,fz,PE,virial]
@jit
def applyPBC(N,x,y,z,lx,ly,lz):
x = x - np.round(x/lx)*lx
y = y - np.round(y/ly)*ly
z = z - np.round(z/l | compute_Forces_nbrList | identifier_name |
3d_LJ_nList.py | iz = 0
else:
iz = iz +1
else:
iy = iy + 1
else:
ix = ix + 1
x[i] = sigma/2.0 + ix*(dx + sigma)
y[i] = sigma/2.0 + iy*(dx + sigma)
z[i] = sigma/2.0 + iz*(dx + sigma)
return [x,y,z,lx,ly,lz]
def write_xyz_file(filename,x,y,z):
fout_xyz = open(filename, 'w+')
nMax = x.size
fout_xyz.write("{}\n".format(nMax))
fout_xyz.write("comment\n")
for i in range(nMax):
fout_xyz.write("1 {} {} {}\n".format(x[i], y[i], z[i]))
fout_xyz.close()
return
@jit
def computeForces(x,y,z,natoms,sigma,epsilon):
fx[:] = 0.0
fy[:] = 0.0
fz[:] = 0.0
PE = 0.0
virial = 0.0
for i in range(natoms):
for j in range(natoms):
#avoid the self interaction.
if (j != i):
#calculate distance b/w i and j particles.
dx = x[i] - x[j]
dy = y[i] - y[j]
dz = z[i] - z[j]
# minimum image convention.
dx = dx - np.round(dx/lx)*lx
dy = dy - np.round(dy/ly)*ly
dz = dz - np.round(dz/lz)*lz
# distance b/w i and j particles.
dr = np.sqrt(dx**2 + dy**2 + dz**2)
# now calculate the force.
sr6 = (sigma/dr)**6.0
rinv = 1.0/dr
rinv2 = rinv**2.0
comn_frc_term = 48.0*epsilon*sr6*(sr6 - 0.5)*rinv2
fx[i] = fx[i] + comn_frc_term*dx
fy[i] = fy[i] + comn_frc_term*dy
fz[i] = fz[i] + comn_frc_term*dz
# calculate potential energy here.
pot_term = 4.0*epsilon*sr6*(sr6 - 1.0)
PE = PE + pot_term
# calculation of virial.
vir_term = dx*fx[i] + dy*fy[i] + dz*fz[i]
virial = virial + vir_term
PE = PE * 0.5
virial = virial * 0.5
return [fx,fy,fz,PE,virial]
@jit
def VelocityVerlet_step_1(x,y,z,vx,vy,vz,fx,fy,fz,N,dt,mass):
# this does the first step of V-V algorithm.
for i in range(N):
# position update
x[i] = x[i] + vx[i]*dt + 0.5*fx[i]/mass * dt**2.0
y[i] = y[i] + vy[i]*dt + 0.5*fy[i]/mass * dt**2.0
z[i] = z[i] + vz[i]*dt + 0.5*fz[i]/mass * dt**2.0
# velocity update.
vx[i] = vx[i] + fx[i]*dt*0.5
vy[i] = vy[i] + fy[i]*dt*0.5
vz[i] = vz[i] + fz[i]*dt*0.5
return [x,y,z,vx,vy,vz]
@jit
def VelocityVerlet_step_2(vx,vy,vz,fx,fy,fz,N,dt,mass):
# update only velocities. and calculate Kinetic energy.
KE = 0.0
for i in range(N):
vx[i] = vx[i] + fx[i]*dt*0.5
vy[i] = vy[i] + fy[i]*dt*0.5
vz[i] = vz[i] + fz[i]*dt*0.5
KE = KE + (vx[i]**2.0 + vy[i]**2.0 + vz[i]**2.0)*mass*0.5
return [vx,vy,vz,KE]
#======== function which will calculate the neighbor list.
@jit
def get_Neighbor_List(natoms,x,y,z,lx,ly,lz,sigma,rcut,vskin): # Siva, 19 Sept, 2021.
Distances = np.zeros((natoms,natoms))
nCount[:] = 0
nList[:,:] = 0
for i in range(natoms):
Distances[i,i] = lx
for j in range(natoms):
if(j != i):
dx = x[i] - x[j]
dy = y[i] - y[j]
dz = z[i] - z[j]
#minimum image convention.
dx = dx - np.round(dx/lx)*lx
dy = dy - np.round(dy/ly)*ly
dz = dz - np.round(dz/lz)*lz
rij = np.sqrt(dx**2 + dy**2 + dz**2)
Distances[i,j] = rij
#Distances[j,i] = Distances[i,j]
verlet_R = (rcut+vskin)*sigma
if(rij < verlet_R):
nCount[i] = nCount[i]+1
k = nCount[i]
# start_index = i*natoms
nList[i, k-1] = j
else:
continue
return [nCount,nList,Distances]
#======== function which will compute the forces on all the particles,
#======== using the list of neighbors for every particle.
@jit
def compute_Forces_nbrList(natoms,x,y,z,nCount,nList,sigma,epsilon,lx,ly,lz,fx,fy,fz): # Siva, 19 Sept, 2021.
fx[:] = 0.0
fy[:] = 0.0
fz[:] = 0.0
PE = 0.0
virial = 0.0
for i in range(natoms):
#
for k in range(nCount[i]):
#starting = i*natoms
j = nList[i, k]
#
if(j != i):
#calculate the distance
dx = x[i]-x[j]
dy = y[i]-y[j]
dz = z[i]-z[j]
#minimum image.
dx = dx - np.round(dx/lx)*lx
dy = dy - np.round(dy/ly)*ly
dz = dz - np.round(dz/lz)*lz
rij = np.sqrt(dx**2.0 + dy**2.0 + dz**2.0)
rij2 = rij**2.0
rcut2 = rcut**2.0
if(rij2 < rcut2):
# need to calculate the force.
rinv = 1.0/rij
rinv2 = rinv**2.0
sr6 = (sigma/rij)**6.0
src6 = (sigma/rcut)**6.0
rcinv = 1.0/rcut
rcinv2 = rcinv**2.0
#
#use LJ potential, with predefined cut-off.
frc_common = 48.0*epsilon*sr6*(sr6 - 0.5)*rinv2
fx[i] = fx[i] + frc_common*dx
fy[i] = fy[i] + frc_common*dy
fz[i] = fz[i] + frc_common*dz
# shifting for the potential force-shifting.
frc_shift = 48.0*epsilon*src6*(src6 - 0.5)*rcinv2
fx_shift = frc_shift*dx
fy_shift = frc_shift*dy
fz_shift = frc_shift*dz
#shift it.
fx[i] = fx[i] - fx_shift
fy[i] = fy[i] - fy_shift
fz[i] = fz[i] - fz_shift
# now calculate PE & virial.
pot_lj = 4.0*epsilon*sr6*(sr6 - 1.0)
pot_rc = 4.0*epsilon*src6*(src6 - 1.0)
pot_fs = -48.0*epsilon*src6*(src6 | if (i % (nx)**2 == 0):
iy = 0
if (i % (nx)**3 == 0):
| random_line_split |
|
3d_LJ_nList.py | ')
nMax = x.size
fout_xyz.write("{}\n".format(nMax))
fout_xyz.write("comment\n")
for i in range(nMax):
fout_xyz.write("1 {} {} {}\n".format(x[i], y[i], z[i]))
fout_xyz.close()
return
@jit
def computeForces(x,y,z,natoms,sigma,epsilon):
fx[:] = 0.0
fy[:] = 0.0
fz[:] = 0.0
PE = 0.0
virial = 0.0
for i in range(natoms):
for j in range(natoms):
#avoid the self interaction.
if (j != i):
#calculate distance b/w i and j particles.
dx = x[i] - x[j]
dy = y[i] - y[j]
dz = z[i] - z[j]
# minimum image convention.
dx = dx - np.round(dx/lx)*lx
dy = dy - np.round(dy/ly)*ly
dz = dz - np.round(dz/lz)*lz
# distance b/w i and j particles.
dr = np.sqrt(dx**2 + dy**2 + dz**2)
# now calculate the force.
sr6 = (sigma/dr)**6.0
rinv = 1.0/dr
rinv2 = rinv**2.0
comn_frc_term = 48.0*epsilon*sr6*(sr6 - 0.5)*rinv2
fx[i] = fx[i] + comn_frc_term*dx
fy[i] = fy[i] + comn_frc_term*dy
fz[i] = fz[i] + comn_frc_term*dz
# calculate potential energy here.
pot_term = 4.0*epsilon*sr6*(sr6 - 1.0)
PE = PE + pot_term
# calculation of virial.
vir_term = dx*fx[i] + dy*fy[i] + dz*fz[i]
virial = virial + vir_term
PE = PE * 0.5
virial = virial * 0.5
return [fx,fy,fz,PE,virial]
@jit
def VelocityVerlet_step_1(x,y,z,vx,vy,vz,fx,fy,fz,N,dt,mass):
# this does the first step of V-V algorithm.
for i in range(N):
# position update
x[i] = x[i] + vx[i]*dt + 0.5*fx[i]/mass * dt**2.0
y[i] = y[i] + vy[i]*dt + 0.5*fy[i]/mass * dt**2.0
z[i] = z[i] + vz[i]*dt + 0.5*fz[i]/mass * dt**2.0
# velocity update.
vx[i] = vx[i] + fx[i]*dt*0.5
vy[i] = vy[i] + fy[i]*dt*0.5
vz[i] = vz[i] + fz[i]*dt*0.5
return [x,y,z,vx,vy,vz]
@jit
def VelocityVerlet_step_2(vx,vy,vz,fx,fy,fz,N,dt,mass):
# update only velocities. and calculate Kinetic energy.
KE = 0.0
for i in range(N):
vx[i] = vx[i] + fx[i]*dt*0.5
vy[i] = vy[i] + fy[i]*dt*0.5
vz[i] = vz[i] + fz[i]*dt*0.5
KE = KE + (vx[i]**2.0 + vy[i]**2.0 + vz[i]**2.0)*mass*0.5
return [vx,vy,vz,KE]
#======== function which will calculate the neighbor list.
@jit
def get_Neighbor_List(natoms,x,y,z,lx,ly,lz,sigma,rcut,vskin): # Siva, 19 Sept, 2021.
Distances = np.zeros((natoms,natoms))
nCount[:] = 0
nList[:,:] = 0
for i in range(natoms):
Distances[i,i] = lx
for j in range(natoms):
if(j != i):
dx = x[i] - x[j]
dy = y[i] - y[j]
dz = z[i] - z[j]
#minimum image convention.
dx = dx - np.round(dx/lx)*lx
dy = dy - np.round(dy/ly)*ly
dz = dz - np.round(dz/lz)*lz
rij = np.sqrt(dx**2 + dy**2 + dz**2)
Distances[i,j] = rij
#Distances[j,i] = Distances[i,j]
verlet_R = (rcut+vskin)*sigma
if(rij < verlet_R):
nCount[i] = nCount[i]+1
k = nCount[i]
# start_index = i*natoms
nList[i, k-1] = j
else:
continue
return [nCount,nList,Distances]
#======== function which will compute the forces on all the particles,
#======== using the list of neighbors for every particle.
@jit
def compute_Forces_nbrList(natoms,x,y,z,nCount,nList,sigma,epsilon,lx,ly,lz,fx,fy,fz): # Siva, 19 Sept, 2021.
| dy = dy - np.round(dy/ly)*ly
dz = dz - np.round(dz/lz)*lz
rij = np.sqrt(dx**2.0 + dy**2.0 + dz**2.0)
rij2 = rij**2.0
rcut2 = rcut**2.0
if(rij2 < rcut2):
# need to calculate the force.
rinv = 1.0/rij
rinv2 = rinv**2.0
sr6 = (sigma/rij)**6.0
src6 = (sigma/rcut)**6.0
rcinv = 1.0/rcut
rcinv2 = rcinv**2.0
#
#use LJ potential, with predefined cut-off.
frc_common = 48.0*epsilon*sr6*(sr6 - 0.5)*rinv2
fx[i] = fx[i] + frc_common*dx
fy[i] = fy[i] + frc_common*dy
fz[i] = fz[i] + frc_common*dz
# shifting for the potential force-shifting.
frc_shift = 48.0*epsilon*src6*(src6 - 0.5)*rcinv2
fx_shift = frc_shift*dx
fy_shift = frc_shift*dy
fz_shift = frc_shift*dz
#shift it.
fx[i] = fx[i] - fx_shift
fy[i] = fy[i] - fy_shift
fz[i] = fz[i] - fz_shift
# now calculate PE & virial.
pot_lj = 4.0*epsilon*sr6*(sr6 - 1.0)
pot_rc = 4.0*epsilon*src6*(src6 - 1.0)
pot_fs = -48.0*epsilon*src6*(src6 - 0.5)*rcinv
# add all the components./ shifting.
PE = PE + pot_lj - pot_rc - (rij - rcut)*pot_fs
virial = virial + (dx*fx[i] + dy*fy[i] + dz*fz[i])
else:
continue
PE = PE*0.5
virial = virial*0.5
#
return [fx,fy,fz,PE,virial]
@jit
def applyPBC(N,x,y,z,lx,ly,lz):
x = x - np.round(x/lx)*lx
y = y - np.round(y/ly)*ly
z = z - np.round(z/lz | fx[:] = 0.0
fy[:] = 0.0
fz[:] = 0.0
PE = 0.0
virial = 0.0
for i in range(natoms):
#
for k in range(nCount[i]):
#starting = i*natoms
j = nList[i, k]
#
if(j != i):
#calculate the distance
dx = x[i]-x[j]
dy = y[i]-y[j]
dz = z[i]-z[j]
#minimum image.
dx = dx - np.round(dx/lx)*lx
| identifier_body |
3d_LJ_nList.py | ')
nMax = x.size
fout_xyz.write("{}\n".format(nMax))
fout_xyz.write("comment\n")
for i in range(nMax):
fout_xyz.write("1 {} {} {}\n".format(x[i], y[i], z[i]))
fout_xyz.close()
return
@jit
def computeForces(x,y,z,natoms,sigma,epsilon):
fx[:] = 0.0
fy[:] = 0.0
fz[:] = 0.0
PE = 0.0
virial = 0.0
for i in range(natoms):
for j in range(natoms):
#avoid the self interaction.
if (j != i):
#calculate distance b/w i and j particles.
dx = x[i] - x[j]
dy = y[i] - y[j]
dz = z[i] - z[j]
# minimum image convention.
dx = dx - np.round(dx/lx)*lx
dy = dy - np.round(dy/ly)*ly
dz = dz - np.round(dz/lz)*lz
# distance b/w i and j particles.
dr = np.sqrt(dx**2 + dy**2 + dz**2)
# now calculate the force.
sr6 = (sigma/dr)**6.0
rinv = 1.0/dr
rinv2 = rinv**2.0
comn_frc_term = 48.0*epsilon*sr6*(sr6 - 0.5)*rinv2
fx[i] = fx[i] + comn_frc_term*dx
fy[i] = fy[i] + comn_frc_term*dy
fz[i] = fz[i] + comn_frc_term*dz
# calculate potential energy here.
pot_term = 4.0*epsilon*sr6*(sr6 - 1.0)
PE = PE + pot_term
# calculation of virial.
vir_term = dx*fx[i] + dy*fy[i] + dz*fz[i]
virial = virial + vir_term
PE = PE * 0.5
virial = virial * 0.5
return [fx,fy,fz,PE,virial]
@jit
def VelocityVerlet_step_1(x,y,z,vx,vy,vz,fx,fy,fz,N,dt,mass):
# this does the first step of V-V algorithm.
for i in range(N):
# position update
x[i] = x[i] + vx[i]*dt + 0.5*fx[i]/mass * dt**2.0
y[i] = y[i] + vy[i]*dt + 0.5*fy[i]/mass * dt**2.0
z[i] = z[i] + vz[i]*dt + 0.5*fz[i]/mass * dt**2.0
# velocity update.
vx[i] = vx[i] + fx[i]*dt*0.5
vy[i] = vy[i] + fy[i]*dt*0.5
vz[i] = vz[i] + fz[i]*dt*0.5
return [x,y,z,vx,vy,vz]
@jit
def VelocityVerlet_step_2(vx,vy,vz,fx,fy,fz,N,dt,mass):
# update only velocities. and calculate Kinetic energy.
KE = 0.0
for i in range(N):
vx[i] = vx[i] + fx[i]*dt*0.5
vy[i] = vy[i] + fy[i]*dt*0.5
vz[i] = vz[i] + fz[i]*dt*0.5
KE = KE + (vx[i]**2.0 + vy[i]**2.0 + vz[i]**2.0)*mass*0.5
return [vx,vy,vz,KE]
#======== function which will calculate the neighbor list.
@jit
def get_Neighbor_List(natoms,x,y,z,lx,ly,lz,sigma,rcut,vskin): # Siva, 19 Sept, 2021.
Distances = np.zeros((natoms,natoms))
nCount[:] = 0
nList[:,:] = 0
for i in range(natoms):
Distances[i,i] = lx
for j in range(natoms):
if(j != i):
dx = x[i] - x[j]
dy = y[i] - y[j]
dz = z[i] - z[j]
#minimum image convention.
dx = dx - np.round(dx/lx)*lx
dy = dy - np.round(dy/ly)*ly
dz = dz - np.round(dz/lz)*lz
rij = np.sqrt(dx**2 + dy**2 + dz**2)
Distances[i,j] = rij
#Distances[j,i] = Distances[i,j]
verlet_R = (rcut+vskin)*sigma
if(rij < verlet_R):
nCount[i] = nCount[i]+1
k = nCount[i]
# start_index = i*natoms
nList[i, k-1] = j
else:
continue
return [nCount,nList,Distances]
#======== function which will compute the forces on all the particles,
#======== using the list of neighbors for every particle.
@jit
def compute_Forces_nbrList(natoms,x,y,z,nCount,nList,sigma,epsilon,lx,ly,lz,fx,fy,fz): # Siva, 19 Sept, 2021.
fx[:] = 0.0
fy[:] = 0.0
fz[:] = 0.0
PE = 0.0
virial = 0.0
for i in range(natoms):
#
for k in range(nCount[i]):
#starting = i*natoms
j = nList[i, k]
#
if(j != i):
#calculate the distance
dx = x[i]-x[j]
dy = y[i]-y[j]
dz = z[i]-z[j]
#minimum image.
dx = dx - np.round(dx/lx)*lx
dy = dy - np.round(dy/ly)*ly
dz = dz - np.round(dz/lz)*lz
rij = np.sqrt(dx**2.0 + dy**2.0 + dz**2.0)
rij2 = rij**2.0
rcut2 = rcut**2.0
if(rij2 < rcut2):
# need to calculate the force.
rinv = 1.0/rij
rinv2 = rinv**2.0
sr6 = (sigma/rij)**6.0
src6 = (sigma/rcut)**6.0
rcinv = 1.0/rcut
rcinv2 = rcinv**2.0
#
#use LJ potential, with predefined cut-off.
frc_common = 48.0*epsilon*sr6*(sr6 - 0.5)*rinv2
fx[i] = fx[i] + frc_common*dx
fy[i] = fy[i] + frc_common*dy
fz[i] = fz[i] + frc_common*dz
# shifting for the potential force-shifting.
frc_shift = 48.0*epsilon*src6*(src6 - 0.5)*rcinv2
fx_shift = frc_shift*dx
fy_shift = frc_shift*dy
fz_shift = frc_shift*dz
#shift it.
fx[i] = fx[i] - fx_shift
fy[i] = fy[i] - fy_shift
fz[i] = fz[i] - fz_shift
# now calculate PE & virial.
pot_lj = 4.0*epsilon*sr6*(sr6 - 1.0)
pot_rc = 4.0*epsilon*src6*(src6 - 1.0)
pot_fs = -48.0*epsilon*src6*(src6 - 0.5)*rcinv
# add all the components./ shifting.
PE = PE + pot_lj - pot_rc - (rij - rcut)*pot_fs
virial = virial + (dx*fx[i] + dy*fy[i] + dz*fz[i])
else:
|
PE = PE*0.5
virial = virial*0.5
#
return [fx,fy,fz,PE,virial]
@jit
def applyPBC(N,x,y,z,lx,ly,lz):
x = x - np.round(x/lx)*lx
y = y - np.round(y/ly)*ly
z = z - np.round(z/lz | continue | conditional_block |
intcode.rs | pping forward 4 more positions arrives at opcode `99`, halting the
//! program.
//!
//! Here are the initial and final states of a few more small programs:
//!
//! - `1,0,0,0,99` becomes `2,0,0,0,99` (1 + 1 = 2).
//! - `2,3,0,3,99` becomes `2,3,0,6,99` (3 * 2 = 6).
//! - `2,4,4,5,99,0` becomes `2,4,4,5,99,9801` (99 * 99 = 9801).
//! - `1,1,1,4,99,5,6,0,99` becomes `30,1,1,4,2,5,6,0,99`.
//!
//! Once you have a working computer, the first step is to restore the gravity
//! assist program (your puzzle input) to the "1202 program alarm" state it had
//! just before the last computer caught fire.
//! To do this, before running the program, replace position `1` with the value
//! `12` and replace position `2` with the value `2`. What value is left at
//! position `0` after the program halts?
//!
//! ## Part 2
//!
//! "Good, the new computer seems to be working correctly! Keep it nearby during
//! this mission - you'll probably use it again. Real Intcode computers support
//! many more features than your new one, but we'll let you know what they are
//! as you need them."
//!
//! "However, your current priority should be to complete your gravity assist
//! around the Moon. For this mission to succeed, we should settle on some
//! terminology for the parts you've already built."
//!
//! Intcode programs are given as a list of integers; these values are used as
//! the initial state for the computer's memory. When you run an Intcode program,
//! make sure to start by initializing memory to the program's values. A position
//! in memory is called an address (for example, the first value in memory is at
//! "address 0").
//!
//! Opcodes (like 1, 2, or 99) mark the beginning of an instruction. The values
//! used immediately after an opcode, if any, are called the instruction's
//! parameters. For example, in the instruction 1,2,3,4, 1 is the opcode; 2, 3,
//! and 4 are the parameters. The instruction 99 contains only an opcode and has
//! no parameters.
//!
//! The address of the current instruction is called the instruction pointer; it
//! starts at 0. After an instruction finishes, the instruction pointer increases
//! by the number of values in the instruction; until you add more instructions
//! to the computer, this is always 4 (1 opcode + 3 parameters) for the add and
//! multiply instructions. (The halt instruction would increase the instruction
//! pointer by 1, but it halts the program instead.)
//!
//! "With terminology out of the way, we're ready to proceed. To complete the
//! gravity assist, you need to determine what pair of inputs produces the
//! output 19690720."
//!
//! The inputs should still be provided to the program by replacing the values
//! at addresses 1 and 2, just like before. In this program, the value placed in
//! address 1 is called the noun, and the value placed in address 2 is called
//! the verb. Each of the two input values will be between 0 and 99, inclusive.
//!
//! Once the program has halted, its output is available at address 0, also just | //! other words, don't reuse memory from a previous attempt.
//!
//! Find the input noun and verb that cause the program to produce the output
//! 19690720. What is 100 * noun + verb? (For example, if noun=12 and verb=2,
//! the answer would be 1202.)
//!
//! # Day 5: Sunny with a Chance of Asteroids
//!
//! ## Part 1
//!
//! You're starting to sweat as the ship makes its way toward Mercury. The Elves
//! suggest that you get the air conditioner working by upgrading your ship
//! computer to support the Thermal Environment Supervision Terminal.
//!
//! The Thermal Environment Supervision Terminal (TEST) starts by running a
//! diagnostic program (your puzzle input). The TEST diagnostic program will run
//! on your existing Intcode computer after a few modifications:
//!
//! First, you'll need to add **two new instructions**:
//!
//! - Opcode `3` takes a single integer as **input** and saves it to the position
//! given by its only parameter. For example, the instruction `3,50` would take
//! an input value and store it at address `50`.
//! - Opcode `4` **outputs** the value of its only parameter. For example, the
//! instruction `4,50` would output the value at address `50`.
//!
//! Programs that use these instructions will come with documentation that
//! explains what should be connected to the input and output.
//! The program `3,0,4,0,99` outputs whatever it gets as input, then halts.
//!
//! Second, you'll need to add support for parameter modes:
//!
//! Each parameter of an instruction is handled based on its parameter mode.
//! Right now, your ship computer already understands parameter mode 0, position
//! mode, which causes the parameter to be interpreted as a position - if the
//! parameter is 50, its value is the value stored at address 50 in memory.
//! Until now, all parameters have been in position mode.
//!
//! Now, your ship computer will also need to handle parameters in mode 1,
//! immediate mode. In immediate mode, a parameter is interpreted as a value - if
//! the parameter is 50, its value is simply 50.
//!
//! Parameter modes are stored in the same value as the instruction's opcode.
//! The opcode is a two-digit number based only on the ones and tens digit of the
//! value, that is, the opcode is the rightmost two digits of the first value in
//! an instruction. Parameter modes are single digits, one per parameter, read
//! right-to-left from the opcode: the first parameter's mode is in the hundreds
//! digit, the second parameter's mode is in the thousands digit, the third
//! parameter's mode is in the ten-thousands digit, and so on.
//! Any missing modes are 0.
//!
//! For example, consider the program `1002,4,3,4,33`.
//!
//! The first instruction, `1002,4,3,4`, is a multiply instruction - the rightmost
//! two digits of the first value, 02, indicate opcode 2, multiplication.
//! Then, going right to left, the parameter modes are 0 (hundreds digit),
//! 1 (thousands digit), and 0 (ten-thousands digit, not present and therefore
//! zero):
//!
//! ```text
//! ABCDE
//! 1002
//!
//! DE - two-digit opcode, 02 == opcode 2
//! C - mode of 1st parameter, 0 == position mode
//! B - mode of 2nd parameter, 1 == immediate mode
//! A - mode of 3rd parameter, 0 == position mode,
//! omitted due to being a leading zero
//! ```
//!
//! This instruction multiplies its first two parameters.
//! The first parameter, 4 in position mode, works like it did before - its value
//! is the value stored at address 4 (33). The second parameter, 3 in immediate
//! mode, simply has value 3. The result of this operation, 33 * 3 = 99, is written
//! according to the third parameter, 4 in position mode, which also works like
//! it did before - 99 is written to address 4.
//!
//! Parameters that an instruction writes to will never be in immediate mode.
//!
//! Finally, some notes:
//!
//! - It is important to remember that the instruction pointer should increase by
//! the number of values in the instruction after the instruction finishes.
//! Because of the new instructions, this amount is no longer always `4`.
//! - Integers can be negative: `1101,100,-1,4,0` is a valid program (find
//! `100 + -1`, store the result in position `4`).
//!
//! The TEST diagnostic program will start by requesting from the user the ID of
//! the system to test by running an input instruction - provide it 1, the ID for
//! the ship's air conditioner unit.
//!
//! It will then perform a series of diagnostic tests confirming that various
//! parts of the Intcode computer, like parameter modes, function correctly.
//! For each test, it will run an output instruction indicating how far the result
//! of the test was from the expected value, where 0 means | //! like before. Each time you try a pair of inputs, make sure you first reset
//! the computer's memory to the values in the program (your puzzle input) - in | random_line_split |
intcode.rs | computer's memory. When you run an Intcode program,
//! make sure to start by initializing memory to the program's values. A position
//! in memory is called an address (for example, the first value in memory is at
//! "address 0").
//!
//! Opcodes (like 1, 2, or 99) mark the beginning of an instruction. The values
//! used immediately after an opcode, if any, are called the instruction's
//! parameters. For example, in the instruction 1,2,3,4, 1 is the opcode; 2, 3,
//! and 4 are the parameters. The instruction 99 contains only an opcode and has
//! no parameters.
//!
//! The address of the current instruction is called the instruction pointer; it
//! starts at 0. After an instruction finishes, the instruction pointer increases
//! by the number of values in the instruction; until you add more instructions
//! to the computer, this is always 4 (1 opcode + 3 parameters) for the add and
//! multiply instructions. (The halt instruction would increase the instruction
//! pointer by 1, but it halts the program instead.)
//!
//! "With terminology out of the way, we're ready to proceed. To complete the
//! gravity assist, you need to determine what pair of inputs produces the
//! output 19690720."
//!
//! The inputs should still be provided to the program by replacing the values
//! at addresses 1 and 2, just like before. In this program, the value placed in
//! address 1 is called the noun, and the value placed in address 2 is called
//! the verb. Each of the two input values will be between 0 and 99, inclusive.
//!
//! Once the program has halted, its output is available at address 0, also just
//! like before. Each time you try a pair of inputs, make sure you first reset
//! the computer's memory to the values in the program (your puzzle input) - in
//! other words, don't reuse memory from a previous attempt.
//!
//! Find the input noun and verb that cause the program to produce the output
//! 19690720. What is 100 * noun + verb? (For example, if noun=12 and verb=2,
//! the answer would be 1202.)
//!
//! # Day 5: Sunny with a Chance of Asteroids
//!
//! ## Part 1
//!
//! You're starting to sweat as the ship makes its way toward Mercury. The Elves
//! suggest that you get the air conditioner working by upgrading your ship
//! computer to support the Thermal Environment Supervision Terminal.
//!
//! The Thermal Environment Supervision Terminal (TEST) starts by running a
//! diagnostic program (your puzzle input). The TEST diagnostic program will run
//! on your existing Intcode computer after a few modifications:
//!
//! First, you'll need to add **two new instructions**:
//!
//! - Opcode `3` takes a single integer as **input** and saves it to the position
//! given by its only parameter. For example, the instruction `3,50` would take
//! an input value and store it at address `50`.
//! - Opcode `4` **outputs** the value of its only parameter. For example, the
//! instruction `4,50` would output the value at address `50`.
//!
//! Programs that use these instructions will come with documentation that
//! explains what should be connected to the input and output.
//! The program `3,0,4,0,99` outputs whatever it gets as input, then halts.
//!
//! Second, you'll need to add support for parameter modes:
//!
//! Each parameter of an instruction is handled based on its parameter mode.
//! Right now, your ship computer already understands parameter mode 0, position
//! mode, which causes the parameter to be interpreted as a position - if the
//! parameter is 50, its value is the value stored at address 50 in memory.
//! Until now, all parameters have been in position mode.
//!
//! Now, your ship computer will also need to handle parameters in mode 1,
//! immediate mode. In immediate mode, a parameter is interpreted as a value - if
//! the parameter is 50, its value is simply 50.
//!
//! Parameter modes are stored in the same value as the instruction's opcode.
//! The opcode is a two-digit number based only on the ones and tens digit of the
//! value, that is, the opcode is the rightmost two digits of the first value in
//! an instruction. Parameter modes are single digits, one per parameter, read
//! right-to-left from the opcode: the first parameter's mode is in the hundreds
//! digit, the second parameter's mode is in the thousands digit, the third
//! parameter's mode is in the ten-thousands digit, and so on.
//! Any missing modes are 0.
//!
//! For example, consider the program `1002,4,3,4,33`.
//!
//! The first instruction, `1002,4,3,4`, is a multiply instruction - the rightmost
//! two digits of the first value, 02, indicate opcode 2, multiplication.
//! Then, going right to left, the parameter modes are 0 (hundreds digit),
//! 1 (thousands digit), and 0 (ten-thousands digit, not present and therefore
//! zero):
//!
//! ```text
//! ABCDE
//! 1002
//!
//! DE - two-digit opcode, 02 == opcode 2
//! C - mode of 1st parameter, 0 == position mode
//! B - mode of 2nd parameter, 1 == immediate mode
//! A - mode of 3rd parameter, 0 == position mode,
//! omitted due to being a leading zero
//! ```
//!
//! This instruction multiplies its first two parameters.
//! The first parameter, 4 in position mode, works like it did before - its value
//! is the value stored at address 4 (33). The second parameter, 3 in immediate
//! mode, simply has value 3. The result of this operation, 33 * 3 = 99, is written
//! according to the third parameter, 4 in position mode, which also works like
//! it did before - 99 is written to address 4.
//!
//! Parameters that an instruction writes to will never be in immediate mode.
//!
//! Finally, some notes:
//!
//! - It is important to remember that the instruction pointer should increase by
//! the number of values in the instruction after the instruction finishes.
//! Because of the new instructions, this amount is no longer always `4`.
//! - Integers can be negative: `1101,100,-1,4,0` is a valid program (find
//! `100 + -1`, store the result in position `4`).
//!
//! The TEST diagnostic program will start by requesting from the user the ID of
//! the system to test by running an input instruction - provide it 1, the ID for
//! the ship's air conditioner unit.
//!
//! It will then perform a series of diagnostic tests confirming that various
//! parts of the Intcode computer, like parameter modes, function correctly.
//! For each test, it will run an output instruction indicating how far the result
//! of the test was from the expected value, where 0 means the test was successful.
//! Non-zero outputs mean that a function is not working correctly; check the
//! instructions that were run before the output instruction to see which one
//! failed.
//!
//! Finally, the program will output a diagnostic code and immediately halt.
//! This final output isn't an error; an output followed immediately by a halt
//! means the program finished. If all outputs were zero except the diagnostic
//! code, the diagnostic program ran successfully.
//!
//! After providing 1 to the only input instruction and passing all the tests,
//! what diagnostic code does the program produce?
use std::convert::TryFrom;
use std::str::FromStr;
#[derive(Debug, PartialEq)]
struct OpHeader {
mode1: usize,
mode2: usize,
mode3: usize,
opcode: usize,
}
impl FromStr for OpHeader {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
// initial string should not be larger than 5 or smaller than 1 chars.
if s.len() > 5 || s.len() < 1 {
return Err(());
}
let padded = format!("{:0>5}", s.chars().take(5).collect::<String>());
let (modes, opcode) = padded.split_at(3);
let modes: Vec<u32> = modes.chars().filter_map(|c| c.to_digit(10)).collect();
let opcode: usize = opcode.parse().map_err(|_| ())?;
Ok(OpHeader {
mode1: modes[2] as usize,
mode2: modes[1] as usize,
mode3: modes[0] as usize,
opcode,
})
}
}
impl TryFrom<i32> for OpHeader {
type Error = ();
fn try_from(value: i32) -> Result<Self, Self::Error> {
value.to_string().parse()
}
}
#[derive(Debug)]
enum Param {
Immediate(i32),
Position(usize),
}
impl Param {
pub fn | from_pair | identifier_name |
|
intcode.rs | - if
//! the parameter is 50, its value is simply 50.
//!
//! Parameter modes are stored in the same value as the instruction's opcode.
//! The opcode is a two-digit number based only on the ones and tens digit of the
//! value, that is, the opcode is the rightmost two digits of the first value in
//! an instruction. Parameter modes are single digits, one per parameter, read
//! right-to-left from the opcode: the first parameter's mode is in the hundreds
//! digit, the second parameter's mode is in the thousands digit, the third
//! parameter's mode is in the ten-thousands digit, and so on.
//! Any missing modes are 0.
//!
//! For example, consider the program `1002,4,3,4,33`.
//!
//! The first instruction, `1002,4,3,4`, is a multiply instruction - the rightmost
//! two digits of the first value, 02, indicate opcode 2, multiplication.
//! Then, going right to left, the parameter modes are 0 (hundreds digit),
//! 1 (thousands digit), and 0 (ten-thousands digit, not present and therefore
//! zero):
//!
//! ```text
//! ABCDE
//! 1002
//!
//! DE - two-digit opcode, 02 == opcode 2
//! C - mode of 1st parameter, 0 == position mode
//! B - mode of 2nd parameter, 1 == immediate mode
//! A - mode of 3rd parameter, 0 == position mode,
//! omitted due to being a leading zero
//! ```
//!
//! This instruction multiplies its first two parameters.
//! The first parameter, 4 in position mode, works like it did before - its value
//! is the value stored at address 4 (33). The second parameter, 3 in immediate
//! mode, simply has value 3. The result of this operation, 33 * 3 = 99, is written
//! according to the third parameter, 4 in position mode, which also works like
//! it did before - 99 is written to address 4.
//!
//! Parameters that an instruction writes to will never be in immediate mode.
//!
//! Finally, some notes:
//!
//! - It is important to remember that the instruction pointer should increase by
//! the number of values in the instruction after the instruction finishes.
//! Because of the new instructions, this amount is no longer always `4`.
//! - Integers can be negative: `1101,100,-1,4,0` is a valid program (find
//! `100 + -1`, store the result in position `4`).
//!
//! The TEST diagnostic program will start by requesting from the user the ID of
//! the system to test by running an input instruction - provide it 1, the ID for
//! the ship's air conditioner unit.
//!
//! It will then perform a series of diagnostic tests confirming that various
//! parts of the Intcode computer, like parameter modes, function correctly.
//! For each test, it will run an output instruction indicating how far the result
//! of the test was from the expected value, where 0 means the test was successful.
//! Non-zero outputs mean that a function is not working correctly; check the
//! instructions that were run before the output instruction to see which one
//! failed.
//!
//! Finally, the program will output a diagnostic code and immediately halt.
//! This final output isn't an error; an output followed immediately by a halt
//! means the program finished. If all outputs were zero except the diagnostic
//! code, the diagnostic program ran successfully.
//!
//! After providing 1 to the only input instruction and passing all the tests,
//! what diagnostic code does the program produce?
use std::convert::TryFrom;
use std::str::FromStr;
#[derive(Debug, PartialEq)]
struct OpHeader {
mode1: usize,
mode2: usize,
mode3: usize,
opcode: usize,
}
impl FromStr for OpHeader {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
// initial string should not be larger than 5 or smaller than 1 chars.
if s.len() > 5 || s.len() < 1 {
return Err(());
}
let padded = format!("{:0>5}", s.chars().take(5).collect::<String>());
let (modes, opcode) = padded.split_at(3);
let modes: Vec<u32> = modes.chars().filter_map(|c| c.to_digit(10)).collect();
let opcode: usize = opcode.parse().map_err(|_| ())?;
Ok(OpHeader {
mode1: modes[2] as usize,
mode2: modes[1] as usize,
mode3: modes[0] as usize,
opcode,
})
}
}
impl TryFrom<i32> for OpHeader {
type Error = ();
fn try_from(value: i32) -> Result<Self, Self::Error> {
value.to_string().parse()
}
}
#[derive(Debug)]
enum Param {
Immediate(i32),
Position(usize),
}
impl Param {
pub fn from_pair((mode, value): (usize, i32)) -> Self {
match mode {
0 => Param::Position(value as usize),
1 => Param::Immediate(value),
_ => unreachable!(),
}
}
}
#[derive(Debug)]
enum Op {
Add { a: Param, b: Param, out: usize },
Multiply { a: Param, b: Param, out: usize },
Input { out: usize },
Output { value: Param },
Halt,
Unknown,
}
/// Builds an `Op` from `data` by reading up to 4 items from a given offset.
fn read_instruction(offset: usize, data: &[i32]) -> Op {
// FIXME: add support for Input/Output
let header: Option<OpHeader> = data.get(offset).and_then(|x| OpHeader::try_from(*x).ok());
match (
header,
data.get(offset + 1).map(|x| *x),
data.get(offset + 2).map(|x| *x),
data.get(offset + 3).map(|x| *x),
) {
(
Some(OpHeader {
opcode: 1,
mode1,
mode2,
mode3,
}),
Some(a),
Some(b),
Some(out),
) => Op::Add {
a: Param::from_pair((mode1, a)),
b: Param::from_pair((mode2, b)),
out: match Param::from_pair((mode3, out)) {
Param::Position(out) => out,
_ => unreachable!("output params cannot be immediate"),
},
},
(
Some(OpHeader {
opcode: 2,
mode1,
mode2,
mode3,
}),
Some(a),
Some(b),
Some(out),
) => Op::Multiply {
a: Param::from_pair((mode1, a)),
b: Param::from_pair((mode2, b)),
out: match Param::from_pair((mode3, out)) {
Param::Position(out) => out,
_ => unreachable!("output params cannot be immediate"),
},
},
(Some(OpHeader { opcode: 3, .. }), Some(out), _, _) => Op::Input { out: out as usize },
(
Some(OpHeader {
opcode: 4, mode1, ..
}),
Some(value),
_,
_,
) => Op::Output {
value: Param::from_pair((mode1, value)),
},
(Some(OpHeader { opcode: 99, .. }), _, _, _) => Op::Halt,
_ => Op::Unknown,
}
}
fn read_value(param: Param, data: &[i32]) -> Option<i32> {
match param {
Param::Position(idx) => data.get(idx).map(|x| *x),
Param::Immediate(val) => Some(val),
}
}
use std::io::BufRead;
fn prompt_for_input() -> Result<i32, ()> {
let mut buf = String::new();
println!("Waiting for input... >");
std::io::stdin()
.lock()
.read_line(&mut buf)
.expect("input read");
buf.trim().parse().map_err(|e| {
eprintln!("{}", e);
})
}
/// Run an intcode program.
pub fn compute(data: &mut [i32]) | {
let mut i = 0;
loop {
// FIXME: make read_instruction an iterator so it can manage the increment internally
match read_instruction(i, &data) {
Op::Add { a, b, out } => {
let a = read_value(a, data).unwrap();
let b = read_value(b, data).unwrap();
data[out] = a + b;
i += 4;
}
Op::Multiply { a, b, out } => {
let a = read_value(a, data).unwrap();
let b = read_value(b, data).unwrap();
data[out] = a * b;
i += 4;
}
Op::Input { out } => {
let value = prompt_for_input().unwrap();
data[out] = value; | identifier_body |
|
token.go | // Misc
TokenComma: {Description: ","},
TokenStar: {Kw: "*", Description: "*"},
TokenColon: {Kw: ":", Description: ":"},
TokenLeftBracket: {Kw: "[", Description: "["},
TokenRightBracket: {Kw: "]", Description: "]"},
TokenLeftBrace: {Kw: "{", Description: "{"},
TokenRightBrace: {Kw: "}", Description: "}"},
// Logic, Expressions, Operators etc
TokenMultiply: {Kw: "*", Description: "Multiply"},
TokenMinus: {Kw: "-", Description: "-"},
TokenPlus: {Kw: "+", Description: "+"},
TokenPlusPlus: {Kw: "++", Description: "++"},
TokenPlusEquals: {Kw: "+=", Description: "+="},
TokenDivide: {Kw: "/", Description: "Divide /"},
TokenModulus: {Kw: "%", Description: "Modulus %"},
TokenEqual: {Kw: "=", Description: "Equal"},
TokenEqualEqual: {Kw: "==", Description: "=="},
TokenNE: {Kw: "!=", Description: "NE"},
TokenGE: {Kw: ">=", Description: "GE"},
TokenLE: {Kw: "<=", Description: "LE"},
TokenGT: {Kw: ">", Description: "GT"},
TokenLT: {Kw: "<", Description: "LT"},
TokenIf: {Kw: "if", Description: "IF"},
TokenAnd: {Kw: "&&", Description: "&&"},
TokenOr: {Kw: "||", Description: "||"},
TokenLogicOr: {Kw: "or", Description: "Or"},
TokenLogicAnd: {Kw: "and", Description: "And"},
TokenIN: {Kw: "in", Description: "IN"},
TokenLike: {Kw: "like", Description: "LIKE"},
TokenNegate: {Kw: "not", Description: "NOT"},
TokenBetween: {Kw: "between", Description: "between"},
TokenIs: {Kw: "is", Description: "IS"},
TokenNull: {Kw: "null", Description: "NULL"},
TokenContains: {Kw: "contains", Description: "contains"},
TokenIntersects: {Kw: "intersects", Description: "intersects"},
// Identity ish bools
TokenTrue: {Kw: "true", Description: "True"},
TokenFalse: {Kw: "false", Description: "False"},
// parens, both logical expression as well as functional
TokenLeftParenthesis: {Description: "("},
TokenRightParenthesis: {Description: ")"},
// Expression Identifier
TokenUdfExpr: {Description: "expr"},
// Initial Keywords, these are the most important QL Type words
TokenPrepare: {Description: "prepare"},
TokenInsert: {Description: "insert"},
TokenSelect: {Description: "select"},
TokenDelete: {Description: "delete"},
TokenUpdate: {Description: "update"},
TokenUpsert: {Description: "upsert"},
TokenAlter: {Description: "alter"},
TokenCreate: {Description: "create"},
TokenDrop: {Description: "drop"},
TokenSubscribe: {Description: "subscribe"},
TokenFilter: {Description: "filter"},
TokenShow: {Description: "show"},
TokenDescribe: {Description: "describe"},
TokenExplain: {Description: "explain"},
TokenReplace: {Description: "replace"},
TokenRollback: {Description: "rollback"},
TokenCommit: {Description: "commit"},
// Top Level dml ql clause keywords
TokenInto: {Description: "into"},
TokenBy: {Description: "by"},
TokenFrom: {Description: "from"},
TokenWhere: {Description: "where"},
TokenHaving: {Description: "having"},
TokenGroupBy: {Description: "group by"},
// Other Ql Keywords
TokenAlias: {Description: "alias"},
TokenWith: {Description: "with"},
TokenValues: {Description: "values"},
TokenLimit: {Description: "limit"},
TokenOrderBy: {Description: "order by"},
TokenInner: {Description: "inner"},
TokenCross: {Description: "cross"},
TokenOuter: {Description: "outer"},
TokenLeft: {Description: "left"},
TokenRight: {Description: "right"},
TokenJoin: {Description: "join"},
TokenOn: {Description: "on"},
TokenDistinct: {Description: "distinct"},
TokenAll: {Description: "all"},
TokenInclude: {Description: "include"},
TokenExists: {Description: "exists"},
TokenOffset: {Description: "offset"},
TokenFull: {Description: "full"},
TokenGlobal: {Description: "global"},
TokenSession: {Description: "session"},
TokenTables: {Description: "tables"},
// ddl keywords
TokenSchema: {Description: "schema"},
TokenDatabase: {Description: "database"},
TokenTable: {Description: "table"},
TokenSource: {Description: "source"},
TokenView: {Description: "view"},
TokenContinuousView: {Description: "continuousview"},
TokenTemp: {Description: "temp"},
// ddl other
TokenChange: {Description: "change"},
TokenCharacterSet: {Description: "character set"},
TokenAdd: {Description: "add"},
TokenFirst: {Description: "first"},
TokenAfter: {Description: "after"},
TokenDefault: {Description: "default"},
TokenUnique: {Description: "unique"},
TokenKey: {Description: "key"},
TokenPrimary: {Description: "primary"},
TokenConstraint: {Description: "constraint"},
TokenForeign: {Description: "foreign"},
TokenReferences: {Description: "references"},
TokenEngine: {Description: "engine"},
// QL Keywords, all lower-case
TokenSet: {Description: "set"},
TokenAs: {Description: "as"},
TokenAsc: {Description: "asc"},
TokenDesc: {Description: "desc"},
TokenUse: {Description: "use"},
// special value types
TokenIdentity: {Description: "identity"},
TokenValue: {Description: "value"},
TokenValueEscaped: {Description: "value-escaped"},
TokenRegex: {Description: "regex"},
TokenDuration: {Description: "duration"},
// Data TYPES: ie type system
TokenTypeDef: {Description: "TypeDef"}, // Generic DataType
TokenTypeBool: {Description: "BoolType"},
TokenTypeFloat: {Description: "FloatType"},
TokenTypeInteger: {Description: "IntegerType"},
TokenTypeString: {Description: "StringType"},
TokenTypeVarChar: {Description: "VarCharType"},
TokenTypeChar: {Description: "CharType"},
TokenTypeBigInt: {Description: "BigIntType"},
TokenTypeTime: {Description: "TimeType"},
TokenTypeText: {Description: "TextType"},
TokenTypeJson: {Description: "JsonType"},
// VALUE TYPES: ie literal values
TokenBool: {Description: "BoolVal"},
TokenFloat: {Description: "FloatVal"},
TokenInteger: {Description: "IntegerVal"},
TokenString: {Description: "StringVal"},
TokenTime: {Description: "TimeVal"},
// Some other value Types
TokenValueType: {Description: "Value"}, // Generic DataType just stores in a value.Value
TokenList: {Description: "List"},
TokenMap: {Description: "Map"},
TokenJson: {Description: "JSON"},
}
TokenToOp = make(map[string]TokenType)
)
func init() {
LoadTokenInfo()
SqlDialect.Init()
FilterQLDialect.Init()
JsonDialect.Init()
}
// LoadTokenInfo load the token info into global map
func LoadTokenInfo() {
for tok, ti := range TokenNameMap {
ti.T = tok
if ti.Kw == "" {
ti.Kw = ti.Description
}
TokenToOp[ti.Kw] = tok
if strings.Contains(ti.Kw, " ") {
parts := strings.Split(ti.Kw, " ")
ti.firstWord = parts[0]
ti.HasSpaces = true
}
}
}
// TokenFromOp get token from operation string
func TokenFromOp(op string) Token {
tt, ok := TokenToOp[op]
if ok | {
return Token{T: tt, V: op}
} | conditional_block |
|
token.go | ma: {Description: ","},
TokenStar: {Kw: "*", Description: "*"},
TokenColon: {Kw: ":", Description: ":"},
TokenLeftBracket: {Kw: "[", Description: "["},
TokenRightBracket: {Kw: "]", Description: "]"},
TokenLeftBrace: {Kw: "{", Description: "{"},
TokenRightBrace: {Kw: "}", Description: "}"},
// Logic, Expressions, Operators etc
TokenMultiply: {Kw: "*", Description: "Multiply"},
TokenMinus: {Kw: "-", Description: "-"},
TokenPlus: {Kw: "+", Description: "+"},
TokenPlusPlus: {Kw: "++", Description: "++"},
TokenPlusEquals: {Kw: "+=", Description: "+="},
TokenDivide: {Kw: "/", Description: "Divide /"},
TokenModulus: {Kw: "%", Description: "Modulus %"},
TokenEqual: {Kw: "=", Description: "Equal"},
TokenEqualEqual: {Kw: "==", Description: "=="},
TokenNE: {Kw: "!=", Description: "NE"},
TokenGE: {Kw: ">=", Description: "GE"},
TokenLE: {Kw: "<=", Description: "LE"},
TokenGT: {Kw: ">", Description: "GT"},
TokenLT: {Kw: "<", Description: "LT"},
TokenIf: {Kw: "if", Description: "IF"},
TokenAnd: {Kw: "&&", Description: "&&"},
TokenOr: {Kw: "||", Description: "||"},
TokenLogicOr: {Kw: "or", Description: "Or"},
TokenLogicAnd: {Kw: "and", Description: "And"},
TokenIN: {Kw: "in", Description: "IN"},
TokenLike: {Kw: "like", Description: "LIKE"},
TokenNegate: {Kw: "not", Description: "NOT"},
TokenBetween: {Kw: "between", Description: "between"},
TokenIs: {Kw: "is", Description: "IS"},
TokenNull: {Kw: "null", Description: "NULL"},
TokenContains: {Kw: "contains", Description: "contains"},
TokenIntersects: {Kw: "intersects", Description: "intersects"},
// Identity ish bools
TokenTrue: {Kw: "true", Description: "True"},
TokenFalse: {Kw: "false", Description: "False"},
// parens, both logical expression as well as functional
TokenLeftParenthesis: {Description: "("},
TokenRightParenthesis: {Description: ")"},
// Expression Identifier
TokenUdfExpr: {Description: "expr"},
// Initial Keywords, these are the most important QL Type words
TokenPrepare: {Description: "prepare"},
TokenInsert: {Description: "insert"},
TokenSelect: {Description: "select"},
TokenDelete: {Description: "delete"},
TokenUpdate: {Description: "update"},
TokenUpsert: {Description: "upsert"},
TokenAlter: {Description: "alter"},
TokenCreate: {Description: "create"},
TokenDrop: {Description: "drop"},
TokenSubscribe: {Description: "subscribe"},
TokenFilter: {Description: "filter"},
TokenShow: {Description: "show"},
TokenDescribe: {Description: "describe"},
TokenExplain: {Description: "explain"},
TokenReplace: {Description: "replace"},
TokenRollback: {Description: "rollback"},
TokenCommit: {Description: "commit"},
// Top Level dml ql clause keywords
TokenInto: {Description: "into"},
TokenBy: {Description: "by"},
TokenFrom: {Description: "from"},
TokenWhere: {Description: "where"},
TokenHaving: {Description: "having"},
TokenGroupBy: {Description: "group by"},
// Other Ql Keywords
TokenAlias: {Description: "alias"},
TokenWith: {Description: "with"},
TokenValues: {Description: "values"},
TokenLimit: {Description: "limit"},
TokenOrderBy: {Description: "order by"},
TokenInner: {Description: "inner"},
TokenCross: {Description: "cross"},
TokenOuter: {Description: "outer"},
TokenLeft: {Description: "left"},
TokenRight: {Description: "right"},
TokenJoin: {Description: "join"},
TokenOn: {Description: "on"},
TokenDistinct: {Description: "distinct"},
TokenAll: {Description: "all"},
TokenInclude: {Description: "include"},
TokenExists: {Description: "exists"},
TokenOffset: {Description: "offset"},
TokenFull: {Description: "full"},
TokenGlobal: {Description: "global"},
TokenSession: {Description: "session"},
TokenTables: {Description: "tables"},
// ddl keywords
TokenSchema: {Description: "schema"},
TokenDatabase: {Description: "database"},
TokenTable: {Description: "table"},
TokenSource: {Description: "source"},
TokenView: {Description: "view"},
TokenContinuousView: {Description: "continuousview"},
TokenTemp: {Description: "temp"},
// ddl other
TokenChange: {Description: "change"},
TokenCharacterSet: {Description: "character set"},
TokenAdd: {Description: "add"},
TokenFirst: {Description: "first"},
TokenAfter: {Description: "after"},
TokenDefault: {Description: "default"},
TokenUnique: {Description: "unique"},
TokenKey: {Description: "key"},
TokenPrimary: {Description: "primary"},
TokenConstraint: {Description: "constraint"},
TokenForeign: {Description: "foreign"},
TokenReferences: {Description: "references"},
TokenEngine: {Description: "engine"},
// QL Keywords, all lower-case
TokenSet: {Description: "set"},
TokenAs: {Description: "as"},
TokenAsc: {Description: "asc"},
TokenDesc: {Description: "desc"},
TokenUse: {Description: "use"},
// special value types
TokenIdentity: {Description: "identity"},
TokenValue: {Description: "value"},
TokenValueEscaped: {Description: "value-escaped"},
TokenRegex: {Description: "regex"},
TokenDuration: {Description: "duration"},
// Data TYPES: ie type system
TokenTypeDef: {Description: "TypeDef"}, // Generic DataType
TokenTypeBool: {Description: "BoolType"},
TokenTypeFloat: {Description: "FloatType"},
TokenTypeInteger: {Description: "IntegerType"},
TokenTypeString: {Description: "StringType"},
TokenTypeVarChar: {Description: "VarCharType"},
TokenTypeChar: {Description: "CharType"},
TokenTypeBigInt: {Description: "BigIntType"},
TokenTypeTime: {Description: "TimeType"},
TokenTypeText: {Description: "TextType"},
TokenTypeJson: {Description: "JsonType"},
// VALUE TYPES: ie literal values
TokenBool: {Description: "BoolVal"},
TokenFloat: {Description: "FloatVal"},
TokenInteger: {Description: "IntegerVal"},
TokenString: {Description: "StringVal"},
TokenTime: {Description: "TimeVal"},
// Some other value Types
TokenValueType: {Description: "Value"}, // Generic DataType just stores in a value.Value
TokenList: {Description: "List"},
TokenMap: {Description: "Map"},
TokenJson: {Description: "JSON"},
}
TokenToOp = make(map[string]TokenType)
)
func init() {
LoadTokenInfo()
SqlDialect.Init()
FilterQLDialect.Init()
JsonDialect.Init()
}
// LoadTokenInfo load the token info into global map
func LoadTokenInfo() {
for tok, ti := range TokenNameMap {
ti.T = tok
if ti.Kw == "" {
ti.Kw = ti.Description
}
TokenToOp[ti.Kw] = tok
if strings.Contains(ti.Kw, " ") {
parts := strings.Split(ti.Kw, " ")
ti.firstWord = parts[0]
ti.HasSpaces = true
}
}
}
// TokenFromOp get token from operation string
func TokenFromOp(op string) Token | {
tt, ok := TokenToOp[op]
if ok {
return Token{T: tt, V: op}
}
return Token{T: TokenNil}
} | identifier_body |
|
token.go | 07 // values
TokenInto TokenType = 308 // into
TokenLimit TokenType = 309 // limit
TokenOrderBy TokenType = 310 // order by
TokenInner TokenType = 311 // inner , ie of join
TokenCross TokenType = 312 // cross
TokenOuter TokenType = 313 // outer
TokenLeft TokenType = 314 // left
TokenRight TokenType = 315 // right
TokenJoin TokenType = 316 // Join
TokenOn TokenType = 317 // on
TokenDistinct TokenType = 318 // DISTINCT
TokenAll TokenType = 319 // all
TokenInclude TokenType = 320 // INCLUDE
TokenExists TokenType = 321 // EXISTS
TokenOffset TokenType = 322 // OFFSET
TokenFull TokenType = 323 // FULL
TokenGlobal TokenType = 324 // GLOBAL
TokenSession TokenType = 325 // SESSION
TokenTables TokenType = 326 // TABLES
// ddl major words
TokenSchema TokenType = 400 // SCHEMA
TokenDatabase TokenType = 401 // DATABASE
TokenTable TokenType = 402 // TABLE
TokenSource TokenType = 403 // SOURCE
TokenView TokenType = 404 // VIEW
TokenContinuousView TokenType = 405 // CONTINUOUSVIEW
TokenTemp TokenType = 406 // TEMP or TEMPORARY
// ddl other
TokenChange TokenType = 410 // change
TokenAdd TokenType = 411 // add
TokenFirst TokenType = 412 // first
TokenAfter TokenType = 413 // after
TokenCharacterSet TokenType = 414 // character set
TokenDefault TokenType = 415 // default
TokenUnique TokenType = 416 // unique
TokenKey TokenType = 417 // key
TokenPrimary TokenType = 418 // primary
TokenConstraint TokenType = 419 // constraint
TokenForeign TokenType = 420 // foreign
TokenReferences TokenType = 421 // references
TokenEngine TokenType = 422 // engine
// Other QL keywords
TokenSet TokenType = 500 // set
TokenAs TokenType = 501 // as
TokenAsc TokenType = 502 // ascending
TokenDesc TokenType = 503 // descending
TokenUse TokenType = 504 // use
// User defined function/expression
TokenUdfExpr TokenType = 550
// Value Types
TokenIdentity TokenType = 600 // identity, either column, table name etc
TokenValue TokenType = 601 // 'some string' string or continuous sequence of chars delimited by WHITE SPACE | ' | , | ( | )
TokenValueEscaped TokenType = 602 // '' becomes ' inside the string, parser will need to replace the string
TokenRegex TokenType = 603 // regex
TokenDuration TokenType = 604 // 14d , 22w, 3y, 45ms, 45us, 24hr, 2h, 45m, 30s
// Data Type Definitions
TokenTypeDef TokenType = 999
TokenTypeBool TokenType = 998
TokenTypeFloat TokenType = 997
TokenTypeInteger TokenType = 996
TokenTypeString TokenType = 995
TokenTypeVarChar TokenType = 994
TokenTypeChar TokenType = 993
TokenTypeBigInt TokenType = 992
TokenTypeTime TokenType = 991
TokenTypeText TokenType = 990
TokenTypeJson TokenType = 989
// Value types
TokenValueType TokenType = 1000 // A generic Identifier of value type
TokenBool TokenType = 1001
TokenFloat TokenType = 1002
TokenInteger TokenType = 1003
TokenString TokenType = 1004
TokenTime TokenType = 1005
// Composite Data Types
TokenJson TokenType = 1010
TokenList TokenType = 1011
TokenMap TokenType = 1012
)
var (
// IDENTITY_CHARS Which Identity Characters are allowed for UNESCAPED identities
IDENTITY_CHARS = "_.-/"
// A much more lax identity char set rule that allows spaces
IDENTITY_LAX_CHARS = "_./- "
// sql variables start with @@ ??
IDENTITY_SQL_CHARS = "@_.-"
// list of token-name
TokenNameMap = map[TokenType]*TokenInfo{
TokenEOF: {Description: "EOF"},
TokenEOS: {Description: ";"},
TokenEofOrEos: {Kw: "", Description: "; OR EOF"},
TokenError: {Description: "Error"},
TokenRaw: {Description: "unlexed text"},
TokenNewLine: {Description: "New Line"},
// Comments
TokenComment: {Description: "Comment"},
TokenCommentML: {Description: "CommentMultiLine"},
TokenCommentStart: {Description: "/*"},
TokenCommentEnd: {Description: "*/"},
TokenCommentHash: {Description: "#"},
TokenCommentSingleLine: {Description: "--"},
TokenCommentSlashes: {Description: "//"},
// Misc
TokenComma: {Description: ","},
TokenStar: {Kw: "*", Description: "*"},
TokenColon: {Kw: ":", Description: ":"},
TokenLeftBracket: {Kw: "[", Description: "["},
TokenRightBracket: {Kw: "]", Description: "]"},
TokenLeftBrace: {Kw: "{", Description: "{"},
TokenRightBrace: {Kw: "}", Description: "}"},
// Logic, Expressions, Operators etc
TokenMultiply: {Kw: "*", Description: "Multiply"},
TokenMinus: {Kw: "-", Description: "-"},
TokenPlus: {Kw: "+", Description: "+"},
TokenPlusPlus: {Kw: "++", Description: "++"},
TokenPlusEquals: {Kw: "+=", Description: "+="},
TokenDivide: {Kw: "/", Description: "Divide /"},
TokenModulus: {Kw: "%", Description: "Modulus %"},
TokenEqual: {Kw: "=", Description: "Equal"},
TokenEqualEqual: {Kw: "==", Description: "=="},
TokenNE: {Kw: "!=", Description: "NE"},
TokenGE: {Kw: ">=", Description: "GE"},
TokenLE: {Kw: "<=", Description: "LE"},
TokenGT: {Kw: ">", Description: "GT"},
TokenLT: {Kw: "<", Description: "LT"},
TokenIf: {Kw: "if", Description: "IF"},
TokenAnd: {Kw: "&&", Description: "&&"},
TokenOr: {Kw: "||", Description: "||"},
TokenLogicOr: {Kw: "or", Description: "Or"},
TokenLogicAnd: {Kw: "and", Description: "And"},
TokenIN: {Kw: "in", Description: "IN"},
TokenLike: {Kw: "like", Description: "LIKE"},
TokenNegate: {Kw: "not", Description: "NOT"},
TokenBetween: {Kw: "between", Description: "between"},
TokenIs: {Kw: "is", Description: "IS"},
TokenNull: {Kw: "null", Description: "NULL"},
TokenContains: {Kw: "contains", Description: "contains"},
TokenIntersects: {Kw: "intersects", Description: "intersects"},
// Identity ish bools
TokenTrue: {Kw: "true", Description: "True"},
TokenFalse: {Kw: "false", Description: "False"},
// parens, both logical expression as well as functional
TokenLeftParenthesis: {Description: "("},
TokenRightParenthesis: {Description: ")"},
// Expression Identifier
TokenUdfExpr: {Description: "expr"},
// Initial Keywords, these are the most important QL Type words
TokenPrepare: {Description: "prepare"},
TokenInsert: {Description: "insert"},
TokenSelect: {Description: "select"},
TokenDelete: {Description: "delete"},
TokenUpdate: {Description: "update"},
TokenUpsert: {Description: "upsert"},
TokenAlter: {Description: "alter"},
TokenCreate: {Description: "create"},
TokenDrop: {Description: "drop"}, | TokenSubscribe: {Description: "subscribe"},
TokenFilter: {Description: "filter"},
TokenShow: {Description: "show"}, | random_line_split |
|
token.go | Kw: "*", Description: "*"},
TokenColon: {Kw: ":", Description: ":"},
TokenLeftBracket: {Kw: "[", Description: "["},
TokenRightBracket: {Kw: "]", Description: "]"},
TokenLeftBrace: {Kw: "{", Description: "{"},
TokenRightBrace: {Kw: "}", Description: "}"},
// Logic, Expressions, Operators etc
TokenMultiply: {Kw: "*", Description: "Multiply"},
TokenMinus: {Kw: "-", Description: "-"},
TokenPlus: {Kw: "+", Description: "+"},
TokenPlusPlus: {Kw: "++", Description: "++"},
TokenPlusEquals: {Kw: "+=", Description: "+="},
TokenDivide: {Kw: "/", Description: "Divide /"},
TokenModulus: {Kw: "%", Description: "Modulus %"},
TokenEqual: {Kw: "=", Description: "Equal"},
TokenEqualEqual: {Kw: "==", Description: "=="},
TokenNE: {Kw: "!=", Description: "NE"},
TokenGE: {Kw: ">=", Description: "GE"},
TokenLE: {Kw: "<=", Description: "LE"},
TokenGT: {Kw: ">", Description: "GT"},
TokenLT: {Kw: "<", Description: "LT"},
TokenIf: {Kw: "if", Description: "IF"},
TokenAnd: {Kw: "&&", Description: "&&"},
TokenOr: {Kw: "||", Description: "||"},
TokenLogicOr: {Kw: "or", Description: "Or"},
TokenLogicAnd: {Kw: "and", Description: "And"},
TokenIN: {Kw: "in", Description: "IN"},
TokenLike: {Kw: "like", Description: "LIKE"},
TokenNegate: {Kw: "not", Description: "NOT"},
TokenBetween: {Kw: "between", Description: "between"},
TokenIs: {Kw: "is", Description: "IS"},
TokenNull: {Kw: "null", Description: "NULL"},
TokenContains: {Kw: "contains", Description: "contains"},
TokenIntersects: {Kw: "intersects", Description: "intersects"},
// Identity ish bools
TokenTrue: {Kw: "true", Description: "True"},
TokenFalse: {Kw: "false", Description: "False"},
// parens, both logical expression as well as functional
TokenLeftParenthesis: {Description: "("},
TokenRightParenthesis: {Description: ")"},
// Expression Identifier
TokenUdfExpr: {Description: "expr"},
// Initial Keywords, these are the most important QL Type words
TokenPrepare: {Description: "prepare"},
TokenInsert: {Description: "insert"},
TokenSelect: {Description: "select"},
TokenDelete: {Description: "delete"},
TokenUpdate: {Description: "update"},
TokenUpsert: {Description: "upsert"},
TokenAlter: {Description: "alter"},
TokenCreate: {Description: "create"},
TokenDrop: {Description: "drop"},
TokenSubscribe: {Description: "subscribe"},
TokenFilter: {Description: "filter"},
TokenShow: {Description: "show"},
TokenDescribe: {Description: "describe"},
TokenExplain: {Description: "explain"},
TokenReplace: {Description: "replace"},
TokenRollback: {Description: "rollback"},
TokenCommit: {Description: "commit"},
// Top Level dml ql clause keywords
TokenInto: {Description: "into"},
TokenBy: {Description: "by"},
TokenFrom: {Description: "from"},
TokenWhere: {Description: "where"},
TokenHaving: {Description: "having"},
TokenGroupBy: {Description: "group by"},
// Other Ql Keywords
TokenAlias: {Description: "alias"},
TokenWith: {Description: "with"},
TokenValues: {Description: "values"},
TokenLimit: {Description: "limit"},
TokenOrderBy: {Description: "order by"},
TokenInner: {Description: "inner"},
TokenCross: {Description: "cross"},
TokenOuter: {Description: "outer"},
TokenLeft: {Description: "left"},
TokenRight: {Description: "right"},
TokenJoin: {Description: "join"},
TokenOn: {Description: "on"},
TokenDistinct: {Description: "distinct"},
TokenAll: {Description: "all"},
TokenInclude: {Description: "include"},
TokenExists: {Description: "exists"},
TokenOffset: {Description: "offset"},
TokenFull: {Description: "full"},
TokenGlobal: {Description: "global"},
TokenSession: {Description: "session"},
TokenTables: {Description: "tables"},
// ddl keywords
TokenSchema: {Description: "schema"},
TokenDatabase: {Description: "database"},
TokenTable: {Description: "table"},
TokenSource: {Description: "source"},
TokenView: {Description: "view"},
TokenContinuousView: {Description: "continuousview"},
TokenTemp: {Description: "temp"},
// ddl other
TokenChange: {Description: "change"},
TokenCharacterSet: {Description: "character set"},
TokenAdd: {Description: "add"},
TokenFirst: {Description: "first"},
TokenAfter: {Description: "after"},
TokenDefault: {Description: "default"},
TokenUnique: {Description: "unique"},
TokenKey: {Description: "key"},
TokenPrimary: {Description: "primary"},
TokenConstraint: {Description: "constraint"},
TokenForeign: {Description: "foreign"},
TokenReferences: {Description: "references"},
TokenEngine: {Description: "engine"},
// QL Keywords, all lower-case
TokenSet: {Description: "set"},
TokenAs: {Description: "as"},
TokenAsc: {Description: "asc"},
TokenDesc: {Description: "desc"},
TokenUse: {Description: "use"},
// special value types
TokenIdentity: {Description: "identity"},
TokenValue: {Description: "value"},
TokenValueEscaped: {Description: "value-escaped"},
TokenRegex: {Description: "regex"},
TokenDuration: {Description: "duration"},
// Data TYPES: ie type system
TokenTypeDef: {Description: "TypeDef"}, // Generic DataType
TokenTypeBool: {Description: "BoolType"},
TokenTypeFloat: {Description: "FloatType"},
TokenTypeInteger: {Description: "IntegerType"},
TokenTypeString: {Description: "StringType"},
TokenTypeVarChar: {Description: "VarCharType"},
TokenTypeChar: {Description: "CharType"},
TokenTypeBigInt: {Description: "BigIntType"},
TokenTypeTime: {Description: "TimeType"},
TokenTypeText: {Description: "TextType"},
TokenTypeJson: {Description: "JsonType"},
// VALUE TYPES: ie literal values
TokenBool: {Description: "BoolVal"},
TokenFloat: {Description: "FloatVal"},
TokenInteger: {Description: "IntegerVal"},
TokenString: {Description: "StringVal"},
TokenTime: {Description: "TimeVal"},
// Some other value Types
TokenValueType: {Description: "Value"}, // Generic DataType just stores in a value.Value
TokenList: {Description: "List"},
TokenMap: {Description: "Map"},
TokenJson: {Description: "JSON"},
}
TokenToOp = make(map[string]TokenType)
)
func init() {
LoadTokenInfo()
SqlDialect.Init()
FilterQLDialect.Init()
JsonDialect.Init()
}
// LoadTokenInfo load the token info into global map
func LoadTokenInfo() {
for tok, ti := range TokenNameMap {
ti.T = tok
if ti.Kw == "" {
ti.Kw = ti.Description
}
TokenToOp[ti.Kw] = tok
if strings.Contains(ti.Kw, " ") {
parts := strings.Split(ti.Kw, " ")
ti.firstWord = parts[0]
ti.HasSpaces = true
}
}
}
// TokenFromOp get token from operation string
func TokenFromOp(op string) Token {
tt, ok := TokenToOp[op]
if ok {
return Token{T: tt, V: op}
}
return Token{T: TokenNil}
}
// String convert to human readable string
func (typ TokenType) | String | identifier_name |
|
begot.go | , v))
}
yaml_copy(mv, &dep)
if dep.Import_path != "" {
parts := strings.Split(dep.Import_path, "/")
if repo_parts, ok := KNOWN_GIT_SERVERS[parts[0]]; !ok {
panic(fmt.Errorf("Unknown git server %r for %r", parts[0], name))
} else {
repo_path := strings.Join(parts[:repo_parts+1], "/")
dep.Git_url = bf.default_git_url_from_repo_path(repo_path)
dep.Subpath = strings.Join(parts[repo_parts+1:], "/")
dep.Aliases = append(dep.Aliases, dep.Import_path)
// Redirect through repo aliases:
if alias, ok := bf.data.Repo_aliases[repo_path]; ok {
var aliasdep Dep // only allow git_url and ref
if aliasstr, ok := alias.(string); ok {
aliasstr = bf.default_git_url_from_repo_path(aliasstr)
alias = yaml.MapSlice{yaml.MapItem{"git_url", aliasstr}}
}
yaml_copy(alias, &aliasdep)
if aliasdep.Git_url != "" {
dep.Git_url = aliasdep.Git_url
}
if aliasdep.Ref != "" {
dep.Ref = aliasdep.Ref
}
}
}
}
if dep.Git_url == "" {
panic(fmt.Errorf("Missing 'git_url' for %q; only git is supported for now", name))
}
if dep.Ref == "" {
dep.Ref = "master"
}
return
}
func (bf *BegottenFile) deps() (out []Dep) {
out = make([]Dep, len(bf.data.Deps))
i := 0
for name, v := range bf.data.Deps {
out[i] = bf.parse_dep(name, v)
i++
}
return
}
func (bf *BegottenFile) set_deps(deps []Dep) {
bf.data.Deps = make(map[string]interface{})
for _, dep := range deps {
bf.data.Deps[dep.name] = dep
}
}
func (bf *BegottenFile) repo_deps() map[string][]string {
if bf.data.Repo_deps == nil {
bf.data.Repo_deps = make(map[string][]string)
}
return bf.data.Repo_deps
}
func (bf *BegottenFile) set_repo_deps(repo_deps map[string][]string) {
bf.data.Repo_deps = repo_deps
}
type Env struct {
Home string
BegotCache string
DepWorkspaceDir string
CodeWorkspaceDir string
RepoDir string
CacheLock string
}
func EnvNew() (env *Env) {
env = new(Env)
env.Home = os.Getenv("HOME")
env.BegotCache = os.Getenv("BEGOT_CACHE")
if env.BegotCache == "" {
env.BegotCache = filepath.Join(env.Home, ".cache", "begot")
}
env.DepWorkspaceDir = filepath.Join(env.BegotCache, "depwk")
env.CodeWorkspaceDir = filepath.Join(env.BegotCache, "wk")
env.RepoDir = filepath.Join(env.BegotCache, "repo")
env.CacheLock = filepath.Join(env.BegotCache, "lock")
return
}
type Builder struct {
env *Env
code_root string
code_wk string
dep_wk string
bf *BegottenFile
deps []Dep
repo_deps map[string][]string
cached_lf_hash string
}
func BuilderNew(env *Env, code_root string, use_lockfile bool) (b *Builder) {
b = new(Builder)
b.env = env
b.code_root = realpath(code_root)
hsh := sha1str(b.code_root)[:8]
b.code_wk = filepath.Join(env.CodeWorkspaceDir, hsh)
b.dep_wk = filepath.Join(env.DepWorkspaceDir, hsh)
var fn string
if use_lockfile {
fn = filepath.Join(b.code_root, BEGOTTEN_LOCK)
} else {
fn = filepath.Join(b.code_root, BEGOTTEN)
}
b.bf = BegottenFileNew(fn)
b.deps = b.bf.deps()
b.repo_deps = b.bf.repo_deps()
return
}
func (b *Builder) | () (out map[string]string) {
out = make(map[string]string)
for _, dep := range b.deps {
out[dep.Git_url] = dep.Ref
}
return
}
func (b *Builder) get_locked_refs_for_update(limits []string) (out map[string]string) {
out = make(map[string]string)
if len(limits) == 0 {
return
}
defer func() {
if err := recover(); err != nil {
panic(fmt.Errorf("You must have a %s to do a limited update.", BEGOTTEN_LOCK))
}
}()
bf_lock := BegottenFileNew(filepath.Join(b.code_root, BEGOTTEN_LOCK))
lock_deps := bf_lock.deps()
lock_repo_deps := bf_lock.repo_deps()
match := func(name string) bool {
for _, limit := range limits {
if matched, err := filepath.Match(limit, name); err != nil {
panic(err)
} else if matched {
return true
}
}
return false
}
repos_to_update := make(map[string]bool)
for _, dep := range lock_deps {
if match(dep.name) {
repos_to_update[dep.Git_url] = true
}
}
// transitive closure
n := -1
for len(repos_to_update) != n {
n = len(repos_to_update)
repos := make([]string, 0, len(repos_to_update))
for repo, _ := range repos_to_update {
repos = append(repos, repo)
}
for _, repo := range repos {
if deps, ok := lock_repo_deps[repo]; ok {
for _, dep := range deps {
repos_to_update[dep] = true
}
}
}
}
for _, dep := range lock_deps {
if !repos_to_update[dep.Git_url] {
out[dep.Git_url] = dep.Ref
}
}
return
}
func (b *Builder) setup_repos(fetch bool, limits []string) *Builder {
processed_deps := 0
repo_versions := make(map[string]string)
var fetched_set map[string]bool
if fetch {
fetched_set = make(map[string]bool)
}
locked_refs := b.get_locked_refs_for_update(limits)
for processed_deps < len(b.deps) {
repos_to_setup := []string{}
for i, dep := range b.deps[processed_deps:] {
have := repo_versions[dep.Git_url]
if fetch &&
strings.HasPrefix(dep.name, IMPLICIT_PREFIX) &&
have != "" {
// Implicit deps take the revision of an explicit dep from the same
// repo, if one exists.
b.deps[processed_deps+i].Ref = have
continue
}
want := locked_refs[dep.Git_url]
if want == "" {
want = b._resolve_ref(dep.Git_url, dep.Ref, fetched_set)
}
if have != "" {
if have != want {
panic(fmt.Errorf("Conflicting versions for %r: have %s, want %s (%s)",
dep.name, have, want, dep.Ref))
}
} else {
repo_versions[dep.Git_url] = want
repos_to_setup = append(repos_to_setup, dep.Git_url)
}
b.deps[processed_deps+i].Ref = want
}
processed_deps = len(b.deps)
// This will add newly-found dependencies to b.deps.
for _, url := range repos_to_setup {
b._setup_repo(url, repo_versions[url])
}
}
return b
}
func (b *Builder) save_lockfile() *Builder {
// Should only be called when loaded from Begotten, not lockfile.
b.bf.set_deps(b.deps)
b.bf.set_repo_deps(b.repo_deps)
b.bf.save(filepath.Join(b.code_root, BEGOTTEN_LOCK))
return b
}
func (b *Builder) _record_repo_dep(src_url, dep_url string) {
if src_url != dep_url {
lst := b.repo_deps[src_url]
if !contains_str(lst, dep_url) {
b.repo_deps[src_url] = append(lst, dep_url)
}
}
}
func (b *Builder) _repo_dir(url string) string {
return filepath.Join(b.env.RepoDir, sha1str(url))
}
var RE_SHA1_HASH = regexp.MustCompile("[[:xdigit:]]{40}")
func (b *Builder) _resolve_ref(url, ref string, fetched_set map[string]bool) (resolved_ref string) {
repo_dir := b._repo_dir(url)
if fi, err := os.Stat(repo_dir); err != nil || !fi.Mode().IsDir() {
fmt.Printf("Cloning %s\n", url)
cc("/", "git", "clone", "-q", url, repo_dir)
// Get into detached head state so we can manipulate things without
// worrying about messing up a branch.
cc(repo_dir, " | _all_repos | identifier_name |
begot.go |
type SortedStringMap yaml.MapSlice
func (sm SortedStringMap) Len() int {
return len(sm)
}
func (sm SortedStringMap) Less(i, j int) bool {
return sm[i].Key.(string) < sm[j].Key.(string)
}
func (sm SortedStringMap) Swap(i, j int) {
sm[i], sm[j] = sm[j], sm[i]
}
func (bf *BegottenFile) save(fn string) {
// We have to sort everything so the output is deterministic. go-yaml
// doesn't write maps in sorted order, so we have to convert them to
// yaml.MapSlices and sort those.
var out struct {
Deps SortedStringMap
Meta struct {
File_version int
Generated_by string
}
Repo_aliases SortedStringMap
Repo_deps SortedStringMap
}
out.Meta.File_version = FILE_VERSION
out.Meta.Generated_by = CODE_VERSION
for k, v := range bf.data.Deps {
dep := v.(Dep)
dep.Import_path = ""
sort.StringSlice(dep.Aliases).Sort()
out.Deps = append(out.Deps, yaml.MapItem{k, dep})
}
sort.Sort(out.Deps)
for k, v := range bf.data.Repo_aliases {
out.Repo_aliases = append(out.Repo_aliases, yaml.MapItem{k, v})
}
sort.Sort(out.Repo_aliases)
for k, v := range bf.data.Repo_deps {
sort.StringSlice(v).Sort()
out.Repo_deps = append(out.Repo_deps, yaml.MapItem{k, v})
}
sort.Sort(out.Repo_deps)
if data, err := yaml.Marshal(out); err != nil {
panic(err)
} else if err := ioutil.WriteFile(fn, data, 0666); err != nil {
panic(err)
}
}
func (bf *BegottenFile) default_git_url_from_repo_path(repo_path string) string {
// Hook for testing:
test_repo_path := os.Getenv("BEGOT_TEST_REPOS")
if strings.HasPrefix(repo_path, "begot.test/") && test_repo_path != "" {
return "file://" + filepath.Join(test_repo_path, repo_path)
}
// Default to https for other repos:
return "https://" + repo_path
}
func (bf *BegottenFile) parse_dep(name string, v interface{}) (dep Dep) {
dep.name = name
if _, ok := v.(string); ok {
v = map[interface{}]interface{}{"import_path": v}
}
mv, ok := v.(map[interface{}]interface{})
if !ok {
panic(fmt.Errorf("Dependency value must be string or dict, got %T: %v", v, v))
}
yaml_copy(mv, &dep)
if dep.Import_path != "" {
parts := strings.Split(dep.Import_path, "/")
if repo_parts, ok := KNOWN_GIT_SERVERS[parts[0]]; !ok {
panic(fmt.Errorf("Unknown git server %r for %r", parts[0], name))
} else {
repo_path := strings.Join(parts[:repo_parts+1], "/")
dep.Git_url = bf.default_git_url_from_repo_path(repo_path)
dep.Subpath = strings.Join(parts[repo_parts+1:], "/")
dep.Aliases = append(dep.Aliases, dep.Import_path)
// Redirect through repo aliases:
if alias, ok := bf.data.Repo_aliases[repo_path]; ok {
var aliasdep Dep // only allow git_url and ref
if aliasstr, ok := alias.(string); ok {
aliasstr = bf.default_git_url_from_repo_path(aliasstr)
alias = yaml.MapSlice{yaml.MapItem{"git_url", aliasstr}}
}
yaml_copy(alias, &aliasdep)
if aliasdep.Git_url != "" {
dep.Git_url = aliasdep.Git_url
}
if aliasdep.Ref != "" {
dep.Ref = aliasdep.Ref
}
}
}
}
if dep.Git_url == "" {
panic(fmt.Errorf("Missing 'git_url' for %q; only git is supported for now", name))
}
if dep.Ref == "" {
dep.Ref = "master"
}
return
}
func (bf *BegottenFile) deps() (out []Dep) {
out = make([]Dep, len(bf.data.Deps))
i := 0
for name, v := range bf.data.Deps {
out[i] = bf.parse_dep(name, v)
i++
}
return
}
func (bf *BegottenFile) set_deps(deps []Dep) {
bf.data.Deps = make(map[string]interface{})
for _, dep := range deps {
bf.data.Deps[dep.name] = dep
}
}
func (bf *BegottenFile) repo_deps() map[string][]string {
if bf.data.Repo_deps == nil {
bf.data.Repo_deps = make(map[string][]string)
}
return bf.data.Repo_deps
}
func (bf *BegottenFile) set_repo_deps(repo_deps map[string][]string) {
bf.data.Repo_deps = repo_deps
}
type Env struct {
Home string
BegotCache string
DepWorkspaceDir string
CodeWorkspaceDir string
RepoDir string
CacheLock string
}
func EnvNew() (env *Env) {
env = new(Env)
env.Home = os.Getenv("HOME")
env.BegotCache = os.Getenv("BEGOT_CACHE")
if env.BegotCache == "" {
env.BegotCache = filepath.Join(env.Home, ".cache", "begot")
}
env.DepWorkspaceDir = filepath.Join(env.BegotCache, "depwk")
env.CodeWorkspaceDir = filepath.Join(env.BegotCache, "wk")
env.RepoDir = filepath.Join(env.BegotCache, "repo")
env.CacheLock = filepath.Join(env.BegotCache, "lock")
return
}
type Builder struct {
env *Env
code_root string
code_wk string
dep_wk string
bf *BegottenFile
deps []Dep
repo_deps map[string][]string
cached_lf_hash string
}
func BuilderNew(env *Env, code_root string, use_lockfile bool) (b *Builder) {
b = new(Builder)
b.env = env
b.code_root = realpath(code_root)
hsh := sha1str(b.code_root)[:8]
b.code_wk = filepath.Join(env.CodeWorkspaceDir, hsh)
b.dep_wk = filepath.Join(env.DepWorkspaceDir, hsh)
var fn string
if use_lockfile {
fn = filepath.Join(b.code_root, BEGOTTEN_LOCK)
} else {
fn = filepath.Join(b.code_root, BEGOTTEN)
}
b.bf = BegottenFileNew(fn)
b.deps = b.bf.deps()
b.repo_deps = b.bf.repo_deps()
return
}
func (b *Builder) _all_repos() (out map[string]string) {
out = make(map[string]string)
for _, dep := range b.deps {
out[dep.Git_url] = dep.Ref
}
return
}
func (b *Builder) get_locked_refs_for_update(limits []string) (out map[string]string) {
out = make(map[string]string)
if len(limits) == 0 {
return
}
defer func() {
if err := recover(); err != nil {
panic(fmt.Errorf("You must have a %s to do a limited update.", BEGOTTEN_LOCK))
}
}()
bf_lock := BegottenFileNew(filepath.Join(b.code_root, BEGOTTEN_LOCK))
lock_deps := bf_lock.deps()
lock_repo_deps := bf_lock.repo_deps()
match := func(name string) bool {
for _, limit := range limits {
if matched, err := filepath.Match(limit, name); err != nil {
panic(err)
} else if matched {
return true
}
}
return false
}
repos_to_update := make(map[string]bool)
for _, dep := range lock_deps {
if match(dep.name) {
repos_to_update[dep.Git_url] = true
}
}
// transitive closure
n := -1
for len(repos_to_update) != n {
n = len(repos_to_update)
repos := make([]string, 0, len(repos_to_update))
for repo, _ := range repos_to_update {
repos = append(repos, repo)
}
for _, repo := range repos {
if deps, ok := lock_repo_deps[repo]; ok {
for _, dep := range deps {
repos_to_update[dep] = true
}
}
}
}
for _, dep := range lock_deps {
if !repos_to_update[dep | {
bf = new(BegottenFile)
bf.data.Meta.File_version = -1
if data, err := ioutil.ReadFile(fn); err != nil {
panic(err)
} else if err := yaml.Unmarshal(data, &bf.data); err != nil {
panic(err)
}
ver := bf.data.Meta.File_version
if ver != -1 && ver != FILE_VERSION {
panic(fmt.Errorf("Incompatible file version for %r; please run 'begot update'.", ver))
}
return
} | identifier_body |
|
begot.go | writes *map[string]bool) string {
if rewrite, ok := (*sub_dep_map)[imp]; ok {
imp = rewrite
(*used_rewrites)[rewrite] = true
} else {
parts := strings.Split(imp, "/")
if _, ok := KNOWN_GIT_SERVERS[parts[0]]; ok {
imp = b._lookup_dep_name(src_url, imp)
}
}
return imp
}
func (b *Builder) _lookup_dep_name(src_url, imp string) string {
for _, dep := range b.deps {
if contains_str(dep.Aliases, imp) {
b._record_repo_dep(src_url, dep.Git_url)
return dep.name
}
}
// Each dep turns into a symlink at build time. Packages can be nested, so we
// might depend on 'a' and 'a/b'. If we create a symlink for 'a', we can't
// also create 'a/b'. So rename it to 'a_b'.
name := IMPLICIT_PREFIX + replace_non_identifier_chars(imp)
dep := b.bf.parse_dep(name, imp)
b.deps = append(b.deps, dep)
b._record_repo_dep(src_url, dep.Git_url)
return name
}
func (b *Builder) _lookup_dep_by_git_url_and_path(git_url string, subpath string) *Dep {
for _, dep := range b.deps {
if dep.Git_url == git_url && dep.Subpath == subpath {
return &dep
}
}
return nil
}
func (b *Builder) tag_repos() {
// Run this after setup_repos.
for url, ref := range b._all_repos() {
out := co(b._repo_dir(url), "git", "tag", "--force", b._tag_hash(ref))
for _, line := range strings.SplitAfter(out, "\n") {
if !strings.HasPrefix(line, "Updated tag ") {
fmt.Print(line)
}
}
}
}
func (b *Builder) _tag_hash(ref string) string {
// We want to tag the current state with a name that depends on:
// 1. The base ref that we rewrote from.
// 2. The full set of deps that describe how we rewrote imports.
// The contents of Begotten.lock suffice for (2):
if b.cached_lf_hash == "" {
lockfile := filepath.Join(b.code_root, BEGOTTEN_LOCK)
if bts, err := ioutil.ReadFile(lockfile); err != nil {
panic(err)
} else {
b.cached_lf_hash = sha1bts(bts)
}
}
return "_begot_rewrote_" + sha1str(ref+b.cached_lf_hash)
}
func (b *Builder) run(args []string) {
b._reset_to_tags()
// Set up code_wk.
cbin := filepath.Join(b.code_wk, "bin")
depsrc := filepath.Join(b.dep_wk, "src")
empty_dep := filepath.Join(depsrc, EMPTY_DEP)
os.MkdirAll(cbin, 0777)
os.MkdirAll(empty_dep, 0777)
if _, err := ln_sf(cbin, filepath.Join(b.code_root, "bin")); err != nil {
panic(fmt.Errorf("It looks like you have an existing 'bin' directory. " +
"Please remove it before using begot."))
}
ln_sf(b.code_root, filepath.Join(b.code_wk, "src"))
old_links := make(map[string]bool)
filepath.Walk(depsrc, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if fi.Mode()&os.ModeType == os.ModeSymlink {
old_links[path] = true
}
return nil
})
for _, dep := range b.deps {
path := filepath.Join(depsrc, dep.name)
target := filepath.Join(b._repo_dir(dep.Git_url), dep.Subpath)
if created, err := ln_sf(target, path); err != nil {
panic(err)
} else if created {
// If we've created or changed this symlink, any pkg files that go may
// have compiled from it should be invalidated.
// Note: This makes some assumptions about go's build layout. It should
// be safe enough, though it may be simpler to just blow away everything
// if any dep symlinks change.
pkgs, _ := filepath.Glob(filepath.Join(b.dep_wk, "pkg", "*", dep.name+".*"))
for _, pkg := range pkgs {
os.RemoveAll(pkg)
}
}
delete(old_links, path)
}
// Remove unexpected links.
for old_link := range old_links {
os.RemoveAll(old_link)
}
// Try to remove all directories; ignore ENOTEMPTY errors.
var dirs []string
filepath.Walk(depsrc, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if fi.IsDir() {
dirs = append(dirs, path)
}
return nil
})
for i := len(dirs) - 1; i >= 0; i-- {
if err := syscall.Rmdir(dirs[i]); err != nil && err != syscall.ENOTEMPTY {
panic(err)
}
}
// Set up empty dep.
//
// The go tool tries to be helpful by not rebuilding modified code if that
// code is in a workspace and no packages from that workspace are mentioned
// on the command line. See cmd/go/pkg.go:isStale around line 680.
//
// We are explicitly managing all of the workspaces in our GOPATH and do
// indeed want to rebuild everything when dependencies change. That is
// required by the goal of reproducible builds: the alternative would mean
// what you get for this build depends on the state of a previous build.
//
// The go tool doesn't provide any way of disabling this "helpful"
// functionality. The simplest workaround is to always mention a package from
// the dependency workspace on the command line. Hence, we add an empty
// package.
empty_go := filepath.Join(empty_dep, "empty.go")
if fi, err := os.Stat(empty_go); err != nil || !fi.Mode().IsRegular() {
os.MkdirAll(filepath.Dir(empty_go), 0777)
if err := ioutil.WriteFile(empty_go, []byte(fmt.Sprintf("package %s\n", EMPTY_DEP)), 0666); err != nil {
panic(err)
}
}
// Overwrite any existing GOPATH.
if argv0, err := exec.LookPath(args[0]); err != nil {
panic(err)
} else {
os.Setenv("GOPATH", fmt.Sprintf("%s:%s", b.code_wk, b.dep_wk))
os.Chdir(b.code_root)
err := syscall.Exec(argv0, args, os.Environ())
panic(fmt.Errorf("exec failed: %s", err))
}
}
func (b *Builder) _reset_to_tags() {
defer func() {
if recover() != nil {
panic(fmt.Errorf("Begotten.lock refers to a missing local commit. " +
"Please run 'begot fetch' first."))
}
}()
for url, ref := range b._all_repos() {
wd := b._repo_dir(url)
if fi, err := os.Stat(wd); err != nil || !fi.Mode().IsDir() {
panic("not directory")
}
cc(wd, "git", "reset", "-q", "--hard", "tags/"+b._tag_hash(ref))
}
}
func (b *Builder) clean() {
os.RemoveAll(b.dep_wk)
os.RemoveAll(b.code_wk)
os.Remove(filepath.Join(b.code_root, "bin"))
}
func get_gopath(env *Env) string {
// This duplicates logic in Builder, but we want to just get the GOPATH without
// parsing anything.
for {
if _, err := os.Stat(BEGOTTEN); err == nil {
break
}
if wd, err := os.Getwd(); err != nil {
panic(err)
} else if wd == "/" {
panic(fmt.Errorf("Couldn't find %s file", BEGOTTEN))
}
if err := os.Chdir(".."); err != nil {
panic(err)
}
}
hsh := sha1str(realpath("."))[:8]
code_wk := filepath.Join(env.CodeWorkspaceDir, hsh)
dep_wk := filepath.Join(env.DepWorkspaceDir, hsh)
return code_wk + ":" + dep_wk
}
var _cache_lock *os.File
func lock_cache(env *Env) {
os.MkdirAll(env.BegotCache, 0777)
_cache_lock, err := os.OpenFile(env.CacheLock, os.O_CREATE|os.O_RDWR, 0666)
if err != nil {
panic(err)
}
err = syscall.Flock(int(_cache_lock.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)
if err != nil | {
panic(fmt.Errorf("Can't lock %r", env.BegotCache))
} | conditional_block |
|
begot.go | _bg.deps() {
b._record_repo_dep(url, sub_dep.Git_url)
our_dep := b._lookup_dep_by_git_url_and_path(sub_dep.Git_url, sub_dep.Subpath)
if our_dep != nil {
if sub_dep.Ref != our_dep.Ref {
panic(fmt.Sprintf("Conflict: %s depends on %s at %s, we depend on it at %s",
url, sub_dep.Git_url, sub_dep.Ref, our_dep.Ref))
}
sub_dep_map[sub_dep.name] = our_dep.name
} else {
// Include a hash of this repo identifier so that if two repos use the
// same dep name to refer to two different things, they don't conflict
// when we flatten deps.
transitive_name := fmt.Sprintf("_begot_transitive_%s/%s", hsh, sub_dep.name)
sub_dep_map[sub_dep.name] = transitive_name
sub_dep.name = transitive_name
b.deps = append(b.deps, sub_dep)
}
}
// Allow relative import paths within this repo.
e := filepath.Walk(repo_dir, func(path string, fi os.FileInfo, err error) error {
basename := filepath.Base(path)
if err != nil {
return err
} else if fi.IsDir() && basename[0] == '.' {
return filepath.SkipDir
} else if path == repo_dir {
return nil
}
relpath := path[len(repo_dir)+1:]
our_dep := b._lookup_dep_by_git_url_and_path(url, relpath)
if our_dep != nil {
sub_dep_map[relpath] = our_dep.name
} else {
// See comment on _lookup_dep_name for rationale.
self_name := fmt.Sprintf("_begot_self_%s/%s", hsh, replace_non_identifier_chars(relpath))
sub_dep_map[relpath] = self_name
self_deps = append(self_deps, Dep{
name: self_name, Git_url: url, Subpath: relpath, Ref: resolved_ref})
}
return nil
})
if e != nil {
panic(e)
}
}
used_rewrites := make(map[string]bool)
b._rewrite_imports(url, repo_dir, &sub_dep_map, &used_rewrites)
msg := fmt.Sprintf("rewritten by begot for %s", b.code_root)
cc(repo_dir, "git", "commit", "--allow-empty", "-a", "-q", "-m", msg)
// Add only the self-deps that were used, to reduce clutter.
for _, self_dep := range self_deps {
if used_rewrites[self_dep.name] {
b.deps = append(b.deps, self_dep)
}
}
}
func (b *Builder) _rewrite_imports(src_url, repo_dir string, sub_dep_map *map[string]string, used_rewrites *map[string]bool) {
filepath.Walk(repo_dir, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if strings.HasSuffix(path, ".go") {
b._rewrite_file(src_url, path, sub_dep_map, used_rewrites)
}
return nil
})
}
func (b *Builder) _rewrite_file(src_url, path string, sub_dep_map *map[string]string, used_rewrites *map[string]bool) {
bts, err := ioutil.ReadFile(path)
if err != nil {
panic(err)
}
fs := token.NewFileSet()
f, err := parser.ParseFile(fs, path, bts, parser.ImportsOnly)
if err != nil {
panic(err)
}
var pos int
var out bytes.Buffer
out.Grow(len(bts) * 5 / 4)
for _, imp := range f.Imports {
start := fs.Position(imp.Path.Pos()).Offset
end := fs.Position(imp.Path.End()).Offset
orig_import := string(bts[start+1 : end-1])
rewritten := b._rewrite_import(src_url, orig_import, sub_dep_map, used_rewrites)
if orig_import != rewritten {
out.Write(bts[pos : start+1])
out.WriteString(rewritten)
pos = end - 1
}
}
out.Write(bts[pos:])
if err := ioutil.WriteFile(path, out.Bytes(), 0666); err != nil {
panic(err)
}
}
func (b *Builder) _rewrite_import(src_url, imp string, sub_dep_map *map[string]string, used_rewrites *map[string]bool) string {
if rewrite, ok := (*sub_dep_map)[imp]; ok {
imp = rewrite
(*used_rewrites)[rewrite] = true
} else {
parts := strings.Split(imp, "/")
if _, ok := KNOWN_GIT_SERVERS[parts[0]]; ok {
imp = b._lookup_dep_name(src_url, imp)
}
}
return imp
}
func (b *Builder) _lookup_dep_name(src_url, imp string) string {
for _, dep := range b.deps {
if contains_str(dep.Aliases, imp) {
b._record_repo_dep(src_url, dep.Git_url)
return dep.name
}
}
// Each dep turns into a symlink at build time. Packages can be nested, so we
// might depend on 'a' and 'a/b'. If we create a symlink for 'a', we can't
// also create 'a/b'. So rename it to 'a_b'.
name := IMPLICIT_PREFIX + replace_non_identifier_chars(imp)
dep := b.bf.parse_dep(name, imp)
b.deps = append(b.deps, dep)
b._record_repo_dep(src_url, dep.Git_url)
return name
}
func (b *Builder) _lookup_dep_by_git_url_and_path(git_url string, subpath string) *Dep {
for _, dep := range b.deps {
if dep.Git_url == git_url && dep.Subpath == subpath {
return &dep
}
}
return nil
}
func (b *Builder) tag_repos() {
// Run this after setup_repos.
for url, ref := range b._all_repos() {
out := co(b._repo_dir(url), "git", "tag", "--force", b._tag_hash(ref))
for _, line := range strings.SplitAfter(out, "\n") {
if !strings.HasPrefix(line, "Updated tag ") {
fmt.Print(line)
}
}
}
}
func (b *Builder) _tag_hash(ref string) string {
// We want to tag the current state with a name that depends on:
// 1. The base ref that we rewrote from.
// 2. The full set of deps that describe how we rewrote imports.
// The contents of Begotten.lock suffice for (2):
if b.cached_lf_hash == "" {
lockfile := filepath.Join(b.code_root, BEGOTTEN_LOCK)
if bts, err := ioutil.ReadFile(lockfile); err != nil {
panic(err)
} else {
b.cached_lf_hash = sha1bts(bts)
}
}
return "_begot_rewrote_" + sha1str(ref+b.cached_lf_hash)
}
func (b *Builder) run(args []string) {
b._reset_to_tags()
// Set up code_wk.
cbin := filepath.Join(b.code_wk, "bin")
depsrc := filepath.Join(b.dep_wk, "src")
empty_dep := filepath.Join(depsrc, EMPTY_DEP)
os.MkdirAll(cbin, 0777)
os.MkdirAll(empty_dep, 0777)
if _, err := ln_sf(cbin, filepath.Join(b.code_root, "bin")); err != nil {
panic(fmt.Errorf("It looks like you have an existing 'bin' directory. " +
"Please remove it before using begot."))
}
ln_sf(b.code_root, filepath.Join(b.code_wk, "src"))
old_links := make(map[string]bool)
filepath.Walk(depsrc, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if fi.Mode()&os.ModeType == os.ModeSymlink {
old_links[path] = true
}
return nil
})
for _, dep := range b.deps {
path := filepath.Join(depsrc, dep.name)
target := filepath.Join(b._repo_dir(dep.Git_url), dep.Subpath)
if created, err := ln_sf(target, path); err != nil {
panic(err)
} else if created {
// If we've created or changed this symlink, any pkg files that go may
// have compiled from it should be invalidated.
// Note: This makes some assumptions about go's build layout. It should
// be safe enough, though it may be simpler to just blow away everything
// if any dep symlinks change.
pkgs, _ := filepath.Glob(filepath.Join(b.dep_wk, "pkg", "*", dep.name+".*"))
for _, pkg := range pkgs {
os.RemoveAll(pkg) | }
}
delete(old_links, path)
} | random_line_split |
|
com.rs | {
/// Creates a new instance.
pub fn new(id: u32) -> Self { Id(CoreId::new(id), marker::PhantomData) }
/// Creates an empty instance.
pub fn empty() -> Self { Self::new(std::u32::MAX - 2) }
/// Returns whether the corresponding list is empty.
pub fn is_empty(&self) -> bool { *self == Self::empty() }
/// Returns the inner ID.
pub fn value(&self) -> u32 { self.0.raw() }
}
impl<T: ?Sized> Clone for Id<T> {
fn clone(&self) -> Self { *self }
}
impl<T: ?Sized> Copy for Id<T> {}
impl<T: ?Sized> fmt::Debug for Id<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
const MODULE_OFFSET: usize = 1usize << 30;
const REPOSITORY_OFFSET: usize = 1usize << 31;
// More compact representation for `{:#?}`.
//
// FIXME(matthieum): consider adding `std::intrinsics::type_name<T>()`
// once it stabilizes.
if *self == Default::default() {
write!(f, "Id(default)")
} else if *self == Self::empty() {
write!(f, "Id(empty)")
} else {
match self.index() {
index if index < MODULE_OFFSET =>
write!(f, "Id({})", index),
index if index < REPOSITORY_OFFSET =>
write!(f, "Id(M-{})", index - MODULE_OFFSET),
index =>
write!(f, "Id(R-{})", index - REPOSITORY_OFFSET),
}
}
}
}
impl<T: ?Sized> Default for Id<T> {
fn default() -> Self { Id(Default::default(), marker::PhantomData) }
}
impl<T: ?Sized> cmp::Eq for Id<T> {}
impl<T: ?Sized> hash::Hash for Id<T> {
fn hash<H: hash::Hasher>(&self, state: &mut H) {
self.0.hash(state);
}
}
impl<T: ?Sized> cmp::Ord for Id<T> {
fn cmp(&self, other: &Self) -> cmp::Ordering { self.0.cmp(&other.0) }
}
impl<T: ?Sized> cmp::PartialEq for Id<T> {
fn eq(&self, other: &Self) -> bool { self.0.eq(&other.0) }
}
impl<T: ?Sized> cmp::PartialOrd for Id<T> {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
self.0.partial_cmp(&other.0)
}
}
impl<T: ?Sized> TableIndex for Id<T> {
fn from_index(index: usize) -> Self { Id::new(index as u32) }
fn index(&self) -> usize { self.value() as usize }
}
/// IdIterator.
///
/// An Iterator over consecutive IDs.
// #[manual(Clone, Copy, Debug, Default, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub struct IdIterator<T: ?Sized> {
start: u32,
end: u32,
_marker: marker::PhantomData<*const T>,
}
impl<T: ?Sized> IdIterator<T> {
/// Creates an instance.
pub fn new(start: u32, end: u32) -> Self {
IdIterator { start, end, _marker: marker::PhantomData }
}
}
impl<T: ?Sized> Clone for IdIterator<T> {
fn clone(&self) -> Self { *self }
}
impl<T: ?Sized> Copy for IdIterator<T> {}
impl<T: ?Sized> fmt::Debug for IdIterator<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
// FIXME(matthieum): consider adding `std::intrinsics::type_name<T>()`
// once it stabilizes.
write!(f, "IdIterator({}, {})", self.start, self.end)
}
}
impl<T: ?Sized> Default for IdIterator<T> {
fn default() -> Self { IdIterator::new(0, 0) }
}
impl<T: ?Sized> cmp::Eq for IdIterator<T> {}
impl<T: ?Sized> hash::Hash for IdIterator<T> {
fn hash<H: hash::Hasher>(&self, state: &mut H) {
self.start.hash(state);
self.end.hash(state);
}
}
impl<T: ?Sized> iter::Iterator for IdIterator<T> {
type Item = Id<T>;
fn next(&mut self) -> Option<Id<T>> {
if self.start < self.end {
let result = Id::new(self.start);
self.start += 1;
Some(result)
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let difference = self.len();
(difference, Some(difference))
}
fn count(self) -> usize { self.len() }
fn last(self) -> Option<Id<T>> {
if self.start < self.end {
Some(Id::new(self.end - 1))
} else {
None
}
}
fn nth(&mut self, n: usize) -> Option<Id<T>> {
let result = self.start.saturating_add(n as u32);
if result < self.end {
self.start = result + 1;
Some(Id::new(result))
} else {
self.start = self.end;
None
}
}
fn max(self) -> Option<Id<T>> { self.last() }
fn min(mut self) -> Option<Id<T>> { self.next() }
}
impl<T: ?Sized> iter::DoubleEndedIterator for IdIterator<T> {
fn next_back(&mut self) -> Option<Id<T>> {
if self.start < self.end {
self.end -= 1;
Some(Id::new(self.end))
} else {
None
}
}
}
impl<T: ?Sized> iter::ExactSizeIterator for IdIterator<T> {
fn len(&self) -> usize {
self.end.saturating_sub(self.start) as usize
}
}
impl<T: ?Sized> cmp::Ord for IdIterator<T> {
fn cmp(&self, other: &Self) -> cmp::Ordering {
(self.start, self.end).cmp(&(other.start, other.end))
}
}
impl<T: ?Sized> cmp::PartialEq for IdIterator<T> {
fn eq(&self, other: &Self) -> bool {
(self.start, self.end).eq(&(other.start, other.end))
}
}
impl<T: ?Sized> cmp::PartialOrd for IdIterator<T> {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
(self.start, self.end).partial_cmp(&(other.start, other.end))
}
}
/// A Range represents a start and end position in a buffer.
///
/// Note: the `Range` does not know which buffer it indexes in.
///
/// Note: a `Range` cannot index past 4GB.
#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub struct Range {
offset: u32,
length: u32,
}
impl Range {
/// Creates a new `Range` from a start position and length.
///
/// In Debug, it is checked that the end position will not exceed 4GB.
pub fn new(offset: usize, length: usize) -> Range {
debug_assert!(offset <= std::u32::MAX as usize);
debug_assert!(length <= std::u32::MAX as usize);
debug_assert!(offset <= (std::u32::MAX as usize - length));
Range { offset: offset as u32, length: length as u32 }
}
/// Creates a new `Range` from a start and end position.
///
/// As the name implies, this creates a half-open range, similar to `start..end`.
pub fn half_open(start: u32, end: u32) -> Range {
debug_assert!(start <= end);
Range { offset: start, length: end - start }
}
/// Returns the start position of the range.
pub fn offset(self) -> usize { self.offset as usize }
/// Returns the end position of the range (excluded).
pub fn end_offset(self) -> usize { self.offset() + self.length() }
/// Returns the length of the range.
pub fn length(self) -> usize { self.length as usize }
/// Shifts range to the left.
pub fn shift_left(self, n: usize) -> Range {
self.shift_to(self.offset() - n)
}
/// Shifts range to the right.
pub fn shift_right(self, n: usize) -> Range {
self.shift_to(self.offset() + n)
}
| /// Shifts range to specified offset.
pub fn shift_to(self, offset: usize) -> Range {
Range { offset: offset as u32, ..self } | random_line_split |
|
com.rs | (&self) -> u32 { self.0.get() - 1 }
}
impl fmt::Debug for CoreId {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "{}", self.raw())
}
}
impl Default for CoreId {
fn default() -> CoreId {
unsafe { CoreId(num::NonZeroU32::new_unchecked(std::u32::MAX)) }
}
}
impl fmt::Display for CoreId {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "{}", self.raw())
}
}
impl convert::From<CoreId> for u32 {
fn from(core_id: CoreId) -> u32 { core_id.raw() }
}
/// An Id implementation based on CoreId.
///
/// It contains a default empty state, to represent empty streams.
// #[manual(Clone, Copy, Debug, Default, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub struct Id<T: ?Sized>(CoreId, marker::PhantomData<*const T>);
impl<T: ?Sized> Id<T> {
/// Creates a new instance.
pub fn new(id: u32) -> Self { Id(CoreId::new(id), marker::PhantomData) }
/// Creates an empty instance.
pub fn empty() -> Self { Self::new(std::u32::MAX - 2) }
/// Returns whether the corresponding list is empty.
pub fn is_empty(&self) -> bool { *self == Self::empty() }
/// Returns the inner ID.
pub fn value(&self) -> u32 { self.0.raw() }
}
impl<T: ?Sized> Clone for Id<T> {
fn clone(&self) -> Self { *self }
}
impl<T: ?Sized> Copy for Id<T> {}
impl<T: ?Sized> fmt::Debug for Id<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
const MODULE_OFFSET: usize = 1usize << 30;
const REPOSITORY_OFFSET: usize = 1usize << 31;
// More compact representation for `{:#?}`.
//
// FIXME(matthieum): consider adding `std::intrinsics::type_name<T>()`
// once it stabilizes.
if *self == Default::default() {
write!(f, "Id(default)")
} else if *self == Self::empty() {
write!(f, "Id(empty)")
} else {
match self.index() {
index if index < MODULE_OFFSET =>
write!(f, "Id({})", index),
index if index < REPOSITORY_OFFSET =>
write!(f, "Id(M-{})", index - MODULE_OFFSET),
index =>
write!(f, "Id(R-{})", index - REPOSITORY_OFFSET),
}
}
}
}
impl<T: ?Sized> Default for Id<T> {
fn default() -> Self { Id(Default::default(), marker::PhantomData) }
}
impl<T: ?Sized> cmp::Eq for Id<T> {}
impl<T: ?Sized> hash::Hash for Id<T> {
fn hash<H: hash::Hasher>(&self, state: &mut H) {
self.0.hash(state);
}
}
impl<T: ?Sized> cmp::Ord for Id<T> {
fn cmp(&self, other: &Self) -> cmp::Ordering { self.0.cmp(&other.0) }
}
impl<T: ?Sized> cmp::PartialEq for Id<T> {
fn eq(&self, other: &Self) -> bool { self.0.eq(&other.0) }
}
impl<T: ?Sized> cmp::PartialOrd for Id<T> {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
self.0.partial_cmp(&other.0)
}
}
impl<T: ?Sized> TableIndex for Id<T> {
fn from_index(index: usize) -> Self { Id::new(index as u32) }
fn index(&self) -> usize { self.value() as usize }
}
/// IdIterator.
///
/// An Iterator over consecutive IDs.
// #[manual(Clone, Copy, Debug, Default, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub struct IdIterator<T: ?Sized> {
start: u32,
end: u32,
_marker: marker::PhantomData<*const T>,
}
impl<T: ?Sized> IdIterator<T> {
/// Creates an instance.
pub fn new(start: u32, end: u32) -> Self {
IdIterator { start, end, _marker: marker::PhantomData }
}
}
impl<T: ?Sized> Clone for IdIterator<T> {
fn clone(&self) -> Self { *self }
}
impl<T: ?Sized> Copy for IdIterator<T> {}
impl<T: ?Sized> fmt::Debug for IdIterator<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
// FIXME(matthieum): consider adding `std::intrinsics::type_name<T>()`
// once it stabilizes.
write!(f, "IdIterator({}, {})", self.start, self.end)
}
}
impl<T: ?Sized> Default for IdIterator<T> {
fn default() -> Self { IdIterator::new(0, 0) }
}
impl<T: ?Sized> cmp::Eq for IdIterator<T> {}
impl<T: ?Sized> hash::Hash for IdIterator<T> {
fn hash<H: hash::Hasher>(&self, state: &mut H) {
self.start.hash(state);
self.end.hash(state);
}
}
impl<T: ?Sized> iter::Iterator for IdIterator<T> {
type Item = Id<T>;
fn next(&mut self) -> Option<Id<T>> {
if self.start < self.end {
let result = Id::new(self.start);
self.start += 1;
Some(result)
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let difference = self.len();
(difference, Some(difference))
}
fn count(self) -> usize { self.len() }
fn last(self) -> Option<Id<T>> {
if self.start < self.end {
Some(Id::new(self.end - 1))
} else {
None
}
}
fn nth(&mut self, n: usize) -> Option<Id<T>> {
let result = self.start.saturating_add(n as u32);
if result < self.end {
self.start = result + 1;
Some(Id::new(result))
} else {
self.start = self.end;
None
}
}
fn max(self) -> Option<Id<T>> { self.last() }
fn min(mut self) -> Option<Id<T>> { self.next() }
}
impl<T: ?Sized> iter::DoubleEndedIterator for IdIterator<T> {
fn next_back(&mut self) -> Option<Id<T>> {
if self.start < self.end {
self.end -= 1;
Some(Id::new(self.end))
} else {
None
}
}
}
impl<T: ?Sized> iter::ExactSizeIterator for IdIterator<T> {
fn len(&self) -> usize {
self.end.saturating_sub(self.start) as usize
}
}
impl<T: ?Sized> cmp::Ord for IdIterator<T> {
fn cmp(&self, other: &Self) -> cmp::Ordering {
(self.start, self.end).cmp(&(other.start, other.end))
}
}
impl<T: ?Sized> cmp::PartialEq for IdIterator<T> {
fn eq(&self, other: &Self) -> bool {
(self.start, self.end).eq(&(other.start, other.end))
}
}
impl<T: ?Sized> cmp::PartialOrd for IdIterator<T> {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
(self.start, self.end).partial_cmp(&(other.start, other.end))
}
}
/// A Range represents a start and end position in a buffer.
///
/// Note: the `Range` does not know which buffer it indexes in.
///
/// Note: a `Range` cannot index past 4GB.
#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub struct Range {
offset: u32,
length: u32,
}
impl Range {
/// Creates a new `Range` from a start position and length.
///
/// In Debug, it is checked that the end position will not exceed 4GB.
pub fn new(offset: usize, length: usize) -> Range {
debug_assert!(offset <= std::u32::MAX as usize);
debug_assert!(length <= std::u32::MAX as usize);
debug_assert!(offset <= (std::u32::MAX as usize - length));
Range { offset: offset as u32, length: length as u32 }
}
/// Creates a new `Range` | raw | identifier_name |
|
com.rs | Iterator for IdIterator<T> {
fn len(&self) -> usize {
self.end.saturating_sub(self.start) as usize
}
}
impl<T: ?Sized> cmp::Ord for IdIterator<T> {
fn cmp(&self, other: &Self) -> cmp::Ordering {
(self.start, self.end).cmp(&(other.start, other.end))
}
}
impl<T: ?Sized> cmp::PartialEq for IdIterator<T> {
fn eq(&self, other: &Self) -> bool {
(self.start, self.end).eq(&(other.start, other.end))
}
}
impl<T: ?Sized> cmp::PartialOrd for IdIterator<T> {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
(self.start, self.end).partial_cmp(&(other.start, other.end))
}
}
/// A Range represents a start and end position in a buffer.
///
/// Note: the `Range` does not know which buffer it indexes in.
///
/// Note: a `Range` cannot index past 4GB.
#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub struct Range {
offset: u32,
length: u32,
}
impl Range {
/// Creates a new `Range` from a start position and length.
///
/// In Debug, it is checked that the end position will not exceed 4GB.
pub fn new(offset: usize, length: usize) -> Range {
debug_assert!(offset <= std::u32::MAX as usize);
debug_assert!(length <= std::u32::MAX as usize);
debug_assert!(offset <= (std::u32::MAX as usize - length));
Range { offset: offset as u32, length: length as u32 }
}
/// Creates a new `Range` from a start and end position.
///
/// As the name implies, this creates a half-open range, similar to `start..end`.
pub fn half_open(start: u32, end: u32) -> Range {
debug_assert!(start <= end);
Range { offset: start, length: end - start }
}
/// Returns the start position of the range.
pub fn offset(self) -> usize { self.offset as usize }
/// Returns the end position of the range (excluded).
pub fn end_offset(self) -> usize { self.offset() + self.length() }
/// Returns the length of the range.
pub fn length(self) -> usize { self.length as usize }
/// Shifts range to the left.
pub fn shift_left(self, n: usize) -> Range {
self.shift_to(self.offset() - n)
}
/// Shifts range to the right.
pub fn shift_right(self, n: usize) -> Range {
self.shift_to(self.offset() + n)
}
/// Shifts range to specified offset.
pub fn shift_to(self, offset: usize) -> Range {
Range { offset: offset as u32, ..self }
}
/// Skips n from the left.
pub fn skip_left(self, n: usize) -> Range {
Range {
offset: self.offset + (n as u32),
length: self.length - (n as u32),
}
}
/// Skips n from the right.
pub fn skip_right(self, n: usize) -> Range {
Range {
offset: self.offset,
length: self.length - (n as u32),
}
}
/// Extend one range with another, the resulting range spans both ranges,
/// and in the case they were discontiguous also spans the interval.
pub fn extend(self, other: Range) -> Range {
if self.offset > other.offset {
other.extend(self)
} else if self.end_offset() >= other.end_offset() {
self
} else {
Range {
offset: self.offset,
length: (other.end_offset() - self.offset()) as u32
}
}
}
}
impl fmt::Debug for Range {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "{}@{}", self.length, self.offset)
}
}
impl Default for Range {
fn default() -> Range { Range::new(0, 0) }
}
impl fmt::Display for Range {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "{}@{}", self.length, self.offset)
}
}
impl ops::Index<Range> for [u8] {
type Output = [u8];
fn index(&self, index: Range) -> &[u8] {
&self[index.offset()..index.end_offset()]
}
}
/// A Slice of bytes, printed more pleasantly
#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub struct Slice<'a>(pub &'a [u8]);
impl<'a> Slice<'a> {
/// Returns true if empty, false otherwise.
pub fn is_empty(&self) -> bool { self.0.is_empty() }
/// Returns the length of the slice.
pub fn len(&self) -> usize { self.0.len() }
/// Returns the byte at the indicated position, or None if it is invalid.
pub fn get(&self, pos: usize) -> Option<&u8> { self.0.get(pos) }
}
impl<'a> fmt::Debug for Slice<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "{}", self)
}
}
impl<'a> fmt::Display for Slice<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let mut start = 0;
while start < self.0.len() {
let end =
self.0[start..].iter().position(|&b| b < 32 || b > 126)
.unwrap_or(self.len());
f.write_str(
std::str::from_utf8(&self.0[start..end]).expect("Valid UTF-8")
)?;
start = end;
let end =
self.0[start..].iter().position(|&b| b >= 32 && b <= 126)
.unwrap_or(self.len());
for &byte in &self.0[start..end] {
write!(f, "{{0x{:X}}}", byte)?;
}
start = end;
}
Ok(())
}
}
/// Span
pub trait Span {
/// Returns the Range spanned by the element.
fn span(&self) -> Range;
}
/// A Store trait, to abstract over the actual storage of individual elements.
pub trait Store<T, I = Id<T>> {
/// Returns the number of items.
fn len(&self) -> usize;
/// Returns a copy of the item.
fn get(&self, id: I) -> T;
/// Returns the range of the item.
fn get_range(&self, id: I) -> Range;
/// Pushes an item.
fn push(&mut self, item: T, range: Range) -> I;
}
/// A MultiStore trait, to abstract over the actual storage of slices.
pub trait MultiStore<T, I = Id<[T]>> {
/// Returns the slice of items.
fn get_slice(&self, id: I) -> &[T];
// TODO(matthieum): A more efficient interface would take IntoIterator<Item = T>
/// Pushes a slice of element.
fn push_slice(&mut self, items: &[T]) -> I;
}
//
// Tests
//
#[cfg(test)]
mod tests {
use super::{CoreId, Range};
#[test]
fn core_id_roundtrip() {
for i in 0..10 {
assert_eq!(i, CoreId::new(i).raw());
}
}
#[test]
fn core_id_default() {
let core: CoreId = Default::default();
assert_eq!(std::u32::MAX - 1, core.raw());
}
#[test]
#[should_panic]
fn core_id_reserved_size_optimization() { CoreId::new(std::u32::MAX); }
#[test]
fn range_extend_contiguous() {
let result = Range::new(3, 4).extend(Range::new(7, 2));
assert_eq!(result, Range::new(3, 6));
}
#[test]
fn range_extend_separated() {
let result = Range::new(3, 4).extend(Range::new(11, 3));
assert_eq!(result, Range::new(3, 11));
}
#[test]
fn range_extend_partially_overlapping() {
let result = Range::new(3, 4).extend(Range::new(5, 3));
assert_eq!(result, Range::new(3, 5));
}
#[test]
fn range_extend_totally_overlapping() | {
let result = Range::new(3, 4).extend(Range::new(5, 2));
assert_eq!(result, Range::new(3, 4));
} | identifier_body |
|
com.rs | fmt::Formatter) -> Result<(), fmt::Error> {
const MODULE_OFFSET: usize = 1usize << 30;
const REPOSITORY_OFFSET: usize = 1usize << 31;
// More compact representation for `{:#?}`.
//
// FIXME(matthieum): consider adding `std::intrinsics::type_name<T>()`
// once it stabilizes.
if *self == Default::default() {
write!(f, "Id(default)")
} else if *self == Self::empty() {
write!(f, "Id(empty)")
} else {
match self.index() {
index if index < MODULE_OFFSET =>
write!(f, "Id({})", index),
index if index < REPOSITORY_OFFSET =>
write!(f, "Id(M-{})", index - MODULE_OFFSET),
index =>
write!(f, "Id(R-{})", index - REPOSITORY_OFFSET),
}
}
}
}
impl<T: ?Sized> Default for Id<T> {
fn default() -> Self { Id(Default::default(), marker::PhantomData) }
}
impl<T: ?Sized> cmp::Eq for Id<T> {}
impl<T: ?Sized> hash::Hash for Id<T> {
fn hash<H: hash::Hasher>(&self, state: &mut H) {
self.0.hash(state);
}
}
impl<T: ?Sized> cmp::Ord for Id<T> {
fn cmp(&self, other: &Self) -> cmp::Ordering { self.0.cmp(&other.0) }
}
impl<T: ?Sized> cmp::PartialEq for Id<T> {
fn eq(&self, other: &Self) -> bool { self.0.eq(&other.0) }
}
impl<T: ?Sized> cmp::PartialOrd for Id<T> {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
self.0.partial_cmp(&other.0)
}
}
impl<T: ?Sized> TableIndex for Id<T> {
fn from_index(index: usize) -> Self { Id::new(index as u32) }
fn index(&self) -> usize { self.value() as usize }
}
/// IdIterator.
///
/// An Iterator over consecutive IDs.
// #[manual(Clone, Copy, Debug, Default, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub struct IdIterator<T: ?Sized> {
start: u32,
end: u32,
_marker: marker::PhantomData<*const T>,
}
impl<T: ?Sized> IdIterator<T> {
/// Creates an instance.
pub fn new(start: u32, end: u32) -> Self {
IdIterator { start, end, _marker: marker::PhantomData }
}
}
impl<T: ?Sized> Clone for IdIterator<T> {
fn clone(&self) -> Self { *self }
}
impl<T: ?Sized> Copy for IdIterator<T> {}
impl<T: ?Sized> fmt::Debug for IdIterator<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
// FIXME(matthieum): consider adding `std::intrinsics::type_name<T>()`
// once it stabilizes.
write!(f, "IdIterator({}, {})", self.start, self.end)
}
}
impl<T: ?Sized> Default for IdIterator<T> {
fn default() -> Self { IdIterator::new(0, 0) }
}
impl<T: ?Sized> cmp::Eq for IdIterator<T> {}
impl<T: ?Sized> hash::Hash for IdIterator<T> {
fn hash<H: hash::Hasher>(&self, state: &mut H) {
self.start.hash(state);
self.end.hash(state);
}
}
impl<T: ?Sized> iter::Iterator for IdIterator<T> {
type Item = Id<T>;
fn next(&mut self) -> Option<Id<T>> {
if self.start < self.end {
let result = Id::new(self.start);
self.start += 1;
Some(result)
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let difference = self.len();
(difference, Some(difference))
}
fn count(self) -> usize { self.len() }
fn last(self) -> Option<Id<T>> {
if self.start < self.end {
Some(Id::new(self.end - 1))
} else {
None
}
}
fn nth(&mut self, n: usize) -> Option<Id<T>> {
let result = self.start.saturating_add(n as u32);
if result < self.end {
self.start = result + 1;
Some(Id::new(result))
} else {
self.start = self.end;
None
}
}
fn max(self) -> Option<Id<T>> { self.last() }
fn min(mut self) -> Option<Id<T>> { self.next() }
}
impl<T: ?Sized> iter::DoubleEndedIterator for IdIterator<T> {
fn next_back(&mut self) -> Option<Id<T>> {
if self.start < self.end {
self.end -= 1;
Some(Id::new(self.end))
} else {
None
}
}
}
impl<T: ?Sized> iter::ExactSizeIterator for IdIterator<T> {
fn len(&self) -> usize {
self.end.saturating_sub(self.start) as usize
}
}
impl<T: ?Sized> cmp::Ord for IdIterator<T> {
fn cmp(&self, other: &Self) -> cmp::Ordering {
(self.start, self.end).cmp(&(other.start, other.end))
}
}
impl<T: ?Sized> cmp::PartialEq for IdIterator<T> {
fn eq(&self, other: &Self) -> bool {
(self.start, self.end).eq(&(other.start, other.end))
}
}
impl<T: ?Sized> cmp::PartialOrd for IdIterator<T> {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
(self.start, self.end).partial_cmp(&(other.start, other.end))
}
}
/// A Range represents a start and end position in a buffer.
///
/// Note: the `Range` does not know which buffer it indexes in.
///
/// Note: a `Range` cannot index past 4GB.
#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub struct Range {
offset: u32,
length: u32,
}
impl Range {
/// Creates a new `Range` from a start position and length.
///
/// In Debug, it is checked that the end position will not exceed 4GB.
pub fn new(offset: usize, length: usize) -> Range {
debug_assert!(offset <= std::u32::MAX as usize);
debug_assert!(length <= std::u32::MAX as usize);
debug_assert!(offset <= (std::u32::MAX as usize - length));
Range { offset: offset as u32, length: length as u32 }
}
/// Creates a new `Range` from a start and end position.
///
/// As the name implies, this creates a half-open range, similar to `start..end`.
pub fn half_open(start: u32, end: u32) -> Range {
debug_assert!(start <= end);
Range { offset: start, length: end - start }
}
/// Returns the start position of the range.
pub fn offset(self) -> usize { self.offset as usize }
/// Returns the end position of the range (excluded).
pub fn end_offset(self) -> usize { self.offset() + self.length() }
/// Returns the length of the range.
pub fn length(self) -> usize { self.length as usize }
/// Shifts range to the left.
pub fn shift_left(self, n: usize) -> Range {
self.shift_to(self.offset() - n)
}
/// Shifts range to the right.
pub fn shift_right(self, n: usize) -> Range {
self.shift_to(self.offset() + n)
}
/// Shifts range to specified offset.
pub fn shift_to(self, offset: usize) -> Range {
Range { offset: offset as u32, ..self }
}
/// Skips n from the left.
pub fn skip_left(self, n: usize) -> Range {
Range {
offset: self.offset + (n as u32),
length: self.length - (n as u32),
}
}
/// Skips n from the right.
pub fn skip_right(self, n: usize) -> Range {
Range {
offset: self.offset,
length: self.length - (n as u32),
}
}
/// Extend one range with another, the resulting range spans both ranges,
/// and in the case they were discontiguous also spans the interval.
pub fn extend(self, other: Range) -> Range {
if self.offset > other.offset {
other.extend(self)
} else if self.end_offset() >= other.end_offset() | {
self
} | conditional_block |
|
circuit.go | to this configuration are not reflected by the circuit.
// In other words, this creates a copy.
func (c *Circuit) Config() Config {
c.notThreadSafeConfigMu.Lock()
defer c.notThreadSafeConfigMu.Unlock()
return c.notThreadSafeConfig
}
// SetConfigNotThreadSafe is only useful during construction before a circuit is being used. It is not thread safe,
// but will modify all the circuit's internal structs to match what the config wants. It also doe *NOT* use the
// default configuration parameters.
func (c *Circuit) SetConfigNotThreadSafe(config Config) {
c.notThreadSafeConfigMu.Lock()
// Set, but do not reference this config inside this function, since that would not be thread safe (no mu protection)
c.notThreadSafeConfig = config
c.notThreadSafeConfigMu.Unlock()
c.goroutineWrapper.lostErrors = config.General.GoLostErrors
c.timeNow = config.General.TimeKeeper.Now
c.OpenToClose = config.General.OpenToClosedFactory()
c.ClosedToOpen = config.General.ClosedToOpenFactory()
if cfg, ok := c.OpenToClose.(Configurable); ok {
cfg.SetConfigNotThreadSafe(config)
}
if cfg, ok := c.ClosedToOpen.(Configurable); ok {
cfg.SetConfigNotThreadSafe(config)
}
c.CmdMetricCollector = append(
make([]RunMetrics, 0, len(config.Metrics.Run)+2),
c.OpenToClose,
c.ClosedToOpen)
c.CmdMetricCollector = append(c.CmdMetricCollector, config.Metrics.Run...)
c.FallbackMetricCollector = append(
make([]FallbackMetrics, 0, len(config.Metrics.Fallback)+2),
config.Metrics.Fallback...)
c.CircuitMetricsCollector = append(
make([]Metrics, 0, len(config.Metrics.Circuit)+2),
c.OpenToClose,
c.ClosedToOpen)
c.CircuitMetricsCollector = append(c.CircuitMetricsCollector, config.Metrics.Circuit...)
c.SetConfigThreadSafe(config)
}
func (c *Circuit) now() time.Time {
return c.timeNow()
}
// Var exports that help diagnose the circuit
func (c *Circuit) Var() expvar.Var {
return expvar.Func(func() interface{} {
if c == nil {
return nil
}
ret := map[string]interface{}{
"config": c.Config(),
"is_open": c.IsOpen(),
"name": c.Name(),
"run_metrics": expvarToVal(c.CmdMetricCollector.Var()),
"concurrent_commands": c.ConcurrentCommands(),
"concurrent_fallbacks": c.ConcurrentFallbacks(),
"closer": c.OpenToClose,
"opener": c.ClosedToOpen,
"fallback_metrics": expvarToVal(c.FallbackMetricCollector.Var()),
}
return ret
})
}
// Name of this circuit
func (c *Circuit) Name() string {
if c == nil {
return ""
}
return c.name
}
// IsOpen returns true if the circuit should be considered 'open' (ie not allowing runFunc calls)
func (c *Circuit) IsOpen() bool {
if c == nil {
return false
}
if c.threadSafeConfig.CircuitBreaker.ForceOpen.Get() {
return true
}
if c.threadSafeConfig.CircuitBreaker.ForcedClosed.Get() {
return false
}
return c.isOpen.Get()
}
// CloseCircuit closes an open circuit. Usually because we think it's healthy again. Be aware, if the circuit isn't actually
// healthy, it will just open back up again.
func (c *Circuit) CloseCircuit() {
c.close(c.now(), true)
}
// OpenCircuit will open a closed circuit. The circuit will then try to repair itself
func (c *Circuit) OpenCircuit() {
c.openCircuit(time.Now())
}
// OpenCircuit opens a circuit, without checking error thresholds or request volume thresholds. The circuit will, after
// some delay, try to close again.
func (c *Circuit) openCircuit(now time.Time) {
if c.threadSafeConfig.CircuitBreaker.ForcedClosed.Get() {
// Don't open circuits that are forced closed
return
}
if c.IsOpen() {
// Don't bother opening a circuit that is already open
return
}
c.CircuitMetricsCollector.Opened(now)
c.isOpen.Set(true)
}
// Go executes `Execute`, but uses spawned goroutines to end early if the context is canceled. Use this if you don't trust
// the runFunc to end correctly if context fails. This is a design mirroed in the go-hystrix library, but be warned it
// is very dangerous and could leave orphaned goroutines hanging around forever doing who knows what.
func (c *Circuit) Go(ctx context.Context, runFunc func(context.Context) error, fallbackFunc func(context.Context, error) error) error {
if c == nil {
var wrapper goroutineWrapper
return c.Execute(ctx, wrapper.run(runFunc), wrapper.fallback(fallbackFunc))
}
return c.Execute(ctx, c.goroutineWrapper.run(runFunc), c.goroutineWrapper.fallback(fallbackFunc))
}
// Run will execute the circuit without a fallback. It is the equivalent of calling Execute with a nil fallback function
func (c *Circuit) Run(ctx context.Context, runFunc func(context.Context) error) error {
return c.Execute(ctx, runFunc, nil)
}
// Execute the circuit. Prefer this over Go. Similar to http://netflix.github.io/Hystrix/javadoc/com/netflix/hystrix/HystrixCommand.html#execute--
// The returned error will either be the result of runFunc, the result of fallbackFunc, or an internal library error.
// Internal library errors will match the interface Error and you can use type casting to check this.
func (c *Circuit) Execute(ctx context.Context, runFunc func(context.Context) error, fallbackFunc func(context.Context, error) error) error {
if c.isEmptyOrNil() || c.threadSafeConfig.CircuitBreaker.Disabled.Get() {
return runFunc(ctx)
}
// Try to run the command in the context of the circuit
err := c.run(ctx, runFunc)
if err == nil {
return nil
}
// A bad request should not trigger fallback logic. The user just gave bad input.
// The list of conditions that trigger fallbacks is documented at
// https://github.com/Netflix/Hystrix/wiki/Metrics-and-Monitoring#command-execution-event-types-comnetflixhystrixhystrixeventtype
if IsBadRequest(err) {
return err
}
return c.fallback(ctx, err, fallbackFunc)
}
// --------- only private functions below here
func (c *Circuit) throttleConcurrentCommands(currentCommandCount int64) error {
if c.threadSafeConfig.Execution.MaxConcurrentRequests.Get() >= 0 && currentCommandCount > c.threadSafeConfig.Execution.MaxConcurrentRequests.Get() {
return errThrottledConcurrentCommands | }
// isEmptyOrNil returns true if the circuit is nil or if the circuit was created from an empty circuit. The empty
// circuit setup is mostly a guess (checking OpenToClose). This allows us to give circuits reasonable behavior
// in the nil/empty case.
func (c *Circuit) isEmptyOrNil() bool {
return c == nil || c.OpenToClose == nil
}
// run is the equivalent of Java Manager's http://netflix.github.io/Hystrix/javadoc/com/netflix/hystrix/HystrixCommand.html#run()
func (c *Circuit) run(ctx context.Context, runFunc func(context.Context) error) (retErr error) {
if runFunc == nil {
return nil
}
var expectedDoneBy time.Time
startTime := c.now()
originalContext := ctx
if !c.allowNewRun(startTime) {
// Rather than make this inline, return a global reference (for memory optimization sake).
c.CmdMetricCollector.ErrShortCircuit(startTime)
return errCircuitOpen
}
if c.ClosedToOpen.Prevent(startTime) {
return errCircuitOpen
}
currentCommandCount := c.concurrentCommands.Add(1)
defer c.concurrentCommands.Add(-1)
if err := c.throttleConcurrentCommands(currentCommandCount); err != nil {
c.CmdMetricCollector.ErrConcurrencyLimitReject(startTime)
return err
}
// Set timeout on the command if we have one
if c.threadSafeConfig.Execution.ExecutionTimeout.Get() > 0 {
var timeoutCancel func()
expectedDoneBy = startTime.Add(c.threadSafeConfig.Execution.ExecutionTimeout.Duration())
ctx, timeoutCancel = context.WithDeadline(ctx, expectedDoneBy)
defer timeoutCancel()
}
ret := runFunc(ctx)
endTime := c.now()
totalCmdTime := endTime.Sub(startTime)
runFuncDoneTime := c.now()
// See bad request documentation at https://github.com/Netflix/Hystrix/wiki/How-To-Use#error-propagation
// This request had invalid input, but shouldn't be marked as an 'error' for the circuit
// From documentation
// -------
// The HystrixBadRequestException is intended for use cases such as reporting illegal arguments or non-system
// failures that should not count against the failure metrics and should not trigger fallback logic.
if c.checkErrBadRequest(ret, runFuncDoneTime, totalCmdTime) {
return ret
}
// | }
return nil | random_line_split |
circuit.go | this configuration are not reflected by the circuit.
// In other words, this creates a copy.
func (c *Circuit) Config() Config {
c.notThreadSafeConfigMu.Lock()
defer c.notThreadSafeConfigMu.Unlock()
return c.notThreadSafeConfig
}
// SetConfigNotThreadSafe is only useful during construction before a circuit is being used. It is not thread safe,
// but will modify all the circuit's internal structs to match what the config wants. It also doe *NOT* use the
// default configuration parameters.
func (c *Circuit) SetConfigNotThreadSafe(config Config) {
c.notThreadSafeConfigMu.Lock()
// Set, but do not reference this config inside this function, since that would not be thread safe (no mu protection)
c.notThreadSafeConfig = config
c.notThreadSafeConfigMu.Unlock()
c.goroutineWrapper.lostErrors = config.General.GoLostErrors
c.timeNow = config.General.TimeKeeper.Now
c.OpenToClose = config.General.OpenToClosedFactory()
c.ClosedToOpen = config.General.ClosedToOpenFactory()
if cfg, ok := c.OpenToClose.(Configurable); ok {
cfg.SetConfigNotThreadSafe(config)
}
if cfg, ok := c.ClosedToOpen.(Configurable); ok {
cfg.SetConfigNotThreadSafe(config)
}
c.CmdMetricCollector = append(
make([]RunMetrics, 0, len(config.Metrics.Run)+2),
c.OpenToClose,
c.ClosedToOpen)
c.CmdMetricCollector = append(c.CmdMetricCollector, config.Metrics.Run...)
c.FallbackMetricCollector = append(
make([]FallbackMetrics, 0, len(config.Metrics.Fallback)+2),
config.Metrics.Fallback...)
c.CircuitMetricsCollector = append(
make([]Metrics, 0, len(config.Metrics.Circuit)+2),
c.OpenToClose,
c.ClosedToOpen)
c.CircuitMetricsCollector = append(c.CircuitMetricsCollector, config.Metrics.Circuit...)
c.SetConfigThreadSafe(config)
}
func (c *Circuit) now() time.Time {
return c.timeNow()
}
// Var exports that help diagnose the circuit
func (c *Circuit) Var() expvar.Var {
return expvar.Func(func() interface{} {
if c == nil {
return nil
}
ret := map[string]interface{}{
"config": c.Config(),
"is_open": c.IsOpen(),
"name": c.Name(),
"run_metrics": expvarToVal(c.CmdMetricCollector.Var()),
"concurrent_commands": c.ConcurrentCommands(),
"concurrent_fallbacks": c.ConcurrentFallbacks(),
"closer": c.OpenToClose,
"opener": c.ClosedToOpen,
"fallback_metrics": expvarToVal(c.FallbackMetricCollector.Var()),
}
return ret
})
}
// Name of this circuit
func (c *Circuit) Name() string {
if c == nil {
return ""
}
return c.name
}
// IsOpen returns true if the circuit should be considered 'open' (ie not allowing runFunc calls)
func (c *Circuit) IsOpen() bool {
if c == nil {
return false
}
if c.threadSafeConfig.CircuitBreaker.ForceOpen.Get() {
return true
}
if c.threadSafeConfig.CircuitBreaker.ForcedClosed.Get() {
return false
}
return c.isOpen.Get()
}
// CloseCircuit closes an open circuit. Usually because we think it's healthy again. Be aware, if the circuit isn't actually
// healthy, it will just open back up again.
func (c *Circuit) CloseCircuit() {
c.close(c.now(), true)
}
// OpenCircuit will open a closed circuit. The circuit will then try to repair itself
func (c *Circuit) OpenCircuit() {
c.openCircuit(time.Now())
}
// OpenCircuit opens a circuit, without checking error thresholds or request volume thresholds. The circuit will, after
// some delay, try to close again.
func (c *Circuit) openCircuit(now time.Time) {
if c.threadSafeConfig.CircuitBreaker.ForcedClosed.Get() {
// Don't open circuits that are forced closed
return
}
if c.IsOpen() {
// Don't bother opening a circuit that is already open
return
}
c.CircuitMetricsCollector.Opened(now)
c.isOpen.Set(true)
}
// Go executes `Execute`, but uses spawned goroutines to end early if the context is canceled. Use this if you don't trust
// the runFunc to end correctly if context fails. This is a design mirroed in the go-hystrix library, but be warned it
// is very dangerous and could leave orphaned goroutines hanging around forever doing who knows what.
func (c *Circuit) Go(ctx context.Context, runFunc func(context.Context) error, fallbackFunc func(context.Context, error) error) error {
if c == nil {
var wrapper goroutineWrapper
return c.Execute(ctx, wrapper.run(runFunc), wrapper.fallback(fallbackFunc))
}
return c.Execute(ctx, c.goroutineWrapper.run(runFunc), c.goroutineWrapper.fallback(fallbackFunc))
}
// Run will execute the circuit without a fallback. It is the equivalent of calling Execute with a nil fallback function
func (c *Circuit) Run(ctx context.Context, runFunc func(context.Context) error) error {
return c.Execute(ctx, runFunc, nil)
}
// Execute the circuit. Prefer this over Go. Similar to http://netflix.github.io/Hystrix/javadoc/com/netflix/hystrix/HystrixCommand.html#execute--
// The returned error will either be the result of runFunc, the result of fallbackFunc, or an internal library error.
// Internal library errors will match the interface Error and you can use type casting to check this.
func (c *Circuit) Execute(ctx context.Context, runFunc func(context.Context) error, fallbackFunc func(context.Context, error) error) error {
if c.isEmptyOrNil() || c.threadSafeConfig.CircuitBreaker.Disabled.Get() {
return runFunc(ctx)
}
// Try to run the command in the context of the circuit
err := c.run(ctx, runFunc)
if err == nil {
return nil
}
// A bad request should not trigger fallback logic. The user just gave bad input.
// The list of conditions that trigger fallbacks is documented at
// https://github.com/Netflix/Hystrix/wiki/Metrics-and-Monitoring#command-execution-event-types-comnetflixhystrixhystrixeventtype
if IsBadRequest(err) {
return err
}
return c.fallback(ctx, err, fallbackFunc)
}
// --------- only private functions below here
func (c *Circuit) throttleConcurrentCommands(currentCommandCount int64) error {
if c.threadSafeConfig.Execution.MaxConcurrentRequests.Get() >= 0 && currentCommandCount > c.threadSafeConfig.Execution.MaxConcurrentRequests.Get() {
return errThrottledConcurrentCommands
}
return nil
}
// isEmptyOrNil returns true if the circuit is nil or if the circuit was created from an empty circuit. The empty
// circuit setup is mostly a guess (checking OpenToClose). This allows us to give circuits reasonable behavior
// in the nil/empty case.
func (c *Circuit) isEmptyOrNil() bool {
return c == nil || c.OpenToClose == nil
}
// run is the equivalent of Java Manager's http://netflix.github.io/Hystrix/javadoc/com/netflix/hystrix/HystrixCommand.html#run()
func (c *Circuit) run(ctx context.Context, runFunc func(context.Context) error) (retErr error) {
if runFunc == nil {
return nil
}
var expectedDoneBy time.Time
startTime := c.now()
originalContext := ctx
if !c.allowNewRun(startTime) {
// Rather than make this inline, return a global reference (for memory optimization sake).
c.CmdMetricCollector.ErrShortCircuit(startTime)
return errCircuitOpen
}
if c.ClosedToOpen.Prevent(startTime) |
currentCommandCount := c.concurrentCommands.Add(1)
defer c.concurrentCommands.Add(-1)
if err := c.throttleConcurrentCommands(currentCommandCount); err != nil {
c.CmdMetricCollector.ErrConcurrencyLimitReject(startTime)
return err
}
// Set timeout on the command if we have one
if c.threadSafeConfig.Execution.ExecutionTimeout.Get() > 0 {
var timeoutCancel func()
expectedDoneBy = startTime.Add(c.threadSafeConfig.Execution.ExecutionTimeout.Duration())
ctx, timeoutCancel = context.WithDeadline(ctx, expectedDoneBy)
defer timeoutCancel()
}
ret := runFunc(ctx)
endTime := c.now()
totalCmdTime := endTime.Sub(startTime)
runFuncDoneTime := c.now()
// See bad request documentation at https://github.com/Netflix/Hystrix/wiki/How-To-Use#error-propagation
// This request had invalid input, but shouldn't be marked as an 'error' for the circuit
// From documentation
// -------
// The HystrixBadRequestException is intended for use cases such as reporting illegal arguments or non-system
// failures that should not count against the failure metrics and should not trigger fallback logic.
if c.checkErrBadRequest(ret, runFuncDoneTime, totalCmdTime) {
return ret
| {
return errCircuitOpen
} | conditional_block |
circuit.go | expvarToVal(c.FallbackMetricCollector.Var()),
}
return ret
})
}
// Name of this circuit
func (c *Circuit) Name() string {
if c == nil {
return ""
}
return c.name
}
// IsOpen returns true if the circuit should be considered 'open' (ie not allowing runFunc calls)
func (c *Circuit) IsOpen() bool {
if c == nil {
return false
}
if c.threadSafeConfig.CircuitBreaker.ForceOpen.Get() {
return true
}
if c.threadSafeConfig.CircuitBreaker.ForcedClosed.Get() {
return false
}
return c.isOpen.Get()
}
// CloseCircuit closes an open circuit. Usually because we think it's healthy again. Be aware, if the circuit isn't actually
// healthy, it will just open back up again.
func (c *Circuit) CloseCircuit() {
c.close(c.now(), true)
}
// OpenCircuit will open a closed circuit. The circuit will then try to repair itself
func (c *Circuit) OpenCircuit() {
c.openCircuit(time.Now())
}
// OpenCircuit opens a circuit, without checking error thresholds or request volume thresholds. The circuit will, after
// some delay, try to close again.
func (c *Circuit) openCircuit(now time.Time) {
if c.threadSafeConfig.CircuitBreaker.ForcedClosed.Get() {
// Don't open circuits that are forced closed
return
}
if c.IsOpen() {
// Don't bother opening a circuit that is already open
return
}
c.CircuitMetricsCollector.Opened(now)
c.isOpen.Set(true)
}
// Go executes `Execute`, but uses spawned goroutines to end early if the context is canceled. Use this if you don't trust
// the runFunc to end correctly if context fails. This is a design mirroed in the go-hystrix library, but be warned it
// is very dangerous and could leave orphaned goroutines hanging around forever doing who knows what.
func (c *Circuit) Go(ctx context.Context, runFunc func(context.Context) error, fallbackFunc func(context.Context, error) error) error {
if c == nil {
var wrapper goroutineWrapper
return c.Execute(ctx, wrapper.run(runFunc), wrapper.fallback(fallbackFunc))
}
return c.Execute(ctx, c.goroutineWrapper.run(runFunc), c.goroutineWrapper.fallback(fallbackFunc))
}
// Run will execute the circuit without a fallback. It is the equivalent of calling Execute with a nil fallback function
func (c *Circuit) Run(ctx context.Context, runFunc func(context.Context) error) error {
return c.Execute(ctx, runFunc, nil)
}
// Execute the circuit. Prefer this over Go. Similar to http://netflix.github.io/Hystrix/javadoc/com/netflix/hystrix/HystrixCommand.html#execute--
// The returned error will either be the result of runFunc, the result of fallbackFunc, or an internal library error.
// Internal library errors will match the interface Error and you can use type casting to check this.
func (c *Circuit) Execute(ctx context.Context, runFunc func(context.Context) error, fallbackFunc func(context.Context, error) error) error {
if c.isEmptyOrNil() || c.threadSafeConfig.CircuitBreaker.Disabled.Get() {
return runFunc(ctx)
}
// Try to run the command in the context of the circuit
err := c.run(ctx, runFunc)
if err == nil {
return nil
}
// A bad request should not trigger fallback logic. The user just gave bad input.
// The list of conditions that trigger fallbacks is documented at
// https://github.com/Netflix/Hystrix/wiki/Metrics-and-Monitoring#command-execution-event-types-comnetflixhystrixhystrixeventtype
if IsBadRequest(err) {
return err
}
return c.fallback(ctx, err, fallbackFunc)
}
// --------- only private functions below here
func (c *Circuit) throttleConcurrentCommands(currentCommandCount int64) error {
if c.threadSafeConfig.Execution.MaxConcurrentRequests.Get() >= 0 && currentCommandCount > c.threadSafeConfig.Execution.MaxConcurrentRequests.Get() {
return errThrottledConcurrentCommands
}
return nil
}
// isEmptyOrNil returns true if the circuit is nil or if the circuit was created from an empty circuit. The empty
// circuit setup is mostly a guess (checking OpenToClose). This allows us to give circuits reasonable behavior
// in the nil/empty case.
func (c *Circuit) isEmptyOrNil() bool {
return c == nil || c.OpenToClose == nil
}
// run is the equivalent of Java Manager's http://netflix.github.io/Hystrix/javadoc/com/netflix/hystrix/HystrixCommand.html#run()
func (c *Circuit) run(ctx context.Context, runFunc func(context.Context) error) (retErr error) {
if runFunc == nil {
return nil
}
var expectedDoneBy time.Time
startTime := c.now()
originalContext := ctx
if !c.allowNewRun(startTime) {
// Rather than make this inline, return a global reference (for memory optimization sake).
c.CmdMetricCollector.ErrShortCircuit(startTime)
return errCircuitOpen
}
if c.ClosedToOpen.Prevent(startTime) {
return errCircuitOpen
}
currentCommandCount := c.concurrentCommands.Add(1)
defer c.concurrentCommands.Add(-1)
if err := c.throttleConcurrentCommands(currentCommandCount); err != nil {
c.CmdMetricCollector.ErrConcurrencyLimitReject(startTime)
return err
}
// Set timeout on the command if we have one
if c.threadSafeConfig.Execution.ExecutionTimeout.Get() > 0 {
var timeoutCancel func()
expectedDoneBy = startTime.Add(c.threadSafeConfig.Execution.ExecutionTimeout.Duration())
ctx, timeoutCancel = context.WithDeadline(ctx, expectedDoneBy)
defer timeoutCancel()
}
ret := runFunc(ctx)
endTime := c.now()
totalCmdTime := endTime.Sub(startTime)
runFuncDoneTime := c.now()
// See bad request documentation at https://github.com/Netflix/Hystrix/wiki/How-To-Use#error-propagation
// This request had invalid input, but shouldn't be marked as an 'error' for the circuit
// From documentation
// -------
// The HystrixBadRequestException is intended for use cases such as reporting illegal arguments or non-system
// failures that should not count against the failure metrics and should not trigger fallback logic.
if c.checkErrBadRequest(ret, runFuncDoneTime, totalCmdTime) {
return ret
}
// Even if there is no error (or if there is an error), if the request took too long it is always an error for the
// circuit. Note that ret *MAY* actually be nil. In that case, we still want to return nil.
if c.checkErrTimeout(expectedDoneBy, runFuncDoneTime, totalCmdTime) {
// Note: ret could possibly be nil. We will still return nil, but the circuit will consider it a failure.
return ret
}
// The runFunc failed, but someone asked the original context to end. This probably isn't a failure of the
// circuit: someone just wanted `Execute` to end early, so don't track it as a failure.
if c.checkErrInterrupt(originalContext, ret, runFuncDoneTime, totalCmdTime) {
return ret
}
if c.checkErrFailure(ret, runFuncDoneTime, totalCmdTime) {
return ret
}
// The circuit works. Close it!
// Note: Execute this *after* you check for timeouts so we can still track circuit time outs that happen to also return a
// valid value later.
c.checkSuccess(runFuncDoneTime, totalCmdTime)
return nil
}
func (c *Circuit) checkSuccess(runFuncDoneTime time.Time, totalCmdTime time.Duration) {
c.CmdMetricCollector.Success(runFuncDoneTime, totalCmdTime)
if c.IsOpen() {
c.close(runFuncDoneTime, false)
}
}
// checkErrInterrupt returns true if this is considered an interrupt error: interrupt errors do not open the circuit.
// Normally if the parent context is canceled before a timeout is reached, we don't consider the circuit
// unhealthy. But when ExecutionConfig.IgnoreInterrupts set to true we try to classify originalContext.Err()
// with help of ExecutionConfig.IsErrInterrupt function. When this function returns true we do not open the circuit
func (c *Circuit) checkErrInterrupt(originalContext context.Context, ret error, runFuncDoneTime time.Time, totalCmdTime time.Duration) bool | {
// We need to see an error in both the original context and the return value to consider this an "interrupt" caused
// error.
if ret == nil || originalContext.Err() == nil {
return false
}
isErrInterrupt := c.notThreadSafeConfig.Execution.IsErrInterrupt
if isErrInterrupt == nil {
isErrInterrupt = func(_ error) bool {
// By default, we consider any error from the original context an interrupt causing error
return true
}
}
if !c.threadSafeConfig.GoSpecific.IgnoreInterrupts.Get() && isErrInterrupt(originalContext.Err()) {
c.CmdMetricCollector.ErrInterrupt(runFuncDoneTime, totalCmdTime)
return true
}
| identifier_body |
|
circuit.go | () {
// Don't open circuits that are forced closed
return
}
if c.IsOpen() {
// Don't bother opening a circuit that is already open
return
}
c.CircuitMetricsCollector.Opened(now)
c.isOpen.Set(true)
}
// Go executes `Execute`, but uses spawned goroutines to end early if the context is canceled. Use this if you don't trust
// the runFunc to end correctly if context fails. This is a design mirroed in the go-hystrix library, but be warned it
// is very dangerous and could leave orphaned goroutines hanging around forever doing who knows what.
func (c *Circuit) Go(ctx context.Context, runFunc func(context.Context) error, fallbackFunc func(context.Context, error) error) error {
if c == nil {
var wrapper goroutineWrapper
return c.Execute(ctx, wrapper.run(runFunc), wrapper.fallback(fallbackFunc))
}
return c.Execute(ctx, c.goroutineWrapper.run(runFunc), c.goroutineWrapper.fallback(fallbackFunc))
}
// Run will execute the circuit without a fallback. It is the equivalent of calling Execute with a nil fallback function
func (c *Circuit) Run(ctx context.Context, runFunc func(context.Context) error) error {
return c.Execute(ctx, runFunc, nil)
}
// Execute the circuit. Prefer this over Go. Similar to http://netflix.github.io/Hystrix/javadoc/com/netflix/hystrix/HystrixCommand.html#execute--
// The returned error will either be the result of runFunc, the result of fallbackFunc, or an internal library error.
// Internal library errors will match the interface Error and you can use type casting to check this.
func (c *Circuit) Execute(ctx context.Context, runFunc func(context.Context) error, fallbackFunc func(context.Context, error) error) error {
if c.isEmptyOrNil() || c.threadSafeConfig.CircuitBreaker.Disabled.Get() {
return runFunc(ctx)
}
// Try to run the command in the context of the circuit
err := c.run(ctx, runFunc)
if err == nil {
return nil
}
// A bad request should not trigger fallback logic. The user just gave bad input.
// The list of conditions that trigger fallbacks is documented at
// https://github.com/Netflix/Hystrix/wiki/Metrics-and-Monitoring#command-execution-event-types-comnetflixhystrixhystrixeventtype
if IsBadRequest(err) {
return err
}
return c.fallback(ctx, err, fallbackFunc)
}
// --------- only private functions below here
func (c *Circuit) throttleConcurrentCommands(currentCommandCount int64) error {
if c.threadSafeConfig.Execution.MaxConcurrentRequests.Get() >= 0 && currentCommandCount > c.threadSafeConfig.Execution.MaxConcurrentRequests.Get() {
return errThrottledConcurrentCommands
}
return nil
}
// isEmptyOrNil returns true if the circuit is nil or if the circuit was created from an empty circuit. The empty
// circuit setup is mostly a guess (checking OpenToClose). This allows us to give circuits reasonable behavior
// in the nil/empty case.
func (c *Circuit) isEmptyOrNil() bool {
return c == nil || c.OpenToClose == nil
}
// run is the equivalent of Java Manager's http://netflix.github.io/Hystrix/javadoc/com/netflix/hystrix/HystrixCommand.html#run()
func (c *Circuit) run(ctx context.Context, runFunc func(context.Context) error) (retErr error) {
if runFunc == nil {
return nil
}
var expectedDoneBy time.Time
startTime := c.now()
originalContext := ctx
if !c.allowNewRun(startTime) {
// Rather than make this inline, return a global reference (for memory optimization sake).
c.CmdMetricCollector.ErrShortCircuit(startTime)
return errCircuitOpen
}
if c.ClosedToOpen.Prevent(startTime) {
return errCircuitOpen
}
currentCommandCount := c.concurrentCommands.Add(1)
defer c.concurrentCommands.Add(-1)
if err := c.throttleConcurrentCommands(currentCommandCount); err != nil {
c.CmdMetricCollector.ErrConcurrencyLimitReject(startTime)
return err
}
// Set timeout on the command if we have one
if c.threadSafeConfig.Execution.ExecutionTimeout.Get() > 0 {
var timeoutCancel func()
expectedDoneBy = startTime.Add(c.threadSafeConfig.Execution.ExecutionTimeout.Duration())
ctx, timeoutCancel = context.WithDeadline(ctx, expectedDoneBy)
defer timeoutCancel()
}
ret := runFunc(ctx)
endTime := c.now()
totalCmdTime := endTime.Sub(startTime)
runFuncDoneTime := c.now()
// See bad request documentation at https://github.com/Netflix/Hystrix/wiki/How-To-Use#error-propagation
// This request had invalid input, but shouldn't be marked as an 'error' for the circuit
// From documentation
// -------
// The HystrixBadRequestException is intended for use cases such as reporting illegal arguments or non-system
// failures that should not count against the failure metrics and should not trigger fallback logic.
if c.checkErrBadRequest(ret, runFuncDoneTime, totalCmdTime) {
return ret
}
// Even if there is no error (or if there is an error), if the request took too long it is always an error for the
// circuit. Note that ret *MAY* actually be nil. In that case, we still want to return nil.
if c.checkErrTimeout(expectedDoneBy, runFuncDoneTime, totalCmdTime) {
// Note: ret could possibly be nil. We will still return nil, but the circuit will consider it a failure.
return ret
}
// The runFunc failed, but someone asked the original context to end. This probably isn't a failure of the
// circuit: someone just wanted `Execute` to end early, so don't track it as a failure.
if c.checkErrInterrupt(originalContext, ret, runFuncDoneTime, totalCmdTime) {
return ret
}
if c.checkErrFailure(ret, runFuncDoneTime, totalCmdTime) {
return ret
}
// The circuit works. Close it!
// Note: Execute this *after* you check for timeouts so we can still track circuit time outs that happen to also return a
// valid value later.
c.checkSuccess(runFuncDoneTime, totalCmdTime)
return nil
}
func (c *Circuit) checkSuccess(runFuncDoneTime time.Time, totalCmdTime time.Duration) {
c.CmdMetricCollector.Success(runFuncDoneTime, totalCmdTime)
if c.IsOpen() {
c.close(runFuncDoneTime, false)
}
}
// checkErrInterrupt returns true if this is considered an interrupt error: interrupt errors do not open the circuit.
// Normally if the parent context is canceled before a timeout is reached, we don't consider the circuit
// unhealthy. But when ExecutionConfig.IgnoreInterrupts set to true we try to classify originalContext.Err()
// with help of ExecutionConfig.IsErrInterrupt function. When this function returns true we do not open the circuit
func (c *Circuit) checkErrInterrupt(originalContext context.Context, ret error, runFuncDoneTime time.Time, totalCmdTime time.Duration) bool {
// We need to see an error in both the original context and the return value to consider this an "interrupt" caused
// error.
if ret == nil || originalContext.Err() == nil {
return false
}
isErrInterrupt := c.notThreadSafeConfig.Execution.IsErrInterrupt
if isErrInterrupt == nil {
isErrInterrupt = func(_ error) bool {
// By default, we consider any error from the original context an interrupt causing error
return true
}
}
if !c.threadSafeConfig.GoSpecific.IgnoreInterrupts.Get() && isErrInterrupt(originalContext.Err()) {
c.CmdMetricCollector.ErrInterrupt(runFuncDoneTime, totalCmdTime)
return true
}
return false
}
func (c *Circuit) checkErrBadRequest(ret error, runFuncDoneTime time.Time, totalCmdTime time.Duration) bool {
if IsBadRequest(ret) {
c.CmdMetricCollector.ErrBadRequest(runFuncDoneTime, totalCmdTime)
return true
}
return false
}
func (c *Circuit) checkErrFailure(ret error, runFuncDoneTime time.Time, totalCmdTime time.Duration) bool {
if ret != nil {
c.CmdMetricCollector.ErrFailure(runFuncDoneTime, totalCmdTime)
if !c.IsOpen() {
c.attemptToOpen(runFuncDoneTime)
}
return true
}
return false
}
func (c *Circuit) checkErrTimeout(expectedDoneBy time.Time, runFuncDoneTime time.Time, totalCmdTime time.Duration) bool {
// I don't use the deadline from the context because it could be a smaller timeout from the parent context
if !expectedDoneBy.IsZero() && expectedDoneBy.Before(runFuncDoneTime) {
c.CmdMetricCollector.ErrTimeout(runFuncDoneTime, totalCmdTime)
if !c.IsOpen() {
c.attemptToOpen(runFuncDoneTime)
}
return true
}
return false
}
// Does fallback logic. Equivalent of
// http://netflix.github.io/Hystrix/javadoc/com/netflix/hystrix/HystrixCommand.html#getFallback
func (c *Circuit) | fallback | identifier_name |
|
aku-utils.go | clientPolicy := aerospike.NewClientPolicy()
tlsConfig := initTLSConfig()
if securityEnabled == "true" {
clientPolicy.User = username
clientPolicy.Password = password
if authMode == "external" |
}
// only one connection
clientPolicy.ConnectionQueueSize = 1
clientPolicy.Timeout = 5 * time.Second
clientPolicy.TlsConfig = tlsConfig
port := servicePlainPort
tlsName := ""
if clientPolicy.TlsConfig != nil {
port = serviceTLSPort
tlsName = serviceTLSName
}
portInt, _ := strconv.Atoi(port)
server := aerospike.NewHost(host, portInt)
server.TLSName = tlsName
zap.S().Debugf("Connecting to aerospike node %s:%d.", host, portInt)
connection, err := aerospike.NewConnection(clientPolicy, server)
if err != nil {
return nil, err
}
if clientPolicy.RequiresAuthentication() {
if err := connection.Login(clientPolicy); err != nil {
return nil, err
}
}
return connection, nil
}
// Initialize TLS config
func initTLSConfig() *tls.Config {
var tlsConfig *tls.Config
if serviceTLSEnabled == "true" {
serverPool, err := x509.SystemCertPool()
if serverPool == nil || err != nil {
zap.S().Debugf("Adding system certificates to the cert pool failed: %s.", err)
serverPool = x509.NewCertPool()
}
if len(serviceCAFile) > 0 {
path, err := getCertFilePath(aerospikeConfigVolumePath, serviceCAFile, serviceTLSName+"-service-cacert.pem")
if err != nil {
zap.S().Fatal("Unable to get certificate file path: %v.", err)
}
// Try to load system CA certs and add them to the system cert pool
caCert := readCertFile(path)
zap.S().Debugf("Adding server certificate `%s` to the pool.", path)
serverPool.AppendCertsFromPEM(caCert)
}
var clientPool []tls.Certificate
if len(serviceCertFile) > 0 || len(serviceKeyFile) > 0 {
certPath, err := getCertFilePath(aerospikeConfigVolumePath, serviceCertFile, serviceTLSName+"-service-cert.pem")
if err != nil {
zap.S().Fatal("Unable to get certificate file path: %v.", err)
}
keyPath, err := getCertFilePath(aerospikeConfigVolumePath, serviceKeyFile, serviceTLSName+"-service-key.pem")
if err != nil {
zap.S().Fatal("Unable to get key file path: %v.", err)
}
// Read Cert and Key files
certFileBytes := readCertFile(certPath)
keyFileBytes := readCertFile(keyPath)
// Decode PEM data
keyBlock, _ := pem.Decode(keyFileBytes)
certBlock, _ := pem.Decode(certFileBytes)
if keyBlock == nil || certBlock == nil {
zap.S().Fatalf("Unable to decode PEM data for `%s` or `%s`.", keyPath, certPath)
}
// Encode PEM data
keyPEM := pem.EncodeToMemory(keyBlock)
certPEM := pem.EncodeToMemory(certBlock)
if keyPEM == nil || certPEM == nil {
zap.S().Fatalf("Unable to encode PEM data for `%s` or `%s`.", keyPath, certPath)
}
cert, err := tls.X509KeyPair(certPEM, keyPEM)
if err != nil {
zap.S().Fatalf("Unable to add client certificate `%s` and key file `%s` to the pool: `%s`.", certPath, keyPath, err)
}
zap.S().Debugf("Adding client certificate `%s` to the pool.", certPath)
clientPool = append(clientPool, cert)
}
tlsConfig = &tls.Config{
Certificates: clientPool,
RootCAs: serverPool,
InsecureSkipVerify: false,
PreferServerCipherSuites: true,
}
tlsConfig.BuildNameToCertificate()
}
return tlsConfig
}
// Get certificate file path
func getCertFilePath(configMountPoint string, certFile string, fileName string) (string, error) {
if certFile == "" {
return "", fmt.Errorf("certificate file name empty")
}
parsedCertFile := strings.Split(certFile, ":")
switch len(parsedCertFile) {
case 1:
return certFile, nil
case 2:
switch parsedCertFile[0] {
case "file":
return parsedCertFile[1], nil
case "b64enc":
return configMountPoint + "/certs/" + fileName, nil
default:
return "", fmt.Errorf("Invalid option while parsing cert file: %s", parsedCertFile[0])
}
}
// Should not reach here
return "", fmt.Errorf("Unable to parse cert file: %s", certFile)
}
// Update global variables from ENV variable inputs
func initVars() {
zap.S().Info("Initializing variables.")
podIP, ok := os.LookupEnv("MY_POD_IP")
if ok {
myPodIP = podIP
}
secEnabled, ok := os.LookupEnv("SECURITY_ENABLED")
if ok {
securityEnabled = secEnabled
}
helmusr, ok := os.LookupEnv("HELM_USERNAME")
if ok {
helmUsername = helmusr
}
helmpass, ok := os.LookupEnv("HELM_PASSWORD")
if ok {
helmPassword = helmpass
}
adminusr, ok := os.LookupEnv("ADMIN_USERNAME")
if ok {
adminUsername = adminusr
}
adminpass, ok := os.LookupEnv("ADMIN_PASSWORD")
if ok {
adminPassword = adminpass
}
auth, ok := os.LookupEnv("AUTH_MODE")
if ok {
authMode = auth
}
tlsEnabled, ok := os.LookupEnv("SERVICE_TLS_ENABLED")
if ok {
serviceTLSEnabled = tlsEnabled
}
tlsCAFile, ok := os.LookupEnv("SERVICE_CA_FILE")
if ok {
serviceCAFile = tlsCAFile
}
tlsCertFile, ok := os.LookupEnv("SERVICE_CERT_FILE")
if ok {
serviceCertFile = tlsCertFile
}
tlsKeyFile, ok := os.LookupEnv("SERVICE_KEY_FILE")
if ok {
serviceKeyFile = tlsKeyFile
}
tlsName, ok := os.LookupEnv("SERVICE_TLS_NAME")
if ok {
serviceTLSName = tlsName
}
tlsMutualAuth, ok := os.LookupEnv("SERVICE_MUTUAL_AUTH")
if ok {
serviceMutualAuth = tlsMutualAuth
}
tlsPort, ok := os.LookupEnv("SERVICE_TLS_PORT")
if ok {
serviceTLSPort = tlsPort
}
plainPort, ok := os.LookupEnv("SERVICE_PLAIN_PORT")
if ok {
servicePlainPort = plainPort
}
}
// InfoParser provides a reader for Aerospike cluster's response for any of the metric
type InfoParser struct {
*bufio.Reader
}
// NewInfoParser provides an instance of the InfoParser
func NewInfoParser(s string) *InfoParser {
return &InfoParser{bufio.NewReader(strings.NewReader(s))}
}
// PeekAndExpect checks if the expected value is present without advancing the reader
func (ip *InfoParser) PeekAndExpect(s string) error {
bytes, err := ip.Peek(len(s))
if err != nil {
return err
}
v := string(bytes)
if v != s {
return fmt.Errorf("InfoParser: Wrong value. Peek expected %s, but found %s", s, v)
}
return nil
}
// Expect validates the expected value against the one returned by the InfoParser
// This advances the reader by length of the input string.
func (ip *InfoParser) Expect(s string) error {
bytes := make([]byte, len(s))
v, err := ip.Read(bytes)
if err != nil {
return err
}
if string(bytes) != s {
return fmt.Errorf("InfoParser: Wrong value. Expected %s, found %d", s, v)
}
return nil
}
// ReadUntil reads bytes from the InfoParser by handeling some edge-cases
func (ip *InfoParser) ReadUntil(delim byte) (string, error) {
v, err := ip.ReadBytes(delim)
switch len(v) {
case 0:
return string(v), err
case 1:
if v[0] == delim {
return "", err
}
return string(v), err
}
return string(v[:len(v)-1]), err
}
// Get ops/sec
// Format (with and without latency data)
// {test}-read:10:17:37-GMT,ops/sec,>1ms,>8ms,>64ms;10:17:47,29648.2,3.44,0.08,0.00;
// error-no-data-yet-or-back-too | {
clientPolicy.AuthMode = aerospike.AuthModeExternal
} | conditional_block |
aku-utils.go | clientPolicy := aerospike.NewClientPolicy()
tlsConfig := initTLSConfig()
if securityEnabled == "true" {
clientPolicy.User = username
clientPolicy.Password = password
if authMode == "external" {
clientPolicy.AuthMode = aerospike.AuthModeExternal
}
}
// only one connection
clientPolicy.ConnectionQueueSize = 1
clientPolicy.Timeout = 5 * time.Second
clientPolicy.TlsConfig = tlsConfig
port := servicePlainPort
tlsName := ""
if clientPolicy.TlsConfig != nil {
port = serviceTLSPort
tlsName = serviceTLSName
}
portInt, _ := strconv.Atoi(port)
server := aerospike.NewHost(host, portInt)
server.TLSName = tlsName
zap.S().Debugf("Connecting to aerospike node %s:%d.", host, portInt)
connection, err := aerospike.NewConnection(clientPolicy, server)
if err != nil {
return nil, err
}
if clientPolicy.RequiresAuthentication() {
if err := connection.Login(clientPolicy); err != nil {
return nil, err
}
}
return connection, nil
}
// Initialize TLS config
func initTLSConfig() *tls.Config | serverPool.AppendCertsFromPEM(caCert)
}
var clientPool []tls.Certificate
if len(serviceCertFile) > 0 || len(serviceKeyFile) > 0 {
certPath, err := getCertFilePath(aerospikeConfigVolumePath, serviceCertFile, serviceTLSName+"-service-cert.pem")
if err != nil {
zap.S().Fatal("Unable to get certificate file path: %v.", err)
}
keyPath, err := getCertFilePath(aerospikeConfigVolumePath, serviceKeyFile, serviceTLSName+"-service-key.pem")
if err != nil {
zap.S().Fatal("Unable to get key file path: %v.", err)
}
// Read Cert and Key files
certFileBytes := readCertFile(certPath)
keyFileBytes := readCertFile(keyPath)
// Decode PEM data
keyBlock, _ := pem.Decode(keyFileBytes)
certBlock, _ := pem.Decode(certFileBytes)
if keyBlock == nil || certBlock == nil {
zap.S().Fatalf("Unable to decode PEM data for `%s` or `%s`.", keyPath, certPath)
}
// Encode PEM data
keyPEM := pem.EncodeToMemory(keyBlock)
certPEM := pem.EncodeToMemory(certBlock)
if keyPEM == nil || certPEM == nil {
zap.S().Fatalf("Unable to encode PEM data for `%s` or `%s`.", keyPath, certPath)
}
cert, err := tls.X509KeyPair(certPEM, keyPEM)
if err != nil {
zap.S().Fatalf("Unable to add client certificate `%s` and key file `%s` to the pool: `%s`.", certPath, keyPath, err)
}
zap.S().Debugf("Adding client certificate `%s` to the pool.", certPath)
clientPool = append(clientPool, cert)
}
tlsConfig = &tls.Config{
Certificates: clientPool,
RootCAs: serverPool,
InsecureSkipVerify: false,
PreferServerCipherSuites: true,
}
tlsConfig.BuildNameToCertificate()
}
return tlsConfig
}
// Get certificate file path
func getCertFilePath(configMountPoint string, certFile string, fileName string) (string, error) {
if certFile == "" {
return "", fmt.Errorf("certificate file name empty")
}
parsedCertFile := strings.Split(certFile, ":")
switch len(parsedCertFile) {
case 1:
return certFile, nil
case 2:
switch parsedCertFile[0] {
case "file":
return parsedCertFile[1], nil
case "b64enc":
return configMountPoint + "/certs/" + fileName, nil
default:
return "", fmt.Errorf("Invalid option while parsing cert file: %s", parsedCertFile[0])
}
}
// Should not reach here
return "", fmt.Errorf("Unable to parse cert file: %s", certFile)
}
// Update global variables from ENV variable inputs
func initVars() {
zap.S().Info("Initializing variables.")
podIP, ok := os.LookupEnv("MY_POD_IP")
if ok {
myPodIP = podIP
}
secEnabled, ok := os.LookupEnv("SECURITY_ENABLED")
if ok {
securityEnabled = secEnabled
}
helmusr, ok := os.LookupEnv("HELM_USERNAME")
if ok {
helmUsername = helmusr
}
helmpass, ok := os.LookupEnv("HELM_PASSWORD")
if ok {
helmPassword = helmpass
}
adminusr, ok := os.LookupEnv("ADMIN_USERNAME")
if ok {
adminUsername = adminusr
}
adminpass, ok := os.LookupEnv("ADMIN_PASSWORD")
if ok {
adminPassword = adminpass
}
auth, ok := os.LookupEnv("AUTH_MODE")
if ok {
authMode = auth
}
tlsEnabled, ok := os.LookupEnv("SERVICE_TLS_ENABLED")
if ok {
serviceTLSEnabled = tlsEnabled
}
tlsCAFile, ok := os.LookupEnv("SERVICE_CA_FILE")
if ok {
serviceCAFile = tlsCAFile
}
tlsCertFile, ok := os.LookupEnv("SERVICE_CERT_FILE")
if ok {
serviceCertFile = tlsCertFile
}
tlsKeyFile, ok := os.LookupEnv("SERVICE_KEY_FILE")
if ok {
serviceKeyFile = tlsKeyFile
}
tlsName, ok := os.LookupEnv("SERVICE_TLS_NAME")
if ok {
serviceTLSName = tlsName
}
tlsMutualAuth, ok := os.LookupEnv("SERVICE_MUTUAL_AUTH")
if ok {
serviceMutualAuth = tlsMutualAuth
}
tlsPort, ok := os.LookupEnv("SERVICE_TLS_PORT")
if ok {
serviceTLSPort = tlsPort
}
plainPort, ok := os.LookupEnv("SERVICE_PLAIN_PORT")
if ok {
servicePlainPort = plainPort
}
}
// InfoParser provides a reader for Aerospike cluster's response for any of the metric
type InfoParser struct {
*bufio.Reader
}
// NewInfoParser provides an instance of the InfoParser
func NewInfoParser(s string) *InfoParser {
return &InfoParser{bufio.NewReader(strings.NewReader(s))}
}
// PeekAndExpect checks if the expected value is present without advancing the reader
func (ip *InfoParser) PeekAndExpect(s string) error {
bytes, err := ip.Peek(len(s))
if err != nil {
return err
}
v := string(bytes)
if v != s {
return fmt.Errorf("InfoParser: Wrong value. Peek expected %s, but found %s", s, v)
}
return nil
}
// Expect validates the expected value against the one returned by the InfoParser
// This advances the reader by length of the input string.
func (ip *InfoParser) Expect(s string) error {
bytes := make([]byte, len(s))
v, err := ip.Read(bytes)
if err != nil {
return err
}
if string(bytes) != s {
return fmt.Errorf("InfoParser: Wrong value. Expected %s, found %d", s, v)
}
return nil
}
// ReadUntil reads bytes from the InfoParser by handeling some edge-cases
func (ip *InfoParser) ReadUntil(delim byte) (string, error) {
v, err := ip.ReadBytes(delim)
switch len(v) {
case 0:
return string(v), err
case 1:
if v[0] == delim {
return "", err
}
return string(v), err
}
return string(v[:len(v)-1]), err
}
// Get ops/sec
// Format (with and without latency data)
// {test}-read:10:17:37-GMT,ops/sec,>1ms,>8ms,>64ms;10:17:47,29648.2,3.44,0.08,0.00;
// error-no-data-yet-or-back-too-small | {
var tlsConfig *tls.Config
if serviceTLSEnabled == "true" {
serverPool, err := x509.SystemCertPool()
if serverPool == nil || err != nil {
zap.S().Debugf("Adding system certificates to the cert pool failed: %s.", err)
serverPool = x509.NewCertPool()
}
if len(serviceCAFile) > 0 {
path, err := getCertFilePath(aerospikeConfigVolumePath, serviceCAFile, serviceTLSName+"-service-cacert.pem")
if err != nil {
zap.S().Fatal("Unable to get certificate file path: %v.", err)
}
// Try to load system CA certs and add them to the system cert pool
caCert := readCertFile(path)
zap.S().Debugf("Adding server certificate `%s` to the pool.", path) | identifier_body |
aku-utils.go | clientPolicy := aerospike.NewClientPolicy()
tlsConfig := initTLSConfig()
if securityEnabled == "true" {
clientPolicy.User = username
clientPolicy.Password = password
if authMode == "external" {
clientPolicy.AuthMode = aerospike.AuthModeExternal
}
}
// only one connection
clientPolicy.ConnectionQueueSize = 1
clientPolicy.Timeout = 5 * time.Second
clientPolicy.TlsConfig = tlsConfig
port := servicePlainPort
tlsName := ""
if clientPolicy.TlsConfig != nil {
port = serviceTLSPort
tlsName = serviceTLSName
}
portInt, _ := strconv.Atoi(port)
server := aerospike.NewHost(host, portInt)
server.TLSName = tlsName
zap.S().Debugf("Connecting to aerospike node %s:%d.", host, portInt)
connection, err := aerospike.NewConnection(clientPolicy, server)
if err != nil {
return nil, err
}
if clientPolicy.RequiresAuthentication() {
if err := connection.Login(clientPolicy); err != nil {
return nil, err
}
}
return connection, nil
}
// Initialize TLS config
func initTLSConfig() *tls.Config {
var tlsConfig *tls.Config
if serviceTLSEnabled == "true" {
serverPool, err := x509.SystemCertPool()
if serverPool == nil || err != nil {
zap.S().Debugf("Adding system certificates to the cert pool failed: %s.", err)
serverPool = x509.NewCertPool()
}
if len(serviceCAFile) > 0 {
path, err := getCertFilePath(aerospikeConfigVolumePath, serviceCAFile, serviceTLSName+"-service-cacert.pem")
if err != nil {
zap.S().Fatal("Unable to get certificate file path: %v.", err)
}
// Try to load system CA certs and add them to the system cert pool
caCert := readCertFile(path)
zap.S().Debugf("Adding server certificate `%s` to the pool.", path)
serverPool.AppendCertsFromPEM(caCert)
}
var clientPool []tls.Certificate
if len(serviceCertFile) > 0 || len(serviceKeyFile) > 0 {
certPath, err := getCertFilePath(aerospikeConfigVolumePath, serviceCertFile, serviceTLSName+"-service-cert.pem")
if err != nil {
zap.S().Fatal("Unable to get certificate file path: %v.", err)
}
keyPath, err := getCertFilePath(aerospikeConfigVolumePath, serviceKeyFile, serviceTLSName+"-service-key.pem")
if err != nil {
zap.S().Fatal("Unable to get key file path: %v.", err)
}
// Read Cert and Key files
certFileBytes := readCertFile(certPath)
keyFileBytes := readCertFile(keyPath)
// Decode PEM data
keyBlock, _ := pem.Decode(keyFileBytes)
certBlock, _ := pem.Decode(certFileBytes)
if keyBlock == nil || certBlock == nil {
zap.S().Fatalf("Unable to decode PEM data for `%s` or `%s`.", keyPath, certPath)
}
// Encode PEM data
keyPEM := pem.EncodeToMemory(keyBlock)
certPEM := pem.EncodeToMemory(certBlock)
if keyPEM == nil || certPEM == nil {
zap.S().Fatalf("Unable to encode PEM data for `%s` or `%s`.", keyPath, certPath)
}
cert, err := tls.X509KeyPair(certPEM, keyPEM)
if err != nil {
zap.S().Fatalf("Unable to add client certificate `%s` and key file `%s` to the pool: `%s`.", certPath, keyPath, err)
}
zap.S().Debugf("Adding client certificate `%s` to the pool.", certPath)
clientPool = append(clientPool, cert)
}
tlsConfig = &tls.Config{
Certificates: clientPool,
RootCAs: serverPool,
InsecureSkipVerify: false,
PreferServerCipherSuites: true,
}
tlsConfig.BuildNameToCertificate()
}
return tlsConfig
}
// Get certificate file path
func getCertFilePath(configMountPoint string, certFile string, fileName string) (string, error) {
if certFile == "" {
return "", fmt.Errorf("certificate file name empty")
}
parsedCertFile := strings.Split(certFile, ":")
switch len(parsedCertFile) {
case 1:
return certFile, nil
case 2:
switch parsedCertFile[0] {
case "file":
return parsedCertFile[1], nil
case "b64enc":
return configMountPoint + "/certs/" + fileName, nil
default:
return "", fmt.Errorf("Invalid option while parsing cert file: %s", parsedCertFile[0])
}
}
// Should not reach here | // Update global variables from ENV variable inputs
func initVars() {
zap.S().Info("Initializing variables.")
podIP, ok := os.LookupEnv("MY_POD_IP")
if ok {
myPodIP = podIP
}
secEnabled, ok := os.LookupEnv("SECURITY_ENABLED")
if ok {
securityEnabled = secEnabled
}
helmusr, ok := os.LookupEnv("HELM_USERNAME")
if ok {
helmUsername = helmusr
}
helmpass, ok := os.LookupEnv("HELM_PASSWORD")
if ok {
helmPassword = helmpass
}
adminusr, ok := os.LookupEnv("ADMIN_USERNAME")
if ok {
adminUsername = adminusr
}
adminpass, ok := os.LookupEnv("ADMIN_PASSWORD")
if ok {
adminPassword = adminpass
}
auth, ok := os.LookupEnv("AUTH_MODE")
if ok {
authMode = auth
}
tlsEnabled, ok := os.LookupEnv("SERVICE_TLS_ENABLED")
if ok {
serviceTLSEnabled = tlsEnabled
}
tlsCAFile, ok := os.LookupEnv("SERVICE_CA_FILE")
if ok {
serviceCAFile = tlsCAFile
}
tlsCertFile, ok := os.LookupEnv("SERVICE_CERT_FILE")
if ok {
serviceCertFile = tlsCertFile
}
tlsKeyFile, ok := os.LookupEnv("SERVICE_KEY_FILE")
if ok {
serviceKeyFile = tlsKeyFile
}
tlsName, ok := os.LookupEnv("SERVICE_TLS_NAME")
if ok {
serviceTLSName = tlsName
}
tlsMutualAuth, ok := os.LookupEnv("SERVICE_MUTUAL_AUTH")
if ok {
serviceMutualAuth = tlsMutualAuth
}
tlsPort, ok := os.LookupEnv("SERVICE_TLS_PORT")
if ok {
serviceTLSPort = tlsPort
}
plainPort, ok := os.LookupEnv("SERVICE_PLAIN_PORT")
if ok {
servicePlainPort = plainPort
}
}
// InfoParser provides a reader for Aerospike cluster's response for any of the metric
type InfoParser struct {
*bufio.Reader
}
// NewInfoParser provides an instance of the InfoParser
func NewInfoParser(s string) *InfoParser {
return &InfoParser{bufio.NewReader(strings.NewReader(s))}
}
// PeekAndExpect checks if the expected value is present without advancing the reader
func (ip *InfoParser) PeekAndExpect(s string) error {
bytes, err := ip.Peek(len(s))
if err != nil {
return err
}
v := string(bytes)
if v != s {
return fmt.Errorf("InfoParser: Wrong value. Peek expected %s, but found %s", s, v)
}
return nil
}
// Expect validates the expected value against the one returned by the InfoParser
// This advances the reader by length of the input string.
func (ip *InfoParser) Expect(s string) error {
bytes := make([]byte, len(s))
v, err := ip.Read(bytes)
if err != nil {
return err
}
if string(bytes) != s {
return fmt.Errorf("InfoParser: Wrong value. Expected %s, found %d", s, v)
}
return nil
}
// ReadUntil reads bytes from the InfoParser by handeling some edge-cases
func (ip *InfoParser) ReadUntil(delim byte) (string, error) {
v, err := ip.ReadBytes(delim)
switch len(v) {
case 0:
return string(v), err
case 1:
if v[0] == delim {
return "", err
}
return string(v), err
}
return string(v[:len(v)-1]), err
}
// Get ops/sec
// Format (with and without latency data)
// {test}-read:10:17:37-GMT,ops/sec,>1ms,>8ms,>64ms;10:17:47,29648.2,3.44,0.08,0.00;
// error-no-data-yet-or-back-too-small;
// | return "", fmt.Errorf("Unable to parse cert file: %s", certFile)
}
| random_line_split |
aku-utils.go | clientPolicy := aerospike.NewClientPolicy()
tlsConfig := initTLSConfig()
if securityEnabled == "true" {
clientPolicy.User = username
clientPolicy.Password = password
if authMode == "external" {
clientPolicy.AuthMode = aerospike.AuthModeExternal
}
}
// only one connection
clientPolicy.ConnectionQueueSize = 1
clientPolicy.Timeout = 5 * time.Second
clientPolicy.TlsConfig = tlsConfig
port := servicePlainPort
tlsName := ""
if clientPolicy.TlsConfig != nil {
port = serviceTLSPort
tlsName = serviceTLSName
}
portInt, _ := strconv.Atoi(port)
server := aerospike.NewHost(host, portInt)
server.TLSName = tlsName
zap.S().Debugf("Connecting to aerospike node %s:%d.", host, portInt)
connection, err := aerospike.NewConnection(clientPolicy, server)
if err != nil {
return nil, err
}
if clientPolicy.RequiresAuthentication() {
if err := connection.Login(clientPolicy); err != nil {
return nil, err
}
}
return connection, nil
}
// Initialize TLS config
func initTLSConfig() *tls.Config {
var tlsConfig *tls.Config
if serviceTLSEnabled == "true" {
serverPool, err := x509.SystemCertPool()
if serverPool == nil || err != nil {
zap.S().Debugf("Adding system certificates to the cert pool failed: %s.", err)
serverPool = x509.NewCertPool()
}
if len(serviceCAFile) > 0 {
path, err := getCertFilePath(aerospikeConfigVolumePath, serviceCAFile, serviceTLSName+"-service-cacert.pem")
if err != nil {
zap.S().Fatal("Unable to get certificate file path: %v.", err)
}
// Try to load system CA certs and add them to the system cert pool
caCert := readCertFile(path)
zap.S().Debugf("Adding server certificate `%s` to the pool.", path)
serverPool.AppendCertsFromPEM(caCert)
}
var clientPool []tls.Certificate
if len(serviceCertFile) > 0 || len(serviceKeyFile) > 0 {
certPath, err := getCertFilePath(aerospikeConfigVolumePath, serviceCertFile, serviceTLSName+"-service-cert.pem")
if err != nil {
zap.S().Fatal("Unable to get certificate file path: %v.", err)
}
keyPath, err := getCertFilePath(aerospikeConfigVolumePath, serviceKeyFile, serviceTLSName+"-service-key.pem")
if err != nil {
zap.S().Fatal("Unable to get key file path: %v.", err)
}
// Read Cert and Key files
certFileBytes := readCertFile(certPath)
keyFileBytes := readCertFile(keyPath)
// Decode PEM data
keyBlock, _ := pem.Decode(keyFileBytes)
certBlock, _ := pem.Decode(certFileBytes)
if keyBlock == nil || certBlock == nil {
zap.S().Fatalf("Unable to decode PEM data for `%s` or `%s`.", keyPath, certPath)
}
// Encode PEM data
keyPEM := pem.EncodeToMemory(keyBlock)
certPEM := pem.EncodeToMemory(certBlock)
if keyPEM == nil || certPEM == nil {
zap.S().Fatalf("Unable to encode PEM data for `%s` or `%s`.", keyPath, certPath)
}
cert, err := tls.X509KeyPair(certPEM, keyPEM)
if err != nil {
zap.S().Fatalf("Unable to add client certificate `%s` and key file `%s` to the pool: `%s`.", certPath, keyPath, err)
}
zap.S().Debugf("Adding client certificate `%s` to the pool.", certPath)
clientPool = append(clientPool, cert)
}
tlsConfig = &tls.Config{
Certificates: clientPool,
RootCAs: serverPool,
InsecureSkipVerify: false,
PreferServerCipherSuites: true,
}
tlsConfig.BuildNameToCertificate()
}
return tlsConfig
}
// Get certificate file path
func | (configMountPoint string, certFile string, fileName string) (string, error) {
if certFile == "" {
return "", fmt.Errorf("certificate file name empty")
}
parsedCertFile := strings.Split(certFile, ":")
switch len(parsedCertFile) {
case 1:
return certFile, nil
case 2:
switch parsedCertFile[0] {
case "file":
return parsedCertFile[1], nil
case "b64enc":
return configMountPoint + "/certs/" + fileName, nil
default:
return "", fmt.Errorf("Invalid option while parsing cert file: %s", parsedCertFile[0])
}
}
// Should not reach here
return "", fmt.Errorf("Unable to parse cert file: %s", certFile)
}
// Update global variables from ENV variable inputs
func initVars() {
zap.S().Info("Initializing variables.")
podIP, ok := os.LookupEnv("MY_POD_IP")
if ok {
myPodIP = podIP
}
secEnabled, ok := os.LookupEnv("SECURITY_ENABLED")
if ok {
securityEnabled = secEnabled
}
helmusr, ok := os.LookupEnv("HELM_USERNAME")
if ok {
helmUsername = helmusr
}
helmpass, ok := os.LookupEnv("HELM_PASSWORD")
if ok {
helmPassword = helmpass
}
adminusr, ok := os.LookupEnv("ADMIN_USERNAME")
if ok {
adminUsername = adminusr
}
adminpass, ok := os.LookupEnv("ADMIN_PASSWORD")
if ok {
adminPassword = adminpass
}
auth, ok := os.LookupEnv("AUTH_MODE")
if ok {
authMode = auth
}
tlsEnabled, ok := os.LookupEnv("SERVICE_TLS_ENABLED")
if ok {
serviceTLSEnabled = tlsEnabled
}
tlsCAFile, ok := os.LookupEnv("SERVICE_CA_FILE")
if ok {
serviceCAFile = tlsCAFile
}
tlsCertFile, ok := os.LookupEnv("SERVICE_CERT_FILE")
if ok {
serviceCertFile = tlsCertFile
}
tlsKeyFile, ok := os.LookupEnv("SERVICE_KEY_FILE")
if ok {
serviceKeyFile = tlsKeyFile
}
tlsName, ok := os.LookupEnv("SERVICE_TLS_NAME")
if ok {
serviceTLSName = tlsName
}
tlsMutualAuth, ok := os.LookupEnv("SERVICE_MUTUAL_AUTH")
if ok {
serviceMutualAuth = tlsMutualAuth
}
tlsPort, ok := os.LookupEnv("SERVICE_TLS_PORT")
if ok {
serviceTLSPort = tlsPort
}
plainPort, ok := os.LookupEnv("SERVICE_PLAIN_PORT")
if ok {
servicePlainPort = plainPort
}
}
// InfoParser provides a reader for Aerospike cluster's response for any of the metric
type InfoParser struct {
*bufio.Reader
}
// NewInfoParser provides an instance of the InfoParser
func NewInfoParser(s string) *InfoParser {
return &InfoParser{bufio.NewReader(strings.NewReader(s))}
}
// PeekAndExpect checks if the expected value is present without advancing the reader
func (ip *InfoParser) PeekAndExpect(s string) error {
bytes, err := ip.Peek(len(s))
if err != nil {
return err
}
v := string(bytes)
if v != s {
return fmt.Errorf("InfoParser: Wrong value. Peek expected %s, but found %s", s, v)
}
return nil
}
// Expect validates the expected value against the one returned by the InfoParser
// This advances the reader by length of the input string.
func (ip *InfoParser) Expect(s string) error {
bytes := make([]byte, len(s))
v, err := ip.Read(bytes)
if err != nil {
return err
}
if string(bytes) != s {
return fmt.Errorf("InfoParser: Wrong value. Expected %s, found %d", s, v)
}
return nil
}
// ReadUntil reads bytes from the InfoParser by handeling some edge-cases
func (ip *InfoParser) ReadUntil(delim byte) (string, error) {
v, err := ip.ReadBytes(delim)
switch len(v) {
case 0:
return string(v), err
case 1:
if v[0] == delim {
return "", err
}
return string(v), err
}
return string(v[:len(v)-1]), err
}
// Get ops/sec
// Format (with and without latency data)
// {test}-read:10:17:37-GMT,ops/sec,>1ms,>8ms,>64ms;10:17:47,29648.2,3.44,0.08,0.00;
// error-no-data-yet-or-back-too-small | getCertFilePath | identifier_name |
physics_hooks.rs | handle of the first body involved in the potential collision.
pub rigid_body1: Option<RigidBodyHandle>,
/// The handle of the first body involved in the potential collision.
pub rigid_body2: Option<RigidBodyHandle>,
}
/// Context given to custom contact modifiers to modify the contacts seen by the constraints solver.
pub struct ContactModificationContext<'a> {
/// The set of rigid-bodies.
pub bodies: &'a RigidBodySet,
/// The set of colliders.
pub colliders: &'a ColliderSet,
/// The handle of the first collider involved in the potential collision.
pub collider1: ColliderHandle,
/// The handle of the first collider involved in the potential collision.
pub collider2: ColliderHandle,
/// The handle of the first body involved in the potential collision.
pub rigid_body1: Option<RigidBodyHandle>,
/// The handle of the first body involved in the potential collision.
pub rigid_body2: Option<RigidBodyHandle>,
/// The contact manifold.
pub manifold: &'a ContactManifold,
/// The solver contacts that can be modified.
pub solver_contacts: &'a mut Vec<SolverContact>,
/// The contact normal that can be modified.
pub normal: &'a mut Vector<Real>,
/// User-defined data attached to the manifold.
// NOTE: we keep this a &'a mut u32 to emphasize the
// fact that this can be modified.
pub user_data: &'a mut u32,
}
impl<'a> ContactModificationContext<'a> {
/// Helper function to update `self` to emulate a oneway-platform.
///
/// The "oneway" behavior will only allow contacts between two colliders
/// if the local contact normal of the first collider involved in the contact
/// is almost aligned with the provided `allowed_local_n1` direction.
///
/// To make this method work properly it must be called as part of the
/// `PhysicsHooks::modify_solver_contacts` method at each timestep, for each
/// contact manifold involving a one-way platform. The `self.user_data` field
/// must not be modified from the outside of this method.
pub fn update_as_oneway_platform(
&mut self,
allowed_local_n1: &Vector<Real>,
allowed_angle: Real,
) {
const CONTACT_CONFIGURATION_UNKNOWN: u32 = 0;
const CONTACT_CURRENTLY_ALLOWED: u32 = 1;
const CONTACT_CURRENTLY_FORBIDDEN: u32 = 2;
let cang = ComplexField::cos(allowed_angle);
// Test the allowed normal with the local-space contact normal that
// points towards the exterior of context.collider1.
let contact_is_ok = self.manifold.local_n1.dot(&allowed_local_n1) >= cang;
match *self.user_data {
CONTACT_CONFIGURATION_UNKNOWN => {
if contact_is_ok {
// The contact is close enough to the allowed normal.
*self.user_data = CONTACT_CURRENTLY_ALLOWED;
} else {
// The contact normal isn't close enough to the allowed
// normal, so remove all the contacts and mark further contacts
// as forbidden.
self.solver_contacts.clear();
// NOTE: in some very rare cases `local_n1` will be
// zero if the objects are exactly touching at one point.
// So in this case we can't really conclude.
// If the norm is non-zero, then we can tell we need to forbid
// further contacts. Otherwise we have to wait for the next frame.
if self.manifold.local_n1.norm_squared() > 0.1 {
*self.user_data = CONTACT_CURRENTLY_FORBIDDEN;
}
}
}
CONTACT_CURRENTLY_FORBIDDEN => {
// Contacts are forbidden so we need to continue forbidding contacts
// until all the contacts are non-penetrating again. In that case, if
// the contacts are OK wrt. the contact normal, then we can mark them as allowed.
if contact_is_ok && self.solver_contacts.iter().all(|c| c.dist > 0.0) {
*self.user_data = CONTACT_CURRENTLY_ALLOWED;
} else {
// Discard all the contacts.
self.solver_contacts.clear();
}
}
CONTACT_CURRENTLY_ALLOWED => {
// We allow all the contacts right now. The configuration becomes
// uncertain again when the contact manifold no longer contains any contact.
if self.solver_contacts.is_empty() {
*self.user_data = CONTACT_CONFIGURATION_UNKNOWN;
}
}
_ => unreachable!(),
}
}
}
bitflags::bitflags! {
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
/// Flags affecting the behavior of the constraints solver for a given contact manifold.
pub struct ActiveHooks: u32 {
/// If set, Rapier will call `PhysicsHooks::filter_contact_pair` whenever relevant.
const FILTER_CONTACT_PAIRS = 0b0001;
/// If set, Rapier will call `PhysicsHooks::filter_intersection_pair` whenever relevant.
const FILTER_INTERSECTION_PAIR = 0b0010;
/// If set, Rapier will call `PhysicsHooks::modify_solver_contact` whenever relevant.
const MODIFY_SOLVER_CONTACTS = 0b0100;
}
}
impl Default for ActiveHooks {
fn default() -> Self {
ActiveHooks::empty()
}
}
// TODO: right now, the wasm version don't have the Send+Sync bounds.
// This is because these bounds are very difficult to fulfill if we want to
// call JS closures. Also, parallelism cannot be enabled for wasm targets, so
// not having Send+Sync isn't a problem.
/// User-defined functions called by the physics engines during one timestep in order to customize its behavior.
#[cfg(target_arch = "wasm32")]
pub trait PhysicsHooks {
/// Applies the contact pair filter.
fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> {
None
}
/// Applies the intersection pair filter.
fn filter_intersection_pair(&self, _context: &PairFilterContext) -> bool {
false
}
/// Modifies the set of contacts seen by the constraints solver.
fn modify_solver_contacts(&self, _context: &mut ContactModificationContext) {}
}
/// User-defined functions called by the physics engines during one timestep in order to customize its behavior.
#[cfg(not(target_arch = "wasm32"))]
pub trait PhysicsHooks: Send + Sync {
/// Applies the contact pair filter.
///
/// Note that this method will only be called if at least one of the colliders
/// involved in the contact contains the `ActiveHooks::FILTER_CONTACT_PAIRS` flags
/// in its physics hooks flags.
///
/// User-defined filter for potential contact pairs detected by the broad-phase.
/// This can be used to apply custom logic in order to decide whether two colliders
/// should have their contact computed by the narrow-phase, and if these contact
/// should be solved by the constraints solver
///
/// Note that using a contact pair filter will replace the default contact filtering
/// which consists of preventing contact computation between two non-dynamic bodies.
///
/// This filtering method is called after taking into account the colliders collision groups.
///
/// If this returns `None`, then the narrow-phase will ignore this contact pair and
/// not compute any contact manifolds for it.
/// If this returns `Some`, then the narrow-phase will compute contact manifolds for
/// this pair of colliders, and configure them with the returned solver flags. For
/// example, if this returns `Some(SolverFlags::COMPUTE_IMPULSES)` then the contacts
/// will be taken into account by the constraints solver. If this returns
/// `Some(SolverFlags::empty())` then the constraints solver will ignore these
/// contacts.
fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> {
Some(SolverFlags::COMPUTE_IMPULSES)
}
/// Applies the intersection pair filter.
///
/// Note that this method will only be called if at least one of the colliders
/// involved in the contact contains the `ActiveHooks::FILTER_INTERSECTION_PAIR` flags
/// in its physics hooks flags.
///
/// User-defined filter for potential intersection pairs detected by the broad-phase. | /// This can be used to apply custom logic in order to decide whether two colliders
/// should have their intersection computed by the narrow-phase.
///
/// Note that using an intersection pair filter will replace the default intersection filtering
/// which consists of preventing intersection computation between two non-dynamic bodies.
///
/// This filtering method is called after taking into account the colliders collision groups.
///
/// If this returns `false`, then the narrow-phase will ignore this pair and
/// not compute any intersection information for it.
/// If this return `true` then the narrow-phase will compute intersection
/// information for this pair.
fn filter_intersection_pair(&self, _context: &PairFilterContext) -> bool {
true
}
/// Modifies the set of contacts seen by the constraints solver.
///
/// Note that this method will only be called if at least one of the colliders
/// involved in the contact contains the `ActiveHooks::MODIFY_SOLVER_CONTACTS` flags
/// in its physics | /// | random_line_split |
physics_hooks.rs | of the first body involved in the potential collision.
pub rigid_body1: Option<RigidBodyHandle>,
/// The handle of the first body involved in the potential collision.
pub rigid_body2: Option<RigidBodyHandle>,
}
/// Context given to custom contact modifiers to modify the contacts seen by the constraints solver.
pub struct ContactModificationContext<'a> {
/// The set of rigid-bodies.
pub bodies: &'a RigidBodySet,
/// The set of colliders.
pub colliders: &'a ColliderSet,
/// The handle of the first collider involved in the potential collision.
pub collider1: ColliderHandle,
/// The handle of the first collider involved in the potential collision.
pub collider2: ColliderHandle,
/// The handle of the first body involved in the potential collision.
pub rigid_body1: Option<RigidBodyHandle>,
/// The handle of the first body involved in the potential collision.
pub rigid_body2: Option<RigidBodyHandle>,
/// The contact manifold.
pub manifold: &'a ContactManifold,
/// The solver contacts that can be modified.
pub solver_contacts: &'a mut Vec<SolverContact>,
/// The contact normal that can be modified.
pub normal: &'a mut Vector<Real>,
/// User-defined data attached to the manifold.
// NOTE: we keep this a &'a mut u32 to emphasize the
// fact that this can be modified.
pub user_data: &'a mut u32,
}
impl<'a> ContactModificationContext<'a> {
/// Helper function to update `self` to emulate a oneway-platform.
///
/// The "oneway" behavior will only allow contacts between two colliders
/// if the local contact normal of the first collider involved in the contact
/// is almost aligned with the provided `allowed_local_n1` direction.
///
/// To make this method work properly it must be called as part of the
/// `PhysicsHooks::modify_solver_contacts` method at each timestep, for each
/// contact manifold involving a one-way platform. The `self.user_data` field
/// must not be modified from the outside of this method.
pub fn update_as_oneway_platform(
&mut self,
allowed_local_n1: &Vector<Real>,
allowed_angle: Real,
) | self.solver_contacts.clear();
// NOTE: in some very rare cases `local_n1` will be
// zero if the objects are exactly touching at one point.
// So in this case we can't really conclude.
// If the norm is non-zero, then we can tell we need to forbid
// further contacts. Otherwise we have to wait for the next frame.
if self.manifold.local_n1.norm_squared() > 0.1 {
*self.user_data = CONTACT_CURRENTLY_FORBIDDEN;
}
}
}
CONTACT_CURRENTLY_FORBIDDEN => {
// Contacts are forbidden so we need to continue forbidding contacts
// until all the contacts are non-penetrating again. In that case, if
// the contacts are OK wrt. the contact normal, then we can mark them as allowed.
if contact_is_ok && self.solver_contacts.iter().all(|c| c.dist > 0.0) {
*self.user_data = CONTACT_CURRENTLY_ALLOWED;
} else {
// Discard all the contacts.
self.solver_contacts.clear();
}
}
CONTACT_CURRENTLY_ALLOWED => {
// We allow all the contacts right now. The configuration becomes
// uncertain again when the contact manifold no longer contains any contact.
if self.solver_contacts.is_empty() {
*self.user_data = CONTACT_CONFIGURATION_UNKNOWN;
}
}
_ => unreachable!(),
}
}
}
bitflags::bitflags! {
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
/// Flags affecting the behavior of the constraints solver for a given contact manifold.
pub struct ActiveHooks: u32 {
/// If set, Rapier will call `PhysicsHooks::filter_contact_pair` whenever relevant.
const FILTER_CONTACT_PAIRS = 0b0001;
/// If set, Rapier will call `PhysicsHooks::filter_intersection_pair` whenever relevant.
const FILTER_INTERSECTION_PAIR = 0b0010;
/// If set, Rapier will call `PhysicsHooks::modify_solver_contact` whenever relevant.
const MODIFY_SOLVER_CONTACTS = 0b0100;
}
}
impl Default for ActiveHooks {
fn default() -> Self {
ActiveHooks::empty()
}
}
// TODO: right now, the wasm version don't have the Send+Sync bounds.
// This is because these bounds are very difficult to fulfill if we want to
// call JS closures. Also, parallelism cannot be enabled for wasm targets, so
// not having Send+Sync isn't a problem.
/// User-defined functions called by the physics engines during one timestep in order to customize its behavior.
#[cfg(target_arch = "wasm32")]
pub trait PhysicsHooks {
/// Applies the contact pair filter.
fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> {
None
}
/// Applies the intersection pair filter.
fn filter_intersection_pair(&self, _context: &PairFilterContext) -> bool {
false
}
/// Modifies the set of contacts seen by the constraints solver.
fn modify_solver_contacts(&self, _context: &mut ContactModificationContext) {}
}
/// User-defined functions called by the physics engines during one timestep in order to customize its behavior.
#[cfg(not(target_arch = "wasm32"))]
pub trait PhysicsHooks: Send + Sync {
/// Applies the contact pair filter.
///
/// Note that this method will only be called if at least one of the colliders
/// involved in the contact contains the `ActiveHooks::FILTER_CONTACT_PAIRS` flags
/// in its physics hooks flags.
///
/// User-defined filter for potential contact pairs detected by the broad-phase.
/// This can be used to apply custom logic in order to decide whether two colliders
/// should have their contact computed by the narrow-phase, and if these contact
/// should be solved by the constraints solver
///
/// Note that using a contact pair filter will replace the default contact filtering
/// which consists of preventing contact computation between two non-dynamic bodies.
///
/// This filtering method is called after taking into account the colliders collision groups.
///
/// If this returns `None`, then the narrow-phase will ignore this contact pair and
/// not compute any contact manifolds for it.
/// If this returns `Some`, then the narrow-phase will compute contact manifolds for
/// this pair of colliders, and configure them with the returned solver flags. For
/// example, if this returns `Some(SolverFlags::COMPUTE_IMPULSES)` then the contacts
/// will be taken into account by the constraints solver. If this returns
/// `Some(SolverFlags::empty())` then the constraints solver will ignore these
/// contacts.
fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> {
Some(SolverFlags::COMPUTE_IMPULSES)
}
/// Applies the intersection pair filter.
///
/// Note that this method will only be called if at least one of the colliders
/// involved in the contact contains the `ActiveHooks::FILTER_INTERSECTION_PAIR` flags
/// in its physics hooks flags.
///
/// User-defined filter for potential intersection pairs detected by the broad-phase.
///
/// This can be used to apply custom logic in order to decide whether two colliders
/// should have their intersection computed by the narrow-phase.
///
/// Note that using an intersection pair filter will replace the default intersection filtering
/// which consists of preventing intersection computation between two non-dynamic bodies.
///
/// This filtering method is called after taking into account the colliders collision groups.
///
/// If this returns `false`, then the narrow-phase will ignore this pair and
/// not compute any intersection information for it.
/// If this return `true` then the narrow-phase will compute intersection
/// information for this pair.
fn filter_intersection_pair(&self, _context: &PairFilterContext) -> bool {
true
}
/// Modifies the set of contacts seen by the constraints solver.
///
/// Note that this method will only be called if at least one of the colliders
/// involved in the contact contains the `ActiveHooks::MODIFY_SOLVER_CONTACTS` flags
/// in its physics | {
const CONTACT_CONFIGURATION_UNKNOWN: u32 = 0;
const CONTACT_CURRENTLY_ALLOWED: u32 = 1;
const CONTACT_CURRENTLY_FORBIDDEN: u32 = 2;
let cang = ComplexField::cos(allowed_angle);
// Test the allowed normal with the local-space contact normal that
// points towards the exterior of context.collider1.
let contact_is_ok = self.manifold.local_n1.dot(&allowed_local_n1) >= cang;
match *self.user_data {
CONTACT_CONFIGURATION_UNKNOWN => {
if contact_is_ok {
// The contact is close enough to the allowed normal.
*self.user_data = CONTACT_CURRENTLY_ALLOWED;
} else {
// The contact normal isn't close enough to the allowed
// normal, so remove all the contacts and mark further contacts
// as forbidden. | identifier_body |
physics_hooks.rs | of the first body involved in the potential collision.
pub rigid_body1: Option<RigidBodyHandle>,
/// The handle of the first body involved in the potential collision.
pub rigid_body2: Option<RigidBodyHandle>,
}
/// Context given to custom contact modifiers to modify the contacts seen by the constraints solver.
pub struct ContactModificationContext<'a> {
/// The set of rigid-bodies.
pub bodies: &'a RigidBodySet,
/// The set of colliders.
pub colliders: &'a ColliderSet,
/// The handle of the first collider involved in the potential collision.
pub collider1: ColliderHandle,
/// The handle of the first collider involved in the potential collision.
pub collider2: ColliderHandle,
/// The handle of the first body involved in the potential collision.
pub rigid_body1: Option<RigidBodyHandle>,
/// The handle of the first body involved in the potential collision.
pub rigid_body2: Option<RigidBodyHandle>,
/// The contact manifold.
pub manifold: &'a ContactManifold,
/// The solver contacts that can be modified.
pub solver_contacts: &'a mut Vec<SolverContact>,
/// The contact normal that can be modified.
pub normal: &'a mut Vector<Real>,
/// User-defined data attached to the manifold.
// NOTE: we keep this a &'a mut u32 to emphasize the
// fact that this can be modified.
pub user_data: &'a mut u32,
}
impl<'a> ContactModificationContext<'a> {
/// Helper function to update `self` to emulate a oneway-platform.
///
/// The "oneway" behavior will only allow contacts between two colliders
/// if the local contact normal of the first collider involved in the contact
/// is almost aligned with the provided `allowed_local_n1` direction.
///
/// To make this method work properly it must be called as part of the
/// `PhysicsHooks::modify_solver_contacts` method at each timestep, for each
/// contact manifold involving a one-way platform. The `self.user_data` field
/// must not be modified from the outside of this method.
pub fn update_as_oneway_platform(
&mut self,
allowed_local_n1: &Vector<Real>,
allowed_angle: Real,
) {
const CONTACT_CONFIGURATION_UNKNOWN: u32 = 0;
const CONTACT_CURRENTLY_ALLOWED: u32 = 1;
const CONTACT_CURRENTLY_FORBIDDEN: u32 = 2;
let cang = ComplexField::cos(allowed_angle);
// Test the allowed normal with the local-space contact normal that
// points towards the exterior of context.collider1.
let contact_is_ok = self.manifold.local_n1.dot(&allowed_local_n1) >= cang;
match *self.user_data {
CONTACT_CONFIGURATION_UNKNOWN => {
if contact_is_ok {
// The contact is close enough to the allowed normal.
*self.user_data = CONTACT_CURRENTLY_ALLOWED;
} else {
// The contact normal isn't close enough to the allowed
// normal, so remove all the contacts and mark further contacts
// as forbidden.
self.solver_contacts.clear();
// NOTE: in some very rare cases `local_n1` will be
// zero if the objects are exactly touching at one point.
// So in this case we can't really conclude.
// If the norm is non-zero, then we can tell we need to forbid
// further contacts. Otherwise we have to wait for the next frame.
if self.manifold.local_n1.norm_squared() > 0.1 {
*self.user_data = CONTACT_CURRENTLY_FORBIDDEN;
}
}
}
CONTACT_CURRENTLY_FORBIDDEN => {
// Contacts are forbidden so we need to continue forbidding contacts
// until all the contacts are non-penetrating again. In that case, if
// the contacts are OK wrt. the contact normal, then we can mark them as allowed.
if contact_is_ok && self.solver_contacts.iter().all(|c| c.dist > 0.0) {
*self.user_data = CONTACT_CURRENTLY_ALLOWED;
} else |
}
CONTACT_CURRENTLY_ALLOWED => {
// We allow all the contacts right now. The configuration becomes
// uncertain again when the contact manifold no longer contains any contact.
if self.solver_contacts.is_empty() {
*self.user_data = CONTACT_CONFIGURATION_UNKNOWN;
}
}
_ => unreachable!(),
}
}
}
bitflags::bitflags! {
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
/// Flags affecting the behavior of the constraints solver for a given contact manifold.
pub struct ActiveHooks: u32 {
/// If set, Rapier will call `PhysicsHooks::filter_contact_pair` whenever relevant.
const FILTER_CONTACT_PAIRS = 0b0001;
/// If set, Rapier will call `PhysicsHooks::filter_intersection_pair` whenever relevant.
const FILTER_INTERSECTION_PAIR = 0b0010;
/// If set, Rapier will call `PhysicsHooks::modify_solver_contact` whenever relevant.
const MODIFY_SOLVER_CONTACTS = 0b0100;
}
}
impl Default for ActiveHooks {
fn default() -> Self {
ActiveHooks::empty()
}
}
// TODO: right now, the wasm version don't have the Send+Sync bounds.
// This is because these bounds are very difficult to fulfill if we want to
// call JS closures. Also, parallelism cannot be enabled for wasm targets, so
// not having Send+Sync isn't a problem.
/// User-defined functions called by the physics engines during one timestep in order to customize its behavior.
#[cfg(target_arch = "wasm32")]
pub trait PhysicsHooks {
/// Applies the contact pair filter.
fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> {
None
}
/// Applies the intersection pair filter.
fn filter_intersection_pair(&self, _context: &PairFilterContext) -> bool {
false
}
/// Modifies the set of contacts seen by the constraints solver.
fn modify_solver_contacts(&self, _context: &mut ContactModificationContext) {}
}
/// User-defined functions called by the physics engines during one timestep in order to customize its behavior.
#[cfg(not(target_arch = "wasm32"))]
pub trait PhysicsHooks: Send + Sync {
/// Applies the contact pair filter.
///
/// Note that this method will only be called if at least one of the colliders
/// involved in the contact contains the `ActiveHooks::FILTER_CONTACT_PAIRS` flags
/// in its physics hooks flags.
///
/// User-defined filter for potential contact pairs detected by the broad-phase.
/// This can be used to apply custom logic in order to decide whether two colliders
/// should have their contact computed by the narrow-phase, and if these contact
/// should be solved by the constraints solver
///
/// Note that using a contact pair filter will replace the default contact filtering
/// which consists of preventing contact computation between two non-dynamic bodies.
///
/// This filtering method is called after taking into account the colliders collision groups.
///
/// If this returns `None`, then the narrow-phase will ignore this contact pair and
/// not compute any contact manifolds for it.
/// If this returns `Some`, then the narrow-phase will compute contact manifolds for
/// this pair of colliders, and configure them with the returned solver flags. For
/// example, if this returns `Some(SolverFlags::COMPUTE_IMPULSES)` then the contacts
/// will be taken into account by the constraints solver. If this returns
/// `Some(SolverFlags::empty())` then the constraints solver will ignore these
/// contacts.
fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> {
Some(SolverFlags::COMPUTE_IMPULSES)
}
/// Applies the intersection pair filter.
///
/// Note that this method will only be called if at least one of the colliders
/// involved in the contact contains the `ActiveHooks::FILTER_INTERSECTION_PAIR` flags
/// in its physics hooks flags.
///
/// User-defined filter for potential intersection pairs detected by the broad-phase.
///
/// This can be used to apply custom logic in order to decide whether two colliders
/// should have their intersection computed by the narrow-phase.
///
/// Note that using an intersection pair filter will replace the default intersection filtering
/// which consists of preventing intersection computation between two non-dynamic bodies.
///
/// This filtering method is called after taking into account the colliders collision groups.
///
/// If this returns `false`, then the narrow-phase will ignore this pair and
/// not compute any intersection information for it.
/// If this return `true` then the narrow-phase will compute intersection
/// information for this pair.
fn filter_intersection_pair(&self, _context: &PairFilterContext) -> bool {
true
}
/// Modifies the set of contacts seen by the constraints solver.
///
/// Note that this method will only be called if at least one of the colliders
/// involved in the contact contains the `ActiveHooks::MODIFY_SOLVER_CONTACTS` flags
/// in its | {
// Discard all the contacts.
self.solver_contacts.clear();
} | conditional_block |
physics_hooks.rs | `self` to emulate a oneway-platform.
///
/// The "oneway" behavior will only allow contacts between two colliders
/// if the local contact normal of the first collider involved in the contact
/// is almost aligned with the provided `allowed_local_n1` direction.
///
/// To make this method work properly it must be called as part of the
/// `PhysicsHooks::modify_solver_contacts` method at each timestep, for each
/// contact manifold involving a one-way platform. The `self.user_data` field
/// must not be modified from the outside of this method.
pub fn update_as_oneway_platform(
&mut self,
allowed_local_n1: &Vector<Real>,
allowed_angle: Real,
) {
const CONTACT_CONFIGURATION_UNKNOWN: u32 = 0;
const CONTACT_CURRENTLY_ALLOWED: u32 = 1;
const CONTACT_CURRENTLY_FORBIDDEN: u32 = 2;
let cang = ComplexField::cos(allowed_angle);
// Test the allowed normal with the local-space contact normal that
// points towards the exterior of context.collider1.
let contact_is_ok = self.manifold.local_n1.dot(&allowed_local_n1) >= cang;
match *self.user_data {
CONTACT_CONFIGURATION_UNKNOWN => {
if contact_is_ok {
// The contact is close enough to the allowed normal.
*self.user_data = CONTACT_CURRENTLY_ALLOWED;
} else {
// The contact normal isn't close enough to the allowed
// normal, so remove all the contacts and mark further contacts
// as forbidden.
self.solver_contacts.clear();
// NOTE: in some very rare cases `local_n1` will be
// zero if the objects are exactly touching at one point.
// So in this case we can't really conclude.
// If the norm is non-zero, then we can tell we need to forbid
// further contacts. Otherwise we have to wait for the next frame.
if self.manifold.local_n1.norm_squared() > 0.1 {
*self.user_data = CONTACT_CURRENTLY_FORBIDDEN;
}
}
}
CONTACT_CURRENTLY_FORBIDDEN => {
// Contacts are forbidden so we need to continue forbidding contacts
// until all the contacts are non-penetrating again. In that case, if
// the contacts are OK wrt. the contact normal, then we can mark them as allowed.
if contact_is_ok && self.solver_contacts.iter().all(|c| c.dist > 0.0) {
*self.user_data = CONTACT_CURRENTLY_ALLOWED;
} else {
// Discard all the contacts.
self.solver_contacts.clear();
}
}
CONTACT_CURRENTLY_ALLOWED => {
// We allow all the contacts right now. The configuration becomes
// uncertain again when the contact manifold no longer contains any contact.
if self.solver_contacts.is_empty() {
*self.user_data = CONTACT_CONFIGURATION_UNKNOWN;
}
}
_ => unreachable!(),
}
}
}
bitflags::bitflags! {
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
/// Flags affecting the behavior of the constraints solver for a given contact manifold.
pub struct ActiveHooks: u32 {
/// If set, Rapier will call `PhysicsHooks::filter_contact_pair` whenever relevant.
const FILTER_CONTACT_PAIRS = 0b0001;
/// If set, Rapier will call `PhysicsHooks::filter_intersection_pair` whenever relevant.
const FILTER_INTERSECTION_PAIR = 0b0010;
/// If set, Rapier will call `PhysicsHooks::modify_solver_contact` whenever relevant.
const MODIFY_SOLVER_CONTACTS = 0b0100;
}
}
impl Default for ActiveHooks {
fn default() -> Self {
ActiveHooks::empty()
}
}
// TODO: right now, the wasm version don't have the Send+Sync bounds.
// This is because these bounds are very difficult to fulfill if we want to
// call JS closures. Also, parallelism cannot be enabled for wasm targets, so
// not having Send+Sync isn't a problem.
/// User-defined functions called by the physics engines during one timestep in order to customize its behavior.
#[cfg(target_arch = "wasm32")]
pub trait PhysicsHooks {
/// Applies the contact pair filter.
fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> {
None
}
/// Applies the intersection pair filter.
fn filter_intersection_pair(&self, _context: &PairFilterContext) -> bool {
false
}
/// Modifies the set of contacts seen by the constraints solver.
fn modify_solver_contacts(&self, _context: &mut ContactModificationContext) {}
}
/// User-defined functions called by the physics engines during one timestep in order to customize its behavior.
#[cfg(not(target_arch = "wasm32"))]
pub trait PhysicsHooks: Send + Sync {
/// Applies the contact pair filter.
///
/// Note that this method will only be called if at least one of the colliders
/// involved in the contact contains the `ActiveHooks::FILTER_CONTACT_PAIRS` flags
/// in its physics hooks flags.
///
/// User-defined filter for potential contact pairs detected by the broad-phase.
/// This can be used to apply custom logic in order to decide whether two colliders
/// should have their contact computed by the narrow-phase, and if these contact
/// should be solved by the constraints solver
///
/// Note that using a contact pair filter will replace the default contact filtering
/// which consists of preventing contact computation between two non-dynamic bodies.
///
/// This filtering method is called after taking into account the colliders collision groups.
///
/// If this returns `None`, then the narrow-phase will ignore this contact pair and
/// not compute any contact manifolds for it.
/// If this returns `Some`, then the narrow-phase will compute contact manifolds for
/// this pair of colliders, and configure them with the returned solver flags. For
/// example, if this returns `Some(SolverFlags::COMPUTE_IMPULSES)` then the contacts
/// will be taken into account by the constraints solver. If this returns
/// `Some(SolverFlags::empty())` then the constraints solver will ignore these
/// contacts.
fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> {
Some(SolverFlags::COMPUTE_IMPULSES)
}
/// Applies the intersection pair filter.
///
/// Note that this method will only be called if at least one of the colliders
/// involved in the contact contains the `ActiveHooks::FILTER_INTERSECTION_PAIR` flags
/// in its physics hooks flags.
///
/// User-defined filter for potential intersection pairs detected by the broad-phase.
///
/// This can be used to apply custom logic in order to decide whether two colliders
/// should have their intersection computed by the narrow-phase.
///
/// Note that using an intersection pair filter will replace the default intersection filtering
/// which consists of preventing intersection computation between two non-dynamic bodies.
///
/// This filtering method is called after taking into account the colliders collision groups.
///
/// If this returns `false`, then the narrow-phase will ignore this pair and
/// not compute any intersection information for it.
/// If this return `true` then the narrow-phase will compute intersection
/// information for this pair.
fn filter_intersection_pair(&self, _context: &PairFilterContext) -> bool {
true
}
/// Modifies the set of contacts seen by the constraints solver.
///
/// Note that this method will only be called if at least one of the colliders
/// involved in the contact contains the `ActiveHooks::MODIFY_SOLVER_CONTACTS` flags
/// in its physics hooks flags.
///
/// By default, the content of `solver_contacts` is computed from `manifold.points`.
/// This method will be called on each contact manifold which have the flag `SolverFlags::modify_solver_contacts` set.
/// This method can be used to modify the set of solver contacts seen by the constraints solver: contacts
/// can be removed and modified.
///
/// Note that if all the contacts have to be ignored by the constraint solver, you may simply
/// do `context.solver_contacts.clear()`.
///
/// Modifying the solver contacts allow you to achieve various effects, including:
/// - Simulating conveyor belts by setting the `surface_velocity` of a solver contact.
/// - Simulating shapes with multiply materials by modifying the friction and restitution
/// coefficient depending of the features in contacts.
/// - Simulating one-way platforms depending on the contact normal.
///
/// Each contact manifold is given a `u32` user-defined data that is persistent between
/// timesteps (as long as the contact manifold exists). This user-defined data is initialized
/// as 0 and can be modified in `context.user_data`.
///
/// The world-space contact normal can be modified in `context.normal`.
fn modify_solver_contacts(&self, _context: &mut ContactModificationContext) {}
}
impl PhysicsHooks for () {
fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> {
Some(SolverFlags::default())
}
fn | filter_intersection_pair | identifier_name |
|
envmon_sensor_info.pb.go | ) GetName_4() string {
if m != nil {
return m.Name_4
}
return ""
}
func (m *EnvmonSensorInfo_KEYS) GetName_5() string {
if m != nil {
return m.Name_5
}
return ""
}
func (m *EnvmonSensorInfo_KEYS) GetName_6() string {
if m != nil {
return m.Name_6
}
return ""
}
func (m *EnvmonSensorInfo_KEYS) GetName_7() string {
if m != nil |
return ""
}
type EnvmonSensorInfo struct {
FieldValidityBitmap string `protobuf:"bytes,50,opt,name=field_validity_bitmap,json=fieldValidityBitmap,proto3" json:"field_validity_bitmap,omitempty"`
DeviceDescription string `protobuf:"bytes,51,opt,name=device_description,json=deviceDescription,proto3" json:"device_description,omitempty"`
Units string `protobuf:"bytes,52,opt,name=units,proto3" json:"units,omitempty"`
DeviceId uint32 `protobuf:"varint,53,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
Value uint32 `protobuf:"varint,54,opt,name=value,proto3" json:"value,omitempty"`
AlarmType uint32 `protobuf:"varint,55,opt,name=alarm_type,json=alarmType,proto3" json:"alarm_type,omitempty"`
DataType uint32 `protobuf:"varint,56,opt,name=data_type,json=dataType,proto3" json:"data_type,omitempty"`
Scale uint32 `protobuf:"varint,57,opt,name=scale,proto3" json:"scale,omitempty"`
Precision uint32 `protobuf:"varint,58,opt,name=precision,proto3" json:"precision,omitempty"`
Status uint32 `protobuf:"varint,59,opt,name=status,proto3" json:"status,omitempty"`
AgeTimeStamp uint32 `protobuf:"varint,60,opt,name=age_time_stamp,json=ageTimeStamp,proto3" json:"age_time_stamp,omitempty"`
UpdateRate uint32 `protobuf:"varint,61,opt,name=update_rate,json=updateRate,proto3" json:"update_rate,omitempty"`
Average int32 `protobuf:"zigzag32,62,opt,name=average,proto3" json:"average,omitempty"`
Minimum int32 `protobuf:"zigzag32,63,opt,name=minimum,proto3" json:"minimum,omitempty"`
Maximum int32 `protobuf:"zigzag32,64,opt,name=maximum,proto3" json:"maximum,omitempty"`
Interval int32 `protobuf:"zigzag32,65,opt,name=interval,proto3" json:"interval,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *EnvmonSensorInfo) Reset() { *m = EnvmonSensorInfo{} }
func (m *EnvmonSensorInfo) String() string { return proto.CompactTextString(m) }
func (*EnvmonSensorInfo) ProtoMessage() {}
func (*EnvmonSensorInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_bc03e94ffc42a321, []int{1}
}
func (m *EnvmonSensorInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_EnvmonSensorInfo.Unmarshal(m, b)
}
func (m *EnvmonSensorInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_EnvmonSensorInfo.Marshal(b, m, deterministic)
}
func (m *EnvmonSensorInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_EnvmonSensorInfo.Merge(m, src)
}
func (m *EnvmonSensorInfo) XXX_Size() int {
return xxx_messageInfo_EnvmonSensorInfo.Size(m)
}
func (m *EnvmonSensorInfo) XXX_DiscardUnknown() {
xxx_messageInfo_EnvmonSensorInfo.DiscardUnknown(m)
}
var xxx_messageInfo_EnvmonSensorInfo proto.InternalMessageInfo
func (m *EnvmonSensorInfo) GetFieldValidityBitmap() string {
if m != nil {
return m.FieldValidityBitmap
}
return ""
}
func (m *EnvmonSensorInfo) GetDeviceDescription() string {
if m != nil {
return m.DeviceDescription
}
return ""
}
func (m *EnvmonSensorInfo) GetUnits() string {
if m != nil {
return m.Units
}
return ""
}
func (m *EnvmonSensorInfo) GetDeviceId() uint32 {
if m != nil {
return m.DeviceId
}
return 0
}
func (m *EnvmonSensorInfo) GetValue() uint32 {
if m != nil {
return m.Value
}
return 0
}
func (m *EnvmonSensorInfo) GetAlarmType() uint32 {
if m != nil {
return m.AlarmType
}
return 0
}
func (m *EnvmonSensorInfo) GetDataType() uint32 {
if m != nil {
return m.DataType
}
return 0
}
func (m *EnvmonSensorInfo) GetScale() uint32 {
if m != nil {
return m.Scale
}
return 0
}
func (m *EnvmonSensorInfo) GetPrecision() uint32 {
if m != nil {
return m.Precision
}
return 0
}
func (m *EnvmonSensorInfo) GetStatus() uint32 {
if m != nil {
return m.Status
}
return 0
}
func (m *EnvmonSensorInfo) GetAgeTimeStamp() uint32 {
if m != nil {
return m.AgeTimeStamp
}
return 0
}
func (m *EnvmonSensorInfo) GetUpdateRate() uint32 {
if m != nil {
return m.UpdateRate
}
return 0
}
func (m *EnvmonSensorInfo) GetAverage() int32 {
if m != nil {
return m.Average
}
return 0
}
func (m *EnvmonSensorInfo) GetMinimum() int32 {
if m != nil {
return m.Minimum
}
return 0
}
func (m *EnvmonSensorInfo) GetMaximum() int32 {
if m != nil {
return m.Maximum
}
return 0
}
func (m *EnvmonSensorInfo) GetInterval() int32 {
if m != nil {
return m.Interval
}
return 0
}
func init() {
proto.RegisterType((*EnvmonSensorInfo_KEYS)(nil), "cisco_ios_xr_invmgr_oper.inventory.racks.rack.entity.slot.tsi1s.tsi1.tsi2s.tsi2.tsi3s.tsi3.tsi4s.tsi4.tsi5s.tsi5.tsi6s.tsi6.attributes.env_sensor_info.envmon_sensor_info_KEYS")
proto.RegisterType((*EnvmonSensorInfo)(nil), "cisco_ios_xr_invmgr_oper.inventory.racks.rack.entity.slot.tsi1s.tsi1.tsi2s.tsi2.tsi3s.tsi3.tsi4s.tsi4.tsi5s.tsi5.tsi6s.tsi6.attributes.env_sensor_info.envmon_sensor_info")
}
func init() { proto.RegisterFile("envmon_sensor_info.proto", fileDescriptor_bc03e94ffc42a321) }
var fileDescriptor_bc03e94ffc42a321 = []byte{
// 506 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x93, 0xcd, 0x6e, 0xd3, 0x4c,
0x14, 0x86, 0xe5, 0xaf, 0x4d, 0xda, 0xcc, 0x07, 0x48, 0x1d, 0x5a, 0x18, 0xf1, 0x23, 0xaa, 0x8a,
0x45, 0x37, 0x58, 0xaa, 0x9d, 0xa4, 0xfc, 0xff, 0x09, 0x16, 0x88, 0x5d, 0x5a, 0x21, 0xb1, 0x1a,
0x9d, 0xd8, 0xa7, 0xd1, 0x08, 0x7b, 0x6c, 0xcd, 0x1c, 0x5b, 0xcd, 0x8d, 0x70, 0x59, 0xdc, 0x0f,
0x3b, 0x34, 0xc7 | {
return m.Name_7
} | conditional_block |
envmon_sensor_info.pb.go | :"zigzag32,62,opt,name=average,proto3" json:"average,omitempty"`
Minimum int32 `protobuf:"zigzag32,63,opt,name=minimum,proto3" json:"minimum,omitempty"`
Maximum int32 `protobuf:"zigzag32,64,opt,name=maximum,proto3" json:"maximum,omitempty"`
Interval int32 `protobuf:"zigzag32,65,opt,name=interval,proto3" json:"interval,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *EnvmonSensorInfo) Reset() { *m = EnvmonSensorInfo{} }
func (m *EnvmonSensorInfo) String() string { return proto.CompactTextString(m) }
func (*EnvmonSensorInfo) ProtoMessage() {}
func (*EnvmonSensorInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_bc03e94ffc42a321, []int{1}
}
func (m *EnvmonSensorInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_EnvmonSensorInfo.Unmarshal(m, b)
}
func (m *EnvmonSensorInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_EnvmonSensorInfo.Marshal(b, m, deterministic)
}
func (m *EnvmonSensorInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_EnvmonSensorInfo.Merge(m, src)
}
func (m *EnvmonSensorInfo) XXX_Size() int {
return xxx_messageInfo_EnvmonSensorInfo.Size(m)
}
func (m *EnvmonSensorInfo) XXX_DiscardUnknown() {
xxx_messageInfo_EnvmonSensorInfo.DiscardUnknown(m)
}
var xxx_messageInfo_EnvmonSensorInfo proto.InternalMessageInfo
func (m *EnvmonSensorInfo) GetFieldValidityBitmap() string {
if m != nil {
return m.FieldValidityBitmap
}
return ""
}
func (m *EnvmonSensorInfo) GetDeviceDescription() string {
if m != nil {
return m.DeviceDescription
}
return ""
}
func (m *EnvmonSensorInfo) GetUnits() string {
if m != nil {
return m.Units
}
return ""
}
func (m *EnvmonSensorInfo) GetDeviceId() uint32 {
if m != nil {
return m.DeviceId
}
return 0
}
func (m *EnvmonSensorInfo) GetValue() uint32 {
if m != nil {
return m.Value
}
return 0
}
func (m *EnvmonSensorInfo) GetAlarmType() uint32 {
if m != nil {
return m.AlarmType
}
return 0
}
func (m *EnvmonSensorInfo) GetDataType() uint32 {
if m != nil {
return m.DataType
}
return 0
}
func (m *EnvmonSensorInfo) GetScale() uint32 {
if m != nil {
return m.Scale
}
return 0
}
func (m *EnvmonSensorInfo) GetPrecision() uint32 {
if m != nil {
return m.Precision
}
return 0
}
func (m *EnvmonSensorInfo) GetStatus() uint32 {
if m != nil {
return m.Status
}
return 0
}
func (m *EnvmonSensorInfo) GetAgeTimeStamp() uint32 {
if m != nil {
return m.AgeTimeStamp
}
return 0
}
func (m *EnvmonSensorInfo) GetUpdateRate() uint32 {
if m != nil {
return m.UpdateRate
}
return 0
}
func (m *EnvmonSensorInfo) GetAverage() int32 {
if m != nil {
return m.Average
}
return 0
}
func (m *EnvmonSensorInfo) GetMinimum() int32 {
if m != nil {
return m.Minimum
}
return 0
}
func (m *EnvmonSensorInfo) GetMaximum() int32 {
if m != nil {
return m.Maximum
}
return 0
}
func (m *EnvmonSensorInfo) GetInterval() int32 {
if m != nil {
return m.Interval
}
return 0
}
func init() {
proto.RegisterType((*EnvmonSensorInfo_KEYS)(nil), "cisco_ios_xr_invmgr_oper.inventory.racks.rack.entity.slot.tsi1s.tsi1.tsi2s.tsi2.tsi3s.tsi3.tsi4s.tsi4.tsi5s.tsi5.tsi6s.tsi6.attributes.env_sensor_info.envmon_sensor_info_KEYS")
proto.RegisterType((*EnvmonSensorInfo)(nil), "cisco_ios_xr_invmgr_oper.inventory.racks.rack.entity.slot.tsi1s.tsi1.tsi2s.tsi2.tsi3s.tsi3.tsi4s.tsi4.tsi5s.tsi5.tsi6s.tsi6.attributes.env_sensor_info.envmon_sensor_info")
}
func init() { proto.RegisterFile("envmon_sensor_info.proto", fileDescriptor_bc03e94ffc42a321) }
var fileDescriptor_bc03e94ffc42a321 = []byte{
// 506 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x93, 0xcd, 0x6e, 0xd3, 0x4c,
0x14, 0x86, 0xe5, 0xaf, 0x4d, 0xda, 0xcc, 0x07, 0x48, 0x1d, 0x5a, 0x18, 0xf1, 0x23, 0xaa, 0x8a,
0x45, 0x37, 0x58, 0xaa, 0x9d, 0xa4, 0xfc, 0xff, 0x09, 0x16, 0x88, 0x5d, 0x5a, 0x21, 0xb1, 0x1a,
0x9d, 0xd8, 0xa7, 0xd1, 0x08, 0x7b, 0x6c, 0xcd, 0x1c, 0x5b, 0xcd, 0x8d, 0x70, 0x59, 0xdc, 0x0f,
0x3b, 0x34, 0xc7, 0xc6, 0x46, 0xea, 0xe6, 0xcd, 0x79, 0x9f, 0x27, 0x73, 0x34, 0xb3, 0xb0, 0x50,
0x68, 0xdb, 0xb2, 0xb2, 0xda, 0xa3, 0xf5, 0x95, 0xd3, 0xc6, 0x5e, 0x55, 0x71, 0xed, 0x2a, 0xaa, | 0xe4, 0xcf, 0x28, 0x33, 0x3e, 0xab, 0xb4, 0xa9, 0xbc, 0xbe, 0x0e, 0xa6, 0x2d, 0x37, 0x4e, 0x57,
0x35, 0xba, 0xd8, 0xd8, 0x16, 0x2d, 0x55, 0x6e, 0x1b, 0x3b, 0xc8, 0x7e, 0x78, 0xce, 0x18, 0x2d,
0x19, 0xda, 0xc6, 0xbe, 0xa8, 0x28, 0x26, 0x6f, 0xce, 0x3c, 0x67, 0x88, 0x84, 0xc7, 0x24, 0x44,
0xca, 0x63, 0x1a, 0x62, 0xce, 0xe3, 0x3c, 0xc4, 0x82, 0xc7, 0x45, 0x88, 0x25, 0x8f, 0xcb, 0x18, | random_line_split |
|
envmon_sensor_info.pb.go | (src proto.Message) {
xxx_messageInfo_EnvmonSensorInfo_KEYS.Merge(m, src)
}
func (m *EnvmonSensorInfo_KEYS) XXX_Size() int {
return xxx_messageInfo_EnvmonSensorInfo_KEYS.Size(m)
}
func (m *EnvmonSensorInfo_KEYS) XXX_DiscardUnknown() {
xxx_messageInfo_EnvmonSensorInfo_KEYS.DiscardUnknown(m)
}
var xxx_messageInfo_EnvmonSensorInfo_KEYS proto.InternalMessageInfo
func (m *EnvmonSensorInfo_KEYS) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *EnvmonSensorInfo_KEYS) GetName_1() string {
if m != nil {
return m.Name_1
}
return ""
}
func (m *EnvmonSensorInfo_KEYS) GetName_2() string {
if m != nil {
return m.Name_2
}
return ""
}
func (m *EnvmonSensorInfo_KEYS) GetName_3() string {
if m != nil {
return m.Name_3
}
return ""
}
func (m *EnvmonSensorInfo_KEYS) GetName_4() string {
if m != nil {
return m.Name_4
}
return ""
}
func (m *EnvmonSensorInfo_KEYS) GetName_5() string {
if m != nil {
return m.Name_5
}
return ""
}
func (m *EnvmonSensorInfo_KEYS) GetName_6() string {
if m != nil {
return m.Name_6
}
return ""
}
func (m *EnvmonSensorInfo_KEYS) GetName_7() string {
if m != nil {
return m.Name_7
}
return ""
}
type EnvmonSensorInfo struct {
FieldValidityBitmap string `protobuf:"bytes,50,opt,name=field_validity_bitmap,json=fieldValidityBitmap,proto3" json:"field_validity_bitmap,omitempty"`
DeviceDescription string `protobuf:"bytes,51,opt,name=device_description,json=deviceDescription,proto3" json:"device_description,omitempty"`
Units string `protobuf:"bytes,52,opt,name=units,proto3" json:"units,omitempty"`
DeviceId uint32 `protobuf:"varint,53,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
Value uint32 `protobuf:"varint,54,opt,name=value,proto3" json:"value,omitempty"`
AlarmType uint32 `protobuf:"varint,55,opt,name=alarm_type,json=alarmType,proto3" json:"alarm_type,omitempty"`
DataType uint32 `protobuf:"varint,56,opt,name=data_type,json=dataType,proto3" json:"data_type,omitempty"`
Scale uint32 `protobuf:"varint,57,opt,name=scale,proto3" json:"scale,omitempty"`
Precision uint32 `protobuf:"varint,58,opt,name=precision,proto3" json:"precision,omitempty"`
Status uint32 `protobuf:"varint,59,opt,name=status,proto3" json:"status,omitempty"`
AgeTimeStamp uint32 `protobuf:"varint,60,opt,name=age_time_stamp,json=ageTimeStamp,proto3" json:"age_time_stamp,omitempty"`
UpdateRate uint32 `protobuf:"varint,61,opt,name=update_rate,json=updateRate,proto3" json:"update_rate,omitempty"`
Average int32 `protobuf:"zigzag32,62,opt,name=average,proto3" json:"average,omitempty"`
Minimum int32 `protobuf:"zigzag32,63,opt,name=minimum,proto3" json:"minimum,omitempty"`
Maximum int32 `protobuf:"zigzag32,64,opt,name=maximum,proto3" json:"maximum,omitempty"`
Interval int32 `protobuf:"zigzag32,65,opt,name=interval,proto3" json:"interval,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *EnvmonSensorInfo) Reset() { *m = EnvmonSensorInfo{} }
func (m *EnvmonSensorInfo) String() string { return proto.CompactTextString(m) }
func (*EnvmonSensorInfo) ProtoMessage() {}
func (*EnvmonSensorInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_bc03e94ffc42a321, []int{1}
}
func (m *EnvmonSensorInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_EnvmonSensorInfo.Unmarshal(m, b)
}
func (m *EnvmonSensorInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_EnvmonSensorInfo.Marshal(b, m, deterministic)
}
func (m *EnvmonSensorInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_EnvmonSensorInfo.Merge(m, src)
}
func (m *EnvmonSensorInfo) XXX_Size() int {
return xxx_messageInfo_EnvmonSensorInfo.Size(m)
}
func (m *EnvmonSensorInfo) XXX_DiscardUnknown() {
xxx_messageInfo_EnvmonSensorInfo.DiscardUnknown(m)
}
var xxx_messageInfo_EnvmonSensorInfo proto.InternalMessageInfo
func (m *EnvmonSensorInfo) GetFieldValidityBitmap() string {
if m != nil {
return m.FieldValidityBitmap
}
return ""
}
func (m *EnvmonSensorInfo) GetDeviceDescription() string {
if m != nil {
return m.DeviceDescription
}
return ""
}
func (m *EnvmonSensorInfo) GetUnits() string {
if m != nil {
return m.Units
}
return ""
}
func (m *EnvmonSensorInfo) GetDeviceId() uint32 {
if m != nil {
return m.DeviceId
}
return 0
}
func (m *EnvmonSensorInfo) GetValue() uint32 {
if m != nil {
return m.Value
}
return 0
}
func (m *EnvmonSensorInfo) GetAlarmType() uint32 {
if m != nil {
return m.AlarmType
}
return 0
}
func (m *EnvmonSensorInfo) GetDataType() uint32 {
if m != nil {
return m.DataType
}
return 0
}
func (m *EnvmonSensorInfo) GetScale() uint32 {
if m != nil {
return m.Scale
}
return 0
}
func (m *EnvmonSensorInfo) GetPrecision() uint32 {
if m != nil {
return m.Precision
}
return 0
}
func (m *EnvmonSensorInfo) GetStatus() uint32 {
if m != nil {
return m.Status
}
return 0
}
func (m *EnvmonSensorInfo) GetAgeTimeStamp() uint32 {
if m != nil {
return m.AgeTimeStamp
}
return 0
}
func (m *EnvmonSensorInfo) GetUpdateRate() uint32 {
if m != nil {
return m.UpdateRate
}
return 0
}
func (m *EnvmonSensorInfo) GetAverage() int32 {
if m != nil {
return m.Average
}
return 0
}
func (m *EnvmonSensorInfo) GetMinimum() int32 {
if m != nil {
return m.Minimum
}
return 0
}
func (m *EnvmonSensorInfo) GetMaximum() int32 {
if m != nil {
return m.Maximum
}
return 0
}
func (m *EnvmonSensorInfo) GetInterval() int32 {
if m != nil {
return m.Interval
}
return 0
}
func init() {
proto.RegisterType((*EnvmonSensorInfo_KEYS)(nil), "cisco_ios_xr_invmgr_oper.inventory.racks.rack.entity.slot.tsi1s.tsi1.tsi2s.tsi2.tsi3s.tsi3.tsi4s.tsi4.tsi5s.tsi5.tsi6s.tsi6.attributes.env_sensor_info.envmon_sensor_info_KEYS")
proto.RegisterType((*EnvmonSensorInfo)(nil), "cisco_ios_xr_invmgr_oper.inventory.racks.rack.entity.slot.tsi1s.tsi1.tsi2s.tsi2.tsi3s.tsi3.tsi4s.tsi4.tsi5s.tsi5.tsi6s.tsi6.attributes.env_sensor_info.envmon_sensor_info")
}
func init() { proto.RegisterFile("envmon_sensor_info.proto", fileDescriptor_bc03e94ffc42a321) }
var fileDescriptor_bc03e94ffc42a321 = []byte{
// 506 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x93, 0xcd, 0x6e, 0xd3, 0x4c,
0x14, 0x86, 0xe5, 0xaf, 0x4d, 0xda, 0xcc, 0x07, | XXX_Merge | identifier_name |
|
envmon_sensor_info.pb.go |
func (m *EnvmonSensorInfo_KEYS) GetName_4() string {
if m != nil {
return m.Name_4
}
return ""
}
func (m *EnvmonSensorInfo_KEYS) GetName_5() string {
if m != nil {
return m.Name_5
}
return ""
}
func (m *EnvmonSensorInfo_KEYS) GetName_6() string {
if m != nil {
return m.Name_6
}
return ""
}
func (m *EnvmonSensorInfo_KEYS) GetName_7() string {
if m != nil {
return m.Name_7
}
return ""
}
type EnvmonSensorInfo struct {
FieldValidityBitmap string `protobuf:"bytes,50,opt,name=field_validity_bitmap,json=fieldValidityBitmap,proto3" json:"field_validity_bitmap,omitempty"`
DeviceDescription string `protobuf:"bytes,51,opt,name=device_description,json=deviceDescription,proto3" json:"device_description,omitempty"`
Units string `protobuf:"bytes,52,opt,name=units,proto3" json:"units,omitempty"`
DeviceId uint32 `protobuf:"varint,53,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
Value uint32 `protobuf:"varint,54,opt,name=value,proto3" json:"value,omitempty"`
AlarmType uint32 `protobuf:"varint,55,opt,name=alarm_type,json=alarmType,proto3" json:"alarm_type,omitempty"`
DataType uint32 `protobuf:"varint,56,opt,name=data_type,json=dataType,proto3" json:"data_type,omitempty"`
Scale uint32 `protobuf:"varint,57,opt,name=scale,proto3" json:"scale,omitempty"`
Precision uint32 `protobuf:"varint,58,opt,name=precision,proto3" json:"precision,omitempty"`
Status uint32 `protobuf:"varint,59,opt,name=status,proto3" json:"status,omitempty"`
AgeTimeStamp uint32 `protobuf:"varint,60,opt,name=age_time_stamp,json=ageTimeStamp,proto3" json:"age_time_stamp,omitempty"`
UpdateRate uint32 `protobuf:"varint,61,opt,name=update_rate,json=updateRate,proto3" json:"update_rate,omitempty"`
Average int32 `protobuf:"zigzag32,62,opt,name=average,proto3" json:"average,omitempty"`
Minimum int32 `protobuf:"zigzag32,63,opt,name=minimum,proto3" json:"minimum,omitempty"`
Maximum int32 `protobuf:"zigzag32,64,opt,name=maximum,proto3" json:"maximum,omitempty"`
Interval int32 `protobuf:"zigzag32,65,opt,name=interval,proto3" json:"interval,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *EnvmonSensorInfo) Reset() { *m = EnvmonSensorInfo{} }
func (m *EnvmonSensorInfo) String() string { return proto.CompactTextString(m) }
func (*EnvmonSensorInfo) ProtoMessage() {}
func (*EnvmonSensorInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_bc03e94ffc42a321, []int{1}
}
func (m *EnvmonSensorInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_EnvmonSensorInfo.Unmarshal(m, b)
}
func (m *EnvmonSensorInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_EnvmonSensorInfo.Marshal(b, m, deterministic)
}
func (m *EnvmonSensorInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_EnvmonSensorInfo.Merge(m, src)
}
func (m *EnvmonSensorInfo) XXX_Size() int {
return xxx_messageInfo_EnvmonSensorInfo.Size(m)
}
func (m *EnvmonSensorInfo) XXX_DiscardUnknown() {
xxx_messageInfo_EnvmonSensorInfo.DiscardUnknown(m)
}
var xxx_messageInfo_EnvmonSensorInfo proto.InternalMessageInfo
func (m *EnvmonSensorInfo) GetFieldValidityBitmap() string {
if m != nil {
return m.FieldValidityBitmap
}
return ""
}
func (m *EnvmonSensorInfo) GetDeviceDescription() string {
if m != nil {
return m.DeviceDescription
}
return ""
}
func (m *EnvmonSensorInfo) GetUnits() string {
if m != nil {
return m.Units
}
return ""
}
func (m *EnvmonSensorInfo) GetDeviceId() uint32 {
if m != nil {
return m.DeviceId
}
return 0
}
func (m *EnvmonSensorInfo) GetValue() uint32 {
if m != nil {
return m.Value
}
return 0
}
func (m *EnvmonSensorInfo) GetAlarmType() uint32 {
if m != nil {
return m.AlarmType
}
return 0
}
func (m *EnvmonSensorInfo) GetDataType() uint32 {
if m != nil {
return m.DataType
}
return 0
}
func (m *EnvmonSensorInfo) GetScale() uint32 {
if m != nil {
return m.Scale
}
return 0
}
func (m *EnvmonSensorInfo) GetPrecision() uint32 {
if m != nil {
return m.Precision
}
return 0
}
func (m *EnvmonSensorInfo) GetStatus() uint32 {
if m != nil {
return m.Status
}
return 0
}
func (m *EnvmonSensorInfo) GetAgeTimeStamp() uint32 {
if m != nil {
return m.AgeTimeStamp
}
return 0
}
func (m *EnvmonSensorInfo) GetUpdateRate() uint32 {
if m != nil {
return m.UpdateRate
}
return 0
}
func (m *EnvmonSensorInfo) GetAverage() int32 {
if m != nil {
return m.Average
}
return 0
}
func (m *EnvmonSensorInfo) GetMinimum() int32 {
if m != nil {
return m.Minimum
}
return 0
}
func (m *EnvmonSensorInfo) GetMaximum() int32 {
if m != nil {
return m.Maximum
}
return 0
}
func (m *EnvmonSensorInfo) GetInterval() int32 {
if m != nil {
return m.Interval
}
return 0
}
func init() {
proto.RegisterType((*EnvmonSensorInfo_KEYS)(nil), "cisco_ios_xr_invmgr_oper.inventory.racks.rack.entity.slot.tsi1s.tsi1.tsi2s.tsi2.tsi3s.tsi3.tsi4s.tsi4.tsi5s.tsi5.tsi6s.tsi6.attributes.env_sensor_info.envmon_sensor_info_KEYS")
proto.RegisterType((*EnvmonSensorInfo)(nil), "cisco_ios_xr_invmgr_oper.inventory.racks.rack.entity.slot.tsi1s.tsi1.tsi2s.tsi2.tsi3s.tsi3.tsi4s.tsi4.tsi5s.tsi5.tsi6s.tsi6.attributes.env_sensor_info.envmon_sensor_info")
}
func init() { proto.RegisterFile("envmon_sensor_info.proto", fileDescriptor_bc03e94ffc42a321) }
var fileDescriptor_bc03e94ffc42a321 = []byte{
// 506 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x93, 0xcd, 0x6e, 0xd3, 0x4c,
0x14, 0x86, 0xe5, 0xaf, 0x4d, 0xda, 0xcc, 0x07, 0x48, 0x1d, 0x5a, 0x18, 0xf1, 0x23, 0xaa, 0x8a,
0x45, 0x37, 0x58, 0xaa, 0x9d, 0xa4, 0xfc, 0xff, 0x09, 0x16, 0x88, 0x5d, 0x5a, 0x21, 0xb1, 0x1a,
0x9d, 0xd8, 0xa7, 0xd1, 0x08, 0x7b, 0x6c, 0xcd, 0x1c, 0x5b, 0xcd, 0x8d, 0x70, 0x59, | {
if m != nil {
return m.Name_3
}
return ""
} | identifier_body |
|
battleship-final.py | # input: boat, taken_positions
# this func checks if the boat outside the playground or the position of the boat is already in taken_position
# return: boat. boat will returned as [-1] or its specific position
boat.sort()
for i in range(len(boat)):
if boat[i] in taken_positions:
#this condition checks if the block boat[i] is already in the list taken_positions
boat = [-1]
break
elif boat[i] > 99 or boat[i] < 0:
#this condition checks border 1 and 3
boat = [-1]
break
elif boat[i] % 10 == 9 and i < len(boat) - 1:
#this condition checks border 2 and 4
if boat[i + 1] % 10 == 0:
boat = [-1]
break
if i != 0:
# this condition checks if there is any hole in the boat
if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10:
boat = [-1]
break
return boat
def check_shot(shot, ships, hit, miss, comp, sinked_boats):
# input: shot, all the boats (ships), hit, miss, comp, sinked_boats
# this func initially assumes that the shot is missed (cond = 0)
# given a shot, this func uses a for-loop that goes through all ships to see if the shot hits one of the ships
# if yes, remove the block of the boat that is hitted by the shot
# append the shot to hit or comp. If comp, sinked_boats += 1
# if not, append the shot to miss
# return: all the boats (ships), hit, miss, comp, cond, sinked_boats
cond = 0 # miss
for i in range(len(ships)):
if shot in ships[i]:
ships[i].remove(shot)
if len(ships[i]) > 0:
hit.append(shot)
cond = 1 # hit
else:
comp.append(shot)
cond = 2 # comp
sinked_boats += 1
if cond == 0: # miss
miss.append(shot)
return ships, hit, miss, comp, cond, sinked_boats
def create_playground(hit, miss, comp):
# input: hit, miss, comp
# this func creates the playground with the status of each block
# print the playground
print(" battleship")
print(" 0 1 2 3 4 5 6 7 8 9")
block = 0 #this variable keep track of the spot of the block
for i in range(10):
#create each row
row = ""
for j in range(10):
#create each spot on the specific row
character = "_ "
if block in miss:
character = "x "
elif block in hit:
character = "o "
elif block in comp:
character = "Q "
row += character
block += 1 #the block var increments 1 after each character is add to row
print(i, " ", row)
print("")
def check_empty(ships):
# input: ships
# [] = False, [#have element] = True
# this func checks each ship in the 2D list ships
# if ship is empty, return True, and vice versa
# if all ships are empty, return True, else return False
# return True or False
return all([not elem for elem in ships])
"""
user - 2 funcs:
"""
def create_ships_u(taken_positions, num_boats):
# input: num_boats
# this func has a loop that makes all boats,
# which calls the get_ship(len_of_boat, taken_positions) that creates a single boat
# return: ships, which are the 2D list has len(num_boats) that contains the positions of all boats
ships = [] #this is a 2D list contains the positions of all boats
for len_of_boat in num_boats:
ship, taken_positions = get_ship(len_of_boat, taken_positions)
ships.append(ship)
return ships, taken_positions
def create_playground_u(taken_positions):
print(" battleships ")
print(" 0 1 2 3 4 5 6 7 8 9")
place = 0
for x in range(10):
row = ""
for y in range(10):
ch = " _ "
if place in taken_positions:
ch = " o "
row = row + ch
place = place + 1
print(x," ",row)
def get_ship(len_of_boat, taken_positions):
# input: len_of_boat, taken_positions
# this func gets the boat's position from the user's input
# this func checks both the type of the input(is it int) and if the boat is inside playground/in taken_positions/in correct order
# return a valid ship
while True:
ship = []
print("enter your ship of length", len_of_boat)
for i in range(len_of_boat):
while True:
try:
boat_num = input("please enter a number: ")
ship.append(int(boat_num))
except ValueError: # better try again... Return to the start of the loop
print("wrong type of input")
continue
else: # is is a correct input, and we're ready to exit the loop
break
ship = check_ok(ship, taken_positions)
if -1 not in ship: # check if a ship is valid. If yes, add the ship to taken_positions and break
taken_positions += ship
break
else:
print("invalid number - please enter again")
return ship, taken_positions
def get_shot_user(guesses):
# input: guesses is the combined list of hit, miss, comp
# this funcs asks the user to enter the shot, then checks the validity of the shot
# return: the valid shot
while True:
try:
shot = int(input("Enter your shot: "))
if shot < 0 or shot > 99:
shot = int(input("Enter your shot:"))
elif shot in guesses:
print("already guessed - please enter again")
else:
return shot
except:
print("incorrect - please enter integer only")
"""
computer - 1 funcs:
"""
def create_ships_c(taken_positions, num_boats):
# input: num_boats
# this funcs has a loop that makes all boats,
# which calls the create_boat() that creates a single boat
# return: ships, which are the 2D list has len(num_boats) that contains the positions of all boats
ships = [] #this is a 2D list contains the positions of all boats
for len_of_boat in num_boats:
boat_position = [-1] #create the initial position of every boat is [-1]
while -1 in boat_position:
boat_start = randrange(99) #boat starting point
boat_direction = randrange(1, 4) #{1: "up", 2: "right", 3: "down", 4: "left"}
boat_position = create_boat(len_of_boat, boat_start, boat_direction, taken_positions) #return the position of boat
#a new boat is created after finishing the while loop
ships.append(boat_position)
taken_positions += boat_position #add all positions of the newly created boat to the list taken_positions
return ships, taken_positions
def create_boat(len_of_boat, boat_start, boat_direction, taken_positions):
# input: len_of_boat, boat_start, boat_direction, taken_positions
# this func initializes boat = []
# with len_of_boat, boat_start, boat_direction, this func create the position of the boat
# calls check_ok(boat, taken_positions) to see if the boat outside playground or the position of the boat is already in taken_position
# return: boat. boat will returned as [-1] or its specific position
boat = []
if boat_direction == 1:
for i in range(len_of_boat):
boat.append(boat_start - i * 10) # already have the position of boat after this line
boat = check_ok(boat, taken_positions)
elif boat_direction == 2:
for i in range(len_of_boat):
boat.append(boat_start + i)
boat = check_ok(boat, taken_positions)
elif boat_direction == 3:
for i in range(len_of_boat):
boat.append(boat_start + i * 10)
boat = check_ok(boat, taken_positions)
elif boat_direction == 4:
for i in range(len_of_boat):
boat.append(boat_start - i)
boat = check_ok(boat, taken_positions)
return boat
def get_shot_comp |
"""
both user and computer funcs:
"""
def check_ok(boat, taken_positions):
| random_line_split |
|
battleship-final.py | miss
for i in range(len(ships)):
if shot in ships[i]:
ships[i].remove(shot)
if len(ships[i]) > 0:
hit.append(shot)
cond = 1 # hit
else:
comp.append(shot)
cond = 2 # comp
sinked_boats += 1
if cond == 0: # miss
miss.append(shot)
return ships, hit, miss, comp, cond, sinked_boats
def create_playground(hit, miss, comp):
# input: hit, miss, comp
# this func creates the playground with the status of each block
# print the playground
print(" battleship")
print(" 0 1 2 3 4 5 6 7 8 9")
block = 0 #this variable keep track of the spot of the block
for i in range(10):
#create each row
row = ""
for j in range(10):
#create each spot on the specific row
character = "_ "
if block in miss:
character = "x "
elif block in hit:
character = "o "
elif block in comp:
character = "Q "
row += character
block += 1 #the block var increments 1 after each character is add to row
print(i, " ", row)
print("")
def check_empty(ships):
# input: ships
# [] = False, [#have element] = True
# this func checks each ship in the 2D list ships
# if ship is empty, return True, and vice versa
# if all ships are empty, return True, else return False
# return True or False
return all([not elem for elem in ships])
"""
user - 2 funcs:
"""
def create_ships_u(taken_positions, num_boats):
# input: num_boats
# this func has a loop that makes all boats,
# which calls the get_ship(len_of_boat, taken_positions) that creates a single boat
# return: ships, which are the 2D list has len(num_boats) that contains the positions of all boats
ships = [] #this is a 2D list contains the positions of all boats
for len_of_boat in num_boats:
ship, taken_positions = get_ship(len_of_boat, taken_positions)
ships.append(ship)
return ships, taken_positions
def create_playground_u(taken_positions):
print(" battleships ")
print(" 0 1 2 3 4 5 6 7 8 9")
place = 0
for x in range(10):
row = ""
for y in range(10):
ch = " _ "
if place in taken_positions:
ch = " o "
row = row + ch
place = place + 1
print(x," ",row)
def get_ship(len_of_boat, taken_positions):
# input: len_of_boat, taken_positions
# this func gets the boat's position from the user's input
# this func checks both the type of the input(is it int) and if the boat is inside playground/in taken_positions/in correct order
# return a valid ship
while True:
ship = []
print("enter your ship of length", len_of_boat)
for i in range(len_of_boat):
while True:
try:
boat_num = input("please enter a number: ")
ship.append(int(boat_num))
except ValueError: # better try again... Return to the start of the loop
print("wrong type of input")
continue
else: # is is a correct input, and we're ready to exit the loop
break
ship = check_ok(ship, taken_positions)
if -1 not in ship: # check if a ship is valid. If yes, add the ship to taken_positions and break
taken_positions += ship
break
else:
print("invalid number - please enter again")
return ship, taken_positions
def | (guesses):
# input: guesses is the combined list of hit, miss, comp
# this funcs asks the user to enter the shot, then checks the validity of the shot
# return: the valid shot
while True:
try:
shot = int(input("Enter your shot: "))
if shot < 0 or shot > 99:
shot = int(input("Enter your shot:"))
elif shot in guesses:
print("already guessed - please enter again")
else:
return shot
except:
print("incorrect - please enter integer only")
"""
computer - 1 funcs:
"""
def create_ships_c(taken_positions, num_boats):
# input: num_boats
# this funcs has a loop that makes all boats,
# which calls the create_boat() that creates a single boat
# return: ships, which are the 2D list has len(num_boats) that contains the positions of all boats
ships = [] #this is a 2D list contains the positions of all boats
for len_of_boat in num_boats:
boat_position = [-1] #create the initial position of every boat is [-1]
while -1 in boat_position:
boat_start = randrange(99) #boat starting point
boat_direction = randrange(1, 4) #{1: "up", 2: "right", 3: "down", 4: "left"}
boat_position = create_boat(len_of_boat, boat_start, boat_direction, taken_positions) #return the position of boat
#a new boat is created after finishing the while loop
ships.append(boat_position)
taken_positions += boat_position #add all positions of the newly created boat to the list taken_positions
return ships, taken_positions
def create_boat(len_of_boat, boat_start, boat_direction, taken_positions):
# input: len_of_boat, boat_start, boat_direction, taken_positions
# this func initializes boat = []
# with len_of_boat, boat_start, boat_direction, this func create the position of the boat
# calls check_ok(boat, taken_positions) to see if the boat outside playground or the position of the boat is already in taken_position
# return: boat. boat will returned as [-1] or its specific position
boat = []
if boat_direction == 1:
for i in range(len_of_boat):
boat.append(boat_start - i * 10) # already have the position of boat after this line
boat = check_ok(boat, taken_positions)
elif boat_direction == 2:
for i in range(len_of_boat):
boat.append(boat_start + i)
boat = check_ok(boat, taken_positions)
elif boat_direction == 3:
for i in range(len_of_boat):
boat.append(boat_start + i * 10)
boat = check_ok(boat, taken_positions)
elif boat_direction == 4:
for i in range(len_of_boat):
boat.append(boat_start - i)
boat = check_ok(boat, taken_positions)
return boat
def get_shot_comp(guesses, tactics):
# input: guesses (all moves), tactics(which is the list of all valid possible moves for the shot)
# in the first mơve, tactics = []
# this func checks if len(tactics) > 0
# if yes, pick shot = tactics[0]
# if no, pick shot = randrange(99)
# this func check if shot not in guesses(which is the list of all moves)
# if yes, guess.append(shot), and break
# return: the valid shot, guesses
while True:
try:
if len(tactics) > 0:
shot = tactics[0]
else:
shot = randrange(99)
if shot not in guesses:
guesses.append(shot)
break
except:
print("incorrect - please enter integer only")
return shot, guesses
def calculate_tactics(shot, tactics, guesses, hit):
# input: shot, tactics, guesses, hit
# this function takes the newly shot, and changes the tactics list accordingly
# the list temp is the possible positions that the next shot can be
# if the shot hits the first time, len(tactics) = 0. Then, temp is the list contains 4 blocks around the shot
# else, the list temp will be created based on the last 2 shots
# candidate is the list of valid possible shots that is created from temp
# shuffle the order of elements inside candidate
# return: candidate (candidate is tactics)
temp = []
if len(tactics) < 1:
# got 1 hit the first time
temp = [shot - 1, shot + 1, shot - 10, shot + 10] # temporary places that the next shot could be
else:
# got at least 2 hits
# checks to see if the 4 spots around is in hit
if shot - 1 in hit: # east
temp = [shot + 1]
for num in [2, 3, | get_shot_user | identifier_name |
battleship-final.py | # miss
for i in range(len(ships)):
if shot in ships[i]:
ships[i].remove(shot)
if len(ships[i]) > 0:
hit.append(shot)
cond = 1 # hit
else:
comp.append(shot)
cond = 2 # comp
sinked_boats += 1
if cond == 0: # miss
miss.append(shot)
return ships, hit, miss, comp, cond, sinked_boats
def create_playground(hit, miss, comp):
# input: hit, miss, comp
# this func creates the playground with the status of each block
# print the playground
print(" battleship")
print(" 0 1 2 3 4 5 6 7 8 9")
block = 0 #this variable keep track of the spot of the block
for i in range(10):
#create each row
row = ""
for j in range(10):
#create each spot on the specific row
character = "_ "
if block in miss:
character = "x "
elif block in hit:
character = "o "
elif block in comp:
character = "Q "
row += character
block += 1 #the block var increments 1 after each character is add to row
print(i, " ", row)
print("")
def check_empty(ships):
# input: ships
# [] = False, [#have element] = True
# this func checks each ship in the 2D list ships
# if ship is empty, return True, and vice versa
# if all ships are empty, return True, else return False
# return True or False
return all([not elem for elem in ships])
"""
user - 2 funcs:
"""
def create_ships_u(taken_positions, num_boats):
# input: num_boats
# this func has a loop that makes all boats,
# which calls the get_ship(len_of_boat, taken_positions) that creates a single boat
# return: ships, which are the 2D list has len(num_boats) that contains the positions of all boats
ships = [] #this is a 2D list contains the positions of all boats
for len_of_boat in num_boats:
ship, taken_positions = get_ship(len_of_boat, taken_positions)
ships.append(ship)
return ships, taken_positions
def create_playground_u(taken_positions):
print(" battleships ")
print(" 0 1 2 3 4 5 6 7 8 9")
place = 0
for x in range(10):
row = ""
for y in range(10):
ch = " _ "
if place in taken_positions:
ch = " o "
row = row + ch
place = place + 1
print(x," ",row)
def get_ship(len_of_boat, taken_positions):
# input: len_of_boat, taken_positions
# this func gets the boat's position from the user's input
# this func checks both the type of the input(is it int) and if the boat is inside playground/in taken_positions/in correct order
# return a valid ship
while True:
ship = []
print("enter your ship of length", len_of_boat)
for i in range(len_of_boat):
while True:
try:
boat_num = input("please enter a number: ")
ship.append(int(boat_num))
except ValueError: # better try again... Return to the start of the loop
print("wrong type of input")
continue
else: # is is a correct input, and we're ready to exit the loop
break
ship = check_ok(ship, taken_positions)
if -1 not in ship: # check if a ship is valid. If yes, add the ship to taken_positions and break
taken_positions += ship
break
else:
print("invalid number - please enter again")
return ship, taken_positions
def get_shot_user(guesses):
# input: guesses is the combined list of hit, miss, comp
# this funcs asks the user to enter the shot, then checks the validity of the shot
# return: the valid shot
while True:
try:
shot = int(input("Enter your shot: "))
if shot < 0 or shot > 99:
shot = int(input("Enter your shot:"))
elif shot in guesses:
print("already guessed - please enter again")
else:
return shot
except:
print("incorrect - please enter integer only")
"""
computer - 1 funcs:
"""
def create_ships_c(taken_positions, num_boats):
# input: num_boats
# this funcs has a loop that makes all boats,
# which calls the create_boat() that creates a single boat
# return: ships, which are the 2D list has len(num_boats) that contains the positions of all boats
|
def create_boat(len_of_boat, boat_start, boat_direction, taken_positions):
# input: len_of_boat, boat_start, boat_direction, taken_positions
# this func initializes boat = []
# with len_of_boat, boat_start, boat_direction, this func create the position of the boat
# calls check_ok(boat, taken_positions) to see if the boat outside playground or the position of the boat is already in taken_position
# return: boat. boat will returned as [-1] or its specific position
boat = []
if boat_direction == 1:
for i in range(len_of_boat):
boat.append(boat_start - i * 10) # already have the position of boat after this line
boat = check_ok(boat, taken_positions)
elif boat_direction == 2:
for i in range(len_of_boat):
boat.append(boat_start + i)
boat = check_ok(boat, taken_positions)
elif boat_direction == 3:
for i in range(len_of_boat):
boat.append(boat_start + i * 10)
boat = check_ok(boat, taken_positions)
elif boat_direction == 4:
for i in range(len_of_boat):
boat.append(boat_start - i)
boat = check_ok(boat, taken_positions)
return boat
def get_shot_comp(guesses, tactics):
# input: guesses (all moves), tactics(which is the list of all valid possible moves for the shot)
# in the first mơve, tactics = []
# this func checks if len(tactics) > 0
# if yes, pick shot = tactics[0]
# if no, pick shot = randrange(99)
# this func check if shot not in guesses(which is the list of all moves)
# if yes, guess.append(shot), and break
# return: the valid shot, guesses
while True:
try:
if len(tactics) > 0:
shot = tactics[0]
else:
shot = randrange(99)
if shot not in guesses:
guesses.append(shot)
break
except:
print("incorrect - please enter integer only")
return shot, guesses
def calculate_tactics(shot, tactics, guesses, hit):
# input: shot, tactics, guesses, hit
# this function takes the newly shot, and changes the tactics list accordingly
# the list temp is the possible positions that the next shot can be
# if the shot hits the first time, len(tactics) = 0. Then, temp is the list contains 4 blocks around the shot
# else, the list temp will be created based on the last 2 shots
# candidate is the list of valid possible shots that is created from temp
# shuffle the order of elements inside candidate
# return: candidate (candidate is tactics)
temp = []
if len(tactics) < 1:
# got 1 hit the first time
temp = [shot - 1, shot + 1, shot - 10, shot + 10] # temporary places that the next shot could be
else:
# got at least 2 hits
# checks to see if the 4 spots around is in hit
if shot - 1 in hit: # east
temp = [shot + 1]
for num in [2, 3, | ships = [] #this is a 2D list contains the positions of all boats
for len_of_boat in num_boats:
boat_position = [-1] #create the initial position of every boat is [-1]
while -1 in boat_position:
boat_start = randrange(99) #boat starting point
boat_direction = randrange(1, 4) #{1: "up", 2: "right", 3: "down", 4: "left"}
boat_position = create_boat(len_of_boat, boat_start, boat_direction, taken_positions) #return the position of boat
#a new boat is created after finishing the while loop
ships.append(boat_position)
taken_positions += boat_position #add all positions of the newly created boat to the list taken_positions
return ships, taken_positions | identifier_body |
battleship-final.py | miss
for i in range(len(ships)):
if shot in ships[i]:
ships[i].remove(shot)
if len(ships[i]) > 0:
hit.append(shot)
cond = 1 # hit
else:
comp.append(shot)
cond = 2 # comp
sinked_boats += 1
if cond == 0: # miss
miss.append(shot)
return ships, hit, miss, comp, cond, sinked_boats
def create_playground(hit, miss, comp):
# input: hit, miss, comp
# this func creates the playground with the status of each block
# print the playground
print(" battleship")
print(" 0 1 2 3 4 5 6 7 8 9")
block = 0 #this variable keep track of the spot of the block
for i in range(10):
#create each row
row = ""
for j in range(10):
#create each spot on the specific row
character = "_ "
if block in miss:
character = "x "
elif block in hit:
character = "o "
elif block in comp:
character = "Q "
row += character
block += 1 #the block var increments 1 after each character is add to row
print(i, " ", row)
print("")
def check_empty(ships):
# input: ships
# [] = False, [#have element] = True
# this func checks each ship in the 2D list ships
# if ship is empty, return True, and vice versa
# if all ships are empty, return True, else return False
# return True or False
return all([not elem for elem in ships])
"""
user - 2 funcs:
"""
def create_ships_u(taken_positions, num_boats):
# input: num_boats
# this func has a loop that makes all boats,
# which calls the get_ship(len_of_boat, taken_positions) that creates a single boat
# return: ships, which are the 2D list has len(num_boats) that contains the positions of all boats
ships = [] #this is a 2D list contains the positions of all boats
for len_of_boat in num_boats:
ship, taken_positions = get_ship(len_of_boat, taken_positions)
ships.append(ship)
return ships, taken_positions
def create_playground_u(taken_positions):
print(" battleships ")
print(" 0 1 2 3 4 5 6 7 8 9")
place = 0
for x in range(10):
row = ""
for y in range(10):
ch = " _ "
if place in taken_positions:
ch = " o "
row = row + ch
place = place + 1
print(x," ",row)
def get_ship(len_of_boat, taken_positions):
# input: len_of_boat, taken_positions
# this func gets the boat's position from the user's input
# this func checks both the type of the input(is it int) and if the boat is inside playground/in taken_positions/in correct order
# return a valid ship
while True:
ship = []
print("enter your ship of length", len_of_boat)
for i in range(len_of_boat):
while True:
try:
boat_num = input("please enter a number: ")
ship.append(int(boat_num))
except ValueError: # better try again... Return to the start of the loop
print("wrong type of input")
continue
else: # is is a correct input, and we're ready to exit the loop
break
ship = check_ok(ship, taken_positions)
if -1 not in ship: # check if a ship is valid. If yes, add the ship to taken_positions and break
taken_positions += ship
break
else:
print("invalid number - please enter again")
return ship, taken_positions
def get_shot_user(guesses):
# input: guesses is the combined list of hit, miss, comp
# this funcs asks the user to enter the shot, then checks the validity of the shot
# return: the valid shot
while True:
try:
shot = int(input("Enter your shot: "))
if shot < 0 or shot > 99:
shot = int(input("Enter your shot:"))
elif shot in guesses:
print("already guessed - please enter again")
else:
return shot
except:
print("incorrect - please enter integer only")
"""
computer - 1 funcs:
"""
def create_ships_c(taken_positions, num_boats):
# input: num_boats
# this funcs has a loop that makes all boats,
# which calls the create_boat() that creates a single boat
# return: ships, which are the 2D list has len(num_boats) that contains the positions of all boats
ships = [] #this is a 2D list contains the positions of all boats
for len_of_boat in num_boats:
boat_position = [-1] #create the initial position of every boat is [-1]
while -1 in boat_position:
boat_start = randrange(99) #boat starting point
boat_direction = randrange(1, 4) #{1: "up", 2: "right", 3: "down", 4: "left"}
boat_position = create_boat(len_of_boat, boat_start, boat_direction, taken_positions) #return the position of boat
#a new boat is created after finishing the while loop
ships.append(boat_position)
taken_positions += boat_position #add all positions of the newly created boat to the list taken_positions
return ships, taken_positions
def create_boat(len_of_boat, boat_start, boat_direction, taken_positions):
# input: len_of_boat, boat_start, boat_direction, taken_positions
# this func initializes boat = []
# with len_of_boat, boat_start, boat_direction, this func create the position of the boat
# calls check_ok(boat, taken_positions) to see if the boat outside playground or the position of the boat is already in taken_position
# return: boat. boat will returned as [-1] or its specific position
boat = []
if boat_direction == 1:
for i in range(len_of_boat):
boat.append(boat_start - i * 10) # already have the position of boat after this line
boat = check_ok(boat, taken_positions)
elif boat_direction == 2:
for i in range(len_of_boat):
boat.append(boat_start + i)
boat = check_ok(boat, taken_positions)
elif boat_direction == 3:
for i in range(len_of_boat):
boat.append(boat_start + i * 10)
boat = check_ok(boat, taken_positions)
elif boat_direction == 4:
|
return boat
def get_shot_comp(guesses, tactics):
# input: guesses (all moves), tactics(which is the list of all valid possible moves for the shot)
# in the first mơve, tactics = []
# this func checks if len(tactics) > 0
# if yes, pick shot = tactics[0]
# if no, pick shot = randrange(99)
# this func check if shot not in guesses(which is the list of all moves)
# if yes, guess.append(shot), and break
# return: the valid shot, guesses
while True:
try:
if len(tactics) > 0:
shot = tactics[0]
else:
shot = randrange(99)
if shot not in guesses:
guesses.append(shot)
break
except:
print("incorrect - please enter integer only")
return shot, guesses
def calculate_tactics(shot, tactics, guesses, hit):
# input: shot, tactics, guesses, hit
# this function takes the newly shot, and changes the tactics list accordingly
# the list temp is the possible positions that the next shot can be
# if the shot hits the first time, len(tactics) = 0. Then, temp is the list contains 4 blocks around the shot
# else, the list temp will be created based on the last 2 shots
# candidate is the list of valid possible shots that is created from temp
# shuffle the order of elements inside candidate
# return: candidate (candidate is tactics)
temp = []
if len(tactics) < 1:
# got 1 hit the first time
temp = [shot - 1, shot + 1, shot - 10, shot + 10] # temporary places that the next shot could be
else:
# got at least 2 hits
# checks to see if the 4 spots around is in hit
if shot - 1 in hit: # east
temp = [shot + 1]
for num in [2, 3, | for i in range(len_of_boat):
boat.append(boat_start - i)
boat = check_ok(boat, taken_positions) | conditional_block |
ei_formulario.js | (id, instancia, rango_tabs, input_submit, maestros, esclavos, invalidos) {
this._id = id;
this._instancia = instancia; //Nombre de la instancia del objeto, permite asociar al objeto con el arbol DOM
this._rango_tabs = rango_tabs;
this._input_submit = input_submit; //Campo que se setea en el submit del form
this.controlador = null; //Referencia al CI contenedor
this._efs = {}; //Lista de objeto_ef contenidos
this._efs_procesar = {}; //ID de los ef's que poseen procesamiento
this._silencioso = false; //¿Silenciar confirmaciones y alertas? Util para testing
this._evento_implicito = null; //No hay evento prefijado
this._expandido = false; //El formulario comienza sin expandirse
this._maestros = maestros;
this._esclavos = esclavos;
this._invalidos = invalidos;
this._estado_inicial = {};
this._con_examen_cambios = false;
this._cambios_excluir_efs = [];
this._tmp_valores_esclavos = {}; //lista temporal de valores a guardar hasta que retorna la cascada
}
/**
* @private
* @param {ef} ef objeto que representa al ef
* @param {string} identificador Id. del ef
*/
ei_formulario.prototype.agregar_ef = function (ef, identificador) {
if (ef) {
this._efs[identificador] = ef;
}
};
/**
*@private
*@param {ef} objeto_ef Objeto que representa al ef
*/
ei_formulario.prototype.instancia_ef = function (objeto_ef) {
var id = objeto_ef.get_id();
return this._instancia + ".ef('"+ id + "')";
};
ei_formulario.prototype.iniciar = function () {
var id_ef;
for (id_ef in this._efs) {
this._efs[id_ef].iniciar(id_ef, this);
this._estado_inicial[id_ef] = this._efs[id_ef].get_estado();
this._efs[id_ef].cuando_cambia_valor(this._instancia + '.validar_ef("' + id_ef + '", true)');
if (this._invalidos[id_ef]) {
this._efs[id_ef].resaltar(this._invalidos[id_ef]);
}
}
if (this._con_examen_cambios) {
this._examinar_cambios();
}
this.agregar_procesamientos();
this.refrescar_procesamientos(true);
this.reset_evento();
if (this.configurar) {
this.configurar();
}
};
//---Consultas
/**
* Accede a la instancia de un ef especifico
* @param {string} id del ef
* @type ef
* @see ef
*/
ei_formulario.prototype.ef = function(id) {
return this._efs[id];
};
/**
* Retorna un objeto asociativo id_ef => ef, para usarlo en algun ciclo, por ej.
* for (id_ef in this.efs()) {
* this.ef(id_ef).metodo()
* }
* @type Object
* @see ef
*/
ei_formulario.prototype.efs = function() {
return this._efs;
};
/**
* Retorna el estado actual de los efs en un Objeto asociativo id_ef=>valor
* @type Object
*/
ei_formulario.prototype.get_datos = function() {
var datos = {};
for (var id_ef in this._efs) {
datos[id_ef] = this._efs[id_ef].get_estado();
}
return datos;
};
//---Submit
ei_formulario.prototype.submit = function() {
var id_ef;
if (this.controlador && !this.controlador.en_submit()) {
return this.controlador.submit();
}
if (this._evento && this.debe_disparar_evento()) {
//Enviar la noticia del submit a los efs
for (id_ef in this._efs) {
this._efs[id_ef].submit();
}
//Marco la ejecucion del evento para que la clase PHP lo reconozca
document.getElementById(this._input_submit).value = this._evento.id;
}
};
//Chequea si es posible realiza el submit de todos los objetos asociados
ei_formulario.prototype.puede_submit = function() {
if(this._evento) //Si hay un evento seteado...
{
//- 1 - Hay que realizar las validaciones
if(! this.validar() ) {
this.reset_evento();
return false;
}
if (! ei.prototype.puede_submit.call(this)) {
return false;
}
}
return true;
};
ei_formulario.prototype.debe_disparar_evento = function()
{
var debe = true, id_ef;
if (this._evento_condicionado_a_datos && this._evento.es_implicito) {
var cambios = false;
for (id_ef in this._efs) {
cambios = (cambios || this.hay_cambios(id_ef));
}
debe = cambios;
}
return debe;
};
//---- Cascadas
/**
* Esquema de Cascadas:<br>
* Un ef indica que su valor cambio y por lo tanto sus esclavos deben refrescarse
* @param {string} id_ef Identificador del ef maestro que sufrio una modificación
*/
ei_formulario.prototype.cascadas_cambio_maestro = function(id_ef, fila)
{
if (this._esclavos[id_ef]) {
this.evt__cascadas_inicio(this.ef(id_ef));
//--Se recorren los esclavos del master modificado
for (var i=0; i < this._esclavos[id_ef].length; i++) {
this.cascadas_preparar_esclavo(this._esclavos[id_ef][i], fila);
}
}
};
/**
* Esquema de Cascadas:<br>
* Determina si los maestros de un ef esclavo tienen sus valores cargados
* @param {string} id_esclavo Identificador del ef esclavo
* @type boolean
*/
ei_formulario.prototype.cascadas_maestros_preparados = function(id_esclavo, fila)
{
for (var i=0; i< this._maestros[id_esclavo].length; i++) {
var ef = this.ef(this._maestros[id_esclavo][i]);
if (ef && typeof fila != 'undefined') {
ef.ir_a_fila(fila);
}
if (ef && ! ef.tiene_estado()) {
return false;
}
}
return true;
};
/**
* Esquema de Cascadas:<br>
* Un ef esclavo esta listo para refrescar su valor en base a sus maestros,
* para esto en este metodo se recolecta los valores de sus maestros y se dispara
* la comunicación con el servidor
* @param {string} id_esclavo Identificador del ef esclavo que se refrescara
*/
ei_formulario.prototype.cascadas_preparar_esclavo = function (id_esclavo, fila)
{
//Primero se resetea por si la consulta nunca retorna
this.cascadas_en_espera(id_esclavo);
//---Todos los maestros tienen estado?
var con_estado = true;
var valores = '';
for (var i=0; i< this._maestros[id_esclavo].length; i++) {
var id_maestro = this._maestros[id_esclavo][i];
var ef = this.ef(id_maestro);
if (ef && ef.tiene_estado()) {
var valor = (typeof fila == 'undefined') ? this.ef(id_maestro).get_estado() : this.ef(id_maestro).ir_a_fila(fila).get_estado();
valores += id_maestro + '-;-' + valor + '-|-';
} else if (ef) {
//-- Evita caso del oculto
con_estado = false;
break;
}
}
//--- Si estan todos los maestros puedo ir al server a preguntar el valor de este
if (con_estado) {
if (this.ef(id_esclavo)._cascadas_ajax) {
//Caso normal
this.cascadas_comunicar(id_esclavo, valores, fila);
} else {
//Caso combo_editable
this.ef(id_esclavo).set_solo_lectura(false);
}
}
};
/**
* Esquema de Cascadas:<br>
* Retorna | ei_formulario | identifier_name |
|
ei_formulario.js | _maestro = function(id_ef, fila)
{
if (this._esclavos[id_ef]) {
this.evt__cascadas_inicio(this.ef(id_ef));
//--Se recorren los esclavos del master modificado
for (var i=0; i < this._esclavos[id_ef].length; i++) {
this.cascadas_preparar_esclavo(this._esclavos[id_ef][i], fila);
}
}
};
/**
* Esquema de Cascadas:<br>
* Determina si los maestros de un ef esclavo tienen sus valores cargados
* @param {string} id_esclavo Identificador del ef esclavo
* @type boolean
*/
ei_formulario.prototype.cascadas_maestros_preparados = function(id_esclavo, fila)
{
for (var i=0; i< this._maestros[id_esclavo].length; i++) {
var ef = this.ef(this._maestros[id_esclavo][i]);
if (ef && typeof fila != 'undefined') {
ef.ir_a_fila(fila);
}
if (ef && ! ef.tiene_estado()) {
return false;
}
}
return true;
};
/**
* Esquema de Cascadas:<br>
* Un ef esclavo esta listo para refrescar su valor en base a sus maestros,
* para esto en este metodo se recolecta los valores de sus maestros y se dispara
* la comunicación con el servidor
* @param {string} id_esclavo Identificador del ef esclavo que se refrescara
*/
ei_formulario.prototype.cascadas_preparar_esclavo = function (id_esclavo, fila)
{
//Primero se resetea por si la consulta nunca retorna
this.cascadas_en_espera(id_esclavo);
//---Todos los maestros tienen estado?
var con_estado = true;
var valores = '';
for (var i=0; i< this._maestros[id_esclavo].length; i++) {
var id_maestro = this._maestros[id_esclavo][i];
var ef = this.ef(id_maestro);
if (ef && ef.tiene_estado()) {
var valor = (typeof fila == 'undefined') ? this.ef(id_maestro).get_estado() : this.ef(id_maestro).ir_a_fila(fila).get_estado();
valores += id_maestro + '-;-' + valor + '-|-';
} else if (ef) {
//-- Evita caso del oculto
con_estado = false;
break;
}
}
//--- Si estan todos los maestros puedo ir al server a preguntar el valor de este
if (con_estado) {
if (this.ef(id_esclavo)._cascadas_ajax) {
//Caso normal
this.cascadas_comunicar(id_esclavo, valores, fila);
} else {
//Caso combo_editable
this.ef(id_esclavo).set_solo_lectura(false);
}
}
};
/**
* Esquema de Cascadas:<br>
* Retorna el estado actual de los maestros directos de un esclavo
* @param {string} id_esclavo Identificador del ef esclavo que se refrescara
*/
ei_formulario.prototype.get_valores_maestros = function (id_esclavo, fila)
{
var maestros = {};
for (var i=0; i< this._maestros[id_esclavo].length; i++) {
var id_maestro = this._maestros[id_esclavo][i];
var ef = (typeof fila == 'undefined') ? this.ef(id_maestro): this.ef(id_maestro).ir_a_fila(fila);
if (ef && ef.tiene_estado()) {
maestros[id_maestro] = (typeof fila == 'undefined') ? this.ef(id_maestro).get_estado() : this.ef(id_maestro).ir_a_fila(fila).get_estado();
}
}
return maestros;
};
/**
* @private
* @param {string} id_ef Id. del ef
*/
ei_formulario.prototype.cascadas_en_espera = function(id_ef)
{
if (this.ef(id_ef).tiene_estado() && this.ef(id_ef).mantiene_valor_cascada()) { //Guardo el estado actual por si acaso vuelve en la respuesta
this._tmp_valores_esclavos[id_ef] = this.ef(id_ef).get_estado();
}
//Se resetea y desactiva al ef y todos sus esclavos
this.ef(id_ef).borrar_opciones();
this.ef(id_ef).desactivar();
if (this._esclavos[id_ef]) {
for (var i=0; i< this._esclavos[id_ef].length; i++) {
this.cascadas_en_espera(this._esclavos[id_ef][i]);
}
}
};
/**
* Esquema de Cascadas:<br>
* Se comunica al servidor que debe refrescar el valor de un ef en base a valores especificos de sus efs maestros
* Este método dispara la llamada asincronica al servidor
* @see #cascadas_respuesta
* @param {string} id_ef Id. del ef a refrescar (un ef esclavo)
* @param {string valores Lista plana de valores. Formato: ef1-;-valor1-|-ef2-;-valor2-|- etc.
*/
ei_formulario.prototype.cascadas_comunicar = function(id_ef, valores, fila)
{
//Empaqueto toda la informacion que tengo que mandar.
var parametros = {'cascadas-ef': id_ef, 'cascadas-maestros' : valores};
if (typeof fila != 'undefined') {
parametros['cascadas-fila'] = fila;
}
var callback = {
success: this.cascadas_respuesta,
failure: toba.error_comunicacion,
argument: id_ef,
scope: this
};
var vinculo = vinculador.get_url(null, null, 'cascadas_efs', parametros, [this._id]);
var con = conexion.asyncRequest('GET', vinculo, callback, null);
};
/**
* Esquema de Cascadas:<br>
* Respuesta del servidor ante el pedido de refresco de un ef puntual
* @param {Object} respuesta La respuesta es un objeto asociativo con claves responseText que contiene el nuevo valor del ef
*/
ei_formulario.prototype.cascadas_respuesta = function(respuesta)
{
if (respuesta.responseText === '') {
var error = 'Error en la respuesta de la cascada, para más información consulte el log';
notificacion.limpiar();
notificacion.agregar(error);
notificacion.mostrar();
} else {
try {
var datos_rs = JSON.parse(respuesta.responseText);
var datos_asociativo;
if ('Array' == getObjectClass(datos_rs)) {
datos_asociativo = [];
for (var ind = 0; ind < datos_rs.length ; ind++) {
datos_asociativo[datos_rs[ind][0]] = datos_rs[ind][1];
}
//Se le pasa el formato RS para que no se rompa el ordenamiento, para el resto se usa el asociativo por BC
this.ef(respuesta.argument).set_opciones_rs(datos_rs);
} else {
datos_asociativo = datos_rs;
this.ef(respuesta.argument).set_opciones(datos_asociativo);
}
if(this.ef(respuesta.argument).mantiene_valor_cascada() && isset(this._tmp_valores_esclavos[respuesta.argument])) {
var valor_viejo = this._tmp_valores_esclavos[respuesta.argument];
if (isset(datos_asociativo[valor_viejo])) {
this.ef(respuesta.argument).set_estado(valor_viejo);
}
}
this.evt__cascadas_fin(this.ef(respuesta.argument), datos_asociativo);
} catch (e) {
var componente = "<textarea id='displayMore' class='ef-input-solo-lectura' cols='30' rows='35' readonly='true' style='display:none;'>" + respuesta.responseText + '</textarea>';
var error = 'Error en la respueta.<br>' + 'Error JS:<br>' + e + '<br>Mensaje Server:<br>' +
"<a href='#' onclick='toggle_nodo(document.getElementById(\"displayMore\"));'>Mas</a><br>" + componente;
notificacion.limpiar();
notificacion.agregar(error);
notificacion.mostrar();
} | random_line_split |
||
ei_formulario.js | de todos los objetos asociados
ei_formulario.prototype.puede_submit = function() {
if(this._evento) //Si hay un evento seteado...
{
//- 1 - Hay que realizar las validaciones
if(! this.validar() ) {
this.reset_evento();
return false;
}
if (! ei.prototype.puede_submit.call(this)) {
return false;
}
}
return true;
};
ei_formulario.prototype.debe_disparar_evento = function()
{
var debe = true, id_ef;
if (this._evento_condicionado_a_datos && this._evento.es_implicito) {
var cambios = false;
for (id_ef in this._efs) {
cambios = (cambios || this.hay_cambios(id_ef));
}
debe = cambios;
}
return debe;
};
//---- Cascadas
/**
* Esquema de Cascadas:<br>
* Un ef indica que su valor cambio y por lo tanto sus esclavos deben refrescarse
* @param {string} id_ef Identificador del ef maestro que sufrio una modificación
*/
ei_formulario.prototype.cascadas_cambio_maestro = function(id_ef, fila)
{
if (this._esclavos[id_ef]) {
this.evt__cascadas_inicio(this.ef(id_ef));
//--Se recorren los esclavos del master modificado
for (var i=0; i < this._esclavos[id_ef].length; i++) {
this.cascadas_preparar_esclavo(this._esclavos[id_ef][i], fila);
}
}
};
/**
* Esquema de Cascadas:<br>
* Determina si los maestros de un ef esclavo tienen sus valores cargados
* @param {string} id_esclavo Identificador del ef esclavo
* @type boolean
*/
ei_formulario.prototype.cascadas_maestros_preparados = function(id_esclavo, fila)
{
for (var i=0; i< this._maestros[id_esclavo].length; i++) {
var ef = this.ef(this._maestros[id_esclavo][i]);
if (ef && typeof fila != 'undefined') {
ef.ir_a_fila(fila);
}
if (ef && ! ef.tiene_estado()) {
return false;
}
}
return true;
};
/**
* Esquema de Cascadas:<br>
* Un ef esclavo esta listo para refrescar su valor en base a sus maestros,
* para esto en este metodo se recolecta los valores de sus maestros y se dispara
* la comunicación con el servidor
* @param {string} id_esclavo Identificador del ef esclavo que se refrescara
*/
ei_formulario.prototype.cascadas_preparar_esclavo = function (id_esclavo, fila)
{
//Primero se resetea por si la consulta nunca retorna
this.cascadas_en_espera(id_esclavo);
//---Todos los maestros tienen estado?
var con_estado = true;
var valores = '';
for (var i=0; i< this._maestros[id_esclavo].length; i++) {
var id_maestro = this._maestros[id_esclavo][i];
var ef = this.ef(id_maestro);
if (ef && ef.tiene_estado()) {
var valor = (typeof fila == 'undefined') ? this.ef(id_maestro).get_estado() : this.ef(id_maestro).ir_a_fila(fila).get_estado();
valores += id_maestro + '-;-' + valor + '-|-';
} else if (ef) {
//-- Evita caso del oculto
con_estado = false;
break;
}
}
//--- Si estan todos los maestros puedo ir al server a preguntar el valor de este
if (con_estado) {
if (this.ef(id_esclavo)._cascadas_ajax) {
//Caso normal
this.cascadas_comunicar(id_esclavo, valores, fila);
} else {
//Caso combo_editable
this.ef(id_esclavo).set_solo_lectura(false);
}
}
};
/**
* Esquema de Cascadas:<br>
* Retorna el estado actual de los maestros directos de un esclavo
* @param {string} id_esclavo Identificador del ef esclavo que se refrescara
*/
ei_formulario.prototype.get_valores_maestros = function (id_esclavo, fila)
{
var maestros = {};
for (var i=0; i< this._maestros[id_esclavo].length; i++) {
var id_maestro = this._maestros[id_esclavo][i];
var ef = (typeof fila == 'undefined') ? this.ef(id_maestro): this.ef(id_maestro).ir_a_fila(fila);
if (ef && ef.tiene_estado()) {
maestros[id_maestro] = (typeof fila == 'undefined') ? this.ef(id_maestro).get_estado() : this.ef(id_maestro).ir_a_fila(fila).get_estado();
}
}
return maestros;
};
/**
* @private
* @param {string} id_ef Id. del ef
*/
ei_formulario.prototype.cascadas_en_espera = function(id_ef)
{
if (this.ef(id_ef).tiene_estado() && this.ef(id_ef).mantiene_valor_cascada()) { //Guardo el estado actual por si acaso vuelve en la respuesta
this._tmp_valores_esclavos[id_ef] = this.ef(id_ef).get_estado();
}
//Se resetea y desactiva al ef y todos sus esclavos
this.ef(id_ef).borrar_opciones();
this.ef(id_ef).desactivar();
if (this._esclavos[id_ef]) {
for (var i=0; i< this._esclavos[id_ef].length; i++) {
this.cascadas_en_espera(this._esclavos[id_ef][i]);
}
}
};
/**
* Esquema de Cascadas:<br>
* Se comunica al servidor que debe refrescar el valor de un ef en base a valores especificos de sus efs maestros
* Este método dispara la llamada asincronica al servidor
* @see #cascadas_respuesta
* @param {string} id_ef Id. del ef a refrescar (un ef esclavo)
* @param {string valores Lista plana de valores. Formato: ef1-;-valor1-|-ef2-;-valor2-|- etc.
*/
ei_formulario.prototype.cascadas_comunicar = function(id_ef, valores, fila)
{
//Empaqueto toda la informacion que tengo que mandar.
var parametros = {'cascadas-ef': id_ef, 'cascadas-maestros' : valores};
if (typeof fila != 'undefined') {
parametros['cascadas-fila'] = fila;
}
var callback = {
success: this.cascadas_respuesta,
failure: toba.error_comunicacion,
argument: id_ef,
scope: this
};
var vinculo = vinculador.get_url(null, null, 'cascadas_efs', parametros, [this._id]);
var con = conexion.asyncRequest('GET', vinculo, callback, null);
};
/**
* Esquema de Cascadas:<br>
* Respuesta del servidor ante el pedido de refresco de un ef puntual
* @param {Object} respuesta La respuesta es un objeto asociativo con claves responseText que contiene el nuevo valor del ef
*/
ei_formulario.prototype.cascadas_respuesta = function(respuesta)
{
if (respuesta.responseText === '') {
var error = 'Error en la respuesta de la cascada, para más información consulte el log';
notificacion.limpiar();
notificacion.agregar(error);
notificacion.mostrar();
} else {
try {
var datos_rs = JSON.parse(respuesta.responseText);
var datos_asociativo;
if ('Array' == getObjectClass(datos_rs)) {
| datos_asociativo = [];
for (var ind = 0; ind < datos_rs.length ; ind++) {
datos_asociativo[datos_rs[ind][0]] = datos_rs[ind][1];
}
//Se le pasa el formato RS para que no se rompa el ordenamiento, para el resto se usa el asociativo por BC
this.ef(respuesta.argument).set_opciones_rs(datos_rs);
} else | conditional_block |
|
ei_formulario.js |
/**
* @private
* @param {ef} ef objeto que representa al ef
* @param {string} identificador Id. del ef
*/
ei_formulario.prototype.agregar_ef = function (ef, identificador) {
if (ef) {
this._efs[identificador] = ef;
}
};
/**
*@private
*@param {ef} objeto_ef Objeto que representa al ef
*/
ei_formulario.prototype.instancia_ef = function (objeto_ef) {
var id = objeto_ef.get_id();
return this._instancia + ".ef('"+ id + "')";
};
ei_formulario.prototype.iniciar = function () {
var id_ef;
for (id_ef in this._efs) {
this._efs[id_ef].iniciar(id_ef, this);
this._estado_inicial[id_ef] = this._efs[id_ef].get_estado();
this._efs[id_ef].cuando_cambia_valor(this._instancia + '.validar_ef("' + id_ef + '", true)');
if (this._invalidos[id_ef]) {
this._efs[id_ef].resaltar(this._invalidos[id_ef]);
}
}
if (this._con_examen_cambios) {
this._examinar_cambios();
}
this.agregar_procesamientos();
this.refrescar_procesamientos(true);
this.reset_evento();
if (this.configurar) {
this.configurar();
}
};
//---Consultas
/**
* Accede a la instancia de un ef especifico
* @param {string} id del ef
* @type ef
* @see ef
*/
ei_formulario.prototype.ef = function(id) {
return this._efs[id];
};
/**
* Retorna un objeto asociativo id_ef => ef, para usarlo en algun ciclo, por ej.
* for (id_ef in this.efs()) {
* this.ef(id_ef).metodo()
* }
* @type Object
* @see ef
*/
ei_formulario.prototype.efs = function() {
return this._efs;
};
/**
* Retorna el estado actual de los efs en un Objeto asociativo id_ef=>valor
* @type Object
*/
ei_formulario.prototype.get_datos = function() {
var datos = {};
for (var id_ef in this._efs) {
datos[id_ef] = this._efs[id_ef].get_estado();
}
return datos;
};
//---Submit
ei_formulario.prototype.submit = function() {
var id_ef;
if (this.controlador && !this.controlador.en_submit()) {
return this.controlador.submit();
}
if (this._evento && this.debe_disparar_evento()) {
//Enviar la noticia del submit a los efs
for (id_ef in this._efs) {
this._efs[id_ef].submit();
}
//Marco la ejecucion del evento para que la clase PHP lo reconozca
document.getElementById(this._input_submit).value = this._evento.id;
}
};
//Chequea si es posible realiza el submit de todos los objetos asociados
ei_formulario.prototype.puede_submit = function() {
if(this._evento) //Si hay un evento seteado...
{
//- 1 - Hay que realizar las validaciones
if(! this.validar() ) {
this.reset_evento();
return false;
}
if (! ei.prototype.puede_submit.call(this)) {
return false;
}
}
return true;
};
ei_formulario.prototype.debe_disparar_evento = function()
{
var debe = true, id_ef;
if (this._evento_condicionado_a_datos && this._evento.es_implicito) {
var cambios = false;
for (id_ef in this._efs) {
cambios = (cambios || this.hay_cambios(id_ef));
}
debe = cambios;
}
return debe;
};
//---- Cascadas
/**
* Esquema de Cascadas:<br>
* Un ef indica que su valor cambio y por lo tanto sus esclavos deben refrescarse
* @param {string} id_ef Identificador del ef maestro que sufrio una modificación
*/
ei_formulario.prototype.cascadas_cambio_maestro = function(id_ef, fila)
{
if (this._esclavos[id_ef]) {
this.evt__cascadas_inicio(this.ef(id_ef));
//--Se recorren los esclavos del master modificado
for (var i=0; i < this._esclavos[id_ef].length; i++) {
this.cascadas_preparar_esclavo(this._esclavos[id_ef][i], fila);
}
}
};
/**
* Esquema de Cascadas:<br>
* Determina si los maestros de un ef esclavo tienen sus valores cargados
* @param {string} id_esclavo Identificador del ef esclavo
* @type boolean
*/
ei_formulario.prototype.cascadas_maestros_preparados = function(id_esclavo, fila)
{
for (var i=0; i< this._maestros[id_esclavo].length; i++) {
var ef = this.ef(this._maestros[id_esclavo][i]);
if (ef && typeof fila != 'undefined') {
ef.ir_a_fila(fila);
}
if (ef && ! ef.tiene_estado()) {
return false;
}
}
return true;
};
/**
* Esquema de Cascadas:<br>
* Un ef esclavo esta listo para refrescar su valor en base a sus maestros,
* para esto en este metodo se recolecta los valores de sus maestros y se dispara
* la comunicación con el servidor
* @param {string} id_esclavo Identificador del ef esclavo que se refrescara
*/
ei_formulario.prototype.cascadas_preparar_esclavo = function (id_esclavo, fila)
{
//Primero se resetea por si la consulta nunca retorna
this.cascadas_en_espera(id_esclavo);
//---Todos los maestros tienen estado?
var con_estado = true;
var valores = '';
for (var i=0; i< this._maestros[id_esclavo].length; i++) {
var id_maestro = this._maestros[id_esclavo][i];
var ef = this.ef(id_maestro);
if (ef && ef.tiene_estado()) {
var valor = (typeof fila == 'undefined') ? this.ef(id_maestro).get_estado() : this.ef(id_maestro).ir_a_fila(fila).get_estado();
valores += id_maestro + '-;-' + valor + '-|-';
} else if (ef) {
//-- Evita caso del oculto
con_estado = false;
break;
}
}
//--- Si estan todos los maestros puedo ir al server a preguntar el valor de este
if (con_estado) {
if (this.ef(id_esclavo)._cascadas_ajax) {
//Caso normal
this.cascadas_comunicar(id_esclavo, valores, fila);
} else {
//Caso combo_editable
this.ef(id_esclavo).set_solo_lectura(false);
}
}
};
/**
* Esquema de Cascadas:<br>
* Retorna el estado actual de los maestros directos de un esclavo
* @param {string} id_es | {
this._id = id;
this._instancia = instancia; //Nombre de la instancia del objeto, permite asociar al objeto con el arbol DOM
this._rango_tabs = rango_tabs;
this._input_submit = input_submit; //Campo que se setea en el submit del form
this.controlador = null; //Referencia al CI contenedor
this._efs = {}; //Lista de objeto_ef contenidos
this._efs_procesar = {}; //ID de los ef's que poseen procesamiento
this._silencioso = false; //¿Silenciar confirmaciones y alertas? Util para testing
this._evento_implicito = null; //No hay evento prefijado
this._expandido = false; //El formulario comienza sin expandirse
this._maestros = maestros;
this._esclavos = esclavos;
this._invalidos = invalidos;
this._estado_inicial = {};
this._con_examen_cambios = false;
this._cambios_excluir_efs = [];
this._tmp_valores_esclavos = {}; //lista temporal de valores a guardar hasta que retorna la cascada
} | identifier_body |
|
webots_launcher.py | implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This launcher simply starts Webots."""
import os
import re
import shutil
import subprocess
import sys
import tempfile
from pathlib import Path
from launch.actions import ExecuteProcess
from launch_ros.actions import Node
from launch.launch_context import LaunchContext
from launch.substitution import Substitution
from launch.substitutions import TextSubstitution
from launch.substitutions.path_join_substitution import PathJoinSubstitution
from ament_index_python.packages import get_package_share_directory, get_package_prefix
from webots_ros2_driver.utils import (get_webots_home,
handle_webots_installation,
is_wsl,
has_shared_folder,
container_shared_folder,
controller_url_prefix)
class _ConditionalSubstitution(Substitution):
def __init__(self, *, condition, false_value='', true_value=''):
self.__condition = condition if isinstance(condition, Substitution) else TextSubstitution(text=str(condition))
self.__false_value = false_value if isinstance(false_value, Substitution) else TextSubstitution(text=false_value)
self.__true_value = true_value if isinstance(true_value, Substitution) else TextSubstitution(text=true_value)
def perform(self, context):
if context.perform_substitution(self.__condition).lower() in ['false', '0', '']:
return context.perform_substitution(self.__false_value)
return context.perform_substitution(self.__true_value)
class WebotsLauncher(ExecuteProcess):
def __init__(self, output='screen', world=None, gui=True, mode='realtime', stream=False, ros2_supervisor=False,
port='1234', **kwargs):
if sys.platform == 'win32':
print('WARNING: Native webots_ros2 compatibility with Windows is deprecated and will be removed soon. Please use a '
'WSL (Windows Subsystem for Linux) environment instead.', file=sys.stderr)
print('WARNING: Check https://github.com/cyberbotics/webots_ros2/wiki/Complete-Installation-Guide for more '
'information.', file=sys.stderr)
self.__is_wsl = is_wsl()
self.__has_shared_folder = has_shared_folder()
self.__is_supervisor = ros2_supervisor
if self.__is_supervisor:
self._supervisor = Ros2SupervisorLauncher(port=port)
# Find Webots executable
if not self.__has_shared_folder:
webots_path = get_webots_home(show_warning=True)
if webots_path is None:
handle_webots_installation()
webots_path = get_webots_home()
if self.__is_wsl:
webots_path = os.path.join(webots_path, 'msys64', 'mingw64', 'bin', 'webots.exe')
else:
webots_path = os.path.join(webots_path, 'webots')
else:
webots_path = ''
mode = mode if isinstance(mode, Substitution) else TextSubstitution(text=mode)
self.__world_copy = tempfile.NamedTemporaryFile(mode='w+', suffix='_world_with_URDF_robot.wbt', delete=False)
self.__world = world
if not isinstance(world, Substitution):
world = TextSubstitution(text=self.__world_copy.name)
if self.__is_wsl:
wsl_tmp_path = subprocess.check_output(['wslpath', '-w', self.__world_copy.name]).strip().decode('utf-8')
world = TextSubstitution(text=wsl_tmp_path)
no_rendering = _ConditionalSubstitution(condition=gui, false_value='--no-rendering')
stdout = _ConditionalSubstitution(condition=gui, false_value='--stdout')
stderr = _ConditionalSubstitution(condition=gui, false_value='--stderr')
minimize = _ConditionalSubstitution(condition=gui, false_value='--minimize')
if isinstance(stream, bool):
stream_argument = _ConditionalSubstitution(condition=stream, true_value='--stream')
else:
stream_argument = "--stream=" + stream
port_argument = '--port=' + port
xvfb_run_prefix = []
if 'WEBOTS_OFFSCREEN' in os.environ:
xvfb_run_prefix.append('xvfb-run')
xvfb_run_prefix.append('--auto-servernum')
no_rendering = '--no-rendering'
# Initialize command to start Webots remotely through TCP
if self.__has_shared_folder:
webots_tcp_client = (os.path.join(get_package_share_directory('webots_ros2_driver'), 'scripts',
'webots_tcp_client.py'))
super().__init__(
output=output,
cmd=[
'python3',
webots_tcp_client,
stream_argument,
port_argument,
no_rendering,
stdout,
stderr,
minimize,
'--batch',
['--mode=', mode],
os.path.basename(self.__world_copy.name),
],
name='webots_tcp_client',
**kwargs
)
# Initialize command to start Webots locally
else:
# no_rendering, stdout, stderr, minimize
super().__init__(
output=output,
cmd=xvfb_run_prefix + [
webots_path,
stream_argument,
port_argument,
no_rendering,
stdout,
stderr,
minimize,
world,
'--batch',
['--mode=', mode],
],
name='webots',
**kwargs
)
def execute(self, context: LaunchContext):
# User can give a PathJoinSubstitution world or an absolute path world
if isinstance(self.__world, PathJoinSubstitution):
world_path = self.__world.perform(context)
context.launch_configurations['world'] = self.__world_copy.name
else:
world_path = self.__world
shutil.copy2(world_path, self.__world_copy.name)
# look for a wbproj file and copy if available
wbproj_path = Path(world_path).with_name('.' + Path(world_path).stem + '.wbproj')
if wbproj_path.exists():
wbproj_copy_path = Path(self.__world_copy.name).with_name('.' + Path(self.__world_copy.name).stem +
'.wbproj')
shutil.copy2(wbproj_path, wbproj_copy_path)
# copy sumo network file if it exists
sumonet_path = Path(world_path).with_name(Path(world_path).stem + '_net')
if sumonet_path.exists():
sumonet_copy_path = Path(self.__world_copy.name).with_name(Path(self.__world_copy.name).stem + '_net')
shutil.copytree(sumonet_path, sumonet_copy_path)
# Update relative paths in the world
with open(self.__world_copy.name, 'r') as file:
content = file.read()
for match in re.finditer('\"((?:[^\"]*)\\.(?:jpe?g|png|hdr|obj|stl|dae|wav|mp3|proto))\"', content):
url_path = match.group(1)
# Absolute path or Webots relative path or Web paths
if os.path.isabs(url_path) or url_path.startswith('webots://') or url_path.startswith('http://') \
or url_path.startswith('https://'):
continue
new_url_path = os.path.split(world_path)[0] + '/' + url_path
if self.__has_shared_folder:
# Copy asset to shared folder
shutil.copy(new_url_path, os.path.join(container_shared_folder(), os.path.basename(new_url_path)))
new_url_path = './' + os.path.basename(new_url_path)
if self.__is_wsl:
command = ['wslpath', '-w', new_url_path]
new_url_path = subprocess.check_output(command).strip().decode('utf-8').replace('\\', '/')
new_url_path = '"' + new_url_path + '"'
url_path = '"' + url_path + '"'
content = content.replace(url_path, new_url_path)
with open(self.__world_copy.name, 'w') as file:
file.write(content) | # Add the Ros2Supervisor
if self.__is_supervisor:
indent = ' '
world_file = open(self.__world_copy.name, 'a')
world_file.write('Robot {\n')
world_file.write(indent + 'name "Ros2Supervisor"\n')
world_file.write(indent + 'controller "<extern>"\n')
world_file.write(indent + 'supervisor TRUE\n')
world_file.write('}\n')
world_file.close()
# Copy world file to shared folder
if self.__has_shared_folder:
shared_world_file = os.path.join(container_shared_folder(), os.path.basename(self.__world_copy.name))
shutil.copy(self.__world_copy.name, shared_world_file)
if wbproj_path.exists():
shared_wbproj_copy_path = Path(shared_world_file).with_name('.' + Path(shared_world_file).stem + '.wbproj')
shutil.copy(wbproj_path, shared_wbproj_copy_path)
# Execute process
return super().execute(context)
def _shutdown_process(self, context, *, send_sigint):
# Remove copy of the world and the corresponding ".wbproj" file
if self.__world_copy:
self.__world_copy.close()
if os.path.isfile(self.__world_copy.name):
os.unlink(self.__world_copy.name)
path, file = os.path.split(self.__world_copy.name)
world_copy_secondary_file = os.path.join(path, '.' + file[:-1] + 'proj')
if os.path.isfile(world_copy_secondary_file):
os.unlink(world_copy_secondary_file | random_line_split |
|
webots_launcher.py | import Node
from launch.launch_context import LaunchContext
from launch.substitution import Substitution
from launch.substitutions import TextSubstitution
from launch.substitutions.path_join_substitution import PathJoinSubstitution
from ament_index_python.packages import get_package_share_directory, get_package_prefix
from webots_ros2_driver.utils import (get_webots_home,
handle_webots_installation,
is_wsl,
has_shared_folder,
container_shared_folder,
controller_url_prefix)
class _ConditionalSubstitution(Substitution):
def __init__(self, *, condition, false_value='', true_value=''):
self.__condition = condition if isinstance(condition, Substitution) else TextSubstitution(text=str(condition))
self.__false_value = false_value if isinstance(false_value, Substitution) else TextSubstitution(text=false_value)
self.__true_value = true_value if isinstance(true_value, Substitution) else TextSubstitution(text=true_value)
def perform(self, context):
if context.perform_substitution(self.__condition).lower() in ['false', '0', '']:
return context.perform_substitution(self.__false_value)
return context.perform_substitution(self.__true_value)
class WebotsLauncher(ExecuteProcess):
def __init__(self, output='screen', world=None, gui=True, mode='realtime', stream=False, ros2_supervisor=False,
port='1234', **kwargs):
if sys.platform == 'win32':
print('WARNING: Native webots_ros2 compatibility with Windows is deprecated and will be removed soon. Please use a '
'WSL (Windows Subsystem for Linux) environment instead.', file=sys.stderr)
print('WARNING: Check https://github.com/cyberbotics/webots_ros2/wiki/Complete-Installation-Guide for more '
'information.', file=sys.stderr)
self.__is_wsl = is_wsl()
self.__has_shared_folder = has_shared_folder()
self.__is_supervisor = ros2_supervisor
if self.__is_supervisor:
self._supervisor = Ros2SupervisorLauncher(port=port)
# Find Webots executable
if not self.__has_shared_folder:
webots_path = get_webots_home(show_warning=True)
if webots_path is None:
handle_webots_installation()
webots_path = get_webots_home()
if self.__is_wsl:
webots_path = os.path.join(webots_path, 'msys64', 'mingw64', 'bin', 'webots.exe')
else:
webots_path = os.path.join(webots_path, 'webots')
else:
webots_path = ''
mode = mode if isinstance(mode, Substitution) else TextSubstitution(text=mode)
self.__world_copy = tempfile.NamedTemporaryFile(mode='w+', suffix='_world_with_URDF_robot.wbt', delete=False)
self.__world = world
if not isinstance(world, Substitution):
world = TextSubstitution(text=self.__world_copy.name)
if self.__is_wsl:
wsl_tmp_path = subprocess.check_output(['wslpath', '-w', self.__world_copy.name]).strip().decode('utf-8')
world = TextSubstitution(text=wsl_tmp_path)
no_rendering = _ConditionalSubstitution(condition=gui, false_value='--no-rendering')
stdout = _ConditionalSubstitution(condition=gui, false_value='--stdout')
stderr = _ConditionalSubstitution(condition=gui, false_value='--stderr')
minimize = _ConditionalSubstitution(condition=gui, false_value='--minimize')
if isinstance(stream, bool):
stream_argument = _ConditionalSubstitution(condition=stream, true_value='--stream')
else:
stream_argument = "--stream=" + stream
port_argument = '--port=' + port
xvfb_run_prefix = []
if 'WEBOTS_OFFSCREEN' in os.environ:
xvfb_run_prefix.append('xvfb-run')
xvfb_run_prefix.append('--auto-servernum')
no_rendering = '--no-rendering'
# Initialize command to start Webots remotely through TCP
if self.__has_shared_folder:
webots_tcp_client = (os.path.join(get_package_share_directory('webots_ros2_driver'), 'scripts',
'webots_tcp_client.py'))
super().__init__(
output=output,
cmd=[
'python3',
webots_tcp_client,
stream_argument,
port_argument,
no_rendering,
stdout,
stderr,
minimize,
'--batch',
['--mode=', mode],
os.path.basename(self.__world_copy.name),
],
name='webots_tcp_client',
**kwargs
)
# Initialize command to start Webots locally
else:
# no_rendering, stdout, stderr, minimize
super().__init__(
output=output,
cmd=xvfb_run_prefix + [
webots_path,
stream_argument,
port_argument,
no_rendering,
stdout,
stderr,
minimize,
world,
'--batch',
['--mode=', mode],
],
name='webots',
**kwargs
)
def execute(self, context: LaunchContext):
# User can give a PathJoinSubstitution world or an absolute path world
if isinstance(self.__world, PathJoinSubstitution):
world_path = self.__world.perform(context)
context.launch_configurations['world'] = self.__world_copy.name
else:
world_path = self.__world
shutil.copy2(world_path, self.__world_copy.name)
# look for a wbproj file and copy if available
wbproj_path = Path(world_path).with_name('.' + Path(world_path).stem + '.wbproj')
if wbproj_path.exists():
wbproj_copy_path = Path(self.__world_copy.name).with_name('.' + Path(self.__world_copy.name).stem +
'.wbproj')
shutil.copy2(wbproj_path, wbproj_copy_path)
# copy sumo network file if it exists
sumonet_path = Path(world_path).with_name(Path(world_path).stem + '_net')
if sumonet_path.exists():
sumonet_copy_path = Path(self.__world_copy.name).with_name(Path(self.__world_copy.name).stem + '_net')
shutil.copytree(sumonet_path, sumonet_copy_path)
# Update relative paths in the world
with open(self.__world_copy.name, 'r') as file:
content = file.read()
for match in re.finditer('\"((?:[^\"]*)\\.(?:jpe?g|png|hdr|obj|stl|dae|wav|mp3|proto))\"', content):
url_path = match.group(1)
# Absolute path or Webots relative path or Web paths
if os.path.isabs(url_path) or url_path.startswith('webots://') or url_path.startswith('http://') \
or url_path.startswith('https://'):
continue
new_url_path = os.path.split(world_path)[0] + '/' + url_path
if self.__has_shared_folder:
# Copy asset to shared folder
shutil.copy(new_url_path, os.path.join(container_shared_folder(), os.path.basename(new_url_path)))
new_url_path = './' + os.path.basename(new_url_path)
if self.__is_wsl:
command = ['wslpath', '-w', new_url_path]
new_url_path = subprocess.check_output(command).strip().decode('utf-8').replace('\\', '/')
new_url_path = '"' + new_url_path + '"'
url_path = '"' + url_path + '"'
content = content.replace(url_path, new_url_path)
with open(self.__world_copy.name, 'w') as file:
file.write(content)
# Add the Ros2Supervisor
if self.__is_supervisor:
indent = ' '
world_file = open(self.__world_copy.name, 'a')
world_file.write('Robot {\n')
world_file.write(indent + 'name "Ros2Supervisor"\n')
world_file.write(indent + 'controller "<extern>"\n')
world_file.write(indent + 'supervisor TRUE\n')
world_file.write('}\n')
world_file.close()
# Copy world file to shared folder
if self.__has_shared_folder:
shared_world_file = os.path.join(container_shared_folder(), os.path.basename(self.__world_copy.name))
shutil.copy(self.__world_copy.name, shared_world_file)
if wbproj_path.exists():
shared_wbproj_copy_path = Path(shared_world_file).with_name('.' + Path(shared_world_file).stem + '.wbproj')
shutil.copy(wbproj_path, shared_wbproj_copy_path)
# Execute process
return super().execute(context)
def _shutdown_process(self, context, *, send_sigint):
# Remove copy of the world and the corresponding ".wbproj" file
if self.__world_copy:
self.__world_copy.close()
if os.path.isfile(self.__world_copy.name):
os.unlink(self.__world_copy.name)
path, file = os.path.split(self.__world_copy.name)
world_copy_secondary_file = os.path.join(path, '.' + file[:-1] + 'proj')
if os.path.isfile(world_copy_secondary_file):
os.unlink(world_copy_secondary_file)
# Clean the content of the shared directory for next run
if self.__has_shared_folder:
for filename in os.listdir(container_shared_folder()):
file_path = os.path.join(container_shared_folder(), filename)
try:
if os.path.isfile(file_path):
| os.unlink(file_path) | conditional_block |
|
webots_launcher.py | implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This launcher simply starts Webots."""
import os
import re
import shutil
import subprocess
import sys
import tempfile
from pathlib import Path
from launch.actions import ExecuteProcess
from launch_ros.actions import Node
from launch.launch_context import LaunchContext
from launch.substitution import Substitution
from launch.substitutions import TextSubstitution
from launch.substitutions.path_join_substitution import PathJoinSubstitution
from ament_index_python.packages import get_package_share_directory, get_package_prefix
from webots_ros2_driver.utils import (get_webots_home,
handle_webots_installation,
is_wsl,
has_shared_folder,
container_shared_folder,
controller_url_prefix)
class _ConditionalSubstitution(Substitution):
def | (self, *, condition, false_value='', true_value=''):
self.__condition = condition if isinstance(condition, Substitution) else TextSubstitution(text=str(condition))
self.__false_value = false_value if isinstance(false_value, Substitution) else TextSubstitution(text=false_value)
self.__true_value = true_value if isinstance(true_value, Substitution) else TextSubstitution(text=true_value)
def perform(self, context):
if context.perform_substitution(self.__condition).lower() in ['false', '0', '']:
return context.perform_substitution(self.__false_value)
return context.perform_substitution(self.__true_value)
class WebotsLauncher(ExecuteProcess):
def __init__(self, output='screen', world=None, gui=True, mode='realtime', stream=False, ros2_supervisor=False,
port='1234', **kwargs):
if sys.platform == 'win32':
print('WARNING: Native webots_ros2 compatibility with Windows is deprecated and will be removed soon. Please use a '
'WSL (Windows Subsystem for Linux) environment instead.', file=sys.stderr)
print('WARNING: Check https://github.com/cyberbotics/webots_ros2/wiki/Complete-Installation-Guide for more '
'information.', file=sys.stderr)
self.__is_wsl = is_wsl()
self.__has_shared_folder = has_shared_folder()
self.__is_supervisor = ros2_supervisor
if self.__is_supervisor:
self._supervisor = Ros2SupervisorLauncher(port=port)
# Find Webots executable
if not self.__has_shared_folder:
webots_path = get_webots_home(show_warning=True)
if webots_path is None:
handle_webots_installation()
webots_path = get_webots_home()
if self.__is_wsl:
webots_path = os.path.join(webots_path, 'msys64', 'mingw64', 'bin', 'webots.exe')
else:
webots_path = os.path.join(webots_path, 'webots')
else:
webots_path = ''
mode = mode if isinstance(mode, Substitution) else TextSubstitution(text=mode)
self.__world_copy = tempfile.NamedTemporaryFile(mode='w+', suffix='_world_with_URDF_robot.wbt', delete=False)
self.__world = world
if not isinstance(world, Substitution):
world = TextSubstitution(text=self.__world_copy.name)
if self.__is_wsl:
wsl_tmp_path = subprocess.check_output(['wslpath', '-w', self.__world_copy.name]).strip().decode('utf-8')
world = TextSubstitution(text=wsl_tmp_path)
no_rendering = _ConditionalSubstitution(condition=gui, false_value='--no-rendering')
stdout = _ConditionalSubstitution(condition=gui, false_value='--stdout')
stderr = _ConditionalSubstitution(condition=gui, false_value='--stderr')
minimize = _ConditionalSubstitution(condition=gui, false_value='--minimize')
if isinstance(stream, bool):
stream_argument = _ConditionalSubstitution(condition=stream, true_value='--stream')
else:
stream_argument = "--stream=" + stream
port_argument = '--port=' + port
xvfb_run_prefix = []
if 'WEBOTS_OFFSCREEN' in os.environ:
xvfb_run_prefix.append('xvfb-run')
xvfb_run_prefix.append('--auto-servernum')
no_rendering = '--no-rendering'
# Initialize command to start Webots remotely through TCP
if self.__has_shared_folder:
webots_tcp_client = (os.path.join(get_package_share_directory('webots_ros2_driver'), 'scripts',
'webots_tcp_client.py'))
super().__init__(
output=output,
cmd=[
'python3',
webots_tcp_client,
stream_argument,
port_argument,
no_rendering,
stdout,
stderr,
minimize,
'--batch',
['--mode=', mode],
os.path.basename(self.__world_copy.name),
],
name='webots_tcp_client',
**kwargs
)
# Initialize command to start Webots locally
else:
# no_rendering, stdout, stderr, minimize
super().__init__(
output=output,
cmd=xvfb_run_prefix + [
webots_path,
stream_argument,
port_argument,
no_rendering,
stdout,
stderr,
minimize,
world,
'--batch',
['--mode=', mode],
],
name='webots',
**kwargs
)
def execute(self, context: LaunchContext):
# User can give a PathJoinSubstitution world or an absolute path world
if isinstance(self.__world, PathJoinSubstitution):
world_path = self.__world.perform(context)
context.launch_configurations['world'] = self.__world_copy.name
else:
world_path = self.__world
shutil.copy2(world_path, self.__world_copy.name)
# look for a wbproj file and copy if available
wbproj_path = Path(world_path).with_name('.' + Path(world_path).stem + '.wbproj')
if wbproj_path.exists():
wbproj_copy_path = Path(self.__world_copy.name).with_name('.' + Path(self.__world_copy.name).stem +
'.wbproj')
shutil.copy2(wbproj_path, wbproj_copy_path)
# copy sumo network file if it exists
sumonet_path = Path(world_path).with_name(Path(world_path).stem + '_net')
if sumonet_path.exists():
sumonet_copy_path = Path(self.__world_copy.name).with_name(Path(self.__world_copy.name).stem + '_net')
shutil.copytree(sumonet_path, sumonet_copy_path)
# Update relative paths in the world
with open(self.__world_copy.name, 'r') as file:
content = file.read()
for match in re.finditer('\"((?:[^\"]*)\\.(?:jpe?g|png|hdr|obj|stl|dae|wav|mp3|proto))\"', content):
url_path = match.group(1)
# Absolute path or Webots relative path or Web paths
if os.path.isabs(url_path) or url_path.startswith('webots://') or url_path.startswith('http://') \
or url_path.startswith('https://'):
continue
new_url_path = os.path.split(world_path)[0] + '/' + url_path
if self.__has_shared_folder:
# Copy asset to shared folder
shutil.copy(new_url_path, os.path.join(container_shared_folder(), os.path.basename(new_url_path)))
new_url_path = './' + os.path.basename(new_url_path)
if self.__is_wsl:
command = ['wslpath', '-w', new_url_path]
new_url_path = subprocess.check_output(command).strip().decode('utf-8').replace('\\', '/')
new_url_path = '"' + new_url_path + '"'
url_path = '"' + url_path + '"'
content = content.replace(url_path, new_url_path)
with open(self.__world_copy.name, 'w') as file:
file.write(content)
# Add the Ros2Supervisor
if self.__is_supervisor:
indent = ' '
world_file = open(self.__world_copy.name, 'a')
world_file.write('Robot {\n')
world_file.write(indent + 'name "Ros2Supervisor"\n')
world_file.write(indent + 'controller "<extern>"\n')
world_file.write(indent + 'supervisor TRUE\n')
world_file.write('}\n')
world_file.close()
# Copy world file to shared folder
if self.__has_shared_folder:
shared_world_file = os.path.join(container_shared_folder(), os.path.basename(self.__world_copy.name))
shutil.copy(self.__world_copy.name, shared_world_file)
if wbproj_path.exists():
shared_wbproj_copy_path = Path(shared_world_file).with_name('.' + Path(shared_world_file).stem + '.wbproj')
shutil.copy(wbproj_path, shared_wbproj_copy_path)
# Execute process
return super().execute(context)
def _shutdown_process(self, context, *, send_sigint):
# Remove copy of the world and the corresponding ".wbproj" file
if self.__world_copy:
self.__world_copy.close()
if os.path.isfile(self.__world_copy.name):
os.unlink(self.__world_copy.name)
path, file = os.path.split(self.__world_copy.name)
world_copy_secondary_file = os.path.join(path, '.' + file[:-1] + 'proj')
if os.path.isfile(world_copy_secondary_file):
os.unlink(world_copy_secondary | __init__ | identifier_name |
webots_launcher.py | implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This launcher simply starts Webots."""
import os
import re
import shutil
import subprocess
import sys
import tempfile
from pathlib import Path
from launch.actions import ExecuteProcess
from launch_ros.actions import Node
from launch.launch_context import LaunchContext
from launch.substitution import Substitution
from launch.substitutions import TextSubstitution
from launch.substitutions.path_join_substitution import PathJoinSubstitution
from ament_index_python.packages import get_package_share_directory, get_package_prefix
from webots_ros2_driver.utils import (get_webots_home,
handle_webots_installation,
is_wsl,
has_shared_folder,
container_shared_folder,
controller_url_prefix)
class _ConditionalSubstitution(Substitution):
|
class WebotsLauncher(ExecuteProcess):
def __init__(self, output='screen', world=None, gui=True, mode='realtime', stream=False, ros2_supervisor=False,
port='1234', **kwargs):
if sys.platform == 'win32':
print('WARNING: Native webots_ros2 compatibility with Windows is deprecated and will be removed soon. Please use a '
'WSL (Windows Subsystem for Linux) environment instead.', file=sys.stderr)
print('WARNING: Check https://github.com/cyberbotics/webots_ros2/wiki/Complete-Installation-Guide for more '
'information.', file=sys.stderr)
self.__is_wsl = is_wsl()
self.__has_shared_folder = has_shared_folder()
self.__is_supervisor = ros2_supervisor
if self.__is_supervisor:
self._supervisor = Ros2SupervisorLauncher(port=port)
# Find Webots executable
if not self.__has_shared_folder:
webots_path = get_webots_home(show_warning=True)
if webots_path is None:
handle_webots_installation()
webots_path = get_webots_home()
if self.__is_wsl:
webots_path = os.path.join(webots_path, 'msys64', 'mingw64', 'bin', 'webots.exe')
else:
webots_path = os.path.join(webots_path, 'webots')
else:
webots_path = ''
mode = mode if isinstance(mode, Substitution) else TextSubstitution(text=mode)
self.__world_copy = tempfile.NamedTemporaryFile(mode='w+', suffix='_world_with_URDF_robot.wbt', delete=False)
self.__world = world
if not isinstance(world, Substitution):
world = TextSubstitution(text=self.__world_copy.name)
if self.__is_wsl:
wsl_tmp_path = subprocess.check_output(['wslpath', '-w', self.__world_copy.name]).strip().decode('utf-8')
world = TextSubstitution(text=wsl_tmp_path)
no_rendering = _ConditionalSubstitution(condition=gui, false_value='--no-rendering')
stdout = _ConditionalSubstitution(condition=gui, false_value='--stdout')
stderr = _ConditionalSubstitution(condition=gui, false_value='--stderr')
minimize = _ConditionalSubstitution(condition=gui, false_value='--minimize')
if isinstance(stream, bool):
stream_argument = _ConditionalSubstitution(condition=stream, true_value='--stream')
else:
stream_argument = "--stream=" + stream
port_argument = '--port=' + port
xvfb_run_prefix = []
if 'WEBOTS_OFFSCREEN' in os.environ:
xvfb_run_prefix.append('xvfb-run')
xvfb_run_prefix.append('--auto-servernum')
no_rendering = '--no-rendering'
# Initialize command to start Webots remotely through TCP
if self.__has_shared_folder:
webots_tcp_client = (os.path.join(get_package_share_directory('webots_ros2_driver'), 'scripts',
'webots_tcp_client.py'))
super().__init__(
output=output,
cmd=[
'python3',
webots_tcp_client,
stream_argument,
port_argument,
no_rendering,
stdout,
stderr,
minimize,
'--batch',
['--mode=', mode],
os.path.basename(self.__world_copy.name),
],
name='webots_tcp_client',
**kwargs
)
# Initialize command to start Webots locally
else:
# no_rendering, stdout, stderr, minimize
super().__init__(
output=output,
cmd=xvfb_run_prefix + [
webots_path,
stream_argument,
port_argument,
no_rendering,
stdout,
stderr,
minimize,
world,
'--batch',
['--mode=', mode],
],
name='webots',
**kwargs
)
def execute(self, context: LaunchContext):
# User can give a PathJoinSubstitution world or an absolute path world
if isinstance(self.__world, PathJoinSubstitution):
world_path = self.__world.perform(context)
context.launch_configurations['world'] = self.__world_copy.name
else:
world_path = self.__world
shutil.copy2(world_path, self.__world_copy.name)
# look for a wbproj file and copy if available
wbproj_path = Path(world_path).with_name('.' + Path(world_path).stem + '.wbproj')
if wbproj_path.exists():
wbproj_copy_path = Path(self.__world_copy.name).with_name('.' + Path(self.__world_copy.name).stem +
'.wbproj')
shutil.copy2(wbproj_path, wbproj_copy_path)
# copy sumo network file if it exists
sumonet_path = Path(world_path).with_name(Path(world_path).stem + '_net')
if sumonet_path.exists():
sumonet_copy_path = Path(self.__world_copy.name).with_name(Path(self.__world_copy.name).stem + '_net')
shutil.copytree(sumonet_path, sumonet_copy_path)
# Update relative paths in the world
with open(self.__world_copy.name, 'r') as file:
content = file.read()
for match in re.finditer('\"((?:[^\"]*)\\.(?:jpe?g|png|hdr|obj|stl|dae|wav|mp3|proto))\"', content):
url_path = match.group(1)
# Absolute path or Webots relative path or Web paths
if os.path.isabs(url_path) or url_path.startswith('webots://') or url_path.startswith('http://') \
or url_path.startswith('https://'):
continue
new_url_path = os.path.split(world_path)[0] + '/' + url_path
if self.__has_shared_folder:
# Copy asset to shared folder
shutil.copy(new_url_path, os.path.join(container_shared_folder(), os.path.basename(new_url_path)))
new_url_path = './' + os.path.basename(new_url_path)
if self.__is_wsl:
command = ['wslpath', '-w', new_url_path]
new_url_path = subprocess.check_output(command).strip().decode('utf-8').replace('\\', '/')
new_url_path = '"' + new_url_path + '"'
url_path = '"' + url_path + '"'
content = content.replace(url_path, new_url_path)
with open(self.__world_copy.name, 'w') as file:
file.write(content)
# Add the Ros2Supervisor
if self.__is_supervisor:
indent = ' '
world_file = open(self.__world_copy.name, 'a')
world_file.write('Robot {\n')
world_file.write(indent + 'name "Ros2Supervisor"\n')
world_file.write(indent + 'controller "<extern>"\n')
world_file.write(indent + 'supervisor TRUE\n')
world_file.write('}\n')
world_file.close()
# Copy world file to shared folder
if self.__has_shared_folder:
shared_world_file = os.path.join(container_shared_folder(), os.path.basename(self.__world_copy.name))
shutil.copy(self.__world_copy.name, shared_world_file)
if wbproj_path.exists():
shared_wbproj_copy_path = Path(shared_world_file).with_name('.' + Path(shared_world_file).stem + '.wbproj')
shutil.copy(wbproj_path, shared_wbproj_copy_path)
# Execute process
return super().execute(context)
def _shutdown_process(self, context, *, send_sigint):
# Remove copy of the world and the corresponding ".wbproj" file
if self.__world_copy:
self.__world_copy.close()
if os.path.isfile(self.__world_copy.name):
os.unlink(self.__world_copy.name)
path, file = os.path.split(self.__world_copy.name)
world_copy_secondary_file = os.path.join(path, '.' + file[:-1] + 'proj')
if os.path.isfile(world_copy_secondary_file):
os.unlink(world_copy_secondary | def __init__(self, *, condition, false_value='', true_value=''):
self.__condition = condition if isinstance(condition, Substitution) else TextSubstitution(text=str(condition))
self.__false_value = false_value if isinstance(false_value, Substitution) else TextSubstitution(text=false_value)
self.__true_value = true_value if isinstance(true_value, Substitution) else TextSubstitution(text=true_value)
def perform(self, context):
if context.perform_substitution(self.__condition).lower() in ['false', '0', '']:
return context.perform_substitution(self.__false_value)
return context.perform_substitution(self.__true_value) | identifier_body |
bikeshare_2.py | 3 = "Which month - January, February, March, April, May, or June?\n"
question_4 = "Which day - Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, or Sunday?\n"
def handle_invalid_inputs(question,my_list):
"""
Gets, tests if the input of a question(question) belongs to a list(my_list) that we attend
and handle invalid inputs of the user.
Args:
(str) question - the question for what we want to get and test the input of the user.
(list) my_list - the list of answer that we wish to have.
Returns:
(str) final_answer - a string containing a good input typed by the user.
"""
final_answer = None
while final_answer not in my_list:
final_answer = input(question).lower()
return final_answer
def get_month():
"""
Gets the input month choosed by the user in case where filter_choosed equal to "month".
Returns:
month - name of the month
"""
return handle_invalid_inputs(question_3, months)
def get_day():
"""
Gets the input day choosed by the user in case where filter_choosed equal to "day".
Returns:
day - string contening the name of the day
"""
return handle_invalid_inputs(question_4, days)
def get_both():
"""
Gets the input month and day choosed by the user in case where filter_choosed equal to "both".
Returns:
(str) get_month()
(str) get_day()
"""
return get_month(), get_day()
def get_filters():
"""
Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter_choosed by, or "all" to apply no month filter_choosed
(str) day - name of the day of week to filter_choosed by, or "all" to apply no day filter_choosed
(str) filter_choosed - name of the the choosed filter_choosed
"""
print('Hello! Let\'s explore some US bikeshare data!')
# get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
city = handle_invalid_inputs(question_1, cities)
# get the user input of the filter_choosed (month, day, both, or not at all(none))
filter_choosed = handle_invalid_inputs(question_2, filters)
# if filter_choosed == "month"
if filter_choosed == "month":
# get user input for month (all, january, february, ... , june)
month = get_month()
day = "all"
# if filter_choosed == "day"
if filter_choosed == "day":
# get user input for day of week (all, monday, tuesday, ... sunday)
day = get_day()
month = "all"
# if filter_choosed == "both"
if filter_choosed == "both":
# get user input for day of week and month
month, day = get_both()
# if filter_choosed == none
if filter_choosed == "none":
month = "all"
day = "all"
print('-'*40)
return city, month, day, filter_choosed
def load_data(city, month, day):
"""
Loads data for the specified city and filter_chooseds by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter_choosed by, or "all" to apply no month filter_choosed | # load data file into a dataframe
df = pd.read_csv(CITY_DATA[city])
# convert the Start Time column to datetime
df['Start Time'] = pd.to_datetime(df['Start Time'])
# extract month, day of week and hour from Start Time to create new columns
df['month'] = df['Start Time'].dt.month
df['day_of_week'] = df['Start Time'].dt.weekday_name
df['hour'] = df['Start Time'].dt.hour
# filter_choosed by month if applicable
if month != 'all':
# use the index of the months list to get the corresponding int
months = ["january", "february", "march", "april", "may", "june"]
month = months.index(month) + 1
# filter_choosed by month to create the new dataframe
df = df[df['month'] == month]
# filter_choosed by day of week if applicable
if day != 'all':
# filter_choosed by day of week to create the new dataframe
df = df[df['day_of_week'] == day.title()]
return df
def popular_counts_column(column):
"""
calculate statistics(popular entry of that column and his occurrence) on the most frequent times of travel.
Args:
(pd.Series) column - column of a DataFrame
Returns:
popular_anything - string containing the popular entry
counts_anything - int containing number of occurence of that popular entry
"""
popular_anything = column.mode()[0]
counts_anything = column.value_counts()[popular_anything]
return popular_anything, counts_anything
def time_stats(df, filter_choosed):
"""Displays statistics on the most frequent times of travel."""
print('\nCalculating The Most Frequent Times of Travel...\n')
start_time = time.time()
# display the most common month and number of occurrence
popular_month, counts_month = popular_counts_column(df['month'])
print('The Most Popular month:{}, Counts:{},'.format(popular_month, counts_month), end = ' ')
# display the most common day of week and number of occurence
popular_day, counts_day = popular_counts_column(df['day_of_week'])
print('The Most Popular day:{}, Counts:{},'.format(popular_day, counts_day), end = ' ')
# display the most common start hour and number of occurrence
popular_hour, counts_hour = popular_counts_column(df['hour'])
print('The Most Popular hour:{}, Counts:{}, Filter:{}\n'.format(popular_hour, counts_hour, filter_choosed))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def station_stats(df, filter_choosed):
"""Displays statistics on the most popular stations and trip."""
print('\nCalculating The Most Popular Stations and Trip...\n')
start_time = time.time()
# display most commonly used start station
popular_start, counts_start = popular_counts_column(df['Start Station'])
print('Start Station:{}, Counts:{},'.format(popular_start, counts_start), end = ' ')
# display most commonly used end station
popular_end, counts_end = popular_counts_column(df['End Station'])
print('End Station:{}, Counts:{},'.format(popular_end, counts_end, filter_choosed), end = ' ')
# display most frequent combination of start station and end station trip
popular_start_end, counts_start_end = popular_counts_column(df['Start Station'] + '-' + df['End Station'])
print("Popular Trip:('{}'-'{}'), Counts:{}, Filter:{}\n".format(popular_start_end.split('-')[0],popular_start_end.split('-')[1], counts_start_end, filter_choosed))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df, filter_choosed):
"""Displays statistics on the total and average trip duration."""
print('\nCalculating Trip Duration...\n')
start_time = time.time()
# display total travel time
total_travel_time = df['Trip Duration'].sum()
travel_number = df['Trip Duration'].size
print('Total Duration:{}, Count:{},'.format(total_travel_time, travel_number), end = ' ')
# display mean travel time
mean_travel_time = df['Trip Duration'].mean()
print('Avg Duration:{}, Filter:{}\n'.format(mean_travel_time, filter_choosed))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def user_stats(df, city, filter_choosed):
"""Displays statistics on bikeshare users."""
print('\nCalculating User Stats...\n')
start_time = time.time()
# Display counts of user types
print('Statistics for User Types ...... \n')
user_types_dict = dict(df['User Type'].value_counts())
for key, value in user_types_dict.items():
print('{}:{}'.format(key,value), end = ' ')
print('filter:', filter_choosed)
# Display counts of gender
print | (str) day - name of the day of week to filter_choosed by, or "all" to apply no day filter_choosed
Returns:
df - Pandas DataFrame containing city data filter_chooseded by month and day
"""
| random_line_split |
bikeshare_2.py | 3 = "Which month - January, February, March, April, May, or June?\n"
question_4 = "Which day - Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, or Sunday?\n"
def handle_invalid_inputs(question,my_list):
"""
Gets, tests if the input of a question(question) belongs to a list(my_list) that we attend
and handle invalid inputs of the user.
Args:
(str) question - the question for what we want to get and test the input of the user.
(list) my_list - the list of answer that we wish to have.
Returns:
(str) final_answer - a string containing a good input typed by the user.
"""
final_answer = None
while final_answer not in my_list:
final_answer = input(question).lower()
return final_answer
def get_month():
"""
Gets the input month choosed by the user in case where filter_choosed equal to "month".
Returns:
month - name of the month
"""
return handle_invalid_inputs(question_3, months)
def get_day():
"""
Gets the input day choosed by the user in case where filter_choosed equal to "day".
Returns:
day - string contening the name of the day
"""
return handle_invalid_inputs(question_4, days)
def get_both():
"""
Gets the input month and day choosed by the user in case where filter_choosed equal to "both".
Returns:
(str) get_month()
(str) get_day()
"""
return get_month(), get_day()
def get_filters():
"""
Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter_choosed by, or "all" to apply no month filter_choosed
(str) day - name of the day of week to filter_choosed by, or "all" to apply no day filter_choosed
(str) filter_choosed - name of the the choosed filter_choosed
"""
print('Hello! Let\'s explore some US bikeshare data!')
# get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
city = handle_invalid_inputs(question_1, cities)
# get the user input of the filter_choosed (month, day, both, or not at all(none))
filter_choosed = handle_invalid_inputs(question_2, filters)
# if filter_choosed == "month"
if filter_choosed == "month":
# get user input for month (all, january, february, ... , june)
month = get_month()
day = "all"
# if filter_choosed == "day"
if filter_choosed == "day":
# get user input for day of week (all, monday, tuesday, ... sunday)
|
# if filter_choosed == "both"
if filter_choosed == "both":
# get user input for day of week and month
month, day = get_both()
# if filter_choosed == none
if filter_choosed == "none":
month = "all"
day = "all"
print('-'*40)
return city, month, day, filter_choosed
def load_data(city, month, day):
"""
Loads data for the specified city and filter_chooseds by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter_choosed by, or "all" to apply no month filter_choosed
(str) day - name of the day of week to filter_choosed by, or "all" to apply no day filter_choosed
Returns:
df - Pandas DataFrame containing city data filter_chooseded by month and day
"""
# load data file into a dataframe
df = pd.read_csv(CITY_DATA[city])
# convert the Start Time column to datetime
df['Start Time'] = pd.to_datetime(df['Start Time'])
# extract month, day of week and hour from Start Time to create new columns
df['month'] = df['Start Time'].dt.month
df['day_of_week'] = df['Start Time'].dt.weekday_name
df['hour'] = df['Start Time'].dt.hour
# filter_choosed by month if applicable
if month != 'all':
# use the index of the months list to get the corresponding int
months = ["january", "february", "march", "april", "may", "june"]
month = months.index(month) + 1
# filter_choosed by month to create the new dataframe
df = df[df['month'] == month]
# filter_choosed by day of week if applicable
if day != 'all':
# filter_choosed by day of week to create the new dataframe
df = df[df['day_of_week'] == day.title()]
return df
def popular_counts_column(column):
"""
calculate statistics(popular entry of that column and his occurrence) on the most frequent times of travel.
Args:
(pd.Series) column - column of a DataFrame
Returns:
popular_anything - string containing the popular entry
counts_anything - int containing number of occurence of that popular entry
"""
popular_anything = column.mode()[0]
counts_anything = column.value_counts()[popular_anything]
return popular_anything, counts_anything
def time_stats(df, filter_choosed):
"""Displays statistics on the most frequent times of travel."""
print('\nCalculating The Most Frequent Times of Travel...\n')
start_time = time.time()
# display the most common month and number of occurrence
popular_month, counts_month = popular_counts_column(df['month'])
print('The Most Popular month:{}, Counts:{},'.format(popular_month, counts_month), end = ' ')
# display the most common day of week and number of occurence
popular_day, counts_day = popular_counts_column(df['day_of_week'])
print('The Most Popular day:{}, Counts:{},'.format(popular_day, counts_day), end = ' ')
# display the most common start hour and number of occurrence
popular_hour, counts_hour = popular_counts_column(df['hour'])
print('The Most Popular hour:{}, Counts:{}, Filter:{}\n'.format(popular_hour, counts_hour, filter_choosed))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def station_stats(df, filter_choosed):
"""Displays statistics on the most popular stations and trip."""
print('\nCalculating The Most Popular Stations and Trip...\n')
start_time = time.time()
# display most commonly used start station
popular_start, counts_start = popular_counts_column(df['Start Station'])
print('Start Station:{}, Counts:{},'.format(popular_start, counts_start), end = ' ')
# display most commonly used end station
popular_end, counts_end = popular_counts_column(df['End Station'])
print('End Station:{}, Counts:{},'.format(popular_end, counts_end, filter_choosed), end = ' ')
# display most frequent combination of start station and end station trip
popular_start_end, counts_start_end = popular_counts_column(df['Start Station'] + '-' + df['End Station'])
print("Popular Trip:('{}'-'{}'), Counts:{}, Filter:{}\n".format(popular_start_end.split('-')[0],popular_start_end.split('-')[1], counts_start_end, filter_choosed))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df, filter_choosed):
"""Displays statistics on the total and average trip duration."""
print('\nCalculating Trip Duration...\n')
start_time = time.time()
# display total travel time
total_travel_time = df['Trip Duration'].sum()
travel_number = df['Trip Duration'].size
print('Total Duration:{}, Count:{},'.format(total_travel_time, travel_number), end = ' ')
# display mean travel time
mean_travel_time = df['Trip Duration'].mean()
print('Avg Duration:{}, Filter:{}\n'.format(mean_travel_time, filter_choosed))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def user_stats(df, city, filter_choosed):
"""Displays statistics on bikeshare users."""
print('\nCalculating User Stats...\n')
start_time = time.time()
# Display counts of user types
print('Statistics for User Types ...... \n')
user_types_dict = dict(df['User Type'].value_counts())
for key, value in user_types_dict.items():
print('{}:{}'.format(key,value), end = ' ')
print('filter:', filter_choosed)
# Display counts of gender
| day = get_day()
month = "all" | conditional_block |
bikeshare_2.py | ()
"""
return get_month(), get_day()
def get_filters():
"""
Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter_choosed by, or "all" to apply no month filter_choosed
(str) day - name of the day of week to filter_choosed by, or "all" to apply no day filter_choosed
(str) filter_choosed - name of the the choosed filter_choosed
"""
print('Hello! Let\'s explore some US bikeshare data!')
# get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
city = handle_invalid_inputs(question_1, cities)
# get the user input of the filter_choosed (month, day, both, or not at all(none))
filter_choosed = handle_invalid_inputs(question_2, filters)
# if filter_choosed == "month"
if filter_choosed == "month":
# get user input for month (all, january, february, ... , june)
month = get_month()
day = "all"
# if filter_choosed == "day"
if filter_choosed == "day":
# get user input for day of week (all, monday, tuesday, ... sunday)
day = get_day()
month = "all"
# if filter_choosed == "both"
if filter_choosed == "both":
# get user input for day of week and month
month, day = get_both()
# if filter_choosed == none
if filter_choosed == "none":
month = "all"
day = "all"
print('-'*40)
return city, month, day, filter_choosed
def load_data(city, month, day):
"""
Loads data for the specified city and filter_chooseds by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter_choosed by, or "all" to apply no month filter_choosed
(str) day - name of the day of week to filter_choosed by, or "all" to apply no day filter_choosed
Returns:
df - Pandas DataFrame containing city data filter_chooseded by month and day
"""
# load data file into a dataframe
df = pd.read_csv(CITY_DATA[city])
# convert the Start Time column to datetime
df['Start Time'] = pd.to_datetime(df['Start Time'])
# extract month, day of week and hour from Start Time to create new columns
df['month'] = df['Start Time'].dt.month
df['day_of_week'] = df['Start Time'].dt.weekday_name
df['hour'] = df['Start Time'].dt.hour
# filter_choosed by month if applicable
if month != 'all':
# use the index of the months list to get the corresponding int
months = ["january", "february", "march", "april", "may", "june"]
month = months.index(month) + 1
# filter_choosed by month to create the new dataframe
df = df[df['month'] == month]
# filter_choosed by day of week if applicable
if day != 'all':
# filter_choosed by day of week to create the new dataframe
df = df[df['day_of_week'] == day.title()]
return df
def popular_counts_column(column):
"""
calculate statistics(popular entry of that column and his occurrence) on the most frequent times of travel.
Args:
(pd.Series) column - column of a DataFrame
Returns:
popular_anything - string containing the popular entry
counts_anything - int containing number of occurence of that popular entry
"""
popular_anything = column.mode()[0]
counts_anything = column.value_counts()[popular_anything]
return popular_anything, counts_anything
def time_stats(df, filter_choosed):
"""Displays statistics on the most frequent times of travel."""
print('\nCalculating The Most Frequent Times of Travel...\n')
start_time = time.time()
# display the most common month and number of occurrence
popular_month, counts_month = popular_counts_column(df['month'])
print('The Most Popular month:{}, Counts:{},'.format(popular_month, counts_month), end = ' ')
# display the most common day of week and number of occurence
popular_day, counts_day = popular_counts_column(df['day_of_week'])
print('The Most Popular day:{}, Counts:{},'.format(popular_day, counts_day), end = ' ')
# display the most common start hour and number of occurrence
popular_hour, counts_hour = popular_counts_column(df['hour'])
print('The Most Popular hour:{}, Counts:{}, Filter:{}\n'.format(popular_hour, counts_hour, filter_choosed))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def station_stats(df, filter_choosed):
"""Displays statistics on the most popular stations and trip."""
print('\nCalculating The Most Popular Stations and Trip...\n')
start_time = time.time()
# display most commonly used start station
popular_start, counts_start = popular_counts_column(df['Start Station'])
print('Start Station:{}, Counts:{},'.format(popular_start, counts_start), end = ' ')
# display most commonly used end station
popular_end, counts_end = popular_counts_column(df['End Station'])
print('End Station:{}, Counts:{},'.format(popular_end, counts_end, filter_choosed), end = ' ')
# display most frequent combination of start station and end station trip
popular_start_end, counts_start_end = popular_counts_column(df['Start Station'] + '-' + df['End Station'])
print("Popular Trip:('{}'-'{}'), Counts:{}, Filter:{}\n".format(popular_start_end.split('-')[0],popular_start_end.split('-')[1], counts_start_end, filter_choosed))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df, filter_choosed):
"""Displays statistics on the total and average trip duration."""
print('\nCalculating Trip Duration...\n')
start_time = time.time()
# display total travel time
total_travel_time = df['Trip Duration'].sum()
travel_number = df['Trip Duration'].size
print('Total Duration:{}, Count:{},'.format(total_travel_time, travel_number), end = ' ')
# display mean travel time
mean_travel_time = df['Trip Duration'].mean()
print('Avg Duration:{}, Filter:{}\n'.format(mean_travel_time, filter_choosed))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def user_stats(df, city, filter_choosed):
"""Displays statistics on bikeshare users."""
print('\nCalculating User Stats...\n')
start_time = time.time()
# Display counts of user types
print('Statistics for User Types ...... \n')
user_types_dict = dict(df['User Type'].value_counts())
for key, value in user_types_dict.items():
print('{}:{}'.format(key,value), end = ' ')
print('filter:', filter_choosed)
# Display counts of gender
print('\nStatistics for gender ...... \n')
if city != 'washington':
gender_dict = dict(df['Gender'].value_counts())
for key, value in gender_dict.items():
print('{}:{}'.format(key,value), end = ' ')
print(' filter:', filter_choosed)
else:
print('No data about gender')
# Display earliest, most recent, and most common year of birth
print('\nStatistics for year of birth ...... \n')
if city != 'washington':
earliest_year = df['Birth Year'].min()
most_recent_year = df['Birth Year'].max()
popular_year = df['Birth Year'].mode()[0]
print('Earliest Year:{}, Most Recent Year:{}, Most Popular Year:{}, filter:{}'.format(earliest_year, most_recent_year, popular_year, filter_choosed))
else:
print('No data about birth of year')
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def individual_trip_data(df):
| """Displays individual trip data of each user."""
data = df.to_dict('records')
i = 0
j = 5
length = len(data)
while True:
see_trip = input('\nWould you like to individual trip data? Type yes or no.\n')
if see_trip.lower() != 'yes':
break
else:
if i < j and i < length:
for i in range(j):
print(data[i])
i = j
j += 5 | identifier_body |
|
bikeshare_2.py | 3 = "Which month - January, February, March, April, May, or June?\n"
question_4 = "Which day - Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, or Sunday?\n"
def handle_invalid_inputs(question,my_list):
"""
Gets, tests if the input of a question(question) belongs to a list(my_list) that we attend
and handle invalid inputs of the user.
Args:
(str) question - the question for what we want to get and test the input of the user.
(list) my_list - the list of answer that we wish to have.
Returns:
(str) final_answer - a string containing a good input typed by the user.
"""
final_answer = None
while final_answer not in my_list:
final_answer = input(question).lower()
return final_answer
def get_month():
"""
Gets the input month choosed by the user in case where filter_choosed equal to "month".
Returns:
month - name of the month
"""
return handle_invalid_inputs(question_3, months)
def get_day():
"""
Gets the input day choosed by the user in case where filter_choosed equal to "day".
Returns:
day - string contening the name of the day
"""
return handle_invalid_inputs(question_4, days)
def get_both():
"""
Gets the input month and day choosed by the user in case where filter_choosed equal to "both".
Returns:
(str) get_month()
(str) get_day()
"""
return get_month(), get_day()
def get_filters():
"""
Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter_choosed by, or "all" to apply no month filter_choosed
(str) day - name of the day of week to filter_choosed by, or "all" to apply no day filter_choosed
(str) filter_choosed - name of the the choosed filter_choosed
"""
print('Hello! Let\'s explore some US bikeshare data!')
# get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
city = handle_invalid_inputs(question_1, cities)
# get the user input of the filter_choosed (month, day, both, or not at all(none))
filter_choosed = handle_invalid_inputs(question_2, filters)
# if filter_choosed == "month"
if filter_choosed == "month":
# get user input for month (all, january, february, ... , june)
month = get_month()
day = "all"
# if filter_choosed == "day"
if filter_choosed == "day":
# get user input for day of week (all, monday, tuesday, ... sunday)
day = get_day()
month = "all"
# if filter_choosed == "both"
if filter_choosed == "both":
# get user input for day of week and month
month, day = get_both()
# if filter_choosed == none
if filter_choosed == "none":
month = "all"
day = "all"
print('-'*40)
return city, month, day, filter_choosed
def load_data(city, month, day):
"""
Loads data for the specified city and filter_chooseds by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter_choosed by, or "all" to apply no month filter_choosed
(str) day - name of the day of week to filter_choosed by, or "all" to apply no day filter_choosed
Returns:
df - Pandas DataFrame containing city data filter_chooseded by month and day
"""
# load data file into a dataframe
df = pd.read_csv(CITY_DATA[city])
# convert the Start Time column to datetime
df['Start Time'] = pd.to_datetime(df['Start Time'])
# extract month, day of week and hour from Start Time to create new columns
df['month'] = df['Start Time'].dt.month
df['day_of_week'] = df['Start Time'].dt.weekday_name
df['hour'] = df['Start Time'].dt.hour
# filter_choosed by month if applicable
if month != 'all':
# use the index of the months list to get the corresponding int
months = ["january", "february", "march", "april", "may", "june"]
month = months.index(month) + 1
# filter_choosed by month to create the new dataframe
df = df[df['month'] == month]
# filter_choosed by day of week if applicable
if day != 'all':
# filter_choosed by day of week to create the new dataframe
df = df[df['day_of_week'] == day.title()]
return df
def | (column):
"""
calculate statistics(popular entry of that column and his occurrence) on the most frequent times of travel.
Args:
(pd.Series) column - column of a DataFrame
Returns:
popular_anything - string containing the popular entry
counts_anything - int containing number of occurence of that popular entry
"""
popular_anything = column.mode()[0]
counts_anything = column.value_counts()[popular_anything]
return popular_anything, counts_anything
def time_stats(df, filter_choosed):
"""Displays statistics on the most frequent times of travel."""
print('\nCalculating The Most Frequent Times of Travel...\n')
start_time = time.time()
# display the most common month and number of occurrence
popular_month, counts_month = popular_counts_column(df['month'])
print('The Most Popular month:{}, Counts:{},'.format(popular_month, counts_month), end = ' ')
# display the most common day of week and number of occurence
popular_day, counts_day = popular_counts_column(df['day_of_week'])
print('The Most Popular day:{}, Counts:{},'.format(popular_day, counts_day), end = ' ')
# display the most common start hour and number of occurrence
popular_hour, counts_hour = popular_counts_column(df['hour'])
print('The Most Popular hour:{}, Counts:{}, Filter:{}\n'.format(popular_hour, counts_hour, filter_choosed))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def station_stats(df, filter_choosed):
"""Displays statistics on the most popular stations and trip."""
print('\nCalculating The Most Popular Stations and Trip...\n')
start_time = time.time()
# display most commonly used start station
popular_start, counts_start = popular_counts_column(df['Start Station'])
print('Start Station:{}, Counts:{},'.format(popular_start, counts_start), end = ' ')
# display most commonly used end station
popular_end, counts_end = popular_counts_column(df['End Station'])
print('End Station:{}, Counts:{},'.format(popular_end, counts_end, filter_choosed), end = ' ')
# display most frequent combination of start station and end station trip
popular_start_end, counts_start_end = popular_counts_column(df['Start Station'] + '-' + df['End Station'])
print("Popular Trip:('{}'-'{}'), Counts:{}, Filter:{}\n".format(popular_start_end.split('-')[0],popular_start_end.split('-')[1], counts_start_end, filter_choosed))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df, filter_choosed):
"""Displays statistics on the total and average trip duration."""
print('\nCalculating Trip Duration...\n')
start_time = time.time()
# display total travel time
total_travel_time = df['Trip Duration'].sum()
travel_number = df['Trip Duration'].size
print('Total Duration:{}, Count:{},'.format(total_travel_time, travel_number), end = ' ')
# display mean travel time
mean_travel_time = df['Trip Duration'].mean()
print('Avg Duration:{}, Filter:{}\n'.format(mean_travel_time, filter_choosed))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def user_stats(df, city, filter_choosed):
"""Displays statistics on bikeshare users."""
print('\nCalculating User Stats...\n')
start_time = time.time()
# Display counts of user types
print('Statistics for User Types ...... \n')
user_types_dict = dict(df['User Type'].value_counts())
for key, value in user_types_dict.items():
print('{}:{}'.format(key,value), end = ' ')
print('filter:', filter_choosed)
# Display counts of gender
| popular_counts_column | identifier_name |
main.rs | _path(&path)?;
shared::chmod(&invitation_file, 0o600)?;
invitation_file
.write_all(toml::to_string(self).unwrap().as_bytes())
.with_path(path)?;
Ok(())
}
pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self, Error> {
let path = path.as_ref();
let file = File::open(path).with_path(path)?;
if shared::chmod(&file, 0o600)? {
println!(
"{} updated permissions for {} to 0600.",
"[!]".yellow(),
path.display()
);
}
Ok(toml::from_slice(&std::fs::read(&path).with_path(path)?)?)
}
}
#[derive(Clone, Debug, Default)]
pub struct ServerConfig {
wg_manage_dir_override: Option<PathBuf>,
wg_dir_override: Option<PathBuf>,
}
impl ServerConfig {
fn database_dir(&self) -> &Path {
self.wg_manage_dir_override
.as_deref()
.unwrap_or(*SERVER_DATABASE_DIR)
}
fn database_path(&self, interface: &InterfaceName) -> PathBuf {
PathBuf::new()
.join(self.database_dir())
.join(interface.to_string())
.with_extension("db")
}
fn config_dir(&self) -> &Path {
self.wg_dir_override
.as_deref()
.unwrap_or(*SERVER_CONFIG_DIR)
}
fn config_path(&self, interface: &InterfaceName) -> PathBuf {
PathBuf::new()
.join(self.config_dir())
.join(interface.to_string())
.with_extension("conf")
}
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
if env::var_os("RUST_LOG").is_none() {
// Set some default log settings.
env::set_var("RUST_LOG", "warn,warp=info,wg_manage_server=info");
}
pretty_env_logger::init();
let opt = Opt::from_args();
if unsafe { libc::getuid() } != 0 && !matches!(opt.command, Command::Completions { .. }) {
return Err("innernet-server must run as root.".into());
}
let conf = ServerConfig::default();
match opt.command {
Command::New { opts } => {
if let Err(e) = initialize::init_wizard(&conf, opts) {
eprintln!("{}: {}.", "creation failed".red(), e);
std::process::exit(1);
}
},
Command::Uninstall { interface } => uninstall(&interface, &conf, opt.network)?,
Command::Serve {
interface,
network: routing,
} => serve(*interface, &conf, routing).await?,
Command::AddPeer { interface, args } => add_peer(&interface, &conf, args, opt.network)?,
Command::RenamePeer { interface, args } => rename_peer(&interface, &conf, args)?,
Command::AddCidr { interface, args } => add_cidr(&interface, &conf, args)?,
Command::DeleteCidr { interface, args } => delete_cidr(&interface, &conf, args)?,
Command::Completions { shell } => {
Opt::clap().gen_completions_to("innernet-server", shell, &mut std::io::stdout());
std::process::exit(0);
},
}
Ok(())
}
fn open_database_connection(
interface: &InterfaceName,
conf: &ServerConfig,
) -> Result<rusqlite::Connection, Box<dyn std::error::Error>> {
let database_path = conf.database_path(&interface);
if !Path::new(&database_path).exists() {
return Err(format!(
"no database file found at {}",
database_path.to_string_lossy()
)
.into());
}
let conn = Connection::open(&database_path)?;
// Foreign key constraints aren't on in SQLite by default. Enable.
conn.pragma_update(None, "foreign_keys", &1)?;
db::auto_migrate(&conn)?;
Ok(conn)
}
fn add_peer(
interface: &InterfaceName,
conf: &ServerConfig,
opts: AddPeerOpts,
network: NetworkOpt,
) -> Result<(), Error> {
let config = ConfigFile::from_file(conf.config_path(interface))?;
let conn = open_database_connection(interface, conf)?;
let peers = DatabasePeer::list(&conn)?
.into_iter()
.map(|dp| dp.inner)
.collect::<Vec<_>>();
let cidrs = DatabaseCidr::list(&conn)?;
let cidr_tree = CidrTree::new(&cidrs[..]);
if let Some((peer_request, keypair)) = shared::prompts::add_peer(&peers, &cidr_tree, &opts)? {
let peer = DatabasePeer::create(&conn, peer_request)?;
if cfg!(not(test)) && Device::get(interface, network.backend).is_ok() {
// Update the current WireGuard interface with the new peers.
DeviceUpdate::new()
.add_peer((&*peer).into())
.apply(interface, network.backend)
.map_err(|_| ServerError::WireGuard)?;
println!("adding to WireGuard interface: {}", &*peer);
}
let server_peer = DatabasePeer::get(&conn, 1)?;
prompts::save_peer_invitation(
interface,
&peer,
&*server_peer,
&cidr_tree,
keypair,
&SocketAddr::new(config.address, config.listen_port),
&opts.save_config,
)?;
} else {
println!("exited without creating peer.");
}
Ok(())
}
fn rename_peer(
interface: &InterfaceName,
conf: &ServerConfig,
opts: RenamePeerOpts,
) -> Result<(), Error> {
let conn = open_database_connection(interface, conf)?;
let peers = DatabasePeer::list(&conn)?
.into_iter()
.map(|dp| dp.inner)
.collect::<Vec<_>>();
if let Some((peer_request, old_name)) = shared::prompts::rename_peer(&peers, &opts)? {
let mut db_peer = DatabasePeer::list(&conn)?
.into_iter()
.find(|p| p.name == old_name)
.ok_or( "Peer not found.")?;
let _peer = db_peer.update(&conn, peer_request)?;
} else {
println!("exited without creating peer.");
}
Ok(())
}
fn add_cidr(
interface: &InterfaceName,
conf: &ServerConfig,
opts: AddCidrOpts,
) -> Result<(), Error> {
let conn = open_database_connection(interface, conf)?;
let cidrs = DatabaseCidr::list(&conn)?;
if let Some(cidr_request) = shared::prompts::add_cidr(&cidrs, &opts)? {
let cidr = DatabaseCidr::create(&conn, cidr_request)?;
printdoc!(
"
CIDR \"{cidr_name}\" added.
Right now, peers within {cidr_name} can only see peers in the same CIDR, and in
the special \"innernet-server\" CIDR that includes the innernet server peer.
You'll need to add more associations for peers in diffent CIDRs to communicate.
",
cidr_name = cidr.name.bold()
);
} else {
println!("exited without creating CIDR.");
}
Ok(())
}
fn delete_cidr(
interface: &InterfaceName,
conf: &ServerConfig,
args: DeleteCidrOpts,
) -> Result<(), Error> {
println!("Fetching eligible CIDRs");
let conn = open_database_connection(interface, conf)?;
let cidrs = DatabaseCidr::list(&conn)?;
let peers = DatabasePeer::list(&conn)?
.into_iter()
.map(|dp| dp.inner)
.collect::<Vec<_>>();
let cidr_id = prompts::delete_cidr(&cidrs, &peers, &args)?;
println!("Deleting CIDR...");
let _ = DatabaseCidr::delete(&conn, cidr_id)?;
println!("CIDR deleted.");
Ok(())
}
fn uninstall(
interface: &InterfaceName,
conf: &ServerConfig,
network: NetworkOpt,
) -> Result<(), Error> {
if Confirm::with_theme(&*prompts::THEME)
.with_prompt(&format!(
"Permanently delete network \"{}\"?",
interface.as_str_lossy().yellow()
))
.default(false)
.interact()?
| {
println!("{} bringing down interface (if up).", "[*]".dimmed());
wg::down(interface, network.backend).ok();
let config = conf.config_path(interface);
let data = conf.database_path(interface);
std::fs::remove_file(&config)
.with_path(&config)
.map_err(|e| println!("[!] {}", e.to_string().yellow()))
.ok();
std::fs::remove_file(&data)
.with_path(&data)
.map_err(|e| println!("[!] {}", e.to_string().yellow()))
.ok();
println!(
"{} network {} is uninstalled.",
"[*]".dimmed(),
interface.as_str_lossy().yellow()
);
} | conditional_block |
|
main.rs | _cidr_prefix: u8,
}
impl ConfigFile {
pub fn write_to_path<P: AsRef<Path>>(&self, path: P) -> Result<(), Error> |
pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self, Error> {
let path = path.as_ref();
let file = File::open(path).with_path(path)?;
if shared::chmod(&file, 0o600)? {
println!(
"{} updated permissions for {} to 0600.",
"[!]".yellow(),
path.display()
);
}
Ok(toml::from_slice(&std::fs::read(&path).with_path(path)?)?)
}
}
#[derive(Clone, Debug, Default)]
pub struct ServerConfig {
wg_manage_dir_override: Option<PathBuf>,
wg_dir_override: Option<PathBuf>,
}
impl ServerConfig {
fn database_dir(&self) -> &Path {
self.wg_manage_dir_override
.as_deref()
.unwrap_or(*SERVER_DATABASE_DIR)
}
fn database_path(&self, interface: &InterfaceName) -> PathBuf {
PathBuf::new()
.join(self.database_dir())
.join(interface.to_string())
.with_extension("db")
}
fn config_dir(&self) -> &Path {
self.wg_dir_override
.as_deref()
.unwrap_or(*SERVER_CONFIG_DIR)
}
fn config_path(&self, interface: &InterfaceName) -> PathBuf {
PathBuf::new()
.join(self.config_dir())
.join(interface.to_string())
.with_extension("conf")
}
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
if env::var_os("RUST_LOG").is_none() {
// Set some default log settings.
env::set_var("RUST_LOG", "warn,warp=info,wg_manage_server=info");
}
pretty_env_logger::init();
let opt = Opt::from_args();
if unsafe { libc::getuid() } != 0 && !matches!(opt.command, Command::Completions { .. }) {
return Err("innernet-server must run as root.".into());
}
let conf = ServerConfig::default();
match opt.command {
Command::New { opts } => {
if let Err(e) = initialize::init_wizard(&conf, opts) {
eprintln!("{}: {}.", "creation failed".red(), e);
std::process::exit(1);
}
},
Command::Uninstall { interface } => uninstall(&interface, &conf, opt.network)?,
Command::Serve {
interface,
network: routing,
} => serve(*interface, &conf, routing).await?,
Command::AddPeer { interface, args } => add_peer(&interface, &conf, args, opt.network)?,
Command::RenamePeer { interface, args } => rename_peer(&interface, &conf, args)?,
Command::AddCidr { interface, args } => add_cidr(&interface, &conf, args)?,
Command::DeleteCidr { interface, args } => delete_cidr(&interface, &conf, args)?,
Command::Completions { shell } => {
Opt::clap().gen_completions_to("innernet-server", shell, &mut std::io::stdout());
std::process::exit(0);
},
}
Ok(())
}
fn open_database_connection(
interface: &InterfaceName,
conf: &ServerConfig,
) -> Result<rusqlite::Connection, Box<dyn std::error::Error>> {
let database_path = conf.database_path(&interface);
if !Path::new(&database_path).exists() {
return Err(format!(
"no database file found at {}",
database_path.to_string_lossy()
)
.into());
}
let conn = Connection::open(&database_path)?;
// Foreign key constraints aren't on in SQLite by default. Enable.
conn.pragma_update(None, "foreign_keys", &1)?;
db::auto_migrate(&conn)?;
Ok(conn)
}
fn add_peer(
interface: &InterfaceName,
conf: &ServerConfig,
opts: AddPeerOpts,
network: NetworkOpt,
) -> Result<(), Error> {
let config = ConfigFile::from_file(conf.config_path(interface))?;
let conn = open_database_connection(interface, conf)?;
let peers = DatabasePeer::list(&conn)?
.into_iter()
.map(|dp| dp.inner)
.collect::<Vec<_>>();
let cidrs = DatabaseCidr::list(&conn)?;
let cidr_tree = CidrTree::new(&cidrs[..]);
if let Some((peer_request, keypair)) = shared::prompts::add_peer(&peers, &cidr_tree, &opts)? {
let peer = DatabasePeer::create(&conn, peer_request)?;
if cfg!(not(test)) && Device::get(interface, network.backend).is_ok() {
// Update the current WireGuard interface with the new peers.
DeviceUpdate::new()
.add_peer((&*peer).into())
.apply(interface, network.backend)
.map_err(|_| ServerError::WireGuard)?;
println!("adding to WireGuard interface: {}", &*peer);
}
let server_peer = DatabasePeer::get(&conn, 1)?;
prompts::save_peer_invitation(
interface,
&peer,
&*server_peer,
&cidr_tree,
keypair,
&SocketAddr::new(config.address, config.listen_port),
&opts.save_config,
)?;
} else {
println!("exited without creating peer.");
}
Ok(())
}
fn rename_peer(
interface: &InterfaceName,
conf: &ServerConfig,
opts: RenamePeerOpts,
) -> Result<(), Error> {
let conn = open_database_connection(interface, conf)?;
let peers = DatabasePeer::list(&conn)?
.into_iter()
.map(|dp| dp.inner)
.collect::<Vec<_>>();
if let Some((peer_request, old_name)) = shared::prompts::rename_peer(&peers, &opts)? {
let mut db_peer = DatabasePeer::list(&conn)?
.into_iter()
.find(|p| p.name == old_name)
.ok_or( "Peer not found.")?;
let _peer = db_peer.update(&conn, peer_request)?;
} else {
println!("exited without creating peer.");
}
Ok(())
}
fn add_cidr(
interface: &InterfaceName,
conf: &ServerConfig,
opts: AddCidrOpts,
) -> Result<(), Error> {
let conn = open_database_connection(interface, conf)?;
let cidrs = DatabaseCidr::list(&conn)?;
if let Some(cidr_request) = shared::prompts::add_cidr(&cidrs, &opts)? {
let cidr = DatabaseCidr::create(&conn, cidr_request)?;
printdoc!(
"
CIDR \"{cidr_name}\" added.
Right now, peers within {cidr_name} can only see peers in the same CIDR, and in
the special \"innernet-server\" CIDR that includes the innernet server peer.
You'll need to add more associations for peers in diffent CIDRs to communicate.
",
cidr_name = cidr.name.bold()
);
} else {
println!("exited without creating CIDR.");
}
Ok(())
}
fn delete_cidr(
interface: &InterfaceName,
conf: &ServerConfig,
args: DeleteCidrOpts,
) -> Result<(), Error> {
println!("Fetching eligible CIDRs");
let conn = open_database_connection(interface, conf)?;
let cidrs = DatabaseCidr::list(&conn)?;
let peers = DatabasePeer::list(&conn)?
.into_iter()
.map(|dp| dp.inner)
.collect::<Vec<_>>();
let cidr_id = prompts::delete_cidr(&cidrs, &peers, &args)?;
println!("Deleting CIDR...");
let _ = DatabaseCidr::delete(&conn, cidr_id)?;
println!("CIDR deleted.");
Ok(())
}
fn uninstall(
interface: &InterfaceName,
conf: &ServerConfig,
network: NetworkOpt,
) -> Result<(), Error> {
if Confirm::with_theme(&*prompts::THEME)
.with_prompt(&format!(
"Permanently delete network \"{}\"?",
interface.as_str_lossy().yellow()
))
.default(false)
.interact()?
{
println!("{} bringing down interface (if up).", "[*]".dimmed());
wg::down(interface, network.backend).ok();
let config = conf.config_path(interface);
let data = conf.database_path(interface);
std::fs::remove_file(&config)
.with_path(&config)
.map_err(|e| println!("[!] {}", e.to_string().yellow()))
.ok();
std::fs::remove_file(&data)
.with_path(&data)
. | {
let mut invitation_file = File::create(&path).with_path(&path)?;
shared::chmod(&invitation_file, 0o600)?;
invitation_file
.write_all(toml::to_string(self).unwrap().as_bytes())
.with_path(path)?;
Ok(())
} | identifier_body |
main.rs | : NetworkOpt) -> Endpoints {
let endpoints = Arc::new(RwLock::new(HashMap::new()));
tokio::task::spawn({
let endpoints = endpoints.clone();
async move {
let mut interval = tokio::time::interval(Duration::from_secs(10));
loop {
interval.tick().await;
if let Ok(info) = Device::get(&interface, network.backend) {
for peer in info.peers {
if let Some(endpoint) = peer.config.endpoint {
endpoints
.write()
.insert(peer.config.public_key.to_base64(), endpoint);
}
}
}
}
}
});
endpoints
}
fn spawn_expired_invite_sweeper(db: Db) {
tokio::task::spawn(async move {
let mut interval = tokio::time::interval(Duration::from_secs(10));
loop {
interval.tick().await;
match DatabasePeer::delete_expired_invites(&db.lock()) {
Ok(deleted) if deleted > 0 => {
log::info!("Deleted {} expired peer invitations.", deleted)
},
Err(e) => log::error!("Failed to delete expired peer invitations: {}", e),
_ => {},
}
}
});
}
async fn serve(
interface: InterfaceName,
conf: &ServerConfig,
network: NetworkOpt,
) -> Result<(), Error> {
let config = ConfigFile::from_file(conf.config_path(&interface))?;
let conn = open_database_connection(&interface, conf)?;
let peers = DatabasePeer::list(&conn)?;
let peer_configs = peers
.iter()
.map(|peer| peer.deref().into())
.collect::<Vec<PeerConfigBuilder>>();
log::info!("bringing up interface.");
wg::up(
&interface,
&config.private_key,
IpNetwork::new(config.address, config.network_cidr_prefix)?,
Some(config.listen_port),
None,
network,
)?;
DeviceUpdate::new()
.add_peers(&peer_configs)
.apply(&interface, network.backend)?;
log::info!("{} peers added to wireguard interface.", peers.len());
let public_key = wgctrl::Key::from_base64(&config.private_key)?.generate_public();
let db = Arc::new(Mutex::new(conn));
let endpoints = spawn_endpoint_refresher(interface, network);
spawn_expired_invite_sweeper(db.clone());
let context = Context {
db,
endpoints,
interface,
public_key,
backend: network.backend,
};
log::info!("innernet-server {} starting.", VERSION);
let listener = get_listener((config.address, config.listen_port).into(), &interface)?;
let make_svc = hyper::service::make_service_fn(move |socket: &AddrStream| {
let remote_addr = socket.remote_addr();
let context = context.clone();
async move {
Ok::<_, http::Error>(hyper::service::service_fn(move |req: Request<Body>| {
log::debug!("{} - {} {}", &remote_addr, req.method(), req.uri());
hyper_service(req, context.clone(), remote_addr)
}))
}
});
let server = hyper::Server::from_tcp(listener)?.serve(make_svc);
server.await?;
Ok(())
}
/// This function differs per OS, because different operating systems have
/// opposing characteristics when binding to a specific IP address.
/// On Linux, binding to a specific local IP address does *not* bind it to
/// that IP's interface, allowing for spoofing attacks.
///
/// See https://github.com/tonarino/innernet/issues/26 for more details.
#[cfg(target_os = "linux")]
fn get_listener(addr: SocketAddr, interface: &InterfaceName) -> Result<TcpListener, Error> {
let listener = TcpListener::bind(&addr)?;
listener.set_nonblocking(true)?;
let sock = socket2::Socket::from(listener);
sock.bind_device(Some(interface.as_str_lossy().as_bytes()))?;
Ok(sock.into())
}
/// BSD-likes do seem to bind to an interface when binding to an IP,
/// according to the internet, but we may want to explicitly use
/// IP_BOUND_IF in the future regardless. This isn't currently in
/// the socket2 crate however, so we aren't currently using it.
///
/// See https://github.com/tonarino/innernet/issues/26 for more details.
#[cfg(not(target_os = "linux"))]
fn get_listener(addr: SocketAddr, _interface: &InterfaceName) -> Result<TcpListener, Error> {
let listener = TcpListener::bind(&addr)?;
listener.set_nonblocking(true)?;
Ok(listener)
}
pub(crate) async fn hyper_service(
req: Request<Body>,
context: Context,
remote_addr: SocketAddr,
) -> Result<Response<Body>, http::Error> {
// Break the path into components.
let components: VecDeque<_> = req
.uri()
.path()
.trim_start_matches('/')
.split('/')
.map(String::from)
.collect();
routes(req, context, remote_addr, components)
.await
.or_else(TryInto::try_into)
}
async fn routes(
req: Request<Body>,
context: Context,
remote_addr: SocketAddr,
mut components: VecDeque<String>,
) -> Result<Response<Body>, ServerError> {
// Must be "/v1/[something]"
if components.pop_front().as_deref() != Some("v1") {
Err(ServerError::NotFound)
} else {
let session = get_session(&req, context, remote_addr.ip())?;
let component = components.pop_front();
match component.as_deref() {
Some("user") => api::user::routes(req, components, session).await,
Some("admin") => api::admin::routes(req, components, session).await,
_ => Err(ServerError::NotFound),
}
}
}
fn get_session(
req: &Request<Body>,
context: Context,
addr: IpAddr,
) -> Result<Session, ServerError> {
let pubkey = req
.headers()
.get(INNERNET_PUBKEY_HEADER)
.ok_or(ServerError::Unauthorized)?;
let pubkey = pubkey.to_str().map_err(|_| ServerError::Unauthorized)?;
let pubkey = Key::from_base64(&pubkey).map_err(|_| ServerError::Unauthorized)?;
if pubkey.0.ct_eq(&context.public_key.0).into() {
let peer = DatabasePeer::get_from_ip(&context.db.lock(), addr).map_err(|e| match e {
rusqlite::Error::QueryReturnedNoRows => ServerError::Unauthorized,
e => ServerError::Database(e),
})?;
if !peer.is_disabled {
return Ok(Session { context, peer });
}
}
Err(ServerError::Unauthorized)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test;
use anyhow::Result;
use hyper::StatusCode;
use std::path::Path;
#[test]
fn test_init_wizard() -> Result<(), Error> {
// This runs init_wizard().
let server = test::Server::new()?;
assert!(Path::new(&server.wg_conf_path()).exists());
Ok(())
}
#[tokio::test]
async fn test_with_session_disguised_with_headers() -> Result<(), Error> {
let server = test::Server::new()?;
let req = Request::builder()
.uri(format!("http://{}/v1/admin/peers", test::WG_MANAGE_PEER_IP))
.header("Forwarded", format!("for={}", test::ADMIN_PEER_IP))
.header("X-Forwarded-For", test::ADMIN_PEER_IP)
.header("X-Real-IP", test::ADMIN_PEER_IP)
.body(Body::empty())
.unwrap();
// Request from an unknown IP, trying to disguise as an admin using HTTP headers.
let res = server.raw_request("10.80.80.80", req).await;
// addr::remote() filter only look at remote_addr from TCP socket.
// HTTP headers are not considered. This also means that innernet
// server would not function behind an HTTP proxy.
assert_eq!(res.status(), StatusCode::UNAUTHORIZED);
Ok(())
}
#[tokio::test]
async fn test_incorrect_public_key() -> Result<(), Error> {
let server = test::Server::new()?;
let key = Key::generate_private().generate_public();
// Request from an unknown IP, trying to disguise as an admin using HTTP headers.
let req = Request::builder()
.uri(format!("http://{}/v1/admin/peers", test::WG_MANAGE_PEER_IP))
.header(shared::INNERNET_PUBKEY_HEADER, key.to_base64())
.body(Body::empty())
.unwrap();
let res = server.raw_request("10.80.80.80", req).await;
// addr::remote() filter only look at remote_addr from TCP socket.
// HTTP headers are not considered. This also means that innernet
// server would not function behind an HTTP proxy.
assert_eq!(res.status(), StatusCode::UNAUTHORIZED);
Ok(())
}
| #[tokio::test]
async fn test_unparseable_public_key() -> Result<(), Error> {
let server = test::Server::new()?;
| random_line_split |
|
main.rs | (&self) -> bool {
self.peer.is_admin && self.user_capable()
}
pub fn user_capable(&self) -> bool {
!self.peer.is_disabled && self.peer.is_redeemed
}
pub fn redeemable(&self) -> bool {
!self.peer.is_disabled && !self.peer.is_redeemed
}
}
#[derive(Deserialize, Serialize, Debug)]
#[serde(rename_all = "kebab-case")]
pub struct ConfigFile {
/// The server's WireGuard key
pub private_key: String,
/// The listen port of the server
pub listen_port: u16,
/// The internal WireGuard IP address assigned to the server
pub address: IpAddr,
/// The CIDR prefix of the WireGuard network
pub network_cidr_prefix: u8,
}
impl ConfigFile {
pub fn write_to_path<P: AsRef<Path>>(&self, path: P) -> Result<(), Error> {
let mut invitation_file = File::create(&path).with_path(&path)?;
shared::chmod(&invitation_file, 0o600)?;
invitation_file
.write_all(toml::to_string(self).unwrap().as_bytes())
.with_path(path)?;
Ok(())
}
pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self, Error> {
let path = path.as_ref();
let file = File::open(path).with_path(path)?;
if shared::chmod(&file, 0o600)? {
println!(
"{} updated permissions for {} to 0600.",
"[!]".yellow(),
path.display()
);
}
Ok(toml::from_slice(&std::fs::read(&path).with_path(path)?)?)
}
}
#[derive(Clone, Debug, Default)]
pub struct ServerConfig {
wg_manage_dir_override: Option<PathBuf>,
wg_dir_override: Option<PathBuf>,
}
impl ServerConfig {
fn database_dir(&self) -> &Path {
self.wg_manage_dir_override
.as_deref()
.unwrap_or(*SERVER_DATABASE_DIR)
}
fn database_path(&self, interface: &InterfaceName) -> PathBuf {
PathBuf::new()
.join(self.database_dir())
.join(interface.to_string())
.with_extension("db")
}
fn config_dir(&self) -> &Path {
self.wg_dir_override
.as_deref()
.unwrap_or(*SERVER_CONFIG_DIR)
}
fn config_path(&self, interface: &InterfaceName) -> PathBuf {
PathBuf::new()
.join(self.config_dir())
.join(interface.to_string())
.with_extension("conf")
}
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
if env::var_os("RUST_LOG").is_none() {
// Set some default log settings.
env::set_var("RUST_LOG", "warn,warp=info,wg_manage_server=info");
}
pretty_env_logger::init();
let opt = Opt::from_args();
if unsafe { libc::getuid() } != 0 && !matches!(opt.command, Command::Completions { .. }) {
return Err("innernet-server must run as root.".into());
}
let conf = ServerConfig::default();
match opt.command {
Command::New { opts } => {
if let Err(e) = initialize::init_wizard(&conf, opts) {
eprintln!("{}: {}.", "creation failed".red(), e);
std::process::exit(1);
}
},
Command::Uninstall { interface } => uninstall(&interface, &conf, opt.network)?,
Command::Serve {
interface,
network: routing,
} => serve(*interface, &conf, routing).await?,
Command::AddPeer { interface, args } => add_peer(&interface, &conf, args, opt.network)?,
Command::RenamePeer { interface, args } => rename_peer(&interface, &conf, args)?,
Command::AddCidr { interface, args } => add_cidr(&interface, &conf, args)?,
Command::DeleteCidr { interface, args } => delete_cidr(&interface, &conf, args)?,
Command::Completions { shell } => {
Opt::clap().gen_completions_to("innernet-server", shell, &mut std::io::stdout());
std::process::exit(0);
},
}
Ok(())
}
fn open_database_connection(
interface: &InterfaceName,
conf: &ServerConfig,
) -> Result<rusqlite::Connection, Box<dyn std::error::Error>> {
let database_path = conf.database_path(&interface);
if !Path::new(&database_path).exists() {
return Err(format!(
"no database file found at {}",
database_path.to_string_lossy()
)
.into());
}
let conn = Connection::open(&database_path)?;
// Foreign key constraints aren't on in SQLite by default. Enable.
conn.pragma_update(None, "foreign_keys", &1)?;
db::auto_migrate(&conn)?;
Ok(conn)
}
fn add_peer(
interface: &InterfaceName,
conf: &ServerConfig,
opts: AddPeerOpts,
network: NetworkOpt,
) -> Result<(), Error> {
let config = ConfigFile::from_file(conf.config_path(interface))?;
let conn = open_database_connection(interface, conf)?;
let peers = DatabasePeer::list(&conn)?
.into_iter()
.map(|dp| dp.inner)
.collect::<Vec<_>>();
let cidrs = DatabaseCidr::list(&conn)?;
let cidr_tree = CidrTree::new(&cidrs[..]);
if let Some((peer_request, keypair)) = shared::prompts::add_peer(&peers, &cidr_tree, &opts)? {
let peer = DatabasePeer::create(&conn, peer_request)?;
if cfg!(not(test)) && Device::get(interface, network.backend).is_ok() {
// Update the current WireGuard interface with the new peers.
DeviceUpdate::new()
.add_peer((&*peer).into())
.apply(interface, network.backend)
.map_err(|_| ServerError::WireGuard)?;
println!("adding to WireGuard interface: {}", &*peer);
}
let server_peer = DatabasePeer::get(&conn, 1)?;
prompts::save_peer_invitation(
interface,
&peer,
&*server_peer,
&cidr_tree,
keypair,
&SocketAddr::new(config.address, config.listen_port),
&opts.save_config,
)?;
} else {
println!("exited without creating peer.");
}
Ok(())
}
fn rename_peer(
interface: &InterfaceName,
conf: &ServerConfig,
opts: RenamePeerOpts,
) -> Result<(), Error> {
let conn = open_database_connection(interface, conf)?;
let peers = DatabasePeer::list(&conn)?
.into_iter()
.map(|dp| dp.inner)
.collect::<Vec<_>>();
if let Some((peer_request, old_name)) = shared::prompts::rename_peer(&peers, &opts)? {
let mut db_peer = DatabasePeer::list(&conn)?
.into_iter()
.find(|p| p.name == old_name)
.ok_or( "Peer not found.")?;
let _peer = db_peer.update(&conn, peer_request)?;
} else {
println!("exited without creating peer.");
}
Ok(())
}
fn add_cidr(
interface: &InterfaceName,
conf: &ServerConfig,
opts: AddCidrOpts,
) -> Result<(), Error> {
let conn = open_database_connection(interface, conf)?;
let cidrs = DatabaseCidr::list(&conn)?;
if let Some(cidr_request) = shared::prompts::add_cidr(&cidrs, &opts)? {
let cidr = DatabaseCidr::create(&conn, cidr_request)?;
printdoc!(
"
CIDR \"{cidr_name}\" added.
Right now, peers within {cidr_name} can only see peers in the same CIDR, and in
the special \"innernet-server\" CIDR that includes the innernet server peer.
You'll need to add more associations for peers in diffent CIDRs to communicate.
",
cidr_name = cidr.name.bold()
);
} else {
println!("exited without creating CIDR.");
}
Ok(())
}
fn delete_cidr(
interface: &InterfaceName,
conf: &ServerConfig,
args: DeleteCidrOpts,
) -> Result<(), Error> {
println!("Fetching eligible CIDRs");
let conn = open_database_connection(interface, conf)?;
let cidrs = DatabaseCidr::list(&conn)?;
let peers = DatabasePeer::list(&conn)?
.into_iter()
.map(|dp| dp.inner)
.collect::<Vec<_>>();
let cidr_id = prompts::delete_cidr(&cidrs, &peers, &args)?;
println!("Deleting CIDR...");
let _ = DatabaseCidr::delete(&conn, cidr_id)?;
println!("CIDR deleted.");
Ok(())
}
fn uninstall(
interface: &InterfaceName,
conf: &ServerConfig,
network: NetworkOpt | admin_capable | identifier_name |
|
population.rs | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use unit::Unit;
use crossbeam::scope;
use rand::{SeedableRng, StdRng};
use rand::distributions::{IndependentSample, Range};
use std::mem;
use std::sync::{Arc, Mutex, Condvar};
use std::cmp::Ordering;
use std::sync::mpsc::sync_channel;
/// Wraps a unit within a struct that lazily evaluates its fitness to avoid
/// duplicate work.
struct LazyUnit<T: Unit> {
unit: T,
lazy_fitness: Option<f64>,
}
impl<T: Unit> LazyUnit<T> {
fn from(unit: T) -> Self {
LazyUnit {
unit: unit,
lazy_fitness: None,
}
}
fn fitness(&mut self) -> f64 {
match self.lazy_fitness {
Some(x) => x,
None => {
let fitness = self.unit.fitness();
self.lazy_fitness = Some(fitness);
fitness
}
}
}
}
/// Population is an abstraction that represents a collection of units. Each
/// unit is a combination of variables, which produces an overall fitness. Units
/// mate with other units to produce mutated offspring combining traits from
/// both units.
///
/// The population is responsible for iterating new generations of units by
/// mating fit units and killing unfit units.
pub struct Population<T: Unit> {
units: Vec<T>,
seed: usize,
breed_factor: f64,
survival_factor: f64,
max_size: usize,
}
impl<T: Unit> Population<T> {
/// Creates a new population, starts off with an empty population. If you
/// wish to start with a preset population of units you can call
/// `set_population` before calling epochs.
pub fn new(init_pop: Vec<T>) -> Self {
Population {
units: init_pop,
seed: 1,
breed_factor: 0.5,
survival_factor: 0.5,
max_size: 100,
}
}
//--------------------------------------------------------------------------
/// Sets the random seed of the population.
pub fn set_rand_seed(&mut self, seed: usize) -> &mut Self {
self.seed = seed;
self
}
/// Sets the maximum size of the population. If already populated with more
/// than this amount a random section of the population is killed.
pub fn set_size(&mut self, size: usize) -> &mut Self {
self.units.truncate(size);
self.max_size = size;
self
}
/// Sets the breed_factor (0 < b <= 1) of the genetic algorithm, which is
/// the percentage of the population that will be able to breed per epoch.
/// Units that are more fit are preferred for breeding, and so a high
/// breed_factor results in more poorly performing units being able to
/// breed, which will slow the algorithm down but allow it to escape local
/// peaks.
pub fn set_breed_factor(&mut self, breed_factor: f64) -> &mut Self {
assert!(breed_factor > 0.0 && breed_factor <= 1.0);
self.breed_factor = breed_factor;
self
}
/// Sets the survival_factor (0 <= b <= 1) of the genetic algorithm, which
/// is the percentage of the breeding population that will survive each
/// epoch. Units that are more fit are preferred for survival, and so a high
/// survival rate results in more poorly performing units being carried into
/// the next epoch.
///
/// Note that this value is a percentage of the breeding population. So if
/// your breeding factor is 0.5, and your survival factor is 0.9, the
/// percentage of units that will survive the next epoch is:
///
/// 0.5 * 0.9 * 100 = 45%
///
pub fn set_survival_factor(&mut self, survival_factor: f64) -> &mut Self {
assert!(survival_factor >= 0.0 && survival_factor <= 1.0);
self.survival_factor = survival_factor;
self
}
//--------------------------------------------------------------------------
/// An epoch that allows units to breed and mutate without harsh culling.
/// It's important to sometimes allow 'weak' units to produce generations
/// that might escape local peaks in certain dimensions.
fn epoch(&self, units: &mut Vec<LazyUnit<T>>, mut rng: StdRng) -> StdRng {
assert!(units.len() > 0);
// breed_factor dicates how large a percentage of the population will be
// able to breed.
let breed_up_to = (self.breed_factor * (units.len() as f64)) as usize;
let mut breeders: Vec<LazyUnit<T>> = Vec::new();
while let Some(unit) = units.pop() {
breeders.push(unit);
if breeders.len() == breed_up_to {
break;
}
}
units.clear();
// The strongest half of our breeders will survive each epoch. Always at
// least one.
let surviving_parents = (breeders.len() as f64 * self.survival_factor).ceil() as usize;
let pcnt_range = Range::new(0, breeders.len());
for i in 0..self.max_size - surviving_parents {
let rs = pcnt_range.ind_sample(&mut rng);
units.push(LazyUnit::from(
breeders[i % breeders.len()].unit.breed_with(
&breeders[rs].unit,
),
));
}
// Move our survivors into the new generation.
units.append(&mut breeders.drain(0..surviving_parents).collect());
rng
}
/// Runs a number of epochs where fitness is calculated across n parallel
/// processes. This is useful when the fitness calcuation is an expensive
/// operation.
pub fn epochs_parallel(&mut self, n_epochs: u32, n_processes: u32) -> &mut Self {
scope(|scope| {
let cvar_pair = Arc::new((Mutex::new(0), Condvar::new()));
let (tx, rx) = sync_channel(0); |
for _ in 0..n_processes {
let cvar_pair_clone = cvar_pair.clone();
let processed_stack_clone = processed_stack.clone();
let process_queue_clone = process_queue.clone();
scope.spawn(move || {
let &(ref lock, ref cvar) = &*cvar_pair_clone;
loop {
let mut l_unit: LazyUnit<T> =
match process_queue_clone.lock().ok().unwrap().recv() {
Ok(u) => u,
Err(_) => return,
};
l_unit.fitness();
processed_stack_clone.lock().ok().unwrap().push(l_unit);
{
let mut processed = lock.lock().unwrap();
*processed += 1;
cvar.notify_all();
}
}
});
}
let &(ref lock, ref cvar) = &*cvar_pair;
let mut active_stack = Vec::new();
while let Some(unit) = self.units.pop() {
active_stack.push(LazyUnit::from(unit));
}
let seed: &[_] = &[self.seed];
let mut rng: StdRng = SeedableRng::from_seed(seed);
for i in 0..(n_epochs + 1) {
let jobs_total = active_stack.len();
while let Some(unit) = active_stack.pop() {
tx.send(unit).unwrap();
}
let mut jobs_processed = lock.lock().unwrap();
while *jobs_processed != jobs_total {
jobs_processed = cvar.wait(jobs_processed).unwrap();
}
*jobs_processed = 0;
// Swap the full processed_stack with the active stack.
mem::swap(&mut active_stack, &mut processed_stack.lock().ok().unwrap());
// We want to sort such that highest fitness units are at the
// end.
active_stack.sort_by(|a, b| {
a.lazy_fitness
.unwrap_or(0.0)
.partial_cmp(&b.lazy_fitness.unwrap_or(0.0))
.unwrap_or(Ordering::Equal)
});
// If we have the perfect solution then break early.
if active_stack.last().unwrap().lazy_fitness.unwrap_or(0.0) == 1.0 {
break;
}
if i != n_epochs {
rng = self.epoch(&mut active_stack, rng);
}
}
// Reverse the order of units such that the first unit is the
// strongest candidate.
while let Some(unit) = active_stack.pop() {
self.units.push(unit.unit);
}
});
self
}
/// Runs a number of epochs on a single process.
pub fn epochs(&mut self, n_epochs: u32) -> &mut Self {
let mut processed_stack = Vec::new();
let mut active_stack = Vec | let process_queue = Arc::new(Mutex::new(rx));
let processed_stack = Arc::new(Mutex::new(Vec::new())); | random_line_split |
population.rs | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use unit::Unit;
use crossbeam::scope;
use rand::{SeedableRng, StdRng};
use rand::distributions::{IndependentSample, Range};
use std::mem;
use std::sync::{Arc, Mutex, Condvar};
use std::cmp::Ordering;
use std::sync::mpsc::sync_channel;
/// Wraps a unit within a struct that lazily evaluates its fitness to avoid
/// duplicate work.
struct LazyUnit<T: Unit> {
unit: T,
lazy_fitness: Option<f64>,
}
impl<T: Unit> LazyUnit<T> {
fn from(unit: T) -> Self {
LazyUnit {
unit: unit,
lazy_fitness: None,
}
}
fn fitness(&mut self) -> f64 {
match self.lazy_fitness {
Some(x) => x,
None => {
let fitness = self.unit.fitness();
self.lazy_fitness = Some(fitness);
fitness
}
}
}
}
/// Population is an abstraction that represents a collection of units. Each
/// unit is a combination of variables, which produces an overall fitness. Units
/// mate with other units to produce mutated offspring combining traits from
/// both units.
///
/// The population is responsible for iterating new generations of units by
/// mating fit units and killing unfit units.
pub struct Population<T: Unit> {
units: Vec<T>,
seed: usize,
breed_factor: f64,
survival_factor: f64,
max_size: usize,
}
impl<T: Unit> Population<T> {
/// Creates a new population, starts off with an empty population. If you
/// wish to start with a preset population of units you can call
/// `set_population` before calling epochs.
pub fn | (init_pop: Vec<T>) -> Self {
Population {
units: init_pop,
seed: 1,
breed_factor: 0.5,
survival_factor: 0.5,
max_size: 100,
}
}
//--------------------------------------------------------------------------
/// Sets the random seed of the population.
pub fn set_rand_seed(&mut self, seed: usize) -> &mut Self {
self.seed = seed;
self
}
/// Sets the maximum size of the population. If already populated with more
/// than this amount a random section of the population is killed.
pub fn set_size(&mut self, size: usize) -> &mut Self {
self.units.truncate(size);
self.max_size = size;
self
}
/// Sets the breed_factor (0 < b <= 1) of the genetic algorithm, which is
/// the percentage of the population that will be able to breed per epoch.
/// Units that are more fit are preferred for breeding, and so a high
/// breed_factor results in more poorly performing units being able to
/// breed, which will slow the algorithm down but allow it to escape local
/// peaks.
pub fn set_breed_factor(&mut self, breed_factor: f64) -> &mut Self {
assert!(breed_factor > 0.0 && breed_factor <= 1.0);
self.breed_factor = breed_factor;
self
}
/// Sets the survival_factor (0 <= b <= 1) of the genetic algorithm, which
/// is the percentage of the breeding population that will survive each
/// epoch. Units that are more fit are preferred for survival, and so a high
/// survival rate results in more poorly performing units being carried into
/// the next epoch.
///
/// Note that this value is a percentage of the breeding population. So if
/// your breeding factor is 0.5, and your survival factor is 0.9, the
/// percentage of units that will survive the next epoch is:
///
/// 0.5 * 0.9 * 100 = 45%
///
pub fn set_survival_factor(&mut self, survival_factor: f64) -> &mut Self {
assert!(survival_factor >= 0.0 && survival_factor <= 1.0);
self.survival_factor = survival_factor;
self
}
//--------------------------------------------------------------------------
/// An epoch that allows units to breed and mutate without harsh culling.
/// It's important to sometimes allow 'weak' units to produce generations
/// that might escape local peaks in certain dimensions.
fn epoch(&self, units: &mut Vec<LazyUnit<T>>, mut rng: StdRng) -> StdRng {
assert!(units.len() > 0);
// breed_factor dicates how large a percentage of the population will be
// able to breed.
let breed_up_to = (self.breed_factor * (units.len() as f64)) as usize;
let mut breeders: Vec<LazyUnit<T>> = Vec::new();
while let Some(unit) = units.pop() {
breeders.push(unit);
if breeders.len() == breed_up_to {
break;
}
}
units.clear();
// The strongest half of our breeders will survive each epoch. Always at
// least one.
let surviving_parents = (breeders.len() as f64 * self.survival_factor).ceil() as usize;
let pcnt_range = Range::new(0, breeders.len());
for i in 0..self.max_size - surviving_parents {
let rs = pcnt_range.ind_sample(&mut rng);
units.push(LazyUnit::from(
breeders[i % breeders.len()].unit.breed_with(
&breeders[rs].unit,
),
));
}
// Move our survivors into the new generation.
units.append(&mut breeders.drain(0..surviving_parents).collect());
rng
}
/// Runs a number of epochs where fitness is calculated across n parallel
/// processes. This is useful when the fitness calcuation is an expensive
/// operation.
pub fn epochs_parallel(&mut self, n_epochs: u32, n_processes: u32) -> &mut Self {
scope(|scope| {
let cvar_pair = Arc::new((Mutex::new(0), Condvar::new()));
let (tx, rx) = sync_channel(0);
let process_queue = Arc::new(Mutex::new(rx));
let processed_stack = Arc::new(Mutex::new(Vec::new()));
for _ in 0..n_processes {
let cvar_pair_clone = cvar_pair.clone();
let processed_stack_clone = processed_stack.clone();
let process_queue_clone = process_queue.clone();
scope.spawn(move || {
let &(ref lock, ref cvar) = &*cvar_pair_clone;
loop {
let mut l_unit: LazyUnit<T> =
match process_queue_clone.lock().ok().unwrap().recv() {
Ok(u) => u,
Err(_) => return,
};
l_unit.fitness();
processed_stack_clone.lock().ok().unwrap().push(l_unit);
{
let mut processed = lock.lock().unwrap();
*processed += 1;
cvar.notify_all();
}
}
});
}
let &(ref lock, ref cvar) = &*cvar_pair;
let mut active_stack = Vec::new();
while let Some(unit) = self.units.pop() {
active_stack.push(LazyUnit::from(unit));
}
let seed: &[_] = &[self.seed];
let mut rng: StdRng = SeedableRng::from_seed(seed);
for i in 0..(n_epochs + 1) {
let jobs_total = active_stack.len();
while let Some(unit) = active_stack.pop() {
tx.send(unit).unwrap();
}
let mut jobs_processed = lock.lock().unwrap();
while *jobs_processed != jobs_total {
jobs_processed = cvar.wait(jobs_processed).unwrap();
}
*jobs_processed = 0;
// Swap the full processed_stack with the active stack.
mem::swap(&mut active_stack, &mut processed_stack.lock().ok().unwrap());
// We want to sort such that highest fitness units are at the
// end.
active_stack.sort_by(|a, b| {
a.lazy_fitness
.unwrap_or(0.0)
.partial_cmp(&b.lazy_fitness.unwrap_or(0.0))
.unwrap_or(Ordering::Equal)
});
// If we have the perfect solution then break early.
if active_stack.last().unwrap().lazy_fitness.unwrap_or(0.0) == 1.0 {
break;
}
if i != n_epochs {
rng = self.epoch(&mut active_stack, rng);
}
}
// Reverse the order of units such that the first unit is the
// strongest candidate.
while let Some(unit) = active_stack.pop() {
self.units.push(unit.unit);
}
});
self
}
/// Runs a number of epochs on a single process.
pub fn epochs(&mut self, n_epochs: u32) -> &mut Self {
let mut processed_stack = Vec::new();
let mut active_stack = Vec | new | identifier_name |
constructor.go | ([]iprocessor.V1, len(conf.Processors))
for j, procConf := range conf.Processors {
var err error
pMgr := mgr.IntoPath("processors", strconv.Itoa(j))
processors[j], err = processor.New(procConf, pMgr)
if err != nil |
}
return pipeline.NewProcessor(processors...), nil
}}...)
}
return pipelines
}
func fromSimpleConstructor(fn func(Config, interop.Manager, log.Modular, metrics.Type) (output.Streamed, error)) ConstructorFunc {
return func(
conf Config,
mgr interop.Manager,
log log.Modular,
stats metrics.Type,
pipelines ...iprocessor.PipelineConstructorFunc,
) (output.Streamed, error) {
output, err := fn(conf, mgr, log, stats)
if err != nil {
return nil, err
}
pipelines = AppendProcessorsFromConfig(conf, mgr, pipelines...)
return WrapWithPipelines(output, pipelines...)
}
}
// ConstructorFunc is a func signature able to construct an output.
type ConstructorFunc func(Config, interop.Manager, log.Modular, metrics.Type, ...iprocessor.PipelineConstructorFunc) (output.Streamed, error)
// WalkConstructors iterates each component constructor.
func WalkConstructors(fn func(ConstructorFunc, docs.ComponentSpec)) {
inferred := docs.ComponentFieldsFromConf(NewConfig())
for k, v := range Constructors {
conf := v.Config
conf.Children = conf.Children.DefaultAndTypeFrom(inferred[k])
spec := docs.ComponentSpec{
Type: docs.TypeOutput,
Name: k,
Summary: v.Summary,
Description: v.Description,
Footnotes: v.Footnotes,
Categories: v.Categories,
Config: conf,
Examples: v.Examples,
Status: v.Status,
Version: v.Version,
}
spec.Description = output.Description(v.Async, v.Batches, spec.Description)
fn(v.constructor, spec)
}
}
// Constructors is a map of all output types with their specs.
var Constructors = map[string]TypeSpec{}
//------------------------------------------------------------------------------
// String constants representing each output type.
// Deprecated: Do not add new components here. Instead, use the public plugin
// APIs. Examples can be found in: ./internal/impl
const (
TypeAMQP09 = "amqp_0_9"
TypeAMQP1 = "amqp_1"
TypeAWSDynamoDB = "aws_dynamodb"
TypeAWSKinesis = "aws_kinesis"
TypeAWSKinesisFirehose = "aws_kinesis_firehose"
TypeAWSS3 = "aws_s3"
TypeAWSSNS = "aws_sns"
TypeAWSSQS = "aws_sqs"
TypeAzureBlobStorage = "azure_blob_storage"
TypeAzureQueueStorage = "azure_queue_storage"
TypeAzureTableStorage = "azure_table_storage"
TypeBroker = "broker"
TypeCache = "cache"
TypeCassandra = "cassandra"
TypeDrop = "drop"
TypeDropOn = "drop_on"
TypeDynamic = "dynamic"
TypeDynamoDB = "dynamodb"
TypeElasticsearch = "elasticsearch"
TypeFallback = "fallback"
TypeFile = "file"
TypeGCPCloudStorage = "gcp_cloud_storage"
TypeGCPPubSub = "gcp_pubsub"
TypeHDFS = "hdfs"
TypeHTTPClient = "http_client"
TypeHTTPServer = "http_server"
TypeInproc = "inproc"
TypeKafka = "kafka"
TypeMongoDB = "mongodb"
TypeMQTT = "mqtt"
TypeNanomsg = "nanomsg"
TypeNATS = "nats"
TypeNATSJetStream = "nats_jetstream"
TypeNATSStream = "nats_stream"
TypeNSQ = "nsq"
TypeRedisHash = "redis_hash"
TypeRedisList = "redis_list"
TypeRedisPubSub = "redis_pubsub"
TypeRedisStreams = "redis_streams"
TypeReject = "reject"
TypeResource = "resource"
TypeRetry = "retry"
TypeSFTP = "sftp"
TypeSTDOUT = "stdout"
TypeSubprocess = "subprocess"
TypeSwitch = "switch"
TypeSyncResponse = "sync_response"
TypeSocket = "socket"
TypeWebsocket = "websocket"
)
//------------------------------------------------------------------------------
// Config is the all encompassing configuration struct for all output types.
// Deprecated: Do not add new components here. Instead, use the public plugin
// APIs. Examples can be found in: ./internal/impl
type Config struct {
Label string `json:"label" yaml:"label"`
Type string `json:"type" yaml:"type"`
AMQP09 AMQPConfig `json:"amqp_0_9" yaml:"amqp_0_9"`
AMQP1 AMQP1Config `json:"amqp_1" yaml:"amqp_1"`
AWSDynamoDB DynamoDBConfig `json:"aws_dynamodb" yaml:"aws_dynamodb"`
AWSKinesis KinesisConfig `json:"aws_kinesis" yaml:"aws_kinesis"`
AWSKinesisFirehose KinesisFirehoseConfig `json:"aws_kinesis_firehose" yaml:"aws_kinesis_firehose"`
AWSS3 AmazonS3Config `json:"aws_s3" yaml:"aws_s3"`
AWSSNS SNSConfig `json:"aws_sns" yaml:"aws_sns"`
AWSSQS AmazonSQSConfig `json:"aws_sqs" yaml:"aws_sqs"`
AzureBlobStorage AzureBlobStorageConfig `json:"azure_blob_storage" yaml:"azure_blob_storage"`
AzureQueueStorage AzureQueueStorageConfig `json:"azure_queue_storage" yaml:"azure_queue_storage"`
AzureTableStorage AzureTableStorageConfig `json:"azure_table_storage" yaml:"azure_table_storage"`
Broker BrokerConfig `json:"broker" yaml:"broker"`
Cache CacheConfig `json:"cache" yaml:"cache"`
Cassandra CassandraConfig `json:"cassandra" yaml:"cassandra"`
Drop DropConfig `json:"drop" yaml:"drop"`
DropOn DropOnConfig `json:"drop_on" yaml:"drop_on"`
Dynamic DynamicConfig `json:"dynamic" yaml:"dynamic"`
Elasticsearch ElasticsearchConfig `json:"elasticsearch" yaml:"elasticsearch"`
Fallback TryConfig `json:"fallback" yaml:"fallback"`
File FileConfig `json:"file" yaml:"file"`
GCPCloudStorage GCPCloudStorageConfig `json:"gcp_cloud_storage" yaml:"gcp_cloud_storage"`
GCPPubSub GCPPubSubConfig `json:"gcp_pubsub" yaml:"gcp_pubsub"`
HDFS HDFSConfig `json:"hdfs" yaml:"hdfs"`
HTTPClient HTTPClientConfig `json:"http_client" yaml:"http_client"`
HTTPServer HTTPServerConfig `json:"http_server" yaml:"http_server"`
Inproc string `json:"inproc" yaml:"inproc"`
Kafka KafkaConfig `json:"kafka" yaml:"kafka"`
MongoDB MongoDBConfig `json:"mongodb" yaml:"mongodb"`
MQTT MQTTConfig `json:"mqtt" yaml:"mqtt"`
Nanomsg NanomsgConfig `json:"nanomsg" yaml:"nanomsg"`
NATS NATSConfig `json:"nats" yaml:"nats"`
NATSStream NATSStreamConfig `json:"nats_stream" yaml:"nats_stream"`
NSQ NSQConfig `json:"nsq" yaml:"nsq"`
Plugin interface{} `json:"plugin,omitempty" yaml:"plugin,omitempty"`
RedisHash RedisHashConfig `json:"redis_hash" yaml:"redis_hash"`
RedisList RedisListConfig `json:"redis_list" yaml:"redis_list"`
RedisPubSub RedisPubSubConfig `json:"redis_pubsub" yaml:"redis_pubsub"`
RedisStreams RedisStreamsConfig `json:"redis_streams" yaml:"redis_streams"`
Reject string `json:"reject" yaml:"reject"`
Resource string `json:"resource" yaml:"resource"`
Retry RetryConfig `json:"retry" yaml:"retry"`
SFTP SFTPConfig `json:"sftp" yaml:"sftp"`
STDOUT STDOUTConfig `json:"stdout" yaml:"stdout"`
Subprocess SubprocessConfig `json:"subprocess" yaml:"subprocess"`
Switch SwitchConfig `json:"switch" yaml:"switch"`
SyncResponse struct{} `json:"sync_response" yaml:"sync_response"`
Socket SocketConfig `json:"socket" yaml:"socket"`
Websocket WebsocketConfig `json:"websocket" yaml:"websocket"`
Processors []processor.Config `json:"processors" yaml:"processors"`
}
// NewConfig returns a configuration struct fully populated with default values.
// Deprecated: Do not add new components here. Instead, use the public plugin
// APIs. Examples can be found in: ./internal/impl
func NewConfig | {
return nil, err
} | conditional_block |
constructor.go | Summary string
Description string
Categories []string
Footnotes string
Config docs.FieldSpec
Examples []docs.AnnotatedExample
Version string
}
// AppendProcessorsFromConfig takes a variant arg of pipeline constructor
// functions and returns a new slice of them where the processors of the
// provided output configuration will also be initialized.
func AppendProcessorsFromConfig(conf Config, mgr interop.Manager, pipelines ...iprocessor.PipelineConstructorFunc) []iprocessor.PipelineConstructorFunc {
if len(conf.Processors) > 0 {
pipelines = append(pipelines, []iprocessor.PipelineConstructorFunc{func() (iprocessor.Pipeline, error) {
processors := make([]iprocessor.V1, len(conf.Processors))
for j, procConf := range conf.Processors {
var err error
pMgr := mgr.IntoPath("processors", strconv.Itoa(j))
processors[j], err = processor.New(procConf, pMgr)
if err != nil {
return nil, err
}
}
return pipeline.NewProcessor(processors...), nil
}}...)
}
return pipelines
}
func fromSimpleConstructor(fn func(Config, interop.Manager, log.Modular, metrics.Type) (output.Streamed, error)) ConstructorFunc {
return func(
conf Config,
mgr interop.Manager,
log log.Modular,
stats metrics.Type,
pipelines ...iprocessor.PipelineConstructorFunc,
) (output.Streamed, error) {
output, err := fn(conf, mgr, log, stats)
if err != nil {
return nil, err
}
pipelines = AppendProcessorsFromConfig(conf, mgr, pipelines...)
return WrapWithPipelines(output, pipelines...)
}
}
// ConstructorFunc is a func signature able to construct an output.
type ConstructorFunc func(Config, interop.Manager, log.Modular, metrics.Type, ...iprocessor.PipelineConstructorFunc) (output.Streamed, error)
// WalkConstructors iterates each component constructor.
func WalkConstructors(fn func(ConstructorFunc, docs.ComponentSpec)) {
inferred := docs.ComponentFieldsFromConf(NewConfig())
for k, v := range Constructors {
conf := v.Config
conf.Children = conf.Children.DefaultAndTypeFrom(inferred[k])
spec := docs.ComponentSpec{
Type: docs.TypeOutput,
Name: k,
Summary: v.Summary,
Description: v.Description,
Footnotes: v.Footnotes,
Categories: v.Categories,
Config: conf,
Examples: v.Examples,
Status: v.Status,
Version: v.Version,
}
spec.Description = output.Description(v.Async, v.Batches, spec.Description)
fn(v.constructor, spec)
}
}
// Constructors is a map of all output types with their specs.
var Constructors = map[string]TypeSpec{}
//------------------------------------------------------------------------------
// String constants representing each output type.
// Deprecated: Do not add new components here. Instead, use the public plugin
// APIs. Examples can be found in: ./internal/impl
const (
TypeAMQP09 = "amqp_0_9"
TypeAMQP1 = "amqp_1"
TypeAWSDynamoDB = "aws_dynamodb"
TypeAWSKinesis = "aws_kinesis"
TypeAWSKinesisFirehose = "aws_kinesis_firehose"
TypeAWSS3 = "aws_s3"
TypeAWSSNS = "aws_sns"
TypeAWSSQS = "aws_sqs"
TypeAzureBlobStorage = "azure_blob_storage"
TypeAzureQueueStorage = "azure_queue_storage"
TypeAzureTableStorage = "azure_table_storage"
TypeBroker = "broker"
TypeCache = "cache"
TypeCassandra = "cassandra"
TypeDrop = "drop"
TypeDropOn = "drop_on"
TypeDynamic = "dynamic"
TypeDynamoDB = "dynamodb"
TypeElasticsearch = "elasticsearch"
TypeFallback = "fallback"
TypeFile = "file"
TypeGCPCloudStorage = "gcp_cloud_storage"
TypeGCPPubSub = "gcp_pubsub"
TypeHDFS = "hdfs"
TypeHTTPClient = "http_client"
TypeHTTPServer = "http_server"
TypeInproc = "inproc"
TypeKafka = "kafka"
TypeMongoDB = "mongodb"
TypeMQTT = "mqtt"
TypeNanomsg = "nanomsg"
TypeNATS = "nats"
TypeNATSJetStream = "nats_jetstream"
TypeNATSStream = "nats_stream"
TypeNSQ = "nsq"
TypeRedisHash = "redis_hash"
TypeRedisList = "redis_list"
TypeRedisPubSub = "redis_pubsub"
TypeRedisStreams = "redis_streams"
TypeReject = "reject"
TypeResource = "resource"
TypeRetry = "retry"
TypeSFTP = "sftp"
TypeSTDOUT = "stdout"
TypeSubprocess = "subprocess"
TypeSwitch = "switch"
TypeSyncResponse = "sync_response"
TypeSocket = "socket"
TypeWebsocket = "websocket"
)
//------------------------------------------------------------------------------
// Config is the all encompassing configuration struct for all output types.
// Deprecated: Do not add new components here. Instead, use the public plugin
// APIs. Examples can be found in: ./internal/impl
type Config struct {
Label string `json:"label" yaml:"label"`
Type string `json:"type" yaml:"type"`
AMQP09 AMQPConfig `json:"amqp_0_9" yaml:"amqp_0_9"`
AMQP1 AMQP1Config `json:"amqp_1" yaml:"amqp_1"`
AWSDynamoDB DynamoDBConfig `json:"aws_dynamodb" yaml:"aws_dynamodb"`
AWSKinesis KinesisConfig `json:"aws_kinesis" yaml:"aws_kinesis"`
AWSKinesisFirehose KinesisFirehoseConfig `json:"aws_kinesis_firehose" yaml:"aws_kinesis_firehose"`
AWSS3 AmazonS3Config `json:"aws_s3" yaml:"aws_s3"`
AWSSNS SNSConfig `json:"aws_sns" yaml:"aws_sns"`
AWSSQS AmazonSQSConfig `json:"aws_sqs" yaml:"aws_sqs"`
AzureBlobStorage AzureBlobStorageConfig `json:"azure_blob_storage" yaml:"azure_blob_storage"`
AzureQueueStorage AzureQueueStorageConfig `json:"azure_queue_storage" yaml:"azure_queue_storage"`
AzureTableStorage AzureTableStorageConfig `json:"azure_table_storage" yaml:"azure_table_storage"`
Broker BrokerConfig `json:"broker" yaml:"broker"`
Cache CacheConfig `json:"cache" yaml:"cache"`
Cassandra CassandraConfig `json:"cassandra" yaml:"cassandra"`
Drop DropConfig `json:"drop" yaml:"drop"`
DropOn DropOnConfig `json:"drop_on" yaml:"drop_on"`
Dynamic DynamicConfig `json:"dynamic" yaml:"dynamic"`
Elasticsearch ElasticsearchConfig `json:"elasticsearch" yaml:"elasticsearch"`
Fallback TryConfig `json:"fallback" yaml:"fallback"`
File FileConfig `json:"file" yaml:"file"`
GCPCloudStorage GCPCloudStorageConfig `json:"gcp_cloud_storage" yaml:"gcp_cloud_storage"`
GCPPubSub GCPPubSubConfig `json:"gcp_pubsub" yaml:"gcp_pubsub"`
HDFS HDFSConfig `json:"hdfs" yaml:"hdfs"`
HTTPClient HTTPClientConfig `json:"http_client" yaml:"http_client"`
HTTPServer HTTPServerConfig `json:"http_server" yaml:"http_server"`
Inproc string `json:"inproc" yaml:"inproc"`
Kafka KafkaConfig `json:"kafka" yaml:"kafka"`
MongoDB MongoDBConfig `json:"mongodb" yaml:"mongodb"`
MQTT MQTTConfig `json:"mqtt" yaml:"mqtt"`
Nanomsg NanomsgConfig `json:"nanomsg" yaml:"nanomsg"`
NATS NATSConfig `json:"nats" yaml:"nats"`
NATSStream NATSStreamConfig `json:"nats_stream" yaml:"nats_stream"`
NSQ NSQConfig `json:"nsq" yaml:"nsq"`
Plugin interface{} `json:"plugin,omitempty" yaml:"plugin,omitempty"`
RedisHash RedisHashConfig `json:"redis_hash" yaml:"redis_hash"`
RedisList RedisListConfig `json:"redis_list" yaml:"redis_list"`
RedisPubSub RedisPubSubConfig `json:"redis_pubsub" yaml:"redis_pubsub"`
RedisStreams RedisStreamsConfig `json:"redis_streams" yaml:"redis_streams"`
Reject string `json:"reject" yaml:"reject"`
Resource string `json:"resource" yaml:"resource"`
Retry RetryConfig `json:"retry" yaml:"retry"`
SFTP SFTPConfig `json:"sftp" yaml:"sftp"`
STDOUT STDOUTConfig | Status docs.Status | random_line_split |
|
constructor.go | make([]iprocessor.V1, len(conf.Processors))
for j, procConf := range conf.Processors {
var err error
pMgr := mgr.IntoPath("processors", strconv.Itoa(j))
processors[j], err = processor.New(procConf, pMgr)
if err != nil {
return nil, err
}
}
return pipeline.NewProcessor(processors...), nil
}}...)
}
return pipelines
}
func | (fn func(Config, interop.Manager, log.Modular, metrics.Type) (output.Streamed, error)) ConstructorFunc {
return func(
conf Config,
mgr interop.Manager,
log log.Modular,
stats metrics.Type,
pipelines ...iprocessor.PipelineConstructorFunc,
) (output.Streamed, error) {
output, err := fn(conf, mgr, log, stats)
if err != nil {
return nil, err
}
pipelines = AppendProcessorsFromConfig(conf, mgr, pipelines...)
return WrapWithPipelines(output, pipelines...)
}
}
// ConstructorFunc is a func signature able to construct an output.
type ConstructorFunc func(Config, interop.Manager, log.Modular, metrics.Type, ...iprocessor.PipelineConstructorFunc) (output.Streamed, error)
// WalkConstructors iterates each component constructor.
func WalkConstructors(fn func(ConstructorFunc, docs.ComponentSpec)) {
inferred := docs.ComponentFieldsFromConf(NewConfig())
for k, v := range Constructors {
conf := v.Config
conf.Children = conf.Children.DefaultAndTypeFrom(inferred[k])
spec := docs.ComponentSpec{
Type: docs.TypeOutput,
Name: k,
Summary: v.Summary,
Description: v.Description,
Footnotes: v.Footnotes,
Categories: v.Categories,
Config: conf,
Examples: v.Examples,
Status: v.Status,
Version: v.Version,
}
spec.Description = output.Description(v.Async, v.Batches, spec.Description)
fn(v.constructor, spec)
}
}
// Constructors is a map of all output types with their specs.
var Constructors = map[string]TypeSpec{}
//------------------------------------------------------------------------------
// String constants representing each output type.
// Deprecated: Do not add new components here. Instead, use the public plugin
// APIs. Examples can be found in: ./internal/impl
const (
TypeAMQP09 = "amqp_0_9"
TypeAMQP1 = "amqp_1"
TypeAWSDynamoDB = "aws_dynamodb"
TypeAWSKinesis = "aws_kinesis"
TypeAWSKinesisFirehose = "aws_kinesis_firehose"
TypeAWSS3 = "aws_s3"
TypeAWSSNS = "aws_sns"
TypeAWSSQS = "aws_sqs"
TypeAzureBlobStorage = "azure_blob_storage"
TypeAzureQueueStorage = "azure_queue_storage"
TypeAzureTableStorage = "azure_table_storage"
TypeBroker = "broker"
TypeCache = "cache"
TypeCassandra = "cassandra"
TypeDrop = "drop"
TypeDropOn = "drop_on"
TypeDynamic = "dynamic"
TypeDynamoDB = "dynamodb"
TypeElasticsearch = "elasticsearch"
TypeFallback = "fallback"
TypeFile = "file"
TypeGCPCloudStorage = "gcp_cloud_storage"
TypeGCPPubSub = "gcp_pubsub"
TypeHDFS = "hdfs"
TypeHTTPClient = "http_client"
TypeHTTPServer = "http_server"
TypeInproc = "inproc"
TypeKafka = "kafka"
TypeMongoDB = "mongodb"
TypeMQTT = "mqtt"
TypeNanomsg = "nanomsg"
TypeNATS = "nats"
TypeNATSJetStream = "nats_jetstream"
TypeNATSStream = "nats_stream"
TypeNSQ = "nsq"
TypeRedisHash = "redis_hash"
TypeRedisList = "redis_list"
TypeRedisPubSub = "redis_pubsub"
TypeRedisStreams = "redis_streams"
TypeReject = "reject"
TypeResource = "resource"
TypeRetry = "retry"
TypeSFTP = "sftp"
TypeSTDOUT = "stdout"
TypeSubprocess = "subprocess"
TypeSwitch = "switch"
TypeSyncResponse = "sync_response"
TypeSocket = "socket"
TypeWebsocket = "websocket"
)
//------------------------------------------------------------------------------
// Config is the all encompassing configuration struct for all output types.
// Deprecated: Do not add new components here. Instead, use the public plugin
// APIs. Examples can be found in: ./internal/impl
type Config struct {
Label string `json:"label" yaml:"label"`
Type string `json:"type" yaml:"type"`
AMQP09 AMQPConfig `json:"amqp_0_9" yaml:"amqp_0_9"`
AMQP1 AMQP1Config `json:"amqp_1" yaml:"amqp_1"`
AWSDynamoDB DynamoDBConfig `json:"aws_dynamodb" yaml:"aws_dynamodb"`
AWSKinesis KinesisConfig `json:"aws_kinesis" yaml:"aws_kinesis"`
AWSKinesisFirehose KinesisFirehoseConfig `json:"aws_kinesis_firehose" yaml:"aws_kinesis_firehose"`
AWSS3 AmazonS3Config `json:"aws_s3" yaml:"aws_s3"`
AWSSNS SNSConfig `json:"aws_sns" yaml:"aws_sns"`
AWSSQS AmazonSQSConfig `json:"aws_sqs" yaml:"aws_sqs"`
AzureBlobStorage AzureBlobStorageConfig `json:"azure_blob_storage" yaml:"azure_blob_storage"`
AzureQueueStorage AzureQueueStorageConfig `json:"azure_queue_storage" yaml:"azure_queue_storage"`
AzureTableStorage AzureTableStorageConfig `json:"azure_table_storage" yaml:"azure_table_storage"`
Broker BrokerConfig `json:"broker" yaml:"broker"`
Cache CacheConfig `json:"cache" yaml:"cache"`
Cassandra CassandraConfig `json:"cassandra" yaml:"cassandra"`
Drop DropConfig `json:"drop" yaml:"drop"`
DropOn DropOnConfig `json:"drop_on" yaml:"drop_on"`
Dynamic DynamicConfig `json:"dynamic" yaml:"dynamic"`
Elasticsearch ElasticsearchConfig `json:"elasticsearch" yaml:"elasticsearch"`
Fallback TryConfig `json:"fallback" yaml:"fallback"`
File FileConfig `json:"file" yaml:"file"`
GCPCloudStorage GCPCloudStorageConfig `json:"gcp_cloud_storage" yaml:"gcp_cloud_storage"`
GCPPubSub GCPPubSubConfig `json:"gcp_pubsub" yaml:"gcp_pubsub"`
HDFS HDFSConfig `json:"hdfs" yaml:"hdfs"`
HTTPClient HTTPClientConfig `json:"http_client" yaml:"http_client"`
HTTPServer HTTPServerConfig `json:"http_server" yaml:"http_server"`
Inproc string `json:"inproc" yaml:"inproc"`
Kafka KafkaConfig `json:"kafka" yaml:"kafka"`
MongoDB MongoDBConfig `json:"mongodb" yaml:"mongodb"`
MQTT MQTTConfig `json:"mqtt" yaml:"mqtt"`
Nanomsg NanomsgConfig `json:"nanomsg" yaml:"nanomsg"`
NATS NATSConfig `json:"nats" yaml:"nats"`
NATSStream NATSStreamConfig `json:"nats_stream" yaml:"nats_stream"`
NSQ NSQConfig `json:"nsq" yaml:"nsq"`
Plugin interface{} `json:"plugin,omitempty" yaml:"plugin,omitempty"`
RedisHash RedisHashConfig `json:"redis_hash" yaml:"redis_hash"`
RedisList RedisListConfig `json:"redis_list" yaml:"redis_list"`
RedisPubSub RedisPubSubConfig `json:"redis_pubsub" yaml:"redis_pubsub"`
RedisStreams RedisStreamsConfig `json:"redis_streams" yaml:"redis_streams"`
Reject string `json:"reject" yaml:"reject"`
Resource string `json:"resource" yaml:"resource"`
Retry RetryConfig `json:"retry" yaml:"retry"`
SFTP SFTPConfig `json:"sftp" yaml:"sftp"`
STDOUT STDOUTConfig `json:"stdout" yaml:"stdout"`
Subprocess SubprocessConfig `json:"subprocess" yaml:"subprocess"`
Switch SwitchConfig `json:"switch" yaml:"switch"`
SyncResponse struct{} `json:"sync_response" yaml:"sync_response"`
Socket SocketConfig `json:"socket" yaml:"socket"`
Websocket WebsocketConfig `json:"websocket" yaml:"websocket"`
Processors []processor.Config `json:"processors" yaml:"processors"`
}
// NewConfig returns a configuration struct fully populated with default values.
// Deprecated: Do not add new components here. Instead, use the public plugin
// APIs. Examples can be found in: ./internal/impl
func NewConfig | fromSimpleConstructor | identifier_name |
constructor.go | "retry"
TypeSFTP = "sftp"
TypeSTDOUT = "stdout"
TypeSubprocess = "subprocess"
TypeSwitch = "switch"
TypeSyncResponse = "sync_response"
TypeSocket = "socket"
TypeWebsocket = "websocket"
)
//------------------------------------------------------------------------------
// Config is the all encompassing configuration struct for all output types.
// Deprecated: Do not add new components here. Instead, use the public plugin
// APIs. Examples can be found in: ./internal/impl
type Config struct {
Label string `json:"label" yaml:"label"`
Type string `json:"type" yaml:"type"`
AMQP09 AMQPConfig `json:"amqp_0_9" yaml:"amqp_0_9"`
AMQP1 AMQP1Config `json:"amqp_1" yaml:"amqp_1"`
AWSDynamoDB DynamoDBConfig `json:"aws_dynamodb" yaml:"aws_dynamodb"`
AWSKinesis KinesisConfig `json:"aws_kinesis" yaml:"aws_kinesis"`
AWSKinesisFirehose KinesisFirehoseConfig `json:"aws_kinesis_firehose" yaml:"aws_kinesis_firehose"`
AWSS3 AmazonS3Config `json:"aws_s3" yaml:"aws_s3"`
AWSSNS SNSConfig `json:"aws_sns" yaml:"aws_sns"`
AWSSQS AmazonSQSConfig `json:"aws_sqs" yaml:"aws_sqs"`
AzureBlobStorage AzureBlobStorageConfig `json:"azure_blob_storage" yaml:"azure_blob_storage"`
AzureQueueStorage AzureQueueStorageConfig `json:"azure_queue_storage" yaml:"azure_queue_storage"`
AzureTableStorage AzureTableStorageConfig `json:"azure_table_storage" yaml:"azure_table_storage"`
Broker BrokerConfig `json:"broker" yaml:"broker"`
Cache CacheConfig `json:"cache" yaml:"cache"`
Cassandra CassandraConfig `json:"cassandra" yaml:"cassandra"`
Drop DropConfig `json:"drop" yaml:"drop"`
DropOn DropOnConfig `json:"drop_on" yaml:"drop_on"`
Dynamic DynamicConfig `json:"dynamic" yaml:"dynamic"`
Elasticsearch ElasticsearchConfig `json:"elasticsearch" yaml:"elasticsearch"`
Fallback TryConfig `json:"fallback" yaml:"fallback"`
File FileConfig `json:"file" yaml:"file"`
GCPCloudStorage GCPCloudStorageConfig `json:"gcp_cloud_storage" yaml:"gcp_cloud_storage"`
GCPPubSub GCPPubSubConfig `json:"gcp_pubsub" yaml:"gcp_pubsub"`
HDFS HDFSConfig `json:"hdfs" yaml:"hdfs"`
HTTPClient HTTPClientConfig `json:"http_client" yaml:"http_client"`
HTTPServer HTTPServerConfig `json:"http_server" yaml:"http_server"`
Inproc string `json:"inproc" yaml:"inproc"`
Kafka KafkaConfig `json:"kafka" yaml:"kafka"`
MongoDB MongoDBConfig `json:"mongodb" yaml:"mongodb"`
MQTT MQTTConfig `json:"mqtt" yaml:"mqtt"`
Nanomsg NanomsgConfig `json:"nanomsg" yaml:"nanomsg"`
NATS NATSConfig `json:"nats" yaml:"nats"`
NATSStream NATSStreamConfig `json:"nats_stream" yaml:"nats_stream"`
NSQ NSQConfig `json:"nsq" yaml:"nsq"`
Plugin interface{} `json:"plugin,omitempty" yaml:"plugin,omitempty"`
RedisHash RedisHashConfig `json:"redis_hash" yaml:"redis_hash"`
RedisList RedisListConfig `json:"redis_list" yaml:"redis_list"`
RedisPubSub RedisPubSubConfig `json:"redis_pubsub" yaml:"redis_pubsub"`
RedisStreams RedisStreamsConfig `json:"redis_streams" yaml:"redis_streams"`
Reject string `json:"reject" yaml:"reject"`
Resource string `json:"resource" yaml:"resource"`
Retry RetryConfig `json:"retry" yaml:"retry"`
SFTP SFTPConfig `json:"sftp" yaml:"sftp"`
STDOUT STDOUTConfig `json:"stdout" yaml:"stdout"`
Subprocess SubprocessConfig `json:"subprocess" yaml:"subprocess"`
Switch SwitchConfig `json:"switch" yaml:"switch"`
SyncResponse struct{} `json:"sync_response" yaml:"sync_response"`
Socket SocketConfig `json:"socket" yaml:"socket"`
Websocket WebsocketConfig `json:"websocket" yaml:"websocket"`
Processors []processor.Config `json:"processors" yaml:"processors"`
}
// NewConfig returns a configuration struct fully populated with default values.
// Deprecated: Do not add new components here. Instead, use the public plugin
// APIs. Examples can be found in: ./internal/impl
func NewConfig() Config {
return Config{
Label: "",
Type: "stdout",
AMQP09: NewAMQPConfig(),
AMQP1: NewAMQP1Config(),
AWSDynamoDB: NewDynamoDBConfig(),
AWSKinesis: NewKinesisConfig(),
AWSKinesisFirehose: NewKinesisFirehoseConfig(),
AWSS3: NewAmazonS3Config(),
AWSSNS: NewSNSConfig(),
AWSSQS: NewAmazonSQSConfig(),
AzureBlobStorage: NewAzureBlobStorageConfig(),
AzureQueueStorage: NewAzureQueueStorageConfig(),
AzureTableStorage: NewAzureTableStorageConfig(),
Broker: NewBrokerConfig(),
Cache: NewCacheConfig(),
Cassandra: NewCassandraConfig(),
Drop: NewDropConfig(),
DropOn: NewDropOnConfig(),
Dynamic: NewDynamicConfig(),
Elasticsearch: NewElasticsearchConfig(),
Fallback: NewTryConfig(),
File: NewFileConfig(),
GCPCloudStorage: NewGCPCloudStorageConfig(),
GCPPubSub: NewGCPPubSubConfig(),
HDFS: NewHDFSConfig(),
HTTPClient: NewHTTPClientConfig(),
HTTPServer: NewHTTPServerConfig(),
Inproc: "",
Kafka: NewKafkaConfig(),
MQTT: NewMQTTConfig(),
MongoDB: NewMongoDBConfig(),
Nanomsg: NewNanomsgConfig(),
NATS: NewNATSConfig(),
NATSStream: NewNATSStreamConfig(),
NSQ: NewNSQConfig(),
Plugin: nil,
RedisHash: NewRedisHashConfig(),
RedisList: NewRedisListConfig(),
RedisPubSub: NewRedisPubSubConfig(),
RedisStreams: NewRedisStreamsConfig(),
Reject: "",
Resource: "",
Retry: NewRetryConfig(),
SFTP: NewSFTPConfig(),
STDOUT: NewSTDOUTConfig(),
Subprocess: NewSubprocessConfig(),
Switch: NewSwitchConfig(),
SyncResponse: struct{}{},
Socket: NewSocketConfig(),
Websocket: NewWebsocketConfig(),
Processors: []processor.Config{},
}
}
//------------------------------------------------------------------------------
// UnmarshalYAML ensures that when parsing configs that are in a map or slice
// the default values are still applied.
func (conf *Config) UnmarshalYAML(value *yaml.Node) error {
type confAlias Config
aliased := confAlias(NewConfig())
err := value.Decode(&aliased)
if err != nil {
return fmt.Errorf("line %v: %v", value.Line, err)
}
var spec docs.ComponentSpec
if aliased.Type, spec, err = docs.GetInferenceCandidateFromYAML(docs.DeprecatedProvider, docs.TypeOutput, value); err != nil {
return fmt.Errorf("line %v: %w", value.Line, err)
}
if spec.Plugin {
pluginNode, err := docs.GetPluginConfigYAML(aliased.Type, value)
if err != nil {
return fmt.Errorf("line %v: %v", value.Line, err)
}
aliased.Plugin = &pluginNode
} else {
aliased.Plugin = nil
}
*conf = Config(aliased)
return nil
}
//------------------------------------------------------------------------------
// New creates an output type based on an output configuration.
func New(
conf Config,
mgr interop.Manager,
log log.Modular,
stats metrics.Type,
pipelines ...iprocessor.PipelineConstructorFunc,
) (output.Streamed, error) | {
if mgrV2, ok := mgr.(interface {
NewOutput(Config, ...iprocessor.PipelineConstructorFunc) (output.Streamed, error)
}); ok {
return mgrV2.NewOutput(conf, pipelines...)
}
if c, ok := Constructors[conf.Type]; ok {
return c.constructor(conf, mgr, log, stats, pipelines...)
}
return nil, component.ErrInvalidType("output", conf.Type)
} | identifier_body |
|
assistant.js | document).on("keyup", "#searchAssistantInput", function ()
{
$(".assistant-list-thumb").addClass("d-none").removeClass("d-flex");
$(".assistant-list-thumb:contains('" + $(this).val() + "')").removeClass("d-none").addClass("d-flex");
});
});
var openAssistanFormModal = function (assistant = false)
{
var title = assistant ? "<i class='iconsminds-file-edit with-rotate-icon'></i> Editar Asistente" :
"<i class='iconsminds-add-user'></i> Crear Nuevo Asistente";
var label = assistant ? "Editar" : "Crear";
$("#create-assistant-modal-title").html(title);
$("#create-assistant-modal-button").html(label);
if (assistant) setEditAssistantFormValues(assistant);
$("#create-assistant-modal").modal();
};
var setEditAssistantFormValues = function (assistant)
{
$.each(assistant, function (key, value)
{
$("#create-assistant-form [name='" + key + "']").val(value);
$("#create-assistant-form input[type='checkbox'][name='" + key + "']").prop("checked", value == 1);
$("#create-assistant-form textarea[name='" + key + "']").html(value);
});
$("#create-assistant-img").attr("src", assistant.basicDataPhoto);
}
var loadAssistantSelectedImage = async function (input)
{
if (typeof cropper !== 'undefined' && cropper !== null)
cropper.destroy();
var loadedImage = await getInputImageData(input);
$("#create-assistant-img").prop("src", loadedImage);
var image = document.querySelector('#create-assistant-img');
cropper = new Cropper(image, { aspectRatio: 1, movable: true, dragMode: "move", viewMode: 2 });
}
var loadAssistants = async function ()
{
let assistants = await webClient.RequestAsync("assistant/getAssistants", "", webClient.ContentType.DEFAULT);
if (assistants.status === REQUEST_STATUS.ERROR) return;
if (typeof assistants.data === "undefined" || assistants.data === '' || assistants.data.length === 0) return;
showAssistants(assistants.data);
};
var showAssistants = function (assistants)
{
var assistantsHtml = "";
$.each(assistants, function (index, assistant)
{
assistantsHtml += "<div class='card mb-3 assistant-list-thumb'>"
+ "<div class='d-flex flex-row'>"
+ getAssistantThumbImageHtml(assistant)
+ getAssistantBasicDataHtml(assistant)
+ "<div class='d-flex p-4'>"
+ "<button class='btn btn-outline-theme-3 icon-button rotate-icon-click collapsed align-self-center'"
+ " type='button' data-toggle='collapse' data-target='#assistant-data-collapse-container-" + index
+ "' aria-expanded='false' aria-controls='q2'>"
+ "<i class='simple-icon-arrow-down with-rotate-icon'></i>"
+ "</button>"
+ "</div>"
+ "</div>"
+ getAssistantCollapsedDataHtml(index, assistant)
+ "</div>";
});
$("#assistants-list-container").html(assistantsHtml);
initAssistantsCalendars();
};
var initAssistantsCalendars = function ()
{
$.each($(".assistant-availability-calendar"), function (index, calendar)
{
var assistant = $(calendar).data("assistant");
$(calendar).fullCalendar
({
header: { left: 'prev,next today', center: 'title', right: 'month,agendaWeek,agendaDay,listWeek' },
defaultDate: moment().format("YYYY-MM-DD"),
navLinks: true,
eventLimit: true,
events: assistant.Events,
dayClick: function (date, jsEvent, view)
{
console.log("DAY SELECTED ==> ", date, jsEvent, view);
$("#edit-availability-date-title").html(moment(date).format("MMM DD YYYY"));
$("#assistant-availability-name").html(assistant.Name);
$("#edit-availability-form [name='assistantId']").val(assistant.Id);
$("#edit-availability-form [name='AvailabilityDate']").val(moment(date).format("YYYY-MM-DD"));
$("#edit-availability-modal").modal();
}
});
});
};
var getAssistantThumbImageHtml = function (assistant)
{
var imageHtml = "<div class='border-right list-thumbnail card-img-left h-auto d-none d-lg-block' "
+ "style='background: url(" + assistant.basicDataPhoto + ") center no-repeat; background-size: cover; width: 8%;'></div>"
+ "<div class='border-right list-thumbnail card-img-left w-20 h-auto d-lg-none' "
+ "style='background: url(" + assistant.basicDataPhoto + ") center no-repeat; background-size: cover;'></div>";
return imageHtml;
};
var getAssistantBasicDataHtml = function (assistant)
{
var assistantName = assistant.basicDataFirstName + " " + assistant.basicDataLastName;
var basicDataHtml = "<div class='d-flex flex-grow-1 min-width-zero'>"
+ "<div class='card-body align-self-center d-flex flex-column flex-lg-row justify-content-between min-width-zero align-items-lg-center'>"
+ "<a href='' class='w-20 w-sm-100'><p class='list-item-heading mb-0 truncate'>" + assistantName + "</p></a>"
+ "<p class='mb-0 text-muted w-15 w-sm-100'>"
+ "<span class='glyph-icon iconsminds-id-card align-text-top' style='font-size: 25px;'></span> "
+ "<span class='align-middle'>" + assistant.basicDataDocNumber + "</span>"
+ "</p>"
+ "<p class='mb-0 text-muted w-15 w-sm-100'>"
+ "<span class='glyph-icon iconsminds-smartphone-4 align-text-top' style='font-size: 25px;'></span> "
+ "<span class='align-middle'>" + assistant.personalDataCellphone + "</span>"
+ "</p>"
+ "<div class='mb-2 d-md-none'></div>"
+ "</div>"
+ "</div>";
//+ getAssistantStatusSelectHtml()
return basicDataHtml;
};
var getAssistantStatusSelectHtml = function ()
{
var assistantStatusSelect = "<div class='w-15 w-sm-100 form-group m-0'>"
+ "<select id='inputState' class='form-control'>"
+ "<option value='1'>Activo</option>"
+ "</select >"
+ "</div > ";
return assistantStatusSelect;
};
var getAssistantCollapsedDataHtml = function (index, assistant)
{
var assistantAddress = assistant.personalDataAddress + " " + assistant.personalDataAddressComplement + " " + assistant.personalDataAddressLocality;
var assistantName = assistant.basicDataFirstName + " " + assistant.basicDataLastName;
var assistantCalendarData = { Name: assistantName, Id: assistant.assistantId, Events: assistant.Availability };
var assistatntDataHtml = "<div class='collapse p-3 border-top' id='assistant-data-collapse-container-" + index + "'>"
+ "<div class='row'>"
+ "<div class='col-sm-5'>"
+ "<div class='card'>"
+ "<div class='card-body'>"
+ "<h4>Información personal</h4>"
+ "<a class='edit-assistant-link position-absolute border p-2 rounded-lg' href='' style='top: 15px; right: 15px;' data-assistant='" + JSON.stringify(assistant) + "'>"
+ "<i class='iconsminds-file-edit with-rotate-icon'></i>Editar" | + "<div class='overflow-auto'><table class='table table-sm table-striped'>"
+ "<tr><th>Dirección:</th><td>" + assistantAddress + "<td></tr>"
+ "<tr><th>Email:</th><td>" + assistant.personalDataEmailAddress + "<td></tr>"
+ "<tr><th>Teléfono:</th><td>" + assistant.personalDataTelephone + "<td></tr>"
+ "<tr><th>Sexo:</th><td>" + assistant.sex + "<td></tr>"
+ "</table></div>"
+ "<h4>Información profesional</h4>"
+ "<div class='overflow-auto'><table class='table table-sm table-striped'>"
+ "<tr><th>Estudios:</th><td>" + assistant.School + "</td></tr>"
+ "<tr><th>Título:</th><td>" + assistant.professionalJobTitle + "</td></tr>"
+ "<tr><th>Compañía:</th><td>" + assistant.CompanyName + "</td></tr>"
+ "<tr><th>Fecha ingreso:</th><td>" + assistant.companyBeginDate + "</td></tr>"
+ "<tr><th>Aspiración salarial:</th><td>" + assistant.professionalSalaryAspiration + "</td></tr>"
+ "<tr><th>Resumen profesional</th><td>" + assistant.professionalResume + "</td></tr>"
+ "<tr><th>Valoración del jefe</th><td>" + assistant.bossObservation + "</td></ | + "</a>" | random_line_split |
infer_lst.py | Enhance
import torchvision.transforms as T
import numpy as np
import torch
from torch.utils.data import DataLoader
import datasets
import util.misc as utils
from util import box_ops
import datasets.samplers as samplers
from datasets import build_dataset, get_coco_api_from_dataset
from engine import evaluate, train_one_epoch
from models import build_model
import time
import os
def get_args_parser():
parser = argparse.ArgumentParser('Deformable DETR Detector', add_help=False)
parser.add_argument('--lr', default=2e-4, type=float)
parser.add_argument('--lr_backbone_names', default=["backbone.0"], type=str, nargs='+')
parser.add_argument('--lr_backbone', default=2e-5, type=float)
parser.add_argument('--lr_linear_proj_names', default=['reference_points', 'sampling_offsets'], type=str, nargs='+')
parser.add_argument('--lr_linear_proj_mult', default=0.1, type=float)
parser.add_argument('--batch_size', default=2, type=int)
parser.add_argument('--weight_decay', default=1e-4, type=float)
parser.add_argument('--epochs', default=50, type=int)
parser.add_argument('--lr_drop', default=40, type=int)
parser.add_argument('--lr_drop_epochs', default=None, type=int, nargs='+')
parser.add_argument('--clip_max_norm', default=0.1, type=float,
help='gradient clipping max norm')
parser.add_argument('--sgd', action='store_true')
# Variants of Deformable DETR
parser.add_argument('--with_box_refine', default=False, action='store_true')
parser.add_argument('--two_stage', default=False, action='store_true')
# Model parameters
parser.add_argument('--frozen_weights', type=str, default=None,
help="Path to the pretrained model. If set, only the mask head will be trained")
# * Backbone
parser.add_argument('--backbone', default='resnet50', type=str,
help="Name of the convolutional backbone to use")
parser.add_argument('--dilation', action='store_true',
help="If true, we replace stride with dilation in the last convolutional block (DC5)")
parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),
help="Type of positional embedding to use on top of the image features")
parser.add_argument('--position_embedding_scale', default=2 * np.pi, type=float,
help="position / size * scale")
parser.add_argument('--num_feature_levels', default=4, type=int, help='number of feature levels')
# * Transformer
parser.add_argument('--enc_layers', default=6, type=int,
help="Number of encoding layers in the transformer")
parser.add_argument('--dec_layers', default=6, type=int,
help="Number of decoding layers in the transformer")
parser.add_argument('--dim_feedforward', default=1024, type=int,
help="Intermediate size of the feedforward layers in the transformer blocks")
parser.add_argument('--hidden_dim', default=256, type=int,
help="Size of the embeddings (dimension of the transformer)")
parser.add_argument('--dropout', default=0.1, type=float,
help="Dropout applied in the transformer")
parser.add_argument('--nheads', default=8, type=int,
help="Number of attention heads inside the transformer's attentions")
parser.add_argument('--num_queries', default=300, type=int,
help="Number of query slots")
parser.add_argument('--dec_n_points', default=4, type=int)
parser.add_argument('--enc_n_points', default=4, type=int)
# * Segmentation
parser.add_argument('--masks', action='store_true',
help="Train segmentation head if the flag is provided")
# Loss
parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',
help="Disables auxiliary decoding losses (loss at each layer)")
# * Matcher
parser.add_argument('--set_cost_class', default=2, type=float,
help="Class coefficient in the matching cost")
parser.add_argument('--set_cost_bbox', default=5, type=float,
help="L1 box coefficient in the matching cost")
parser.add_argument('--set_cost_giou', default=2, type=float,
help="giou box coefficient in the matching cost")
# * Loss coefficients
parser.add_argument('--mask_loss_coef', default=1, type=float) | parser.add_argument('--focal_alpha', default=0.25, type=float)
# dataset parameters
parser.add_argument('--dataset_file', default='ICDAR2013')
parser.add_argument('--coco_path', default='./data/coco', type=str)
parser.add_argument('--coco_panoptic_path', type=str)
parser.add_argument('--remove_difficult', action='store_true')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--imgs_dir', type=str, help='input images folder for inference')
parser.add_argument('--eval', action='store_true')
parser.add_argument('--num_workers', default=2, type=int)
parser.add_argument('--cache_mode', default=False, action='store_true', help='whether to cache images on memory')
return parser
# standard PyTorch mean-std input image normalization
transform = T.Compose([
T.Resize(800),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
label_names = ['table', 'figure', 'natural_image', 'logo', 'signature']
colors = ['red', 'blue', 'green', 'yellow', 'black']
def main(args):
utils.init_distributed_mode(args)
print("git:\n {}\n".format(utils.get_sha()))
if args.frozen_weights is not None:
assert args.masks, "Frozen training is meant for segmentation only"
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
model, criterion, postprocessors = build_model(args)
model.to(device)
checkpoint = torch.load(args.resume, map_location='cpu')
model.load_state_dict(checkpoint['model'], strict=False)
if torch.cuda.is_available():
model.cuda()
model.eval()
for img_file in os.listdir(args.imgs_dir):
t0 = time.time()
img_path = os.path.join(args.imgs_dir, img_file)
out_imgName = './visualize/'+'out_'+img_file[:-4]+'.png'
im = Image.open(img_path)
# mean-std normalize the input image (batch-size: 1)
img = transform(im).unsqueeze(0)
img=img.cuda()
# propagate through the model
outputs = model(img)
out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']
prob = out_logits.sigmoid()
topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1)
scores = topk_values
topk_boxes = topk_indexes // out_logits.shape[2]
labels = topk_indexes % out_logits.shape[2]
boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1,1,4))
keep = scores[0] > 0.2
boxes = boxes[0, keep]
labels = labels[0, keep]
# and from relative [0, 1] to absolute [0, height] coordinates
im_h,im_w = im.size
#print('im_h,im_w',im_h,im_w)
target_sizes =torch.tensor([[im_w,im_h]])
target_sizes =target_sizes.cuda()
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
boxes = boxes * scale_fct[:, None, :]
print(time.time()-t0)
#plot_results
source_img = Image.open(img_path).convert("RGBA")
fnt = ImageFont.truetype("/content/content/Deformable-DETR/font/Aaargh.ttf", 18)
draw = ImageDraw.Draw(source_img)
#print ('label' , labels.tolist())
label_list | parser.add_argument('--dice_loss_coef', default=1, type=float)
parser.add_argument('--cls_loss_coef', default=2, type=float)
parser.add_argument('--bbox_loss_coef', default=5, type=float)
parser.add_argument('--giou_loss_coef', default=2, type=float) | random_line_split |
infer_lst.py | type=int)
parser.add_argument('--lr_drop_epochs', default=None, type=int, nargs='+')
parser.add_argument('--clip_max_norm', default=0.1, type=float,
help='gradient clipping max norm')
parser.add_argument('--sgd', action='store_true')
# Variants of Deformable DETR
parser.add_argument('--with_box_refine', default=False, action='store_true')
parser.add_argument('--two_stage', default=False, action='store_true')
# Model parameters
parser.add_argument('--frozen_weights', type=str, default=None,
help="Path to the pretrained model. If set, only the mask head will be trained")
# * Backbone
parser.add_argument('--backbone', default='resnet50', type=str,
help="Name of the convolutional backbone to use")
parser.add_argument('--dilation', action='store_true',
help="If true, we replace stride with dilation in the last convolutional block (DC5)")
parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),
help="Type of positional embedding to use on top of the image features")
parser.add_argument('--position_embedding_scale', default=2 * np.pi, type=float,
help="position / size * scale")
parser.add_argument('--num_feature_levels', default=4, type=int, help='number of feature levels')
# * Transformer
parser.add_argument('--enc_layers', default=6, type=int,
help="Number of encoding layers in the transformer")
parser.add_argument('--dec_layers', default=6, type=int,
help="Number of decoding layers in the transformer")
parser.add_argument('--dim_feedforward', default=1024, type=int,
help="Intermediate size of the feedforward layers in the transformer blocks")
parser.add_argument('--hidden_dim', default=256, type=int,
help="Size of the embeddings (dimension of the transformer)")
parser.add_argument('--dropout', default=0.1, type=float,
help="Dropout applied in the transformer")
parser.add_argument('--nheads', default=8, type=int,
help="Number of attention heads inside the transformer's attentions")
parser.add_argument('--num_queries', default=300, type=int,
help="Number of query slots")
parser.add_argument('--dec_n_points', default=4, type=int)
parser.add_argument('--enc_n_points', default=4, type=int)
# * Segmentation
parser.add_argument('--masks', action='store_true',
help="Train segmentation head if the flag is provided")
# Loss
parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',
help="Disables auxiliary decoding losses (loss at each layer)")
# * Matcher
parser.add_argument('--set_cost_class', default=2, type=float,
help="Class coefficient in the matching cost")
parser.add_argument('--set_cost_bbox', default=5, type=float,
help="L1 box coefficient in the matching cost")
parser.add_argument('--set_cost_giou', default=2, type=float,
help="giou box coefficient in the matching cost")
# * Loss coefficients
parser.add_argument('--mask_loss_coef', default=1, type=float)
parser.add_argument('--dice_loss_coef', default=1, type=float)
parser.add_argument('--cls_loss_coef', default=2, type=float)
parser.add_argument('--bbox_loss_coef', default=5, type=float)
parser.add_argument('--giou_loss_coef', default=2, type=float)
parser.add_argument('--focal_alpha', default=0.25, type=float)
# dataset parameters
parser.add_argument('--dataset_file', default='ICDAR2013')
parser.add_argument('--coco_path', default='./data/coco', type=str)
parser.add_argument('--coco_panoptic_path', type=str)
parser.add_argument('--remove_difficult', action='store_true')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--imgs_dir', type=str, help='input images folder for inference')
parser.add_argument('--eval', action='store_true')
parser.add_argument('--num_workers', default=2, type=int)
parser.add_argument('--cache_mode', default=False, action='store_true', help='whether to cache images on memory')
return parser
# standard PyTorch mean-std input image normalization
transform = T.Compose([
T.Resize(800),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
label_names = ['table', 'figure', 'natural_image', 'logo', 'signature']
colors = ['red', 'blue', 'green', 'yellow', 'black']
def main(args):
utils.init_distributed_mode(args)
print("git:\n {}\n".format(utils.get_sha()))
if args.frozen_weights is not None:
assert args.masks, "Frozen training is meant for segmentation only"
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
model, criterion, postprocessors = build_model(args)
model.to(device)
checkpoint = torch.load(args.resume, map_location='cpu')
model.load_state_dict(checkpoint['model'], strict=False)
if torch.cuda.is_available():
model.cuda()
model.eval()
for img_file in os.listdir(args.imgs_dir):
t0 = time.time()
img_path = os.path.join(args.imgs_dir, img_file)
out_imgName = './visualize/'+'out_'+img_file[:-4]+'.png'
im = Image.open(img_path)
# mean-std normalize the input image (batch-size: 1)
img = transform(im).unsqueeze(0)
img=img.cuda()
# propagate through the model
outputs = model(img)
out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']
prob = out_logits.sigmoid()
topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1)
scores = topk_values
topk_boxes = topk_indexes // out_logits.shape[2]
labels = topk_indexes % out_logits.shape[2]
boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1,1,4))
keep = scores[0] > 0.2
boxes = boxes[0, keep]
labels = labels[0, keep]
# and from relative [0, 1] to absolute [0, height] coordinates
im_h,im_w = im.size
#print('im_h,im_w',im_h,im_w)
target_sizes =torch.tensor([[im_w,im_h]])
target_sizes =target_sizes.cuda()
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
boxes = boxes * scale_fct[:, None, :]
print(time.time()-t0)
#plot_results
source_img = Image.open(img_path).convert("RGBA")
fnt = ImageFont.truetype("/content/content/Deformable-DETR/font/Aaargh.ttf", 18)
draw = ImageDraw.Draw(source_img)
#print ('label' , labels.tolist())
label_list = labels.tolist()
#print("Boxes",boxes,boxes.tolist())
i=0
for xmin, ymin, xmax, ymax in boxes[0].tolist():
draw.rectangle(((xmin, ymin), (xmax, ymax)), outline =colors[label_list[i]-1])
# print('--------')
# print('i= ', i)
# print('label is = ', label_list[i]-1)
# print(label_names[label_list[i]-1])
if ymin-18 >=0 :
ymin = ymin-18
draw.text((xmin, ymin), label_names[label_list[i]-1], anchor = 'md', font=fnt, fill=colors[label_list[i]-1])
i+=1
source_img.save(out_imgName, "png")
results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]
print("Outputs",results)
if __name__ == '__main__':
| parser = argparse.ArgumentParser('Deformable DETR training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args) | conditional_block |
|
infer_lst.py | Enhance
import torchvision.transforms as T
import numpy as np
import torch
from torch.utils.data import DataLoader
import datasets
import util.misc as utils
from util import box_ops
import datasets.samplers as samplers
from datasets import build_dataset, get_coco_api_from_dataset
from engine import evaluate, train_one_epoch
from models import build_model
import time
import os
def get_args_parser():
|
# Model parameters
parser.add_argument('--frozen_weights', type=str, default=None,
help="Path to the pretrained model. If set, only the mask head will be trained")
# * Backbone
parser.add_argument('--backbone', default='resnet50', type=str,
help="Name of the convolutional backbone to use")
parser.add_argument('--dilation', action='store_true',
help="If true, we replace stride with dilation in the last convolutional block (DC5)")
parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),
help="Type of positional embedding to use on top of the image features")
parser.add_argument('--position_embedding_scale', default=2 * np.pi, type=float,
help="position / size * scale")
parser.add_argument('--num_feature_levels', default=4, type=int, help='number of feature levels')
# * Transformer
parser.add_argument('--enc_layers', default=6, type=int,
help="Number of encoding layers in the transformer")
parser.add_argument('--dec_layers', default=6, type=int,
help="Number of decoding layers in the transformer")
parser.add_argument('--dim_feedforward', default=1024, type=int,
help="Intermediate size of the feedforward layers in the transformer blocks")
parser.add_argument('--hidden_dim', default=256, type=int,
help="Size of the embeddings (dimension of the transformer)")
parser.add_argument('--dropout', default=0.1, type=float,
help="Dropout applied in the transformer")
parser.add_argument('--nheads', default=8, type=int,
help="Number of attention heads inside the transformer's attentions")
parser.add_argument('--num_queries', default=300, type=int,
help="Number of query slots")
parser.add_argument('--dec_n_points', default=4, type=int)
parser.add_argument('--enc_n_points', default=4, type=int)
# * Segmentation
parser.add_argument('--masks', action='store_true',
help="Train segmentation head if the flag is provided")
# Loss
parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',
help="Disables auxiliary decoding losses (loss at each layer)")
# * Matcher
parser.add_argument('--set_cost_class', default=2, type=float,
help="Class coefficient in the matching cost")
parser.add_argument('--set_cost_bbox', default=5, type=float,
help="L1 box coefficient in the matching cost")
parser.add_argument('--set_cost_giou', default=2, type=float,
help="giou box coefficient in the matching cost")
# * Loss coefficients
parser.add_argument('--mask_loss_coef', default=1, type=float)
parser.add_argument('--dice_loss_coef', default=1, type=float)
parser.add_argument('--cls_loss_coef', default=2, type=float)
parser.add_argument('--bbox_loss_coef', default=5, type=float)
parser.add_argument('--giou_loss_coef', default=2, type=float)
parser.add_argument('--focal_alpha', default=0.25, type=float)
# dataset parameters
parser.add_argument('--dataset_file', default='ICDAR2013')
parser.add_argument('--coco_path', default='./data/coco', type=str)
parser.add_argument('--coco_panoptic_path', type=str)
parser.add_argument('--remove_difficult', action='store_true')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--imgs_dir', type=str, help='input images folder for inference')
parser.add_argument('--eval', action='store_true')
parser.add_argument('--num_workers', default=2, type=int)
parser.add_argument('--cache_mode', default=False, action='store_true', help='whether to cache images on memory')
return parser
# standard PyTorch mean-std input image normalization
transform = T.Compose([
T.Resize(800),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
label_names = ['table', 'figure', 'natural_image', 'logo', 'signature']
colors = ['red', 'blue', 'green', 'yellow', 'black']
def main(args):
utils.init_distributed_mode(args)
print("git:\n {}\n".format(utils.get_sha()))
if args.frozen_weights is not None:
assert args.masks, "Frozen training is meant for segmentation only"
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
model, criterion, postprocessors = build_model(args)
model.to(device)
checkpoint = torch.load(args.resume, map_location='cpu')
model.load_state_dict(checkpoint['model'], strict=False)
if torch.cuda.is_available():
model.cuda()
model.eval()
for img_file in os.listdir(args.imgs_dir):
t0 = time.time()
img_path = os.path.join(args.imgs_dir, img_file)
out_imgName = './visualize/'+'out_'+img_file[:-4]+'.png'
im = Image.open(img_path)
# mean-std normalize the input image (batch-size: 1)
img = transform(im).unsqueeze(0)
img=img.cuda()
# propagate through the model
outputs = model(img)
out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']
prob = out_logits.sigmoid()
topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1)
scores = topk_values
topk_boxes = topk_indexes // out_logits.shape[2]
labels = topk_indexes % out_logits.shape[2]
boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1,1,4))
keep = scores[0] > 0.2
boxes = boxes[0, keep]
labels = labels[0, keep]
# and from relative [0, 1] to absolute [0, height] coordinates
im_h,im_w = im.size
#print('im_h,im_w',im_h,im_w)
target_sizes =torch.tensor([[im_w,im_h]])
target_sizes =target_sizes.cuda()
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
boxes = boxes * scale_fct[:, None, :]
print(time.time()-t0)
#plot_results
source_img = Image.open(img_path).convert("RGBA")
fnt = ImageFont.truetype("/content/content/Deformable-DETR/font/Aaargh.ttf", 18)
draw = ImageDraw.Draw(source_img)
#print ('label' , labels.tolist())
label | parser = argparse.ArgumentParser('Deformable DETR Detector', add_help=False)
parser.add_argument('--lr', default=2e-4, type=float)
parser.add_argument('--lr_backbone_names', default=["backbone.0"], type=str, nargs='+')
parser.add_argument('--lr_backbone', default=2e-5, type=float)
parser.add_argument('--lr_linear_proj_names', default=['reference_points', 'sampling_offsets'], type=str, nargs='+')
parser.add_argument('--lr_linear_proj_mult', default=0.1, type=float)
parser.add_argument('--batch_size', default=2, type=int)
parser.add_argument('--weight_decay', default=1e-4, type=float)
parser.add_argument('--epochs', default=50, type=int)
parser.add_argument('--lr_drop', default=40, type=int)
parser.add_argument('--lr_drop_epochs', default=None, type=int, nargs='+')
parser.add_argument('--clip_max_norm', default=0.1, type=float,
help='gradient clipping max norm')
parser.add_argument('--sgd', action='store_true')
# Variants of Deformable DETR
parser.add_argument('--with_box_refine', default=False, action='store_true')
parser.add_argument('--two_stage', default=False, action='store_true') | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.