file_name | prefix | suffix | middle | fim_type
---|---|---|---|---
nc_read_functions.py | = varObj[logicSlices]
if (len(ncDims) == 3) and (stageredDim > 0):
if stageredDim == 1:
varData = (varData[:, 0:-1, :] + varData[:, 1:, :]) * 0.5
elif stageredDim == 2:
varData = (varData[:, :, 0:-1] + varData[:, :, 1:]) * 0.5
elif (len(ncDims) == 4) and (stageredDim > 0):
if stageredDim == 1:
varData = (varData[:, 0:-1, :, :] + varData[:, 1:, :, :]) * 0.5
elif stageredDim == 2:
varData = (varData[:, :, 0:-1, :] + varData[:, :, 1:, :]) * 0.5
elif stageredDim == 3:
varData = (varData[:, :, :, 0:-1] + varData[:, :, :, 1:]) * 0.5
return varData.data
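# --- Editor's illustrative sketch (not part of the original module) ---------
# The averaging above converts values on a staggered WRF dimension (length
# N+1) into cell-centred values (length N) by taking the midpoint of adjacent
# planes. A minimal, self-contained NumPy illustration of that step:
import numpy as np  # already imported by the module; repeated so the sketch stands alone

staggered = np.array([0.0, 1.0, 2.0, 3.0, 4.0])    # one staggered column
centred = (staggered[0:-1] + staggered[1:]) * 0.5  # -> [0.5, 1.5, 2.5, 3.5]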
def readAllvars(ncfid, varsWRF, iTimes, iBT, iSN, iWE):
'''
Function to loop over the same netcdf to extract several variables for
a given set of times and subdomain.
It is more efficient as it doesn't need to open and close the nc
file several times
Parameters
----------
ncfid : file identifier class
netcdf file identifier.
varsWRF : dict
dictionary with the list of variable names (strings) to extract from the netcdf.
iTimes : int, logic
index of the times to extract from the netcdf data.
iBT : int
CENTERED (i.e. unstaggered) indexes of desired bottom-top levels.
iSN : int
CENTERED (i.e. unstaggered) indexes of desired south-north coordinates.
iWE : int
CENTERED (i.e. unstaggered) indexes of desired west-east coordinates.
Returns
-------
dOut : dict
dictionary with the variableName:nparray of the subset of data from the netcdf
for each variable.
'''
# create output dictionary
dOut = {}
# loop over all variables
for v2extract in varsWRF:
# print(' extracting from netcdf: '+v2extract)
if v2extract == 'L':
# RMOL is the inverse Obukhov length (1/L), so we invert it and store it directly as L
dOut['L'] = 1. / getVarEff(ncfid, 'RMOL', iTimes, iBT, iSN, iWE)
dOut['L'][np.isinf(dOut['L'])] = np.nan
elif v2extract == 'TENDENCIES':
# coriolis as it is specified in nc file
fc = getVarEff(ncfid, 'F', iTimes, iBT, iSN, iWE)
fc = fc.mean()
dOut['Vg'] = -(1 / fc) * getVarEff(ncfid, 'RU_TEND_PGF', iTimes, iBT, iSN, iWE)
dOut['Ug'] = (1 / fc) * getVarEff(ncfid, 'RV_TEND_PGF', iTimes, iBT, iSN, iWE)
dOut['UADV'] = (1 / fc) * getVarEff(ncfid, 'RU_TEND_ADV', iTimes, iBT, iSN, iWE)
dOut['VADV'] = (1 / fc) * getVarEff(ncfid, 'RV_TEND_ADV', iTimes, iBT, iSN, iWE)
dOut['POT_ADV'] = getVarEff(ncfid, 'T_TEND_ADV', iTimes, iBT, iSN, iWE)
elif v2extract == 'Th':
# The Potential temperature is extracted as the perturbation potential temperature + base state
# temperature (T00).
T00 = ncfid.variables.get('T00')[iTimes]
dOut['Th'] = getVarEff(ncfid, 'T', iTimes, iBT, iSN, iWE)
for iT in range(len(T00)):
dOut['Th'][iT, :, :, :] = dOut['Th'][iT, :, :, :] + T00[iT]
elif v2extract == 'TKE':
dOut['TKE'] = getVarEff(ncfid, 'TKE_PBL', iTimes, iBT, iSN, iWE)
else:
dOut[v2extract] = getVarEff(ncfid, v2extract, iTimes, iBT, iSN, iWE)
# dummy matrix with zeros, to be added as surface values to 4D data
zSfc = np.zeros((sum(iTimes), 1, len(iSN), len(iWE)))
# add a layer at the surface for some variables
for vN in dOut:
if vN == 'Th':
# for better consistency the skin temperature is added as sfc level
tsk = getVarEff(ncfid, 'TSK', iTimes, iBT, iSN, iWE)
dOut['Th'] = np.concatenate((np.reshape(tsk, zSfc.shape), dOut['Th']), axis=1)
elif len(dOut[vN].shape) == 4:
dOut[vN] = np.concatenate((zSfc, dOut[vN]), axis=1)
return dOut
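# --- Editor's illustrative sketch (not part of the original module) ---------
# The 'TENDENCIES' branch above converts WRF pressure-gradient tendencies into
# geostrophic wind components by dividing by the Coriolis parameter. The
# numbers below are invented (not WRF output) and only show the sign
# convention used above:
import numpy as np

fc_demo = 1.03e-4                       # Coriolis parameter near 45N [1/s]
ru_tend_pgf = np.array([1.0e-3])        # made-up zonal PGF tendency [m/s^2]
rv_tend_pgf = np.array([-2.0e-3])       # made-up meridional PGF tendency [m/s^2]
vg_demo = -(1 / fc_demo) * ru_tend_pgf  # ~ -9.7 m/s
ug_demo = (1 / fc_demo) * rv_tend_pgf   # ~ -19.4 m/s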
def | (ncfid, iTimes, iBT, iSN, iWE):
'''
Memory efficient function to extract the height (averaged in time) of WRF output
The height is provided in 3D (i.e. bottom-top, north-south, west-east)
Parameters
----------
ncfid : file id
file of the netcdf.
iTimes : int, logic
index of the times to extract from the netcdf data.
iBT : int
CENTERED (i.e. unstaggered) indexes of desired bottom-top levels.
iSN : int
CENTERED (i.e. unstaggered) indexes of desired south-north coordinates.
iWE : int
CENTERED (i.e. unstaggered) indexes of desired west-east coordinates.
Returns
-------
out : ndarray
numpy array with the height above sea level
'''
nz = len(iBT) + 1
ltmp = getVarEff(ncfid, 'XLAT', iTimes, iBT, iSN, iWE).mean(axis=0)
LAT = np.tile(ltmp, (nz, 1, 1))
ltmp = getVarEff(ncfid, 'XLONG', iTimes, iBT, iSN, iWE).mean(axis=0)
LON = np.tile(ltmp, (nz, 1, 1))
PHT = (getVarEff(ncfid, 'PH', iTimes, iBT, iSN, iWE) + getVarEff(ncfid, 'PHB', iTimes, iBT, iSN, iWE)) / 9.81
HGT = getVarEff(ncfid, 'HGT', iTimes, iBT, iSN, iWE)
# time-average height
zT = PHT.mean(axis=0)
# time-average surface height
hSfc = np.zeros((1, len(iSN), len(iWE)))
hSfc[0, :, :] = HGT.mean(axis=0)
Z = np.concatenate((hSfc, zT), axis=0)
return LON, LAT, Z
def get_zagl(ncfid, iTimes, iBT, iSN, iWE):
'''
Memory efficient function to extract the height above ground of WRF output
The height is provided in 4D (i.e. with time) dimensions and cell-centered
Parameters
----------
ncfid : file id
file of the netcdf.
iTimes : int, logic
index of the times to extract from the netcdf data.
iBT : int
CENTERED (i.e. unstaggered) indexes of desired bottom-top levels.
iSN : int
CENTERED (i.e. unstaggered) indexes of desired south-north coordinates.
iWE : int
CENTERED (i.e. unstaggered) indexes of desired west-east coordinates.
Returns
-------
out : ndarray
numpy array with the height above ground
'''
# dummy matrix with zeros, to be added as value at the surface
zSfc = np.zeros((sum(iTimes), 1, len(iSN), len(iWE)))
PHT = (getVarEff(ncfid, 'PH', iTimes, iBT, iSN, iWE) + getVarEff(ncfid, 'PHB', iTimes, iBT, iSN, iWE)) / 9.81
HGT = np.reshape(getVarEff(ncfid, 'HGT', iTimes, iBT, iSN, iWE), zSfc.shape)
zaglT = PHT - np.repeat(HGT, PHT.shape[ | get_nc_coordinates | identifier_name |
nc_read_functions.py | = (1 / fc) * getVarEff(ncfid, 'RV_TEND_PGF', iTimes, iBT, iSN, iWE)
dOut['UADV'] = (1 / fc) * getVarEff(ncfid, 'RU_TEND_ADV', iTimes, iBT, iSN, iWE)
dOut['VADV'] = (1 / fc) * getVarEff(ncfid, 'RV_TEND_ADV', iTimes, iBT, iSN, iWE)
dOut['POT_ADV'] = getVarEff(ncfid, 'T_TEND_ADV', iTimes, iBT, iSN, iWE)
elif v2extract == 'Th':
# The Potential temperature is extracted as the perturbation potential temperature + base state
# temperature (T00).
T00 = ncfid.variables.get('T00')[iTimes]
dOut['Th'] = getVarEff(ncfid, 'T', iTimes, iBT, iSN, iWE)
for iT in range(len(T00)):
dOut['Th'][iT, :, :, :] = dOut['Th'][iT, :, :, :] + T00[iT]
elif v2extract == 'TKE':
dOut['TKE'] = getVarEff(ncfid, 'TKE_PBL', iTimes, iBT, iSN, iWE)
else:
dOut[v2extract] = getVarEff(ncfid, v2extract, iTimes, iBT, iSN, iWE)
# dummy matrix with zeros, to be added as surface values to 4D data
zSfc = np.zeros((sum(iTimes), 1, len(iSN), len(iWE)))
# add a layer at the surface for some variables
for vN in dOut:
if vN == 'Th':
# for better consistency the skin temperature is added as sfc level
tsk = getVarEff(ncfid, 'TSK', iTimes, iBT, iSN, iWE)
dOut['Th'] = np.concatenate((np.reshape(tsk, zSfc.shape), dOut['Th']), axis=1)
elif len(dOut[vN].shape) == 4:
dOut[vN] = np.concatenate((zSfc, dOut[vN]), axis=1)
return dOut
def get_nc_coordinates(ncfid, iTimes, iBT, iSN, iWE):
'''
Memory efficient function to extract the height (averaged in time) of WRF output
The height is provided in 3D (i.e. bottom-top, north-south, west-east)
Parameters
----------
ncfid : file id
file of the netcdf.
iTimes : int, logic
index of the times to extract from the netcdf data.
iBT : int
CENTERED (i.e. unstaggered) indexes of desired bottom-top levels.
iSN : int
CENTERED (i.e. unstaggered) indexes of desired south-north coordinates.
iWE : int
CENTERED (i.e. unstaggered) indexes of desired west-east coordinates.
Returns
-------
out : ndarray
numpy array with the height above sea level
'''
nz = len(iBT) + 1
ltmp = getVarEff(ncfid, 'XLAT', iTimes, iBT, iSN, iWE).mean(axis=0)
LAT = np.tile(ltmp, (nz, 1, 1))
ltmp = getVarEff(ncfid, 'XLONG', iTimes, iBT, iSN, iWE).mean(axis=0)
LON = np.tile(ltmp, (nz, 1, 1))
PHT = (getVarEff(ncfid, 'PH', iTimes, iBT, iSN, iWE) + getVarEff(ncfid, 'PHB', iTimes, iBT, iSN, iWE)) / 9.81
HGT = getVarEff(ncfid, 'HGT', iTimes, iBT, iSN, iWE)
# time-average height
zT = PHT.mean(axis=0)
# time-average surface height
hSfc = np.zeros((1, len(iSN), len(iWE)))
hSfc[0, :, :] = HGT.mean(axis=0)
Z = np.concatenate((hSfc, zT), axis=0)
return LON, LAT, Z
def get_zagl(ncfid, iTimes, iBT, iSN, iWE):
'''
Memory efficient function to extract the height above ground of WRF output
The height is provided in 4D (i.e. with time) dimensions and cell-centered
Parameters
----------
ncfid : file id
file of the netcdf.
iTimes : int, logic
index of the times to extract from the netcdf data.
iBT : int
CENTERED (i.e. unstaggered) indexes of desired bottom-top levels.
iSN : int
CENTERED (i.e. unstaggered) indexes of desired south-north coordinates.
iWE : int
CENTERED (i.e. unstaggered) indexes of desired west-east coordinates.
Returns
-------
out : ndarray
numpy array with the height above ground
'''
# dummy matrix with zeros, to be added as value at the surface
zSfc = np.zeros((sum(iTimes), 1, len(iSN), len(iWE)))
PHT = (getVarEff(ncfid, 'PH', iTimes, iBT, iSN, iWE) + getVarEff(ncfid, 'PHB', iTimes, iBT, iSN, iWE)) / 9.81
HGT = np.reshape(getVarEff(ncfid, 'HGT', iTimes, iBT, iSN, iWE), zSfc.shape)
zaglT = PHT - np.repeat(HGT, PHT.shape[1], axis=1)
return np.concatenate((zSfc, zaglT), axis=1)
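# --- Editor's illustrative sketch (not part of the original module) ---------
# get_zagl relies on the relation z_agl = (PH + PHB) / g - HGT, i.e. geopotential
# height above sea level minus terrain height. With synthetic values:
import numpy as np

g = 9.81
ph, phb = np.array([0.0, 500.0]), np.array([981.0, 981.0])  # made-up geopotentials [m2/s2]
hgt = 100.0                                                  # made-up terrain height [m]
z_asl = (ph + phb) / g                                       # [100.0, 150.97...] m above sea level
z_agl = z_asl - hgt                                          # [0.0, 50.97...] m above ground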
def coriolis(lat):
'''
Compute the Coriolis frequency based on the latitude parameter
Parameters
----------
lat : float
latitude .
Returns
-------
fc : float
Coriolis frequency.
'''
# angular speed of the Earth [rad/s]
omega = 7.2921159e-5
return 2 * omega * np.sin(lat * np.pi / 180)
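# --- Editor's illustrative sketch (not part of the original module) ---------
# Quick sanity check of the helper above: at 45 degrees latitude the Coriolis
# frequency comes out near the familiar mid-latitude value of ~1.03e-4 1/s.
import numpy as np

omega = 7.2921159e-5
fc_45 = 2 * omega * np.sin(45.0 * np.pi / 180)  # ~1.031e-4 s^-1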
def getneighbours(Nxy, inear, jnear):
'''
Function to obtain the indexes of the neighbours of a given central index
from WRF in a given spatial box
INPUTS:
Nxy = number of points to include in the spatial averaging box
inear = given index in the west-east direction
jnear = given index in the south-north direction
'''
if Nxy == 1: # nearest grid point
ixav = np.array([inear]).astype(int)
iyav = np.array([jnear]).astype(int)
elif Nxy == 2: # four nearest grid points
ixav = np.array([inear, inear + 1]).astype(int)
iyav = np.array([jnear, jnear + 1]).astype(int)
else:
if Nxy % 2 == 1: # Nxy (odd) nearest points
ixav = np.arange(inear - 0.5 * (Nxy - 1), inear + 0.5 * (Nxy - 1) + 1).astype(int)
iyav = np.arange(jnear - 0.5 * (Nxy - 1), jnear + 0.5 * (Nxy - 1) + 1).astype(int)
else:
ixav = np.arange(inear - 0.5 * Nxy + 1, inear + 0.5 * Nxy + 1).astype(int)
iyav = np.arange(jnear - 0.5 * Nxy + 1, jnear + 0.5 * Nxy + 1).astype(int)
return ixav, iyav
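# --- Editor's illustrative sketch (not part of the original module) ---------
# Behaviour of getneighbours for a couple of box sizes, with an arbitrary
# centre index (inear = 10):
#   Nxy = 3 (odd)  -> ixav = [9, 10, 11]       (one point either side of the centre)
#   Nxy = 4 (even) -> ixav = [9, 10, 11, 12]   (one left, two right of the centre)
import numpy as np

inear = 10
ixav_odd = np.arange(inear - 1, inear + 2).astype(int)   # matches the Nxy == 3 branch
ixav_even = np.arange(inear - 1, inear + 3).astype(int)  # matches the Nxy == 4 branch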
def get_index_of_subset_domain(ncfid, lat_s, lon_s, L=None):
| lat = ncfid.variables.get('XLAT')[0, :, 0]
lon = ncfid.variables.get('XLONG')[0, 0, :]
# makes sure central box coordinates lie inside wrf domain box
if (min(abs(lat - lat_s)) > max(np.diff(lat))) | (min(abs(lon - lon_s)) > max(np.diff(lon))):
raise Exception("ERROR: lat | lon chosen is outside wrf domain box")
# get the grid-spacing assuming dx=dy
dxy = ncfid.getncattr('DX')
# Extract only a box of LxL of all wrf domain to make the process more memory efficient
# the selection is made by the grid-spacing of wrf output
if L is None:
if dxy >= 9e3:
L = 55e3
elif dxy >= 1e3:
L = 10e3
else:
L = 1e3
| identifier_body |
|
nc_read_functions.py |
elif iStr.startswith('west'):
logicSlices.append(iWE)
# Extract and unstager the data
varData = varObj[logicSlices]
if (len(ncDims) == 3) and (stageredDim > 0):
if stageredDim == 1:
varData = (varData[:, 0:-1, :] + varData[:, 1:, :]) * 0.5
elif stageredDim == 2:
varData = (varData[:, :, 0:-1] + varData[:, :, 1:]) * 0.5
elif (len(ncDims) == 4) and (stageredDim > 0):
if stageredDim == 1:
varData = (varData[:, 0:-1, :, :] + varData[:, 1:, :, :]) * 0.5
elif stageredDim == 2:
varData = (varData[:, :, 0:-1, :] + varData[:, :, 1:, :]) * 0.5
elif stageredDim == 3:
varData = (varData[:, :, :, 0:-1] + varData[:, :, :, 1:]) * 0.5
return varData.data
def readAllvars(ncfid, varsWRF, iTimes, iBT, iSN, iWE):
'''
Function to loop over the same netcdf to extract several variables for
a given set of times and subdomain.
It is more efficient as it doesn't need to open and close the nc
file several times
Parameters
----------
ncfid : file identifier class
netcdf file identifier.
varsWRF : dict
dictionary with the list of variable names (strings) to extract from the netcdf.
iTimes : int, logic
index of the times to extract from the netcdf data.
iBT : int
CENTERED (i.e. unstaggered) indexes of desired bottom-top levels.
iSN : int
CENTERED (i.e. unstaggered) indexes of desired south-north coordinates.
iWE : int
CENTERED (i.e. unstaggered) indexes of desired west-east coordinates.
Returns
-------
dOut : dict
dictionary with the variableName:nparray of the subset of data from the netcdf
for each variable.
'''
# create output dictionary
dOut = {}
# loop over all variables
for v2extract in varsWRF:
# print(' extracting from netcdf: '+v2extract)
if v2extract == 'L':
# RMOL is the inverse Obukhov length (1/L), so we invert it and store it directly as L
dOut['L'] = 1. / getVarEff(ncfid, 'RMOL', iTimes, iBT, iSN, iWE)
dOut['L'][np.isinf(dOut['L'])] = np.nan
elif v2extract == 'TENDENCIES':
# coriolis as it is specified in nc file
fc = getVarEff(ncfid, 'F', iTimes, iBT, iSN, iWE)
fc = fc.mean()
dOut['Vg'] = -(1 / fc) * getVarEff(ncfid, 'RU_TEND_PGF', iTimes, iBT, iSN, iWE)
dOut['Ug'] = (1 / fc) * getVarEff(ncfid, 'RV_TEND_PGF', iTimes, iBT, iSN, iWE)
dOut['UADV'] = (1 / fc) * getVarEff(ncfid, 'RU_TEND_ADV', iTimes, iBT, iSN, iWE)
dOut['VADV'] = (1 / fc) * getVarEff(ncfid, 'RV_TEND_ADV', iTimes, iBT, iSN, iWE)
dOut['POT_ADV'] = getVarEff(ncfid, 'T_TEND_ADV', iTimes, iBT, iSN, iWE)
elif v2extract == 'Th':
# The Potential temperature is extracted as the perturbation potential temperature + base state
# temperature (T00).
T00 = ncfid.variables.get('T00')[iTimes]
dOut['Th'] = getVarEff(ncfid, 'T', iTimes, iBT, iSN, iWE)
for iT in range(len(T00)):
dOut['Th'][iT, :, :, :] = dOut['Th'][iT, :, :, :] + T00[iT]
elif v2extract == 'TKE':
dOut['TKE'] = getVarEff(ncfid, 'TKE_PBL', iTimes, iBT, iSN, iWE)
else:
dOut[v2extract] = getVarEff(ncfid, v2extract, iTimes, iBT, iSN, iWE)
# dummy matrix with zeros, to be added as surface values to 4D data
zSfc = np.zeros((sum(iTimes), 1, len(iSN), len(iWE)))
# add a layer at the surface for some variables
for vN in dOut:
if vN == 'Th':
# for better consistency the skin temperature is added as sfc level
tsk = getVarEff(ncfid, 'TSK', iTimes, iBT, iSN, iWE)
dOut['Th'] = np.concatenate((np.reshape(tsk, zSfc.shape), dOut['Th']), axis=1)
elif len(dOut[vN].shape) == 4:
dOut[vN] = np.concatenate((zSfc, dOut[vN]), axis=1)
return dOut
def get_nc_coordinates(ncfid, iTimes, iBT, iSN, iWE):
'''
Memory efficient function to extract the height (averaged in time) of WRF output
The height is provided in 3D (i.e. bottom-top, north-south, west-east)
Parameters
----------
ncfid : file id
file of the netcdf.
iTimes : int, logic
index of the times to extract from the netcdf data.
iBT : int
CENTERED (i.e. unstaggered) indexes of desired bottom-top levels.
iSN : int
CENTERED (i.e. unstaggered) indexes of desired south-north coordinates.
iWE : int
CENTERED (i.e. unstaggered) indexes of desired west-east coordinates.
Returns
-------
out : ndarray
numpy array with the height above sea level
'''
nz = len(iBT) + 1
ltmp = getVarEff(ncfid, 'XLAT', iTimes, iBT, iSN, iWE).mean(axis=0)
LAT = np.tile(ltmp, (nz, 1, 1))
ltmp = getVarEff(ncfid, 'XLONG', iTimes, iBT, iSN, iWE).mean(axis=0)
LON = np.tile(ltmp, (nz, 1, 1))
PHT = (getVarEff(ncfid, 'PH', iTimes, iBT, iSN, iWE) + getVarEff(ncfid, 'PHB', iTimes, iBT, iSN, iWE)) / 9.81
HGT = getVarEff(ncfid, 'HGT', iTimes, iBT, iSN, iWE)
# time-average height
zT = PHT.mean(axis=0)
# time-average surface height
hSfc = np.zeros((1, len(iSN), len(iWE)))
hSfc[0, :, :] = HGT.mean(axis=0)
Z = np.concatenate((hSfc, zT), axis=0)
return LON, LAT, Z
def get_zagl(ncfid, iTimes, iBT, iSN, iWE):
'''
Memory efficient function to extract the height above ground of WRF output
The height is provided in 4D (i.e. with time) dimensions and cell-centered
Parameters
----------
ncfid : file id
file of the netcdf.
iTimes : int, logic
index of the times to extract from the netcdf data.
iBT : int
CENTERED (i.e. unstaggered) indexes of desired bottom-top levels.
iSN : int
CENTERED (i.e. unstaggered) indexes of desired south-north coordinates.
iWE : int
CENTERED (i.e. unstaggered) indexes of desired west-east coordinates.
Returns
-------
out : ndarray
numpy array with the height above ground
'''
# dummy matrix with zeros, to be added as value at the surface
zSfc = np.zeros((sum(iTimes), 1, len(iSN), len(iWE)))
PHT = (getVarEff(ncfid, 'PH', iTimes, iBT, iSN, iWE) + getVarEff(ncfid, 'PHB', iTimes, iBT, iSN, iWE)) / 9.81
HGT = np.reshape(getVarEff(ncfid, 'HGT | logicSlices.append(iSN) | conditional_block |
|
nc_read_functions.py | = varObj[logicSlices]
if (len(ncDims) == 3) and (stageredDim > 0):
if stageredDim == 1:
varData = (varData[:, 0:-1, :] + varData[:, 1:, :]) * 0.5
elif stageredDim == 2:
varData = (varData[:, :, 0:-1] + varData[:, :, 1:]) * 0.5
elif (len(ncDims) == 4) and (stageredDim > 0):
if stageredDim == 1:
varData = (varData[:, 0:-1, :, :] + varData[:, 1:, :, :]) * 0.5
elif stageredDim == 2:
varData = (varData[:, :, 0:-1, :] + varData[:, :, 1:, :]) * 0.5
elif stageredDim == 3:
varData = (varData[:, :, :, 0:-1] + varData[:, :, :, 1:]) * 0.5
return varData.data
def readAllvars(ncfid, varsWRF, iTimes, iBT, iSN, iWE):
'''
Function to loop over the same netcdf to extract several variables for
a given set of times and subdomain.
It is more efficient as it doesn't need to open and close the nc
file several times
Parameters
----------
ncfid : file identifier class
netcdf file identifier.
varsWRF : dict
dictionary with the list of variable names (strings) to extract from the netcdf.
iTimes : int, logic
index of the times to extract from the netcdf data.
iBT : int
CENTERED (i.e. unstaggered) indexes of desired bottom-top levels.
iSN : int
CENTERED (i.e. unstaggered) indexes of desired south-north coordinates.
iWE : int
CENTERED (i.e. unstaggered) indexes of desired west-east coordinates.
Returns
-------
dOut : dict
dictionary with the variableName:nparray of the subset of data from the netcdf
for each variable.
'''
# create output dictionary
dOut = {}
# loop over all variables
for v2extract in varsWRF:
# print(' extracting from netcdf: '+v2extract)
if v2extract == 'L':
# RMOL is the inverse Obukhov length (1/L), so we invert it and store it directly as L
dOut['L'] = 1. / getVarEff(ncfid, 'RMOL', iTimes, iBT, iSN, iWE)
dOut['L'][np.isinf(dOut['L'])] = np.nan
elif v2extract == 'TENDENCIES':
# coriolis as it is specified in nc file
fc = getVarEff(ncfid, 'F', iTimes, iBT, iSN, iWE)
fc = fc.mean()
dOut['Vg'] = -(1 / fc) * getVarEff(ncfid, 'RU_TEND_PGF', iTimes, iBT, iSN, iWE)
dOut['Ug'] = (1 / fc) * getVarEff(ncfid, 'RV_TEND_PGF', iTimes, iBT, iSN, iWE)
dOut['UADV'] = (1 / fc) * getVarEff(ncfid, 'RU_TEND_ADV', iTimes, iBT, iSN, iWE)
dOut['VADV'] = (1 / fc) * getVarEff(ncfid, 'RV_TEND_ADV', iTimes, iBT, iSN, iWE)
dOut['POT_ADV'] = getVarEff(ncfid, 'T_TEND_ADV', iTimes, iBT, iSN, iWE)
elif v2extract == 'Th':
# The Potential temperature is extracted as the perturbation potential temperature + base state
# temperature (T00).
T00 = ncfid.variables.get('T00')[iTimes]
dOut['Th'] = getVarEff(ncfid, 'T', iTimes, iBT, iSN, iWE)
for iT in range(len(T00)):
dOut['Th'][iT, :, :, :] = dOut['Th'][iT, :, :, :] + T00[iT]
elif v2extract == 'TKE':
dOut['TKE'] = getVarEff(ncfid, 'TKE_PBL', iTimes, iBT, iSN, iWE)
else:
dOut[v2extract] = getVarEff(ncfid, v2extract, iTimes, iBT, iSN, iWE)
# dummy matrix with zeros, to be added as surface values to 4D data
zSfc = np.zeros((sum(iTimes), 1, len(iSN), len(iWE)))
# add a layer at the surface for some variables
for vN in dOut:
if vN == 'Th':
# for better consistency the skin temperature is added as sfc level
tsk = getVarEff(ncfid, 'TSK', iTimes, iBT, iSN, iWE)
dOut['Th'] = np.concatenate((np.reshape(tsk, zSfc.shape), dOut['Th']), axis=1)
elif len(dOut[vN].shape) == 4:
dOut[vN] = np.concatenate((zSfc, dOut[vN]), axis=1)
return dOut
def get_nc_coordinates(ncfid, iTimes, iBT, iSN, iWE):
'''
Memory efficient function to extract the height (averaged in time) of WRF output
The height is provided in 3D (i.e. bottom-top, north-south, west-east)
Parameters
----------
ncfid : file id
file of the netcdf.
iTimes : int, logic
index of the times to extract from the netcdf data.
iBT : int
CENTERED (i.e. unstaggered) indexes of desired bottom-top levels. | iWE : int
CENTERED (i.e. unstaggered) indexes of desired west-east coordinates.
Returns
-------
out : ndarray
numpy array with the height above sea level
'''
nz = len(iBT) + 1
ltmp = getVarEff(ncfid, 'XLAT', iTimes, iBT, iSN, iWE).mean(axis=0)
LAT = np.tile(ltmp, (nz, 1, 1))
ltmp = getVarEff(ncfid, 'XLONG', iTimes, iBT, iSN, iWE).mean(axis=0)
LON = np.tile(ltmp, (nz, 1, 1))
PHT = (getVarEff(ncfid, 'PH', iTimes, iBT, iSN, iWE) + getVarEff(ncfid, 'PHB', iTimes, iBT, iSN, iWE)) / 9.81
HGT = getVarEff(ncfid, 'HGT', iTimes, iBT, iSN, iWE)
# time-average height
zT = PHT.mean(axis=0)
# time-average surface height
hSfc = np.zeros((1, len(iSN), len(iWE)))
hSfc[0, :, :] = HGT.mean(axis=0)
Z = np.concatenate((hSfc, zT), axis=0)
return LON, LAT, Z
def get_zagl(ncfid, iTimes, iBT, iSN, iWE):
'''
Memory efficient function to extract the height above ground of WRF output
The height is provided in 4D (i.e. with time) dimensions and cell-centered
Parameters
----------
ncfid : file id
file of the netcdf.
iTimes : int, logic
index of the times to extract from the netcdf data.
iBT : int
CENTERED (i.e. unstaggered) indexes of desired bottom-top levels.
iSN : int
CENTERED (i.e. unstaggered) indexes of desired south-north coordinates.
iWE : int
CENTERED (i.e. unstaggered) indexes of desired west-east coordinates.
Returns
-------
out : ndarray
numpy array with the height above ground
'''
# dummy matrix with zeros, to be added as value at the surface
zSfc = np.zeros((sum(iTimes), 1, len(iSN), len(iWE)))
PHT = (getVarEff(ncfid, 'PH', iTimes, iBT, iSN, iWE) + getVarEff(ncfid, 'PHB', iTimes, iBT, iSN, iWE)) / 9.81
HGT = np.reshape(getVarEff(ncfid, 'HGT', iTimes, iBT, iSN, iWE), zSfc.shape)
zaglT = PHT - np.repeat(HGT, PHT.shape[1 | iSN : int
CENTERED (i.e. unstaggered) indexes of desired south-north coordinates. | random_line_split |
types.rs | pub structural_type: StructuralType,
}
/// Represents a structural type in a WebAssembly module.
#[derive(Debug, Clone)]
pub enum StructuralType {
/// The type is for a function.
Func(FuncType),
/// The type is for an array.
Array(ArrayType),
/// The type is for a struct.
Struct(StructType),
}
/// Represents a type of a function in a WebAssembly module.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub struct FuncType {
/// The combined parameters and result types.
params_results: Box<[ValType]>,
/// The number of parameter types.
len_params: usize,
}
/// Represents a type of an array in a WebAssembly module.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub struct ArrayType(pub FieldType);
/// Represents a type of a struct in a WebAssembly module.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub struct StructType {
/// Struct fields.
pub fields: Box<[FieldType]>,
}
/// Field type in structural types (structs, arrays).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub struct FieldType {
/// Storage type of the field.
pub element_type: StorageType,
/// Is the field mutable.
pub mutable: bool,
}
/// Storage type for structural type fields.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub enum StorageType {
/// The `i8` type.
I8,
/// The `i16` type.
I16,
/// A value type.
Val(ValType),
}
/// The type of a core WebAssembly value.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub enum ValType {
/// The `i32` type.
I32,
/// The `i64` type.
I64,
/// The `f32` type.
F32,
/// The `f64` type.
F64,
/// The `v128` type.
///
/// Part of the SIMD proposal.
V128,
/// A reference type.
///
/// The `funcref` and `externref` types fall into this category and the full
/// generalization here is due to the implementation of the
/// function-references proposal.
Ref(RefType),
}
impl FuncType {
/// Creates a new [`FuncType`] from the given `params` and `results`.
pub fn new<P, R>(params: P, results: R) -> Self
where
P: IntoIterator<Item = ValType>,
R: IntoIterator<Item = ValType>,
{
let mut buffer = params.into_iter().collect::<Vec<_>>();
let len_params = buffer.len();
buffer.extend(results);
Self {
params_results: buffer.into(),
len_params,
}
}
/// Returns a shared slice to the parameter types of the [`FuncType`].
#[inline]
pub fn params(&self) -> &[ValType] {
&self.params_results[..self.len_params]
}
/// Returns a shared slice to the result types of the [`FuncType`].
#[inline]
pub fn results(&self) -> &[ValType] {
&self.params_results[self.len_params..]
}
}
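// --- Editor's illustrative sketch (not part of the original crate) ----------
// How params/results are recovered from the packed `params_results` buffer; a
// small test assuming the types above are in scope:
#[test]
fn func_type_params_and_results_split() {
    let ty = FuncType::new([ValType::I32, ValType::I32], [ValType::I64]);
    assert_eq!(ty.params(), &[ValType::I32, ValType::I32]);
    assert_eq!(ty.results(), &[ValType::I64]);
}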
impl ValType {
/// Alias for the `funcref` type in WebAssembly
pub const FUNCREF: ValType = ValType::Ref(RefType::FUNCREF);
/// Alias for the `externref` type in WebAssembly
pub const EXTERNREF: ValType = ValType::Ref(RefType::EXTERNREF);
}
impl Encode for StorageType {
fn encode(&self, sink: &mut Vec<u8>) {
match self {
StorageType::I8 => sink.push(0x7A),
StorageType::I16 => sink.push(0x79),
StorageType::Val(vt) => vt.encode(sink),
}
}
}
impl Encode for ValType {
fn encode(&self, sink: &mut Vec<u8>) {
match self {
ValType::I32 => sink.push(0x7F),
ValType::I64 => sink.push(0x7E),
ValType::F32 => sink.push(0x7D),
ValType::F64 => sink.push(0x7C),
ValType::V128 => sink.push(0x7B),
ValType::Ref(rt) => rt.encode(sink),
}
}
}
/// A reference type.
///
/// This is largely part of the function references proposal for WebAssembly but
/// additionally is used by the `funcref` and `externref` types. The full
/// generality of this type is only exercised with function-references.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
#[allow(missing_docs)]
pub struct RefType {
pub nullable: bool,
pub heap_type: HeapType,
}
impl RefType {
/// Alias for the `funcref` type in WebAssembly
pub const FUNCREF: RefType = RefType {
nullable: true,
heap_type: HeapType::Func,
};
/// Alias for the `externref` type in WebAssembly
pub const EXTERNREF: RefType = RefType {
nullable: true,
heap_type: HeapType::Extern,
};
}
impl Encode for RefType {
fn encode(&self, sink: &mut Vec<u8>) {
if self.nullable {
// Favor the original encodings of `funcref` and `externref` where
// possible
match self.heap_type {
HeapType::Func => return sink.push(0x70),
HeapType::Extern => return sink.push(0x6f),
_ => {}
}
}
if self.nullable {
sink.push(0x6C);
} else {
sink.push(0x6B);
}
self.heap_type.encode(sink);
}
}
impl From<RefType> for ValType {
fn from(ty: RefType) -> ValType {
ValType::Ref(ty)
}
}
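// --- Editor's illustrative sketch (not part of the original crate) ----------
// The conversion above means the `FUNCREF` alias on ValType is simply the Ref
// variant wrapping RefType::FUNCREF:
#[test]
fn ref_type_converts_into_val_type() {
    let vt: ValType = RefType::FUNCREF.into();
    assert_eq!(vt, ValType::FUNCREF);
}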
/// Part of the function references proposal.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub enum HeapType {
/// Untyped (any) function.
Func,
/// External heap type.
Extern,
/// The `any` heap type. The common supertype (a.k.a. top) of all internal types.
Any,
/// The `none` heap type. The common subtype (a.k.a. bottom) of all internal types.
None,
/// The `noextern` heap type. The common subtype (a.k.a. bottom) of all external types.
NoExtern,
/// The `nofunc` heap type. The common subtype (a.k.a. bottom) of all function types.
NoFunc,
/// The `eq` heap type. The common supertype of all referenceable types on which comparison
/// (ref.eq) is allowed.
Eq,
/// The `struct` heap type. The common supertype of all struct types.
Struct,
/// The `array` heap type. The common supertype of all array types.
Array,
/// The i31 heap type.
I31,
/// User defined type at the given index.
Indexed(u32),
}
impl Encode for HeapType {
fn encode(&self, sink: &mut Vec<u8>) {
match self {
HeapType::Func => sink.push(0x70),
HeapType::Extern => sink.push(0x6F),
HeapType::Any => sink.push(0x6E),
HeapType::None => sink.push(0x65),
HeapType::NoExtern => sink.push(0x69),
HeapType::NoFunc => sink.push(0x68),
HeapType::Eq => sink.push(0x6D),
HeapType::Struct => sink.push(0x67),
HeapType::Array => sink.push(0x66),
HeapType::I31 => sink.push(0x6A),
// Note that this is encoded as a signed type rather than unsigned
// as it's decoded as an s33
HeapType::Indexed(i) => i64::from(*i).encode(sink),
}
}
}
/// An encoder for the type section of WebAssembly modules.
///
/// # Example
///
/// ```rust
/// use wasm_encoder::{Module, TypeSection, ValType};
///
/// let mut types = TypeSection::new();
///
/// types.function([ValType::I32, ValType::I32], [ValType::I64]);
///
/// let mut module = Module::new();
/// module.section(&types);
///
/// let bytes = module.finish();
/// ```
#[derive(Clone, Debug, Default)]
pub struct TypeSection {
bytes: Vec<u8>,
num_added: u32,
}
impl TypeSection {
/// Create a new | /// The list of supertype indexes. As of GC MVP, there can be at most one supertype.
pub supertype_idx: Option<u32>,
/// The structural type of the subtype. | random_line_split |
|
types.rs | /// Represents a type of an array in a WebAssembly module.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub struct ArrayType(pub FieldType);
/// Represents a type of a struct in a WebAssembly module.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub struct StructType {
/// Struct fields.
pub fields: Box<[FieldType]>,
}
/// Field type in structural types (structs, arrays).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub struct FieldType {
/// Storage type of the field.
pub element_type: StorageType,
/// Is the field mutable.
pub mutable: bool,
}
/// Storage type for structural type fields.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub enum | {
/// The `i8` type.
I8,
/// The `i16` type.
I16,
/// A value type.
Val(ValType),
}
/// The type of a core WebAssembly value.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub enum ValType {
/// The `i32` type.
I32,
/// The `i64` type.
I64,
/// The `f32` type.
F32,
/// The `f64` type.
F64,
/// The `v128` type.
///
/// Part of the SIMD proposal.
V128,
/// A reference type.
///
/// The `funcref` and `externref` types fall into this category and the full
/// generalization here is due to the implementation of the
/// function-references proposal.
Ref(RefType),
}
impl FuncType {
/// Creates a new [`FuncType`] from the given `params` and `results`.
pub fn new<P, R>(params: P, results: R) -> Self
where
P: IntoIterator<Item = ValType>,
R: IntoIterator<Item = ValType>,
{
let mut buffer = params.into_iter().collect::<Vec<_>>();
let len_params = buffer.len();
buffer.extend(results);
Self {
params_results: buffer.into(),
len_params,
}
}
/// Returns a shared slice to the parameter types of the [`FuncType`].
#[inline]
pub fn params(&self) -> &[ValType] {
&self.params_results[..self.len_params]
}
/// Returns a shared slice to the result types of the [`FuncType`].
#[inline]
pub fn results(&self) -> &[ValType] {
&self.params_results[self.len_params..]
}
}
impl ValType {
/// Alias for the `funcref` type in WebAssembly
pub const FUNCREF: ValType = ValType::Ref(RefType::FUNCREF);
/// Alias for the `externref` type in WebAssembly
pub const EXTERNREF: ValType = ValType::Ref(RefType::EXTERNREF);
}
impl Encode for StorageType {
fn encode(&self, sink: &mut Vec<u8>) {
match self {
StorageType::I8 => sink.push(0x7A),
StorageType::I16 => sink.push(0x79),
StorageType::Val(vt) => vt.encode(sink),
}
}
}
impl Encode for ValType {
fn encode(&self, sink: &mut Vec<u8>) {
match self {
ValType::I32 => sink.push(0x7F),
ValType::I64 => sink.push(0x7E),
ValType::F32 => sink.push(0x7D),
ValType::F64 => sink.push(0x7C),
ValType::V128 => sink.push(0x7B),
ValType::Ref(rt) => rt.encode(sink),
}
}
}
/// A reference type.
///
/// This is largely part of the function references proposal for WebAssembly but
/// additionally is used by the `funcref` and `externref` types. The full
/// generality of this type is only exercised with function-references.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
#[allow(missing_docs)]
pub struct RefType {
pub nullable: bool,
pub heap_type: HeapType,
}
impl RefType {
/// Alias for the `funcref` type in WebAssembly
pub const FUNCREF: RefType = RefType {
nullable: true,
heap_type: HeapType::Func,
};
/// Alias for the `externref` type in WebAssembly
pub const EXTERNREF: RefType = RefType {
nullable: true,
heap_type: HeapType::Extern,
};
}
impl Encode for RefType {
fn encode(&self, sink: &mut Vec<u8>) {
if self.nullable {
// Favor the original encodings of `funcref` and `externref` where
// possible
match self.heap_type {
HeapType::Func => return sink.push(0x70),
HeapType::Extern => return sink.push(0x6f),
_ => {}
}
}
if self.nullable {
sink.push(0x6C);
} else {
sink.push(0x6B);
}
self.heap_type.encode(sink);
}
}
impl From<RefType> for ValType {
fn from(ty: RefType) -> ValType {
ValType::Ref(ty)
}
}
/// Part of the function references proposal.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub enum HeapType {
/// Untyped (any) function.
Func,
/// External heap type.
Extern,
/// The `any` heap type. The common supertype (a.k.a. top) of all internal types.
Any,
/// The `none` heap type. The common subtype (a.k.a. bottom) of all internal types.
None,
/// The `noextern` heap type. The common subtype (a.k.a. bottom) of all external types.
NoExtern,
/// The `nofunc` heap type. The common subtype (a.k.a. bottom) of all function types.
NoFunc,
/// The `eq` heap type. The common supertype of all referenceable types on which comparison
/// (ref.eq) is allowed.
Eq,
/// The `struct` heap type. The common supertype of all struct types.
Struct,
/// The `array` heap type. The common supertype of all array types.
Array,
/// The i31 heap type.
I31,
/// User defined type at the given index.
Indexed(u32),
}
impl Encode for HeapType {
fn encode(&self, sink: &mut Vec<u8>) {
match self {
HeapType::Func => sink.push(0x70),
HeapType::Extern => sink.push(0x6F),
HeapType::Any => sink.push(0x6E),
HeapType::None => sink.push(0x65),
HeapType::NoExtern => sink.push(0x69),
HeapType::NoFunc => sink.push(0x68),
HeapType::Eq => sink.push(0x6D),
HeapType::Struct => sink.push(0x67),
HeapType::Array => sink.push(0x66),
HeapType::I31 => sink.push(0x6A),
// Note that this is encoded as a signed type rather than unsigned
// as it's decoded as an s33
HeapType::Indexed(i) => i64::from(*i).encode(sink),
}
}
}
/// An encoder for the type section of WebAssembly modules.
///
/// # Example
///
/// ```rust
/// use wasm_encoder::{Module, TypeSection, ValType};
///
/// let mut types = TypeSection::new();
///
/// types.function([ValType::I32, ValType::I32], [ValType::I64]);
///
/// let mut module = Module::new();
/// module.section(&types);
///
/// let bytes = module.finish();
/// ```
#[derive(Clone, Debug, Default)]
pub struct TypeSection {
bytes: Vec<u8>,
num_added: u32,
}
impl TypeSection {
/// Create a new module type section encoder.
pub fn new() -> Self {
Self::default()
}
/// The number of types in the section.
pub fn len(&self) -> u32 {
self.num_added
}
/// Determines if the section is empty.
pub fn is_empty(&self) -> bool {
self.num_added == 0
}
/// Define a function type in this type section.
pub fn function<P, R>(&mut self, params: P, results: R) -> &mut Self
where
P: IntoIterator<Item = ValType>,
P::IntoIter: ExactSizeIterator,
R: IntoIterator<Item = ValType>,
R::IntoIter: ExactSizeIterator,
{
let params = params.into_iter();
let results = results.into_iter();
self.bytes.push(0 | StorageType | identifier_name |
types.rs | I32,
/// The `i64` type.
I64,
/// The `f32` type.
F32,
/// The `f64` type.
F64,
/// The `v128` type.
///
/// Part of the SIMD proposal.
V128,
/// A reference type.
///
/// The `funcref` and `externref` types fall into this category and the full
/// generalization here is due to the implementation of the
/// function-references proposal.
Ref(RefType),
}
impl FuncType {
/// Creates a new [`FuncType`] from the given `params` and `results`.
pub fn new<P, R>(params: P, results: R) -> Self
where
P: IntoIterator<Item = ValType>,
R: IntoIterator<Item = ValType>,
{
let mut buffer = params.into_iter().collect::<Vec<_>>();
let len_params = buffer.len();
buffer.extend(results);
Self {
params_results: buffer.into(),
len_params,
}
}
/// Returns a shared slice to the parameter types of the [`FuncType`].
#[inline]
pub fn params(&self) -> &[ValType] {
&self.params_results[..self.len_params]
}
/// Returns a shared slice to the result types of the [`FuncType`].
#[inline]
pub fn results(&self) -> &[ValType] {
&self.params_results[self.len_params..]
}
}
impl ValType {
/// Alias for the `funcref` type in WebAssembly
pub const FUNCREF: ValType = ValType::Ref(RefType::FUNCREF);
/// Alias for the `externref` type in WebAssembly
pub const EXTERNREF: ValType = ValType::Ref(RefType::EXTERNREF);
}
impl Encode for StorageType {
fn encode(&self, sink: &mut Vec<u8>) {
match self {
StorageType::I8 => sink.push(0x7A),
StorageType::I16 => sink.push(0x79),
StorageType::Val(vt) => vt.encode(sink),
}
}
}
impl Encode for ValType {
fn encode(&self, sink: &mut Vec<u8>) {
match self {
ValType::I32 => sink.push(0x7F),
ValType::I64 => sink.push(0x7E),
ValType::F32 => sink.push(0x7D),
ValType::F64 => sink.push(0x7C),
ValType::V128 => sink.push(0x7B),
ValType::Ref(rt) => rt.encode(sink),
}
}
}
/// A reference type.
///
/// This is largely part of the function references proposal for WebAssembly but
/// additionally is used by the `funcref` and `externref` types. The full
/// generality of this type is only exercised with function-references.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
#[allow(missing_docs)]
pub struct RefType {
pub nullable: bool,
pub heap_type: HeapType,
}
impl RefType {
/// Alias for the `funcref` type in WebAssembly
pub const FUNCREF: RefType = RefType {
nullable: true,
heap_type: HeapType::Func,
};
/// Alias for the `externref` type in WebAssembly
pub const EXTERNREF: RefType = RefType {
nullable: true,
heap_type: HeapType::Extern,
};
}
impl Encode for RefType {
fn encode(&self, sink: &mut Vec<u8>) {
if self.nullable {
// Favor the original encodings of `funcref` and `externref` where
// possible
match self.heap_type {
HeapType::Func => return sink.push(0x70),
HeapType::Extern => return sink.push(0x6f),
_ => {}
}
}
if self.nullable {
sink.push(0x6C);
} else {
sink.push(0x6B);
}
self.heap_type.encode(sink);
}
}
impl From<RefType> for ValType {
fn from(ty: RefType) -> ValType {
ValType::Ref(ty)
}
}
/// Part of the function references proposal.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub enum HeapType {
/// Untyped (any) function.
Func,
/// External heap type.
Extern,
/// The `any` heap type. The common supertype (a.k.a. top) of all internal types.
Any,
/// The `none` heap type. The common subtype (a.k.a. bottom) of all internal types.
None,
/// The `noextern` heap type. The common subtype (a.k.a. bottom) of all external types.
NoExtern,
/// The `nofunc` heap type. The common subtype (a.k.a. bottom) of all function types.
NoFunc,
/// The `eq` heap type. The common supertype of all referenceable types on which comparison
/// (ref.eq) is allowed.
Eq,
/// The `struct` heap type. The common supertype of all struct types.
Struct,
/// The `array` heap type. The common supertype of all array types.
Array,
/// The i31 heap type.
I31,
/// User defined type at the given index.
Indexed(u32),
}
impl Encode for HeapType {
fn encode(&self, sink: &mut Vec<u8>) {
match self {
HeapType::Func => sink.push(0x70),
HeapType::Extern => sink.push(0x6F),
HeapType::Any => sink.push(0x6E),
HeapType::None => sink.push(0x65),
HeapType::NoExtern => sink.push(0x69),
HeapType::NoFunc => sink.push(0x68),
HeapType::Eq => sink.push(0x6D),
HeapType::Struct => sink.push(0x67),
HeapType::Array => sink.push(0x66),
HeapType::I31 => sink.push(0x6A),
// Note that this is encoded as a signed type rather than unsigned
// as it's decoded as an s33
HeapType::Indexed(i) => i64::from(*i).encode(sink),
}
}
}
/// An encoder for the type section of WebAssembly modules.
///
/// # Example
///
/// ```rust
/// use wasm_encoder::{Module, TypeSection, ValType};
///
/// let mut types = TypeSection::new();
///
/// types.function([ValType::I32, ValType::I32], [ValType::I64]);
///
/// let mut module = Module::new();
/// module.section(&types);
///
/// let bytes = module.finish();
/// ```
#[derive(Clone, Debug, Default)]
pub struct TypeSection {
bytes: Vec<u8>,
num_added: u32,
}
impl TypeSection {
/// Create a new module type section encoder.
pub fn new() -> Self {
Self::default()
}
/// The number of types in the section.
pub fn len(&self) -> u32 {
self.num_added
}
/// Determines if the section is empty.
pub fn is_empty(&self) -> bool {
self.num_added == 0
}
/// Define a function type in this type section.
pub fn function<P, R>(&mut self, params: P, results: R) -> &mut Self
where
P: IntoIterator<Item = ValType>,
P::IntoIter: ExactSizeIterator,
R: IntoIterator<Item = ValType>,
R::IntoIter: ExactSizeIterator,
{
let params = params.into_iter();
let results = results.into_iter();
self.bytes.push(0x60);
params.len().encode(&mut self.bytes);
params.for_each(|p| p.encode(&mut self.bytes));
results.len().encode(&mut self.bytes);
results.for_each(|p| p.encode(&mut self.bytes));
self.num_added += 1;
self
}
/// Define an array type in this type section.
pub fn array(&mut self, ty: &StorageType, mutable: bool) -> &mut Self {
self.bytes.push(0x5e);
self.field(ty, mutable);
self.num_added += 1;
self
}
fn field(&mut self, ty: &StorageType, mutable: bool) -> &mut Self {
ty.encode(&mut self.bytes);
self.bytes.push(mutable as u8);
self
}
/// Define a struct type in this type section.
pub fn struct_(&mut self, fields: Vec<FieldType>) -> &mut Self | {
self.bytes.push(0x5f);
fields.len().encode(&mut self.bytes);
for f in fields.iter() {
self.field(&f.element_type, f.mutable);
}
self.num_added += 1;
self
} | identifier_body |
|
bid.rs | re,
}
impl Target {
/// Returns the score this target would give on success.
pub fn multiplier(self) -> i32 {
match self {
Target::Prise => 1,
Target::Garde => 2,
Target::GardeSans => 4,
Target::GardeContre => 6,
}
}
pub fn to_str(self) -> &'static str {
match self {
Target::Prise => "prise",
Target::Garde => "garde",
Target::GardeSans => "garde sans",
Target::GardeContre => "garde contre",
}
}
}
impl FromStr for Target {
type Err = String;
fn from_str(s: &str) -> Result<Self, String> {
match s {
"prise" => Ok(Target::Prise),
"garde" => Ok(Target::Garde),
"garde sans" => Ok(Target::GardeSans),
"garde contre" => Ok(Target::GardeContre),
_ => Err(format!("invalid target: {}", s)),
}
}
}
impl ToString for Target {
fn to_string(&self) -> String {
self.to_str().to_owned()
}
}
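// --- Editor's illustrative sketch (not part of the original crate) ----------
// Round trip between the string form, the parsed Target and its score
// multiplier ("garde sans" is one of the four accepted strings above):
#[test]
fn target_string_round_trip() {
    let t: Target = "garde sans".parse().unwrap();
    assert_eq!(t.multiplier(), 4);
    assert_eq!(t.to_string(), "garde sans");
}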
/// Contract taken
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct Contract {
/// Initial author of the contract.
pub author: pos::PlayerPos,
/// Target for the contract.
pub target: Target,
/// Slam asked ?
pub slam: bool,
}
impl Contract {
fn new(author: pos::PlayerPos, target: Target, slam: bool) -> Self {
Contract {
author,
target,
slam,
}
}
}
impl ToString for Contract {
fn to_string(&self) -> String {
let str_slam = if self.slam { " SLAM" } else { "" };
format!("{}{}", self.target.to_str(), str_slam)
}
}
/// Current state of an auction
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)]
pub enum AuctionState {
/// Players are still bidding for the highest contract
Bidding,
/// Auction is over, deal will begin
Over,
/// No contract was taken, a new deal will start
Cancelled,
}
/// Bidding status for a player
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)]
pub enum BidStatus {
Todo,
Passed,
Bid,
}
/// Represents the entire auction process.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Auction {
contract: Option<Contract>,
players_status: Vec<BidStatus>,
first: pos::PlayerPos,
state: AuctionState,
players: Vec<cards::Hand>,
dog: cards::Hand,
}
/// Possible error occurring during an Auction.
#[derive(PartialEq, Debug)]
pub enum BidError {
/// The auction was closed and does not accept more contracts.
AuctionClosed,
/// A player tried bidding before his turn.
TurnError,
/// The given bid was not higher than the previous one.
NonRaisedTarget,
/// Cannot complete the auction when it is still running.
AuctionRunning,
/// No contract was offered during the auction, it cannot complete.
NoContract,
}
impl fmt::Display for BidError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
BidError::AuctionClosed => write!(f, "auctions are closed"),
BidError::TurnError => write!(f, "invalid turn order"),
BidError::NonRaisedTarget => write!(f, "bid must be higher than current contract"),
BidError::AuctionRunning => write!(f, "the auction are still running"),
BidError::NoContract => write!(f, "no contract was offered"),
}
}
}
impl Auction {
/// Starts a new auction, starting with the player `first`.
pub fn new(first: pos::PlayerPos) -> Self {
let count = first.count as usize;
let (hands, dog) = super::deal_hands(count);
Auction {
contract: None,
players_status: vec![BidStatus::Todo; count],
state: AuctionState::Bidding,
first,
players: hands,
dog
}
}
/// Override Auction hands (for tests)
pub fn set_hands(&mut self, hands: Vec<cards::Hand>, dog: cards::Hand) {
self.players = hands;
self.dog = dog;
}
/// Returns the current state of the auctions.
pub fn get_state(&self) -> AuctionState {
self.state
}
fn can_bid(&self, target: Target) -> Result<(), BidError> {
if self.state != AuctionState::Bidding {
return Err(BidError::AuctionClosed);
}
if let Some(contract) = self.contract.clone() {
if target.multiplier() <= contract.target.multiplier() {
return Err(BidError::NonRaisedTarget);
}
}
Ok(())
}
fn get_player_status(&self, pos: pos::PlayerPos) -> BidStatus {
self.players_status[pos.to_n()]
}
fn set_player_status(&mut self, pos: pos::PlayerPos, status: BidStatus) {
self.players_status[pos.to_n()] = status;
}
/// Returns the player that is expected to bid next.
pub fn next_player(&self) -> pos::PlayerPos {
let pos_init = if let Some(contract) = self.contract.clone() {
contract.author.next()
} else {
self.first
};
let mut next_pos = pos_init;
while self.get_player_status(next_pos) != BidStatus::Todo {
next_pos = next_pos.next();
if next_pos == pos_init {
panic!("all players have talked")
}
}
next_pos
}
/// Check if there are still players waiting for bidding
fn no_player_left(&self) -> bool {
!self.players_status.contains(&BidStatus::Todo)
}
/// Bid a new, higher contract.
pub fn bid(
&mut self,
pos: pos::PlayerPos,
target: Target,
slam: bool,
) -> Result<AuctionState, BidError> {
if pos != self.next_player() {
return Err(BidError::TurnError);
}
self.can_bid(target)?;
// Reset previous bidder status
if let Some(contract) = self.contract.clone() {
self.set_player_status(contract.author, BidStatus::Todo);
}
let contract = Contract::new(pos, target, slam);
self.contract = Some(contract);
self.set_player_status(pos, BidStatus::Bid);
// If we're all the way to the top, there's nowhere else to go
if self.no_player_left() || target == Target::GardeContre {
self.state = AuctionState::Over;
}
Ok(self.state)
}
/// Look at the last offered contract.
///
/// Returns `None` if no contract was offered yet.
pub fn current_contract(&self) -> Option<&Contract> {
self.contract.as_ref()
}
/// Returns the players cards.
pub fn | (&self) -> &Vec<cards::Hand> {
&self.players
}
/// The current player passes his turn.
///
/// Returns the new auction state :
///
/// * `AuctionState::Cancelled` if all players passed
/// * `AuctionState::Over` if 5 players passed in a row
/// * The previous state otherwise
pub fn pass(&mut self, pos: pos::PlayerPos) -> Result<AuctionState, BidError> {
if pos != self.next_player() {
return Err(BidError::TurnError);
}
self.set_player_status(pos, BidStatus::Passed);
if self.no_player_left() {
self.state = if self.contract.is_some() {
AuctionState::Over
} else {
AuctionState::Cancelled
}
}
Ok(self.state)
}
/// Consumes a complete auction to enter the second deal phase.
///
/// If the auction was ready, returns `Ok<DealState>`
pub fn complete(&self) -> Result<deal::DealState, BidError> {
if self.state != AuctionState::Over {
Err(BidError::AuctionRunning)
// } else if self.contract.is_none() {
} else {
if let Some(contract) = self.contract.clone() {
Ok(deal::DealState::new(
self.first,
self.players.clone(),
self.dog,
contract,
pos::PlayerPos::from_n(0,5), //XXX placeholder
))
} else {
Err(BidError::NoContract)
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::pos;
#[test]
fn test_auction() {
let mut auction = Auction::new(pos::PlayerPos::from_n(0, 5));
assert!(auction.state == AuctionState::Bidding);
assert_eq!(auction.pass(pos::PlayerPos::from_n(0, 5)), Ok(AuctionState::Bidding));
assert_eq!(auction.pass(pos::PlayerPos::from_n(1, 5)), Ok(AuctionState::Bidding));
assert_eq!(auction.pass(pos::PlayerPos | hands | identifier_name |
bid.rs | re,
}
impl Target {
/// Returns the score this target would give on success.
pub fn multiplier(self) -> i32 {
match self {
Target::Prise => 1,
Target::Garde => 2,
Target::GardeSans => 4,
Target::GardeContre => 6,
}
}
pub fn to_str(self) -> &'static str {
match self {
Target::Prise => "prise",
Target::Garde => "garde",
Target::GardeSans => "garde sans",
Target::GardeContre => "garde contre",
}
}
}
impl FromStr for Target {
type Err = String;
fn from_str(s: &str) -> Result<Self, String> {
match s {
"prise" => Ok(Target::Prise),
"garde" => Ok(Target::Garde),
"garde sans" => Ok(Target::GardeSans),
"garde contre" => Ok(Target::GardeContre),
_ => Err(format!("invalid target: {}", s)),
}
}
}
impl ToString for Target {
fn to_string(&self) -> String {
self.to_str().to_owned()
}
}
/// Contract taken
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct Contract {
/// Initial author of the contract.
pub author: pos::PlayerPos,
/// Target for the contract.
pub target: Target,
/// Slam asked ?
pub slam: bool,
}
impl Contract {
fn new(author: pos::PlayerPos, target: Target, slam: bool) -> Self {
Contract {
author,
target,
slam,
}
}
}
impl ToString for Contract {
fn to_string(&self) -> String {
let str_slam = if self.slam { " SLAM" } else { "" };
format!("{}{}", self.target.to_str(), str_slam)
}
}
/// Current state of an auction
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)]
pub enum AuctionState {
/// Players are still bidding for the highest contract
Bidding,
/// Auction is over, deal will begin
Over,
/// No contract was taken, a new deal will start
Cancelled,
}
/// Bidding status for a player
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)]
pub enum BidStatus {
Todo,
Passed,
Bid,
}
/// Represents the entire auction process.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Auction {
contract: Option<Contract>,
players_status: Vec<BidStatus>,
first: pos::PlayerPos,
state: AuctionState,
players: Vec<cards::Hand>,
dog: cards::Hand,
}
/// Possible error occurring during an Auction.
#[derive(PartialEq, Debug)]
pub enum BidError {
/// The auction was closed and does not accept more contracts.
AuctionClosed,
/// A player tried bidding before his turn.
TurnError,
/// The given bid was not higher than the previous one.
NonRaisedTarget,
/// Cannot complete the auction when it is still running.
AuctionRunning,
/// No contract was offered during the auction, it cannot complete.
NoContract,
}
impl fmt::Display for BidError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
BidError::AuctionClosed => write!(f, "auctions are closed"),
BidError::TurnError => write!(f, "invalid turn order"),
BidError::NonRaisedTarget => write!(f, "bid must be higher than current contract"),
BidError::AuctionRunning => write!(f, "the auction are still running"),
BidError::NoContract => write!(f, "no contract was offered"),
}
}
}
impl Auction {
/// Starts a new auction, starting with the player `first`.
pub fn new(first: pos::PlayerPos) -> Self {
let count = first.count as usize;
let (hands, dog) = super::deal_hands(count);
Auction {
contract: None,
players_status: vec![BidStatus::Todo; count],
state: AuctionState::Bidding,
first,
players: hands,
dog
}
}
/// Override Auction hands (for tests)
pub fn set_hands(&mut self, hands: Vec<cards::Hand>, dog: cards::Hand) {
self.players = hands;
self.dog = dog;
}
/// Returns the current state of the auctions.
pub fn get_state(&self) -> AuctionState {
self.state
}
fn can_bid(&self, target: Target) -> Result<(), BidError> {
if self.state != AuctionState::Bidding {
return Err(BidError::AuctionClosed);
}
if let Some(contract) = self.contract.clone() |
Ok(())
}
fn get_player_status(&self, pos: pos::PlayerPos) -> BidStatus {
self.players_status[pos.to_n()]
}
fn set_player_status(&mut self, pos: pos::PlayerPos, status: BidStatus) {
self.players_status[pos.to_n()] = status;
}
/// Returns the player that is expected to bid next.
pub fn next_player(&self) -> pos::PlayerPos {
let pos_init = if let Some(contract) = self.contract.clone() {
contract.author.next()
} else {
self.first
};
let mut next_pos = pos_init;
while self.get_player_status(next_pos) != BidStatus::Todo {
next_pos = next_pos.next();
if next_pos == pos_init {
panic!("all players have talked")
}
}
next_pos
}
/// Check if there are still players waiting for bidding
fn no_player_left(&self) -> bool {
!self.players_status.contains(&BidStatus::Todo)
}
/// Bid a new, higher contract.
pub fn bid(
&mut self,
pos: pos::PlayerPos,
target: Target,
slam: bool,
) -> Result<AuctionState, BidError> {
if pos != self.next_player() {
return Err(BidError::TurnError);
}
self.can_bid(target)?;
// Reset previous bidder status
if let Some(contract) = self.contract.clone() {
self.set_player_status(contract.author, BidStatus::Todo);
}
let contract = Contract::new(pos, target, slam);
self.contract = Some(contract);
self.set_player_status(pos, BidStatus::Bid);
// If we're all the way to the top, there's nowhere else to go
if self.no_player_left() || target == Target::GardeContre {
self.state = AuctionState::Over;
}
Ok(self.state)
}
/// Look at the last offered contract.
///
/// Returns `None` if no contract was offered yet.
pub fn current_contract(&self) -> Option<&Contract> {
self.contract.as_ref()
}
/// Returns the players cards.
pub fn hands(&self) -> &Vec<cards::Hand> {
&self.players
}
/// The current player passes his turn.
///
/// Returns the new auction state :
///
/// * `AuctionState::Cancelled` if all players passed
/// * `AuctionState::Over` if 5 players passed in a row
/// * The previous state otherwise
pub fn pass(&mut self, pos: pos::PlayerPos) -> Result<AuctionState, BidError> {
if pos != self.next_player() {
return Err(BidError::TurnError);
}
self.set_player_status(pos, BidStatus::Passed);
if self.no_player_left() {
self.state = if self.contract.is_some() {
AuctionState::Over
} else {
AuctionState::Cancelled
}
}
Ok(self.state)
}
/// Consumes a complete auction to enter the second deal phase.
///
/// If the auction was ready, returns `Ok<DealState>`
pub fn complete(&self) -> Result<deal::DealState, BidError> {
if self.state != AuctionState::Over {
Err(BidError::AuctionRunning)
// } else if self.contract.is_none() {
} else {
if let Some(contract) = self.contract.clone() {
Ok(deal::DealState::new(
self.first,
self.players.clone(),
self.dog,
contract,
pos::PlayerPos::from_n(0,5), //XXX placeholder
))
} else {
Err(BidError::NoContract)
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::pos;
#[test]
fn test_auction() {
let mut auction = Auction::new(pos::PlayerPos::from_n(0, 5));
assert!(auction.state == AuctionState::Bidding);
assert_eq!(auction.pass(pos::PlayerPos::from_n(0, 5)), Ok(AuctionState::Bidding));
assert_eq!(auction.pass(pos::PlayerPos::from_n(1, 5)), Ok(AuctionState::Bidding));
assert_eq!(auction.pass(pos::Player | {
if target.multiplier() <= contract.target.multiplier() {
return Err(BidError::NonRaisedTarget);
}
} | conditional_block |
bid.rs | re,
}
impl Target {
/// Returns the score this target would give on success.
pub fn multiplier(self) -> i32 {
match self {
Target::Prise => 1,
Target::Garde => 2,
Target::GardeSans => 4,
Target::GardeContre => 6,
}
}
pub fn to_str(self) -> &'static str {
match self {
Target::Prise => "prise",
Target::Garde => "garde",
Target::GardeSans => "garde sans",
Target::GardeContre => "garde contre",
}
}
}
impl FromStr for Target {
type Err = String;
fn from_str(s: &str) -> Result<Self, String> {
match s {
"prise" => Ok(Target::Prise),
"garde" => Ok(Target::Garde),
"garde sans" => Ok(Target::GardeSans),
"garde contre" => Ok(Target::GardeContre),
_ => Err(format!("invalid target: {}", s)),
}
}
}
impl ToString for Target {
fn to_string(&self) -> String {
self.to_str().to_owned()
}
}
/// Contract taken
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct Contract {
/// Initial author of the contract.
pub author: pos::PlayerPos,
/// Target for the contract.
pub target: Target,
/// Slam asked?
pub slam: bool,
}
impl Contract {
fn new(author: pos::PlayerPos, target: Target, slam: bool) -> Self {
Contract {
author,
target,
slam,
}
}
}
impl ToString for Contract {
fn to_string(&self) -> String {
let str_slam = if self.slam { " SLAM" } else { "" };
format!("{}{}", self.target.to_str(), str_slam)
}
}
/// Current state of an auction
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)]
pub enum AuctionState {
/// Players are still bidding for the highest contract
Bidding,
/// Auction is over, deal will begin
Over,
/// No contract was taken, a new deal will start
Cancelled,
}
/// Bidding status for a player
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)]
pub enum BidStatus {
Todo,
Passed,
Bid,
}
/// Represents the entire auction process.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Auction {
contract: Option<Contract>,
players_status: Vec<BidStatus>,
first: pos::PlayerPos,
state: AuctionState,
players: Vec<cards::Hand>,
dog: cards::Hand,
}
/// Possible errors occurring during an Auction.
#[derive(PartialEq, Debug)]
pub enum BidError {
/// The auction was closed and does not accept more contracts.
AuctionClosed,
/// A player tried bidding before his turn.
TurnError,
/// The given bid was not higher than the previous one.
NonRaisedTarget,
/// Cannot complete the auction when it is still running.
AuctionRunning,
/// No contract was offered during the auction, it cannot complete.
NoContract,
}
impl fmt::Display for BidError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result |
}
impl Auction {
/// Starts a new auction, starting with the player `first`.
pub fn new(first: pos::PlayerPos) -> Self {
let count = first.count as usize;
let (hands, dog) = super::deal_hands(count);
Auction {
contract: None,
players_status: vec![BidStatus::Todo; count],
state: AuctionState::Bidding,
first,
players: hands,
dog
}
}
/// Override Auction hands (for tests)
pub fn set_hands(&mut self, hands: Vec<cards::Hand>, dog: cards::Hand) {
self.players = hands;
self.dog = dog;
}
/// Returns the current state of the auctions.
pub fn get_state(&self) -> AuctionState {
self.state
}
fn can_bid(&self, target: Target) -> Result<(), BidError> {
if self.state != AuctionState::Bidding {
return Err(BidError::AuctionClosed);
}
if let Some(contract) = self.contract.clone() {
if target.multiplier() <= contract.target.multiplier() {
return Err(BidError::NonRaisedTarget);
}
}
Ok(())
}
fn get_player_status(&self, pos: pos::PlayerPos) -> BidStatus {
self.players_status[pos.to_n()]
}
fn set_player_status(&mut self, pos: pos::PlayerPos, status: BidStatus) {
self.players_status[pos.to_n()] = status;
}
/// Returns the player that is expected to bid next.
pub fn next_player(&self) -> pos::PlayerPos {
let pos_init = if let Some(contract) = self.contract.clone() {
contract.author.next()
} else {
self.first
};
let mut next_pos = pos_init;
while self.get_player_status(next_pos) != BidStatus::Todo {
next_pos = next_pos.next();
if next_pos == pos_init {
panic!("all players have talked")
}
}
next_pos
}
/// Check if there are still players waiting for bidding
fn no_player_left(&self) -> bool {
!self.players_status.contains(&BidStatus::Todo)
}
/// Bid a new, higher contract.
pub fn bid(
&mut self,
pos: pos::PlayerPos,
target: Target,
slam: bool,
) -> Result<AuctionState, BidError> {
if pos != self.next_player() {
return Err(BidError::TurnError);
}
self.can_bid(target)?;
// Reset previous bidder status
if let Some(contract) = self.contract.clone() {
self.set_player_status(contract.author, BidStatus::Todo);
}
let contract = Contract::new(pos, target, slam);
self.contract = Some(contract);
self.set_player_status(pos, BidStatus::Bid);
// If we're all the way to the top, there's nowhere else to go
if self.no_player_left() || target == Target::GardeContre {
self.state = AuctionState::Over;
}
Ok(self.state)
}
/// Look at the last offered contract.
///
/// Returns `None` if no contract was offered yet.
pub fn current_contract(&self) -> Option<&Contract> {
self.contract.as_ref()
}
/// Returns the players' cards.
pub fn hands(&self) -> &Vec<cards::Hand> {
&self.players
}
/// The current player passes his turn.
///
/// Returns the new auction state:
///
/// * `AuctionState::Cancelled` if every player passed and no contract was taken
/// * `AuctionState::Over` if every remaining player passed after a contract was taken
/// * The previous state otherwise
pub fn pass(&mut self, pos: pos::PlayerPos) -> Result<AuctionState, BidError> {
if pos != self.next_player() {
return Err(BidError::TurnError);
}
self.set_player_status(pos, BidStatus::Passed);
if self.no_player_left() {
self.state = if self.contract.is_some() {
AuctionState::Over
} else {
AuctionState::Cancelled
}
}
Ok(self.state)
}
/// Uses a completed auction to enter the second deal phase.
///
/// If the auction is over, returns `Ok(DealState)`.
pub fn complete(&self) -> Result<deal::DealState, BidError> {
if self.state != AuctionState::Over {
Err(BidError::AuctionRunning)
// } else if self.contract.is_none() {
} else {
if let Some(contract) = self.contract.clone() {
Ok(deal::DealState::new(
self.first,
self.players.clone(),
self.dog,
contract,
pos::PlayerPos::from_n(0,5), //XXX placeholder
))
} else {
Err(BidError::NoContract)
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::pos;
#[test]
fn test_auction() {
let mut auction = Auction::new(pos::PlayerPos::from_n(0, 5));
assert!(auction.state == AuctionState::Bidding);
assert_eq!(auction.pass(pos::PlayerPos::from_n(0, 5)), Ok(AuctionState::Bidding));
assert_eq!(auction.pass(pos::PlayerPos::from_n(1, 5)), Ok(AuctionState::Bidding));
assert_eq!(auction.pass(pos::Player | {
match *self {
BidError::AuctionClosed => write!(f, "auctions are closed"),
BidError::TurnError => write!(f, "invalid turn order"),
BidError::NonRaisedTarget => write!(f, "bid must be higher than current contract"),
BidError::AuctionRunning => write!(f, "the auction is still running"),
BidError::NoContract => write!(f, "no contract was offered"),
}
} | identifier_body |
bid.rs | re,
}
impl Target {
/// Returns the score this target would give on success.
pub fn multiplier(self) -> i32 {
match self {
Target::Prise => 1,
Target::Garde => 2,
Target::GardeSans => 4,
Target::GardeContre => 6,
}
}
pub fn to_str(self) -> &'static str {
match self {
Target::Prise => "prise",
Target::Garde => "garde",
Target::GardeSans => "garde sans",
Target::GardeContre => "garde contre",
}
}
}
impl FromStr for Target {
type Err = String;
fn from_str(s: &str) -> Result<Self, String> {
match s {
"prise" => Ok(Target::Prise),
"garde" => Ok(Target::Garde),
"garde sans" => Ok(Target::GardeSans),
"garde contre" => Ok(Target::GardeContre),
_ => Err(format!("invalid target: {}", s)),
}
}
}
impl ToString for Target {
fn to_string(&self) -> String {
self.to_str().to_owned()
}
}
/// Contract taken
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct Contract {
/// Initial author of the contract.
pub author: pos::PlayerPos,
/// Target for the contract.
pub target: Target,
/// Slam asked?
pub slam: bool,
}
impl Contract {
fn new(author: pos::PlayerPos, target: Target, slam: bool) -> Self {
Contract {
author,
target,
slam,
}
}
}
impl ToString for Contract {
fn to_string(&self) -> String {
let str_slam = if self.slam { " SLAM" } else { "" };
format!("{}{}", self.target.to_str(), str_slam)
}
}
/// Current state of an auction
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)]
pub enum AuctionState {
/// Players are still bidding for the highest contract
Bidding,
/// Auction is over, deal will begin
Over,
/// No contract was taken, a new deal will start
Cancelled,
}
/// Bidding status for a player
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)]
pub enum BidStatus {
Todo,
Passed,
Bid,
}
/// Represents the entire auction process.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Auction {
contract: Option<Contract>,
players_status: Vec<BidStatus>,
first: pos::PlayerPos,
state: AuctionState,
players: Vec<cards::Hand>,
dog: cards::Hand,
}
/// Possible errors occurring during an Auction.
#[derive(PartialEq, Debug)]
pub enum BidError {
/// The auction was closed and does not accept more contracts.
AuctionClosed,
/// A player tried bidding before his turn.
TurnError,
/// The given bid was not higher than the previous one.
NonRaisedTarget,
/// Cannot complete the auction when it is still running.
AuctionRunning,
/// No contract was offered during the auction, it cannot complete.
NoContract,
}
impl fmt::Display for BidError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
BidError::AuctionClosed => write!(f, "auctions are closed"),
BidError::TurnError => write!(f, "invalid turn order"),
BidError::NonRaisedTarget => write!(f, "bid must be higher than current contract"),
BidError::AuctionRunning => write!(f, "the auction is still running"),
BidError::NoContract => write!(f, "no contract was offered"),
}
}
}
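// Hedged usage example (not part of the original module): callers are expected to
// surface `BidError` values through the `Display` impl above, e.g.
//
//     match auction.bid(pos, Target::Garde, false) {
//         Ok(state) => println!("auction is now {:?}", state),
//         Err(e) => println!("bid rejected: {}", e),
//     }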
impl Auction {
/// Starts a new auction, starting with the player `first`.
pub fn new(first: pos::PlayerPos) -> Self {
let count = first.count as usize;
let (hands, dog) = super::deal_hands(count);
Auction {
contract: None,
players_status: vec![BidStatus::Todo; count],
state: AuctionState::Bidding,
first,
players: hands,
dog
}
}
/// Override Auction hands (for tests)
pub fn set_hands(&mut self, hands: Vec<cards::Hand>, dog: cards::Hand) {
self.players = hands;
self.dog = dog;
}
/// Returns the current state of the auctions.
pub fn get_state(&self) -> AuctionState {
self.state
}
fn can_bid(&self, target: Target) -> Result<(), BidError> {
if self.state != AuctionState::Bidding {
return Err(BidError::AuctionClosed);
}
if let Some(contract) = self.contract.clone() {
if target.multiplier() <= contract.target.multiplier() {
return Err(BidError::NonRaisedTarget);
}
}
Ok(())
}
fn get_player_status(&self, pos: pos::PlayerPos) -> BidStatus {
self.players_status[pos.to_n()]
}
fn set_player_status(&mut self, pos: pos::PlayerPos, status: BidStatus) {
self.players_status[pos.to_n()] = status;
}
/// Returns the player that is expected to bid next.
pub fn next_player(&self) -> pos::PlayerPos {
let pos_init = if let Some(contract) = self.contract.clone() {
contract.author.next()
} else {
self.first
};
let mut next_pos = pos_init;
while self.get_player_status(next_pos) != BidStatus::Todo {
next_pos = next_pos.next();
if next_pos == pos_init {
panic!("all players have talked")
}
}
next_pos
}
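// Example of the rotation above (illustrative): with 5 players, if player 2 holds
// the contract and players 3 and 4 have already passed, the scan starts at player 3
// and wraps around to return player 0, the first seat still marked `Todo`.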
/// Check if there are still players waiting for bidding
fn no_player_left(&self) -> bool {
!self.players_status.contains(&BidStatus::Todo)
}
/// Bid a new, higher contract.
pub fn bid(
&mut self,
pos: pos::PlayerPos,
target: Target,
slam: bool,
) -> Result<AuctionState, BidError> {
if pos != self.next_player() {
return Err(BidError::TurnError);
}
self.can_bid(target)?; |
let contract = Contract::new(pos, target, slam);
self.contract = Some(contract);
self.set_player_status(pos, BidStatus::Bid);
// If we're all the way to the top, there's nowhere else to go
if self.no_player_left() || target == Target::GardeContre {
self.state = AuctionState::Over;
}
Ok(self.state)
}
/// Look at the last offered contract.
///
/// Returns `None` if no contract was offered yet.
pub fn current_contract(&self) -> Option<&Contract> {
self.contract.as_ref()
}
/// Returns the players' cards.
pub fn hands(&self) -> &Vec<cards::Hand> {
&self.players
}
/// The current player passes his turn.
///
/// Returns the new auction state:
///
/// * `AuctionState::Cancelled` if every player passed and no contract was taken
/// * `AuctionState::Over` if every remaining player passed after a contract was taken
/// * The previous state otherwise
pub fn pass(&mut self, pos: pos::PlayerPos) -> Result<AuctionState, BidError> {
if pos != self.next_player() {
return Err(BidError::TurnError);
}
self.set_player_status(pos, BidStatus::Passed);
if self.no_player_left() {
self.state = if self.contract.is_some() {
AuctionState::Over
} else {
AuctionState::Cancelled
}
}
Ok(self.state)
}
/// Uses a completed auction to enter the second deal phase.
///
/// If the auction is over, returns `Ok(DealState)`.
pub fn complete(&self) -> Result<deal::DealState, BidError> {
if self.state != AuctionState::Over {
Err(BidError::AuctionRunning)
// } else if self.contract.is_none() {
} else {
if let Some(contract) = self.contract.clone() {
Ok(deal::DealState::new(
self.first,
self.players.clone(),
self.dog,
contract,
pos::PlayerPos::from_n(0,5), //XXX placeholder
))
} else {
Err(BidError::NoContract)
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::pos;
#[test]
fn test_auction() {
let mut auction = Auction::new(pos::PlayerPos::from_n(0, 5));
assert!(auction.state == AuctionState::Bidding);
assert_eq!(auction.pass(pos::PlayerPos::from_n(0, 5)), Ok(AuctionState::Bidding));
assert_eq!(auction.pass(pos::PlayerPos::from_n(1, 5)), Ok(AuctionState::Bidding));
assert_eq!(auction.pass(pos:: |
// Reset previous bidder status
if let Some(contract) = self.contract.clone() {
self.set_player_status(contract.author, BidStatus::Todo);
} | random_line_split |
bot.py | ules and enjoy your stay. Do d.help to check out the bot'.format(member, server))
kats = bot.get_channel('313863292126756864')
if member.server.id == '294262760752152576':
await bot.send_message(kats, '{0.mention} Welcome to **Dragons and Kats**! Have a great time here and enjoy yourselves!!!:wink: !'.format(member))
else:
print('Member joined {}, but message not sent'.format(member.server))
@bot.event
async def on_command(command, ctx):
if str(command) == 'eval':
return
print('------------------------------------')
print('Command > {}{} < invoked with > {} <\nServer: {} | {}\nUser: {} | {}'
.format(ctx.prefix,
command,
ctx.invoked_with,
ctx.message.server.name,
ctx.message.server.id,
ctx.message.author.name,
ctx.message.author.id))
@bot.event
async def on_member_remove(member):
server = member.server
with open('cogs/utils/t_config.json') as f:
data = json.loads(f.read())
status = data[server.id]["leave"]["status"]
if status:
msg = data[server.id]["leave"]["msg"]
channel = data[server.id]['leave']['channel']
if channel == 'default':
channel = server
else:
channel = discord.utils.get(server.channels, id=channel)
await bot.send_message(channel, msg.format(member, server))
@bot.event
async def on_server_join(server):
embed = discord.Embed(title='Darkness Info', color=0xed)
owner = server.owner
servers = len(bot.servers)
embed.add_field(name='Author', value='<@300396755193954306>')
embed.add_field(name='Servers', value=servers)
embed.add_field(name='Prefix', value='d.')
embed.set_footer(text='Powered by discord.py')
embed.set_thumbnail(url='http://data.whicdn.com/images/150102219/large.gif')
embed.add_field(name='Invite', value='https://discordapp.com/oauth2/authorize?client_id=355189919410421760&scope=bot&permissions=66186303')
embed.add_field(name='Support', value='https://discord.gg/Jjdp8hf')
embed.add_field(name='GitHub', value='https://github.com/shadeyg56/darkness')
await bot.send_message(owner, embed=embed)
def fmt_help(page):
cmd = ''
for line in page.splitlines():
if line.startswith('.'):
cmd = line.strip('.')
break
em = discord.Embed(color=0x00FFFF)
em.set_author(name='Help - {}'.format(cmd))
async def send_cmd_help(ctx):
if ctx.invoked_subcommand:
pages = bot.formatter.format_help_for(ctx, ctx.invoked_subcommand)
for page in pages:
# page = page.strip('```css').strip('```')
await bot.send_message(ctx.message.channel, page)
print('Sent command help')
else:
pages = bot.formatter.format_help_for(ctx, ctx.command)
for page in pages:
await bot.send_message(ctx.message.channel, page)
print('Sent command help')
@bot.event
async def on_command_error(error, ctx):
print(error)
channel = ctx.message.channel
if isinstance(error, commands.MissingRequiredArgument):
await send_cmd_help(ctx)
print('Sent command help')
elif isinstance(error, commands.BadArgument):
await send_cmd_help(ctx)
print('Sent command help')
elif isinstance(error, commands.DisabledCommand):
await bot.send_message(channel, "That command is disabled.")
print('Command disabled.')
elif isinstance(error, commands.CommandInvokeError):
# A bit hacky, couldn't find a better way
no_dms = "Cannot send messages to this user"
is_help_cmd = ctx.command.qualified_name == "help"
is_forbidden = isinstance(error.original, discord.Forbidden)
if is_help_cmd and is_forbidden and error.original.text == no_dms:
msg = ("I couldn't send the help message to you in DM. Either"
" you blocked me or you disabled DMs in this server.")
await bot.send_message(channel, msg)
return
@bot.command(pass_context=True,name='cog')
@owner_only()
async def _reload(ctx,*, module : str):
"""Reloads a module."""
channel = ctx.message.channel
module = 'cogs.'+module
try:
bot.unload_extension(module)
x = await bot.send_message(channel,'Successfully Unloaded.')
bot.load_extension(module)
x = await bot.edit_message(x,'Successfully Reloaded.')
except Exception as e:
x = await bot.edit_message(x,'\N{PISTOL}')
await bot.say('{}: {}'.format(type(e).__name__, e))
else:
x = await bot.edit_message(x,'Done. \N{OK HAND SIGN}')
@bot.command(name='presence')
async def _set(Type=None,*,thing=None):
"""Change the bot's discord game/stream!"""
server = len(bot.servers)
if Type is None:
await bot.say('Usage: `.presence [game/stream] [message]`')
else:
if Type.lower() == 'stream':
await bot.change_presence(game=discord.Game(name=thing,type=1,url='https://www.twitch.tv/a'),status='online')
await bot.say('Set presence to. `Streaming {}`'.format(thing))
elif Type.lower() == 'game':
await bot.change_presence(game=discord.Game(name=thing))
await bot.say('Set presence to `Playing {}`'.format(thing))
elif Type.lower() == 'clear':
await bot.change_presence(game=None)
await bot.say('Cleared Presence')
elif Type.lower() == 'servers':
await bot.change_presence(game=discord.Game(name='with {} servers'.format(server)))
await bot.say("**I'm now playing with {} servers.**".format(server))
else:
await bot.say('Usage: `.presence [game/stream] [message]`')
@bot.command(pass_context=True)
@is_owner()
async def _leave_all_servers_(ctx):
for server in bot.servers:
await bot.leave_server(server)
await bot.say('I left `{}`'.format(server.name))
@bot.command(pass_context=True)
async def servers(ctx):
servers = ', '.join([i.name for i in bot.servers]).strip(', ')
await bot.say('**Current list of servers:**\n ```bf\n{}```'.format(servers))
@bot.command(pass_context=True)
@is_owner()
async def _leave_server(ctx, server):
to_leave = discord.utils.get(bot.servers, id=str(server))
try:
await bot.leave_server(to_leave)
except:
await bot.say('Failed.')
else:
await bot.say('Successfully left {}'.format(to_leave.name))
@bot.command(pass_context=True)
async def register(ctx):
server = ctx.message.server
channel = discord.utils.get(server.channels, name='server-event')
user = ctx.message.author
with open('cogs/utils/registrations.txt') as f:
data = f.read()
print(data )
if ctx.message.channel != channel:
await bot.say('You can only register in {}'.format(channel.mention))
return
if str(user) in data:
await bot.delete_message(ctx.message)
await bot.send_message(user, "You can't register more than once.")
return
with open('cogs/utils/registrations.txt','a') as f:
f.write(str(user)+'\n')
role = discord.utils.get(server.roles, name='4row')
await bot.add_roles(user, role)
await bot.add_reaction(ctx.message, '\u2705')
@bot.command(pass_context = True)
@is_owner()
async def shutdown(ctx):
timestamp = ctx.message.timestamp
embed=discord.Embed(title='Good Night', description='See you tomorrow', color=0xed, timestamp=timestamp)
embed.set_footer(text='Darkness no longer online')
await bot.say(embed=embed)
await bot.logout()
if __name__ == "__main__":
for extension in startup_extensions:
try:
bot.load_extension(extension)
print('Loaded: {}'.format(extension))
except Exception as e:
exc = '{}: {}'.format(type(e).__name__, e)
print('Error on load: {}\n{}'.format(extension, exc))
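# Hedged sketch: startup_extensions is assumed to be defined near the top of this
# file as a plain list of dotted cog module paths, e.g.
# startup_extensions = ['cogs.mod', 'cogs.info', 'cogs.fun']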
def cleanup_code( content):
"""Automatically removes code blocks from the code."""
# remove ```py\n```
if content.startswith('```') and content.endswith('```'):
return '\n'.join(content.split('\n')[1:-1])
# remove `foo`
return content.strip('` \n')
def get_syntax_error(e):
if e.text is None:
return '```py\n{0.__class__.__name__}: {0}\n```'.format(e)
return '```py\n{0.text}{1:>{0.offset}}\n{2}: {0}```'.format(e, '^', type(e).__name__)
async def | to_code_block | identifier_name |
|
bot.py | (game=discord.Game(name='Currently WIP | Darkness'))
await asyncio.sleep(10)
await bot.change_presence(game=discord.Game(name='d.support | d.invite'))
await asyncio.sleep(25)
@bot.command(pass_context=True)
async def help(ctx):
await bot.delete_message(ctx.message)
msg = open('cogs/utils/help.txt').read().replace('\\u200b','\u200b').splitlines()
for i, line in enumerate(msg):
if line.strip().startswith('.'):
x = line.strip().strip('.')
x = ctx.prefix + x
msg[i] = '`' + x + '`'
try:
p = Pages(bot, message=ctx.message, entries=msg)
p.embed.set_author(name='Help - Darkness Commands', icon_url=bot.user.avatar_url)
p.embed.color = 0x00FFFF
await p.paginate()
except:
embed = discord.Embed(title='Darkness Commands', color=0xed)
embed.add_field(name='Moderation:', value='kick, ban, unban, softban, warn, purge')
embed.add_field(name='Information:', value='info, serverinfo, userinfo, avatar')
embed.add_field(name='Miscellaneous:', value='ping, suggest, invite, support')
embed.add_field(name='Utilities:', value='calc, remind, addrole, removerole')
embed.add_field(name='Fun:', value='8ball, cat')
embed.set_footer(text='Bot Dev: -= shadeyg56 =-#1702')
await bot.say(embed=embed)
def owner_only():
return commands.check(lambda ctx: ctx.message.author == ctx.message.server.owner)
def is_owner():
return commands.check(lambda ctx: ctx.message.author.id == owner)
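# Hedged example (not in the original bot): owner_only() and is_owner() are meant to
# be stacked on commands as decorators, the same way is_owner() is used further down:
#
# @bot.command(pass_context=True)
# @owner_only()
# async def announce(ctx, *, text: str):
#     await bot.say(text)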
@bot.event
async def on_member_join(member):
darkness = bot.get_channel('356599668739670049')
if member.server.id == '356599668739670048':
|
kats = bot.get_channel('313863292126756864')
if member.server.id == '294262760752152576':
await bot.send_message(kats, '{0.mention} Welcome to **Dragons and Kats**! Have a great time here and enjoy yourselves!!!:wink: !'.format(member))
else:
print('Member joined {}, but message not sent'.format(member.server))
@bot.event
async def on_command(command, ctx):
if str(command) == 'eval':
return
print('------------------------------------')
print('Command > {}{} < invoked with > {} <\nServer: {} | {}\nUser: {} | {}'
.format(ctx.prefix,
command,
ctx.invoked_with,
ctx.message.server.name,
ctx.message.server.id,
ctx.message.author.name,
ctx.message.author.id))
@bot.event
async def on_member_remove(member):
server = member.server
with open('cogs/utils/t_config.json') as f:
data = json.loads(f.read())
status = data[server.id]["leave"]["status"]
if status:
msg = data[server.id]["leave"]["msg"]
channel = data[server.id]['leave']['channel']
if channel == 'default':
channel = server
else:
channel = discord.utils.get(server.channels, id=channel)
await bot.send_message(channel, msg.format(member, server))
@bot.event
async def on_server_join(server):
embed = discord.Embed(title='Darkness Info', color=0xed)
owner = server.owner
servers = len(bot.servers)
embed.add_field(name='Author', value='<@300396755193954306>')
embed.add_field(name='Servers', value=servers)
embed.add_field(name='Prefix', value='d.')
embed.set_footer(text='Powered by discord.py')
embed.set_thumbnail(url='http://data.whicdn.com/images/150102219/large.gif')
embed.add_field(name='Invite', value='https://discordapp.com/oauth2/authorize?client_id=355189919410421760&scope=bot&permissions=66186303')
embed.add_field(name='Support', value='https://discord.gg/Jjdp8hf')
embed.add_field(name='GitHub', value='https://github.com/shadeyg56/darkness')
await bot.send_message(owner, embed=embed)
def fmt_help(page):
cmd = ''
for line in page.splitlines():
if line.startswith('.'):
cmd = line.strip('.')
break
em = discord.Embed(color=0x00FFFF)
em.set_author(name='Help - {}'.format(cmd))
async def send_cmd_help(ctx):
if ctx.invoked_subcommand:
pages = bot.formatter.format_help_for(ctx, ctx.invoked_subcommand)
for page in pages:
# page = page.strip('```css').strip('```')
await bot.send_message(ctx.message.channel, page)
print('Sent command help')
else:
pages = bot.formatter.format_help_for(ctx, ctx.command)
for page in pages:
await bot.send_message(ctx.message.channel, page)
print('Sent command help')
@bot.event
async def on_command_error(error, ctx):
print(error)
channel = ctx.message.channel
if isinstance(error, commands.MissingRequiredArgument):
await send_cmd_help(ctx)
print('Sent command help')
elif isinstance(error, commands.BadArgument):
await send_cmd_help(ctx)
print('Sent command help')
elif isinstance(error, commands.DisabledCommand):
await bot.send_message(channel, "That command is disabled.")
print('Command disabled.')
elif isinstance(error, commands.CommandInvokeError):
# A bit hacky, couldn't find a better way
no_dms = "Cannot send messages to this user"
is_help_cmd = ctx.command.qualified_name == "help"
is_forbidden = isinstance(error.original, discord.Forbidden)
if is_help_cmd and is_forbidden and error.original.text == no_dms:
msg = ("I couldn't send the help message to you in DM. Either"
" you blocked me or you disabled DMs in this server.")
await bot.send_message(channel, msg)
return
@bot.command(pass_context=True,name='cog')
@owner_only()
async def _reload(ctx,*, module : str):
"""Reloads a module."""
channel = ctx.message.channel
module = 'cogs.'+module
try:
bot.unload_extension(module)
x = await bot.send_message(channel,'Successfully Unloaded.')
bot.load_extension(module)
x = await bot.edit_message(x,'Successfully Reloaded.')
except Exception as e:
x = await bot.edit_message(x,'\N{PISTOL}')
await bot.say('{}: {}'.format(type(e).__name__, e))
else:
x = await bot.edit_message(x,'Done. \N{OK HAND SIGN}')
@bot.command(name='presence')
async def _set(Type=None,*,thing=None):
"""Change the bot's discord game/stream!"""
server = len(bot.servers)
if Type is None:
await bot.say('Usage: `.presence [game/stream] [message]`')
else:
if Type.lower() == 'stream':
await bot.change_presence(game=discord.Game(name=thing,type=1,url='https://www.twitch.tv/a'),status='online')
await bot.say('Set presence to. `Streaming {}`'.format(thing))
elif Type.lower() == 'game':
await bot.change_presence(game=discord.Game(name=thing))
await bot.say('Set presence to `Playing {}`'.format(thing))
elif Type.lower() == 'clear':
await bot.change_presence(game=None)
await bot.say('Cleared Presence')
elif Type.lower() == 'servers':
await bot.change_presence(game=discord.Game(name='with {} servers'.format(server)))
await bot.say("**I'm now playing with {} servers.**".format(server))
else:
await bot.say('Usage: `.presence [game/stream] [message]`')
@bot.command(pass_context=True)
@is_owner()
async def _leave_all_servers_(ctx):
for server in bot.servers:
await bot.leave_server(server)
await bot.say('I left `{}`'.format(server.name))
@bot.command(pass_context=True)
async def servers(ctx):
servers = ', '.join([i.name for i in bot.servers]).strip(', ')
await bot.say('**Current list of servers:**\n ```bf\n{}```'.format(servers))
@bot.command(pass_context=True)
@is_owner()
async def _leave_server(ctx, server):
to_leave = discord.utils.get(bot.servers, id=str(server))
try:
await bot.leave_server(to_leave)
except:
await bot.say('Failed.')
else:
await bot.say('Successfully left {}'.format(to_leave.name))
@bot.command(pass_context=True)
async def | await bot.send_message(darkness, 'Welcome {0.mention} to {}. Please read #info-and-rules and enjoy your stay. Do d.help to check out the bot'.format(member, server)) | conditional_block |
bot.py | servers = len(bot.servers)
embed.add_field(name='Author', value='<@300396755193954306>')
embed.add_field(name='Servers', value=servers)
embed.add_field(name='Prefix', value='d.')
embed.set_footer(text='Powered by discord.py')
embed.set_thumbnail(url='http://data.whicdn.com/images/150102219/large.gif')
embed.add_field(name='Invite', value='https://discordapp.com/oauth2/authorize?client_id=355189919410421760&scope=bot&permissions=66186303')
embed.add_field(name='Support', value='https://discord.gg/Jjdp8hf')
embed.add_field(name='GitHub', value='https://github.com/shadeyg56/darkness')
await bot.send_message(owner, embed=embed)
def fmt_help(page):
cmd = ''
for line in page.splitlines():
if line.startswith('.'):
cmd = line.strip('.')
break
em = discord.Embed(color=0x00FFFF)
em.set_author(name='Help - {}'.format(cmd))
async def send_cmd_help(ctx):
if ctx.invoked_subcommand:
pages = bot.formatter.format_help_for(ctx, ctx.invoked_subcommand)
for page in pages:
# page = page.strip('```css').strip('```')
await bot.send_message(ctx.message.channel, page)
print('Sent command help')
else:
pages = bot.formatter.format_help_for(ctx, ctx.command)
for page in pages:
await bot.send_message(ctx.message.channel, page)
print('Sent command help')
@bot.event
async def on_command_error(error, ctx):
print(error)
channel = ctx.message.channel
if isinstance(error, commands.MissingRequiredArgument):
await send_cmd_help(ctx)
print('Sent command help')
elif isinstance(error, commands.BadArgument):
await send_cmd_help(ctx)
print('Sent command help')
elif isinstance(error, commands.DisabledCommand):
await bot.send_message(channel, "That command is disabled.")
print('Command disabled.')
elif isinstance(error, commands.CommandInvokeError):
# A bit hacky, couldn't find a better way
no_dms = "Cannot send messages to this user"
is_help_cmd = ctx.command.qualified_name == "help"
is_forbidden = isinstance(error.original, discord.Forbidden)
if is_help_cmd and is_forbidden and error.original.text == no_dms:
msg = ("I couldn't send the help message to you in DM. Either"
" you blocked me or you disabled DMs in this server.")
await bot.send_message(channel, msg)
return
@bot.command(pass_context=True,name='cog')
@owner_only()
async def _reload(ctx,*, module : str):
"""Reloads a module."""
channel = ctx.message.channel
module = 'cogs.'+module
try:
bot.unload_extension(module)
x = await bot.send_message(channel,'Successfully Unloaded.')
bot.load_extension(module)
x = await bot.edit_message(x,'Successfully Reloaded.')
except Exception as e:
x = await bot.edit_message(x,'\N{PISTOL}')
await bot.say('{}: {}'.format(type(e).__name__, e))
else:
x = await bot.edit_message(x,'Done. \N{OK HAND SIGN}')
@bot.command(name='presence')
async def _set(Type=None,*,thing=None):
"""Change the bot's discord game/stream!"""
server = len(bot.servers)
if Type is None:
await bot.say('Usage: `.presence [game/stream] [message]`')
else:
if Type.lower() == 'stream':
await bot.change_presence(game=discord.Game(name=thing,type=1,url='https://www.twitch.tv/a'),status='online')
await bot.say('Set presence to. `Streaming {}`'.format(thing))
elif Type.lower() == 'game':
await bot.change_presence(game=discord.Game(name=thing))
await bot.say('Set presence to `Playing {}`'.format(thing))
elif Type.lower() == 'clear':
await bot.change_presence(game=None)
await bot.say('Cleared Presence')
elif Type.lower() == 'servers':
await bot.change_presence(game=discord.Game(name='with {} servers'.format(server)))
await bot.say("**I'm now playing with {} servers.**".format(server))
else:
await bot.say('Usage: `.presence [game/stream] [message]`')
@bot.command(pass_context=True)
@is_owner()
async def _leave_all_servers_(ctx):
for server in bot.servers:
await bot.leave_server(server)
await bot.say('I left `{}`'.format(server.name))
@bot.command(pass_context=True)
async def servers(ctx):
servers = ', '.join([i.name for i in bot.servers]).strip(', ')
await bot.say('**Current list of servers:**\n ```bf\n{}```'.format(servers))
@bot.command(pass_context=True)
@is_owner()
async def _leave_server(ctx, server):
to_leave = discord.utils.get(bot.servers, id=str(server))
try:
await bot.leave_server(to_leave)
except:
await bot.say('Failed.')
else:
await bot.say('Successfully left {}'.format(to_leave.name))
@bot.command(pass_context=True)
async def register(ctx):
server = ctx.message.server
channel = discord.utils.get(server.channels, name='server-event')
user = ctx.message.author
with open('cogs/utils/registrations.txt') as f:
data = f.read()
print(data )
if ctx.message.channel != channel:
await bot.say('You can only register in {}'.format(channel.mention))
return
if str(user) in data:
await bot.delete_message(ctx.message)
await bot.send_message(user, "You can't register more than once.")
return
with open('cogs/utils/registrations.txt','a') as f:
f.write(str(user)+'\n')
role = discord.utils.get(server.roles, name='4row')
await bot.add_roles(user, role)
await bot.add_reaction(ctx.message, '\u2705')
@bot.command(pass_context = True)
@is_owner()
async def shutdown(ctx):
timestamp = ctx.message.timestamp
embed=discord.Embed(title='Good Night', description='See you tomorrow', color=0xed, timestamp=timestamp)
embed.set_footer(text='Darkness no longer online')
await bot.say(embed=embed)
await bot.logout()
if __name__ == "__main__":
for extension in startup_extensions:
try:
bot.load_extension(extension)
print('Loaded: {}'.format(extension))
except Exception as e:
exc = '{}: {}'.format(type(e).__name__, e)
print('Error on load: {}\n{}'.format(extension, exc))
def cleanup_code( content):
"""Automatically removes code blocks from the code."""
# remove ```py\n```
if content.startswith('```') and content.endswith('```'):
return '\n'.join(content.split('\n')[1:-1])
# remove `foo`
return content.strip('` \n')
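# Hedged examples of what cleanup_code produces (added for illustration):
#   cleanup_code("```py\nprint('hi')\n```")  -> "print('hi')"
#   cleanup_code("`print('hi')`")            -> "print('hi')"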
def get_syntax_error(e):
if e.text is None:
return '```py\n{0.__class__.__name__}: {0}\n```'.format(e)
return '```py\n{0.text}{1:>{0.offset}}\n{2}: {0}```'.format(e, '^', type(e).__name__)
async def to_code_block(ctx, body):
if body.startswith('```') and body.endswith('```'):
content = '\n'.join(body.split('\n')[1:-1])
else:
content = body.strip('`')
await bot.edit_message(ctx.message, '```py\n'+content+'```')
@bot.command(pass_context=True, name='eval')
@is_owner()
async def _eval(ctx, *, body: str):
'''Run python scripts on discord!'''
env = {
'bot': bot,
'ctx': ctx,
'channel': ctx.message.channel,
'author': ctx.message.author,
'server': ctx.message.server,
'message': ctx.message,
}
env.update(globals())
body = cleanup_code(content=body)
stdout = io.StringIO()
to_compile = 'async def func():\n%s' % textwrap.indent(body, ' ')
try:
exec(to_compile, env)
except SyntaxError as e:
return await bot.say(get_syntax_error(e))
func = env['func']
try:
with redirect_stdout(stdout):
ret = await func()
except Exception as e:
value = stdout.getvalue()
x = await bot.say('```py\n{}{}\n```'.format(value, traceback.format_exc()))
try:
await bot.add_reaction(x, '\U0001f534')
except:
pass
else:
value = stdout.getvalue()
if TOKEN in value:
value = value.replace(TOKEN,"[EXPUNGED]")
if ret is None:
if value:
try: | x = await bot.say('```py\n%s\n```' % value)
except:
x = await bot.say('```py\n\'Result was too long.\'```')
try: | random_line_split |
|
bot.py | .Embed(color=0x00FFFF)
em.set_author(name='Help - {}'.format(cmd))
async def send_cmd_help(ctx):
if ctx.invoked_subcommand:
pages = bot.formatter.format_help_for(ctx, ctx.invoked_subcommand)
for page in pages:
# page = page.strip('```css').strip('```')
await bot.send_message(ctx.message.channel, page)
print('Sent command help')
else:
pages = bot.formatter.format_help_for(ctx, ctx.command)
for page in pages:
await bot.send_message(ctx.message.channel, page)
print('Sent command help')
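# send_cmd_help is called from on_command_error below whenever a command is invoked
# with missing or malformed arguments, so the user sees that command's usage text
# instead of a raw error.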
@bot.event
async def on_command_error(error, ctx):
print(error)
channel = ctx.message.channel
if isinstance(error, commands.MissingRequiredArgument):
await send_cmd_help(ctx)
print('Sent command help')
elif isinstance(error, commands.BadArgument):
await send_cmd_help(ctx)
print('Sent command help')
elif isinstance(error, commands.DisabledCommand):
await bot.send_message(channel, "That command is disabled.")
print('Command disabled.')
elif isinstance(error, commands.CommandInvokeError):
# A bit hacky, couldn't find a better way
no_dms = "Cannot send messages to this user"
is_help_cmd = ctx.command.qualified_name == "help"
is_forbidden = isinstance(error.original, discord.Forbidden)
if is_help_cmd and is_forbidden and error.original.text == no_dms:
msg = ("I couldn't send the help message to you in DM. Either"
" you blocked me or you disabled DMs in this server.")
await bot.send_message(channel, msg)
return
@bot.command(pass_context=True,name='cog')
@owner_only()
async def _reload(ctx,*, module : str):
"""Reloads a module."""
channel = ctx.message.channel
module = 'cogs.'+module
try:
bot.unload_extension(module)
x = await bot.send_message(channel,'Successfully Unloaded.')
bot.load_extension(module)
x = await bot.edit_message(x,'Successfully Reloaded.')
except Exception as e:
x = await bot.edit_message(x,'\N{PISTOL}')
await bot.say('{}: {}'.format(type(e).__name__, e))
else:
x = await bot.edit_message(x,'Done. \N{OK HAND SIGN}')
@bot.command(name='presence')
async def _set(Type=None,*,thing=None):
"""Change the bot's discord game/stream!"""
server = len(bot.servers)
if Type is None:
await bot.say('Usage: `.presence [game/stream] [message]`')
else:
if Type.lower() == 'stream':
await bot.change_presence(game=discord.Game(name=thing,type=1,url='https://www.twitch.tv/a'),status='online')
await bot.say('Set presence to. `Streaming {}`'.format(thing))
elif Type.lower() == 'game':
await bot.change_presence(game=discord.Game(name=thing))
await bot.say('Set presence to `Playing {}`'.format(thing))
elif Type.lower() == 'clear':
await bot.change_presence(game=None)
await bot.say('Cleared Presence')
elif Type.lower() == 'servers':
await bot.change_presence(game=discord.Game(name='with {} servers'.format(server)))
await bot.say("**I'm now playing with {} servers.**".format(server))
else:
await bot.say('Usage: `.presence [game/stream] [message]`')
@bot.command(pass_context=True)
@is_owner()
async def _leave_all_servers_(ctx):
for server in bot.servers:
await bot.leave_server(server)
await bot.say('I left `{}`'.format(server.name))
@bot.command(pass_context=True)
async def servers(ctx):
servers = ', '.join([i.name for i in bot.servers]).strip(', ')
await bot.say('**Current list of servers:**\n ```bf\n{}```'.format(servers))
@bot.command(pass_context=True)
@is_owner()
async def _leave_server(ctx, server):
to_leave = discord.utils.get(bot.servers, id=str(server))
try:
await bot.leave_server(to_leave)
except:
await bot.say('Failed.')
else:
await bot.say('Successfully left {}'.format(to_leave.name))
@bot.command(pass_context=True)
async def register(ctx):
server = ctx.message.server
channel = discord.utils.get(server.channels, name='server-event')
user = ctx.message.author
with open('cogs/utils/registrations.txt') as f:
data = f.read()
print(data )
if ctx.message.channel != channel:
await bot.say('You can only register in {}'.format(channel.mention))
return
if str(user) in data:
await bot.delete_message(ctx.message)
await bot.send_message(user, "You can't register more than once.")
return
with open('cogs/utils/registrations.txt','a') as f:
f.write(str(user)+'\n')
role = discord.utils.get(server.roles, name='4row')
await bot.add_roles(user, role)
await bot.add_reaction(ctx.message, '\u2705')
@bot.command(pass_context = True)
@is_owner()
async def shutdown(ctx):
timestamp = ctx.message.timestamp
embed=discord.Embed(title='Good Night', description='See you tomorrow', color=0xed, timestamp=timestamp)
embed.set_footer(text='Darkness no longer online')
await bot.say(embed=embed)
await bot.logout()
if __name__ == "__main__":
for extension in startup_extensions:
try:
bot.load_extension(extension)
print('Loaded: {}'.format(extension))
except Exception as e:
exc = '{}: {}'.format(type(e).__name__, e)
print('Error on load: {}\n{}'.format(extension, exc))
def cleanup_code( content):
"""Automatically removes code blocks from the code."""
# remove ```py\n```
if content.startswith('```') and content.endswith('```'):
return '\n'.join(content.split('\n')[1:-1])
# remove `foo`
return content.strip('` \n')
def get_syntax_error(e):
if e.text is None:
return '```py\n{0.__class__.__name__}: {0}\n```'.format(e)
return '```py\n{0.text}{1:>{0.offset}}\n{2}: {0}```'.format(e, '^', type(e).__name__)
async def to_code_block(ctx, body):
if body.startswith('```') and body.endswith('```'):
content = '\n'.join(body.split('\n')[1:-1])
else:
content = body.strip('`')
await bot.edit_message(ctx.message, '```py\n'+content+'```')
@bot.command(pass_context=True, name='eval')
@is_owner()
async def _eval(ctx, *, body: str):
'''Run python scripts on discord!'''
env = {
'bot': bot,
'ctx': ctx,
'channel': ctx.message.channel,
'author': ctx.message.author,
'server': ctx.message.server,
'message': ctx.message,
}
env.update(globals())
body = cleanup_code(content=body)
stdout = io.StringIO()
to_compile = 'async def func():\n%s' % textwrap.indent(body, ' ')
try:
exec(to_compile, env)
except SyntaxError as e:
return await bot.say(get_syntax_error(e))
func = env['func']
try:
with redirect_stdout(stdout):
ret = await func()
except Exception as e:
value = stdout.getvalue()
x = await bot.say('```py\n{}{}\n```'.format(value, traceback.format_exc()))
try:
await bot.add_reaction(x, '\U0001f534')
except:
pass
else:
value = stdout.getvalue()
if TOKEN in value:
value = value.replace(TOKEN,"[EXPUNGED]")
if ret is None:
if value:
try:
x = await bot.say('```py\n%s\n```' % value)
except:
x = await bot.say('```py\n\'Result was too long.\'```')
try:
await bot.add_reaction(x, '\U0001f535')
except:
pass
else:
try:
await bot.add_reaction(ctx.message, '\U0001f535')
except:
pass
else:
try:
x = await bot.say('```py\n%s%s\n```' % (value, ret))
except:
x = await bot.say('```py\n\'Result was too long.\'```')
try:
await bot.add_reaction(x, '\U0001f535')
except:
pass
@bot.command(pass_context = True)
async def devcontact(ctx, *, msg: str):
| dev = '@-= shadeyg56™ =-#1702'
user = ctx.message.author
await bot.send_message(dev, '{} sent the following message: {}'.format(user, msg))
await bot.say('Your message has been sent. It will be checked by the dev asap. If your message was a troll or you keep resending/spamming a message you will be blacklisted from the command')
await bot.delete_message(ctx.message)
| identifier_body |
|
trigger.go | "TriggerReconcileFailed"
triggerUpdateStatusFailed = "TriggerUpdateStatusFailed"
subscriptionDeleteFailed = "SubscriptionDeleteFailed"
subscriptionCreateFailed = "SubscriptionCreateFailed"
)
type reconciler struct {
client client.Client
dynamicClient dynamic.Interface
recorder record.EventRecorder
logger *zap.Logger
}
// Verify the struct implements reconcile.Reconciler.
var _ reconcile.Reconciler = &reconciler{}
// ProvideController returns a function that returns a Trigger controller.
func ProvideController(mgr manager.Manager, logger *zap.Logger) (controller.Controller, error) {
// Setup a new controller to Reconcile Triggers.
r := &reconciler{
recorder: mgr.GetRecorder(controllerAgentName),
logger: logger,
}
c, err := controller.New(controllerAgentName, mgr, controller.Options{
Reconciler: r,
})
if err != nil {
return nil, err
}
// Watch Triggers.
if err = c.Watch(&source.Kind{Type: &v1alpha1.Trigger{}}, &handler.EnqueueRequestForObject{}); err != nil {
return nil, err
}
// Watch all the resources that the Trigger reconciles.
for _, t := range []runtime.Object{&corev1.Service{}, &istiov1alpha3.VirtualService{}, &v1alpha1.Subscription{}} {
err = c.Watch(&source.Kind{Type: t}, &handler.EnqueueRequestForOwner{OwnerType: &v1alpha1.Trigger{}, IsController: true})
if err != nil {
return nil, err
}
}
// Watch for Broker changes. E.g. if the Broker is deleted and recreated, we need to reconcile
// the Trigger again.
if err = c.Watch(&source.Kind{Type: &v1alpha1.Broker{}}, &handler.EnqueueRequestsFromMapFunc{ToRequests: &mapBrokerToTriggers{r: r}}); err != nil {
return nil, err
}
// TODO reconcile after a change to the subscriber. I'm not sure how this is possible, but we should do it if we
// can find a way.
return c, nil
}
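// Illustrative sketch (assumption, not taken from this repository): an entrypoint
// typically wires this controller up roughly as follows, with the manager and stop
// channel supplied by the caller.
//
//	cfg, _ := rest.InClusterConfig()
//	mgr, err := manager.New(cfg, manager.Options{})
//	if err != nil {
//		// handle error
//	}
//	if _, err := ProvideController(mgr, logger); err != nil {
//		// handle error
//	}
//	_ = mgr.Start(stopCh) // stopCh is a caller-provided <-chan struct{}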
// mapBrokerToTriggers maps Broker changes to all the Triggers that correspond to that Broker.
type mapBrokerToTriggers struct {
r *reconciler
}
// Map implements handler.Mapper.Map.
func (b *mapBrokerToTriggers) Map(o handler.MapObject) []reconcile.Request {
ctx := context.Background()
triggers := make([]reconcile.Request, 0)
opts := &client.ListOptions{
Namespace: o.Meta.GetNamespace(),
// Set Raw because if we need to get more than one page, then we will put the continue token
// into opts.Raw.Continue.
Raw: &metav1.ListOptions{},
}
for {
tl := &v1alpha1.TriggerList{}
if err := b.r.client.List(ctx, opts, tl); err != nil {
b.r.logger.Error("Error listing Triggers when Broker changed. Some Triggers may not be reconciled.", zap.Error(err), zap.Any("broker", o))
return triggers
}
for _, t := range tl.Items {
if t.Spec.Broker == o.Meta.GetName() {
triggers = append(triggers, reconcile.Request{
NamespacedName: types.NamespacedName{
Namespace: t.Namespace,
Name: t.Name,
},
})
}
}
if tl.Continue != "" {
opts.Raw.Continue = tl.Continue
} else {
return triggers
}
}
}
// InjectClient implements controller runtime's inject.Client.
func (r *reconciler) InjectClient(c client.Client) error {
r.client = c
return nil
}
// InjectConfig implements controller runtime's inject.Config.
func (r *reconciler) InjectConfig(c *rest.Config) error {
var err error
r.dynamicClient, err = dynamic.NewForConfig(c)
return err
}
// Reconcile compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the Trigger resource
// with the current status of the resource.
func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) {
ctx := context.TODO()
ctx = logging.WithLogger(ctx, r.logger.With(zap.Any("request", request)))
trigger := &v1alpha1.Trigger{}
err := r.client.Get(ctx, request.NamespacedName, trigger)
if errors.IsNotFound(err) {
logging.FromContext(ctx).Info("Could not find Trigger")
return reconcile.Result{}, nil
}
if err != nil {
logging.FromContext(ctx).Error("Could not get Trigger", zap.Error(err))
return reconcile.Result{}, err
}
// Reconcile this copy of the Trigger and then write back any status updates regardless of
// whether the reconcile errors out.
reconcileErr := r.reconcile(ctx, trigger)
if reconcileErr != nil {
logging.FromContext(ctx).Error("Error reconciling Trigger", zap.Error(reconcileErr))
r.recorder.Eventf(trigger, corev1.EventTypeWarning, triggerReconcileFailed, "Trigger reconciliation failed: %v", reconcileErr)
} else {
logging.FromContext(ctx).Debug("Trigger reconciled")
r.recorder.Event(trigger, corev1.EventTypeNormal, triggerReconciled, "Trigger reconciled")
}
if _, err = r.updateStatus(trigger); err != nil {
logging.FromContext(ctx).Error("Failed to update Trigger status", zap.Error(err))
r.recorder.Eventf(trigger, corev1.EventTypeWarning, triggerUpdateStatusFailed, "Failed to update Trigger's status: %v", err)
return reconcile.Result{}, err
}
// Requeue if the resource is not ready
return reconcile.Result{}, reconcileErr
}
func (r *reconciler) reconcile(ctx context.Context, t *v1alpha1.Trigger) error {
t.Status.InitializeConditions()
// 1. Verify the Broker exists.
// 2. Get the Broker's:
// - Filter Channel
// - Ingress Channel
// - Filter Service
// 3. Find the Subscriber's URI.
// 4. Create a Subscription from the Broker's Filter Channel to this Trigger via the Broker's
// Filter Service with a specific path, and reply set to the Broker's Ingress Channel.
if t.DeletionTimestamp != nil {
// Everything is cleaned up by the garbage collector.
return nil
}
b, err := r.getBroker(ctx, t)
if err != nil {
logging.FromContext(ctx).Error("Unable to get the Broker", zap.Error(err))
t.Status.MarkBrokerDoesNotExist()
return err
}
t.Status.MarkBrokerExists()
brokerTrigger, err := r.getBrokerTriggerChannel(ctx, b)
if err != nil {
logging.FromContext(ctx).Error("Unable to get the Broker's Trigger Channel", zap.Error(err))
return err
}
brokerIngress, err := r.getBrokerIngressChannel(ctx, b)
if err != nil {
logging.FromContext(ctx).Error("Unable to get the Broker's Ingress Channel", zap.Error(err))
return err
}
// Get Broker filter service.
filterSvc, err := r.getBrokerFilterService(ctx, b)
if err != nil {
logging.FromContext(ctx).Error("Unable to get the Broker's filter Service", zap.Error(err))
return err
}
subscriberURI, err := resolve.SubscriberSpec(ctx, r.dynamicClient, t.Namespace, t.Spec.Subscriber)
if err != nil {
logging.FromContext(ctx).Error("Unable to get the Subscriber's URI", zap.Error(err))
return err
}
t.Status.SubscriberURI = subscriberURI
_, err = r.subscribeToBrokerChannel(ctx, t, brokerTrigger, brokerIngress, filterSvc)
if err != nil {
logging.FromContext(ctx).Error("Unable to Subscribe", zap.Error(err))
t.Status.MarkNotSubscribed("notSubscribed", "%v", err)
return err
}
t.Status.MarkSubscribed()
return nil
}
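// For orientation only (hedged example, not taken from this repository's samples):
// a minimal Trigger that the steps above would process looks roughly like
//
//	apiVersion: eventing.knative.dev/v1alpha1
//	kind: Trigger
//	metadata:
//	  name: my-trigger
//	spec:
//	  broker: default
//	  subscriber:
//	    ref:
//	      apiVersion: serving.knative.dev/v1alpha1
//	      kind: Service
//	      name: my-subscriber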
// updateStatus may in fact update the trigger's finalizers in addition to the status.
func (r *reconciler) | (trigger *v1alpha1.Trigger) (*v1alpha1.Trigger, error) {
ctx := context.TODO()
objectKey := client.ObjectKey{Namespace: trigger.Namespace, Name: trigger.Name}
latestTrigger := &v1alpha1.Trigger{}
if err := r.client.Get(ctx, objectKey, latestTrigger); err != nil {
return nil, err
}
triggerChanged := false
if !equality.Semantic.DeepEqual(latestTrigger.Finalizers, trigger.Finalizers) {
latestTrigger.SetFinalizers(trigger.ObjectMeta.Finalizers)
if err := r.client.Update(ctx, latestTrigger); err != nil {
return nil, err
}
triggerChanged = true
}
if equality.Semantic.DeepEqual(latestTrigger.Status, trigger.Status) {
return latestTrigger, nil
}
if triggerChanged {
// Refetch
latestTrigger = &v1alpha1.Trigger{}
if err := r.client.Get(ctx, objectKey, latestTrigger); err != nil {
return nil, err
}
}
latestTrigger.Status = trigger.Status
if err := r.client.Status().Update(ctx, latestTrigger); | updateStatus | identifier_name |
trigger.go | alpha1.Subscription{}} {
err = c.Watch(&source.Kind{Type: t}, &handler.EnqueueRequestForOwner{OwnerType: &v1alpha1.Trigger{}, IsController: true})
if err != nil {
return nil, err
}
}
// Watch for Broker changes. E.g. if the Broker is deleted and recreated, we need to reconcile
// the Trigger again.
if err = c.Watch(&source.Kind{Type: &v1alpha1.Broker{}}, &handler.EnqueueRequestsFromMapFunc{ToRequests: &mapBrokerToTriggers{r: r}}); err != nil {
return nil, err
}
// TODO reconcile after a change to the subscriber. I'm not sure how this is possible, but we should do it if we
// can find a way.
return c, nil
}
// mapBrokerToTriggers maps Broker changes to all the Triggers that correspond to that Broker.
type mapBrokerToTriggers struct {
r *reconciler
}
// Map implements handler.Mapper.Map.
func (b *mapBrokerToTriggers) Map(o handler.MapObject) []reconcile.Request {
ctx := context.Background()
triggers := make([]reconcile.Request, 0)
opts := &client.ListOptions{
Namespace: o.Meta.GetNamespace(),
// Set Raw because if we need to get more than one page, then we will put the continue token
// into opts.Raw.Continue.
Raw: &metav1.ListOptions{},
}
for {
tl := &v1alpha1.TriggerList{}
if err := b.r.client.List(ctx, opts, tl); err != nil {
b.r.logger.Error("Error listing Triggers when Broker changed. Some Triggers may not be reconciled.", zap.Error(err), zap.Any("broker", o))
return triggers
}
for _, t := range tl.Items {
if t.Spec.Broker == o.Meta.GetName() {
triggers = append(triggers, reconcile.Request{
NamespacedName: types.NamespacedName{
Namespace: t.Namespace,
Name: t.Name,
},
})
}
}
if tl.Continue != "" {
opts.Raw.Continue = tl.Continue
} else {
return triggers
}
}
}
// InjectClient implements controller runtime's inject.Client.
func (r *reconciler) InjectClient(c client.Client) error {
r.client = c
return nil
}
// InjectConfig implements controller runtime's inject.Config.
func (r *reconciler) InjectConfig(c *rest.Config) error {
var err error
r.dynamicClient, err = dynamic.NewForConfig(c)
return err
}
// Reconcile compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the Trigger resource
// with the current status of the resource.
func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) {
ctx := context.TODO()
ctx = logging.WithLogger(ctx, r.logger.With(zap.Any("request", request)))
trigger := &v1alpha1.Trigger{}
err := r.client.Get(ctx, request.NamespacedName, trigger)
if errors.IsNotFound(err) {
logging.FromContext(ctx).Info("Could not find Trigger")
return reconcile.Result{}, nil
}
if err != nil {
logging.FromContext(ctx).Error("Could not get Trigger", zap.Error(err))
return reconcile.Result{}, err
}
// Reconcile this copy of the Trigger and then write back any status updates regardless of
// whether the reconcile errors out.
reconcileErr := r.reconcile(ctx, trigger)
if reconcileErr != nil {
logging.FromContext(ctx).Error("Error reconciling Trigger", zap.Error(reconcileErr))
r.recorder.Eventf(trigger, corev1.EventTypeWarning, triggerReconcileFailed, "Trigger reconciliation failed: %v", reconcileErr)
} else {
logging.FromContext(ctx).Debug("Trigger reconciled")
r.recorder.Event(trigger, corev1.EventTypeNormal, triggerReconciled, "Trigger reconciled")
}
if _, err = r.updateStatus(trigger); err != nil {
logging.FromContext(ctx).Error("Failed to update Trigger status", zap.Error(err))
r.recorder.Eventf(trigger, corev1.EventTypeWarning, triggerUpdateStatusFailed, "Failed to update Trigger's status: %v", err)
return reconcile.Result{}, err
}
// Requeue if the resource is not ready
return reconcile.Result{}, reconcileErr
}
func (r *reconciler) reconcile(ctx context.Context, t *v1alpha1.Trigger) error {
t.Status.InitializeConditions()
// 1. Verify the Broker exists.
// 2. Get the Broker's:
// - Filter Channel
// - Ingress Channel
// - Filter Service
// 3. Find the Subscriber's URI.
// 4. Create a Subscription from the Broker's Filter Channel to this Trigger via the Broker's
// Filter Service with a specific path, and reply set to the Broker's Ingress Channel.
if t.DeletionTimestamp != nil {
// Everything is cleaned up by the garbage collector.
return nil
}
b, err := r.getBroker(ctx, t)
if err != nil {
logging.FromContext(ctx).Error("Unable to get the Broker", zap.Error(err))
t.Status.MarkBrokerDoesNotExist()
return err
}
t.Status.MarkBrokerExists()
brokerTrigger, err := r.getBrokerTriggerChannel(ctx, b)
if err != nil {
logging.FromContext(ctx).Error("Unable to get the Broker's Trigger Channel", zap.Error(err))
return err
}
brokerIngress, err := r.getBrokerIngressChannel(ctx, b)
if err != nil {
logging.FromContext(ctx).Error("Unable to get the Broker's Ingress Channel", zap.Error(err))
return err
}
// Get Broker filter service.
filterSvc, err := r.getBrokerFilterService(ctx, b)
if err != nil {
logging.FromContext(ctx).Error("Unable to get the Broker's filter Service", zap.Error(err))
return err
}
subscriberURI, err := resolve.SubscriberSpec(ctx, r.dynamicClient, t.Namespace, t.Spec.Subscriber)
if err != nil {
logging.FromContext(ctx).Error("Unable to get the Subscriber's URI", zap.Error(err))
return err
}
t.Status.SubscriberURI = subscriberURI
_, err = r.subscribeToBrokerChannel(ctx, t, brokerTrigger, brokerIngress, filterSvc)
if err != nil {
logging.FromContext(ctx).Error("Unable to Subscribe", zap.Error(err))
t.Status.MarkNotSubscribed("notSubscribed", "%v", err)
return err
}
t.Status.MarkSubscribed()
return nil
}
// updateStatus may in fact update the trigger's finalizers in addition to the status.
func (r *reconciler) updateStatus(trigger *v1alpha1.Trigger) (*v1alpha1.Trigger, error) {
ctx := context.TODO()
objectKey := client.ObjectKey{Namespace: trigger.Namespace, Name: trigger.Name}
latestTrigger := &v1alpha1.Trigger{}
if err := r.client.Get(ctx, objectKey, latestTrigger); err != nil {
return nil, err
}
triggerChanged := false
if !equality.Semantic.DeepEqual(latestTrigger.Finalizers, trigger.Finalizers) {
latestTrigger.SetFinalizers(trigger.ObjectMeta.Finalizers)
if err := r.client.Update(ctx, latestTrigger); err != nil {
return nil, err
}
triggerChanged = true
}
if equality.Semantic.DeepEqual(latestTrigger.Status, trigger.Status) {
return latestTrigger, nil
}
if triggerChanged {
// Refetch
latestTrigger = &v1alpha1.Trigger{}
if err := r.client.Get(ctx, objectKey, latestTrigger); err != nil {
return nil, err
}
}
latestTrigger.Status = trigger.Status
if err := r.client.Status().Update(ctx, latestTrigger); err != nil {
return nil, err
}
return latestTrigger, nil
}
// getBroker returns the Broker for Trigger 't' if it exists, otherwise it returns an error.
func (r *reconciler) getBroker(ctx context.Context, t *v1alpha1.Trigger) (*v1alpha1.Broker, error) {
b := &v1alpha1.Broker{}
name := types.NamespacedName{
Namespace: t.Namespace,
Name: t.Spec.Broker,
}
err := r.client.Get(ctx, name, b)
return b, err
}
// getBrokerTriggerChannel returns the Broker's Trigger Channel if it exists, otherwise it returns an
// error.
func (r *reconciler) getBrokerTriggerChannel(ctx context.Context, b *v1alpha1.Broker) (*v1alpha1.Channel, error) {
return r.getChannel(ctx, b, labels.SelectorFromSet(broker.TriggerChannelLabels(b)))
}
// getBrokerIngressChannel returns the Broker's Ingress Channel if it exists, otherwise it returns an
// error.
func (r *reconciler) getBrokerIngressChannel(ctx context.Context, b *v1alpha1.Broker) (*v1alpha1.Channel, error) | {
return r.getChannel(ctx, b, labels.SelectorFromSet(broker.IngressChannelLabels(b)))
} | identifier_body |
|
trigger.go | "TriggerReconcileFailed"
triggerUpdateStatusFailed = "TriggerUpdateStatusFailed"
subscriptionDeleteFailed = "SubscriptionDeleteFailed"
subscriptionCreateFailed = "SubscriptionCreateFailed"
)
type reconciler struct {
client client.Client
dynamicClient dynamic.Interface
recorder record.EventRecorder
logger *zap.Logger
}
// Verify the struct implements reconcile.Reconciler.
var _ reconcile.Reconciler = &reconciler{}
// ProvideController returns a Trigger controller.
func ProvideController(mgr manager.Manager, logger *zap.Logger) (controller.Controller, error) {
// Setup a new controller to Reconcile Triggers.
r := &reconciler{
recorder: mgr.GetRecorder(controllerAgentName),
logger: logger,
}
c, err := controller.New(controllerAgentName, mgr, controller.Options{
Reconciler: r,
})
if err != nil |
// Watch Triggers.
if err = c.Watch(&source.Kind{Type: &v1alpha1.Trigger{}}, &handler.EnqueueRequestForObject{}); err != nil {
return nil, err
}
// Watch all the resources that the Trigger reconciles.
for _, t := range []runtime.Object{&corev1.Service{}, &istiov1alpha3.VirtualService{}, &v1alpha1.Subscription{}} {
err = c.Watch(&source.Kind{Type: t}, &handler.EnqueueRequestForOwner{OwnerType: &v1alpha1.Trigger{}, IsController: true})
if err != nil {
return nil, err
}
}
// Watch for Broker changes. E.g. if the Broker is deleted and recreated, we need to reconcile
// the Trigger again.
if err = c.Watch(&source.Kind{Type: &v1alpha1.Broker{}}, &handler.EnqueueRequestsFromMapFunc{ToRequests: &mapBrokerToTriggers{r: r}}); err != nil {
return nil, err
}
// TODO reconcile after a change to the subscriber. I'm not sure how this is possible, but we should do it if we
// can find a way.
return c, nil
}
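// Minimal wiring sketch (illustrative only; `cfg`, `logger`, and `stopCh` are assumed to exist in
// the caller, and the calls follow the older controller-runtime API used here):
//
//   mgr, err := manager.New(cfg, manager.Options{})
//   if err != nil { /* handle error */ }
//   if _, err := ProvideController(mgr, logger); err != nil { /* handle error */ }
//   _ = mgr.Start(stopCh) // blocks; drives Reconcile for watched Triggers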
// mapBrokerToTriggers maps Broker changes to all the Triggers that correspond to that Broker.
type mapBrokerToTriggers struct {
r *reconciler
}
// Map implements handler.Mapper.Map.
func (b *mapBrokerToTriggers) Map(o handler.MapObject) []reconcile.Request {
ctx := context.Background()
triggers := make([]reconcile.Request, 0)
opts := &client.ListOptions{
Namespace: o.Meta.GetNamespace(),
// Set Raw because if we need to get more than one page, then we will put the continue token
// into opts.Raw.Continue.
Raw: &metav1.ListOptions{},
}
for {
tl := &v1alpha1.TriggerList{}
if err := b.r.client.List(ctx, opts, tl); err != nil {
b.r.logger.Error("Error listing Triggers when Broker changed. Some Triggers may not be reconciled.", zap.Error(err), zap.Any("broker", o))
return triggers
}
for _, t := range tl.Items {
if t.Spec.Broker == o.Meta.GetName() {
triggers = append(triggers, reconcile.Request{
NamespacedName: types.NamespacedName{
Namespace: t.Namespace,
Name: t.Name,
},
})
}
}
if tl.Continue != "" {
opts.Raw.Continue = tl.Continue
} else {
return triggers
}
}
}
// InjectClient implements controller runtime's inject.Client.
func (r *reconciler) InjectClient(c client.Client) error {
r.client = c
return nil
}
// InjectConfig implements controller runtime's inject.Config.
func (r *reconciler) InjectConfig(c *rest.Config) error {
var err error
r.dynamicClient, err = dynamic.NewForConfig(c)
return err
}
// Reconcile compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the Trigger resource
// with the current status of the resource.
func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) {
ctx := context.TODO()
ctx = logging.WithLogger(ctx, r.logger.With(zap.Any("request", request)))
trigger := &v1alpha1.Trigger{}
err := r.client.Get(ctx, request.NamespacedName, trigger)
if errors.IsNotFound(err) {
logging.FromContext(ctx).Info("Could not find Trigger")
return reconcile.Result{}, nil
}
if err != nil {
logging.FromContext(ctx).Error("Could not get Trigger", zap.Error(err))
return reconcile.Result{}, err
}
// Reconcile this copy of the Trigger and then write back any status updates regardless of
// whether the reconcile errors out.
reconcileErr := r.reconcile(ctx, trigger)
if reconcileErr != nil {
logging.FromContext(ctx).Error("Error reconciling Trigger", zap.Error(reconcileErr))
r.recorder.Eventf(trigger, corev1.EventTypeWarning, triggerReconcileFailed, "Trigger reconciliation failed: %v", reconcileErr)
} else {
logging.FromContext(ctx).Debug("Trigger reconciled")
r.recorder.Event(trigger, corev1.EventTypeNormal, triggerReconciled, "Trigger reconciled")
}
if _, err = r.updateStatus(trigger); err != nil {
logging.FromContext(ctx).Error("Failed to update Trigger status", zap.Error(err))
r.recorder.Eventf(trigger, corev1.EventTypeWarning, triggerUpdateStatusFailed, "Failed to update Trigger's status: %v", err)
return reconcile.Result{}, err
}
// Requeue if the resource is not ready
return reconcile.Result{}, reconcileErr
}
func (r *reconciler) reconcile(ctx context.Context, t *v1alpha1.Trigger) error {
t.Status.InitializeConditions()
// 1. Verify the Broker exists.
// 2. Get the Broker's:
// - Filter Channel
// - Ingress Channel
// - Filter Service
// 3. Find the Subscriber's URI.
// 4. Create a Subscription from the Broker's Filter Channel to this Trigger via the Broker's
// Filter Service with a specific path, and reply set to the Broker's Ingress Channel.
if t.DeletionTimestamp != nil {
// Everything is cleaned up by the garbage collector.
return nil
}
b, err := r.getBroker(ctx, t)
if err != nil {
logging.FromContext(ctx).Error("Unable to get the Broker", zap.Error(err))
t.Status.MarkBrokerDoesNotExist()
return err
}
t.Status.MarkBrokerExists()
brokerTrigger, err := r.getBrokerTriggerChannel(ctx, b)
if err != nil {
logging.FromContext(ctx).Error("Unable to get the Broker's Trigger Channel", zap.Error(err))
return err
}
brokerIngress, err := r.getBrokerIngressChannel(ctx, b)
if err != nil {
logging.FromContext(ctx).Error("Unable to get the Broker's Ingress Channel", zap.Error(err))
return err
}
// Get Broker filter service.
filterSvc, err := r.getBrokerFilterService(ctx, b)
if err != nil {
logging.FromContext(ctx).Error("Unable to get the Broker's filter Service", zap.Error(err))
return err
}
subscriberURI, err := resolve.SubscriberSpec(ctx, r.dynamicClient, t.Namespace, t.Spec.Subscriber)
if err != nil {
logging.FromContext(ctx).Error("Unable to get the Subscriber's URI", zap.Error(err))
return err
}
t.Status.SubscriberURI = subscriberURI
_, err = r.subscribeToBrokerChannel(ctx, t, brokerTrigger, brokerIngress, filterSvc)
if err != nil {
logging.FromContext(ctx).Error("Unable to Subscribe", zap.Error(err))
t.Status.MarkNotSubscribed("notSubscribed", "%v", err)
return err
}
t.Status.MarkSubscribed()
return nil
}
// updateStatus may in fact update the trigger's finalizers in addition to the status.
func (r *reconciler) updateStatus(trigger *v1alpha1.Trigger) (*v1alpha1.Trigger, error) {
ctx := context.TODO()
objectKey := client.ObjectKey{Namespace: trigger.Namespace, Name: trigger.Name}
latestTrigger := &v1alpha1.Trigger{}
if err := r.client.Get(ctx, objectKey, latestTrigger); err != nil {
return nil, err
}
triggerChanged := false
if !equality.Semantic.DeepEqual(latestTrigger.Finalizers, trigger.Finalizers) {
latestTrigger.SetFinalizers(trigger.ObjectMeta.Finalizers)
if err := r.client.Update(ctx, latestTrigger); err != nil {
return nil, err
}
triggerChanged = true
}
if equality.Semantic.DeepEqual(latestTrigger.Status, trigger.Status) {
return latestTrigger, nil
}
if triggerChanged {
// Refetch
latestTrigger = &v1alpha1.Trigger{}
if err := r.client.Get(ctx, objectKey, latestTrigger); err != nil {
return nil, err
}
}
latestTrigger.Status = trigger.Status
if err := r.client.Status().Update(ctx, latestTrigger | {
return nil, err
} | conditional_block |
trigger.go | "TriggerReconcileFailed"
triggerUpdateStatusFailed = "TriggerUpdateStatusFailed"
subscriptionDeleteFailed = "SubscriptionDeleteFailed"
subscriptionCreateFailed = "SubscriptionCreateFailed"
)
type reconciler struct {
client client.Client
dynamicClient dynamic.Interface
recorder record.EventRecorder
logger *zap.Logger
}
// Verify the struct implements reconcile.Reconciler.
var _ reconcile.Reconciler = &reconciler{}
// ProvideController returns a Trigger controller.
func ProvideController(mgr manager.Manager, logger *zap.Logger) (controller.Controller, error) {
// Setup a new controller to Reconcile Triggers.
r := &reconciler{
recorder: mgr.GetRecorder(controllerAgentName),
logger: logger,
}
c, err := controller.New(controllerAgentName, mgr, controller.Options{
Reconciler: r,
})
if err != nil {
return nil, err
}
// Watch Triggers.
if err = c.Watch(&source.Kind{Type: &v1alpha1.Trigger{}}, &handler.EnqueueRequestForObject{}); err != nil {
return nil, err
}
// Watch all the resources that the Trigger reconciles.
for _, t := range []runtime.Object{&corev1.Service{}, &istiov1alpha3.VirtualService{}, &v1alpha1.Subscription{}} {
err = c.Watch(&source.Kind{Type: t}, &handler.EnqueueRequestForOwner{OwnerType: &v1alpha1.Trigger{}, IsController: true})
if err != nil {
return nil, err
}
}
// Watch for Broker changes. E.g. if the Broker is deleted and recreated, we need to reconcile
// the Trigger again.
if err = c.Watch(&source.Kind{Type: &v1alpha1.Broker{}}, &handler.EnqueueRequestsFromMapFunc{ToRequests: &mapBrokerToTriggers{r: r}}); err != nil {
return nil, err
}
// TODO reconcile after a change to the subscriber. I'm not sure how this is possible, but we should do it if we
// can find a way.
return c, nil
}
// mapBrokerToTriggers maps Broker changes to all the Triggers that correspond to that Broker.
type mapBrokerToTriggers struct {
r *reconciler
}
// Map implements handler.Mapper.Map.
func (b *mapBrokerToTriggers) Map(o handler.MapObject) []reconcile.Request {
ctx := context.Background()
triggers := make([]reconcile.Request, 0)
opts := &client.ListOptions{
Namespace: o.Meta.GetNamespace(),
// Set Raw because if we need to get more than one page, then we will put the continue token
// into opts.Raw.Continue.
Raw: &metav1.ListOptions{},
}
for {
tl := &v1alpha1.TriggerList{}
if err := b.r.client.List(ctx, opts, tl); err != nil {
b.r.logger.Error("Error listing Triggers when Broker changed. Some Triggers may not be reconciled.", zap.Error(err), zap.Any("broker", o))
return triggers
}
for _, t := range tl.Items {
if t.Spec.Broker == o.Meta.GetName() {
triggers = append(triggers, reconcile.Request{
NamespacedName: types.NamespacedName{
Namespace: t.Namespace,
Name: t.Name,
},
})
}
}
if tl.Continue != "" {
opts.Raw.Continue = tl.Continue
} else {
return triggers
}
}
}
// InjectClient implements controller runtime's inject.Client.
func (r *reconciler) InjectClient(c client.Client) error {
r.client = c
return nil
}
// InjectConfig implements controller runtime's inject.Config.
func (r *reconciler) InjectConfig(c *rest.Config) error {
var err error
r.dynamicClient, err = dynamic.NewForConfig(c)
return err
}
// Reconcile compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the Trigger resource
// with the current status of the resource.
func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) {
ctx := context.TODO()
ctx = logging.WithLogger(ctx, r.logger.With(zap.Any("request", request)))
trigger := &v1alpha1.Trigger{}
err := r.client.Get(ctx, request.NamespacedName, trigger)
if errors.IsNotFound(err) {
logging.FromContext(ctx).Info("Could not find Trigger")
return reconcile.Result{}, nil
}
if err != nil {
logging.FromContext(ctx).Error("Could not get Trigger", zap.Error(err))
return reconcile.Result{}, err
}
// Reconcile this copy of the Trigger and then write back any status updates regardless of
// whether the reconcile errors out.
reconcileErr := r.reconcile(ctx, trigger) | logging.FromContext(ctx).Error("Error reconciling Trigger", zap.Error(reconcileErr))
r.recorder.Eventf(trigger, corev1.EventTypeWarning, triggerReconcileFailed, "Trigger reconciliation failed: %v", reconcileErr)
} else {
logging.FromContext(ctx).Debug("Trigger reconciled")
r.recorder.Event(trigger, corev1.EventTypeNormal, triggerReconciled, "Trigger reconciled")
}
if _, err = r.updateStatus(trigger); err != nil {
logging.FromContext(ctx).Error("Failed to update Trigger status", zap.Error(err))
r.recorder.Eventf(trigger, corev1.EventTypeWarning, triggerUpdateStatusFailed, "Failed to update Trigger's status: %v", err)
return reconcile.Result{}, err
}
// Requeue if the resource is not ready
return reconcile.Result{}, reconcileErr
}
func (r *reconciler) reconcile(ctx context.Context, t *v1alpha1.Trigger) error {
t.Status.InitializeConditions()
// 1. Verify the Broker exists.
// 2. Get the Broker's:
// - Filter Channel
// - Ingress Channel
// - Filter Service
// 3. Find the Subscriber's URI.
// 4. Create a Subscription from the Broker's Filter Channel to this Trigger via the Broker's
// Filter Service with a specific path, and reply set to the Broker's Ingress Channel.
if t.DeletionTimestamp != nil {
// Everything is cleaned up by the garbage collector.
return nil
}
b, err := r.getBroker(ctx, t)
if err != nil {
logging.FromContext(ctx).Error("Unable to get the Broker", zap.Error(err))
t.Status.MarkBrokerDoesNotExist()
return err
}
t.Status.MarkBrokerExists()
brokerTrigger, err := r.getBrokerTriggerChannel(ctx, b)
if err != nil {
logging.FromContext(ctx).Error("Unable to get the Broker's Trigger Channel", zap.Error(err))
return err
}
brokerIngress, err := r.getBrokerIngressChannel(ctx, b)
if err != nil {
logging.FromContext(ctx).Error("Unable to get the Broker's Ingress Channel", zap.Error(err))
return err
}
// Get Broker filter service.
filterSvc, err := r.getBrokerFilterService(ctx, b)
if err != nil {
logging.FromContext(ctx).Error("Unable to get the Broker's filter Service", zap.Error(err))
return err
}
subscriberURI, err := resolve.SubscriberSpec(ctx, r.dynamicClient, t.Namespace, t.Spec.Subscriber)
if err != nil {
logging.FromContext(ctx).Error("Unable to get the Subscriber's URI", zap.Error(err))
return err
}
t.Status.SubscriberURI = subscriberURI
_, err = r.subscribeToBrokerChannel(ctx, t, brokerTrigger, brokerIngress, filterSvc)
if err != nil {
logging.FromContext(ctx).Error("Unable to Subscribe", zap.Error(err))
t.Status.MarkNotSubscribed("notSubscribed", "%v", err)
return err
}
t.Status.MarkSubscribed()
return nil
}
// updateStatus may in fact update the trigger's finalizers in addition to the status.
func (r *reconciler) updateStatus(trigger *v1alpha1.Trigger) (*v1alpha1.Trigger, error) {
ctx := context.TODO()
objectKey := client.ObjectKey{Namespace: trigger.Namespace, Name: trigger.Name}
latestTrigger := &v1alpha1.Trigger{}
if err := r.client.Get(ctx, objectKey, latestTrigger); err != nil {
return nil, err
}
triggerChanged := false
if !equality.Semantic.DeepEqual(latestTrigger.Finalizers, trigger.Finalizers) {
latestTrigger.SetFinalizers(trigger.ObjectMeta.Finalizers)
if err := r.client.Update(ctx, latestTrigger); err != nil {
return nil, err
}
triggerChanged = true
}
if equality.Semantic.DeepEqual(latestTrigger.Status, trigger.Status) {
return latestTrigger, nil
}
if triggerChanged {
// Refetch
latestTrigger = &v1alpha1.Trigger{}
if err := r.client.Get(ctx, objectKey, latestTrigger); err != nil {
return nil, err
}
}
latestTrigger.Status = trigger.Status
if err := r.client.Status().Update(ctx, latestTrigger); err | if reconcileErr != nil { | random_line_split |
mod.rs | type Body: MessageBody<B>;
/// The type corresponding to this message type.
///
/// The value of the "type" field in the ICMP header corresponding to
/// messages of this type.
const TYPE: I::IcmpMessageType;
/// Parse a `Code` from an 8-bit number.
///
/// Parse a `Code` from the 8-bit "code" field in the ICMP header. Not all
/// values for this field are valid. If an invalid value is passed,
/// `code_from_u8` returns `None`.
fn code_from_u8(code: u8) -> Option<Self::Code>;
}
/// The type of an ICMP message.
///
/// `IcmpMessageType` is implemented by `Icmpv4MessageType` and
/// `Icmpv6MessageType`.
pub trait IcmpMessageType: TryFrom<u8> + Into<u8> + Copy {
/// Is this an error message?
///
/// For ICMP, this is true for the Destination Unreachable, Redirect, Source
/// Quench, Time Exceeded, and Parameter Problem message types. For ICMPv6,
/// this is true for the Destination Unreachable, Packet Too Big, Time
/// Exceeded, and Parameter Problem message types.
fn is_err(self) -> bool;
}
#[derive(Copy, Clone, Debug, FromBytes, Unaligned)]
#[repr(C)]
struct Header<M> {
prefix: HeaderPrefix,
message: M,
}
// So long as `M: Unaligned`, there will be no padding between the
// `HeaderPrefix` and `M`. Since `HeaderPrefix` itself is `Unaligned`, the
// alignment of `Header<M>` will be 1, meaning that no post-padding will need to
// be added to get to a multiple of the alignment. Since there is no padding,
// then so long as `M: AsBytes`, all of `Header<M>: AsBytes`.
unsafe impl<M: AsBytes + Unaligned> AsBytes for Header<M> {
// We're doing a bad thing, but it's necessary until derive(AsBytes)
// supports type parameters.
fn only_derive_is_allowed_to_implement_this_trait() {}
}
/// A partially parsed and not yet validated ICMP packet.
///
/// An `IcmpPacketRaw` provides minimal parsing of an ICMP packet. Namely, it
/// only requires that the header and message (in ICMPv6, these are both
/// considered part of the header) are present, and that the header has the
/// expected message type. The body may be missing (or an unexpected body may be
/// present). Other than the message type, no header, message, or body field
/// values will be validated.
///
/// [`IcmpPacket`] provides a [`FromRaw`] implementation that can be used to
/// validate an [`IcmpPacketRaw`].
#[derive(Debug)]
pub struct IcmpPacketRaw<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> {
header: LayoutVerified<B, Header<M>>,
message_body: B,
_marker: PhantomData<I>,
}
impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacketRaw<I, B, M> {
/// Get the ICMP message.
pub fn message(&self) -> &M {
&self.header.message
}
}
/// An ICMP packet.
///
/// An `IcmpPacket` shares its underlying memory with the byte slice it was
/// parsed from, meaning that no copying or extra allocation is necessary.
#[derive(Debug)]
pub struct IcmpPacket<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> {
header: LayoutVerified<B, Header<M>>,
message_body: M::Body,
_marker: PhantomData<I>,
}
/// Arguments required to parse an ICMP packet.
pub struct IcmpParseArgs<A: IpAddress> {
src_ip: A,
dst_ip: A,
}
impl<A: IpAddress> IcmpParseArgs<A> {
/// Construct a new `IcmpParseArgs`.
pub fn new<S: Into<A>, D: Into<A>>(src_ip: S, dst_ip: D) -> IcmpParseArgs<A> {
IcmpParseArgs { src_ip: src_ip.into(), dst_ip: dst_ip.into() }
}
}
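// Minimal parse sketch (illustrative; `buf`, `src`, `dst`, and the echo-request message type are
// assumptions, not taken from this file):
//
//     let args = IcmpParseArgs::new(src, dst);
//     let pkt = buf.parse_with::<_, IcmpPacket<Ipv4, _, IcmpEchoRequest>>(args)?;
//     let _code = pkt.code();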
impl<B: ByteSlice, I: IcmpIpExt, M: IcmpMessage<I, B>> ParsablePacket<B, ()>
for IcmpPacketRaw<I, B, M>
{
type Error = ParseError;
fn parse_metadata(&self) -> ParseMetadata {
ParseMetadata::from_packet(self.header.bytes().len(), self.message_body.len(), 0)
}
fn parse<BV: BufferView<B>>(mut buffer: BV, _args: ()) -> ParseResult<Self> {
let header = buffer
.take_obj_front::<Header<M>>()
.ok_or_else(debug_err_fn!(ParseError::Format, "too few bytes for header"))?;
let message_body = buffer.into_rest();
if header.prefix.msg_type != M::TYPE.into() {
return debug_err!(Err(ParseError::NotExpected), "unexpected message type");
}
Ok(IcmpPacketRaw { header, message_body, _marker: PhantomData })
}
}
impl<B: ByteSlice, I: IcmpIpExt, M: IcmpMessage<I, B>>
FromRaw<IcmpPacketRaw<I, B, M>, IcmpParseArgs<I::Addr>> for IcmpPacket<I, B, M>
{
type Error = ParseError;
fn try_from_raw_with(
raw: IcmpPacketRaw<I, B, M>,
args: IcmpParseArgs<I::Addr>,
) -> ParseResult<Self> {
let IcmpPacketRaw { header, message_body, _marker } = raw;
if !M::Body::EXPECTS_BODY && !message_body.is_empty() {
return debug_err!(Err(ParseError::Format), "unexpected message body");
}
let _: M::Code = M::code_from_u8(header.prefix.code).ok_or_else(debug_err_fn!(
ParseError::Format,
"unrecognized code: {}",
header.prefix.code
))?;
let checksum = Self::compute_checksum(&header, &message_body, args.src_ip, args.dst_ip)
.ok_or_else(debug_err_fn!(ParseError::Format, "packet too large"))?;
if checksum != [0, 0] {
return debug_err!(Err(ParseError::Checksum), "invalid checksum");
}
let message_body = M::Body::parse(message_body)?;
Ok(IcmpPacket { header, message_body, _marker })
}
}
impl<B: ByteSlice, I: IcmpIpExt, M: IcmpMessage<I, B>> ParsablePacket<B, IcmpParseArgs<I::Addr>>
for IcmpPacket<I, B, M>
{
type Error = ParseError;
fn parse_metadata(&self) -> ParseMetadata {
ParseMetadata::from_packet(self.header.bytes().len(), self.message_body.len(), 0)
}
fn parse<BV: BufferView<B>>(buffer: BV, args: IcmpParseArgs<I::Addr>) -> ParseResult<Self> {
IcmpPacketRaw::parse(buffer, ()).and_then(|p| IcmpPacket::try_from_raw_with(p, args))
}
}
impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacket<I, B, M> {
/// Get the ICMP message.
pub fn message(&self) -> &M {
&self.header.message
}
/// Get the ICMP body.
pub fn body(&self) -> &M::Body {
&self.message_body
}
/// Get the ICMP message code.
///
/// The code provides extra details about the message. Each message type has
/// its own set of codes that are allowed.
pub fn code(&self) -> M::Code {
// infallible since it was validated in parse
M::code_from_u8(self.header.prefix.code).unwrap()
}
/// Construct a builder with the same contents as this packet.
pub fn builder(&self, src_ip: I::Addr, dst_ip: I::Addr) -> IcmpPacketBuilder<I, B, M> {
IcmpPacketBuilder { src_ip, dst_ip, code: self.code(), msg: *self.message() }
}
}
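// Reply-path sketch (an assumption about typical use, not taken from this file): a parsed request
// can seed the response builder with the addresses swapped, e.g.
// `let reply = pkt.builder(local_ip, remote_ip);`, reusing the parsed code and message.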
fn compute_checksum_fragmented<
I: IcmpIpExt,
B: ByteSlice,
BB: packet::Fragment,
M: IcmpMessage<I, B>,
>(
header: &Header<M>,
message_body: &FragmentedByteSlice<'_, BB>,
src_ip: I::Addr,
dst_ip: I::Addr,
) -> Option<[u8; 2]> {
let mut c = Checksum::new();
if I::VERSION.is_v6() | {
c.add_bytes(src_ip.bytes());
c.add_bytes(dst_ip.bytes());
let icmpv6_len = mem::size_of::<Header<M>>() + message_body.len();
let mut len_bytes = [0; 4];
NetworkEndian::write_u32(&mut len_bytes, icmpv6_len.try_into().ok()?);
c.add_bytes(&len_bytes[..]);
c.add_bytes(&[0, 0, 0]);
c.add_bytes(&[IpProto::Icmpv6.into()]);
} | conditional_block |
|
mod.rs | function. If it's called, it
// doesn't make sense for the program to continue executing; if we did,
// it would cause bugs in the caller.
unimplemented!()
}
}
/// An ICMP or ICMPv6 packet
///
/// `IcmpPacketType` is implemented by `Icmpv4Packet` and `Icmpv6Packet`
pub trait IcmpPacketType<B: ByteSlice, I: Ip>:
Sized + ParsablePacket<B, IcmpParseArgs<I::Addr>, Error = ParseError>
{
}
impl<B: ByteSlice> IcmpPacketType<B, Ipv4> for Icmpv4Packet<B> {}
impl<B: ByteSlice> IcmpPacketType<B, Ipv6> for Icmpv6Packet<B> {}
// TODO(joshlf): Once we have generic associated types, refactor this so that we
// don't have to bind B ahead of time. Removing that requirement would make some
// APIs (in particular, IcmpPacketBuilder) simpler by removing the B parameter
// from them as well.
/// `MessageBody` represents the parsed body of the ICMP packet.
///
/// - For messages that expect no body, the `MessageBody` is of type `()`.
/// - For NDP messages, the `MessageBody` is of the type `ndp::Options`.
/// - For all other messages, the `MessageBody` will be of the type
/// `OriginalPacket`, which is a thin wrapper around `B`.
pub trait MessageBody<B>: Sized {
/// Whether or not a message body is expected in an ICMP packet.
const EXPECTS_BODY: bool = true;
/// Parse the MessageBody from the provided bytes.
fn parse(bytes: B) -> ParseResult<Self>
where
B: ByteSlice;
/// The length of the underlying buffer.
fn len(&self) -> usize
where
B: ByteSlice;
/// Is the body empty?
///
/// `b.is_empty()` is equivalent to `b.len() == 0`.
fn is_empty(&self) -> bool
where
B: ByteSlice,
{
self.len() == 0
}
/// Return the underlying bytes.
fn bytes(&self) -> &[u8]
where
B: Deref<Target = [u8]>;
}
impl<B> MessageBody<B> for () {
const EXPECTS_BODY: bool = false;
fn parse(bytes: B) -> ParseResult<()>
where
B: ByteSlice,
{
if !bytes.is_empty() {
return debug_err!(Err(ParseError::Format), "unexpected message body");
}
Ok(())
}
fn len(&self) -> usize {
0
}
fn bytes(&self) -> &[u8] {
&[]
}
}
/// A thin wrapper around B which implements `MessageBody`.
#[derive(Debug)]
pub struct OriginalPacket<B>(B);
impl<B: ByteSlice + Deref<Target = [u8]>> OriginalPacket<B> {
/// Returns the body of the original packet.
pub fn body<I: IcmpIpExt>(&self) -> &[u8] {
// TODO(joshlf): Can these debug_asserts be triggered by external input?
let header_len = I::header_len(&self.0);
debug_assert!(header_len <= self.0.len());
debug_assert!(I::VERSION.is_v6() || self.0.len() - header_len == 8);
&self.0[header_len..]
}
}
impl<B> MessageBody<B> for OriginalPacket<B> {
fn parse(bytes: B) -> ParseResult<OriginalPacket<B>> {
Ok(OriginalPacket(bytes))
}
fn len(&self) -> usize
where
B: ByteSlice,
{
self.0.len()
}
fn bytes(&self) -> &[u8]
where
B: Deref<Target = [u8]>,
{
&self.0
}
}
impl<B, O: for<'a> OptionsImpl<'a>> MessageBody<B> for Options<B, O> {
fn parse(bytes: B) -> ParseResult<Options<B, O>>
where
B: ByteSlice,
{
Self::parse(bytes).map_err(|_e| debug_err!(ParseError::Format, "unable to parse options"))
}
fn len(&self) -> usize
where
B: ByteSlice,
{
self.bytes().len()
}
fn bytes(&self) -> &[u8]
where
B: Deref<Target = [u8]>,
{
self.bytes()
}
}
/// An ICMP message.
pub trait IcmpMessage<I: IcmpIpExt, B: ByteSlice>:
Sized + Copy + FromBytes + AsBytes + Unaligned
{
/// The type of codes used with this message.
///
/// The ICMP header includes an 8-bit "code" field. For a given message
/// type, different values of this field carry different meanings. Not all
/// code values are used - some may be invalid. This type represents a
/// parsed code. For example, for TODO, it is the TODO type.
type Code: Into<u8> + Copy + Debug;
/// The type of the body used with this message.
type Body: MessageBody<B>;
/// The type corresponding to this message type.
///
/// The value of the "type" field in the ICMP header corresponding to
/// messages of this type.
const TYPE: I::IcmpMessageType;
/// Parse a `Code` from an 8-bit number.
///
/// Parse a `Code` from the 8-bit "code" field in the ICMP header. Not all
/// values for this field are valid. If an invalid value is passed,
/// `code_from_u8` returns `None`.
fn code_from_u8(code: u8) -> Option<Self::Code>;
}
/// The type of an ICMP message.
///
/// `IcmpMessageType` is implemented by `Icmpv4MessageType` and
/// `Icmpv6MessageType`.
pub trait IcmpMessageType: TryFrom<u8> + Into<u8> + Copy {
/// Is this an error message?
///
/// For ICMP, this is true for the Destination Unreachable, Redirect, Source
/// Quench, Time Exceeded, and Parameter Problem message types. For ICMPv6,
/// this is true for the Destination Unreachable, Packet Too Big, Time
/// Exceeded, and Parameter Problem message types.
fn is_err(self) -> bool;
}
#[derive(Copy, Clone, Debug, FromBytes, Unaligned)]
#[repr(C)]
struct Header<M> {
prefix: HeaderPrefix,
message: M,
}
// So long as `M: Unaligned`, there will be no padding between the
// `HeaderPrefix` and `M`. Since `HeaderPrefix` itself is `Unaligned`, the
// alignment of `Header<M>` will be 1, meaning that no post-padding will need to
// be added to get to a multiple of the alignment. Since there is no padding,
// then so long as `M: AsBytes`, all of `Header<M>: AsBytes`.
unsafe impl<M: AsBytes + Unaligned> AsBytes for Header<M> {
// We're doing a bad thing, but it's necessary until derive(AsBytes)
// supports type parameters.
fn only_derive_is_allowed_to_implement_this_trait() {}
}
/// A partially parsed and not yet validated ICMP packet.
///
/// An `IcmpPacketRaw` provides minimal parsing of an ICMP packet. Namely, it
/// only requires that the header and message (in ICMPv6, these are both
/// considered part of the header) are present, and that the header has the
/// expected message type. The body may be missing (or an unexpected body may be
/// present). Other than the message type, no header, message, or body field
/// values will be validated.
///
/// [`IcmpPacket`] provides a [`FromRaw`] implementation that can be used to
/// validate an [`IcmpPacketRaw`].
#[derive(Debug)]
pub struct IcmpPacketRaw<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> {
header: LayoutVerified<B, Header<M>>,
message_body: B,
_marker: PhantomData<I>,
}
impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacketRaw<I, B, M> {
/// Get the ICMP message.
pub fn message(&self) -> &M {
&self.header.message
}
}
/// An ICMP packet.
///
/// An `IcmpPacket` shares its underlying memory with the byte slice it was
/// parsed from, meaning that no copying or extra allocation is necessary.
#[derive(Debug)]
pub struct IcmpPacket<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> {
header: LayoutVerified<B, Header<M>>,
message_body: M::Body,
_marker: PhantomData<I>,
}
/// Arguments required to parse an ICMP packet.
pub struct IcmpParseArgs<A: IpAddress> {
src_ip: A,
dst_ip: A,
}
impl<A: IpAddress> IcmpParseArgs<A> {
/// Construct a new `IcmpParseArgs`.
pub fn | new | identifier_name |
|
mod.rs | (Debug)]
pub struct OriginalPacket<B>(B);
impl<B: ByteSlice + Deref<Target = [u8]>> OriginalPacket<B> {
/// Returns the body of the original packet.
pub fn body<I: IcmpIpExt>(&self) -> &[u8] {
// TODO(joshlf): Can these debug_asserts be triggered by external input?
let header_len = I::header_len(&self.0);
debug_assert!(header_len <= self.0.len());
debug_assert!(I::VERSION.is_v6() || self.0.len() - header_len == 8);
&self.0[header_len..]
}
}
impl<B> MessageBody<B> for OriginalPacket<B> {
fn parse(bytes: B) -> ParseResult<OriginalPacket<B>> {
Ok(OriginalPacket(bytes))
}
fn len(&self) -> usize
where
B: ByteSlice,
{
self.0.len()
}
fn bytes(&self) -> &[u8]
where
B: Deref<Target = [u8]>,
{
&self.0
}
}
impl<B, O: for<'a> OptionsImpl<'a>> MessageBody<B> for Options<B, O> {
fn parse(bytes: B) -> ParseResult<Options<B, O>>
where
B: ByteSlice,
{
Self::parse(bytes).map_err(|_e| debug_err!(ParseError::Format, "unable to parse options"))
}
fn len(&self) -> usize
where
B: ByteSlice,
{
self.bytes().len()
}
fn bytes(&self) -> &[u8]
where
B: Deref<Target = [u8]>,
{
self.bytes()
}
}
/// An ICMP message.
pub trait IcmpMessage<I: IcmpIpExt, B: ByteSlice>:
Sized + Copy + FromBytes + AsBytes + Unaligned
{
/// The type of codes used with this message.
///
/// The ICMP header includes an 8-bit "code" field. For a given message
/// type, different values of this field carry different meanings. Not all
/// code values are used - some may be invalid. This type represents a
/// parsed code. For example, for TODO, it is the TODO type.
type Code: Into<u8> + Copy + Debug;
/// The type of the body used with this message.
type Body: MessageBody<B>;
/// The type corresponding to this message type.
///
/// The value of the "type" field in the ICMP header corresponding to
/// messages of this type.
const TYPE: I::IcmpMessageType;
/// Parse a `Code` from an 8-bit number.
///
/// Parse a `Code` from the 8-bit "code" field in the ICMP header. Not all
/// values for this field are valid. If an invalid value is passed,
/// `code_from_u8` returns `None`.
fn code_from_u8(code: u8) -> Option<Self::Code>;
}
/// The type of an ICMP message.
///
/// `IcmpMessageType` is implemented by `Icmpv4MessageType` and
/// `Icmpv6MessageType`.
pub trait IcmpMessageType: TryFrom<u8> + Into<u8> + Copy {
/// Is this an error message?
///
/// For ICMP, this is true for the Destination Unreachable, Redirect, Source
/// Quench, Time Exceeded, and Parameter Problem message types. For ICMPv6,
/// this is true for the Destination Unreachable, Packet Too Big, Time
/// Exceeded, and Parameter Problem message types.
fn is_err(self) -> bool;
}
#[derive(Copy, Clone, Debug, FromBytes, Unaligned)]
#[repr(C)]
struct Header<M> {
prefix: HeaderPrefix,
message: M,
}
// So long as `M: Unaligned`, there will be no padding between the
// `HeaderPrefix` and `M`. Since `HeaderPrefix` itself is `Unaligned`, the
// alignment of `Header<M>` will be 1, meaning that no post-padding will need to
// be added to get to a multiple of the alignment. Since there is no padding,
// then so long as `M: AsBytes`, all of `Header<M>: AsBytes`.
unsafe impl<M: AsBytes + Unaligned> AsBytes for Header<M> {
// We're doing a bad thing, but it's necessary until derive(AsBytes)
// supports type parameters.
fn only_derive_is_allowed_to_implement_this_trait() {}
}
/// A partially parsed and not yet validated ICMP packet.
///
/// An `IcmpPacketRaw` provides minimal parsing of an ICMP packet. Namely, it
/// only requires that the header and message (in ICMPv6, these are both
/// considered part of the header) are present, and that the header has the
/// expected message type. The body may be missing (or an unexpected body may be
/// present). Other than the message type, no header, message, or body field
/// values will be validated.
///
/// [`IcmpPacket`] provides a [`FromRaw`] implementation that can be used to
/// validate an [`IcmpPacketRaw`].
#[derive(Debug)]
pub struct IcmpPacketRaw<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> {
header: LayoutVerified<B, Header<M>>,
message_body: B,
_marker: PhantomData<I>,
}
impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacketRaw<I, B, M> {
/// Get the ICMP message.
pub fn message(&self) -> &M {
&self.header.message
}
}
/// An ICMP packet.
///
/// An `IcmpPacket` shares its underlying memory with the byte slice it was
/// parsed from, meaning that no copying or extra allocation is necessary.
#[derive(Debug)]
pub struct IcmpPacket<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> {
header: LayoutVerified<B, Header<M>>,
message_body: M::Body,
_marker: PhantomData<I>,
}
/// Arguments required to parse an ICMP packet.
pub struct IcmpParseArgs<A: IpAddress> {
src_ip: A,
dst_ip: A,
}
impl<A: IpAddress> IcmpParseArgs<A> {
/// Construct a new `IcmpParseArgs`.
pub fn new<S: Into<A>, D: Into<A>>(src_ip: S, dst_ip: D) -> IcmpParseArgs<A> {
IcmpParseArgs { src_ip: src_ip.into(), dst_ip: dst_ip.into() }
}
}
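// Usage sketch (illustrative; `buf`, `src`, `dst`, and the message type are assumptions):
//
//     let pkt = buf.parse_with::<_, IcmpPacket<Ipv4, _, IcmpEchoRequest>>(IcmpParseArgs::new(src, dst))?;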
impl<B: ByteSlice, I: IcmpIpExt, M: IcmpMessage<I, B>> ParsablePacket<B, ()>
for IcmpPacketRaw<I, B, M>
{
type Error = ParseError;
fn parse_metadata(&self) -> ParseMetadata {
ParseMetadata::from_packet(self.header.bytes().len(), self.message_body.len(), 0)
}
fn parse<BV: BufferView<B>>(mut buffer: BV, _args: ()) -> ParseResult<Self> {
let header = buffer
.take_obj_front::<Header<M>>()
.ok_or_else(debug_err_fn!(ParseError::Format, "too few bytes for header"))?;
let message_body = buffer.into_rest();
if header.prefix.msg_type != M::TYPE.into() {
return debug_err!(Err(ParseError::NotExpected), "unexpected message type");
}
Ok(IcmpPacketRaw { header, message_body, _marker: PhantomData })
}
}
impl<B: ByteSlice, I: IcmpIpExt, M: IcmpMessage<I, B>>
FromRaw<IcmpPacketRaw<I, B, M>, IcmpParseArgs<I::Addr>> for IcmpPacket<I, B, M>
{
type Error = ParseError;
fn try_from_raw_with(
raw: IcmpPacketRaw<I, B, M>,
args: IcmpParseArgs<I::Addr>,
) -> ParseResult<Self> {
let IcmpPacketRaw { header, message_body, _marker } = raw;
if !M::Body::EXPECTS_BODY && !message_body.is_empty() {
return debug_err!(Err(ParseError::Format), "unexpected message body");
}
let _: M::Code = M::code_from_u8(header.prefix.code).ok_or_else(debug_err_fn!(
ParseError::Format,
"unrecognized code: {}",
header.prefix.code
))?;
let checksum = Self::compute_checksum(&header, &message_body, args.src_ip, args.dst_ip)
.ok_or_else(debug_err_fn!(ParseError::Format, "packet too large"))?;
if checksum != [0, 0] {
return debug_err!(Err(ParseError::Checksum), "invalid checksum");
}
let message_body = M::Body::parse(message_body)?;
Ok(IcmpPacket { header, message_body, _marker })
}
}
impl<B: ByteSlice, I: IcmpIpExt, M: IcmpMessage<I, B>> ParsablePacket<B, IcmpParseArgs<I::Addr>>
for IcmpPacket<I, B, M> | {
type Error = ParseError;
fn parse_metadata(&self) -> ParseMetadata { | random_line_split |
|
calc_codon_usage.py | _diff=0.2, group_dpercentile=10, wt_gi='gi|556503834|ref|NC_000913.3|', gi_index=None, verbose=False):
#read fasta files
seqs = {}
if isinstance(fasta, str):
gene = "".join(os.path.basename(fasta).split(".")[:-1])
seqs[gene] = read_fasta(fasta)
elif isinstance(fasta, (list, tuple)):
for path in fasta:
gene = "".join(os.path.basename(path).split(".")[:-1])
seqs[gene] = read_fasta(path)
if verbose:
print("Loaded sequences for %d genes" % len(seqs))
gis = sorted(set(gi for gene in seqs for gi in seqs[gene].keys()))
#read abundance files
try:
with open(abundances, 'r') as f:
abundances = {line.split()[0] : float(line.split()[1]) for line in f if len(line) > 1 and line[0] != '#'}
except Exception as e:
abundances = {}
'''
if gi_index != None:
with open(gi_index, 'r') as f:
gi_index = {line.split()[0] : ' '.join(line.split()[1:]) \
for line in f if len(line) > 1 and line[0] != '#'}
print("GIs:")
for gi in gis:
print("%32s: %s" % (gi, gi_index[gi]))
'''
#delete the sequences whose length differs from the WT too much
nonwt_gis = [gi for gi in gis if gi != wt_gi]
for gene in seqs:
if wt_gi in seqs[gene]:
wtlen = len(seqs[gene][wt_gi]) - seqs[gene][wt_gi].count('-')
for gi in nonwt_gis:
if gi in seqs[gene]:
gilen = len(seqs[gene][gi]) - seqs[gene][gi].count('-')
if abs(1. - gilen / wtlen) > max_len_diff:
del seqs[gene][gi]
rerun_flag = False
try: # split sequences into deciles based on rare codon usage (calculated from first run)
with gzip.open(os.path.join(output, 'input_codon_usage.p.gz'), 'rb') as f:
input_relative_usage = pickle.load(f)['overall_codon_usage']
def get_frac_rare(seq):
return np.mean([sum(1 if israre(input_relative_usage[gi], rare_model, \
rare_threshold, seq[gi][3*i:3*(i + 1)]) else 0 \
for i in range(len(seq[gi]) // 3) if seq[gi][3*i:3*(i + 1)] != '---' \
and codon_to_aa[seq[gi][3*i:3*(i + 1)]] != 'Stop') / \
sum(1 for i in range(len(seq[gi]) // 3) if seq[gi][3*i:3*(i + 1)] != '---' \
and codon_to_aa[seq[gi][3*i:3*(i + 1)]] != 'Stop') \
for gi in gis if gi in seq])
frac_rare = {gene : get_frac_rare(seq) for gene,seq in seqs.items() if len(seq) > 0}
groups = ['ND'] + [(np.percentile(list(frac_rare.values()), percentile), \
np.percentile(list(frac_rare.values()), percentile \
+ group_dpercentile)) \
for percentile in range(0, 100, group_dpercentile)][::-1]
def get_gene_group(gene):
|
gene_group_labels = ['%05.3f:%05.3f' % (groups[i][0], groups[i][1]) \
if i > 0 else 'ND' for i in range(len(groups))]
except IOError: #this is the first run, get general usage info
rerun_flag = True
groups = ['all']
def get_gene_group(gene):
return 0
gene_group_labels = ['all']
except KeyError: #code was run in the same output directory, but on a different set of inputs (input_codon_usage.p.gz isn't correct)
os.remove(os.path.join(output, 'input_codon_usage.p.gz'))
rerun_flag = True
groups = ['all']
def get_gene_group(gene):
return 0
gene_group_labels = ['all']
gene_groups = {gene : get_gene_group(gene) for gene in seqs}
if verbose:
print("Gene groups:")
for i in range(len(gene_group_labels)):
print("%11s: n = %3d" % (gene_group_labels[i], \
sum(1 for gene in seqs if gene_groups[gene] == i)))
#compute codon usage
computed_codon_usage = {}
computed_codon_usage_unw = {}
computed_codon_usage_groupw = {}
absolute_usage = {}
relative_usage = {}
relative_usage_unw = {}
relative_usage_groupw = {}
for gi in gis:
computed_codon_usage[gi] = defaultdict(int)
computed_codon_usage_unw[gi] = defaultdict(int)
computed_codon_usage_groupw[gi] = [defaultdict(int) for i in range(len(groups))]
for gene,gene_seqs in seqs.items():
if gi in gene_seqs:
seq = gene_seqs[gi]
for i in range(len(seq) // 3):
c = seq[3*i:3*(i + 1)]
if c != '---' and codon_to_aa[c] != 'Stop':
if gene in abundances:
computed_codon_usage[gi][c] += abundances[gene]
else:
computed_codon_usage[gi][c] += 1
computed_codon_usage_unw[gi][c] += 1
computed_codon_usage_groupw[gi][gene_groups[gene]][c] += 1
codons_total_gi = sum(computed_codon_usage[gi].values())
absolute_usage[gi] = {c : x / codons_total_gi for c,x in computed_codon_usage[gi].items()}
relative_usage[gi] = {}
relative_usage_unw[gi] = {}
relative_usage_groupw[gi] = {i : {} for i in range(len(groups))}
for aa in aa_codons:
aa_total_gi = 0
aa_total_unw_gi = 0
for c in list(codon_to_aa):
if codon_to_aa[c] == aa:
aa_total_gi = aa_total_gi + computed_codon_usage[gi][c]
aa_total_unw_gi = aa_total_unw_gi + computed_codon_usage_unw[gi][c]
for c in aa_codons[aa]:
try:
relative_usage[gi][c] = computed_codon_usage[gi][c] / aa_total_gi
relative_usage_unw[gi][c] = computed_codon_usage_unw[gi][c] / aa_total_unw_gi
except:
# no counts for this amino acid: fall back to a uniform split across its synonymous codons
relative_usage[gi][c] = 1.0/len(aa_codons[aa])
relative_usage_unw[gi][c] = 1.0/len(aa_codons[aa])
for i in range(len(groups)):
aa_total_groupw_gi_i = sum(computed_codon_usage_groupw[gi][i][c] for c in aa_codons[aa])
for c in aa_codons[aa]:
if aa_total_groupw_gi_i > 0:
relative_usage_groupw[gi][i][c] \
= computed_codon_usage_groupw[gi][i][c] / aa_total_groupw_gi_i
else:
relative_usage_groupw[gi][i][c] = 0
if rerun_flag: #first run through, print general codon usage data
if verbose:
print("Writing input_codon_usage.p.gz")
with gzip.open(os.path.join(output, 'input_codon_usage.p.gz'), 'wb') as f:
pickle.dump({'groups' : groups,
'gene_groups' : gene_groups,
'overall_codon_usage' : relative_usage,
'unweighted_codon_usage' : relative_usage_unw,
'gene_group_codon_usage' : relative_usage_groupw}, f)
if verbose:
print("WARNING: Rerun analysis to compute frac-rare groups")
else: #second run through, print group codon usage data
codon_list = sorted(c for c | if len(seqs[gene]) == 0:
return 0
else:
x = get_frac_rare(seqs[gene])
for i in range(1, len(groups)):
if x >= groups[i][0] and x <= groups[i][1]:
return i | identifier_body |
calc_codon_usage.py | _diff=0.2, group_dpercentile=10, wt_gi='gi|556503834|ref|NC_000913.3|', gi_index=None, verbose=False):
#read fasta files
seqs = {}
if isinstance(fasta, str):
gene = "".join(os.path.basename(fasta).split(".")[:-1])
seqs[gene] = read_fasta(fasta)
elif isinstance(fasta, (list, tuple)):
for path in fasta:
gene = "".join(os.path.basename(path).split(".")[:-1])
seqs[gene] = read_fasta(path)
if verbose:
print("Loaded sequences for %d genes" % len(seqs))
gis = sorted(set(gi for gene in seqs for gi in seqs[gene].keys()))
#read abundance files
try: | abundances = {line.split()[0] : float(line.split()[1]) for line in f if len(line) > 1 and line[0] != '#'}
except Exception as e:
abundances = {}
'''
if gi_index != None:
with open(gi_index, 'r') as f:
gi_index = {line.split()[0] : ' '.join(line.split()[1:]) \
for line in f if len(line) > 1 and line[0] != '#'}
print("GIs:")
for gi in gis:
print("%32s: %s" % (gi, gi_index[gi]))
'''
#delete the sequences whose length differs from the WT too much
nonwt_gis = [gi for gi in gis if gi != wt_gi]
for gene in seqs:
if wt_gi in seqs[gene]:
wtlen = len(seqs[gene][wt_gi]) - seqs[gene][wt_gi].count('-')
for gi in nonwt_gis:
if gi in seqs[gene]:
gilen = len(seqs[gene][gi]) - seqs[gene][gi].count('-')
if abs(1. - gilen / wtlen) > max_len_diff:
del seqs[gene][gi]
rerun_flag = False
try: # split sequences into deciles based on rare codon usage (calculated from first run)
with gzip.open(os.path.join(output, 'input_codon_usage.p.gz'), 'rb') as f:
input_relative_usage = pickle.load(f)['overall_codon_usage']
def get_frac_rare(seq):
return np.mean([sum(1 if israre(input_relative_usage[gi], rare_model, \
rare_threshold, seq[gi][3*i:3*(i + 1)]) else 0 \
for i in range(len(seq[gi]) // 3) if seq[gi][3*i:3*(i + 1)] != '---' \
and codon_to_aa[seq[gi][3*i:3*(i + 1)]] != 'Stop') / \
sum(1 for i in range(len(seq[gi]) // 3) if seq[gi][3*i:3*(i + 1)] != '---' \
and codon_to_aa[seq[gi][3*i:3*(i + 1)]] != 'Stop') \
for gi in gis if gi in seq])
frac_rare = {gene : get_frac_rare(seq) for gene,seq in seqs.items() if len(seq) > 0}
groups = ['ND'] + [(np.percentile(list(frac_rare.values()), percentile), \
np.percentile(list(frac_rare.values()), percentile \
+ group_dpercentile)) \
for percentile in range(0, 100, group_dpercentile)][::-1]
def get_gene_group(gene):
if len(seqs[gene]) == 0:
return 0
else:
x = get_frac_rare(seqs[gene])
for i in range(1, len(groups)):
if x >= groups[i][0] and x <= groups[i][1]:
return i
gene_group_labels = ['%05.3f:%05.3f' % (groups[i][0], groups[i][1]) \
if i > 0 else 'ND' for i in range(len(groups))]
except IOError: #this is the first run, get general usage info
rerun_flag = True
groups = ['all']
def get_gene_group(gene):
return 0
gene_group_labels = ['all']
except KeyError: #code was run in the same output directory, but on a different set of inputs (input_codon_usage.p.gz isn't correct)
os.remove(os.path.join(output, 'input_codon_usage.p.gz'))
rerun_flag = True
groups = ['all']
def get_gene_group(gene):
return 0
gene_group_labels = ['all']
gene_groups = {gene : get_gene_group(gene) for gene in seqs}
if verbose:
print("Gene groups:")
for i in range(len(gene_group_labels)):
print("%11s: n = %3d" % (gene_group_labels[i], \
sum(1 for gene in seqs if gene_groups[gene] == i)))
#compute codon usage
computed_codon_usage = {}
computed_codon_usage_unw = {}
computed_codon_usage_groupw = {}
absolute_usage = {}
relative_usage = {}
relative_usage_unw = {}
relative_usage_groupw = {}
for gi in gis:
computed_codon_usage[gi] = defaultdict(int)
computed_codon_usage_unw[gi] = defaultdict(int)
computed_codon_usage_groupw[gi] = [defaultdict(int) for i in range(len(groups))]
for gene,gene_seqs in seqs.items():
if gi in gene_seqs:
seq = gene_seqs[gi]
for i in range(len(seq) // 3):
c = seq[3*i:3*(i + 1)]
if c != '---' and codon_to_aa[c] != 'Stop':
if gene in abundances:
computed_codon_usage[gi][c] += abundances[gene]
else:
computed_codon_usage[gi][c] += 1
computed_codon_usage_unw[gi][c] += 1
computed_codon_usage_groupw[gi][gene_groups[gene]][c] += 1
codons_total_gi = sum(computed_codon_usage[gi].values())
absolute_usage[gi] = {c : x / codons_total_gi for c,x in computed_codon_usage[gi].items()}
relative_usage[gi] = {}
relative_usage_unw[gi] = {}
relative_usage_groupw[gi] = {i : {} for i in range(len(groups))}
for aa in aa_codons:
aa_total_gi = 0
aa_total_unw_gi = 0
for c in list(codon_to_aa):
if codon_to_aa[c] == aa:
aa_total_gi = aa_total_gi + computed_codon_usage[gi][c]
aa_total_unw_gi = aa_total_unw_gi + computed_codon_usage_unw[gi][c]
for c in aa_codons[aa]:
try:
relative_usage[gi][c] = computed_codon_usage[gi][c] / aa_total_gi
relative_usage_unw[gi][c] = computed_codon_usage_unw[gi][c] / aa_total_unw_gi
except:
# no counts for this amino acid: fall back to a uniform split across its synonymous codons
relative_usage[gi][c] = 1.0/len(aa_codons[aa])
relative_usage_unw[gi][c] = 1.0/len(aa_codons[aa])
for i in range(len(groups)):
aa_total_groupw_gi_i = sum(computed_codon_usage_groupw[gi][i][c] for c in aa_codons[aa])
for c in aa_codons[aa]:
if aa_total_groupw_gi_i > 0:
relative_usage_groupw[gi][i][c] \
= computed_codon_usage_groupw[gi][i][c] / aa_total_groupw_gi_i
else:
relative_usage_groupw[gi][i][c] = 0
if rerun_flag: #first run through, print general codon usage data
if verbose:
print("Writing input_codon_usage.p.gz")
with gzip.open(os.path.join(output, 'input_codon_usage.p.gz'), 'wb') as f:
pickle.dump({'groups' : groups,
'gene_groups' : gene_groups,
'overall_codon_usage' : relative_usage,
'unweighted_codon_usage' : relative_usage_unw,
'gene_group_codon_usage' : relative_usage_groupw}, f)
if verbose:
print("WARNING: Rerun analysis to compute frac-rare groups")
else: #second run through, print group codon usage data
codon_list = sorted(c for c | with open(abundances, 'r') as f: | random_line_split |
calc_codon_usage.py |
elif rare_model == 'cmax_norm':
if codon_usage[c] / max(codon_usage[cc] for cc in aa_codons[codon_to_aa[c]]) <= rare_threshold:
return True
else:
return False
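# Worked example with made-up numbers for a two-codon amino acid (Phe: TTT/TTC), usage {'TTT': 0.08, 'TTC': 0.92}:
# 'no_norm' flags TTT because 0.08 <= 0.1, and 'cmax_norm' also flags it because 0.08 / 0.92 ~= 0.087 <= 0.1;
# TTC is rare under neither mode.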
def calc_codon_usage(fasta, abundances=None, output="", rare_model='no_norm', rare_threshold=0.1, max_len_diff=0.2, group_dpercentile=10, wt_gi='gi|556503834|ref|NC_000913.3|', gi_index=None, verbose=False):
#read fasta files
seqs = {}
if isinstance(fasta, str):
gene = "".join(os.path.basename(fasta).split(".")[:-1])
seqs[gene] = read_fasta(fasta)
elif isinstance(fasta, (list, tuple)):
for path in fasta:
gene = "".join(os.path.basename(path).split(".")[:-1])
seqs[gene] = read_fasta(path)
if verbose:
print("Loaded sequences for %d genes" % len(seqs))
gis = sorted(set(gi for gene in seqs for gi in seqs[gene].keys()))
#read abundance files
try:
with open(abundances, 'r') as f:
abundances = {line.split()[0] : float(line.split()[1]) for line in f if len(line) > 1 and line[0] != '#'}
except Exception as e:
abundances = {}
'''
if gi_index != None:
with open(gi_index, 'r') as f:
gi_index = {line.split()[0] : ' '.join(line.split()[1:]) \
for line in f if len(line) > 1 and line[0] != '#'}
print("GIs:")
for gi in gis:
print("%32s: %s" % (gi, gi_index[gi]))
'''
#delete the sequences whose length differs from the WT too much
nonwt_gis = [gi for gi in gis if gi != wt_gi]
for gene in seqs:
if wt_gi in seqs[gene]:
wtlen = len(seqs[gene][wt_gi]) - seqs[gene][wt_gi].count('-')
for gi in nonwt_gis:
if gi in seqs[gene]:
gilen = len(seqs[gene][gi]) - seqs[gene][gi].count('-')
if abs(1. - gilen / wtlen) > max_len_diff:
del seqs[gene][gi]
rerun_flag = False
try: # split sequences into deciles based on rare codon usage (calculated from first run)
with gzip.open(os.path.join(output, 'input_codon_usage.p.gz'), 'rb') as f:
input_relative_usage = pickle.load(f)['overall_codon_usage']
def get_frac_rare(seq):
return np.mean([sum(1 if israre(input_relative_usage[gi], rare_model, \
rare_threshold, seq[gi][3*i:3*(i + 1)]) else 0 \
for i in range(len(seq[gi]) // 3) if seq[gi][3*i:3*(i + 1)] != '---' \
and codon_to_aa[seq[gi][3*i:3*(i + 1)]] != 'Stop') / \
sum(1 for i in range(len(seq[gi]) // 3) if seq[gi][3*i:3*(i + 1)] != '---' \
and codon_to_aa[seq[gi][3*i:3*(i + 1)]] != 'Stop') \
for gi in gis if gi in seq])
frac_rare = {gene : get_frac_rare(seq) for gene,seq in seqs.items() if len(seq) > 0}
groups = ['ND'] + [(np.percentile(list(frac_rare.values()), percentile), \
np.percentile(list(frac_rare.values()), percentile \
+ group_dpercentile)) \
for percentile in range(0, 100, group_dpercentile)][::-1]
def get_gene_group(gene):
if len(seqs[gene]) == 0:
return 0
else:
x = get_frac_rare(seqs[gene])
for i in range(1, len(groups)):
if x >= groups[i][0] and x <= groups[i][1]:
return i
gene_group_labels = ['%05.3f:%05.3f' % (groups[i][0], groups[i][1]) \
if i > 0 else 'ND' for i in range(len(groups))]
except IOError: #this is the first run, get general usage info
rerun_flag = True
groups = ['all']
def get_gene_group(gene):
return 0
gene_group_labels = ['all']
except KeyError: #code was run in the same output directory, but on a different set of inputs (input_codon_usage.p.gz isn't correct)
os.remove(os.path.join(output, 'input_codon_usage.p.gz'))
rerun_flag = True
groups = ['all']
def get_gene_group(gene):
return 0
gene_group_labels = ['all']
gene_groups = {gene : get_gene_group(gene) for gene in seqs}
if verbose:
print("Gene groups:")
for i in range(len(gene_group_labels)):
print("%11s: n = %3d" % (gene_group_labels[i], \
sum(1 for gene in seqs if gene_groups[gene] == i)))
#compute codon usage
computed_codon_usage = {}
computed_codon_usage_unw = {}
computed_codon_usage_groupw = {}
absolute_usage = {}
relative_usage = {}
relative_usage_unw = {}
relative_usage_groupw = {}
for gi in gis:
computed_codon_usage[gi] = defaultdict(int)
computed_codon_usage_unw[gi] = defaultdict(int)
computed_codon_usage_groupw[gi] = [defaultdict(int) for i in range(len(groups))]
for gene,gene_seqs in seqs.items():
if gi in gene_seqs:
seq = gene_seqs[gi]
for i in range(len(seq) // 3):
c = seq[3*i:3*(i + 1)]
if c != '---' and codon_to_aa[c] != 'Stop':
if gene in abundances:
computed_codon_usage[gi][c] += abundances[gene]
else:
computed_codon_usage[gi][c] += 1
computed_codon_usage_unw[gi][c] += 1
computed_codon_usage_groupw[gi][gene_groups[gene]][c] += 1
codons_total_gi = sum(computed_codon_usage[gi].values())
absolute_usage[gi] = {c : x / codons_total_gi for c,x in computed_codon_usage[gi].items()}
relative_usage[gi] = {}
relative_usage_unw[gi] = {}
relative_usage_groupw[gi] = {i : {} for i in range(len(groups))}
for aa in aa_codons:
aa_total_gi = 0
aa_total_unw_gi = 0
for c in list(codon_to_aa):
if codon_to_aa[c] == aa:
aa_total_gi = aa_total_gi + computed_codon_usage[gi][c]
aa_total_unw_gi = aa_total_unw_gi + computed_codon_usage_unw[gi][c]
for c in aa_codons[aa]:
try:
relative_usage[gi][c] = computed_codon_usage[gi][c] / aa_total_gi
relative_usage_unw[gi][c] = computed_codon_usage_unw[gi][c] / aa_total_unw_gi
except:
# no counts for this amino acid: fall back to a uniform split across its synonymous codons
relative_usage[gi][c] = 1.0/len(aa_codons[aa])
relative_usage_unw[gi][c] = 1.0/len(aa_codons[aa])
for i in range(len(groups)):
aa_total_groupw_gi_i = sum(computed_codon_usage_groupw[gi][i][c] for c in aa_codons[aa])
for c in aa_codons[aa]:
if aa_total_groupw_gi_i > 0:
relative_usage_groupw[gi][i][c] \
= computed_codon_usage_groupw[gi][i][c] / aa_total_groupw_gi_i
else:
relative_usage_groupw[gi][i][c] = 0
if rerun_flag: #first run through, print general codon usage data
if verbose:
print("Writing input_codon_usage.p.gz")
with gzip.open(os.path.join(output, 'input_codon_usage.p.gz'), 'wb') as f:
pickle.dump({'groups' : groups,
'gene_groups' : gene_groups,
| return False | conditional_block |
|
calc_codon_usage.py | (codon_usage, rare_model, rare_threshold, c):
if rare_model == 'no_norm':
if codon_usage[c] <= rare_threshold:
return True
else:
return False
elif rare_model == 'cmax_norm':
if codon_usage[c] / max(codon_usage[cc] for cc in aa_codons[codon_to_aa[c]]) <= rare_threshold:
return True
else:
return False
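# Quick check with made-up glycine numbers, usage = {'GGT': 0.03, 'GGC': 0.55, 'GGA': 0.22, 'GGG': 0.20}:
# israre(usage, 'no_norm', 0.1, 'GGT') -> True (0.03 <= 0.1); israre(usage, 'cmax_norm', 0.1, 'GGT') -> True
# (0.03 / 0.55 ~= 0.055); israre(usage, 'cmax_norm', 0.1, 'GGC') -> False (0.55 / 0.55 = 1.0 > 0.1).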
def calc_codon_usage(fasta, abundances=None, output="", rare_model='no_norm', rare_threshold=0.1, max_len_diff=0.2, group_dpercentile=10, wt_gi='gi|556503834|ref|NC_000913.3|', gi_index=None, verbose=False):
#read fasta files
seqs = {}
if isinstance(fasta, str):
gene = "".join(os.path.basename(fasta).split(".")[:-1])
seqs[gene] = read_fasta(fasta)
elif isinstance(fasta, (list, tuple)):
for path in fasta:
gene = "".join(os.path.basename(path).split(".")[:-1])
seqs[gene] = read_fasta(path)
if verbose:
print("Loaded sequences for %d genes" % len(seqs))
gis = sorted(set(gi for gene in seqs for gi in seqs[gene].keys()))
#read abundance files
try:
with open(abundances, 'r') as f:
abundances = {line.split()[0] : float(line.split()[1]) for line in f if len(line) > 1 and line[0] != '#'}
except Exception as e:
abundances = {}
'''
if gi_index != None:
with open(gi_index, 'r') as f:
gi_index = {line.split()[0] : ' '.join(line.split()[1:]) \
for line in f if len(line) > 1 and line[0] != '#'}
print("GIs:")
for gi in gis:
print("%32s: %s" % (gi, gi_index[gi]))
'''
#delete the sequences whose length differs from the WT too much
nonwt_gis = [gi for gi in gis if gi != wt_gi]
for gene in seqs:
if wt_gi in seqs[gene]:
wtlen = len(seqs[gene][wt_gi]) - seqs[gene][wt_gi].count('-')
for gi in nonwt_gis:
if gi in seqs[gene]:
gilen = len(seqs[gene][gi]) - seqs[gene][gi].count('-')
if abs(1. - gilen / wtlen) > max_len_diff:
del seqs[gene][gi]
rerun_flag = False
try: # split sequences into deciles based on rare codon usage (calculated from first run)
with gzip.open(os.path.join(output, 'input_codon_usage.p.gz'), 'rb') as f:
input_relative_usage = pickle.load(f)['overall_codon_usage']
def get_frac_rare(seq):
return np.mean([sum(1 if israre(input_relative_usage[gi], rare_model, \
rare_threshold, seq[gi][3*i:3*(i + 1)]) else 0 \
for i in range(len(seq[gi]) // 3) if seq[gi][3*i:3*(i + 1)] != '---' \
and codon_to_aa[seq[gi][3*i:3*(i + 1)]] != 'Stop') / \
sum(1 for i in range(len(seq[gi]) // 3) if seq[gi][3*i:3*(i + 1)] != '---' \
and codon_to_aa[seq[gi][3*i:3*(i + 1)]] != 'Stop') \
for gi in gis if gi in seq])
frac_rare = {gene : get_frac_rare(seq) for gene,seq in seqs.items() if len(seq) > 0}
groups = ['ND'] + [(np.percentile(list(frac_rare.values()), percentile), \
np.percentile(list(frac_rare.values()), percentile \
+ group_dpercentile)) \
for percentile in range(0, 100, group_dpercentile)][::-1]
def get_gene_group(gene):
if len(seqs[gene]) == 0:
return 0
else:
x = get_frac_rare(seqs[gene])
for i in range(1, len(groups)):
if x >= groups[i][0] and x <= groups[i][1]:
return i
gene_group_labels = ['%05.3f:%05.3f' % (groups[i][0], groups[i][1]) \
if i > 0 else 'ND' for i in range(len(groups))]
except IOError: #this is the first run, get general usage info
rerun_flag = True
groups = ['all']
def get_gene_group(gene):
return 0
gene_group_labels = ['all']
except KeyError: #code was run in the same output directory, but on a different set of inputs (input_codon_usage.p.gz isn't correct)
os.remove(os.path.join(output, 'input_codon_usage.p.gz'))
rerun_flag = True
groups = ['all']
def get_gene_group(gene):
return 0
gene_group_labels = ['all']
gene_groups = {gene : get_gene_group(gene) for gene in seqs}
if verbose:
print("Gene groups:")
for i in range(len(gene_group_labels)):
print("%11s: n = %3d" % (gene_group_labels[i], \
sum(1 for gene in seqs if gene_groups[gene] == i)))
#compute codon usage
computed_codon_usage = {}
computed_codon_usage_unw = {}
computed_codon_usage_groupw = {}
absolute_usage = {}
relative_usage = {}
relative_usage_unw = {}
relative_usage_groupw = {}
for gi in gis:
computed_codon_usage[gi] = defaultdict(int)
computed_codon_usage_unw[gi] = defaultdict(int)
computed_codon_usage_groupw[gi] = [defaultdict(int) for i in range(len(groups))]
for gene,gene_seqs in seqs.items():
if gi in gene_seqs:
seq = gene_seqs[gi]
for i in range(len(seq) // 3):
c = seq[3*i:3*(i + 1)]
if c != '---' and codon_to_aa[c] != 'Stop':
if gene in abundances:
computed_codon_usage[gi][c] += abundances[gene]
else:
computed_codon_usage[gi][c] += 1
computed_codon_usage_unw[gi][c] += 1
computed_codon_usage_groupw[gi][gene_groups[gene]][c] += 1
codons_total_gi = sum(computed_codon_usage[gi].values())
absolute_usage[gi] = {c : x / codons_total_gi for c,x in computed_codon_usage[gi].items()}
relative_usage[gi] = {}
relative_usage_unw[gi] = {}
relative_usage_groupw[gi] = {i : {} for i in range(len(groups))}
for aa in aa_codons:
aa_total_gi = 0
aa_total_unw_gi = 0
for c in list(codon_to_aa):
if codon_to_aa[c] == aa:
aa_total_gi = aa_total_gi + computed_codon_usage[gi][c]
aa_total_unw_gi = aa_total_unw_gi + computed_codon_usage_unw[gi][c]
for c in aa_codons[aa]:
try:
relative_usage[gi][c] = computed_codon_usage[gi][c] / aa_total_gi
relative_usage_unw[gi][c] = computed_codon_usage_unw[gi][c] / aa_total_unw_gi
except ZeroDivisionError: # no codons observed for this amino acid; fall back to a uniform split
relative_usage[gi][c] = 1.0 / len(aa_codons[aa])
relative_usage_unw[gi][c] = 1.0 / len(aa_codons[aa])
for i in range(len(groups)):
aa_total_groupw_gi_i = sum(computed_codon_usage_groupw[gi][i][c] for c in aa_codons[aa])
for c in aa_codons[aa]:
if aa_total_groupw_gi_i > 0:
relative_usage_groupw[gi][i][c] \
= computed_codon_usage_groupw[gi][i][c] / aa_total_groupw_gi_i
else:
relative_usage_groupw[gi][i][c] = 0
if rerun_flag: #first run through, print general codon usage data
if verbose:
print("Writing input_codon_usage.p.gz")
| israre | identifier_name |
|
remote_cache.rs | _cache_client,
headers,
platform,
cache_read,
cache_write,
eager_fetch,
warnings_behavior,
read_errors_counter: Arc::new(Mutex::new(BTreeMap::new())),
write_errors_counter: Arc::new(Mutex::new(BTreeMap::new())),
})
}
/// Create a REAPI `Tree` protobuf for an output directory by traversing down from a Pants
/// merged final output directory to find the specific path to extract. (REAPI requires
/// output directories to be stored as `Tree` protos that contain all of the `Directory`
/// protos that constitute the directory tree.)
///
/// Note that the Tree does not include the directory_path as a prefix, per REAPI. This path
/// gets stored on the OutputDirectory proto.
///
/// If the output directory does not exist, then returns Ok(None).
pub(crate) async fn make_tree_for_output_directory(
root_directory_digest: Digest,
directory_path: RelativePath,
store: &Store,
) -> Result<Option<Tree>, String> {
// Traverse down from the root directory digest to find the directory digest for
// the output directory.
let mut current_directory_digest = root_directory_digest;
for next_path_component in directory_path.as_ref().components() {
let next_name = match next_path_component {
Component::Normal(name) => name
.to_str()
.ok_or_else(|| format!("unable to convert '{:?}' to string", name))?,
_ => return Ok(None),
};
// Load the Directory proto corresponding to `current_directory_digest`.
let current_directory = match store.load_directory(current_directory_digest).await? {
Some((dir, _)) => dir,
None => {
return Err(format!(
"Directory digest {:?} was referenced in output, but was not found in store.",
current_directory_digest
))
}
};
// Scan the current directory for the current path component.
let dir_node = match current_directory
.directories
.iter()
.find(|dn| dn.name == next_name)
{
Some(dn) => dn,
None => return Ok(None),
};
// Set the current directory digest to be the digest in the DirectoryNode just found.
// If there are more path components, then the search will continue there.
// Otherwise, if this loop ends then the final Directory digest has been found.
current_directory_digest = require_digest(dir_node.digest.as_ref())?;
}
// At this point, `current_directory_digest` holds the digest of the output directory.
// This will be the root of the Tree. Add it to a queue of digests to traverse.
let mut tree = Tree::default();
let mut digest_queue = VecDeque::new();
digest_queue.push_back(current_directory_digest);
while let Some(directory_digest) = digest_queue.pop_front() {
let directory = match store.load_directory(directory_digest).await? {
Some((dir, _)) => dir,
None => {
return Err(format!(
"illegal state: directory for digest {:?} did not exist locally",
&current_directory_digest
))
}
};
// Add all of the digests for subdirectories into the queue so they are processed
// in future iterations of the loop.
for subdirectory_node in &directory.directories {
let subdirectory_digest = require_digest(subdirectory_node.digest.as_ref())?;
digest_queue.push_back(subdirectory_digest);
}
// Store this directory either as the `root` or one of the `children` if not the root.
if directory_digest == current_directory_digest | else {
tree.children.push(directory)
}
}
Ok(Some(tree))
}
pub(crate) async fn extract_output_file(
root_directory_digest: Digest,
file_path: RelativePath,
store: &Store,
) -> Result<Option<FileNode>, String> {
// Traverse down from the root directory digest to find the directory digest for
// the output directory.
let mut current_directory_digest = root_directory_digest;
let parent_path = file_path.as_ref().parent();
let components_opt = parent_path.map(|x| x.components());
if let Some(components) = components_opt {
for next_path_component in components {
let next_name = match next_path_component {
Component::Normal(name) => name
.to_str()
.ok_or_else(|| format!("unable to convert '{:?}' to string", name))?,
_ => return Ok(None),
};
// Load the Directory proto corresponding to `current_directory_digest`.
let current_directory = match store.load_directory(current_directory_digest).await? {
Some((dir, _)) => dir,
None => {
return Err(format!(
"Directory digest {:?} was referenced in output, but was not found in store.",
current_directory_digest
))
}
};
// Scan the current directory for the current path component.
let dir_node = match current_directory
.directories
.iter()
.find(|dn| dn.name == next_name)
{
Some(dn) => dn,
None => return Ok(None),
};
// Set the current directory digest to be the digest in the DirectoryNode just found.
// If there are more path components, then the search will continue there.
// Otherwise, if this loop ends then the final Directory digest has been found.
current_directory_digest = require_digest(dir_node.digest.as_ref())?;
}
}
// Load the final directory.
let directory = match store.load_directory(current_directory_digest).await? {
Some((dir, _)) => dir,
None => return Ok(None),
};
// Search for the file.
let file_base_name = file_path.as_ref().file_name().unwrap();
Ok(
directory
.files
.iter()
.find(|node| {
let name = OsString::from(&node.name);
name == file_base_name
})
.cloned(),
)
}
/// Converts a REAPI `Command` and a `FallibleProcessResultWithPlatform` produced from executing
/// that Command into a REAPI `ActionResult` suitable for upload to the REAPI Action Cache.
///
/// This function also returns a vector of all `Digest`s referenced directly and indirectly by
/// the `ActionResult` suitable for passing to `Store::ensure_remote_has_recursive`. (The
/// digests may include both File and Tree digests.)
pub(crate) async fn make_action_result(
&self,
command: &Command,
result: &FallibleProcessResultWithPlatform,
store: &Store,
) -> Result<(ActionResult, Vec<Digest>), String> {
// Keep track of digests that need to be uploaded.
let mut digests = HashSet::new();
let mut action_result = ActionResult {
exit_code: result.exit_code,
stdout_digest: Some(result.stdout_digest.into()),
stderr_digest: Some(result.stderr_digest.into()),
execution_metadata: Some(result.metadata.clone().into()),
..ActionResult::default()
};
digests.insert(result.stdout_digest);
digests.insert(result.stderr_digest);
for output_directory in &command.output_directories {
let tree = match Self::make_tree_for_output_directory(
result.output_directory,
RelativePath::new(output_directory).unwrap(),
store,
)
.await?
{
Some(t) => t,
None => continue,
};
let tree_digest = crate::remote::store_proto_locally(&self.store, &tree).await?;
digests.insert(tree_digest);
action_result
.output_directories
.push(remexec::OutputDirectory {
path: output_directory.to_owned(),
tree_digest: Some(tree_digest.into()),
});
}
for output_file in &command.output_files {
let file_node = match Self::extract_output_file(
result.output_directory,
RelativePath::new(output_file).unwrap(),
store,
)
.await?
{
Some(node) => node,
None => continue,
};
let digest = require_digest(file_node.digest.as_ref())?;
digests.insert(digest);
action_result.output_files.push({
remexec::OutputFile {
digest: Some(digest.into()),
path: output_file.to_owned(),
is_executable: file_node.is_executable,
..remexec::OutputFile::default()
}
})
}
Ok((action_result, digests.into_iter().collect::<Vec<_>>()))
}
/// Stores an execution result into the remote Action Cache.
async fn update_action_cache(
&self,
context: &Context,
request: &Process,
result: &FallibleProcessResultWithPlatform,
metadata: &ProcessMetadata,
command: &Command,
action_digest: Digest,
command_digest: Digest,
) -> Result<(), String> {
// Upload the action (and related data, i.e. the embedded command and input files).
// Assumption: The Action and related data has already been stored locally.
with_workunit(
context.workunit_store.clone(),
"ensure_action_uploaded".to_owned(),
WorkunitMetadata {
level: Level::Trace,
desc: Some(format!("ensure action uploaded for {:?}", action_digest)),
..WorkunitMetadata::default()
},
crate::remote::ensure_action_uploaded(
&self.store,
command_digest,
action_digest,
request.input_files,
),
|_, md| md,
| {
tree.root = Some(directory);
} | conditional_block |
remote_cache.rs | _path: RelativePath,
store: &Store,
) -> Result<Option<Tree>, String> {
// Traverse down from the root directory digest to find the directory digest for
// the output directory.
let mut current_directory_digest = root_directory_digest;
for next_path_component in directory_path.as_ref().components() {
let next_name = match next_path_component {
Component::Normal(name) => name
.to_str()
.ok_or_else(|| format!("unable to convert '{:?}' to string", name))?,
_ => return Ok(None),
};
// Load the Directory proto corresponding to `current_directory_digest`.
let current_directory = match store.load_directory(current_directory_digest).await? {
Some((dir, _)) => dir,
None => {
return Err(format!(
"Directory digest {:?} was referenced in output, but was not found in store.",
current_directory_digest
))
}
};
// Scan the current directory for the current path component.
let dir_node = match current_directory
.directories
.iter()
.find(|dn| dn.name == next_name)
{
Some(dn) => dn,
None => return Ok(None),
};
// Set the current directory digest to be the digest in the DirectoryNode just found.
// If there are more path components, then the search will continue there.
// Otherwise, if this loop ends then the final Directory digest has been found.
current_directory_digest = require_digest(dir_node.digest.as_ref())?;
}
// At this point, `current_directory_digest` holds the digest of the output directory.
// This will be the root of the Tree. Add it to a queue of digests to traverse.
let mut tree = Tree::default();
let mut digest_queue = VecDeque::new();
digest_queue.push_back(current_directory_digest);
while let Some(directory_digest) = digest_queue.pop_front() {
let directory = match store.load_directory(directory_digest).await? {
Some((dir, _)) => dir,
None => {
return Err(format!(
"illegal state: directory for digest {:?} did not exist locally",
&current_directory_digest
))
}
};
// Add all of the digests for subdirectories into the queue so they are processed
// in future iterations of the loop.
for subdirectory_node in &directory.directories {
let subdirectory_digest = require_digest(subdirectory_node.digest.as_ref())?;
digest_queue.push_back(subdirectory_digest);
}
// Store this directory either as the `root` or one of the `children` if not the root.
if directory_digest == current_directory_digest {
tree.root = Some(directory);
} else {
tree.children.push(directory)
}
}
Ok(Some(tree))
}
pub(crate) async fn extract_output_file(
root_directory_digest: Digest,
file_path: RelativePath,
store: &Store,
) -> Result<Option<FileNode>, String> {
// Traverse down from the root directory digest to find the directory digest for
// the output directory.
let mut current_directory_digest = root_directory_digest;
let parent_path = file_path.as_ref().parent();
let components_opt = parent_path.map(|x| x.components());
if let Some(components) = components_opt {
for next_path_component in components {
let next_name = match next_path_component {
Component::Normal(name) => name
.to_str()
.ok_or_else(|| format!("unable to convert '{:?}' to string", name))?,
_ => return Ok(None),
};
// Load the Directory proto corresponding to `current_directory_digest`.
let current_directory = match store.load_directory(current_directory_digest).await? {
Some((dir, _)) => dir,
None => {
return Err(format!(
"Directory digest {:?} was referenced in output, but was not found in store.",
current_directory_digest
))
}
};
// Scan the current directory for the current path component.
let dir_node = match current_directory
.directories
.iter()
.find(|dn| dn.name == next_name)
{
Some(dn) => dn,
None => return Ok(None),
};
// Set the current directory digest to be the digest in the DirectoryNode just found.
// If there are more path components, then the search will continue there.
// Otherwise, if this loop ends then the final Directory digest has been found.
current_directory_digest = require_digest(dir_node.digest.as_ref())?;
}
}
// Load the final directory.
let directory = match store.load_directory(current_directory_digest).await? {
Some((dir, _)) => dir,
None => return Ok(None),
};
// Search for the file.
let file_base_name = file_path.as_ref().file_name().unwrap();
Ok(
directory
.files
.iter()
.find(|node| {
let name = OsString::from(&node.name);
name == file_base_name
})
.cloned(),
)
}
/// Converts a REAPI `Command` and a `FallibleProcessResultWithPlatform` produced from executing
/// that Command into a REAPI `ActionResult` suitable for upload to the REAPI Action Cache.
///
/// This function also returns a vector of all `Digest`s referenced directly and indirectly by
/// the `ActionResult` suitable for passing to `Store::ensure_remote_has_recursive`. (The
/// digests may include both File and Tree digests.)
pub(crate) async fn make_action_result(
&self,
command: &Command,
result: &FallibleProcessResultWithPlatform,
store: &Store,
) -> Result<(ActionResult, Vec<Digest>), String> {
// Keep track of digests that need to be uploaded.
let mut digests = HashSet::new();
let mut action_result = ActionResult {
exit_code: result.exit_code,
stdout_digest: Some(result.stdout_digest.into()),
stderr_digest: Some(result.stderr_digest.into()),
execution_metadata: Some(result.metadata.clone().into()),
..ActionResult::default()
};
digests.insert(result.stdout_digest);
digests.insert(result.stderr_digest);
for output_directory in &command.output_directories {
let tree = match Self::make_tree_for_output_directory(
result.output_directory,
RelativePath::new(output_directory).unwrap(),
store,
)
.await?
{
Some(t) => t,
None => continue,
};
let tree_digest = crate::remote::store_proto_locally(&self.store, &tree).await?;
digests.insert(tree_digest);
action_result
.output_directories
.push(remexec::OutputDirectory {
path: output_directory.to_owned(),
tree_digest: Some(tree_digest.into()),
});
}
for output_file in &command.output_files {
let file_node = match Self::extract_output_file(
result.output_directory,
RelativePath::new(output_file).unwrap(),
store,
)
.await?
{
Some(node) => node,
None => continue,
};
let digest = require_digest(file_node.digest.as_ref())?;
digests.insert(digest);
action_result.output_files.push({
remexec::OutputFile {
digest: Some(digest.into()),
path: output_file.to_owned(),
is_executable: file_node.is_executable,
..remexec::OutputFile::default()
}
})
}
Ok((action_result, digests.into_iter().collect::<Vec<_>>()))
}
/// Stores an execution result into the remote Action Cache.
async fn update_action_cache(
&self,
context: &Context,
request: &Process,
result: &FallibleProcessResultWithPlatform,
metadata: &ProcessMetadata,
command: &Command,
action_digest: Digest,
command_digest: Digest,
) -> Result<(), String> {
// Upload the action (and related data, i.e. the embedded command and input files).
// Assumption: The Action and related data has already been stored locally.
with_workunit(
context.workunit_store.clone(),
"ensure_action_uploaded".to_owned(),
WorkunitMetadata {
level: Level::Trace,
desc: Some(format!("ensure action uploaded for {:?}", action_digest)),
..WorkunitMetadata::default()
},
crate::remote::ensure_action_uploaded(
&self.store,
command_digest,
action_digest,
request.input_files,
),
|_, md| md,
)
.await?;
// Create an ActionResult from the process result.
let (action_result, digests_for_action_result) = self
.make_action_result(command, result, &self.store)
.await?;
// Ensure that all digests referenced by directly and indirectly by the ActionResult
// have been uploaded to the remote cache.
self
.store
.ensure_remote_has_recursive(digests_for_action_result)
.await?;
let update_action_cache_request = remexec::UpdateActionResultRequest {
instance_name: metadata
.instance_name
.as_ref()
.cloned()
.unwrap_or_else(|| "".to_owned()),
action_digest: Some(action_digest.into()),
action_result: Some(action_result),
..remexec::UpdateActionResultRequest::default()
};
let mut client = self.action_cache_client.as_ref().clone();
client
.update_action_result(update_action_cache_request)
.await | .map_err(status_to_str)?; | random_line_split |
|
remote_cache.rs | _proto_locally(&self.store, &tree).await?;
digests.insert(tree_digest);
action_result
.output_directories
.push(remexec::OutputDirectory {
path: output_directory.to_owned(),
tree_digest: Some(tree_digest.into()),
});
}
for output_file in &command.output_files {
let file_node = match Self::extract_output_file(
result.output_directory,
RelativePath::new(output_file).unwrap(),
store,
)
.await?
{
Some(node) => node,
None => continue,
};
let digest = require_digest(file_node.digest.as_ref())?;
digests.insert(digest);
action_result.output_files.push({
remexec::OutputFile {
digest: Some(digest.into()),
path: output_file.to_owned(),
is_executable: file_node.is_executable,
..remexec::OutputFile::default()
}
})
}
Ok((action_result, digests.into_iter().collect::<Vec<_>>()))
}
/// Stores an execution result into the remote Action Cache.
async fn update_action_cache(
&self,
context: &Context,
request: &Process,
result: &FallibleProcessResultWithPlatform,
metadata: &ProcessMetadata,
command: &Command,
action_digest: Digest,
command_digest: Digest,
) -> Result<(), String> {
// Upload the action (and related data, i.e. the embedded command and input files).
// Assumption: The Action and related data has already been stored locally.
with_workunit(
context.workunit_store.clone(),
"ensure_action_uploaded".to_owned(),
WorkunitMetadata {
level: Level::Trace,
desc: Some(format!("ensure action uploaded for {:?}", action_digest)),
..WorkunitMetadata::default()
},
crate::remote::ensure_action_uploaded(
&self.store,
command_digest,
action_digest,
request.input_files,
),
|_, md| md,
)
.await?;
// Create an ActionResult from the process result.
let (action_result, digests_for_action_result) = self
.make_action_result(command, result, &self.store)
.await?;
// Ensure that all digests referenced by directly and indirectly by the ActionResult
// have been uploaded to the remote cache.
self
.store
.ensure_remote_has_recursive(digests_for_action_result)
.await?;
let update_action_cache_request = remexec::UpdateActionResultRequest {
instance_name: metadata
.instance_name
.as_ref()
.cloned()
.unwrap_or_else(|| "".to_owned()),
action_digest: Some(action_digest.into()),
action_result: Some(action_result),
..remexec::UpdateActionResultRequest::default()
};
let mut client = self.action_cache_client.as_ref().clone();
client
.update_action_result(update_action_cache_request)
.await
.map_err(status_to_str)?;
Ok(())
}
fn log_cache_error(&self, err: String, err_type: CacheErrorType) {
let err_count = {
let mut errors_counter = match err_type {
CacheErrorType::ReadError => self.read_errors_counter.lock(),
CacheErrorType::WriteError => self.write_errors_counter.lock(),
};
let count = errors_counter.entry(err.clone()).or_insert(0);
*count += 1;
*count
};
let failure_desc = match err_type {
CacheErrorType::ReadError => "read from",
CacheErrorType::WriteError => "write to",
};
let log_msg = format!(
"Failed to {} remote cache ({} occurrences so far): {}",
failure_desc, err_count, err
);
let log_at_warn = match self.warnings_behavior {
RemoteCacheWarningsBehavior::Ignore => false,
RemoteCacheWarningsBehavior::FirstOnly => err_count == 1,
RemoteCacheWarningsBehavior::Backoff => err_count.is_power_of_two(),
};
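// e.g. with `Backoff` the warning is surfaced on the 1st, 2nd, 4th, 8th, ...
// occurrence of a given error message (powers of two) and demoted to debug
// logging in between; `FirstOnly` warns only on the first occurrence and
// `Ignore` never warns.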
if log_at_warn {
log::warn!("{}", log_msg);
} else {
log::debug!("{}", log_msg);
}
}
}
enum CacheErrorType {
ReadError,
WriteError,
}
#[async_trait]
impl crate::CommandRunner for CommandRunner {
async fn run(
&self,
req: MultiPlatformProcess,
context: Context,
) -> Result<FallibleProcessResultWithPlatform, String> {
let cache_lookup_start = Instant::now();
// Construct the REv2 ExecuteRequest and related data for this execution request.
let request = self
.extract_compatible_request(&req)
.ok_or_else(|| "No compatible Process found for checking remote cache.".to_owned())?;
let (action, command, _execute_request) =
make_execute_request(&request, self.metadata.clone())?;
// Ensure the action and command are stored locally.
let (command_digest, action_digest) = with_workunit(
context.workunit_store.clone(),
"ensure_action_stored_locally".to_owned(),
WorkunitMetadata {
level: Level::Trace,
desc: Some(format!("ensure action stored locally for {:?}", action)),
..WorkunitMetadata::default()
},
crate::remote::ensure_action_stored_locally(&self.store, &command, &action),
|_, md| md,
)
.await?;
let mut local_execution_future = self.underlying.run(req, context.clone());
let result = if self.cache_read {
// A future to read from the cache and log the results accordingly.
let cache_read_future = async {
let response = with_workunit(
context.workunit_store.clone(),
"check_action_cache".to_owned(),
WorkunitMetadata {
level: Level::Trace,
desc: Some(format!("check action cache for {:?}", action_digest)),
..WorkunitMetadata::default()
},
crate::remote::check_action_cache(
action_digest,
&self.metadata,
self.platform,
&context,
self.action_cache_client.clone(),
self.store.clone(),
self.eager_fetch,
),
|_, md| md,
)
.await;
match response {
Ok(cached_response_opt) => {
log::debug!(
"remote cache response: digest={:?}: {:?}",
action_digest,
cached_response_opt
);
cached_response_opt
}
Err(err) => {
self.log_cache_error(err, CacheErrorType::ReadError);
None
}
}
}
.boxed();
// We speculate between reading from the remote cache vs. running locally. If there was a
// cache hit, we return early because there will be no need to write to the cache. Otherwise,
// we run the process locally and will possibly write it to the cache later.
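// In other words, whichever future resolves first is inspected first: a cache
// hit returns immediately (recording time saved), while a miss falls through to
// awaiting the local execution that was already started above.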
tokio::select! {
cache_result = cache_read_future => {
if let Some(cached_response) = cache_result {
let lookup_elapsed = cache_lookup_start.elapsed();
context.workunit_store.increment_counter(Metric::RemoteCacheSpeculationRemoteCompletedFirst, 1);
if let Some(time_saved) = cached_response.metadata.time_saved_from_cache(lookup_elapsed) {
let time_saved = time_saved.as_millis() as u64;
context
.workunit_store
.increment_counter(Metric::RemoteCacheTotalTimeSavedMs, time_saved);
context
.workunit_store
.record_observation(ObservationMetric::RemoteCacheTimeSavedMs, time_saved);
}
return Ok(cached_response);
} else {
// Note that we don't increment a counter here, as there is nothing of note in this
// scenario: the remote cache did not save unnecessary local work, nor was the remote
// trip unusually slow such that local execution was faster.
local_execution_future.await?
}
}
local_result = &mut local_execution_future => {
context.workunit_store.increment_counter(Metric::RemoteCacheSpeculationLocalCompletedFirst, 1);
local_result?
}
}
} else {
local_execution_future.await?
};
if result.exit_code == 0 && self.cache_write {
let command_runner = self.clone();
let result = result.clone();
let context2 = context.clone();
// NB: We use `TaskExecutor::spawn` instead of `tokio::spawn` to ensure logging still works.
let cache_write_future = async move {
context2
.workunit_store
.increment_counter(Metric::RemoteCacheWriteStarted, 1);
let write_result = command_runner
.update_action_cache(
&context2,
&request,
&result,
&command_runner.metadata,
&command,
action_digest,
command_digest,
)
.await;
context2
.workunit_store
.increment_counter(Metric::RemoteCacheWriteFinished, 1);
if let Err(err) = write_result {
command_runner.log_cache_error(err, CacheErrorType::WriteError);
context2
.workunit_store
.increment_counter(Metric::RemoteCacheWriteErrors, 1);
};
}
.boxed();
let _write_join = self.executor.spawn(with_workunit(
context.workunit_store,
"remote_cache_write".to_owned(),
WorkunitMetadata {
level: Level::Trace,
..WorkunitMetadata::default()
},
cache_write_future,
|_, md| md,
));
}
Ok(result)
}
fn | extract_compatible_request | identifier_name |
|
bare_index.rs | = if !exists {
let mut opts = git2::RepositoryInitOptions::new();
opts.external_template(false);
let repo = git2::Repository::init_opts(&index.path, &opts)?;
{
let mut origin_remote = repo
.find_remote("origin")
.or_else(|_| repo.remote_anonymous(&index.url))?;
origin_remote.fetch(
&[
"HEAD:refs/remotes/origin/HEAD",
"master:refs/remotes/origin/master",
],
Some(&mut crate::fetch_opts()),
None,
)?;
}
repo
} else {
git2::Repository::open(&index.path)?
};
// It's going to be used in a self-referential type. Boxing prevents it from being moved
// and adds a layer of indirection that will hopefully not upset noalias analysis.
let repo = Box::new(repo);
let head = repo
// Fallback to HEAD, as a fresh clone won't have a FETCH_HEAD
.refname_to_id("FETCH_HEAD")
.or_else(|_| repo.refname_to_id("HEAD"))?;
let head_str = head.to_string();
let tree = {
let commit = repo.find_commit(head)?;
let tree = commit.tree()?;
// See `UnsafeRepoTree`
unsafe { std::mem::transmute::<git2::Tree<'_>, git2::Tree<'static>>(tree) }
};
Ok(Self {
inner: index,
head_str,
rt: UnsafeRepoTree {
repo,
tree,
_self_referential: PhantomPinned,
},
})
}
/// Fetches the latest state of the remote index repository. Note that if this
/// fetch brings in a new commit, previously written cache entries will no longer
/// be used, because their recorded commit version will no longer match.
pub fn | (&mut self) -> Result<(), Error> {
{
let mut origin_remote = self
.rt
.repo
.find_remote("origin")
.or_else(|_| self.rt.repo.remote_anonymous(&self.inner.url))?;
origin_remote.fetch(
&[
"HEAD:refs/remotes/origin/HEAD",
"master:refs/remotes/origin/master",
],
Some(&mut crate::fetch_opts()),
None,
)?;
}
let head = self
.rt
.repo
.refname_to_id("FETCH_HEAD")
.or_else(|_| self.rt.repo.refname_to_id("HEAD"))?;
let head_str = head.to_string();
let commit = self.rt.repo.find_commit(head)?;
let tree = commit.tree()?;
// See `UnsafeRepoTree`
let tree = unsafe { std::mem::transmute::<git2::Tree<'_>, git2::Tree<'static>>(tree) };
self.head_str = head_str;
self.rt.tree = tree;
Ok(())
}
/// Reads a crate from the index, it will attempt to use a cached entry if
/// one is available, otherwise it will fallback to reading the crate
/// directly from the git blob containing the crate information.
pub fn crate_(&self, name: &str) -> Option<Crate> {
let rel_path = match crate::crate_name_to_relative_path(name) {
Some(rp) => rp,
None => return None,
};
// Attempt to load the .cache/ entry first, this is purely an acceleration
// mechanism and can fail for a few reasons that are non-fatal
{
let mut cache_path = self.inner.path.join(".cache");
cache_path.push(&rel_path);
if let Ok(cache_bytes) = std::fs::read(&cache_path) {
if let Ok(krate) = Crate::from_cache_slice(&cache_bytes, &self.head_str) {
return Some(krate);
}
}
}
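// (One non-fatal failure mode is a stale entry: `from_cache_slice` compares the
// commit version embedded in the cache file against `head_str` and rejects the
// entry on mismatch, e.g. right after a fresh fetch.)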
// Fallback to reading the blob directly via git if we don't have a
// valid cache entry
self.crate_from_rel_path(&rel_path).ok()
}
fn crate_from_rel_path(&self, path: &str) -> Result<Crate, Error> {
let entry = self.rt.tree.get_path(&Path::new(path))?;
let object = entry.to_object(&self.rt.repo)?;
let blob = object
.as_blob()
.ok_or_else(|| Error::Io(io::Error::new(io::ErrorKind::NotFound, path.to_owned())))?;
Crate::from_slice(blob.content()).map_err(Error::Io)
}
/// Retrieve an iterator over all the crates in the index.
/// skips crates that can not be parsed.
#[inline]
pub fn crates(&self) -> Crates<'_> {
Crates {
blobs: self.crates_refs(),
}
}
/// Retrieve an iterator over all the crates in the index.
/// Returns opaque reference for each crate in the index, which can be used with [`CrateRef::parse`]
fn crates_refs(&self) -> CrateRefs<'_> {
let mut stack = Vec::with_capacity(800);
// Scan only directories at top level (skip config.json, etc.)
for entry in self.rt.tree.iter() {
let entry = entry.to_object(&self.rt.repo).unwrap();
if entry.as_tree().is_some() {
stack.push(entry);
}
}
CrateRefs {
stack,
rt: &self.rt,
}
}
/// Get the global configuration of the index.
pub fn index_config(&self) -> Result<IndexConfig, Error> {
let entry = self.rt.tree.get_path(&Path::new("config.json"))?;
let object = entry.to_object(&self.rt.repo)?;
let blob = object
.as_blob()
.ok_or_else(|| Error::Io(io::Error::new(io::ErrorKind::NotFound, "config.json")))?;
serde_json::from_slice(blob.content()).map_err(Error::Json)
}
}
/// Iterator over all crates in the index, but returns opaque objects that can be parsed separately.
///
/// See [`CrateRef::parse`].
struct CrateRefs<'a> {
stack: Vec<git2::Object<'a>>,
rt: &'a UnsafeRepoTree,
}
/// Opaque representation of a crate in the index. See [`CrateRef::parse`].
pub(crate) struct CrateRef<'a>(pub(crate) git2::Object<'a>);
impl CrateRef<'_> {
#[inline]
/// Parse a crate from [`BareIndex::crates_blobs`] iterator
pub fn parse(&self) -> Option<Crate> {
Crate::from_slice(self.as_slice()?).ok()
}
/// Raw crate data that can be parsed with [`Crate::from_slice`]
pub fn as_slice(&self) -> Option<&[u8]> {
Some(self.0.as_blob()?.content())
}
}
impl<'a> Iterator for CrateRefs<'a> {
type Item = CrateRef<'a>;
fn next(&mut self) -> Option<Self::Item> {
while let Some(last) = self.stack.pop() {
match last.as_tree() {
None => return Some(CrateRef(last)),
Some(tree) => {
for entry in tree.iter().rev() {
self.stack.push(entry.to_object(&self.rt.repo).unwrap());
}
continue;
}
}
}
None
}
}
pub struct Crates<'a> {
blobs: CrateRefs<'a>,
}
impl<'a> Iterator for Crates<'a> {
type Item = Crate;
fn next(&mut self) -> Option<Self::Item> {
while let Some(next) = self.blobs.next() {
if let Some(k) = CrateRef::parse(&next) {
return Some(k);
}
}
None
}
}
/// Converts a full url, eg https://github.com/rust-lang/crates.io-index, into
/// the root directory name where cargo itself will fetch it on disk
fn url_to_local_dir(url: &str) -> Result<(String, String), Error> {
fn to_hex(num: u64) -> String {
const CHARS: &[u8] = b"0123456789abcdef";
let bytes = &[
num as u8,
(num >> 8) as u8,
(num >> 16) as u8,
(num >> 24) as u8,
(num >> 32) as u8,
(num >> 40) as u8,
(num >> 48) as u8,
(num >> 56) as u8,
];
let mut output = vec![0u8; 16];
let mut ind = 0;
for &byte in bytes {
output[ind] = CHARS[(byte >> 4) as usize];
output[ind + 1] = CHARS[(byte & 0xf) as usize];
ind += 2;
}
String::from_utf8(output).expect("valid utf-8 hex string")
}
#[allow(deprecated)]
fn hash_u64(url: &str) -> u64 {
use std::hash::{Hash, Hasher, SipHasher};
let mut hasher = SipHasher::new_with_keys(0, 0);
// Registry
2usize.hash | retrieve | identifier_name |
bare_index.rs | > {
type Item = CrateRef<'a>;
fn next(&mut self) -> Option<Self::Item> {
while let Some(last) = self.stack.pop() {
match last.as_tree() {
None => return Some(CrateRef(last)),
Some(tree) => {
for entry in tree.iter().rev() {
self.stack.push(entry.to_object(&self.rt.repo).unwrap());
}
continue;
}
}
}
None
}
}
pub struct Crates<'a> {
blobs: CrateRefs<'a>,
}
impl<'a> Iterator for Crates<'a> {
type Item = Crate;
fn next(&mut self) -> Option<Self::Item> {
while let Some(next) = self.blobs.next() {
if let Some(k) = CrateRef::parse(&next) {
return Some(k);
}
}
None
}
}
/// Converts a full url, eg https://github.com/rust-lang/crates.io-index, into
/// the root directory name where cargo itself will fetch it on disk
fn url_to_local_dir(url: &str) -> Result<(String, String), Error> {
fn to_hex(num: u64) -> String {
const CHARS: &[u8] = b"0123456789abcdef";
let bytes = &[
num as u8,
(num >> 8) as u8,
(num >> 16) as u8,
(num >> 24) as u8,
(num >> 32) as u8,
(num >> 40) as u8,
(num >> 48) as u8,
(num >> 56) as u8,
];
let mut output = vec![0u8; 16];
let mut ind = 0;
for &byte in bytes {
output[ind] = CHARS[(byte >> 4) as usize];
output[ind + 1] = CHARS[(byte & 0xf) as usize];
ind += 2;
}
String::from_utf8(output).expect("valid utf-8 hex string")
}
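// Added note: the hash bytes are written least-significant byte first, so this
// is the little-endian hex rendering of the SipHash value computed below. For
// the crates.io index URL it comes out as "1ecc6299db9ec823", which is exactly
// the suffix cargo uses for its on-disk registry directory (see the
// `matches_cargo` test further down).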
#[allow(deprecated)]
fn hash_u64(url: &str) -> u64 {
use std::hash::{Hash, Hasher, SipHasher};
let mut hasher = SipHasher::new_with_keys(0, 0);
// Registry
2usize.hash(&mut hasher);
// Url
url.hash(&mut hasher);
hasher.finish()
}
// Ensure we have a registry or bare url
let (url, scheme_ind) = {
let scheme_ind = url
.find("://")
.ok_or_else(|| Error::Url(format!("'{}' is not a valid url", url)))?;
let scheme_str = &url[..scheme_ind];
if let Some(ind) = scheme_str.find('+') {
if &scheme_str[..ind] != "registry" {
return Err(Error::Url(format!("'{}' is not a valid registry url", url)));
}
(&url[ind + 1..], scheme_ind - ind - 1)
} else {
(url, scheme_ind)
}
};
// Could use the Url crate for this, but it's simple enough and we don't
// need to deal with every possible url (I hope...)
let host = match url[scheme_ind + 3..].find('/') {
Some(end) => &url[scheme_ind + 3..scheme_ind + 3 + end],
None => &url[scheme_ind + 3..],
};
// cargo special cases github.com for reasons, so do the same
let mut canonical = if host == "github.com" {
url.to_lowercase()
} else {
url.to_owned()
};
// Chop off any query params/fragments
if let Some(hash) = canonical.rfind('#') {
canonical.truncate(hash);
}
if let Some(query) = canonical.rfind('?') {
canonical.truncate(query);
}
let ident = to_hex(hash_u64(&canonical));
if canonical.ends_with('/') {
canonical.pop();
}
if canonical.ends_with(".git") {
canonical.truncate(canonical.len() - 4);
}
Ok((format!("{}-{}", host, ident), canonical))
}
#[cfg(test)]
mod test {
#[test]
fn matches_cargo() {
assert_eq!(
super::url_to_local_dir(crate::INDEX_GIT_URL).unwrap(),
(
"github.com-1ecc6299db9ec823".to_owned(),
crate::INDEX_GIT_URL.to_owned()
)
);
// I've confirmed this also works with a custom registry, unfortunately
// that one includes a secret key as part of the url which would allow
// anyone to publish to the registry, so uhh...here's a fake one instead
assert_eq!(
super::url_to_local_dir(
"https://dl.cloudsmith.io/aBcW1234aBcW1234/embark/rust/cargo/index.git"
)
.unwrap(),
(
"dl.cloudsmith.io-ff79e51ddd2b38fd".to_owned(),
"https://dl.cloudsmith.io/aBcW1234aBcW1234/embark/rust/cargo/index".to_owned()
)
);
// Ensure we actually strip off the irrelevant parts of a url, note that
// the .git suffix is not part of the canonical url, but *is* used when hashing
assert_eq!(
super::url_to_local_dir(&format!(
"registry+{}.git?one=1&two=2#fragment",
crate::INDEX_GIT_URL
))
.unwrap(),
(
"github.com-c786010fb7ef2e6e".to_owned(),
crate::INDEX_GIT_URL.to_owned()
)
);
}
#[test]
fn bare_iterator() {
use super::BareIndex;
let tmp_dir = tempdir::TempDir::new("bare_iterator").unwrap();
let index = BareIndex::with_path(tmp_dir.path().to_owned(), crate::INDEX_GIT_URL);
let repo = index
.open_or_clone()
.expect("Failed to clone crates.io index");
let mut found_gcc_crate = false;
for c in repo.crates() {
if c.name() == "gcc" {
found_gcc_crate = true;
}
}
assert!(found_gcc_crate);
}
#[test]
fn clones_bare_index() {
use super::BareIndex;
let tmp_dir = tempdir::TempDir::new("clones_bare_index").unwrap();
let index = BareIndex::with_path(tmp_dir.path().to_owned(), crate::INDEX_GIT_URL);
let mut repo = index
.open_or_clone()
.expect("Failed to clone crates.io index");
fn test_sval(repo: &super::BareIndexRepo<'_>) {
let krate = repo
.crate_("sval")
.expect("Could not find the crate sval in the index");
let version = krate
.versions()
.iter()
.find(|v| v.version() == "0.0.1")
.expect("Version 0.0.1 of sval does not exist?");
let dep_with_package_name = version
.dependencies()
.iter()
.find(|d| d.name() == "serde_lib")
.expect("sval does not have expected dependency?");
assert_ne!(
dep_with_package_name.name(),
dep_with_package_name.package().unwrap()
);
assert_eq!(
dep_with_package_name.crate_name(),
dep_with_package_name.package().unwrap()
);
}
test_sval(&repo);
repo.retrieve().expect("Failed to fetch crates.io index");
test_sval(&repo);
}
#[test]
fn opens_bare_index() {
use super::BareIndex;
let tmp_dir = tempdir::TempDir::new("opens_bare_index").unwrap();
let index = BareIndex::with_path(tmp_dir.path().to_owned(), crate::INDEX_GIT_URL);
{
let _ = index
.open_or_clone()
.expect("Failed to clone crates.io index");
}
let mut repo = index
.open_or_clone()
.expect("Failed to open crates.io index");
fn test_sval(repo: &super::BareIndexRepo<'_>) | {
let krate = repo
.crate_("sval")
.expect("Could not find the crate sval in the index");
let version = krate
.versions()
.iter()
.find(|v| v.version() == "0.0.1")
.expect("Version 0.0.1 of sval does not exist?");
let dep_with_package_name = version
.dependencies()
.iter()
.find(|d| d.name() == "serde_lib")
.expect("sval does not have expected dependency?");
assert_ne!(
dep_with_package_name.name(),
dep_with_package_name.package().unwrap()
);
assert_eq!( | identifier_body |
|
bare_index.rs | pub fn with_path(path: PathBuf, url: &str) -> Self {
Self {
path,
url: url.to_owned(),
}
}
/// Creates an index for the default crates.io registry, using the same
/// disk location as cargo itself.
#[inline]
pub fn new_cargo_default() -> Self {
// UNWRAP: The default index git URL is known to safely convert to a path.
Self::from_url(crate::INDEX_GIT_URL).unwrap()
}
/// Opens the local index, which acts as a kind of lock for source control
/// operations
#[inline]
pub fn open_or_clone(&self) -> Result<BareIndexRepo<'_>, Error> {
BareIndexRepo::new(self)
}
/// Get the index directory.
#[inline]
pub fn path(&self) -> &Path {
&self.path
}
}
/// Self-referential struct where `Tree` borrows from `Repository`
struct UnsafeRepoTree {
/// Warning: order of the fields is necessary for safety. `tree` must Drop before `repo`.
tree: git2::Tree<'static>,
repo: Box<git2::Repository>,
// Currently !Unpin is Rust's heuristic for self-referential structs
_self_referential: PhantomPinned,
}
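// Why this is sound (sketch): `repo` is boxed, so its heap allocation keeps a
// stable address even if `UnsafeRepoTree` itself moves; `tree` really borrows
// from that allocation despite the transmuted 'static lifetime. Struct fields
// drop in declaration order, so `tree` is dropped before `repo`, which is what
// the warning above relies on.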
/// Opened instance of [`BareIndex`]
pub struct BareIndexRepo<'a> {
inner: &'a BareIndex,
head_str: String,
rt: UnsafeRepoTree,
}
impl<'a> BareIndexRepo<'a> {
fn new(index: &'a BareIndex) -> Result<Self, Error> {
let exists = git2::Repository::discover(&index.path)
.map(|repository| {
repository
.find_remote("origin")
.ok()
// Cargo creates a checkout without an origin set,
// so default to true in case of missing origin
.map_or(true, |remote| {
remote.url().map_or(true, |url| url == index.url)
})
})
.unwrap_or(false);
let repo = if !exists {
let mut opts = git2::RepositoryInitOptions::new();
opts.external_template(false);
let repo = git2::Repository::init_opts(&index.path, &opts)?;
{
let mut origin_remote = repo
.find_remote("origin")
.or_else(|_| repo.remote_anonymous(&index.url))?;
origin_remote.fetch(
&[
"HEAD:refs/remotes/origin/HEAD",
"master:refs/remotes/origin/master",
],
Some(&mut crate::fetch_opts()),
None,
)?;
}
repo
} else {
git2::Repository::open(&index.path)?
};
// It's going to be used in a self-referential type. Boxing prevents it from being moved
// and adds a layer of indirection that will hopefully not upset noalias analysis.
let repo = Box::new(repo);
let head = repo
// Fallback to HEAD, as a fresh clone won't have a FETCH_HEAD
.refname_to_id("FETCH_HEAD")
.or_else(|_| repo.refname_to_id("HEAD"))?;
let head_str = head.to_string();
let tree = {
let commit = repo.find_commit(head)?;
let tree = commit.tree()?;
// See `UnsafeRepoTree`
unsafe { std::mem::transmute::<git2::Tree<'_>, git2::Tree<'static>>(tree) }
};
Ok(Self {
inner: index,
head_str,
rt: UnsafeRepoTree {
repo,
tree,
_self_referential: PhantomPinned,
},
})
}
/// Fetches the latest state of the remote index repository. Note that if this
/// fetch brings in a new commit, previously written cache entries will no longer
/// be used, because their recorded commit version will no longer match.
pub fn retrieve(&mut self) -> Result<(), Error> {
{
let mut origin_remote = self
.rt
.repo
.find_remote("origin")
.or_else(|_| self.rt.repo.remote_anonymous(&self.inner.url))?;
origin_remote.fetch(
&[
"HEAD:refs/remotes/origin/HEAD",
"master:refs/remotes/origin/master",
],
Some(&mut crate::fetch_opts()),
None,
)?;
}
let head = self
.rt
.repo
.refname_to_id("FETCH_HEAD")
.or_else(|_| self.rt.repo.refname_to_id("HEAD"))?;
let head_str = head.to_string();
let commit = self.rt.repo.find_commit(head)?;
let tree = commit.tree()?;
// See `UnsafeRepoTree`
let tree = unsafe { std::mem::transmute::<git2::Tree<'_>, git2::Tree<'static>>(tree) };
self.head_str = head_str;
self.rt.tree = tree;
Ok(())
}
/// Reads a crate from the index, it will attempt to use a cached entry if
/// one is available, otherwise it will fallback to reading the crate
/// directly from the git blob containing the crate information.
pub fn crate_(&self, name: &str) -> Option<Crate> {
let rel_path = match crate::crate_name_to_relative_path(name) {
Some(rp) => rp,
None => return None,
};
// Attempt to load the .cache/ entry first, this is purely an acceleration
// mechanism and can fail for a few reasons that are non-fatal
{
let mut cache_path = self.inner.path.join(".cache");
cache_path.push(&rel_path);
if let Ok(cache_bytes) = std::fs::read(&cache_path) {
if let Ok(krate) = Crate::from_cache_slice(&cache_bytes, &self.head_str) {
return Some(krate);
}
}
}
// Fallback to reading the blob directly via git if we don't have a
// valid cache entry
self.crate_from_rel_path(&rel_path).ok()
}
fn crate_from_rel_path(&self, path: &str) -> Result<Crate, Error> {
let entry = self.rt.tree.get_path(&Path::new(path))?;
let object = entry.to_object(&self.rt.repo)?;
let blob = object
.as_blob()
.ok_or_else(|| Error::Io(io::Error::new(io::ErrorKind::NotFound, path.to_owned())))?;
Crate::from_slice(blob.content()).map_err(Error::Io)
}
/// Retrieve an iterator over all the crates in the index.
/// skips crates that can not be parsed.
#[inline]
pub fn crates(&self) -> Crates<'_> {
Crates {
blobs: self.crates_refs(),
}
}
/// Retrieve an iterator over all the crates in the index.
/// Returns opaque reference for each crate in the index, which can be used with [`CrateRef::parse`]
fn crates_refs(&self) -> CrateRefs<'_> {
let mut stack = Vec::with_capacity(800);
// Scan only directories at top level (skip config.json, etc.)
for entry in self.rt.tree.iter() {
let entry = entry.to_object(&self.rt.repo).unwrap();
if entry.as_tree().is_some() {
stack.push(entry);
}
}
CrateRefs {
stack,
rt: &self.rt,
}
}
/// Get the global configuration of the index.
pub fn index_config(&self) -> Result<IndexConfig, Error> {
let entry = self.rt.tree.get_path(&Path::new("config.json"))?;
let object = entry.to_object(&self.rt.repo)?;
let blob = object
.as_blob()
.ok_or_else(|| Error::Io(io::Error::new(io::ErrorKind::NotFound, "config.json")))?;
serde_json::from_slice(blob.content()).map_err(Error::Json)
}
}
/// Iterator over all crates in the index, but returns opaque objects that can be parsed separately.
///
/// See [`CrateRef::parse`].
struct CrateRefs<'a> {
stack: Vec<git2::Object<'a>>,
rt: &'a UnsafeRepoTree,
}
/// Opaque representation of a crate in the index. See [`CrateRef::parse`].
pub(crate) struct CrateRef<'a>(pub(crate) git2::Object<'a>);
impl CrateRef<'_> {
#[inline]
/// Parse a crate from [`BareIndex::crates_blobs`] iterator
pub fn parse(&self) -> Option<Crate> {
Crate::from_slice(self.as_slice()?).ok()
}
/// Raw crate data that can be parsed with [`Crate::from_slice`]
pub fn as_slice(&self) -> Option<&[u8]> {
Some(self.0.as_blob()?.content())
}
}
impl<'a> Iterator for CrateRefs<'a> {
type Item = CrateRef<'a>;
fn next(&mut self) -> Option<Self::Item> {
while let Some(last) = self.stack.pop() {
match last.as_tree() {
None => return Some(CrateRef(last)),
Some(tree) => {
for entry in tree.iter(). | /// Creates a bare index at the provided path with the specified repository URL.
#[inline] | random_line_split |
|
encode.rs | None => continue,
};
for edge in deps.iter() {
if let Some(to_depend_on) = lookup_id(edge)? {
g.link(id.clone(), to_depend_on);
}
}
}
g
};
let replacements = {
let mut replacements = HashMap::new();
for &(ref id, ref pkg) in live_pkgs.values() {
if let Some(ref replace) = pkg.replace {
assert!(pkg.dependencies.is_none());
if let Some(replace_id) = lookup_id(replace)? {
replacements.insert(id.clone(), replace_id);
}
}
}
replacements
};
let mut metadata = self.metadata.unwrap_or(BTreeMap::new());
// Parse out all package checksums. After we do this we can be in a few
// situations:
//
// * We parsed no checksums. In this situation we're dealing with an old
// lock file and we're gonna fill them all in.
// * We parsed some checksums, but not one for all packages listed. It
// could have been the case that some were listed, then an older Craft
// client added more dependencies, and now we're going to fill in the
// missing ones.
// * There are too many checksums listed, indicative of an older Craft
// client removing a package but not updating the checksums listed.
//
// In all of these situations they're part of normal usage, so we don't
// really worry about it. We just try to slurp up as many checksums as
// possible.
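// For orientation, a checksum entry in the lockfile's `metadata` table looks
// roughly like (illustrative values):
//   "checksum foo 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "<sha256 hex digest>"
// with the literal value "<none>" standing in for a missing checksum; both
// forms are handled below.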
let mut checksums = HashMap::new();
let prefix = "checksum ";
let mut to_remove = Vec::new();
for (k, v) in metadata.iter().filter(|p| p.0.starts_with(prefix)) {
to_remove.push(k.to_string());
let k = &k[prefix.len()..];
let enc_id: EncodablePackageId = k.parse()
.chain_error(|| internal("invalid encoding of checksum in lockfile"))?;
let id = match lookup_id(&enc_id) {
Ok(Some(id)) => id,
_ => continue,
};
let v = if v == "<none>" {
None
} else {
Some(v.to_string())
};
checksums.insert(id, v);
}
for k in to_remove {
metadata.remove(&k);
}
Ok(Resolve {
graph: g,
features: HashMap::new(),
replacements: replacements,
checksums: checksums,
metadata: metadata,
})
}
}
fn build_path_deps(ws: &Workspace) -> HashMap<String, SourceId> {
// If a chest is *not* a path source, then we're probably in a situation
// such as `craft install` with a lock file from a remote dependency. In
// that case we don't need to fixup any path dependencies (as they're not
// actually path dependencies any more), so we ignore them.
let members = ws.members()
.filter(|p| p.package_id().source_id().is_path())
.collect::<Vec<_>>();
let mut ret = HashMap::new();
for member in members.iter() {
ret.insert(member.package_id().name().to_string(),
member.package_id().source_id().clone());
}
for member in members.iter() {
build(member, ws.config(), &mut ret);
}
return ret;
fn build(pkg: &Package, config: &Config, ret: &mut HashMap<String, SourceId>) {
let replace = pkg.manifest().replace();
let deps = pkg.dependencies()
.iter()
.chain(replace.iter().map(|p| &p.1))
.filter(|d| !ret.contains_key(d.name()))
.map(|d| d.source_id())
.filter(|id| id.is_path())
.filter_map(|id| id.url().to_file_path().ok())
.map(|path| path.join("Craft.toml"))
.filter_map(|path| Package::for_path(&path, config).ok())
.collect::<Vec<_>>();
for pkg in deps {
ret.insert(pkg.name().to_string(), pkg.package_id().source_id().clone());
build(&pkg, config, ret);
}
}
}
#[derive(RustcEncodable, RustcDecodable, Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct EncodableDependency {
name: String,
version: String,
source: Option<SourceId>,
dependencies: Option<Vec<EncodablePackageId>>,
replace: Option<EncodablePackageId>,
}
#[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Clone)]
pub struct EncodablePackageId {
name: String,
version: String,
source: Option<SourceId>,
}
impl fmt::Display for EncodablePackageId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} {}", self.name, self.version)?;
if let Some(ref s) = self.source {
write!(f, " ({})", s.to_url())?;
}
Ok(())
}
}
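// e.g. a package from a registry renders as
// "libc 0.2.70 (registry+https://github.com/rust-lang/crates.io-index)", while a
// path package with no serialized source renders as just "libc 0.2.70"
// (illustrative name and version).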
impl FromStr for EncodablePackageId {
type Err = Box<CraftError>;
fn from_str(s: &str) -> CraftResult<EncodablePackageId> {
let regex = Regex::new(r"^([^ ]+) ([^ ]+)(?: \(([^\)]+)\))?$").unwrap();
let captures = regex.captures(s).ok_or_else(|| internal("invalid serialized PackageId"))?;
let name = captures.at(1).unwrap();
let version = captures.at(2).unwrap();
let source_id = match captures.at(3) {
Some(s) => Some(SourceId::from_url(s)?),
None => None,
};
Ok(EncodablePackageId {
name: name.to_string(),
version: version.to_string(),
source: source_id,
})
}
}
impl Encodable for EncodablePackageId {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
self.to_string().encode(s)
}
}
impl Decodable for EncodablePackageId {
fn decode<D: Decoder>(d: &mut D) -> Result<EncodablePackageId, D::Error> {
String::decode(d).and_then(|string| {
string.parse::<EncodablePackageId>()
.map_err(|e| d.error(&e.to_string()))
})
}
}
pub struct WorkspaceResolve<'a, 'cfg: 'a> {
pub ws: &'a Workspace<'cfg>,
pub resolve: &'a Resolve,
pub use_root_key: bool,
}
impl<'a, 'cfg> Encodable for WorkspaceResolve<'a, 'cfg> {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
let mut ids: Vec<&PackageId> = self.resolve.graph.iter().collect();
ids.sort();
let root = self.ws
.members()
.max_by_key(|member| member.name())
.unwrap()
.package_id();
let encodable = ids.iter()
.filter_map(|&id| {
if self.use_root_key && root == id {
return None;
}
Some(encodable_resolve_node(id, self.resolve))
})
.collect::<Vec<_>>();
let mut metadata = self.resolve.metadata.clone();
for id in ids.iter().filter(|id| !id.source_id().is_path()) {
let checksum = match self.resolve.checksums[*id] {
Some(ref s) => &s[..],
None => "<none>",
};
let id = encodable_package_id(id);
metadata.insert(format!("checksum {}", id.to_string()), checksum.to_string());
}
let metadata = if metadata.len() == 0 {
None
} else {
Some(metadata)
};
let root = if self.use_root_key {
Some(encodable_resolve_node(&root, self.resolve))
} else {
None
};
EncodableResolve {
package: Some(encodable),
root: root,
metadata: metadata,
}
.encode(s)
}
}
fn encodable_resolve_node(id: &PackageId, resolve: &Resolve) -> EncodableDependency {
let (replace, deps) = match resolve.replacement(id) {
Some(id) => (Some(encodable_package_id(id)), None),
None => {
let mut deps = resolve.graph
.edges(id)
.into_iter()
.flat_map(|a| a)
.map(encodable_package_id)
.collect::<Vec<_>>();
deps.sort();
(None, Some(deps))
}
};
let source = if id.source_id().is_path() {
None
} else {
Some(id.source_id().clone())
};
EncodableDependency {
name: id.name().to_string(),
version: id.version().to_string(),
source: source,
dependencies: deps,
replace: replace,
}
}
fn encodable_package_id(id: &PackageId) -> EncodablePackageId | {
let source = if id.source_id().is_path() {
None
} else {
Some(id.source_id().with_precise(None))
};
EncodablePackageId {
name: id.name().to_string(),
version: id.version().to_string(),
source: source,
}
} | identifier_body |
|
encode.rs | packages = self.package.unwrap_or(Vec::new());
if let Some(root) = self.root {
packages.insert(0, root);
}
packages
};
// `PackageId`s in the lock file don't include the `source` part
// for workspace members, so we reconstruct proper ids.
let (live_pkgs, all_pkgs) = {
let mut live_pkgs = HashMap::new();
let mut all_pkgs = HashSet::new();
for pkg in packages.iter() {
let enc_id = EncodablePackageId {
name: pkg.name.clone(),
version: pkg.version.clone(),
source: pkg.source.clone(),
};
if !all_pkgs.insert(enc_id.clone()) {
return Err(internal(format!("package `{}` is specified twice in the lockfile", pkg.name)));
}
let id = match pkg.source.as_ref().or(path_deps.get(&pkg.name)) {
// We failed to find a local package in the workspace.
// It must have been removed and should be ignored.
None => continue,
Some(source) => PackageId::new(&pkg.name, &pkg.version, source)?,
};
assert!(live_pkgs.insert(enc_id, (id, pkg)).is_none())
}
(live_pkgs, all_pkgs)
};
let lookup_id = |enc_id: &EncodablePackageId| -> CraftResult<Option<PackageId>> {
match live_pkgs.get(enc_id) {
Some(&(ref id, _)) => Ok(Some(id.clone())),
None => {
if all_pkgs.contains(enc_id) {
// Package is found in the lockfile, but it is
// no longer a member of the workspace.
Ok(None)
} else {
Err(internal(format!("package `{}` is specified as a dependency, but is missing from the \
package list",
enc_id)))
}
}
}
};
let g = {
let mut g = Graph::new();
for &(ref id, _) in live_pkgs.values() {
g.add(id.clone(), &[]);
}
for &(ref id, ref pkg) in live_pkgs.values() {
let deps = match pkg.dependencies {
Some(ref deps) => deps,
None => continue,
};
for edge in deps.iter() {
if let Some(to_depend_on) = lookup_id(edge)? {
g.link(id.clone(), to_depend_on);
}
}
}
g
};
let replacements = {
let mut replacements = HashMap::new();
for &(ref id, ref pkg) in live_pkgs.values() {
if let Some(ref replace) = pkg.replace {
assert!(pkg.dependencies.is_none());
if let Some(replace_id) = lookup_id(replace)? {
replacements.insert(id.clone(), replace_id);
}
}
}
replacements
};
let mut metadata = self.metadata.unwrap_or(BTreeMap::new());
// Parse out all package checksums. After we do this we can be in a few
// situations:
//
// * We parsed no checksums. In this situation we're dealing with an old
// lock file and we're gonna fill them all in.
// * We parsed some checksums, but not one for all packages listed. It
// could have been the case that some were listed, then an older Craft
// client added more dependencies, and now we're going to fill in the
// missing ones.
// * There are too many checksums listed, indicative of an older Craft
// client removing a package but not updating the checksums listed.
//
// In all of these situations they're part of normal usage, so we don't
// really worry about it. We just try to slurp up as many checksums as
// possible.
let mut checksums = HashMap::new();
let prefix = "checksum ";
let mut to_remove = Vec::new();
for (k, v) in metadata.iter().filter(|p| p.0.starts_with(prefix)) {
to_remove.push(k.to_string());
let k = &k[prefix.len()..];
let enc_id: EncodablePackageId = k.parse()
.chain_error(|| internal("invalid encoding of checksum in lockfile"))?;
let id = match lookup_id(&enc_id) {
Ok(Some(id)) => id,
_ => continue,
};
let v = if v == "<none>" {
None
} else {
Some(v.to_string())
};
checksums.insert(id, v);
}
for k in to_remove {
metadata.remove(&k);
}
Ok(Resolve {
graph: g,
features: HashMap::new(),
replacements: replacements,
checksums: checksums,
metadata: metadata,
})
}
}
fn build_path_deps(ws: &Workspace) -> HashMap<String, SourceId> {
// If a chest is *not* a path source, then we're probably in a situation
// such as `craft install` with a lock file from a remote dependency. In
// that case we don't need to fixup any path dependencies (as they're not
// actually path dependencies any more), so we ignore them.
let members = ws.members()
.filter(|p| p.package_id().source_id().is_path())
.collect::<Vec<_>>();
let mut ret = HashMap::new();
for member in members.iter() {
ret.insert(member.package_id().name().to_string(),
member.package_id().source_id().clone());
}
for member in members.iter() {
build(member, ws.config(), &mut ret);
}
return ret;
fn build(pkg: &Package, config: &Config, ret: &mut HashMap<String, SourceId>) {
let replace = pkg.manifest().replace();
let deps = pkg.dependencies()
.iter()
.chain(replace.iter().map(|p| &p.1))
.filter(|d| !ret.contains_key(d.name()))
.map(|d| d.source_id())
.filter(|id| id.is_path())
.filter_map(|id| id.url().to_file_path().ok())
.map(|path| path.join("Craft.toml"))
.filter_map(|path| Package::for_path(&path, config).ok())
.collect::<Vec<_>>();
for pkg in deps {
ret.insert(pkg.name().to_string(), pkg.package_id().source_id().clone());
build(&pkg, config, ret);
}
}
}
#[derive(RustcEncodable, RustcDecodable, Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct EncodableDependency {
name: String,
version: String,
source: Option<SourceId>,
dependencies: Option<Vec<EncodablePackageId>>,
replace: Option<EncodablePackageId>,
}
#[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Clone)]
pub struct EncodablePackageId {
name: String,
version: String,
source: Option<SourceId>,
}
impl fmt::Display for EncodablePackageId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} {}", self.name, self.version)?;
if let Some(ref s) = self.source {
write!(f, " ({})", s.to_url())?;
}
Ok(())
}
}
impl FromStr for EncodablePackageId {
type Err = Box<CraftError>;
fn | (s: &str) -> CraftResult<EncodablePackageId> {
let regex = Regex::new(r"^([^ ]+) ([^ ]+)(?: \(([^\)]+)\))?$").unwrap();
let captures = regex.captures(s).ok_or_else(|| internal("invalid serialized PackageId"))?;
let name = captures.at(1).unwrap();
let version = captures.at(2).unwrap();
let source_id = match captures.at(3) {
Some(s) => Some(SourceId::from_url(s)?),
None => None,
};
Ok(EncodablePackageId {
name: name.to_string(),
version: version.to_string(),
source: source_id,
})
}
}
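// The accepted serialized form mirrors the `fmt::Display` impl above:
// "<name> <version>", optionally followed by " (<source-url>)". Illustrative
// (made-up) inputs the regex matches:
//
//   "libfoo 0.2.1 (registry+https://example.com/index)"
//   "libfoo 0.2.1"   // source omitted, as for workspace members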
impl Encodable for EncodablePackageId {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
self.to_string().encode(s)
}
}
impl Decodable for EncodablePackageId {
fn decode<D: Decoder>(d: &mut D) -> Result<EncodablePackageId, D::Error> {
String::decode(d).and_then(|string| {
string.parse::<EncodablePackageId>()
.map_err(|e| d.error(&e.to_string()))
})
}
}
pub struct WorkspaceResolve<'a, 'cfg: 'a> {
pub ws: &'a Workspace<'cfg>,
pub resolve: &'a Resolve,
pub use_root_key: bool,
}
impl<'a, 'cfg> Encodable for WorkspaceResolve<'a, 'cfg> {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
let mut ids: Vec<&PackageId> = self.resolve.graph.iter().collect();
ids.sort();
let root = self.ws
.members()
.max_by_key(|member| member.name())
.unwrap()
.package_id();
let encodable = ids.iter()
.filter_map(|&id| {
| from_str | identifier_name |
encode.rs | packages = self.package.unwrap_or(Vec::new());
if let Some(root) = self.root {
packages.insert(0, root);
}
packages
};
// `PackageId`s in the lock file don't include the `source` part
// for workspace members, so we reconstruct proper ids.
let (live_pkgs, all_pkgs) = {
let mut live_pkgs = HashMap::new();
let mut all_pkgs = HashSet::new();
for pkg in packages.iter() {
let enc_id = EncodablePackageId {
name: pkg.name.clone(),
version: pkg.version.clone(),
source: pkg.source.clone(),
};
if !all_pkgs.insert(enc_id.clone()) {
return Err(internal(format!("package `{}` is specified twice in the lockfile", pkg.name)));
}
let id = match pkg.source.as_ref().or(path_deps.get(&pkg.name)) {
// We failed to find a local package in the workspace.
// It must have been removed and should be ignored.
None => continue,
Some(source) => PackageId::new(&pkg.name, &pkg.version, source)?,
};
assert!(live_pkgs.insert(enc_id, (id, pkg)).is_none())
}
(live_pkgs, all_pkgs)
};
let lookup_id = |enc_id: &EncodablePackageId| -> CraftResult<Option<PackageId>> {
match live_pkgs.get(enc_id) {
Some(&(ref id, _)) => Ok(Some(id.clone())),
None => {
if all_pkgs.contains(enc_id) {
// Package is found in the lockfile, but it is
// no longer a member of the workspace.
Ok(None)
} else {
Err(internal(format!("package `{}` is specified as a dependency, but is missing from the \
package list",
enc_id)))
}
}
}
};
let g = {
let mut g = Graph::new();
for &(ref id, _) in live_pkgs.values() {
g.add(id.clone(), &[]);
}
for &(ref id, ref pkg) in live_pkgs.values() {
let deps = match pkg.dependencies {
Some(ref deps) => deps,
None => continue,
};
| }
}
}
g
};
let replacements = {
let mut replacements = HashMap::new();
for &(ref id, ref pkg) in live_pkgs.values() {
if let Some(ref replace) = pkg.replace {
assert!(pkg.dependencies.is_none());
if let Some(replace_id) = lookup_id(replace)? {
replacements.insert(id.clone(), replace_id);
}
}
}
replacements
};
let mut metadata = self.metadata.unwrap_or(BTreeMap::new());
// Parse out all package checksums. After we do this we can be in a few
// situations:
//
// * We parsed no checksums. In this situation we're dealing with an old
// lock file and we're gonna fill them all in.
// * We parsed some checksums, but not one for all packages listed. It
// could have been the case that some were listed, then an older Craft
// client added more dependencies, and now we're going to fill in the
// missing ones.
// * There are too many checksums listed, indicative of an older Craft
// client removing a package but not updating the checksums listed.
//
// In all of these situations they're part of normal usage, so we don't
// really worry about it. We just try to slurp up as many checksums as
// possible.
let mut checksums = HashMap::new();
let prefix = "checksum ";
let mut to_remove = Vec::new();
for (k, v) in metadata.iter().filter(|p| p.0.starts_with(prefix)) {
to_remove.push(k.to_string());
let k = &k[prefix.len()..];
let enc_id: EncodablePackageId = k.parse()
.chain_error(|| internal("invalid encoding of checksum in lockfile"))?;
let id = match lookup_id(&enc_id) {
Ok(Some(id)) => id,
_ => continue,
};
let v = if v == "<none>" {
None
} else {
Some(v.to_string())
};
checksums.insert(id, v);
}
for k in to_remove {
metadata.remove(&k);
}
Ok(Resolve {
graph: g,
features: HashMap::new(),
replacements: replacements,
checksums: checksums,
metadata: metadata,
})
}
}
fn build_path_deps(ws: &Workspace) -> HashMap<String, SourceId> {
// If a chest is *not* a path source, then we're probably in a situation
// such as `craft install` with a lock file from a remote dependency. In
// that case we don't need to fixup any path dependencies (as they're not
// actually path dependencies any more), so we ignore them.
let members = ws.members()
.filter(|p| p.package_id().source_id().is_path())
.collect::<Vec<_>>();
let mut ret = HashMap::new();
for member in members.iter() {
ret.insert(member.package_id().name().to_string(),
member.package_id().source_id().clone());
}
for member in members.iter() {
build(member, ws.config(), &mut ret);
}
return ret;
fn build(pkg: &Package, config: &Config, ret: &mut HashMap<String, SourceId>) {
let replace = pkg.manifest().replace();
let deps = pkg.dependencies()
.iter()
.chain(replace.iter().map(|p| &p.1))
.filter(|d| !ret.contains_key(d.name()))
.map(|d| d.source_id())
.filter(|id| id.is_path())
.filter_map(|id| id.url().to_file_path().ok())
.map(|path| path.join("Craft.toml"))
.filter_map(|path| Package::for_path(&path, config).ok())
.collect::<Vec<_>>();
for pkg in deps {
ret.insert(pkg.name().to_string(), pkg.package_id().source_id().clone());
build(&pkg, config, ret);
}
}
}
#[derive(RustcEncodable, RustcDecodable, Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct EncodableDependency {
name: String,
version: String,
source: Option<SourceId>,
dependencies: Option<Vec<EncodablePackageId>>,
replace: Option<EncodablePackageId>,
}
#[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Clone)]
pub struct EncodablePackageId {
name: String,
version: String,
source: Option<SourceId>,
}
impl fmt::Display for EncodablePackageId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} {}", self.name, self.version)?;
if let Some(ref s) = self.source {
write!(f, " ({})", s.to_url())?;
}
Ok(())
}
}
impl FromStr for EncodablePackageId {
type Err = Box<CraftError>;
fn from_str(s: &str) -> CraftResult<EncodablePackageId> {
let regex = Regex::new(r"^([^ ]+) ([^ ]+)(?: \(([^\)]+)\))?$").unwrap();
let captures = regex.captures(s).ok_or_else(|| internal("invalid serialized PackageId"))?;
let name = captures.at(1).unwrap();
let version = captures.at(2).unwrap();
let source_id = match captures.at(3) {
Some(s) => Some(SourceId::from_url(s)?),
None => None,
};
Ok(EncodablePackageId {
name: name.to_string(),
version: version.to_string(),
source: source_id,
})
}
}
impl Encodable for EncodablePackageId {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
self.to_string().encode(s)
}
}
impl Decodable for EncodablePackageId {
fn decode<D: Decoder>(d: &mut D) -> Result<EncodablePackageId, D::Error> {
String::decode(d).and_then(|string| {
string.parse::<EncodablePackageId>()
.map_err(|e| d.error(&e.to_string()))
})
}
}
pub struct WorkspaceResolve<'a, 'cfg: 'a> {
pub ws: &'a Workspace<'cfg>,
pub resolve: &'a Resolve,
pub use_root_key: bool,
}
impl<'a, 'cfg> Encodable for WorkspaceResolve<'a, 'cfg> {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
let mut ids: Vec<&PackageId> = self.resolve.graph.iter().collect();
ids.sort();
let root = self.ws
.members()
.max_by_key(|member| member.name())
.unwrap()
.package_id();
let encodable = ids.iter()
.filter_map(|&id| {
| for edge in deps.iter() {
if let Some(to_depend_on) = lookup_id(edge)? {
g.link(id.clone(), to_depend_on); | random_line_split |
MCObserver.py | the job will run interactively in the local directory. If the user wishes to submit the jobs to swif, the option "swif=1" must be supplied.
#
# SWIF DOCUMENTATION:
# https://scicomp.jlab.org/docs/swif
# https://scicomp.jlab.org/docs/swif-cli
# https://scicomp.jlab.org/help/swif/add-job.txt #consider phase!
#
##########################################################################################################################
import MySQLdb
#import MySQLdb.cursors
from os import environ
from optparse import OptionParser
import os.path
#import mysql.connector
import time
import os
import getpass
import sys
import re
import subprocess
from subprocess import call
import socket
import glob
import json
import time
from datetime import timedelta
from datetime import datetime
import smtplib
from email.message import EmailMessage
from multiprocessing import Process
import random
import pipes
import random
import pwd
MCWRAPPER_BOT_HOST_NAME=str(socket.gethostname())
dbhost = "hallddb.jlab.org"
dbuser = 'mcuser'
dbpass = ''
dbname = 'gluex_mc'
try:
dbcnx=MySQLdb.connect(host=dbhost, user=dbuser, db=dbname)
dbcursor=dbcnx.cursor(MySQLdb.cursors.DictCursor)
except:
print("WARNING: CANNOT CONNECT TO DATABASE. JOBS WILL NOT BE CONTROLLED OR MONITORED")
pass
runner_name=pwd.getpwuid( os.getuid() )[0]
if( not (runner_name=="tbritton" or runner_name=="mcwrap")):
print("ERROR: You must be tbritton or mcwrap to run this script")
sys.exit(1)
def exists_remote(host, path):
"""Test if a file exists at path on a host accessible with SSH."""
status = subprocess.call(
['ssh', host, 'test -f {}'.format(pipes.quote(path))])
if status == 0:
return True
if status == 1:
return False
raise Exception('SSH failed')
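# Illustrative call (the path is made up): exists_remote(runner_name+'@dtn1902', '/mss/halld/some/file.hddm')
# returns True/False from the exit code of `test -f` run over ssh; any other ssh outcome
# raises an exception instead of being treated as "file missing".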
def CheckForFile(rootLoc,expFile):
found=False
subloc="hddm"
parse_expFile=expFile.split(".")
#print(parse_expFile[len(parse_expFile)-1])
if(parse_expFile[len(parse_expFile)-1]=="root"):
subloc="root/monitoring_hists"
#if(os.path.isfile('/osgpool/halld/tbritton/REQUESTEDMC_OUTPUT/'+rootLoc+"/"+subloc+"/"+expFile) or os.path.isfile('/lustre19/expphy/cache/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) or os.path.isfile('/mss/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) ):
if(os.path.isfile('/osgpool/halld/'+runner_name+'/REQUESTEDMC_OUTPUT/'+rootLoc+"/"+subloc+"/"+expFile) or exists_remote(runner_name+'@dtn1902','/lustre19/expphy/cache/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) or exists_remote(runner_name+'@dtn1902','/mss/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) or exists_remote(runner_name+'@dtn1902','/work/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) ):
found=True
else:
print(rootLoc+"/"+subloc+"/"+expFile+" NOT FOUND")
return found
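# CheckForFile looks for the file in the local /osgpool output area and, via ssh to dtn1902,
# in the /lustre19 cache, /mss tape and /work copies of REQUESTED_MC; ".root" files are
# expected under root/monitoring_hists/ and everything else under hddm/.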
def checkJobFilesForCompletion(comp_assignment):
#OutstandingProjectsQuery="SELECT * FROM Project WHERE (Is_Dispatched != '0' && Tested != '-1' && Tested != '2' ) && Notified is NULL"
#dbcursor.execute(OutstandingProjectsQuery)
#OutstandingProjects=dbcursor.fetchall()
dbcnx_comp=MySQLdb.connect(host=dbhost, user=dbuser, db=dbname)
dbcursor_comp=dbcnx_comp.cursor(MySQLdb.cursors.DictCursor)
outdir_root="/osgpool/halld/"+runner_name+"/REQUESTEDMC_OUTPUT/"
print("checking "+str(len(comp_assignment)))
for attempt in comp_assignment:#OutstandingProjects:
jobinfoq="SELECT * from Jobs where ID="+str(attempt["Job_ID"])
dbcursor.execute(jobinfoq)
job = dbcursor.fetchall()[0]
projq="SELECT * From Project Where ID="+str(job["Project_ID"])
dbcursor.execute(projq)
proj = dbcursor.fetchall()[0]
locparts=proj['OutputLocation'].split("/")
#print("~~~~~~~~~~~~~~~~~~")
#print("ProjID:",proj['ID'])
files=[]
dirs=[]
#print locparts[len(locparts)-2]
for r, dirs, files in os.walk(outdir_root+locparts[len(locparts)-2]) :
files = [f for f in files if not f[0] == '.']
dirs[:] = [d for d in dirs if not d[0] == '.']
#print("NumFiles:",len(files))
#print(dirs)
#DISTINCT ID ------in query below
#print(fulfilledJobs)
#print("Jobs fulfilled:",str(len(fulfilledJobs)))
if(proj["Tested"]==2 or proj["Tested"]==3):
continue
rootLoc=proj['OutputLocation'].split("REQUESTED_MC")[1]#.replace("/","")
nullify_list=[]
#print("Data already Verified?",job['DataVerified'])
if(job['DataVerified'] !=0 ):
continue
STANDARD_NAME=str(job['RunNumber']).zfill(6)+'_'+str(job['FileNumber']).zfill(3)
if(proj['Generator']!="file:"):
STANDARD_NAME=proj['Generator']+'_'+STANDARD_NAME
#print(STANDARD_NAME)
#check if postprocessor is being run
postproc_append=""
if(proj['GenPostProcessing'] != None and proj['GenPostProcessing'] != ""):
print("Postprocessing:",proj['GenPostProcessing'])
postproc_append="_"+proj['GenPostProcessing'].split(":")[0]
Expected_returned_files=[]
if(str(proj['RunGeneration'])=="1" and str(proj['SaveGeneration'])=="1" and str(proj['Generator'])!="particle_gun"):
Expected_returned_files.append(STANDARD_NAME+postproc_append+".hddm")
if(str(proj['RunGeant'])=="1" and str(proj['SaveGeant'])=="1"):
Expected_returned_files.append(STANDARD_NAME+'_geant'+str(proj['GeantVersion'])+postproc_append+'.hddm')
if(str(proj['RunSmear'])=="1" and str(proj['SaveSmear'])=="1"):
Expected_returned_files.append(STANDARD_NAME+'_geant'+str(proj['GeantVersion'])+'_smeared'+postproc_append+'.hddm')
if(str(proj['RunReconstruction'])=="1" and str(proj['SaveReconstruction'])=="1"):
Expected_returned_files.append('dana_rest_'+STANDARD_NAME+postproc_append+'.hddm')
Expected_returned_files.append('hd_root_'+STANDARD_NAME+postproc_append+'.root')
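# Worked example (run number, file number, generator and Geant version are made-up values):
# with RunNumber=30730, FileNumber=7, Generator='genr8', GeantVersion=4 and no post-processing,
# STANDARD_NAME is 'genr8_030730_007' and a job that saved every step is expected to return
#   genr8_030730_007.hddm
#   genr8_030730_007_geant4.hddm
#   genr8_030730_007_geant4_smeared.hddm
#   dana_rest_genr8_030730_007.hddm
#   hd_root_genr8_030730_007.root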
found_AllexpFile=True
for expFile in Expected_returned_files:
#print(expFile)
#print("checking for",expFile,"@",rootLoc)
found=CheckForFile(rootLoc,expFile)
if not found:
#print(expFile+" NOT FOUND!!!!")
found_AllexpFile=False
break
if found_AllexpFile:
Update_q="UPDATE Attempts Set Status=44,ExitCode=0 where ID="+str(attempt["ID"])
print(Update_q)
dbcursor_comp.execute(Update_q)
dbcnx_comp.commit()
else:
continue
########################################################## MAIN ##########################################################
def array_split(lst,n):
to_return=[]
for i in range(0,n):
to_return.append([])
for count, ele in enumerate(lst):
#print(ele)
index=count%n
#print(index)
to_return[index].append(ele)
#print(count)
#print(len(to_return))
return to_return
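# array_split deals elements out round-robin, e.g. array_split([1,2,3,4,5], 2) -> [[1, 3, 5], [2, 4]],
# so the attempts handled below get spread roughly evenly across the monitoring processes.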
def main(argv):
| runnum=0
runmax=-1
spawnNum=10
numOverRide=False
if(len(argv) !=0):
numOverRide=True
numprocesses_running=subprocess.check_output(["echo `ps all -u "+runner_name+" | grep MCObserver.py | grep -v grep | wc -l`"], shell=True)
print(int(numprocesses_running))
if(int(numprocesses_running) <2 or numOverRide):
while(runnum<runmax or runmax==-1):
runnum=runnum+1
try:
queryosgjobs="SELECT * from Attempts WHERE BatchSystem='OSG' && SubmitHost=\""+MCWRAPPER_BOT_HOST_NAME+"\" && Status !='4' && Status !='3' && Status!= '6' && Status != '5' && Status != '44';"# || (Status='4' && ExitCode != 0 && ProgramFailed is NULL) ORDER BY ID desc;"
#print queryosgjobs
dbcursor.execute(queryosgjobs)
Alljobs = list(dbcursor.fetchall()) | identifier_body |
|
MCObserver.py | wishes to retain the files created by any step, you can supply the cleangenr8=0, cleangeant=0, cleanmcsmear=0, or cleanrecon=0 options. By default all but the reconstruction files are cleaned.
#
# The reconstruction step is multi-threaded; if enabled, the script will use 4 threads for this step. This threading can be changed with the "numthreads=xxx" option
#
# By default the job will run interactively in the local directory. If the user wishes to submit the jobs to swif, the option "swif=1" must be supplied.
#
# SWIF DOCUMENTATION:
# https://scicomp.jlab.org/docs/swif
# https://scicomp.jlab.org/docs/swif-cli
# https://scicomp.jlab.org/help/swif/add-job.txt #consider phase!
#
##########################################################################################################################
import MySQLdb
#import MySQLdb.cursors
from os import environ
from optparse import OptionParser
import os.path
#import mysql.connector
import time
import os
import getpass
import sys
import re
import subprocess
from subprocess import call
import socket
import glob
import json
import time
from datetime import timedelta
from datetime import datetime
import smtplib
from email.message import EmailMessage
from multiprocessing import Process
import random
import pipes
import random
import pwd
MCWRAPPER_BOT_HOST_NAME=str(socket.gethostname())
dbhost = "hallddb.jlab.org"
dbuser = 'mcuser'
dbpass = ''
dbname = 'gluex_mc'
try:
dbcnx=MySQLdb.connect(host=dbhost, user=dbuser, db=dbname)
dbcursor=dbcnx.cursor(MySQLdb.cursors.DictCursor)
except:
print("WARNING: CANNOT CONNECT TO DATABASE. JOBS WILL NOT BE CONTROLLED OR MONITORED")
pass
runner_name=pwd.getpwuid( os.getuid() )[0]
if( not (runner_name=="tbritton" or runner_name=="mcwrap")):
print("ERROR: You must be tbritton or mcwrap to run this script")
sys.exit(1)
def exists_remote(host, path):
"""Test if a file exists at path on a host accessible with SSH."""
status = subprocess.call(
['ssh', host, 'test -f {}'.format(pipes.quote(path))])
if status == 0:
return True
if status == 1:
return False
raise Exception('SSH failed')
def CheckForFile(rootLoc,expFile):
found=False
subloc="hddm"
parse_expFile=expFile.split(".")
#print(parse_expFile[len(parse_expFile)-1])
if(parse_expFile[len(parse_expFile)-1]=="root"):
subloc="root/monitoring_hists"
#if(os.path.isfile('/osgpool/halld/tbritton/REQUESTEDMC_OUTPUT/'+rootLoc+"/"+subloc+"/"+expFile) or os.path.isfile('/lustre19/expphy/cache/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) or os.path.isfile('/mss/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) ):
if(os.path.isfile('/osgpool/halld/'+runner_name+'/REQUESTEDMC_OUTPUT/'+rootLoc+"/"+subloc+"/"+expFile) or exists_remote(runner_name+'@dtn1902','/lustre19/expphy/cache/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) or exists_remote(runner_name+'@dtn1902','/mss/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) or exists_remote(runner_name+'@dtn1902','/work/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) ):
found=True
else:
print(rootLoc+"/"+subloc+"/"+expFile+" NOT FOUND")
return found
def checkJobFilesForCompletion(comp_assignment):
#OutstandingProjectsQuery="SELECT * FROM Project WHERE (Is_Dispatched != '0' && Tested != '-1' && Tested != '2' ) && Notified is NULL"
#dbcursor.execute(OutstandingProjectsQuery)
#OutstandingProjects=dbcursor.fetchall()
dbcnx_comp=MySQLdb.connect(host=dbhost, user=dbuser, db=dbname)
dbcursor_comp=dbcnx_comp.cursor(MySQLdb.cursors.DictCursor)
outdir_root="/osgpool/halld/"+runner_name+"/REQUESTEDMC_OUTPUT/"
print("checking "+str(len(comp_assignment)))
for attempt in comp_assignment:#OutstandingProjects:
jobinfoq="SELECT * from Jobs where ID="+str(attempt["Job_ID"])
dbcursor.execute(jobinfoq)
job = dbcursor.fetchall()[0]
projq="SELECT * From Project Where ID="+str(job["Project_ID"])
dbcursor.execute(projq)
proj = dbcursor.fetchall()[0]
locparts=proj['OutputLocation'].split("/")
#print("~~~~~~~~~~~~~~~~~~")
#print("ProjID:",proj['ID'])
files=[]
dirs=[]
#print locparts[len(locparts)-2]
for r, dirs, files in os.walk(outdir_root+locparts[len(locparts)-2]) :
files = [f for f in files if not f[0] == '.']
dirs[:] = [d for d in dirs if not d[0] == '.']
#print("NumFiles:",len(files))
#print(dirs)
#DISTINCT ID ------in query below
#print(fulfilledJobs)
#print("Jobs fulfilled:",str(len(fulfilledJobs)))
if(proj["Tested"]==2 or proj["Tested"]==3):
continue
rootLoc=proj['OutputLocation'].split("REQUESTED_MC")[1]#.replace("/","")
nullify_list=[]
#print("Data already Verified?",job['DataVerified'])
if(job['DataVerified'] !=0 ):
continue
STANDARD_NAME=str(job['RunNumber']).zfill(6)+'_'+str(job['FileNumber']).zfill(3)
if(proj['Generator']!="file:"):
STANDARD_NAME=proj['Generator']+'_'+STANDARD_NAME
#print(STANDARD_NAME)
#check if postprocessor is being run
postproc_append=""
if(proj['GenPostProcessing'] != None and proj['GenPostProcessing'] != ""):
print("Postprocessing:",proj['GenPostProcessing'])
postproc_append="_"+proj['GenPostProcessing'].split(":")[0]
Expected_returned_files=[]
if(str(proj['RunGeneration'])=="1" and str(proj['SaveGeneration'])=="1" and str(proj['Generator'])!="particle_gun"):
Expected_returned_files.append(STANDARD_NAME+postproc_append+".hddm")
if(str(proj['RunGeant'])=="1" and str(proj['SaveGeant'])=="1"):
Expected_returned_files.append(STANDARD_NAME+'_geant'+str(proj['GeantVersion'])+postproc_append+'.hddm')
if(str(proj['RunSmear'])=="1" and str(proj['SaveSmear'])=="1"):
Expected_returned_files.append(STANDARD_NAME+'_geant'+str(proj['GeantVersion'])+'_smeared'+postproc_append+'.hddm')
if(str(proj['RunReconstruction'])=="1" and str(proj['SaveReconstruction'])=="1"):
Expected_returned_files.append('dana_rest_'+STANDARD_NAME+postproc_append+'.hddm')
Expected_returned_files.append('hd_root_'+STANDARD_NAME+postproc_append+'.root')
found_AllexpFile=True
for expFile in Expected_returned_files:
#print(expFile)
#print("checking for",expFile,"@",rootLoc)
found=CheckForFile(rootLoc,expFile)
if not found:
#print(expFile+" NOT FOUND!!!!")
found_AllexpFile=False
break
if found_AllexpFile:
Update_q="UPDATE Attempts Set Status=44,ExitCode=0 where ID="+str(attempt["ID"])
print(Update_q)
dbcursor_comp.execute(Update_q)
dbcnx_comp.commit()
else:
continue
########################################################## MAIN ##########################################################
def array_split(lst,n):
to_return=[]
for i in range(0,n):
to_return.append([])
for count, ele in enumerate(lst):
#print(ele)
index=count%n
#print(index)
to_return[index].append(ele)
#print(count)
#print(len(to_return))
return to_return
def | (argv):
runnum=0
runmax=-1
spawnNum=10
numOverRide=False
if(len(argv) !=0):
numOverRide=True
numprocesses_running=subprocess.check_output(["echo `ps all -u "+runner_name+" | grep MCObserver.py | grep -v grep | wc -l`"], shell=True)
print(int(numprocesses_running))
if(int(numprocesses_running) <2 or numOverRide):
while(runnum<runmax or runmax==-1):
runnum=runnum+1
try:
queryosgjobs="SELECT | main | identifier_name |
MCObserver.py | wishes to retain the files created by any step, you can supply the cleangenr8=0, cleangeant=0, cleanmcsmear=0, or cleanrecon=0 options. By default all but the reconstruction files are cleaned.
#
# The reconstruction step is multi-threaded; if enabled, the script will use 4 threads for this step. This threading can be changed with the "numthreads=xxx" option
#
# By default the job will run interactively in the local directory. If the user wishes to submit the jobs to swif, the option "swif=1" must be supplied.
#
# SWIF DOCUMENTATION:
# https://scicomp.jlab.org/docs/swif
# https://scicomp.jlab.org/docs/swif-cli
# https://scicomp.jlab.org/help/swif/add-job.txt #consider phase!
#
##########################################################################################################################
import MySQLdb
#import MySQLdb.cursors
from os import environ
from optparse import OptionParser
import os.path
#import mysql.connector
import time
import os
import getpass
import sys
import re
import subprocess
from subprocess import call
import socket
import glob
import json
import time
from datetime import timedelta
from datetime import datetime
import smtplib
from email.message import EmailMessage
from multiprocessing import Process
import random
import pipes
import random
import pwd
MCWRAPPER_BOT_HOST_NAME=str(socket.gethostname())
dbhost = "hallddb.jlab.org"
dbuser = 'mcuser'
dbpass = ''
dbname = 'gluex_mc'
try:
dbcnx=MySQLdb.connect(host=dbhost, user=dbuser, db=dbname)
dbcursor=dbcnx.cursor(MySQLdb.cursors.DictCursor)
except:
print("WARNING: CANNOT CONNECT TO DATABASE. JOBS WILL NOT BE CONTROLLED OR MONITORED")
pass
runner_name=pwd.getpwuid( os.getuid() )[0]
if( not (runner_name=="tbritton" or runner_name=="mcwrap")):
print("ERROR: You must be tbritton or mcwrap to run this script")
sys.exit(1)
def exists_remote(host, path):
"""Test if a file exists at path on a host accessible with SSH."""
status = subprocess.call(
['ssh', host, 'test -f {}'.format(pipes.quote(path))])
if status == 0:
return True
if status == 1:
return False
raise Exception('SSH failed')
def CheckForFile(rootLoc,expFile):
found=False
subloc="hddm"
parse_expFile=expFile.split(".")
#print(parse_expFile[len(parse_expFile)-1])
if(parse_expFile[len(parse_expFile)-1]=="root"):
subloc="root/monitoring_hists"
#if(os.path.isfile('/osgpool/halld/tbritton/REQUESTEDMC_OUTPUT/'+rootLoc+"/"+subloc+"/"+expFile) or os.path.isfile('/lustre19/expphy/cache/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) or os.path.isfile('/mss/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) ):
if(os.path.isfile('/osgpool/halld/'+runner_name+'/REQUESTEDMC_OUTPUT/'+rootLoc+"/"+subloc+"/"+expFile) or exists_remote(runner_name+'@dtn1902','/lustre19/expphy/cache/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) or exists_remote(runner_name+'@dtn1902','/mss/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) or exists_remote(runner_name+'@dtn1902','/work/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) ):
found=True
else:
print(rootLoc+"/"+subloc+"/"+expFile+" NOT FOUND")
return found
def checkJobFilesForCompletion(comp_assignment):
#OutstandingProjectsQuery="SELECT * FROM Project WHERE (Is_Dispatched != '0' && Tested != '-1' && Tested != '2' ) && Notified is NULL"
#dbcursor.execute(OutstandingProjectsQuery)
#OutstandingProjects=dbcursor.fetchall()
dbcnx_comp=MySQLdb.connect(host=dbhost, user=dbuser, db=dbname)
dbcursor_comp=dbcnx_comp.cursor(MySQLdb.cursors.DictCursor)
outdir_root="/osgpool/halld/"+runner_name+"/REQUESTEDMC_OUTPUT/"
print("checking "+str(len(comp_assignment)))
for attempt in comp_assignment:#OutstandingProjects:
jobinfoq="SELECT * from Jobs where ID="+str(attempt["Job_ID"])
dbcursor.execute(jobinfoq)
job = dbcursor.fetchall()[0]
projq="SELECT * From Project Where ID="+str(job["Project_ID"])
dbcursor.execute(projq)
proj = dbcursor.fetchall()[0]
locparts=proj['OutputLocation'].split("/")
#print("~~~~~~~~~~~~~~~~~~")
#print("ProjID:",proj['ID'])
files=[]
dirs=[]
#print locparts[len(locparts)-2]
for r, dirs, files in os.walk(outdir_root+locparts[len(locparts)-2]) :
files = [f for f in files if not f[0] == '.']
dirs[:] = [d for d in dirs if not d[0] == '.']
#print("NumFiles:",len(files))
#print(dirs)
#DISTINCT ID ------in query below
#print(fulfilledJobs)
#print("Jobs fulfilled:",str(len(fulfilledJobs)))
if(proj["Tested"]==2 or proj["Tested"]==3):
continue
rootLoc=proj['OutputLocation'].split("REQUESTED_MC")[1]#.replace("/","")
nullify_list=[]
#print("Data already Verified?",job['DataVerified'])
if(job['DataVerified'] !=0 ):
continue
STANDARD_NAME=str(job['RunNumber']).zfill(6)+'_'+str(job['FileNumber']).zfill(3)
if(proj['Generator']!="file:"):
STANDARD_NAME=proj['Generator']+'_'+STANDARD_NAME
#print(STANDARD_NAME)
#check if postprocessor is being run
postproc_append=""
if(proj['GenPostProcessing'] != None and proj['GenPostProcessing'] != ""):
print("Postprocessing:",proj['GenPostProcessing'])
postproc_append="_"+proj['GenPostProcessing'].split(":")[0]
Expected_returned_files=[]
if(str(proj['RunGeneration'])=="1" and str(proj['SaveGeneration'])=="1" and str(proj['Generator'])!="particle_gun"):
Expected_returned_files.append(STANDARD_NAME+postproc_append+".hddm")
if(str(proj['RunGeant'])=="1" and str(proj['SaveGeant'])=="1"):
Expected_returned_files.append(STANDARD_NAME+'_geant'+str(proj['GeantVersion'])+postproc_append+'.hddm')
if(str(proj['RunSmear'])=="1" and str(proj['SaveSmear'])=="1"):
Expected_returned_files.append(STANDARD_NAME+'_geant'+str(proj['GeantVersion'])+'_smeared'+postproc_append+'.hddm')
if(str(proj['RunReconstruction'])=="1" and str(proj['SaveReconstruction'])=="1"):
Expected_returned_files.append('dana_rest_'+STANDARD_NAME+postproc_append+'.hddm')
Expected_returned_files.append('hd_root_'+STANDARD_NAME+postproc_append+'.root')
found_AllexpFile=True
for expFile in Expected_returned_files:
#print(expFile)
#print("checking for",expFile,"@",rootLoc)
found=CheckForFile(rootLoc,expFile)
if not found:
#print(expFile+" NOT FOUND!!!!")
found_AllexpFile=False
break
if found_AllexpFile:
Update_q="UPDATE Attempts Set Status=44,ExitCode=0 where ID="+str(attempt["ID"])
print(Update_q)
dbcursor_comp.execute(Update_q)
dbcnx_comp.commit()
else:
continue
########################################################## MAIN ##########################################################
def array_split(lst,n):
to_return=[]
for i in range(0,n):
to_return.append([])
for count, ele in enumerate(lst):
#print(ele)
index=count%n
#print(index)
to_return[index].append(ele)
#print(count)
#print(len(to_return))
return to_return
def main(argv):
runnum=0
runmax=-1
spawnNum=10
numOverRide=False |
print(int(numprocesses_running))
if(int(numprocesses_running) <2 or numOverRide):
while(runnum<runmax or runmax==-1):
runnum=runnum+1
try:
queryosgjobs="SELECT |
if(len(argv) !=0):
numOverRide=True
numprocesses_running=subprocess.check_output(["echo `ps all -u "+runner_name+" | grep MCObserver.py | grep -v grep | wc -l`"], shell=True) | random_line_split |
MCObserver.py | lddb.jlab.org"
dbuser = 'mcuser'
dbpass = ''
dbname = 'gluex_mc'
try:
dbcnx=MySQLdb.connect(host=dbhost, user=dbuser, db=dbname)
dbcursor=dbcnx.cursor(MySQLdb.cursors.DictCursor)
except:
print("WARNING: CANNOT CONNECT TO DATABASE. JOBS WILL NOT BE CONTROLLED OR MONITORED")
pass
runner_name=pwd.getpwuid( os.getuid() )[0]
if( not (runner_name=="tbritton" or runner_name=="mcwrap")):
print("ERROR: You must be tbritton or mcwrap to run this script")
sys.exit(1)
def exists_remote(host, path):
"""Test if a file exists at path on a host accessible with SSH."""
status = subprocess.call(
['ssh', host, 'test -f {}'.format(pipes.quote(path))])
if status == 0:
return True
if status == 1:
return False
raise Exception('SSH failed')
def CheckForFile(rootLoc,expFile):
found=False
subloc="hddm"
parse_expFile=expFile.split(".")
#print(parse_expFile[len(parse_expFile)-1])
if(parse_expFile[len(parse_expFile)-1]=="root"):
subloc="root/monitoring_hists"
#if(os.path.isfile('/osgpool/halld/tbritton/REQUESTEDMC_OUTPUT/'+rootLoc+"/"+subloc+"/"+expFile) or os.path.isfile('/lustre19/expphy/cache/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) or os.path.isfile('/mss/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) ):
if(os.path.isfile('/osgpool/halld/'+runner_name+'/REQUESTEDMC_OUTPUT/'+rootLoc+"/"+subloc+"/"+expFile) or exists_remote(runner_name+'@dtn1902','/lustre19/expphy/cache/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) or exists_remote(runner_name+'@dtn1902','/mss/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) or exists_remote(runner_name+'@dtn1902','/work/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) ):
found=True
else:
print(rootLoc+"/"+subloc+"/"+expFile+" NOT FOUND")
return found
def checkJobFilesForCompletion(comp_assignment):
#OutstandingProjectsQuery="SELECT * FROM Project WHERE (Is_Dispatched != '0' && Tested != '-1' && Tested != '2' ) && Notified is NULL"
#dbcursor.execute(OutstandingProjectsQuery)
#OutstandingProjects=dbcursor.fetchall()
dbcnx_comp=MySQLdb.connect(host=dbhost, user=dbuser, db=dbname)
dbcursor_comp=dbcnx_comp.cursor(MySQLdb.cursors.DictCursor)
outdir_root="/osgpool/halld/"+runner_name+"/REQUESTEDMC_OUTPUT/"
print("checking "+str(len(comp_assignment)))
for attempt in comp_assignment:#OutstandingProjects:
jobinfoq="SELECT * from Jobs where ID="+str(attempt["Job_ID"])
dbcursor.execute(jobinfoq)
job = dbcursor.fetchall()[0]
projq="SELECT * From Project Where ID="+str(job["Project_ID"])
dbcursor.execute(projq)
proj = dbcursor.fetchall()[0]
locparts=proj['OutputLocation'].split("/")
#print("~~~~~~~~~~~~~~~~~~")
#print("ProjID:",proj['ID'])
files=[]
dirs=[]
#print locparts[len(locparts)-2]
for r, dirs, files in os.walk(outdir_root+locparts[len(locparts)-2]) :
files = [f for f in files if not f[0] == '.']
dirs[:] = [d for d in dirs if not d[0] == '.']
#print("NumFiles:",len(files))
#print(dirs)
#DISTINCT ID ------in query below
#print(fulfilledJobs)
#print("Jobs fulfilled:",str(len(fulfilledJobs)))
if(proj["Tested"]==2 or proj["Tested"]==3):
continue
rootLoc=proj['OutputLocation'].split("REQUESTED_MC")[1]#.replace("/","")
nullify_list=[]
#print("Data already Verified?",job['DataVerified'])
if(job['DataVerified'] !=0 ):
continue
STANDARD_NAME=str(job['RunNumber']).zfill(6)+'_'+str(job['FileNumber']).zfill(3)
if(proj['Generator']!="file:"):
STANDARD_NAME=proj['Generator']+'_'+STANDARD_NAME
#print(STANDARD_NAME)
#check if postprocessor is being run
postproc_append=""
if(proj['GenPostProcessing'] != None and proj['GenPostProcessing'] != ""):
print("Postprocessing:",proj['GenPostProcessing'])
postproc_append="_"+proj['GenPostProcessing'].split(":")[0]
Expected_returned_files=[]
if(str(proj['RunGeneration'])=="1" and str(proj['SaveGeneration'])=="1" and str(proj['Generator'])!="particle_gun"):
Expected_returned_files.append(STANDARD_NAME+postproc_append+".hddm")
if(str(proj['RunGeant'])=="1" and str(proj['SaveGeant'])=="1"):
Expected_returned_files.append(STANDARD_NAME+'_geant'+str(proj['GeantVersion'])+postproc_append+'.hddm')
if(str(proj['RunSmear'])=="1" and str(proj['SaveSmear'])=="1"):
Expected_returned_files.append(STANDARD_NAME+'_geant'+str(proj['GeantVersion'])+'_smeared'+postproc_append+'.hddm')
if(str(proj['RunReconstruction'])=="1" and str(proj['SaveReconstruction'])=="1"):
Expected_returned_files.append('dana_rest_'+STANDARD_NAME+postproc_append+'.hddm')
Expected_returned_files.append('hd_root_'+STANDARD_NAME+postproc_append+'.root')
found_AllexpFile=True
for expFile in Expected_returned_files:
#print(expFile)
#print("checking for",expFile,"@",rootLoc)
found=CheckForFile(rootLoc,expFile)
if not found:
#print(expFile+" NOT FOUND!!!!")
found_AllexpFile=False
break
if found_AllexpFile:
Update_q="UPDATE Attempts Set Status=44,ExitCode=0 where ID="+str(attempt["ID"])
print(Update_q)
dbcursor_comp.execute(Update_q)
dbcnx_comp.commit()
else:
continue
########################################################## MAIN ##########################################################
def array_split(lst,n):
to_return=[]
for i in range(0,n):
to_return.append([])
for count, ele in enumerate(lst):
#print(ele)
index=count%n
#print(index)
to_return[index].append(ele)
#print(count)
#print(len(to_return))
return to_return
def main(argv):
runnum=0
runmax=-1
spawnNum=10
numOverRide=False
if(len(argv) !=0):
numOverRide=True
numprocesses_running=subprocess.check_output(["echo `ps all -u "+runner_name+" | grep MCObserver.py | grep -v grep | wc -l`"], shell=True)
print(int(numprocesses_running))
if(int(numprocesses_running) <2 or numOverRide):
while(runnum<runmax or runmax==-1):
runnum=runnum+1
try:
queryosgjobs="SELECT * from Attempts WHERE BatchSystem='OSG' && SubmitHost=\""+MCWRAPPER_BOT_HOST_NAME+"\" && Status !='4' && Status !='3' && Status!= '6' && Status != '5' && Status != '44';"# || (Status='4' && ExitCode != 0 && ProgramFailed is NULL) ORDER BY ID desc;"
#print queryosgjobs
dbcursor.execute(queryosgjobs)
Alljobs = list(dbcursor.fetchall())
#print(Alljobs[:5])
random.shuffle(Alljobs)
#print(Alljobs[:5])
Monitoring_assignments=array_split(Alljobs,spawnNum)
spawns=[]
for i in range(0,spawnNum):
time.sleep(random.randint(1,spawnNum))
print("block "+str(i))
print(len(Monitoring_assignments[i]))
if(len(Monitoring_assignments[i])>0):
p=Process(target=checkJobFilesForCompletion,args=(Monitoring_assignments[i],))
p.daemon = True
spawns.append(p)
#p.join()
for i in range(0,len(spawns)):
#print("join "+str(i))
time.sleep(random.randint(1,spawnNum))
spawns[i].start()
#time.sleep(2)
for i in range(0,len(spawns)):
| if spawns[i].is_alive():
#print("join "+str(i))
spawns[i].join() | conditional_block |
|
connector.js | Body = "<div id=\"visualization\"></div>\n";
element.innerHTML = appBody;
//Add a custom selection style for items
// var customSelectionStyle = ".vis-item.vis-selected { box-shadow: 0 0 30px black; }"
// addStyle(customSelectionStyle);
var now = Date.now();
var minusDay = 0.5
var plusDay = 0.5
var options = {
maxHeight: 400,
stack: true, //Whether items may stack on top of each other - with false they overlap
groupEditable: true, //Whether groups can be dragged
horizontalScroll: true,
verticalScroll: true,
zoomKey: "ctrlKey",
orientation: { //Axis settings
axis: "top",
item: "top",
},
showCurrentTime: false,
moment: function (date) {
return vis.moment(date).utc(); //Show the timeline in UTC (Greenwich time), otherwise it shifts depending on the local time zone
},
start: Date.now() - 1000 * 60 * 60 * 24 * minusDay, // minus days
end: Date.now() + 1000 * 60 * 60 * 24 * plusDay, // plus days
};
var items = new vis.DataSet();
var groups = new vis.DataSet();
var lastSelectedItem = "";
var windowStartTime = "";
var windowEndTime = "";
function isContainsStyleInHtml(styleName){
var styletags = document.getElementsByTagName("style");
//loop over all the style tags
for(var i = 0; i < styletags.length; i++)
{
var selectedStyle = styletags[i].innerHTML;
// console.log(styletags[i].innerHTML)
if(selectedStyle.includes(styleName)){
// console.log("Contains" +styletags[i].innerHTML +"||");
return true;
}
else{
// console.log("Not contains");
}
}
return false;
}
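// isContainsStyleInHtml scans every <style> tag already on the page, so a rule for a given
// item class (e.g. ".vis-item.bg-<id>") is only injected once per item.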
// Handle changes from the server-side
connector.onStateChange = function () {
var state = connector.getState();
var data = state.data;
items = data.usesItems
console.log("Items", items);
groups = data.usesGroups
lastSelectedItem = data.lastSelectedItem
windowStartTime = data.windowStartTime
windowEndTime = data.windowEndTime
console.log("State data: ", data);
items.forEach(function(item, i, arr) {
var color = item.color
if(color==null || color=="")
{
color = getRandomColor();
}
var temple = '.vis-background.';
if(item.itemType=="item"){
temple = '.vis-item.';
}
var styleName = temple+'bg-'+item.id;
var isContains = isContainsStyleInHtml(styleName);
if(isContains){
console.log("Contains " +styleName+"||");
}
else{
console.log("Not contains "+styleName);
addStyle(styleName+'{background-color:'+color+'; color:white; font-size:14px;}')
}
});
timeline.setGroups(groups);
timeline.setItems(items);
//Move the window to the last saved position
if(windowStartTime ===null || windowStartTime === "" ||windowEndTime ===null || windowEndTime === "" ){
}
else{
console.log("Set windowStartTime ", windowStartTime);
console.log("Set windowEndTime ", windowEndTime);
// timeline.setWindow(windowStartTime,windowEndTime);
options.start = windowStartTime;
options.end = windowEndTime;
timeline.setOptions(options);
}
if(lastSelectedItem === null || lastSelectedItem === ""){
}
else{
//Move to the last selected item
console.log("Last selected item id for move ",lastSelectedItem)
timeline.setSelection(lastSelectedItem);
setTimeout(() =>{
moveToItem(lastSelectedItem, timeline);
console.log("moveToItem is end ");
setTimeout(() =>{
options.start = windowStartTime;
options.end = windowEndTime;
timeline.setOptions(options);
}, 600);
}, 300);
} | timeline = new vis.Timeline(container);
timeline.setOptions(options);
timeline.setGroups(groups);
timeline.setItems(items);
timeline.on("click", (e) => { connector.onClick(e) });
timeline.on("dblclick", (e) => { connector.onDoubleClick(e); });
timeline.on('select', function (e) {
console.log('selected items: ' + e.items);
var itemID = e.items[0];
if(itemID === null || itemID === ""){
console.log("Empty object");
timeline.setSelection(lastSelectedItem);
}
else{
console.log("Object with ID "+itemID);
lastSelectedItem = itemID
connector.onItemClick(itemID);
}
});
timeline.on("rangechanged", function (properties) {
// console.log("rangechanged", properties);
console.log("Save windowStartTime", properties.start);
console.log("Save windowEndTime", properties.end);
connector.onRangeChanged(properties.start,properties.end);
});
function getRandomColor() {
return "#"+((1<<24)*Math.random()|0).toString(16);
};
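// getRandomColor draws a random 24-bit integer and formats it as hex; note it does not
// zero-pad, so the result can occasionally be shorter than 6 digits.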
function addStyle(styleText) {
var style = document.createElement('style');
style.type = 'text/css';
style.innerHTML = styleText;
document.getElementsByTagName('head')[0].appendChild(style);
};
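// Illustrative injected rule (the item id and color are made up):
//   addStyle('.vis-item.bg-42{background-color:#1e90ff; color:white; font-size:14px;}')
// which matches the per-item pattern built in onStateChange above.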
/* Work-Around */
// This is a quick-and-dirty animation for scrolling
var animateScroll = function(from, to, duration, timeline) {
var initTime = new Date().valueOf();
//var duration = 500;
var easingFunction = function(t) {
return t < .5 ? 2 * t * t : -1 + (4 - 2 * t) * t
};
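// This is the standard easeInOutQuad curve: quadratic ease-in for t < 0.5, ease-out for
// t >= 0.5, mapping t in [0,1] to [0,1].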
var defer = $.Deferred();
var next = function() {
var now = new Date().valueOf();
var time = now - initTime;
var ease = easingFunction(time / duration);
var done = time > duration;
var s = done ? to : (from + (to - from) * ease);
timeline._setScrollTop(-s);
timeline._redraw();
if (!done) {
//setTimeout(next, 20);
window.requestAnimationFrame(next);
} else {
defer.resolve();
}
};
next();
return defer.promise();
};
var moveToItem = function(eventId, timeline, duration) {
console.log("moveToItem:", lastSelectedItem);
duration = 200;
var event = timeline.itemSet.items[eventId];
var leftHeight = timeline.props.leftContainer.height;
var contentHeight = timeline.props.left.height;
var alreadyVisible = false;
if (event.displayed) {
alreadyVisible = true;
if (!event.selected) {
timeline.setSelection(eventId);
}
}
var groupId = event.data.group;
var group = timeline.itemSet.groups[groupId] || {
top: 0,
height: 0
}; // Use a default if we don't have a group
var offset = group.top;
var orientation = timeline.timeAxis.options.orientation.axis;
var eventTop = function(event, group) {
if (orientation == "bottom") {
return group.height - event.top - event.height;
} else {
return event.top;
}
};
var currentScrollHeight = timeline._getScrollTop() * -1;
var targetOffset = offset + eventTop(event, group);
var height = event.height;
if (targetOffset < currentScrollHeight) {
if (offset + leftHeight <= offset + eventTop(event, group) + height) {
offset += eventTop(event, group) - timeline.itemSet.options.margin.item.vertical;
}
} else {
if (targetOffset + height > currentScrollHeight + leftHeight) {
offset += eventTop(event, group) + height - leftHeight + timeline.itemSet.options.margin.item.vertical;
}
}
offset = Math.min(offset, contentHeight - leftHeight);
if (targetOffset + height > currentScrollHeight + leftHeight || targetOffset < currentScrollHeight) {
animateScroll(currentScrollHeight, offset, duration, timeline);
timeline.setSelection(eventId);
timeline.focus(eventId);
}
};
function funForCall(){
console.log("call a funForCall function from java !")
}
// function debounce(func, wait = 100) {
// let timeout;
// return function (...args) {
// clearTimeout(timeout);
// timeout = setTimeout(() => {
// func.apply(this, args);
// }, wait);
// };
// }
//
// let groupFocus = (e) => {
// let vGroups = timeline.getVisibleGroups();
// let vItems = vGroups.reduce((res, groupId) => {
// let group = timeline.itemSet.groups[groupId];
// if (group.items) {
// res = res.concat(Object.keys(group.items));
// }
// return res;
// }, []);
// timeline.focus(vItems);
// };
// this.timeline.on | };
// create a Timeline
var container = document.getElementById("visualization"); | random_line_split |
connector.js | = "<div id=\"visualization\"></div>\n";
element.innerHTML = appBody;
//Add a custom selection style for items
// var customSelectionStyle = ".vis-item.vis-selected { box-shadow: 0 0 30px black; }"
// addStyle(customSelectionStyle);
var now = Date.now();
var minusDay = 0.5
var plusDay = 0.5
var options = {
maxHeight: 400,
stack: true, //Whether items may stack on top of each other - with false they overlap
groupEditable: true, //Whether groups can be dragged
horizontalScroll: true,
verticalScroll: true,
zoomKey: "ctrlKey",
orientation: { //Axis settings
axis: "top",
item: "top",
},
showCurrentTime: false,
moment: function (date) {
return vis.moment(date).utc(); //Show the timeline in UTC (Greenwich time), otherwise it shifts depending on the local time zone
},
start: Date.now() - 1000 * 60 * 60 * 24 * minusDay, // minus days
end: Date.now() + 1000 * 60 * 60 * 24 * plusDay, // plus days
};
var items = new vis.DataSet();
var groups = new vis.DataSet();
var lastSelectedItem = "";
var windowStartTime = "";
var windowEndTime = "";
function isContainsStyleInHtml(styleName){
var styletags = document.getElementsByTagName("style");
//loop over all the style tags
for(var i = 0; i < styletags.length; i | var selectedStyle = styletags[i].innerHTML;
// console.log(styletags[i].innerHTML)
if(selectedStyle.includes(styleName)){
// console.log("Contains" +styletags[i].innerHTML +"||");
return true;
}
else{
// console.log("Not contains");
}
}
return false;
}
// Handle changes from the server-side
connector.onStateChange = function () {
var state = connector.getState();
var data = state.data;
items = data.usesItems
console.log("Items", items);
groups = data.usesGroups
lastSelectedItem = data.lastSelectedItem
windowStartTime = data.windowStartTime
windowEndTime = data.windowEndTime
console.log("State data: ", data);
items.forEach(function(item, i, arr) {
var color = item.color
if(color==null || color=="")
{
color = getRandomColor();
}
var temple = '.vis-background.';
if(item.itemType=="item"){
temple = '.vis-item.';
}
var styleName = temple+'bg-'+item.id;
var isContains = isContainsStyleInHtml(styleName);
if(isContains){
console.log("Contains " +styleName+"||");
}
else{
console.log("Not contains "+styleName);
addStyle(styleName+'{background-color:'+color+'; color:white; font-size:14px;}')
}
});
timeline.setGroups(groups);
timeline.setItems(items);
//Move the window to the last saved position
if(windowStartTime ===null || windowStartTime === "" ||windowEndTime ===null || windowEndTime === "" ){
}
else{
console.log("Set windowStartTime ", windowStartTime);
console.log("Set windowEndTime ", windowEndTime);
// timeline.setWindow(windowStartTime,windowEndTime);
options.start = windowStartTime;
options.end = windowEndTime;
timeline.setOptions(options);
}
if(lastSelectedItem === null || lastSelectedItem === ""){
}
else{
//Move to the last selected item
console.log("Last selected item id for move ",lastSelectedItem)
timeline.setSelection(lastSelectedItem);
setTimeout(() =>{
moveToItem(lastSelectedItem, timeline);
console.log("moveToItem is end ");
setTimeout(() =>{
options.start = windowStartTime;
options.end = windowEndTime;
timeline.setOptions(options);
}, 600);
}, 300);
}
};
// create a Timeline
var container = document.getElementById("visualization");
timeline = new vis.Timeline(container);
timeline.setOptions(options);
timeline.setGroups(groups);
timeline.setItems(items);
timeline.on("click", (e) => { connector.onClick(e) });
timeline.on("dblclick", (e) => { connector.onDoubleClick(e); });
timeline.on('select', function (e) {
console.log('selected items: ' + e.items);
var itemID = e.items[0];
if(itemID === null || itemID === ""){
console.log("Empty object");
timeline.setSelection(lastSelectedItem);
}
else{
console.log("Object with ID "+itemID);
lastSelectedItem = itemID
connector.onItemClick(itemID);
}
});
timeline.on("rangechanged", function (properties) {
// console.log("rangechanged", properties);
console.log("Save windowStartTime", properties.start);
console.log("Save windowEndTime", properties.end);
connector.onRangeChanged(properties.start,properties.end);
});
function getRandomColor() {
return "#"+((1<<24)*Math.random()|0).toString(16);
};
function addStyle(styleText) {
var style = document.createElement('style');
style.type = 'text/css';
style.innerHTML = styleText;
document.getElementsByTagName('head')[0].appendChild(style);
};
/* Work-Around */
// This is a quick-and-dirty animation for scrolling
var animateScroll = function(from, to, duration, timeline) {
var initTime = new Date().valueOf();
//var duration = 500;
var easingFunction = function(t) {
return t < .5 ? 2 * t * t : -1 + (4 - 2 * t) * t
};
var defer = $.Deferred();
var next = function() {
var now = new Date().valueOf();
var time = now - initTime;
var ease = easingFunction(time / duration);
var done = time > duration;
var s = done ? to : (from + (to - from) * ease);
timeline._setScrollTop(-s);
timeline._redraw();
if (!done) {
//setTimeout(next, 20);
window.requestAnimationFrame(next);
} else {
defer.resolve();
}
};
next();
return defer.promise();
};
var moveToItem = function(eventId, timeline, duration) {
console.log("moveToItem:", lastSelectedItem);
duration = 200;
var event = timeline.itemSet.items[eventId];
var leftHeight = timeline.props.leftContainer.height;
var contentHeight = timeline.props.left.height;
var alreadyVisible = false;
if (event.displayed) {
alreadyVisible = true;
if (!event.selected) {
timeline.setSelection(eventId);
}
}
var groupId = event.data.group;
var group = timeline.itemSet.groups[groupId] || {
top: 0,
height: 0
}; // Use a default if we don't have a group
var offset = group.top;
var orientation = timeline.timeAxis.options.orientation.axis;
var eventTop = function(event, group) {
if (orientation == "bottom") {
return group.height - event.top - event.height;
} else {
return event.top;
}
};
var currentScrollHeight = timeline._getScrollTop() * -1;
var targetOffset = offset + eventTop(event, group);
var height = event.height;
if (targetOffset < currentScrollHeight) {
if (offset + leftHeight <= offset + eventTop(event, group) + height) {
offset += eventTop(event, group) - timeline.itemSet.options.margin.item.vertical;
}
} else {
if (targetOffset + height > currentScrollHeight + leftHeight) {
offset += eventTop(event, group) + height - leftHeight + timeline.itemSet.options.margin.item.vertical;
}
}
offset = Math.min(offset, contentHeight - leftHeight);
if (targetOffset + height > currentScrollHeight + leftHeight || targetOffset < currentScrollHeight) {
animateScroll(currentScrollHeight, offset, duration, timeline);
timeline.setSelection(eventId);
timeline.focus(eventId);
}
};
function funForCall(){
console.log("call a funForCall function from java !")
}
// function debounce(func, wait = 100) {
// let timeout;
// return function (...args) {
// clearTimeout(timeout);
// timeout = setTimeout(() => {
// func.apply(this, args);
// }, wait);
// };
// }
//
// let groupFocus = (e) => {
// let vGroups = timeline.getVisibleGroups();
// let vItems = vGroups.reduce((res, groupId) => {
// let group = timeline.itemSet.groups[groupId];
// if (group.items) {
// res = res.concat(Object.keys(group.items));
// }
// return res;
// }, []);
// timeline.focus(vItems);
// };
// this.timeline.on | ++)
{
| identifier_name |
connector.js | = "<div id=\"visualization\"></div>\n";
element.innerHTML = appBody;
//Add a custom selection style for items
// var customSelectionStyle = ".vis-item.vis-selected { box-shadow: 0 0 30px black; }"
// addStyle(customSelectionStyle);
var now = Date.now();
var minusDay = 0.5
var plusDay = 0.5
var options = {
maxHeight: 400,
stack: true, //Whether items may stack on top of each other - with false they overlap
groupEditable: true, //Whether groups can be dragged
horizontalScroll: true,
verticalScroll: true,
zoomKey: "ctrlKey",
orientation: { //Axis settings
axis: "top",
item: "top",
},
showCurrentTime: false,
moment: function (date) {
return vis.moment(date).utc(); //Show the timeline in UTC (Greenwich time), otherwise it shifts depending on the local time zone
},
start: Date.now() - 1000 * 60 * 60 * 24 * minusDay, // minus days
end: Date.now() + 1000 * 60 * 60 * 24 * plusDay, // plus days
};
var items = new vis.DataSet();
var groups = new vis.DataSet();
var lastSelectedItem = "";
var windowStartTime = "";
var windowEndTime = "";
function isContainsStyleInHtml(styleName){
var styletags = document.getElementsByTagName("style");
//loop over all the style tags
for(var i = 0; i < styletags.length; i++)
{
| s = data.usesItems
console.log("Items", items);
groups = data.usesGroups
lastSelectedItem = data.lastSelectedItem
windowStartTime = data.windowStartTime
windowEndTime = data.windowEndTime
console.log("State data: ", data);
items.forEach(function(item, i, arr) {
var color = item.color
if(color==null || color=="")
{
color = getRandomColor();
}
var temple = '.vis-background.';
if(item.itemType=="item"){
temple = '.vis-item.';
}
var styleName = temple+'bg-'+item.id;
var isContains = isContainsStyleInHtml(styleName);
if(isContains){
console.log("Contains " +styleName+"||");
}
else{
console.log("Not contains "+styleName);
addStyle(styleName+'{background-color:'+color+'; color:white; font-size:14px;}')
}
});
timeline.setGroups(groups);
timeline.setItems(items);
//Move the window to the last saved position
if(windowStartTime ===null || windowStartTime === "" ||windowEndTime ===null || windowEndTime === "" ){
}
else{
console.log("Set windowStartTime ", windowStartTime);
console.log("Set windowEndTime ", windowEndTime);
// timeline.setWindow(windowStartTime,windowEndTime);
options.start = windowStartTime;
options.end = windowEndTime;
timeline.setOptions(options);
}
if(lastSelectedItem === null || lastSelectedItem === ""){
}
else{
//Move to the last selected item
console.log("Last selected item id for move ",lastSelectedItem)
timeline.setSelection(lastSelectedItem);
setTimeout(() =>{
moveToItem(lastSelectedItem, timeline);
console.log("moveToItem is end ");
setTimeout(() =>{
options.start = windowStartTime;
options.end = windowEndTime;
timeline.setOptions(options);
}, 600);
}, 300);
}
};
// create a Timeline
var container = document.getElementById("visualization");
timeline = new vis.Timeline(container);
timeline.setOptions(options);
timeline.setGroups(groups);
timeline.setItems(items);
timeline.on("click", (e) => { connector.onClick(e) });
timeline.on("dblclick", (e) => { connector.onDoubleClick(e); });
timeline.on('select', function (e) {
console.log('selected items: ' + e.items);
var itemID = e.items[0];
if(itemID === null || itemID === ""){
console.log("Empty object");
timeline.setSelection(lastSelectedItem);
}
else{
console.log("Object with ID "+itemID);
lastSelectedItem = itemID
connector.onItemClick(itemID);
}
});
timeline.on("rangechanged", function (properties) {
// console.log("rangechanged", properties);
console.log("Save windowStartTime", properties.start);
console.log("Save windowEndTime", properties.end);
connector.onRangeChanged(properties.start,properties.end);
});
function getRandomColor() {
return "#"+((1<<24)*Math.random()|0).toString(16);
};
function addStyle(styleText) {
var style = document.createElement('style');
style.type = 'text/css';
style.innerHTML = styleText;
document.getElementsByTagName('head')[0].appendChild(style);
};
/* Work-Around */
// This is a quick-and-dirty animation for scrolling
var animateScroll = function(from, to, duration, timeline) {
var initTime = new Date().valueOf();
//var duration = 500;
var easingFunction = function(t) {
return t < .5 ? 2 * t * t : -1 + (4 - 2 * t) * t
};
var defer = $.Deferred();
var next = function() {
var now = new Date().valueOf();
var time = now - initTime;
var ease = easingFunction(time / duration);
var done = time > duration;
var s = done ? to : (from + (to - from) * ease);
timeline._setScrollTop(-s);
timeline._redraw();
if (!done) {
//setTimeout(next, 20);
window.requestAnimationFrame(next);
} else {
defer.resolve();
}
};
next();
return defer.promise();
};
var moveToItem = function(eventId, timeline, duration) {
console.log("moveToItem:", lastSelectedItem);
duration = 200;
var event = timeline.itemSet.items[eventId];
var leftHeight = timeline.props.leftContainer.height;
var contentHeight = timeline.props.left.height;
var alreadyVisible = false;
if (event.displayed) {
alreadyVisible = true;
if (!event.selected) {
timeline.setSelection(eventId);
}
}
var groupId = event.data.group;
var group = timeline.itemSet.groups[groupId] || {
top: 0,
height: 0
}; // Use a default if we don't have a group
var offset = group.top;
var orientation = timeline.timeAxis.options.orientation.axis;
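// With a bottom-oriented axis the vertical origin is flipped, so measure the item's offset from the bottom of its group.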
var eventTop = function(event, group) {
if (orientation == "bottom") {
return group.height - event.top - event.height;
} else {
return event.top;
}
};
var currentScrollHeight = timeline._getScrollTop() * -1;
var targetOffset = offset + eventTop(event, group);
var height = event.height;
if (targetOffset < currentScrollHeight) {
if (offset + leftHeight <= offset + eventTop(event, group) + height) {
offset += eventTop(event, group) - timeline.itemSet.options.margin.item.vertical;
}
} else {
if (targetOffset + height > currentScrollHeight + leftHeight) {
offset += eventTop(event, group) + height - leftHeight + timeline.itemSet.options.margin.item.vertical;
}
}
offset = Math.min(offset, contentHeight - leftHeight);
if (targetOffset + height > currentScrollHeight + leftHeight || targetOffset < currentScrollHeight) {
animateScroll(currentScrollHeight, offset, duration, timeline);
timeline.setSelection(eventId);
timeline.focus(eventId);
}
};
function funForCall(){
console.log("call a funForCall function from java !")
}
// function debounce(func, wait = 100) {
// let timeout;
// return function (...args) {
// clearTimeout(timeout);
// timeout = setTimeout(() => {
// func.apply(this, args);
// }, wait);
// };
// }
//
// let groupFocus = (e) => {
// let vGroups = timeline.getVisibleGroups();
// let vItems = vGroups.reduce((res, groupId) => {
// let group = timeline.itemSet.groups[groupId];
// if (group.items) {
// res = res.concat(Object.keys(group.items));
// }
// return res;
// }, []);
// timeline.focus(vItems);
// };
// this.timeline.on | var selectedStyle = styletags[i].innerHTML;
// console.log(styletags[i].innerHTML)
if(selectedStyle.includes(styleName)){
// console.log("Contains" +styletags[i].innerHTML +"||");
return true;
}
else{
// console.log("Not contains");
}
}
return false;
}
// Handle changes from the server-side
connector.onStateChange = function () {
var state = connector.getState();
var data = state.data;
item | identifier_body |
connector.js | = "<div id=\"visualization\"></div>\n";
element.innerHTML = appBody;
//Add a custom selection style for items
// var customSelectionStyle = ".vis-item.vis-selected { box-shadow: 0 0 30px black; }"
// addStyle(customSelectionStyle);
var now = Date.now();
var minusDay = 0.5
var plusDay = 0.5
var options = {
maxHeight: 400,
stack: true, //whether items are stacked on top of each other; with false they can overlap
groupEditable: true, //whether groups can be dragged
horizontalScroll: true,
verticalScroll: true,
zoomKey: "ctrlKey",
orientation: { //axis settings
axis: "top",
item: "top",
},
showCurrentTime: false,
moment: function (date) {
return vis.moment(date).utc(); //show the timeline in UTC, otherwise it is shifted depending on the local time zone
},
start: Date.now() - 1000 * 60 * 60 * 24 * minusDay, // minus days
end: Date.now() + 1000 * 60 * 60 * 24 * plusDay, // plus days
};
var items = new vis.DataSet();
var groups = new vis.DataSet();
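// Both stay empty until the server pushes data through connector.onStateChange below.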
var lastSelectedItem = "";
var windowStartTime = "";
var windowEndTime = "";
function isContainsStyleInHtml(styleName){
var styletags = document.getElementsByTagName("style");
//loop over all the style tags
for(var i = 0; i < styletags.length; i++)
{
var selectedStyle = styletags[i].innerHTML;
// console.log(styletags[i].innerHTML)
if(selectedStyle.includes(styleName)){
// console.log("Contains" +styletags[i].innerHTML +"||");
return true;
}
else{
// console.log("Not contains");
}
}
return false;
}
// Handle changes from the server-side
connector.onStateChange = function () {
var state = connector.getState();
var data = state.data;
items = data.usesItems
console.log("Items", items);
groups = data.usesGroups
lastSelectedItem = data.lastSelectedItem
windowStartTime = data.windowStartTime
windowEndTime = data.windowEndTime
console.log("State data: ", data);
items.forEach(function(item, i, arr) {
var color = item.color
if(color==null || color=="")
{
color = getRandomColor();
}
var temple = '.vis-background.';
if(item.itemType=="item"){
temple = '.vis-item.';
}
var styleName = temple+'bg-'+item.id;
var isContains = isContainsStyleInHtml(styleName);
if(isContains){
console.log("Contains " +styleName+"||");
}
else{
console.log("Not contains "+styleName);
addStyle(styleName+'{background-color:'+color+'; color:white; font-size:14px;}')
}
});
timeline.setGroups(groups);
timeline.setItems(items);
//Shift the window to its last saved position
if(windowStartTime ===null || windowStartTime === "" ||windowEndTime ===null || windowEndTime === "" ){
}
else{
console.log("Set windowStartTime ", windowStartTime);
console.log("Set windowEndTime ", windowEndTime);
// timeline.setWindow(windowStartTime,windowEndTime);
options.start = windowStartTime;
options.end = windowEndTime;
timeline.setOptions(options);
}
if(lastSelectedItem === null || lastSelectedItem === ""){
}
else{
//Move to the last selected item
console.log("Last selected item id for move ",lastSelectedItem)
timeline.setSelection(lastSelectedItem);
setTimeout(() =>{
moveToItem(lastSelectedItem, timeline);
console.log("moveToItem is end ");
setTimeout(() =>{
options.start = windowStartTime;
options.end = windowEndTime;
timeline.setOptions(options);
}, 600);
}, 300);
}
};
// create a Timeline
var container = document.getElementById("visualization");
timeline = new vis.Timeline(container);
timeline.setOptions(options);
timeline.setGroups(groups);
timeline.setItems(items);
timeline.on("click", (e) => { connector.onClick(e) });
timeline.on("dblclick", (e) => { connector.onDoubleClick(e); });
timeline.on('select', function (e) {
console.log('selected items: ' + e.items);
var itemID = e.items[0];
if(itemID === null || itemID === ""){
console.log("Empty object");
timeline.setSelection(lastSelectedItem);
}
else{
console.log("Object with ID "+itemID);
lastSelectedItem = itemID
connector.onItemClick(itemID);
}
});
timeline.on("rangechanged", function (properties) {
// console.log("rangechanged", properties);
console.log("Save windowStartTime", properties.start);
console.log("Save windowEndTime", properties.end);
connector.onRangeChanged(properties.start,properties.end);
});
function getRandomColor() {
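// Pick a random 24-bit value and format it as a CSS hex colour string.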
return "#"+((1<<24)*Math.random()|0).toString(16);
};
function addStyle(styleText) {
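// Append a new <style> tag with the given CSS text to the document head.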
var style = document.createElement('style');
style.type = 'text/css';
style.innerHTML = styleText;
document.getElementsByTagName('head')[0].appendChild(style);
};
/* Work-Around */
// This is a quick-and-dirty animation for scrolling
var animateScroll = function(from, to, duration, timeline) {
var initTime = new Date().valueOf();
//var duration = 500;
var easingFunction = function(t) {
return t < .5 ? 2 * t * t : -1 + (4 - 2 * t) * t
};
var defer = $.Deferred();
var next = function() {
var now = new Date().valueOf();
var time = now - initTime;
var ease = easingFunction(time / duration);
var done = time > duration;
var s = done ? to : (from + (to - from) * ease);
timeline._setScrollTop(-s);
timeline._redraw();
if (!done) {
//setTimeout(next, 20);
window.requestAnimationFrame(next);
} else {
defer.resolve();
}
};
next();
return defer.promise();
};
var moveToItem = function(eventId, timeline, duration) {
console.log("moveToItem:", lastSelectedItem);
duration = 200;
var event = timeline.itemSet.items[eventId];
var leftHeight = timeline.props.leftContainer.height;
var contentHeight = timeline.props.left.height;
var alreadyVisible = false;
if (event.displayed) {
alreadyVisible = true;
if (!event.selected) {
timeline.setSelection(eventId);
}
}
var groupId = event.data.group;
var group = timeline.itemSet.groups[groupId] || {
top: 0,
height: 0
}; // Use a default if we don't have a group
var offset = group.top;
var orientation = timeline.timeAxis.options.orientation.axis;
var eventTop = function(event, group) {
if (orientation == "bottom") {
return group.height - event.top - event.height;
} else {
return event.top;
}
};
var currentScrollHeight = timeline._getScrollTop() * -1;
var targetOffset = offset + eventTop(event, group);
var height = event.height;
if (targetOffset < currentScrollHeight) {
if (offset + leftHeight <= offset + eventTop(event, group) + height) {
offset += eventTop(event, group) - timeline.itemSet.options.margin.item.vertical;
}
} else {
if (targetOffset + height > currentScrollHe | entHeight - leftHeight);
if (targetOffset + height > currentScrollHeight + leftHeight || targetOffset < currentScrollHeight) {
animateScroll(currentScrollHeight, offset, duration, timeline);
timeline.setSelection(eventId);
timeline.focus(eventId);
}
};
function funForCall(){
console.log("call a funForCall function from java !")
}
// function debounce(func, wait = 100) {
// let timeout;
// return function (...args) {
// clearTimeout(timeout);
// timeout = setTimeout(() => {
// func.apply(this, args);
// }, wait);
// };
// }
//
// let groupFocus = (e) => {
// let vGroups = timeline.getVisibleGroups();
// let vItems = vGroups.reduce((res, groupId) => {
// let group = timeline.itemSet.groups[groupId];
// if (group.items) {
// res = res.concat(Object.keys(group.items));
// }
// return res;
// }, []);
// timeline.focus(vItems);
// };
// this.timeline | ight + leftHeight) {
offset += eventTop(event, group) + height - leftHeight + timeline.itemSet.options.margin.item.vertical;
}
}
offset = Math.min(offset, cont | conditional_block |
test_conv_layer.py |
result = activation(result)
# expand dimensions from K, time_steps, batch_size to (K, 1, time_steps, 1, batch_size)
result = np.expand_dims(np.expand_dims(result, axis=1), axis=3)
return result
# TODO: Move these to conftest.py
@pytest.fixture(params=[1])
def input_size(request):
return request.param
@pytest.fixture(params=[16])
def output_size(request):
return request.param
@pytest.fixture(params=[4])
def batch_size(request):
return request.param
@pytest.fixture
def width_axis(width):
return ng.make_axis(length=width, name="W")
@pytest.fixture
def conv1d_placeholder(channel_axis, width_axis, batch_axis):
return ng.placeholder((channel_axis, width_axis, batch_axis))
@pytest.fixture
def conv1d_no_channel_axis(width_axis, batch_axis):
return ng.placeholder((width_axis, batch_axis))
@pytest.fixture
def spatial_onehot(input_size, width, batch_size):
value = np.zeros((input_size, width, batch_size))
value[:, width // 2, :] = 1
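# A single 1 in the middle of the spatial axis makes the causal/same behaviour easy to check.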
return value
@pytest.mark.xfail(reason='1d conv not supported')
def test_causal_convolution(conv1d_placeholder, spatial_onehot, output_size, width):
""" Test that causal convolutions only operate on leftward inputs"""
conv_layer = Convolution((3, output_size), lambda x: 1, padding="causal")
output = conv_layer(conv1d_placeholder)
output_width = output.axes.find_by_name("W")[0].length
assert output_width == width, "Causal convolution output width != " \
"input width: {} != {}".format(output_width, width)
with executor(output, conv1d_placeholder) as comp:
output_val = comp(spatial_onehot)
# First 1 is at width // 2, so anything before that should be 0
assert (output_val[:, :width // 2] == 0).all(), "Acausal outputs in causal convolution"
@pytest.mark.xfail(reason='1d conv not supported')
@pytest.mark.parametrize("stride", (1, 3))
def test_same_convolution(conv1d_placeholder, spatial_onehot, output_size, width, stride):
""" Test that 'same' always results in out_size = np.ceil(in_size / stride) """
conv_layer = Convolution((3, output_size), lambda x: 1, strides=stride, padding="same")
output = conv_layer(conv1d_placeholder)
output_width = output.axes.find_by_name("W")[0].length
assert output_width == np.ceil(width / float(stride)), ("Same convolution output width != "
"ceil(input_width / stride): {} != "
"ceil({} / {})").format(output_width,
width,
stride)
@pytest.mark.xfail(reason='1d conv not supported')
def test_axis_preservation(conv1d_placeholder, output_size):
""" Test that axes into a conv are the same as axes out"""
conv_layer = Convolution((3, output_size), lambda x: 1)
output = conv_layer(conv1d_placeholder)
assert output.axes == conv1d_placeholder.axes, ("Output axes are not the same as input axes: "
"{} != {}").format(output.axes,
conv1d_placeholder.axes)
@pytest.mark.xfail(reason='1d conv and channel name not supported')
def test_channel_axis_introduction(conv1d_no_channel_axis, output_size, channel_axis):
""" Test that a channel axis is added when it doesn't exist in the input"""
conv_layer = Convolution((3, output_size), lambda x: 1)
output = conv_layer(conv1d_no_channel_axis)
t_axes = conv1d_no_channel_axis.axes + channel_axis
assert output.axes.is_equal_set(t_axes), ("Output axes are not input axes + channel axis:"
"{} != {} + {}").format(output.axes,
conv1d_no_channel_axis.axes,
channel_axis)
@pytest.mark.xfail(reason='1d conv and channel name not supported')
def test_alternate_spatial_axes(conv1d_placeholder, output_size, width_axis):
""" Test that spatial axis names are modifiable """
width_axis.name = "time"
assert len(conv1d_placeholder.axes.find_by_name("time")) == 1
conv_layer = Convolution((3, output_size), lambda x: 1)
with pytest.raises(IncompatibleAxesError):
conv_layer(conv1d_placeholder)
# As a dictionary
output = conv_layer(conv1d_placeholder, spatial_axes={"W": "time"})
assert output.axes == conv1d_placeholder.axes
# As a tuple
output = conv_layer(conv1d_placeholder, spatial_axes=("D", "H", "time"))
assert output.axes == conv1d_placeholder.axes
@pytest.mark.xfail(reason='1d conv and channel name not supported')
def test_alternate_channel_axes(conv1d_placeholder, output_size, channel_axis):
""" Test that channel axis names are modifiable"""
channel_axis.name = "channel"
assert len(conv1d_placeholder.axes.find_by_name("channel")) == 1
conv_layer = Convolution((3, output_size), lambda x: 1)
with pytest.raises(IncompatibleAxesError):
conv_layer(conv1d_placeholder)
output = conv_layer(conv1d_placeholder, channel_axes="channel")
assert output.axes == conv1d_placeholder.axes
@pytest.mark.xfail(reason='resolution issue')
@pytest.mark.parametrize('dilation', [1, 2, 3])
def test_dilated_conv(dilation):
"""Test that the dilated convolution layer output matches expected. This test compares
the maximum output value to an expected max output value. The expected value is computed
based on the dilation parameter. The test also checks that the output size matches the
expected size based on the dilation parameter value."""
image_size = 3
batch_size = 1
init_val = 0.1
conv_size = 3
pad = 3
N_filters = 1
image_channels = 3
model = Sequential([Convolution((conv_size, conv_size, N_filters),
filter_init=ConstantInit(val=init_val),
padding=pad, dilation=dilation)])
X = np.ones(shape=(batch_size, 3, image_size, image_size)) # Create dummy image
data = {'image': X, 'iteration': 1}
data_size = OrderedDict([('N', batch_size), ('C', 3), ('H', image_size), ('W', image_size)])
ax = [ng.make_axis(length=data_size[k], name=k) for k in list(data_size.keys())]
p_axes = ng.make_axes(ax)
named_inputs = {'image': ng.placeholder(p_axes)}
outputs = model(named_inputs['image'])
named_outputs = {outputs.name: outputs}
with closing(ngt.make_transformer()) as transformer:
m = make_bound_computation(transformer, named_outputs, named_inputs)
output = m(data)[list(m(data).keys())[0]]
filter_size = dilation * (conv_size - 1) + 1 # Compute expected filter size
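# e.g. conv_size=3 with dilation=2 gives an effective span of 5 pixels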
# Compute the expected output size based on convolution parameters
out_size = (image_size + 2 * pad - filter_size) + 1
filt_tmp = np.zeros(filter_size)
filt_tmp[0::dilation] = 1
# max overlap between dilated filter and image (in 1-d)
max_overlap = int(np.min([filter_size, image_size]))
exp_max_output = init_val * image_channels * (np.sum(filt_tmp[0: max_overlap]))**2
# Expected max output changes for different dilation parameter values
assert int(10 * np.max(output)) == int(10 * exp_max_output), \
("Dilated conv max outputs do not match expected: "
"{} != {}").format(np.max(output),
init_val * conv_size * ((image_size - (dilation - 1))**2))
assert np.shape(output) == (batch_size, N_filters, out_size, out_size), \
("Dilated conv output is not expected size: "
"{} != {}").format(np.shape(output), (batch_size, N_filters, out_size, out_size))
@pytest.mark.xfail(reason='Not implemented')
@pytest.mark.parametrize('filter_width', [3])
@pytest.mark.parametrize('num_filters', [2])
@pytest.mark.parametrize('strides', [1])
@pytest.mark.parametrize('padding', [0])
@pytest.mark.parametrize('time_steps', [5])
@pytest.mark.parametrize('feature_dimension', [4])
@pytest.mark.parametrize('batch_size', [2])
def test_conv1d(transformer_factory, filter_width, num_filters, strides, padding,
time_steps, feature_dimension, batch_size):
dilation = 1 # reference conv does not support dilation
F = ng.make_axis(name='F', length=feature_dimension)
REC = ng.make_axis(name='REC', length=time_steps)
N = ng.make_axis(name='N', length=batch_size)
in_axes = ng.make_axes([F, REC, N])
inputs = ng.placeholder(axes=in_axes)
input_vals = np.random.randn(*in_axes.lengths)
filter_init = GaussianInit()
conv1d = Convolution((filter_width | for k in range(K):
for n in range(batch_size):
result[k, t, n] = np.sum(inputs[:, t:t + filter_width, n] * filters[:, :, k]) | conditional_block |
|
test_conv_layer.py | time_steps, batch_size to (K, 1, time_steps, 1, batch_size)
result = np.expand_dims(np.expand_dims(result, axis=1), axis=3)
return result
# TODO: Move these to conftest.py
@pytest.fixture(params=[1])
def input_size(request):
return request.param
@pytest.fixture(params=[16])
def output_size(request):
return request.param
@pytest.fixture(params=[4])
def batch_size(request):
return request.param
@pytest.fixture
def width_axis(width):
return ng.make_axis(length=width, name="W")
@pytest.fixture
def conv1d_placeholder(channel_axis, width_axis, batch_axis):
return ng.placeholder((channel_axis, width_axis, batch_axis))
@pytest.fixture
def conv1d_no_channel_axis(width_axis, batch_axis):
return ng.placeholder((width_axis, batch_axis))
@pytest.fixture
def spatial_onehot(input_size, width, batch_size):
value = np.zeros((input_size, width, batch_size))
value[:, width // 2, :] = 1
return value
@pytest.mark.xfail(reason='1d conv not supported')
def test_causal_convolution(conv1d_placeholder, spatial_onehot, output_size, width):
""" Test that causal convolutions only operate on leftward inputs"""
conv_layer = Convolution((3, output_size), lambda x: 1, padding="causal")
output = conv_layer(conv1d_placeholder)
output_width = output.axes.find_by_name("W")[0].length
assert output_width == width, "Causal convolution output width != " \
"input width: {} != {}".format(output_width, width)
with executor(output, conv1d_placeholder) as comp:
output_val = comp(spatial_onehot)
# First 1 is at width // 2, so anything before that should be 0
assert (output_val[:, :width // 2] == 0).all(), "Acausal outputs in causal convolution"
@pytest.mark.xfail(reason='1d conv not supported')
@pytest.mark.parametrize("stride", (1, 3))
def test_same_convolution(conv1d_placeholder, spatial_onehot, output_size, width, stride):
|
@pytest.mark.xfail(reason='1d conv not supported')
def test_axis_preservation(conv1d_placeholder, output_size):
""" Test that axes into a conv are the same as axes out"""
conv_layer = Convolution((3, output_size), lambda x: 1)
output = conv_layer(conv1d_placeholder)
assert output.axes == conv1d_placeholder.axes, ("Output axes are not the same as input axes: "
"{} != {}").format(output.axes,
conv1d_placeholder.axes)
@pytest.mark.xfail(reason='1d conv and channel name not supported')
def test_channel_axis_introduction(conv1d_no_channel_axis, output_size, channel_axis):
""" Test that a channel axis is added when it doesn't exist in the input"""
conv_layer = Convolution((3, output_size), lambda x: 1)
output = conv_layer(conv1d_no_channel_axis)
t_axes = conv1d_no_channel_axis.axes + channel_axis
assert output.axes.is_equal_set(t_axes), ("Output axes are not input axes + channel axis:"
"{} != {} + {}").format(output.axes,
conv1d_no_channel_axis.axes,
channel_axis)
@pytest.mark.xfail(reason='1d conv and channel name not supported')
def test_alternate_spatial_axes(conv1d_placeholder, output_size, width_axis):
""" Test that spatial axis names are modifiable """
width_axis.name = "time"
assert len(conv1d_placeholder.axes.find_by_name("time")) == 1
conv_layer = Convolution((3, output_size), lambda x: 1)
with pytest.raises(IncompatibleAxesError):
conv_layer(conv1d_placeholder)
# As a dictionary
output = conv_layer(conv1d_placeholder, spatial_axes={"W": "time"})
assert output.axes == conv1d_placeholder.axes
# As a tuple
output = conv_layer(conv1d_placeholder, spatial_axes=("D", "H", "time"))
assert output.axes == conv1d_placeholder.axes
@pytest.mark.xfail(reason='1d conv and channel name not supported')
def test_alternate_channel_axes(conv1d_placeholder, output_size, channel_axis):
""" Test that channel axis names are modifiable"""
channel_axis.name = "channel"
assert len(conv1d_placeholder.axes.find_by_name("channel")) == 1
conv_layer = Convolution((3, output_size), lambda x: 1)
with pytest.raises(IncompatibleAxesError):
conv_layer(conv1d_placeholder)
output = conv_layer(conv1d_placeholder, channel_axes="channel")
assert output.axes == conv1d_placeholder.axes
@pytest.mark.xfail(reason='resolution issue')
@pytest.mark.parametrize('dilation', [1, 2, 3])
def test_dilated_conv(dilation):
"""Test that the dilated convolution layer output matches expected. This test compares
the maximum output value to an expected max output value. The expected value is computed
based on the dilation parameter. The test also checks that the output size matches the
expected size based on the dilation parameter value."""
image_size = 3
batch_size = 1
init_val = 0.1
conv_size = 3
pad = 3
N_filters = 1
image_channels = 3
model = Sequential([Convolution((conv_size, conv_size, N_filters),
filter_init=ConstantInit(val=init_val),
padding=pad, dilation=dilation)])
X = np.ones(shape=(batch_size, 3, image_size, image_size)) # Create dummy image
data = {'image': X, 'iteration': 1}
data_size = OrderedDict([('N', batch_size), ('C', 3), ('H', image_size), ('W', image_size)])
ax = [ng.make_axis(length=data_size[k], name=k) for k in list(data_size.keys())]
p_axes = ng.make_axes(ax)
named_inputs = {'image': ng.placeholder(p_axes)}
outputs = model(named_inputs['image'])
named_outputs = {outputs.name: outputs}
with closing(ngt.make_transformer()) as transformer:
m = make_bound_computation(transformer, named_outputs, named_inputs)
output = m(data)[list(m(data).keys())[0]]
filter_size = dilation * (conv_size - 1) + 1 # Compute expected filter size
# Compute the expected output size based on convolution parameters
out_size = (image_size + 2 * pad - filter_size) + 1
filt_tmp = np.zeros(filter_size)
filt_tmp[0::dilation] = 1
# max overlap between dilated filter and image (in 1-d)
max_overlap = int(np.min([filter_size, image_size]))
exp_max_output = init_val * image_channels * (np.sum(filt_tmp[0: max_overlap]))**2
# Expected max output changes for different dilation parameter values#
assert int(10 * np.max(output)) == int(10 * exp_max_output), \
("Dilated conv max outputs do not match expected: "
"{} != {}").format(np.max(output),
init_val * conv_size * ((image_size - (dilation - 1))**2))
assert np.shape(output) == (batch_size, N_filters, out_size, out_size), \
("Dilated conv output is not expected size: "
"{} != {}").format(np.shape(output), (batch_size, N_filters, out_size, out_size))
@pytest.mark.xfail(reason='Not implemented')
@pytest.mark.parametrize('filter_width', [3])
@pytest.mark.parametrize('num_filters', [2])
@pytest.mark.parametrize('strides', [1])
@pytest.mark.parametrize('padding', [0])
@pytest.mark.parametrize('time_steps', [5])
@pytest.mark.parametrize('feature_dimension', [4])
@pytest.mark.parametrize('batch_size', [2])
def test_conv1d(transformer_factory, filter_width, num_filters, strides, padding,
time_steps, feature_dimension, batch_size):
dilation = 1 # reference conv does not support dilation
F = ng.make_axis(name='F', length=feature_dimension)
REC = ng.make_axis(name='REC', length=time_steps)
N = ng.make_axis(name='N', length=batch_size)
in_axes = ng.make_axes([F, REC, N])
inputs = ng.placeholder(axes=in_axes)
input_vals = np.random.randn(*in_axes.lengths)
filter_init = GaussianInit()
conv1d = Convolution((filter_width, num_filters), filter_init,
strides=strides, padding=padding, dilation=dilation,
bias_init=None, activation=Rectlin(), batch_norm=None)
result_op = conv1d(inputs, channel_axes='F', spatial_axes={'W': ' | """ Test that 'same' always results in out_size = np.ceil(in_size / stride) """
conv_layer = Convolution((3, output_size), lambda x: 1, strides=stride, padding="same")
output = conv_layer(conv1d_placeholder)
output_width = output.axes.find_by_name("W")[0].length
assert output_width == np.ceil(width / float(stride)), ("Same convolution output width != "
"ceil(input_width / stride): {} != "
"ceil({} / {})").format(output_width,
width,
stride) | identifier_body |
test_conv_layer.py | time_steps, batch_size to (K, 1, time_steps, 1, batch_size)
result = np.expand_dims(np.expand_dims(result, axis=1), axis=3)
return result
# TODO: Move these to conftest.py
@pytest.fixture(params=[1])
def input_size(request):
return request.param
@pytest.fixture(params=[16])
def output_size(request):
return request.param
@pytest.fixture(params=[4])
def batch_size(request):
return request.param
@pytest.fixture
def width_axis(width):
return ng.make_axis(length=width, name="W")
@pytest.fixture
def conv1d_placeholder(channel_axis, width_axis, batch_axis):
return ng.placeholder((channel_axis, width_axis, batch_axis))
@pytest.fixture
def conv1d_no_channel_axis(width_axis, batch_axis):
return ng.placeholder((width_axis, batch_axis))
@pytest.fixture
def spatial_onehot(input_size, width, batch_size):
value = np.zeros((input_size, width, batch_size))
value[:, width // 2, :] = 1
return value
@pytest.mark.xfail(reason='1d conv not supported')
def test_causal_convolution(conv1d_placeholder, spatial_onehot, output_size, width):
""" Test that causal convolutions only operate on leftward inputs"""
conv_layer = Convolution((3, output_size), lambda x: 1, padding="causal")
output = conv_layer(conv1d_placeholder)
output_width = output.axes.find_by_name("W")[0].length
assert output_width == width, "Causal convolution output width != " \
"input width: {} != {}".format(output_width, width)
with executor(output, conv1d_placeholder) as comp:
output_val = comp(spatial_onehot)
# First 1 is at width // 2, so anything before that should be 0
assert (output_val[:, :width // 2] == 0).all(), "Acausal outputs in causal convolution"
@pytest.mark.xfail(reason='1d conv not supported')
@pytest.mark.parametrize("stride", (1, 3))
def test_same_convolution(conv1d_placeholder, spatial_onehot, output_size, width, stride):
""" Test that 'same' always results in out_size = np.ceil(in_size / stride) """
conv_layer = Convolution((3, output_size), lambda x: 1, strides=stride, padding="same")
output = conv_layer(conv1d_placeholder)
output_width = output.axes.find_by_name("W")[0].length
assert output_width == np.ceil(width / float(stride)), ("Same convolution output width != "
"ceil(input_width / stride): {} != "
"ceil({} / {})").format(output_width,
width,
stride)
@pytest.mark.xfail(reason='1d conv not supported')
def test_axis_preservation(conv1d_placeholder, output_size):
""" Test that axes into a conv are the same as axes out"""
conv_layer = Convolution((3, output_size), lambda x: 1)
output = conv_layer(conv1d_placeholder)
assert output.axes == conv1d_placeholder.axes, ("Output axes are not the same as input axes: "
"{} != {}").format(output.axes,
conv1d_placeholder.axes)
@pytest.mark.xfail(reason='1d conv and channel name not supported')
def test_channel_axis_introduction(conv1d_no_channel_axis, output_size, channel_axis):
""" Test that a channel axis is added when it doesn't exist in the input"""
conv_layer = Convolution((3, output_size), lambda x: 1)
output = conv_layer(conv1d_no_channel_axis)
t_axes = conv1d_no_channel_axis.axes + channel_axis
assert output.axes.is_equal_set(t_axes), ("Output axes are not input axes + channel axis:"
"{} != {} + {}").format(output.axes,
conv1d_no_channel_axis.axes,
channel_axis)
@pytest.mark.xfail(reason='1d conv and channel name not supported')
def test_alternate_spatial_axes(conv1d_placeholder, output_size, width_axis):
""" Test that spatial axis names are modifiable """
width_axis.name = "time"
assert len(conv1d_placeholder.axes.find_by_name("time")) == 1
conv_layer = Convolution((3, output_size), lambda x: 1)
with pytest.raises(IncompatibleAxesError):
conv_layer(conv1d_placeholder)
# As a dictionary
output = conv_layer(conv1d_placeholder, spatial_axes={"W": "time"})
assert output.axes == conv1d_placeholder.axes
# As a tuple
output = conv_layer(conv1d_placeholder, spatial_axes=("D", "H", "time"))
assert output.axes == conv1d_placeholder.axes
@pytest.mark.xfail(reason='1d conv and channel name not supported')
def test_alternate_channel_axes(conv1d_placeholder, output_size, channel_axis):
""" Test that channel axis names are modifiable"""
channel_axis.name = "channel"
assert len(conv1d_placeholder.axes.find_by_name("channel")) == 1
conv_layer = Convolution((3, output_size), lambda x: 1)
with pytest.raises(IncompatibleAxesError):
conv_layer(conv1d_placeholder)
output = conv_layer(conv1d_placeholder, channel_axes="channel")
assert output.axes == conv1d_placeholder.axes
@pytest.mark.xfail(reason='resolution issue')
@pytest.mark.parametrize('dilation', [1, 2, 3])
def | (dilation):
"""Test that the dilated convolution layer output matches expected. This test compares
the maximum output value to an expected max output value. The expected value is computed
based on the dilation parameter. The test also checks that the output size matches the
expected size based on the dilation parameter value."""
image_size = 3
batch_size = 1
init_val = 0.1
conv_size = 3
pad = 3
N_filters = 1
image_channels = 3
model = Sequential([Convolution((conv_size, conv_size, N_filters),
filter_init=ConstantInit(val=init_val),
padding=pad, dilation=dilation)])
X = np.ones(shape=(batch_size, 3, image_size, image_size)) # Create dummy image
data = {'image': X, 'iteration': 1}
data_size = OrderedDict([('N', batch_size), ('C', 3), ('H', image_size), ('W', image_size)])
ax = [ng.make_axis(length=data_size[k], name=k) for k in list(data_size.keys())]
p_axes = ng.make_axes(ax)
named_inputs = {'image': ng.placeholder(p_axes)}
outputs = model(named_inputs['image'])
named_outputs = {outputs.name: outputs}
with closing(ngt.make_transformer()) as transformer:
m = make_bound_computation(transformer, named_outputs, named_inputs)
output = m(data)[list(m(data).keys())[0]]
filter_size = dilation * (conv_size - 1) + 1 # Compute expected filter size
# Compute the expected output size based on convolution parameters
out_size = (image_size + 2 * pad - filter_size) + 1
filt_tmp = np.zeros(filter_size)
filt_tmp[0::dilation] = 1
# max overlap between dilated filter and image (in 1-d)
max_overlap = int(np.min([filter_size, image_size]))
exp_max_output = init_val * image_channels * (np.sum(filt_tmp[0: max_overlap]))**2
# Expected max output changes for different dilation parameter values#
assert int(10 * np.max(output)) == int(10 * exp_max_output), \
("Dilated conv max outputs do not match expected: "
"{} != {}").format(np.max(output),
init_val * conv_size * ((image_size - (dilation - 1))**2))
assert np.shape(output) == (batch_size, N_filters, out_size, out_size), \
("Dilated conv output is not expected size: "
"{} != {}").format(np.shape(output), (batch_size, N_filters, out_size, out_size))
@pytest.mark.xfail(reason='Not implemented')
@pytest.mark.parametrize('filter_width', [3])
@pytest.mark.parametrize('num_filters', [2])
@pytest.mark.parametrize('strides', [1])
@pytest.mark.parametrize('padding', [0])
@pytest.mark.parametrize('time_steps', [5])
@pytest.mark.parametrize('feature_dimension', [4])
@pytest.mark.parametrize('batch_size', [2])
def test_conv1d(transformer_factory, filter_width, num_filters, strides, padding,
time_steps, feature_dimension, batch_size):
dilation = 1 # reference conv does not support dilation
F = ng.make_axis(name='F', length=feature_dimension)
REC = ng.make_axis(name='REC', length=time_steps)
N = ng.make_axis(name='N', length=batch_size)
in_axes = ng.make_axes([F, REC, N])
inputs = ng.placeholder(axes=in_axes)
input_vals = np.random.randn(*in_axes.lengths)
filter_init = GaussianInit()
conv1d = Convolution((filter_width, num_filters), filter_init,
strides=strides, padding=padding, dilation=dilation,
bias_init=None, activation=Rectlin(), batch_norm=None)
result_op = conv1d(inputs, channel_axes='F', spatial_axes={'W': ' | test_dilated_conv | identifier_name |
test_conv_layer.py | , time_steps, batch_size to (K, 1, time_steps, 1, batch_size)
result = np.expand_dims(np.expand_dims(result, axis=1), axis=3)
return result
# TODO: Move these to conftest.py
@pytest.fixture(params=[1])
def input_size(request):
return request.param
@pytest.fixture(params=[16])
def output_size(request):
return request.param
@pytest.fixture(params=[4])
def batch_size(request):
return request.param
@pytest.fixture
def width_axis(width):
return ng.make_axis(length=width, name="W")
@pytest.fixture
def conv1d_placeholder(channel_axis, width_axis, batch_axis):
return ng.placeholder((channel_axis, width_axis, batch_axis))
@pytest.fixture
def conv1d_no_channel_axis(width_axis, batch_axis):
return ng.placeholder((width_axis, batch_axis))
@pytest.fixture
def spatial_onehot(input_size, width, batch_size):
value = np.zeros((input_size, width, batch_size))
value[:, width // 2, :] = 1
return value
@pytest.mark.xfail(reason='1d conv not supported')
def test_causal_convolution(conv1d_placeholder, spatial_onehot, output_size, width):
""" Test that causal convolutions only operate on leftward inputs"""
conv_layer = Convolution((3, output_size), lambda x: 1, padding="causal")
output = conv_layer(conv1d_placeholder)
output_width = output.axes.find_by_name("W")[0].length
assert output_width == width, "Causal convolution output width != " \
"input width: {} != {}".format(output_width, width)
with executor(output, conv1d_placeholder) as comp:
output_val = comp(spatial_onehot)
# First 1 is at width // 2, so anything before that should be 0
assert (output_val[:, :width // 2] == 0).all(), "Acausal outputs in causal convolution"
@pytest.mark.xfail(reason='1d conv not supported')
@pytest.mark.parametrize("stride", (1, 3))
def test_same_convolution(conv1d_placeholder, spatial_onehot, output_size, width, stride):
""" Test that 'same' always results in out_size = np.ceil(in_size / stride) """
conv_layer = Convolution((3, output_size), lambda x: 1, strides=stride, padding="same")
output = conv_layer(conv1d_placeholder)
output_width = output.axes.find_by_name("W")[0].length
assert output_width == np.ceil(width / float(stride)), ("Same convolution output width != "
"ceil(input_width / stride): {} != "
"ceil({} / {})").format(output_width,
width,
stride)
@pytest.mark.xfail(reason='1d conv not supported')
def test_axis_preservation(conv1d_placeholder, output_size):
""" Test that axes into a conv are the same as axes out"""
conv_layer = Convolution((3, output_size), lambda x: 1)
output = conv_layer(conv1d_placeholder)
assert output.axes == conv1d_placeholder.axes, ("Output axes are not the same as input axes: "
"{} != {}").format(output.axes,
conv1d_placeholder.axes)
@pytest.mark.xfail(reason='1d conv and channel name not supported')
def test_channel_axis_introduction(conv1d_no_channel_axis, output_size, channel_axis):
""" Test that a channel axis is added when it doesn't exist in the input"""
conv_layer = Convolution((3, output_size), lambda x: 1)
output = conv_layer(conv1d_no_channel_axis)
t_axes = conv1d_no_channel_axis.axes + channel_axis
assert output.axes.is_equal_set(t_axes), ("Output axes are not input axes + channel axis:"
"{} != {} + {}").format(output.axes,
conv1d_no_channel_axis.axes,
channel_axis)
@pytest.mark.xfail(reason='1d conv and channel name not supported')
def test_alternate_spatial_axes(conv1d_placeholder, output_size, width_axis):
""" Test that spatial axis names are modifiable """
width_axis.name = "time"
assert len(conv1d_placeholder.axes.find_by_name("time")) == 1
conv_layer = Convolution((3, output_size), lambda x: 1)
with pytest.raises(IncompatibleAxesError):
conv_layer(conv1d_placeholder)
# As a dictionary
output = conv_layer(conv1d_placeholder, spatial_axes={"W": "time"})
assert output.axes == conv1d_placeholder.axes
# As a tuple
output = conv_layer(conv1d_placeholder, spatial_axes=("D", "H", "time"))
assert output.axes == conv1d_placeholder.axes
@pytest.mark.xfail(reason='1d conv and channel name not supported')
def test_alternate_channel_axes(conv1d_placeholder, output_size, channel_axis):
""" Test that channel axis names are modifiable"""
channel_axis.name = "channel"
assert len(conv1d_placeholder.axes.find_by_name("channel")) == 1
conv_layer = Convolution((3, output_size), lambda x: 1)
with pytest.raises(IncompatibleAxesError):
conv_layer(conv1d_placeholder)
output = conv_layer(conv1d_placeholder, channel_axes="channel")
assert output.axes == conv1d_placeholder.axes
@pytest.mark.xfail(reason='resolution issue')
@pytest.mark.parametrize('dilation', [1, 2, 3])
def test_dilated_conv(dilation):
"""Test that the dilated convolution layer output matches expected. This test compares
the maximum output value to an expected max output value. The expected value is computed
based on the dilation parameter. The test also checks that the output size matches the
expected size based on the dilation parameter value."""
image_size = 3
batch_size = 1
init_val = 0.1 | N_filters = 1
image_channels = 3
model = Sequential([Convolution((conv_size, conv_size, N_filters),
filter_init=ConstantInit(val=init_val),
padding=pad, dilation=dilation)])
X = np.ones(shape=(batch_size, 3, image_size, image_size)) # Create dummy image
data = {'image': X, 'iteration': 1}
data_size = OrderedDict([('N', batch_size), ('C', 3), ('H', image_size), ('W', image_size)])
ax = [ng.make_axis(length=data_size[k], name=k) for k in list(data_size.keys())]
p_axes = ng.make_axes(ax)
named_inputs = {'image': ng.placeholder(p_axes)}
outputs = model(named_inputs['image'])
named_outputs = {outputs.name: outputs}
with closing(ngt.make_transformer()) as transformer:
m = make_bound_computation(transformer, named_outputs, named_inputs)
output = m(data)[list(m(data).keys())[0]]
filter_size = dilation * (conv_size - 1) + 1 # Compute expected filter size
# Compute the expected output size based on convolution parameters
out_size = (image_size + 2 * pad - filter_size) + 1
filt_tmp = np.zeros(filter_size)
filt_tmp[0::dilation] = 1
# max overlap between dilated filter and image (in 1-d)
max_overlap = int(np.min([filter_size, image_size]))
exp_max_output = init_val * image_channels * (np.sum(filt_tmp[0: max_overlap]))**2
# Expected max output changes for different dilation parameter values#
assert int(10 * np.max(output)) == int(10 * exp_max_output), \
("Dilated conv max outputs do not match expected: "
"{} != {}").format(np.max(output),
init_val * conv_size * ((image_size - (dilation - 1))**2))
assert np.shape(output) == (batch_size, N_filters, out_size, out_size), \
("Dilated conv output is not expected size: "
"{} != {}").format(np.shape(output), (batch_size, N_filters, out_size, out_size))
@pytest.mark.xfail(reason='Not implemented')
@pytest.mark.parametrize('filter_width', [3])
@pytest.mark.parametrize('num_filters', [2])
@pytest.mark.parametrize('strides', [1])
@pytest.mark.parametrize('padding', [0])
@pytest.mark.parametrize('time_steps', [5])
@pytest.mark.parametrize('feature_dimension', [4])
@pytest.mark.parametrize('batch_size', [2])
def test_conv1d(transformer_factory, filter_width, num_filters, strides, padding,
time_steps, feature_dimension, batch_size):
dilation = 1 # reference conv does not support dilation
F = ng.make_axis(name='F', length=feature_dimension)
REC = ng.make_axis(name='REC', length=time_steps)
N = ng.make_axis(name='N', length=batch_size)
in_axes = ng.make_axes([F, REC, N])
inputs = ng.placeholder(axes=in_axes)
input_vals = np.random.randn(*in_axes.lengths)
filter_init = GaussianInit()
conv1d = Convolution((filter_width, num_filters), filter_init,
strides=strides, padding=padding, dilation=dilation,
bias_init=None, activation=Rectlin(), batch_norm=None)
result_op = conv1d(inputs, channel_axes='F', spatial_axes={'W': 'REC'})
| conv_size = 3
pad = 3 | random_line_split |
controller.go | interested in, as well
// as syncing informer caches and starting workers. It will block until stopCh
// is closed, at which point it will shutdown the workqueue and wait for
// workers to finish processing their current work items.
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer c.workqueue.ShutDown()
// Start the informer factories to begin populating the informer caches
klog.V(1).Info("Starting WorkerPodAutoScaler controller")
// Wait for the caches to be synced before starting workers
klog.V(1).Info("Waiting for informer caches to sync")
if ok := cache.WaitForCacheSync(stopCh, c.deploymentsSynced, c.replicaSetsSynced, c.workerPodAutoScalersSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
klog.V(1).Info("Starting workers")
// Launch threadiness workers to process WorkerPodAutoScaler resources
for i := 0; i < threadiness; i++ {
// TODO: move from stopCh to context, use: UntilWithContext()
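// wait.Until restarts runWorker every second if it returns, until stopCh is closed.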
go wait.Until(c.runWorker, time.Second, stopCh)
}
<-stopCh
klog.V(1).Info("Shutting down workers")
return nil
}
// runWorker is a long-running function that will continually call the
// processNextWorkItem function in order to read and process a message on the
// workqueue.
func (c *Controller) runWorker() {
for c.processNextWorkItem(c.ctx) {
}
}
// processNextWorkItem will read a single work item off the workqueue and
// attempt to process it, by calling the syncHandler.
func (c *Controller) processNextWorkItem(ctx context.Context) bool {
obj, shutdown := c.workqueue.Get()
if shutdown {
return false
}
// We wrap this block in a func so we can defer c.workqueue.Done.
err := func(obj interface{}) error {
// We call Done here so the workqueue knows we have finished
// processing this item. We also must remember to call Forget if we
// do not want this work item being re-queued. For example, we do
// not call Forget if a transient error occurs, instead the item is
// put back on the workqueue and attempted again after a back-off
// period.
defer c.workqueue.Done(obj)
var ok bool
// We expect strings to come off the workqueue. These are of the
// form namespace/name. We do this as the delayed nature of the
// workqueue means the items in the informer cache may actually be
// more up to date than when the item was initially put onto the
// workqueue. (PS: not anymore, it's a WPA event)
event, ok := obj.(WokerPodAutoScalerEvent)
if !ok {
// As the item in the workqueue is actually invalid, we call
// Forget here else we'd go into a loop of attempting to
// process a work item that is invalid.
c.workqueue.Forget(obj)
utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
return nil
}
// Run the syncHandler, passing it the namespace/name string of the
// WorkerPodAutoScaler resource to be synced.
if err := c.syncHandler(ctx, event); err != nil {
// Put the item back on the workqueue to handle any transient errors.
c.workqueue.AddRateLimited(event)
return fmt.Errorf("error syncing '%s': %s, requeuing", event, err.Error())
}
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.workqueue.Forget(obj)
return nil
}(obj)
if err != nil {
utilruntime.HandleError(err)
return true
}
return true
}
// syncHandler compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the WorkerPodAutoScaler resource
// with the current status of the resource.
func (c *Controller) syncHandler(ctx context.Context, event WokerPodAutoScalerEvent) error {
now := time.Now()
key := event.key
// Convert the namespace/name string into a distinct namespace and name
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key))
return nil
}
// Get the WorkerPodAutoScaler resource with this namespace/name
workerPodAutoScaler, err := c.workerPodAutoScalersLister.WorkerPodAutoScalers(namespace).Get(name)
if err != nil {
// The WorkerPodAutoScaler resource may no longer exist, in which case we stop processing.
if errors.IsNotFound(err) {
utilruntime.HandleError(fmt.Errorf("workerPodAutoScaler '%s' in work queue no longer exists", key))
c.Queues.Delete(namespace, name)
return nil
}
return err
}
var currentWorkers, availableWorkers int32
deploymentName := workerPodAutoScaler.Spec.DeploymentName
replicaSetName := workerPodAutoScaler.Spec.ReplicaSetName
if deploymentName != "" {
// Get the Deployment with the name specified in WorkerPodAutoScaler.spec
deployment, err := c.deploymentLister.Deployments(workerPodAutoScaler.Namespace).Get(deploymentName)
if errors.IsNotFound(err) {
return fmt.Errorf("deployment %s not found in namespace %s",
deploymentName, workerPodAutoScaler.Namespace)
} else if err != nil {
return err
}
currentWorkers = *deployment.Spec.Replicas
availableWorkers = deployment.Status.AvailableReplicas
} else if replicaSetName != "" {
// Get the ReplicaSet with the name specified in WorkerPodAutoScaler.spec
replicaSet, err := c.replicaSetLister.ReplicaSets(workerPodAutoScaler.Namespace).Get(replicaSetName)
if errors.IsNotFound(err) {
return fmt.Errorf("ReplicaSet %s not found in namespace %s",
replicaSetName, workerPodAutoScaler.Namespace)
} else if err != nil {
return err
}
currentWorkers = *replicaSet.Spec.Replicas
availableWorkers = replicaSet.Status.AvailableReplicas
} else {
// We choose to absorb the error here as the worker would requeue the
// resource otherwise. Instead, the next time the resource is updated
// the resource will be queued again.
utilruntime.HandleError(fmt.Errorf("%s: deployment or replicaset name must be specified", key))
return nil
}
var secondsToProcessOneJob float64
if workerPodAutoScaler.Spec.SecondsToProcessOneJob != nil {
secondsToProcessOneJob = *workerPodAutoScaler.Spec.SecondsToProcessOneJob
}
switch event.name {
case WokerPodAutoScalerEventAdd:
err = c.Queues.Add(
namespace,
name,
workerPodAutoScaler.Spec.QueueURI,
currentWorkers,
secondsToProcessOneJob,
)
case WokerPodAutoScalerEventUpdate:
err = c.Queues.Add(
namespace,
name,
workerPodAutoScaler.Spec.QueueURI,
currentWorkers,
secondsToProcessOneJob,
)
case WokerPodAutoScalerEventDelete:
err = c.Queues.Delete(namespace, name)
}
if err != nil {
utilruntime.HandleError(fmt.Errorf("unable to sync queue: %s", err.Error()))
return err
}
queueName, queueMessages, messagesSentPerMinute, idleWorkers := c.Queues.GetQueueInfo(
namespace, name)
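// Queue metrics come from the in-memory Queues store that the background pollers keep up to date.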
if queueName == "" {
return nil
}
if queueMessages == queue.UnsyncedQueueMessageCount {
klog.Warningf(
"%s qMsgs: %d, q not initialized, waiting for init to complete",
queueName,
queueMessages,
)
return nil
}
desiredWorkers := GetDesiredWorkers(
queueName,
queueMessages,
messagesSentPerMinute,
secondsToProcessOneJob,
*workerPodAutoScaler.Spec.TargetMessagesPerWorker,
currentWorkers,
idleWorkers,
*workerPodAutoScaler.Spec.MinReplicas,
*workerPodAutoScaler.Spec.MaxReplicas,
workerPodAutoScaler.GetMaxDisruption(c.defaultMaxDisruption),
)
klog.V(2).Infof("%s current: %d", queueName, currentWorkers)
klog.V(2).Infof("%s qMsgs: %d, desired: %d",
queueName, queueMessages, desiredWorkers)
// set metrics
qMsgs.WithLabelValues(
name,
namespace,
queueName,
).Set(float64(queueMessages))
qMsgsSPM.WithLabelValues(
name,
namespace,
queueName,
).Set(messagesSentPerMinute)
workersIdle.WithLabelValues(
name,
namespace,
queueName, | ).Set(float64(idleWorkers))
workersCurrent.WithLabelValues(
name,
namespace,
queueName, | random_line_split |
|
controller.go | an event recorder for recording Event resources to the
// Kubernetes API.
recorder record.EventRecorder
// defaultMaxDisruption
// it is the default value for the maxDisruption in the WPA spec.
// This specifies what percentage of pods can be disrupted in a
// single scale down activity.
// Can be expressed as integers or as a percentage.
defaultMaxDisruption string
// Queues keeps the list of all the queues in memory
// which is used by the core controller and the sqs exporter
// scaleDownDelay after last scale up
// the number of seconds to wait after the last scale up before scaling down
scaleDownDelay time.Duration
Queues *queue.Queues
}
// NewController returns a new sample controller
func NewController(
ctx context.Context,
kubeclientset kubernetes.Interface,
customclientset clientset.Interface,
deploymentInformer appsinformers.DeploymentInformer,
replicaSetInformer appsinformers.ReplicaSetInformer,
workerPodAutoScalerInformer informers.WorkerPodAutoScalerInformer,
defaultMaxDisruption string,
resyncPeriod time.Duration,
scaleDownDelay time.Duration,
queues *queue.Queues) *Controller {
// Create event broadcaster
// Add sample-controller types to the default Kubernetes Scheme so Events can be
// logged for sample-controller types.
utilruntime.Must(samplescheme.AddToScheme(scheme.Scheme))
klog.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})
controller := &Controller{
ctx: ctx,
kubeclientset: kubeclientset,
customclientset: customclientset,
deploymentLister: deploymentInformer.Lister(),
deploymentsSynced: deploymentInformer.Informer().HasSynced,
replicaSetLister: replicaSetInformer.Lister(),
replicaSetsSynced: replicaSetInformer.Informer().HasSynced,
workerPodAutoScalersLister: workerPodAutoScalerInformer.Lister(),
workerPodAutoScalersSynced: workerPodAutoScalerInformer.Informer().HasSynced,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "WorkerPodAutoScalers"),
recorder: recorder,
defaultMaxDisruption: defaultMaxDisruption,
scaleDownDelay: scaleDownDelay,
Queues: queues,
}
klog.V(4).Info("Setting up event handlers")
// Set up an event handler for when WorkerPodAutoScaler resources change
workerPodAutoScalerInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
AddFunc: controller.enqueueAddWorkerPodAutoScaler,
UpdateFunc: func(old, new interface{}) {
controller.enqueueUpdateWorkerPodAutoScaler(new)
},
DeleteFunc: controller.enqueueDeleteWorkerPodAutoScaler,
}, resyncPeriod)
return controller
}
// Run will set up the event handlers for types we are interested in, as well
// as syncing informer caches and starting workers. It will block until stopCh
// is closed, at which point it will shutdown the workqueue and wait for
// workers to finish processing their current work items.
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer c.workqueue.ShutDown()
// Start the informer factories to begin populating the informer caches
klog.V(1).Info("Starting WorkerPodAutoScaler controller")
// Wait for the caches to be synced before starting workers
klog.V(1).Info("Waiting for informer caches to sync")
if ok := cache.WaitForCacheSync(stopCh, c.deploymentsSynced, c.replicaSetsSynced, c.workerPodAutoScalersSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
klog.V(1).Info("Starting workers")
// Launch threadiness workers to process WorkerPodAutoScaler resources
for i := 0; i < threadiness; i++ {
// TODO: move from stopCh to context, use: UntilWithContext()
go wait.Until(c.runWorker, time.Second, stopCh)
}
<-stopCh
klog.V(1).Info("Shutting down workers")
return nil
}
// runWorker is a long-running function that will continually call the
// processNextWorkItem function in order to read and process a message on the
// workqueue.
func (c *Controller) runWorker() {
for c.processNextWorkItem(c.ctx) {
}
}
// processNextWorkItem will read a single work item off the workqueue and
// attempt to process it, by calling the syncHandler.
func (c *Controller) processNextWorkItem(ctx context.Context) bool {
obj, shutdown := c.workqueue.Get()
if shutdown {
return false
}
// We wrap this block in a func so we can defer c.workqueue.Done.
err := func(obj interface{}) error {
// We call Done here so the workqueue knows we have finished
// processing this item. We also must remember to call Forget if we
// do not want this work item being re-queued. For example, we do
// not call Forget if a transient error occurs, instead the item is
// put back on the workqueue and attempted again after a back-off
// period.
defer c.workqueue.Done(obj)
var ok bool
// We expect strings to come off the workqueue. These are of the
// form namespace/name. We do this as the delayed nature of the
// workqueue means the items in the informer cache may actually be
// more up to date than when the item was initially put onto the
// workqueue. (PS: not anymore, it's a WPA event)
event, ok := obj.(WokerPodAutoScalerEvent)
if !ok {
// As the item in the workqueue is actually invalid, we call
// Forget here else we'd go into a loop of attempting to
// process a work item that is invalid.
c.workqueue.Forget(obj)
utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
return nil
}
// Run the syncHandler, passing it the namespace/name string of the
// WorkerPodAutoScaler resource to be synced.
if err := c.syncHandler(ctx, event); err != nil {
// Put the item back on the workqueue to handle any transient errors.
c.workqueue.AddRateLimited(event)
return fmt.Errorf("error syncing '%s': %s, requeuing", event, err.Error())
}
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.workqueue.Forget(obj)
return nil
}(obj)
if err != nil {
utilruntime.HandleError(err)
return true
}
return true
}
// syncHandler compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the WorkerPodAutoScaler resource
// with the current status of the resource.
func (c *Controller) syncHandler(ctx context.Context, event WokerPodAutoScalerEvent) error {
now := time.Now()
key := event.key
// Convert the namespace/name string into a distinct namespace and name
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key))
return nil
}
// Get the WorkerPodAutoScaler resource with this namespace/name
workerPodAutoScaler, err := c.workerPodAutoScalersLister.WorkerPodAutoScalers(namespace).Get(name)
if err != nil {
// The WorkerPodAutoScaler resource may no longer exist, in which case we stop processing.
if errors.IsNotFound(err) {
utilruntime.HandleError(fmt.Errorf("workerPodAutoScaler '%s' in work queue no longer exists", key))
c.Queues.Delete(namespace, name)
return nil
}
return err
}
var currentWorkers, availableWorkers int32
deploymentName := workerPodAutoScaler.Spec.DeploymentName
replicaSetName := workerPodAutoScaler.Spec.ReplicaSetName
if deploymentName != "" {
// Get the Deployment with the name specified in WorkerPodAutoScaler.spec
deployment, err := c.deploymentLister.Deployments(workerPodAutoScaler.Namespace).Get(deploymentName)
if errors.IsNotFound(err) | else if err != nil {
return err
}
currentWorkers = *deployment.Spec.Replicas
availableWorkers = deployment.Status.AvailableReplicas
} else if replicaSetName != "" {
// Get the ReplicaSet with the name specified in WorkerPodAutoScaler.spec
replicaSet, err := c.replicaSetList | {
return fmt.Errorf("deployment %s not found in namespace %s",
deploymentName, workerPodAutoScaler.Namespace)
} | conditional_block |
controller.go | an event recorder for recording Event resources to the
// Kubernetes API.
recorder record.EventRecorder
// defaultMaxDisruption
// it is the default value for the maxDisruption in the WPA spec.
// This specifies what percentage of pods can be disrupted in a
// single scale down activity.
// Can be expressed as integers or as a percentage.
defaultMaxDisruption string
// Queues keeps the list of all the queues in memory
// which is used by the core controller and the sqs exporter
// scaleDownDelay after last scale up
// the no of seconds to wait after the last scale up before scaling down
scaleDownDelay time.Duration
Queues *queue.Queues
}
// NewController returns a new sample controller
func NewController(
ctx context.Context,
kubeclientset kubernetes.Interface,
customclientset clientset.Interface,
deploymentInformer appsinformers.DeploymentInformer,
replicaSetInformer appsinformers.ReplicaSetInformer,
workerPodAutoScalerInformer informers.WorkerPodAutoScalerInformer,
defaultMaxDisruption string,
resyncPeriod time.Duration,
scaleDownDelay time.Duration,
queues *queue.Queues) *Controller {
// Create event broadcaster
// Add sample-controller types to the default Kubernetes Scheme so Events can be
// logged for sample-controller types.
utilruntime.Must(samplescheme.AddToScheme(scheme.Scheme))
klog.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})
controller := &Controller{
ctx: ctx,
kubeclientset: kubeclientset,
customclientset: customclientset,
deploymentLister: deploymentInformer.Lister(),
deploymentsSynced: deploymentInformer.Informer().HasSynced,
replicaSetLister: replicaSetInformer.Lister(),
replicaSetsSynced: replicaSetInformer.Informer().HasSynced,
workerPodAutoScalersLister: workerPodAutoScalerInformer.Lister(),
workerPodAutoScalersSynced: workerPodAutoScalerInformer.Informer().HasSynced,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "WorkerPodAutoScalers"),
recorder: recorder,
defaultMaxDisruption: defaultMaxDisruption,
scaleDownDelay: scaleDownDelay,
Queues: queues,
}
klog.V(4).Info("Setting up event handlers")
// Set up an event handler for when WorkerPodAutoScaler resources change
workerPodAutoScalerInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
AddFunc: controller.enqueueAddWorkerPodAutoScaler,
UpdateFunc: func(old, new interface{}) {
controller.enqueueUpdateWorkerPodAutoScaler(new)
},
DeleteFunc: controller.enqueueDeleteWorkerPodAutoScaler,
}, resyncPeriod)
return controller
}
// Run will set up the event handlers for types we are interested in, as well
// as syncing informer caches and starting workers. It will block until stopCh
// is closed, at which point it will shutdown the workqueue and wait for
// workers to finish processing their current work items.
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer c.workqueue.ShutDown()
// Start the informer factories to begin populating the informer caches
klog.V(1).Info("Starting WorkerPodAutoScaler controller")
// Wait for the caches to be synced before starting workers
klog.V(1).Info("Waiting for informer caches to sync")
if ok := cache.WaitForCacheSync(stopCh, c.deploymentsSynced, c.replicaSetsSynced, c.workerPodAutoScalersSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
klog.V(1).Info("Starting workers")
// Launch `threadiness` workers to process WorkerPodAutoScaler resources
for i := 0; i < threadiness; i++ {
// TODO: move from stopCh to context, use: UntilWithContext()
go wait.Until(c.runWorker, time.Second, stopCh)
}
<-stopCh
klog.V(1).Info("Shutting down workers")
return nil
}
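// Editorial sketch (assumption, not shown in this file): how a main() might
// wire the controller together. The informer-factory accessors for the WPA
// informer and the surrounding variables are placeholders.
//
//	stopCh := make(chan struct{})
//	ctrl := NewController(ctx, kubeClient, wpaClient,
//		kubeInformerFactory.Apps().V1().Deployments(),
//		kubeInformerFactory.Apps().V1().ReplicaSets(),
//		wpaInformer, "10%", 30*time.Second, 600*time.Second, queues)
//	kubeInformerFactory.Start(stopCh)
//	if err := ctrl.Run(2, stopCh); err != nil {
//		klog.Fatalf("error running controller: %v", err)
//	}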
// runWorker is a long-running function that will continually call the
// processNextWorkItem function in order to read and process a message on the
// workqueue.
func (c *Controller) runWorker() {
for c.processNextWorkItem(c.ctx) {
}
}
// processNextWorkItem will read a single work item off the workqueue and
// attempt to process it, by calling the syncHandler.
func (c *Controller) | (ctx context.Context) bool {
obj, shutdown := c.workqueue.Get()
if shutdown {
return false
}
// We wrap this block in a func so we can defer c.workqueue.Done.
err := func(obj interface{}) error {
// We call Done here so the workqueue knows we have finished
// processing this item. We also must remember to call Forget if we
// do not want this work item being re-queued. For example, we do
// not call Forget if a transient error occurs, instead the item is
// put back on the workqueue and attempted again after a back-off
// period.
defer c.workqueue.Done(obj)
var ok bool
// We expect WorkerPodAutoScaler events to come off the workqueue. Each
// event carries a namespace/name key. We do this as the delayed nature of the
// workqueue means the items in the informer cache may actually be
// more up to date than when the item was initially put onto the
// workqueue. (PS: the queue no longer holds bare strings, it holds WPA events.)
event, ok := obj.(WokerPodAutoScalerEvent)
if !ok {
// As the item in the workqueue is actually invalid, we call
// Forget here else we'd go into a loop of attempting to
// process a work item that is invalid.
c.workqueue.Forget(obj)
utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
return nil
}
// Run the syncHandler, passing it the event that carries the namespace/name
// key of the WorkerPodAutoScaler resource to be synced.
if err := c.syncHandler(ctx, event); err != nil {
// Put the item back on the workqueue to handle any transient errors.
c.workqueue.AddRateLimited(event)
return fmt.Errorf("error syncing '%s': %s, requeuing", event, err.Error())
}
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.workqueue.Forget(obj)
return nil
}(obj)
if err != nil {
utilruntime.HandleError(err)
return true
}
return true
}
// syncHandler compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the WorkerPodAutoScaler resource
// with the current status of the resource.
func (c *Controller) syncHandler(ctx context.Context, event WokerPodAutoScalerEvent) error {
now := time.Now()
key := event.key
// Convert the namespace/name string into a distinct namespace and name
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key))
return nil
}
// Get the WorkerPodAutoScaler resource with this namespace/name
workerPodAutoScaler, err := c.workerPodAutoScalersLister.WorkerPodAutoScalers(namespace).Get(name)
if err != nil {
// The WorkerPodAutoScaler resource may no longer exist, in which case we stop processing.
if errors.IsNotFound(err) {
utilruntime.HandleError(fmt.Errorf("workerPodAutoScaler '%s' in work queue no longer exists", key))
c.Queues.Delete(namespace, name)
return nil
}
return err
}
var currentWorkers, availableWorkers int32
deploymentName := workerPodAutoScaler.Spec.DeploymentName
replicaSetName := workerPodAutoScaler.Spec.ReplicaSetName
if deploymentName != "" {
// Get the Deployment with the name specified in WorkerPodAutoScaler.spec
deployment, err := c.deploymentLister.Deployments(workerPodAutoScaler.Namespace).Get(deploymentName)
if errors.IsNotFound(err) {
return fmt.Errorf("deployment %s not found in namespace %s",
deploymentName, workerPodAutoScaler.Namespace)
} else if err != nil {
return err
}
currentWorkers = *deployment.Spec.Replicas
availableWorkers = deployment.Status.AvailableReplicas
} else if replicaSetName != "" {
// Get the ReplicaSet with the name specified in WorkerPodAutoScaler.spec
replicaSet, err := c.replicaSetList | processNextWorkItem | identifier_name |
controller.go | an event recorder for recording Event resources to the
// Kubernetes API.
recorder record.EventRecorder
// defaultMaxDisruption
// it is the default value for the maxDisruption in the WPA spec.
// This specifies what percentage of pods can be disrupted in a
// single scale down activity.
// Can be expressed as an integer or as a percentage.
defaultMaxDisruption string
// Queues keeps the list of all the queues in memory,
// which is used by the core controller and the sqs exporter
// scaleDownDelay after last scale up
// the number of seconds to wait after the last scale up before scaling down
scaleDownDelay time.Duration
Queues *queue.Queues
}
// NewController returns a new sample controller
func NewController(
ctx context.Context,
kubeclientset kubernetes.Interface,
customclientset clientset.Interface,
deploymentInformer appsinformers.DeploymentInformer,
replicaSetInformer appsinformers.ReplicaSetInformer,
workerPodAutoScalerInformer informers.WorkerPodAutoScalerInformer,
defaultMaxDisruption string,
resyncPeriod time.Duration,
scaleDownDelay time.Duration,
queues *queue.Queues) *Controller | workerPodAutoScalersLister: workerPodAutoScalerInformer.Lister(),
workerPodAutoScalersSynced: workerPodAutoScalerInformer.Informer().HasSynced,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "WorkerPodAutoScalers"),
recorder: recorder,
defaultMaxDisruption: defaultMaxDisruption,
scaleDownDelay: scaleDownDelay,
Queues: queues,
}
klog.V(4).Info("Setting up event handlers")
// Set up an event handler for when WorkerPodAutoScaler resources change
workerPodAutoScalerInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
AddFunc: controller.enqueueAddWorkerPodAutoScaler,
UpdateFunc: func(old, new interface{}) {
controller.enqueueUpdateWorkerPodAutoScaler(new)
},
DeleteFunc: controller.enqueueDeleteWorkerPodAutoScaler,
}, resyncPeriod)
return controller
}
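// Editorial sketch (assumption): the enqueueAdd/Update/DeleteWorkerPodAutoScaler
// handlers registered above are not part of this excerpt. A typical add handler
// derives the namespace/name key and pushes a WPA event onto the workqueue:
//
//	func (c *Controller) enqueueAddWorkerPodAutoScaler(obj interface{}) {
//		key, err := cache.MetaNamespaceKeyFunc(obj)
//		if err != nil {
//			utilruntime.HandleError(err)
//			return
//		}
//		c.workqueue.Add(WokerPodAutoScalerEvent{key: key})
//	}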
// Run will set up the event handlers for types we are interested in, as well
// as syncing informer caches and starting workers. It will block until stopCh
// is closed, at which point it will shutdown the workqueue and wait for
// workers to finish processing their current work items.
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer c.workqueue.ShutDown()
// Start the informer factories to begin populating the informer caches
klog.V(1).Info("Starting WorkerPodAutoScaler controller")
// Wait for the caches to be synced before starting workers
klog.V(1).Info("Waiting for informer caches to sync")
if ok := cache.WaitForCacheSync(stopCh, c.deploymentsSynced, c.replicaSetsSynced, c.workerPodAutoScalersSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
klog.V(1).Info("Starting workers")
// Launch `threadiness` workers to process WorkerPodAutoScaler resources
for i := 0; i < threadiness; i++ {
// TODO: move from stopCh to context, use: UntilWithContext()
go wait.Until(c.runWorker, time.Second, stopCh)
}
<-stopCh
klog.V(1).Info("Shutting down workers")
return nil
}
// runWorker is a long-running function that will continually call the
// processNextWorkItem function in order to read and process a message on the
// workqueue.
func (c *Controller) runWorker() {
for c.processNextWorkItem(c.ctx) {
}
}
// processNextWorkItem will read a single work item off the workqueue and
// attempt to process it, by calling the syncHandler.
func (c *Controller) processNextWorkItem(ctx context.Context) bool {
obj, shutdown := c.workqueue.Get()
if shutdown {
return false
}
// We wrap this block in a func so we can defer c.workqueue.Done.
err := func(obj interface{}) error {
// We call Done here so the workqueue knows we have finished
// processing this item. We also must remember to call Forget if we
// do not want this work item being re-queued. For example, we do
// not call Forget if a transient error occurs, instead the item is
// put back on the workqueue and attempted again after a back-off
// period.
defer c.workqueue.Done(obj)
var ok bool
// We expect WorkerPodAutoScaler events to come off the workqueue. Each
// event carries a namespace/name key. We do this as the delayed nature of the
// workqueue means the items in the informer cache may actually be
// more up to date than when the item was initially put onto the
// workqueue. (PS: the queue no longer holds bare strings, it holds WPA events.)
event, ok := obj.(WokerPodAutoScalerEvent)
if !ok {
// As the item in the workqueue is actually invalid, we call
// Forget here else we'd go into a loop of attempting to
// process a work item that is invalid.
c.workqueue.Forget(obj)
utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
return nil
}
// Run the syncHandler, passing it the event that carries the namespace/name
// key of the WorkerPodAutoScaler resource to be synced.
if err := c.syncHandler(ctx, event); err != nil {
// Put the item back on the workqueue to handle any transient errors.
c.workqueue.AddRateLimited(event)
return fmt.Errorf("error syncing '%s': %s, requeuing", event, err.Error())
}
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.workqueue.Forget(obj)
return nil
}(obj)
if err != nil {
utilruntime.HandleError(err)
return true
}
return true
}
// syncHandler compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the WorkerPodAutoScaler resource
// with the current status of the resource.
func (c *Controller) syncHandler(ctx context.Context, event WokerPodAutoScalerEvent) error {
now := time.Now()
key := event.key
// Convert the namespace/name string into a distinct namespace and name
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key))
return nil
}
// Get the WorkerPodAutoScaler resource with this namespace/name
workerPodAutoScaler, err := c.workerPodAutoScalersLister.WorkerPodAutoScalers(namespace).Get(name)
if err != nil {
// The WorkerPodAutoScaler resource may no longer exist, in which case we stop processing.
if errors.IsNotFound(err) {
utilruntime.HandleError(fmt.Errorf("workerPodAutoScaler '%s' in work queue no longer exists", key))
c.Queues.Delete(namespace, name)
return nil
}
return err
}
var currentWorkers, availableWorkers int32
deploymentName := workerPodAutoScaler.Spec.DeploymentName
replicaSetName := workerPodAutoScaler.Spec.ReplicaSetName
if deploymentName != "" {
// Get the Deployment with the name specified in WorkerPodAutoScaler.spec
deployment, err := c.deploymentLister.Deployments(workerPodAutoScaler.Namespace).Get(deploymentName)
if errors.IsNotFound(err) {
return fmt.Errorf("deployment %s not found in namespace %s",
deploymentName, workerPodAutoScaler.Namespace)
} else if err != nil {
return err
}
currentWorkers = *deployment.Spec.Replicas
availableWorkers = deployment.Status.AvailableReplicas
} else if replicaSetName != "" {
// Get the ReplicaSet with the name specified in WorkerPodAutoScaler.spec
replicaSet, err := c.replicaSetList | {
// Create event broadcaster
// Add sample-controller types to the default Kubernetes Scheme so Events can be
// logged for sample-controller types.
utilruntime.Must(samplescheme.AddToScheme(scheme.Scheme))
klog.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})
controller := &Controller{
ctx: ctx,
kubeclientset: kubeclientset,
customclientset: customclientset,
deploymentLister: deploymentInformer.Lister(),
deploymentsSynced: deploymentInformer.Informer().HasSynced,
replicaSetLister: replicaSetInformer.Lister(),
replicaSetsSynced: replicaSetInformer.Informer().HasSynced, | identifier_body |
generate.py | float, help='Threshold for tag binarization')
parser.add_argument('-s', action='store', dest='lstm_size', type= int, help='Number of hidden units in LSTM model')
parser.add_argument('-m', action='store', dest='model_file', help='Model File Name')
# parser.add_argument('-d', action='store', dest='gpu', help='GPU to use')
results = parser.parse_args()
TAG_TYPE = results.tag_type
THRESHOLD = results.tag_threshold
LSTM_SIZE = results.lstm_size
# Load single frame feature vectors and attribute/entity/action vectors
if TAG_TYPE == 'predicted':
video_entity_vectors = pickle.load(open("../advanced_tag_models/entity_simple_predicted_tags.pickle", "rb"))
video_action_vectors = pickle.load(open("../advanced_tag_models/action_simple_predicted_tags.pickle", "rb"))
video_attribute_vectors = pickle.load(open("../advanced_tag_models/attribute_simple_predicted_tags.pickle", "rb"))
#video_entity_vectors = pickle.load(open("../advanced_tag_models/entity_vectors_predicted.p", "rb"))
# video_action_vectors = pickle.load(open("../advanced_tag_models/action_vectors_predicted.p", "rb"))
#video_attribute_vectors = pickle.load(open("../advanced_tag_models/attribute_vectors_predicted.p", "rb"))
else:
video_entity_vectors = pickle.load(open("../entity_classifier/entity_vectors_long.pickle", "rb"))
video_action_vectors = pickle.load(open("../action_classifier/action_vectors_long.pickle", "rb"))
video_attribute_vectors = pickle.load(open("../attribute_classifier/attribute_vectors_long.pickle", "rb"))
video_frame_features = pickle.load(open("../frame_features/average_frame_features.pickle", "rb"))
# Remove videos for which clean captions aren't available
# available_vids = set(video_entity_vectors.keys()).intersection(set(video_action_vectors.keys()).intersection(set(video_attribute_vectors.keys()).intersection(set(video_frame_features.keys()))))
# test = list(set(test).intersection(available_vids))
# Read feature sizes from data
NUM_ENTITIES = video_entity_vectors[video_entity_vectors.keys()[0]].shape[0]
NUM_ACTIONS = video_action_vectors[video_action_vectors.keys()[0]].shape[0]
NUM_ATTRIBUTES = video_attribute_vectors[video_attribute_vectors.keys()[0]].shape[0]
# NUM_FEATURES = video_frame_features[video_frame_features.keys()[0]].shape[1]
NUM_FEATURES = video_frame_features[video_frame_features.keys()[0]].shape[0]
X_ent_test = []
X_act_test = []
X_att_test = []
X_vgg_test = []
X_prev_words_begin = []
vocabulary = pickle.load(open("vocabulary_10.p", "rb"))
# Turn vocabulary into list of words
vocabulary_words = [x[1] for x in vocabulary]
#Load the model with pre-trained weights
TRUNCATED_CAPTION_LEN = 15 + 2
NUM_PREV_WORDS = TRUNCATED_CAPTION_LEN - 1
EMBEDDING_DIM = 256
VOCABULARY_SIZE = len(vocabulary_words)
# Load the video features
for video in test:
X_ent_test.append(np.where(np.array(video_entity_vectors.get(video, np.zeros(NUM_ENTITIES))) > THRESHOLD, 1, 0))
X_act_test.append(np.where(np.array(video_action_vectors.get(video, np.zeros(NUM_ACTIONS))) > THRESHOLD, 1, 0))
X_att_test.append(np.where(np.array(video_attribute_vectors.get(video, np.zeros(NUM_ATTRIBUTES))) > THRESHOLD, 1, 0))
# X_vgg_test.append(np.array(video_frame_features[video][0]))
X_vgg_test.append(np.array(video_frame_features[video]))
X_prev_words_begin.append([vocabulary_words.index("<bos>")] + [0]*(NUM_PREV_WORDS - 1))
X_ent_test = np.array(X_ent_test)
X_act_test = np.array(X_act_test)
X_att_test = np.array(X_att_test)
X_vgg_test = np.array(X_vgg_test)
X_prev_words_begin = np.array(X_prev_words_begin)
beam_model = get_language_model(NUM_PREV_WORDS, VOCABULARY_SIZE, EMBEDDING_DIM, NUM_FEATURES, NUM_ENTITIES, NUM_ACTIONS, NUM_ATTRIBUTES, LSTM_SIZE, 0.0, 0.0) # Dropout is inactive during inference
beam_model.load_weights("../models/"+results.model_file+".h5")
#preds = beam_model.predict([X_ent_test, X_act_test, X_att_test, X_vgg_test, X_prev_words_begin])
# helper function to sample an index from a probability array
def sample(preds, temperature=1.0):
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
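# Editorial example (not called anywhere in the original script): a tiny demo of
# the temperature parameter of sample() above. The probability vector is made up;
# lower temperatures concentrate the draw around the argmax, higher ones flatten it.
def _demo_sample_temperature():
    demo_probs = [0.1, 0.2, 0.7]
    return sample(demo_probs, temperature=0.5), sample(demo_probs, temperature=2.0)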
results_folder = "./"
# # Load All Captions
fname = folder + "cleaned_descriptions.csv"
with open(fname) as f:
content = f.readlines()
all_captions = [(x.strip().split(",")[0],x.strip().split(",")[1]) for x in content]
# # Write correct caption file
correct_captions = open(results_folder + "annotations/correct_captions_ref.json","w")
correct_annotations = {}
correct_annotations['info'] = {'description': 'YouTube2Text', 'url': 'http://upplysingaoflun.ecn.purdue.edu/~yu239/datasets/youtubeclips.zip', 'version': '1.0', 'year': 2013, 'contributor': 'Guadarrama et al', u'date_created': u'2013-01-27 09:11:52.357475'}
correct_annotations['images'] = []
correct_annotations['licenses'] = [{'url': 'http://creativecommons.org/licenses/by-nc-sa/2.0/', 'id': 1, 'name': 'Attribution-NonCommercial-ShareAlike License'}]
correct_annotations['type'] = "captions"
correct_annotations['annotations'] = []
for video in test:
correct_annotations['images'].append({'license': 1, 'url': 'https://www.youtube.com/watch?v=' + video, 'file_name': video+".avi", 'height': 360, 'width': 640, 'date_captured': u'2013-11-14 11:18:45', 'id': test.index(video)})
count = 0
for video,caption in all_captions:
if video in test:
correct_annotations['annotations'].append({"caption": caption, "id": count, "image_id": test.index(video), "vid_id": video})
count +=1
correct_captions.write(json.dumps(correct_annotations, indent=4, sort_keys=True))
correct_captions.close()
def indices(k):
combos = []
for x in range(k):
for y in range(k):
combos.append((x,y))
return combos
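# e.g. indices(2) -> [(0, 0), (0, 1), (1, 0), (1, 1)]: every (version, lookahead)
# pair that beam_search scores when expanding the k running hypotheses.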
def greedy_search(captioning_model, prev_words_input, other_inputs):
for itr in range(NUM_PREV_WORDS-1):
|
return prev_words_input
def beam_search(captioning_model, prev_words_input, other_inputs, k):
top_k_predictions = [copy.deepcopy(prev_words_input) for _ in range(k)]
top_k_score = np.array([[0.0]*top_k_predictions[0].shape[0]]*k)
# First Iteration
predictions = captioning_model.predict(other_inputs + [prev_words_input])
for idx,video in enumerate(test):
for version in range(k):
top_k_predictions[version][idx][1] = np.argsort(predictions[idx])[-(version+1)]
top_k_score[version][idx] = np.sort(predictions[idx])[-(version+1)]
for itr in range(2,NUM_PREV_WORDS):
top_k_copy = copy.deepcopy(top_k_predictions)
print top_k_predictions[0][0]
print top_k_predictions[1][0]
print top_k_predictions[2][0]
predictions = [captioning_model.predict(other_inputs + [top_k_predictions[version]]) for version in range(k)]
for idx,video in enumerate(test):
scores = []
for version,lookahead in indices(k):
scores.append(np.sort(predictions[version][idx])[-(lookahead+1)]*top_k_score[version][idx])
scores = np.array(scores)
top_score_indices = np.argsort(scores)[-k:]
for num, top_id in enumerate(top_score_indices):
version, lookahead = indices(k)[top_id]
top_k_predictions[num][idx][itr] = np.argsort(predictions[version][idx])[-(lookahead+1)]
top_k_predictions[num][idx][:itr] = top_k_copy[version][idx][:itr]
top_k_score[num][idx] = scores[top_id]
return top_k_predictions, top_k_score
preds, scores = beam_search(beam_model, X_prev_words_begin, [X_ent_test, X_act_test, X_att_test, X_vgg_test], 3)
print len(preds), "x", preds[0].shape
print len(scores),"x", scores[0].shape
preds = preds | predictions = captioning_model.predict(other_inputs + [prev_words_input])
for idx,video in enumerate(test):
prev_words_input[idx][itr+1] = np.argmax(predictions[idx]) | conditional_block |
generate.py | float, help='Threshold for tag binarization')
parser.add_argument('-s', action='store', dest='lstm_size', type= int, help='Number of hidden units in LSTM model')
parser.add_argument('-m', action='store', dest='model_file', help='Model File Name')
# parser.add_argument('-d', action='store', dest='gpu', help='GPU to use')
results = parser.parse_args() |
TAG_TYPE = results.tag_type
THRESHOLD = results.tag_threshold
LSTM_SIZE = results.lstm_size
# Load single frame feature vectors and attribute/entity/action vectors
if TAG_TYPE == 'predicted':
video_entity_vectors = pickle.load(open("../advanced_tag_models/entity_simple_predicted_tags.pickle", "rb"))
video_action_vectors = pickle.load(open("../advanced_tag_models/action_simple_predicted_tags.pickle", "rb"))
video_attribute_vectors = pickle.load(open("../advanced_tag_models/attribute_simple_predicted_tags.pickle", "rb"))
#video_entity_vectors = pickle.load(open("../advanced_tag_models/entity_vectors_predicted.p", "rb"))
# video_action_vectors = pickle.load(open("../advanced_tag_models/action_vectors_predicted.p", "rb"))
#video_attribute_vectors = pickle.load(open("../advanced_tag_models/attribute_vectors_predicted.p", "rb"))
else:
video_entity_vectors = pickle.load(open("../entity_classifier/entity_vectors_long.pickle", "rb"))
video_action_vectors = pickle.load(open("../action_classifier/action_vectors_long.pickle", "rb"))
video_attribute_vectors = pickle.load(open("../attribute_classifier/attribute_vectors_long.pickle", "rb"))
video_frame_features = pickle.load(open("../frame_features/average_frame_features.pickle", "rb"))
# Remove videos for which clean captions aren't available
# available_vids = set(video_entity_vectors.keys()).intersection(set(video_action_vectors.keys()).intersection(set(video_attribute_vectors.keys()).intersection(set(video_frame_features.keys()))))
# test = list(set(test).intersection(available_vids))
# Read feature sizes from data
NUM_ENTITIES = video_entity_vectors[video_entity_vectors.keys()[0]].shape[0]
NUM_ACTIONS = video_action_vectors[video_action_vectors.keys()[0]].shape[0]
NUM_ATTRIBUTES = video_attribute_vectors[video_attribute_vectors.keys()[0]].shape[0]
# NUM_FEATURES = video_frame_features[video_frame_features.keys()[0]].shape[1]
NUM_FEATURES = video_frame_features[video_frame_features.keys()[0]].shape[0]
X_ent_test = []
X_act_test = []
X_att_test = []
X_vgg_test = []
X_prev_words_begin = []
vocabulary = pickle.load(open("vocabulary_10.p", "rb"))
# Turn vocabulary into list of words
vocabulary_words = [x[1] for x in vocabulary]
#Load the model with pre-trained weights
TRUNCATED_CAPTION_LEN = 15 + 2
NUM_PREV_WORDS = TRUNCATED_CAPTION_LEN - 1
EMBEDDING_DIM = 256
VOCABULARY_SIZE = len(vocabulary_words)
# Load the video features
for video in test:
X_ent_test.append(np.where(np.array(video_entity_vectors.get(video, np.zeros(NUM_ENTITIES))) > THRESHOLD, 1, 0))
X_act_test.append(np.where(np.array(video_action_vectors.get(video, np.zeros(NUM_ACTIONS))) > THRESHOLD, 1, 0))
X_att_test.append(np.where(np.array(video_attribute_vectors.get(video, np.zeros(NUM_ATTRIBUTES))) > THRESHOLD, 1, 0))
# X_vgg_test.append(np.array(video_frame_features[video][0]))
X_vgg_test.append(np.array(video_frame_features[video]))
X_prev_words_begin.append([vocabulary_words.index("<bos>")] + [0]*(NUM_PREV_WORDS - 1))
X_ent_test = np.array(X_ent_test)
X_act_test = np.array(X_act_test)
X_att_test = np.array(X_att_test)
X_vgg_test = np.array(X_vgg_test)
X_prev_words_begin = np.array(X_prev_words_begin)
beam_model = get_language_model(NUM_PREV_WORDS, VOCABULARY_SIZE, EMBEDDING_DIM, NUM_FEATURES, NUM_ENTITIES, NUM_ACTIONS, NUM_ATTRIBUTES, LSTM_SIZE, 0.0, 0.0) # Dropout is inactive during inference
beam_model.load_weights("../models/"+results.model_file+".h5")
#preds = beam_model.predict([X_ent_test, X_act_test, X_att_test, X_vgg_test, X_prev_words_begin])
# helper function to sample an index from a probability array
def sample(preds, temperature=1.0):
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
results_folder = "./"
# # Load All Captions
fname = folder + "cleaned_descriptions.csv"
with open(fname) as f:
content = f.readlines()
all_captions = [(x.strip().split(",")[0],x.strip().split(",")[1]) for x in content]
# # Write correct caption file
correct_captions = open(results_folder + "annotations/correct_captions_ref.json","w")
correct_annotations = {}
correct_annotations['info'] = {'description': 'YouTube2Text', 'url': 'http://upplysingaoflun.ecn.purdue.edu/~yu239/datasets/youtubeclips.zip', 'version': '1.0', 'year': 2013, 'contributor': 'Guadarrama et al', u'date_created': u'2013-01-27 09:11:52.357475'}
correct_annotations['images'] = []
correct_annotations['licenses'] = [{'url': 'http://creativecommons.org/licenses/by-nc-sa/2.0/', 'id': 1, 'name': 'Attribution-NonCommercial-ShareAlike License'}]
correct_annotations['type'] = "captions"
correct_annotations['annotations'] = []
for video in test:
correct_annotations['images'].append({'license': 1, 'url': 'https://www.youtube.com/watch?v=' + video, 'file_name': video+".avi", 'height': 360, 'width': 640, 'date_captured': u'2013-11-14 11:18:45', 'id': test.index(video)})
count = 0
for video,caption in all_captions:
if video in test:
correct_annotations['annotations'].append({"caption": caption, "id": count, "image_id": test.index(video), "vid_id": video})
count +=1
correct_captions.write(json.dumps(correct_annotations, indent=4, sort_keys=True))
correct_captions.close()
def indices(k):
combos = []
for x in range(k):
for y in range(k):
combos.append((x,y))
return combos
def greedy_search(captioning_model, prev_words_input, other_inputs):
for itr in range(NUM_PREV_WORDS-1):
predictions = captioning_model.predict(other_inputs + [prev_words_input])
for idx,video in enumerate(test):
prev_words_input[idx][itr+1] = np.argmax(predictions[idx])
return prev_words_input
def beam_search(captioning_model, prev_words_input, other_inputs, k):
top_k_predictions = [copy.deepcopy(prev_words_input) for _ in range(k)]
top_k_score = np.array([[0.0]*top_k_predictions[0].shape[0]]*k)
# First Iteration
predictions = captioning_model.predict(other_inputs + [prev_words_input])
for idx,video in enumerate(test):
for version in range(k):
top_k_predictions[version][idx][1] = np.argsort(predictions[idx])[-(version+1)]
top_k_score[version][idx] = np.sort(predictions[idx])[-(version+1)]
for itr in range(2,NUM_PREV_WORDS):
top_k_copy = copy.deepcopy(top_k_predictions)
print top_k_predictions[0][0]
print top_k_predictions[1][0]
print top_k_predictions[2][0]
predictions = [captioning_model.predict(other_inputs + [top_k_predictions[version]]) for version in range(k)]
for idx,video in enumerate(test):
scores = []
for version,lookahead in indices(k):
scores.append(np.sort(predictions[version][idx])[-(lookahead+1)]*top_k_score[version][idx])
scores = np.array(scores)
top_score_indices = np.argsort(scores)[-k:]
for num, top_id in enumerate(top_score_indices):
version, lookahead = indices(k)[top_id]
top_k_predictions[num][idx][itr] = np.argsort(predictions[version][idx])[-(lookahead+1)]
top_k_predictions[num][idx][:itr] = top_k_copy[version][idx][:itr]
top_k_score[num][idx] = scores[top_id]
return top_k_predictions, top_k_score
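# Editorial note: the beam score above multiplies raw per-step probabilities, so it
# decays towards zero for long captions. A common alternative (an assumption here,
# not what the original code does) is to accumulate log-probabilities instead:
def _log_beam_score(step_probs):
    # step_probs: the per-step probabilities collected along one beam
    return float(np.sum(np.log(np.asarray(step_probs) + 1e-12)))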
preds, scores = beam_search(beam_model, X_prev_words_begin, [X_ent_test, X_act_test, X_att_test, X_vgg_test], 3)
print len(preds), "x", preds[0].shape
print len(scores),"x", scores[0].shape
preds = | random_line_split |
|
generate.py | float, help='Threshold for tag binarization')
parser.add_argument('-s', action='store', dest='lstm_size', type= int, help='Number of hidden units in LSTM model')
parser.add_argument('-m', action='store', dest='model_file', help='Model File Name')
# parser.add_argument('-d', action='store', dest='gpu', help='GPU to use')
results = parser.parse_args()
TAG_TYPE = results.tag_type
THRESHOLD = results.tag_threshold
LSTM_SIZE = results.lstm_size
# Load single frame feature vectors and attribute/entity/action vectors
if TAG_TYPE == 'predicted':
video_entity_vectors = pickle.load(open("../advanced_tag_models/entity_simple_predicted_tags.pickle", "rb"))
video_action_vectors = pickle.load(open("../advanced_tag_models/action_simple_predicted_tags.pickle", "rb"))
video_attribute_vectors = pickle.load(open("../advanced_tag_models/attribute_simple_predicted_tags.pickle", "rb"))
#video_entity_vectors = pickle.load(open("../advanced_tag_models/entity_vectors_predicted.p", "rb"))
# video_action_vectors = pickle.load(open("../advanced_tag_models/action_vectors_predicted.p", "rb"))
#video_attribute_vectors = pickle.load(open("../advanced_tag_models/attribute_vectors_predicted.p", "rb"))
else:
video_entity_vectors = pickle.load(open("../entity_classifier/entity_vectors_long.pickle", "rb"))
video_action_vectors = pickle.load(open("../action_classifier/action_vectors_long.pickle", "rb"))
video_attribute_vectors = pickle.load(open("../attribute_classifier/attribute_vectors_long.pickle", "rb"))
video_frame_features = pickle.load(open("../frame_features/average_frame_features.pickle", "rb"))
# Remove videos for which clean captions aren't available
# available_vids = set(video_entity_vectors.keys()).intersection(set(video_action_vectors.keys()).intersection(set(video_attribute_vectors.keys()).intersection(set(video_frame_features.keys()))))
# test = list(set(test).intersection(available_vids))
# Read feature sizes from data
NUM_ENTITIES = video_entity_vectors[video_entity_vectors.keys()[0]].shape[0]
NUM_ACTIONS = video_action_vectors[video_action_vectors.keys()[0]].shape[0]
NUM_ATTRIBUTES = video_attribute_vectors[video_attribute_vectors.keys()[0]].shape[0]
# NUM_FEATURES = video_frame_features[video_frame_features.keys()[0]].shape[1]
NUM_FEATURES = video_frame_features[video_frame_features.keys()[0]].shape[0]
X_ent_test = []
X_act_test = []
X_att_test = []
X_vgg_test = []
X_prev_words_begin = []
vocabulary = pickle.load(open("vocabulary_10.p", "rb"))
# Turn vocabulary into list of words
vocabulary_words = [x[1] for x in vocabulary]
#Load the model with pre-trained weights
TRUNCATED_CAPTION_LEN = 15 + 2
NUM_PREV_WORDS = TRUNCATED_CAPTION_LEN - 1
EMBEDDING_DIM = 256
VOCABULARY_SIZE = len(vocabulary_words)
# Load the video features
for video in test:
X_ent_test.append(np.where(np.array(video_entity_vectors.get(video, np.zeros(NUM_ENTITIES))) > THRESHOLD, 1, 0))
X_act_test.append(np.where(np.array(video_action_vectors.get(video, np.zeros(NUM_ACTIONS))) > THRESHOLD, 1, 0))
X_att_test.append(np.where(np.array(video_attribute_vectors.get(video, np.zeros(NUM_ATTRIBUTES))) > THRESHOLD, 1, 0))
# X_vgg_test.append(np.array(video_frame_features[video][0]))
X_vgg_test.append(np.array(video_frame_features[video]))
X_prev_words_begin.append([vocabulary_words.index("<bos>")] + [0]*(NUM_PREV_WORDS - 1))
X_ent_test = np.array(X_ent_test)
X_act_test = np.array(X_act_test)
X_att_test = np.array(X_att_test)
X_vgg_test = np.array(X_vgg_test)
X_prev_words_begin = np.array(X_prev_words_begin)
beam_model = get_language_model(NUM_PREV_WORDS, VOCABULARY_SIZE, EMBEDDING_DIM, NUM_FEATURES, NUM_ENTITIES, NUM_ACTIONS, NUM_ATTRIBUTES, LSTM_SIZE, 0.0, 0.0) # Dropout is inactive during inference
beam_model.load_weights("../models/"+results.model_file+".h5")
#preds = beam_model.predict([X_ent_test, X_act_test, X_att_test, X_vgg_test, X_prev_words_begin])
# helper function to sample an index from a probability array
def sample(preds, temperature=1.0):
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
results_folder = "./"
# # Load All Captions
fname = folder + "cleaned_descriptions.csv"
with open(fname) as f:
content = f.readlines()
all_captions = [(x.strip().split(",")[0],x.strip().split(",")[1]) for x in content]
# # Write correct caption file
correct_captions = open(results_folder + "annotations/correct_captions_ref.json","w")
correct_annotations = {}
correct_annotations['info'] = {'description': 'YouTube2Text', 'url': 'http://upplysingaoflun.ecn.purdue.edu/~yu239/datasets/youtubeclips.zip', 'version': '1.0', 'year': 2013, 'contributor': 'Guadarrama et al', u'date_created': u'2013-01-27 09:11:52.357475'}
correct_annotations['images'] = []
correct_annotations['licenses'] = [{'url': 'http://creativecommons.org/licenses/by-nc-sa/2.0/', 'id': 1, 'name': 'Attribution-NonCommercial-ShareAlike License'}]
correct_annotations['type'] = "captions"
correct_annotations['annotations'] = []
for video in test:
correct_annotations['images'].append({'license': 1, 'url': 'https://www.youtube.com/watch?v=' + video, 'file_name': video+".avi", 'height': 360, 'width': 640, 'date_captured': u'2013-11-14 11:18:45', 'id': test.index(video)})
count = 0
for video,caption in all_captions:
if video in test:
correct_annotations['annotations'].append({"caption": caption, "id": count, "image_id": test.index(video), "vid_id": video})
count +=1
correct_captions.write(json.dumps(correct_annotations, indent=4, sort_keys=True))
correct_captions.close()
def indices(k):
combos = []
for x in range(k):
for y in range(k):
combos.append((x,y))
return combos
def | (captioning_model, prev_words_input, other_inputs):
for itr in range(NUM_PREV_WORDS-1):
predictions = captioning_model.predict(other_inputs + [prev_words_input])
for idx,video in enumerate(test):
prev_words_input[idx][itr+1] = np.argmax(predictions[idx])
return prev_words_input
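# Editorial note: greedy_search keeps only the single most probable word at every
# step, while beam_search below tracks the k best partial captions. An illustrative
# call (mirroring the beam_search call at the bottom of the script) would be:
# captions = greedy_search(beam_model, copy.deepcopy(X_prev_words_begin),
#                          [X_ent_test, X_act_test, X_att_test, X_vgg_test])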
def beam_search(captioning_model, prev_words_input, other_inputs, k):
top_k_predictions = [copy.deepcopy(prev_words_input) for _ in range(k)]
top_k_score = np.array([[0.0]*top_k_predictions[0].shape[0]]*k)
# First Iteration
predictions = captioning_model.predict(other_inputs + [prev_words_input])
for idx,video in enumerate(test):
for version in range(k):
top_k_predictions[version][idx][1] = np.argsort(predictions[idx])[-(version+1)]
top_k_score[version][idx] = np.sort(predictions[idx])[-(version+1)]
for itr in range(2,NUM_PREV_WORDS):
top_k_copy = copy.deepcopy(top_k_predictions)
print top_k_predictions[0][0]
print top_k_predictions[1][0]
print top_k_predictions[2][0]
predictions = [captioning_model.predict(other_inputs + [top_k_predictions[version]]) for version in range(k)]
for idx,video in enumerate(test):
scores = []
for version,lookahead in indices(k):
scores.append(np.sort(predictions[version][idx])[-(lookahead+1)]*top_k_score[version][idx])
scores = np.array(scores)
top_score_indices = np.argsort(scores)[-k:]
for num, top_id in enumerate(top_score_indices):
version, lookahead = indices(k)[top_id]
top_k_predictions[num][idx][itr] = np.argsort(predictions[version][idx])[-(lookahead+1)]
top_k_predictions[num][idx][:itr] = top_k_copy[version][idx][:itr]
top_k_score[num][idx] = scores[top_id]
return top_k_predictions, top_k_score
preds, scores = beam_search(beam_model, X_prev_words_begin, [X_ent_test, X_act_test, X_att_test, X_vgg_test], 3)
print len(preds), "x", preds[0].shape
print len(scores),"x", scores[0].shape
preds | greedy_search | identifier_name |
generate.py | float, help='Threshold for tag binarization')
parser.add_argument('-s', action='store', dest='lstm_size', type= int, help='Number of hidden units in LSTM model')
parser.add_argument('-m', action='store', dest='model_file', help='Model File Name')
# parser.add_argument('-d', action='store', dest='gpu', help='GPU to use')
results = parser.parse_args()
TAG_TYPE = results.tag_type
THRESHOLD = results.tag_threshold
LSTM_SIZE = results.lstm_size
# Load single frame feature vectors and attribute/entity/action vectors
if TAG_TYPE == 'predicted':
video_entity_vectors = pickle.load(open("../advanced_tag_models/entity_simple_predicted_tags.pickle", "rb"))
video_action_vectors = pickle.load(open("../advanced_tag_models/action_simple_predicted_tags.pickle", "rb"))
video_attribute_vectors = pickle.load(open("../advanced_tag_models/attribute_simple_predicted_tags.pickle", "rb"))
#video_entity_vectors = pickle.load(open("../advanced_tag_models/entity_vectors_predicted.p", "rb"))
# video_action_vectors = pickle.load(open("../advanced_tag_models/action_vectors_predicted.p", "rb"))
#video_attribute_vectors = pickle.load(open("../advanced_tag_models/attribute_vectors_predicted.p", "rb"))
else:
video_entity_vectors = pickle.load(open("../entity_classifier/entity_vectors_long.pickle", "rb"))
video_action_vectors = pickle.load(open("../action_classifier/action_vectors_long.pickle", "rb"))
video_attribute_vectors = pickle.load(open("../attribute_classifier/attribute_vectors_long.pickle", "rb"))
video_frame_features = pickle.load(open("../frame_features/average_frame_features.pickle", "rb"))
# Remove videos for which clean captions aren't available
# available_vids = set(video_entity_vectors.keys()).intersection(set(video_action_vectors.keys()).intersection(set(video_attribute_vectors.keys()).intersection(set(video_frame_features.keys()))))
# test = list(set(test).intersection(available_vids))
# Read feature sizes from data
NUM_ENTITIES = video_entity_vectors[video_entity_vectors.keys()[0]].shape[0]
NUM_ACTIONS = video_action_vectors[video_action_vectors.keys()[0]].shape[0]
NUM_ATTRIBUTES = video_attribute_vectors[video_attribute_vectors.keys()[0]].shape[0]
# NUM_FEATURES = video_frame_features[video_frame_features.keys()[0]].shape[1]
NUM_FEATURES = video_frame_features[video_frame_features.keys()[0]].shape[0]
X_ent_test = []
X_act_test = []
X_att_test = []
X_vgg_test = []
X_prev_words_begin = []
vocabulary = pickle.load(open("vocabulary_10.p", "rb"))
# Turn vocabulary into list of words
vocabulary_words = [x[1] for x in vocabulary]
#Load the model with pre-trained weights
TRUNCATED_CAPTION_LEN = 15 + 2
NUM_PREV_WORDS = TRUNCATED_CAPTION_LEN - 1
EMBEDDING_DIM = 256
VOCABULARY_SIZE = len(vocabulary_words)
# Load the video features
for video in test:
X_ent_test.append(np.where(np.array(video_entity_vectors.get(video, np.zeros(NUM_ENTITIES))) > THRESHOLD, 1, 0))
X_act_test.append(np.where(np.array(video_action_vectors.get(video, np.zeros(NUM_ACTIONS))) > THRESHOLD, 1, 0))
X_att_test.append(np.where(np.array(video_attribute_vectors.get(video, np.zeros(NUM_ATTRIBUTES))) > THRESHOLD, 1, 0))
# X_vgg_test.append(np.array(video_frame_features[video][0]))
X_vgg_test.append(np.array(video_frame_features[video]))
X_prev_words_begin.append([vocabulary_words.index("<bos>")] + [0]*(NUM_PREV_WORDS - 1))
X_ent_test = np.array(X_ent_test)
X_act_test = np.array(X_act_test)
X_att_test = np.array(X_att_test)
X_vgg_test = np.array(X_vgg_test)
X_prev_words_begin = np.array(X_prev_words_begin)
beam_model = get_language_model(NUM_PREV_WORDS, VOCABULARY_SIZE, EMBEDDING_DIM, NUM_FEATURES, NUM_ENTITIES, NUM_ACTIONS, NUM_ATTRIBUTES, LSTM_SIZE, 0.0, 0.0) # Dropout is inactive during inference
beam_model.load_weights("../models/"+results.model_file+".h5")
#preds = beam_model.predict([X_ent_test, X_act_test, X_att_test, X_vgg_test, X_prev_words_begin])
# helper function to sample an index from a probability array
def sample(preds, temperature=1.0):
|
results_folder = "./"
# # Load All Captions
fname = folder + "cleaned_descriptions.csv"
with open(fname) as f:
content = f.readlines()
all_captions = [(x.strip().split(",")[0],x.strip().split(",")[1]) for x in content]
# # Write correct caption file
correct_captions = open(results_folder + "annotations/correct_captions_ref.json","w")
correct_annotations = {}
correct_annotations['info'] = {'description': 'YouTube2Text', 'url': 'http://upplysingaoflun.ecn.purdue.edu/~yu239/datasets/youtubeclips.zip', 'version': '1.0', 'year': 2013, 'contributor': 'Guadarrama et al', u'date_created': u'2013-01-27 09:11:52.357475'}
correct_annotations['images'] = []
correct_annotations['licenses'] = [{'url': 'http://creativecommons.org/licenses/by-nc-sa/2.0/', 'id': 1, 'name': 'Attribution-NonCommercial-ShareAlike License'}]
correct_annotations['type'] = "captions"
correct_annotations['annotations'] = []
for video in test:
correct_annotations['images'].append({'license': 1, 'url': 'https://www.youtube.com/watch?v=' + video, 'file_name': video+".avi", 'height': 360, 'width': 640, 'date_captured': u'2013-11-14 11:18:45', 'id': test.index(video)})
count = 0
for video,caption in all_captions:
if video in test:
correct_annotations['annotations'].append({"caption": caption, "id": count, "image_id": test.index(video), "vid_id": video})
count +=1
correct_captions.write(json.dumps(correct_annotations, indent=4, sort_keys=True))
correct_captions.close()
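# The reference file written above follows the COCO-captions layout, roughly:
# {
#   "info": {...}, "licenses": [...], "type": "captions",
#   "images":      [{"id": 0, "file_name": "<video>.avi", ...}, ...],
#   "annotations": [{"image_id": 0, "id": 0, "caption": "..."}, ...]
# }
# which is the structure the coco-caption evaluation scripts expect.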
def indices(k):
combos = []
for x in range(k):
for y in range(k):
combos.append((x,y))
return combos
def greedy_search(captioning_model, prev_words_input, other_inputs):
for itr in range(NUM_PREV_WORDS-1):
predictions = captioning_model.predict(other_inputs + [prev_words_input])
for idx,video in enumerate(test):
prev_words_input[idx][itr+1] = np.argmax(predictions[idx])
return prev_words_input
def beam_search(captioning_model, prev_words_input, other_inputs, k):
top_k_predictions = [copy.deepcopy(prev_words_input) for _ in range(k)]
top_k_score = np.array([[0.0]*top_k_predictions[0].shape[0]]*k)
# First Iteration
predictions = captioning_model.predict(other_inputs + [prev_words_input])
for idx,video in enumerate(test):
for version in range(k):
top_k_predictions[version][idx][1] = np.argsort(predictions[idx])[-(version+1)]
top_k_score[version][idx] = np.sort(predictions[idx])[-(version+1)]
for itr in range(2,NUM_PREV_WORDS):
top_k_copy = copy.deepcopy(top_k_predictions)
print top_k_predictions[0][0]
print top_k_predictions[1][0]
print top_k_predictions[2][0]
predictions = [captioning_model.predict(other_inputs + [top_k_predictions[version]]) for version in range(k)]
for idx,video in enumerate(test):
scores = []
for version,lookahead in indices(k):
scores.append(np.sort(predictions[version][idx])[-(lookahead+1)]*top_k_score[version][idx])
scores = np.array(scores)
top_score_indices = np.argsort(scores)[-k:]
for num, top_id in enumerate(top_score_indices):
version, lookahead = indices(k)[top_id]
top_k_predictions[num][idx][itr] = np.argsort(predictions[version][idx])[-(lookahead+1)]
top_k_predictions[num][idx][:itr] = top_k_copy[version][idx][:itr]
top_k_score[num][idx] = scores[top_id]
return top_k_predictions, top_k_score
preds, scores = beam_search(beam_model, X_prev_words_begin, [X_ent_test, X_act_test, X_att_test, X_vgg_test], 3)
print len(preds), "x", preds[0].shape
print len(scores),"x", scores[0].shape
preds | preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas) | identifier_body |
jiayuan.py | ,callback=self.get_main_info)
def get_main_info(self,response):# parse the urls on the search result page
#info = response.body.decode("utf-8") # after logging in you can inspect the login response, e.g. json.loads(
# for url in self.start_urls:
time.sleep(1)
print("current url",response.url)
print('reloading url')
self.driver.get(response.url)
self.driver.implicitly_wait(3)
user_list = self.driver.find_elements_by_xpath('/html//ul[@id="normal_user_container"]/li//div[@class="user_name"]/a[@class="os_stat"]')# yields several li tags
if user_list==[]:
print("user_list is empty, the page parsing failed")
#print("user_list",type(user_list),user_list)
url_details = []# urls of the profile detail pages
for user in user_list:
main_url_main = user.get_attribute("href")
print("profile page url",main_url_main)
url_details.append(main_url_main)
# self.redis_pipe.rpush("p",main_url_main)# optionally also push the detail urls into redis
# self.redis_pipe.execute()
print("number of detail urls",len(url_details))
if url_details!=[]:
for url in url_details:
yield Request(url=url,cookies=self.cookies,callback=self.get_details)# parse the member details
# yield item
def get_details(self,response):
'''
Example of the raw text (a Python str) scraped from the profile page; the site labels below are kept in the original Chinese:
年 龄:
26-29岁之间
身 高:
169-185厘米
民 族:
汉族
学 历:
不限
相 册:
有照片
婚姻状况:
未婚
居 住 地:
湖北十堰
诚 信:
不限
Text of this shape is converted into a dict such as {'学历': '不限', '婚姻状况': '未婚', '居住地': '湖北十堰', '相册': '有照片', '身高': '169-185厘米', '民族': '汉族', '诚信': '不限', '年龄': '26-29岁之间'} so it can be stored in the database easily.
'''
pass
def parse(str1):
temp_list = str1.split('\n')
result={}
result_str=''
# temp_dict=[]  # result_dict exists because some sections contain several tags that have to be merged
# result_dict = {}  # the result of merging several dicts
if len(temp_list)>1:# more than one line means this section has values, otherwise it was left blank
for i in range(len(temp_list)):
if i%2==0:
result[temp_list[i].replace(" ", "").replace(":", '')] = temp_list[i+1]
return result
# otherwise return the original str
else:
result_str = str1
return result_str
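# Example based on the docstring above:
# parse("学 历:\n不限\n婚姻状况:\n未婚") -> {'学历': '不限', '婚姻状况': '未婚'}
# while a single-line string is returned unchanged.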
item = JiayuanItem()
self.driver.get(response.url)
self.driver.implicitly_wait(3)
print('opening the browser')
print("current url",response.url)
age_info = self.driver.find_element_by_xpath('/html//h6[@class="member_name"]').text
person_id = response.url[response.url.rfind('/')+1:response.url.index('?')]
print("age / address info",type(age_info),age_info)
address = self.driver.find_elements_by_xpath('/html//h6[@class="member_name"]/a')# the text of several a tags
str_address=''
str_sheng=address[0].get_attribute("text")
str_shi=address[1].get_attribute("text")
print("member address",str_sheng+'sssss'+str_shi)
'''
Personal information
'''
person_info = self.driver.find_elements_by_xpath('/html//ul[@class="member_info_list fn-clear"]')
person_dict={}
for i in person_info:
person_dict = parse(i.text)
print("个人信息",person_dict)
'''
处理item,对应mysql的person_info表
'''
item['person_id'] = person_id
item['province'] = str_sheng
item['municipal'] = str_shi
nick_name_info = self.driver.find_elements_by_xpath('/html//div[@class="member_info_r yh"]/h4')
nick_name = nick_name_info[0].text[0:nick_name_info[0].text.index("I")]
print("昵称", nick_name)
item['nike_name'] = nick_name
item['education'] = person_dict['学历']
item['height'] = person_dict['身高']
item['buy_car'] = person_dict['购车']
item['salary'] = person_dict['月薪']
item['housing'] = person_dict['住房']
item['weight'] = person_dict['体重']
item['constellation'] = person_dict['星座']
item['nation'] = person_dict['民族']
item['zodiac'] = person_dict['属相']
item['blood_type'] = person_dict['血型']
item['age'] = age_info[0:age_info.index(',')]
print("年龄",age_info[0:age_info.index(',')])
item['address'] = str_sheng+str_shi
item['age_info'] = age_info
item['image_dir'] = nick_name+'_'+item['age']+'_'+person_id#下载的相片归类
item['url'] = response.url
#个人短语
item['introduce_oneself'] = self.driver.find_element_by_xpath('/html//div[@class="main_1000 mt15 fn-clear"]//div[@class="js_text"]').text
print("个性短语",item['introduce_oneself'])
#个性标签,有些人是没有个性标签的
#需要点击”更多“才能全部显示出来,否则只有4个
item['interest_label']=''
item['personality_label']=''
try:
#link_a = self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more DNA_xq_more_a"]/a')
#link_a.click()
self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more DNA_xq_more_a"]/a').click()
time.sleep(1)
gexing_info = self.driver.find_elements_by_xpath('/html//div[@class="test4"]//div[@class="list_a fn-clear"]')
print("aaa",type(gexing_info),gexing_info)
gexing_tag=''
for i in gexing_info:
gexing_tag += i.text
# a = item.find_element_by_xpath('div[@class="pag_list_grey_c"]').text
item['personality_label'] = "".join(gexing_tag)
except Exception as e:
item['personality_label'] = '还没有填写个性元素'
print("个性",item['personality_label'])
#她的兴趣爱好有可能也是找不到的
try:
#link_a = self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more DNA_xq_more_a"]/a')
#link_a.click()
self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more"]/a').click()
# self.driver.find_element_by_xpath('/html/body/div[6]/div[1]/div[3]/div/div[1]/div[2]/a').click
self.driver.implicitly_wait(1)
aihao_info = self.driver.find_elements_by_xpath('/html/body/div[6]/div[1]/div[3]/div/div[1]/div[1]/ul')
print("bbb",type(aihao_info),aihao_info)
aihao_tag=''
for i in aihao_info:
aihao_tag += i.text
# a = item.find_element_by_xpath('div[@class="pag_list_grey_c"]').text
item['interest_label'] = "".join(aihao_tag)
except Exception as e:
item['interest_label'] = '还没有填写兴趣爱好'
print("她的兴趣爱好",item['interest_label'])
find_mate = self.driver.find_elements_by_xpath('/html//div[@class="bg_white mt15"]')
'''
Partner requirements
'''
mate = find_mate[1].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]')
mate_dict={}
for i in mate:
| item['nation_mate'] = mate_dict['民族']
item['education_mate'] = mate_dict['学历']
item['image_mate'] = mate_dict['相册']
item['marital_status'] = mate_dict['婚姻状况']
item['address_mate'] = mate_dict['居住地']
item['s | mate_dict = parse(i.text)
item['person_id_mate'] = person_id
item['age_mate'] = mate_dict['年龄']
item['height_mate'] = mate_dict['身高']
| random_line_split |
jiayuan.py | onnectionPool(host='127.0.0.1',port=6379,db=0,decode_responses=True) # 427 records
r = redis.StrictRedis(connection_pool=pool)
name = "jiayuan_main"
redis_key = 'jiayuan_main:start_urls'
url_base = 'http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=163649&ft=off&f=select&mt=d'
redis_key = "sinaspider:start_urls"
login_url = 'http://login.jiayuan.com/'# url used for logging in
start_urls = []
pre_page_num = 25# each search result page holds 25 records
# headless selenium login
option = webdriver.ChromeOptions()
option.add_argument('--headless')
option.add_argument("--window-size=1920,1080")
prefs={"profile.managed_default_content_settings.images":2}# do not load images
option.add_experimental_option("prefs",prefs)
try:
driver = webdriver.Chrome(chrome_options=option)
except Exception as e:
driver.close()
print("the spider hit an exception, closing",str(e))
driver.get(login_url)
time.sleep(3)
driver.find_element_by_id("login_btn").click()
driver.find_element_by_id("login_email").clear()
driver.find_element_by_id("login_email").send_keys(USER_NAME) # replace with your own username
driver.find_element_by_id("login_password").clear()
driver.find_element_by_id("login_password").send_keys(PASSWD) # replace with your own password
# login url
#url="http://login.jiayuan.com/"
driver.find_element_by_id("login_btn").click()# click the login button
cookies = driver.get_cookies()# grab the cookies
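# The selenium cookies grabbed above are re-used later by passing cookies=self.cookies
# to the scrapy Requests in get_main_info/get_details. If a plain dict were needed
# instead of selenium's list of dicts, it could be flattened like this (editorial sketch):
# cookie_dict = {c['name']: c['value'] for c in cookies}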
for p in range(1,173649):
search_url = "http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=173649&ft=off&f=select&mt=d" %(p)
start_urls.append(search_url)
#print("start_urls",len(start_urls))
# start_urls = [
# "http://search.jiayuan.com/v2/search_v2.php",# the raw search results, used to grab profile page urls (without logging in first)
#"https://passport.jiayuan.com/dologin.php?pre_url=http://www.jiayuan.com/usercp",# the login page the credentials are POSTed to
# ]
'''
The downloader middleware sits between the downloader and the Scrapy engine; every request and response passes through it.
Inside a middleware, the function that processes a request is process_request(request, spider).
'''
def start_requests(self):#
for url in self.start_urls:
yield Request(url=url,callback=self.get_main_info)
# yield scrapy.Request(url=search_url,callback=self.get_main_info)
# return Request(url=url,callback=self.get_main_info)
def get_main_info(self,response):# parse the urls on the search result page
#info = response.body.decode("utf-8") # after logging in you can inspect the login response, e.g. json.loads(
# for url in self.start_urls:
time.sleep(1)
print("current url",response.url)
print('reloading url')
self.driver.get(response.url)
self.driver.implicitly_wait(3)
user_list = self.driver.find_elements_by_xpath('/html//ul[@id="normal_user_container"]/li//div[@class="user_name"]/a[@class="os_stat"]')# yields several li tags
if user_list==[]:
print("user_list is empty, the page parsing failed")
#print("user_list",type(user_list),user_list)
url_details = []# urls of the profile detail pages
for user in user_list:
main_url_main = user.get_attribute("href")
print("profile page url",main_url_main)
url_details.append(main_url_main)
# self.redis_pipe.rpush("p",main_url_main)# optionally also push the detail urls into redis
# self.redis_pipe.execute()
print("number of detail urls",len(url_details))
if url_details!=[]:
for url in url_details:
yield Request(url=url,cookies=self.cookies,callback=self.get_details)# parse the member details
# yield item
def get_details(self,response):
'''
Example of the raw text (a Python str) scraped from the profile page; the site labels below are kept in the original Chinese:
年 龄:
26-29岁之间
身 高:
169-185厘米
民 族:
汉族
学 历:
不限
相 册:
有照片
婚姻状况:
未婚
居 住 地:
湖北十堰
诚 信:
不限
Text of this shape is converted into a dict such as {'学历': '不限', '婚姻状况': '未婚', '居住地': '湖北十堰', '相册': '有照片', '身高': '169-185厘米', '民族': '汉族', '诚信': '不限', '年龄': '26-29岁之间'} so it can be stored in the database easily.
'''
pass
def parse(str1):
temp_list = str1.split('\n')
result={}
result_str=''
# temp_dict=[]  # result_dict exists because some sections contain several tags that have to be merged
# result_dict = {}  # the result of merging several dicts
if len(temp_list)>1:# more than one line means this section has values, otherwise it was left blank
for i in range(len(temp_list)):
if i%2==0:
result[temp_list[i].replace(" ", "").replace(":", '')] = temp_list[i+1]
return result
# otherwise return the original str
else:
result_str = str1
return result_str
item = JiayuanItem()
self.driver.get(response.url)
self.driver.implicitly_wait(3)
print('opening the browser')
print("current url",response.url)
age_info = self.driver.find_element_by_xpath('/html//h6[@class="member_name"]').text
person_id = response.url[response.url.rfind('/')+1:response.url.index('?')]
print("age / address info",type(age_info),age_info)
address = self.driver.find_elements_by_xpath('/html//h6[@class="member_name"]/a')# the text of several a tags
str_address=''
str_sheng=address[0].get_attribute("text")
str_shi=address[1].get_attribute("text")
print("member address",str_sheng+'sssss'+str_shi)
'''
Personal information
'''
person_info = self.driver.find_elements_by_xpath('/html//ul[@class="member_info_list fn-clear"]')
person_dict={}
for i in person_info:
person_dict = parse(i.text)
print("个人信息",person_dict)
'''
处理item,对应mysql的person_info表
'''
item['person_id'] = person_id
item['province'] = str_sheng
item['municipal'] = str_shi
nick_name_info = self.driver.find_elements_by_xpath('/html//div[@class="member_info_r yh"]/h4')
nick_name = nick_name_info[0].text[0:nick_name_info[0].text.index("I")]
print("昵称", nick_name)
item['nike_name'] = nick_name
item['education'] = person_dict['学历']
item['height'] = person_dict['身高']
item['buy_car'] = person_dict['购车']
item['salary'] = person_dict['月薪']
item['housing'] = person_dict['住房']
item['weight'] = person_dict['体重']
item['constellation'] = person_dict['星座']
item['nation'] = person_dict['民族']
item['zodiac'] = person_dict['属相']
item['blood_type'] = person_dict['血型']
item['age'] = age_info[0:age_info.index(',')]
print("年龄",age_info[0:age_info.index(',')])
item['address'] = str_sheng+str_shi
item['age_info'] = age_info
item['image_dir'] = nick_name+'_'+item['age']+'_'+person_id#下载的相片归类
item['url'] = response.url
#个人短语
item['introduce_oneself'] = self.driver.find_element_by_xpath('/html//div[@class="main_1000 mt15 fn-clear"]//div[@class="js_text"]').text
print("个性短语",item['introduce_oneself'])
#个性标签,有些人是没有个性标签的
#需要点击”更多“才能全部显示出来,否则只有4个
item['interest_label']=''
item['personality | pool=redis.C | identifier_name |
|
jiayuan.py | driver.get(login_url)
time.sleep(3)
driver.find_element_by_id("login_btn").click()
driver.find_element_by_id("login_email").clear()
driver.find_element_by_id("login_email").send_keys(USER_NAME) #修改为自己的用户名
driver.find_element_by_id("login_password").clear()
driver.find_element_by_id("login_password").send_keys(PASSWD) #修改为自己的密码
#登录url
#url="http://login.jiayuan.com/"
driver.find_element_by_id("login_btn").click()#点击登录按钮
cookies = driver.get_cookies()#获取cookies
for p in range(1,173649):
search_url = "http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=173649&ft=off&f=select&mt=d" %(p)
start_urls.append(search_url)
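# Example of one generated search URL, obtained by substituting p=1 into the format
# string above (shown only for illustration):
# http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=1&pt=173649&ft=off&f=select&mt=d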
#print("start_urls",len(start_urls))
# start_urls = [
# "http://search.jiayuan.com/v2/search_v2.php",#直接搜索结果,获取个人主页的url(先不登录)
#"https://passport.jiayuan.com/dologin.php?pre_url=http://www.jiayuan.com/usercp",#登录页面post数据
# ]
'''
下载器中间件在下载器和Scrapy引擎之间,每一个request和response都会通过中间件进行处理。
在中间件中,对request进行处理的函数是process_request(request, spider)
'''
def start_requests(self):#
for url in self.start_urls:
yield Request(url=url,callback=self.get_main_info)
# yield scrapy.Request(url=search_url,callback=self.get_main_info)
# return Request(url=url,callback=self.get_main_info)
def get_main_info(self,response):#解析搜索页面的url
#info = response.body.decode("utf-8") #登录后可以查看一下登录响应信息json.loads(
# for url in self.start_urls:
time.sleep(1)
print("当前的url",response.url)
print('重新加载url')
self.driver.get(response.url)
self.driver.implicitly_wait(3)
user_list = self.driver.find_elements_by_xpath('/html//ul[@id="normal_user_container"]/li//div[@class="user_name"]/a[@class="os_stat"]')#得到多个li标签
if user_list==[]:
print("user_list为空了,解析有问题")
#print("user_list",type(user_list),user_list)
url_details = []#详情页面的url
for user in user_list:
main_url_main = user.get_attribute("href")
print("人员主页url",main_url_main)
url_details.append(main_url_main)
# self.redis_pipe.rpush("p",main_url_main)#详情页额外写入redis,也可以不写
# self.redis_pipe.execute()
print("人员详情url2",len(url_details))
if url_details!=[]:
for url in url_details:
yield Request(url=url,cookies=self.cookies,callback=self.get_details)#解析人员详细信息
# yield item
def get_details(self,response):
'''
<class 'str'>
年 龄:
26-29岁之间
身 高:
169-185厘米
民 族:
汉族
学 历:
不限
相 册:
有照片
婚姻状况:
未婚
居 住 地:
湖北十堰
诚 信:
不限
将这种类型的文字全部转成{'学历': '不限', '婚姻状况': '未婚', '居住地': '湖北十堰', '相册': '有照片', '身高': '169-185厘米', '民族': '汉族', '诚信': '不限', '年龄': '26-29岁之间'}这种dict方便入库
'''
pass
def parse(str1):
temp_list = str1.split('\n')
result={}
result_str=''
# temp_dict=[]#result_dict这是因为有些项目下面有多个标签,多个标签就需要合并起来
# result_dict = {}#多个dict合并后的结果
if len(temp_list)>1:#大于1说明该项下有值,否则此项未填信息
for i in range(len(temp_list)):
if i%2==0:
result[temp_list[i].replace(" ", "").replace(":", '')] = temp_list[i+1]
return result
#其他则返回str
else:
result_str = str1
return result_str
item = JiayuanItem()
self.driver.get(response.url)
self.driver.implicitly_wait(3)
print('打开浏览器')
print("当前的url",response.url)
age_info = self.driver.find_element_by_xpath('/html//h6[@class="member_name"]').text
person_id = response.url[response.url.rfind('/')+1:response.url.index('?')]
print("年龄地址信息",type(age_info),age_info)
address = self.driver.find_elements_by_xpath('/html//h6[@class="member_name"]/a')#得到多个a标签的text
str_address=''
str_sheng=address[0].get_attribute("text")
str_shi=address[1].get_attribute("text")
print("人员地址",str_sheng+'sssss'+str_shi)
'''
个人信息
'''
person_info = self.driver.find_elements_by_xpath('/html//ul[@class="member_info_list fn-clear"]')
person_dict={}
for i in person_info:
person_dict = parse(i.text)
print("个人信息",person_dict)
'''
处理item,对应mysql的person_info表
'''
item['person_id'] = person_id
item['province'] = str_sheng
item['municipal'] = str_shi
nick_name_info = self.driver.find_elements_by_xpath('/html//div[@class="member_info_r yh"]/h4')
nick_name = nick_name_info[0].text[0:nick_name_info[0].text.index("I")]
print("昵称", nick_name)
item['nike_name'] = nick_name
item['education'] = person_dict['学历']
item['height'] = person_dict['身高']
item['buy_car'] = person_dict['购车']
item['salary'] = person_dict['月薪']
item['housing'] = person_dict['住房']
item['weight'] = person_dict['体重']
item['constellation'] = person_dict['星座']
item['nation'] = person_dict['民族']
item['zodiac'] = person_dict['属相']
item['blood_type'] = person_dict['血型']
item['age'] = age_info[0:age_info.index(',')]
print("年龄",age_info[0:age_info.index(',')])
item['address'] = str_sheng+str_shi
item['age_info'] = age_info
item['image_dir'] = nick_name+'_'+item['age']+'_'+person_id#下载的相片归类
item['url'] = response.url
#个人短语
item['introduce_oneself'] = self.driver.find_element_by_xpath('/html//div[@class="main_1000 mt15 fn-clear"]//div[@class="js_text"]').text
print("个性短语",item['introduce_oneself'])
#个性标签,有些人是没有个性标签的
#需要点击”更多“才能全部显示出来,否则只有4个
item['interest_label']=''
item['personality_label']=''
try:
# | 127.0.0.1',port=6379,db=0,decode_responses=True) #427条记录
r = redis.StrictRedis(connection_pool=pool)
name = "jiayuan_main"
redis_key = 'jiayuan_main:start_urls'
url_base = 'http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=163649&ft=off&f=select&mt=d'
redis_key = "sinaspider:start_urls"
login_url = 'http://login.jiayuan.com/'#登录时的url
start_urls = []
pre_page_num = 25#每个搜索业面有25条记录
#head less模拟登录
option = webdriver.ChromeOptions()
option.add_argument('--headless')
option.add_argument("--window-size=1920,1080")
prefs={"profile.managed_default_content_settings.images":2}#禁止加载图片
option.add_experimental_option("prefs",prefs)
try:
driver = webdriver.Chrome(chrome_options=option)
except Exception as e:
driver.close()
print("spider出现了异常,关闭",str(e))
| identifier_body |
|
jiayuan.py | ,callback=self.get_main_info)
def get_main_info(self,response):#解析搜索页面的url
#info = response.body.decode("utf-8") #登录后可以查看一下登录响应信息json.loads(
# for url in self.start_urls:
time.sleep(1)
print("当前的url",response.url)
print('重新加载url')
self.driver.get(response.url)
self.driver.implicitly_wait(3)
user_list = self.driver.find_elements_by_xpath('/html//ul[@id="normal_user_container"]/li//div[@class="user_name"]/a[@class="os_stat"]')#得到多个li标签
if user_list==[]:
print("user_list为空了,解析有问题")
#print("user_list",type(user_list),user_list)
url_details = []#详情页面的url
for user in user_list:
main_url_main = user.get_attribute("href")
print("人员主页url",main_url_main)
url_details.append(main_url_main)
# self.redis_pipe.rpush("p",main_url_main)#详情页额外写入redis,也可以不写
# self.redis_pipe.execute()
print("人员详情url2",len(url_details))
if url_details!=[]:
for url in url_details:
yield Request(url | <class 'str'>
年 龄:
26-29岁之间
身 高:
169-185厘米
民 族:
汉族
学 历:
不限
相 册:
有照片
婚姻状况:
未婚
居 住 地:
湖北十堰
诚 信:
不限
将这种类型的文字全部转成{'学历': '不限', '婚姻状况': '未婚', '居住地': '湖北十堰', '相册': '有照片', '身高': '169-185厘米', '民族': '汉族', '诚信': '不限', '年龄': '26-29岁之间'}这种dict方便入库
'''
pass
def parse(str1):
temp_list = str1.split('\n')
result={}
result_str=''
# temp_dict=[]#result_dict这是因为有些项目下面有多个标签,多个标签就需要合并起来
# result_dict = {}#多个dict合并后的结果
if len(temp_list)>1:#大于1说明该项下有值,否则此项未填信息
for i in range(len(temp_list)):
if i%2==0:
result[temp_list[i].replace(" ", "").replace(":", '')] = temp_list[i+1]
return result
#其他则返回str
else:
result_str = str1
return result_str
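# Rough dict-comprehension equivalent of the loop above (sketch only; like the loop,
# it assumes an even number of lines in the input):
# lines = str1.split('\n')
# dict(zip([k.replace(" ", "").replace(":", '') for k in lines[0::2]], lines[1::2]))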
item = JiayuanItem()
self.driver.get(response.url)
self.driver.implicitly_wait(3)
print('打开浏览器')
print("当前的url",response.url)
age_info = self.driver.find_element_by_xpath('/html//h6[@class="member_name"]').text
person_id = response.url[response.url.rfind('/')+1:response.url.index('?')]
print("年龄地址信息",type(age_info),age_info)
address = self.driver.find_elements_by_xpath('/html//h6[@class="member_name"]/a')#得到多个a标签的text
str_address=''
str_sheng=address[0].get_attribute("text")
str_shi=address[1].get_attribute("text")
print("人员地址",str_sheng+'sssss'+str_shi)
'''
个人信息
'''
person_info = self.driver.find_elements_by_xpath('/html//ul[@class="member_info_list fn-clear"]')
person_dict={}
for i in person_info:
person_dict = parse(i.text)
print("个人信息",person_dict)
'''
处理item,对应mysql的person_info表
'''
item['person_id'] = person_id
item['province'] = str_sheng
item['municipal'] = str_shi
nick_name_info = self.driver.find_elements_by_xpath('/html//div[@class="member_info_r yh"]/h4')
nick_name = nick_name_info[0].text[0:nick_name_info[0].text.index("I")]
print("昵称", nick_name)
item['nike_name'] = nick_name
item['education'] = person_dict['学历']
item['height'] = person_dict['身高']
item['buy_car'] = person_dict['购车']
item['salary'] = person_dict['月薪']
item['housing'] = person_dict['住房']
item['weight'] = person_dict['体重']
item['constellation'] = person_dict['星座']
item['nation'] = person_dict['民族']
item['zodiac'] = person_dict['属相']
item['blood_type'] = person_dict['血型']
item['age'] = age_info[0:age_info.index(',')]
print("年龄",age_info[0:age_info.index(',')])
item['address'] = str_sheng+str_shi
item['age_info'] = age_info
item['image_dir'] = nick_name+'_'+item['age']+'_'+person_id#下载的相片归类
item['url'] = response.url
#个人短语
item['introduce_oneself'] = self.driver.find_element_by_xpath('/html//div[@class="main_1000 mt15 fn-clear"]//div[@class="js_text"]').text
print("个性短语",item['introduce_oneself'])
#个性标签,有些人是没有个性标签的
#需要点击”更多“才能全部显示出来,否则只有4个
item['interest_label']=''
item['personality_label']=''
try:
#link_a = self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more DNA_xq_more_a"]/a')
#link_a.click()
self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more DNA_xq_more_a"]/a').click()
time.sleep(1)
gexing_info = self.driver.find_elements_by_xpath('/html//div[@class="test4"]//div[@class="list_a fn-clear"]')
print("aaa",type(gexing_info),gexing_info)
gexing_tag=''
for i in gexing_info:
gexing_tag += i.text
# a = item.find_element_by_xpath('div[@class="pag_list_grey_c"]').text
item['personality_label'] = "".join(gexing_tag)
except Exception as e:
item['personality_label'] = '还没有填写个性元素'
print("个性",item['personality_label'])
#她的兴趣爱好有可能也是找不到的
try:
#link_a = self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more DNA_xq_more_a"]/a')
#link_a.click()
self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more"]/a').click()
# self.driver.find_element_by_xpath('/html/body/div[6]/div[1]/div[3]/div/div[1]/div[2]/a').click
self.driver.implicitly_wait(1)
aihao_info = self.driver.find_elements_by_xpath('/html/body/div[6]/div[1]/div[3]/div/div[1]/div[1]/ul')
print("bbb",type(aihao_info),aihao_info)
aihao_tag=''
for i in aihao_info:
aihao_tag += i.text
# a = item.find_element_by_xpath('div[@class="pag_list_grey_c"]').text
item['interest_label'] = "".join(aihao_tag)
except Exception as e:
item['interest_label'] = '还没有填写兴趣爱好'
print("她的兴趣爱好",item['interest_label'])
find_mate = self.driver.find_elements_by_xpath('/html//div[@class="bg_white mt15"]')
'''
择偶要求
'''
mate = find_mate[1].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]')
mate_dict={}
for i in mate:
mate_dict = parse(i.text)
item['person_id_mate'] = person_id
item['age_mate'] = mate_dict['年龄']
item['height_mate'] = mate_dict['身高']
item['nation_mate'] = mate_dict['民族']
item['education_mate'] = mate_dict['学历']
item['image_mate'] = mate_dict['相册']
item['marital_status'] = mate_dict['婚姻状况']
item['address_mate'] = mate_dict['居住地']
item['sincer | =url,cookies=self.cookies,callback=self.get_details)#解析人员详细信息
# yield item
def get_details(self,response):
'''
| conditional_block |
wxkfmanager.go | 码
Redircturl string //公众号授权完成后的回调URL用来接收授权码auth_code
Compentauthurl string //当前第三方平台授权移动端连接
AppAuthinfos []APPAuthInfoResp //第三方公众号令牌数组,自动刷新
}
//初始化开放平台管理器
func InitWXKFManager(token,appid,EncodingAESKey,appseceret,Redircturl string,refreshAppAuthHanlder func(appauth ...APPAuthInfoResp),AppAuthinfos...[]APPAuthInfoResp) *WXKFManager {
var wx WXKFManager
wx.Compenttoken = token
wx.CompentAeskey = EncodingAESKey
wx.CompentAppid = appid
wx.Componentsecret = appseceret
wx.Redircturl=Redircturl
//每小时刷新一次component_accesstoken
cr := cron.New()
spec := "0 0 */1 * * ?"
err:=cr.AddFunc(spec,wx.getComponent_access_token)
err= cr.AddFunc(spec, func() {
if len(AppAuthinfos)>0 {
wx.AppAuthinfos=AppAuthinfos[0]
for _,value := range wx.AppAuthinfos{
wx.RefreshCompentAuthAccessToken(value.AuthorizationInfo.AuthorizerAppid,value.AuthorizationInfo.AuthorizerRefreshToken, func(APPAuthInfo APPAuthInfoResp) {
refreshAppAuthHanlder(APPAuthInfo)
})
}
}else {
refreshAppAuthHanlder()
}
})
fmt.Println("开房平台定时任务初始化",err,spec)
spe :="0 */10 * * * ?"
err=cr.AddFunc(spe,wx.getCompent_pre_auth_code)
fmt.Println("开房平台定时任务初始化",err,spe)
cr.Start()
return &wx
}
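// Minimal usage sketch (all values below are hypothetical, not from the original source):
//
//	mgr := InitWXKFManager("my-token", "wx_component_appid", "encoding-aes-key",
//		"component-secret", "https://example.com/auth/callback",
//		func(appauth ...APPAuthInfoResp) {
//			// persist the refreshed authorizer tokens here
//		})
//	_ = mgr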
//授权流程
//授权回调 获取授权码并根据授权码获取授权信息
func (wx *WXKFManager) HanleCompentAuth(context * gin.Context, responsehandler func(authinfo APPAuthInfoResp)(redicturl string)){
authcode := context.Query("auth_code")
//查询公众号授权第三方平台的权限
wx.getCompentAuthAccesstoken(authcode, func(appAuthInfo APPAuthInfoResp) {
wx.AppAuthinfos=append(wx.AppAuthinfos, appAuthInfo)
redicturl := responsehandler(appAuthInfo)
context.Redirect(http.StatusMovedPermanently,redicturl)
})
}
//获取授权方公众号账号基本信息
func (wx *WXKFManager) GetCompentAuthorizerInfo(authorizer_appid string,response ...func(resp APPUserInfoResp)) {
url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token="
url =url +wx.Component_access_token
POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorizer_appid":authorizer_appid}, func(resp JsonResponse) {
var result APPUserInfoResp
mapstructure.Decode(resp.Dic,&result)
result.JsonResponse=&resp
if len(response)>0 {
response[0](result)
}
})
}
//获取授权方选项设置信息
func (wx *WXKFManager) GetCompentAuthOptionInfo(authorizer_appid,option_name string,responsehandler ...func(APPOptionResp)) {
url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token="
url =url +wx.Component_access_token
POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorizer_appid":authorizer_appid,"option_name":option_name}, func(resp JsonResponse) {
var result APPOptionResp
mapstructure.Decode(resp.Dic,&result)
result.JsonResponse=&resp
if len(responsehandler)>0 {
responsehandler[0](result)
}
})
}
//设置授权方选项信息
func (wx *WXKFManager) SetCompentAuthOption(authorizer_appid,option_name,option_value string,responsehandler ...func(BaseResp)) {
url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token="
url =url +wx.Component_access_token
POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorizer_appid":authorizer_appid,"option_name":option_name,
"option_value":option_value}, func(resp JsonResponse) {
var result BaseResp
mapstructure.Decode(resp.Dic,&result)
result.JsonResponse=&resp
if len(responsehandler)>0 {
responsehandler[0](result)
}
})
}
//授权通知处理
func (wx *WXKFManager) HandleCompentAuthEventPush(context * gin.Context,responsehandler ...func(appmsg APPAuthMsg)){
wx.parsereqToAPPAuthMsg(context, func(CheckSign boo | Decrptmsg APPAuthMsg, safe bool) {
if CheckSign {
if len(responsehandler)>0 {
responsehandler[0](Decrptmsg)
}
}
})
context.String(http.StatusOK,"success")
}
//代公众号实现网页授权
/****************代公众号实现业务*******************/
//1.代公众号调用接口
//获取用户信息
func (wx *WXKFManager)GetUserInfo( authorizer_access_token,authorizer_appid string ,hanlder ...func(user JsonResponse) ){
if len(hanlder)>0 {
getuserInfo(authorizer_access_token,authorizer_appid,hanlder[0])
}else {
getuserInfo(authorizer_access_token,authorizer_appid)
}
}
//获取用户列表
func (wx *WXKFManager)GetUserList( authorizer_access_token string,hanlder func(user JsonResponse),nestopid ...string ){
if len(nestopid)>0 {
getuserlist(authorizer_access_token,hanlder,nestopid[0] )
}else {
getuserlist(authorizer_access_token,hanlder)
}
}
//2.代公众号处理消息和事件
func (wx *WXKFManager) HandleAppEventPush(ctx * gin.Context, handler func(msg ReqMsg)(usedefult bool,replymsg interface{})){
wx.parsereqToReqMsg(ctx, func(CheckSign bool, Orignmsg ReqMsg, Decrptmsg ReqMsg, safe bool) {
if !CheckSign {
ctx.String(http.StatusForbidden, "验证签名错误")
return
}
if ctx.Request.Method!= http.MethodPost {
echostr := ctx.Query("echostr")
ctx.String(http.StatusOK,echostr)
return
}
def,replymsg := handler(Decrptmsg)
if def {
ctx.String(http.StatusOK,"success")
return
}
if safe {
//
ctx.String(http.StatusOK,string(ReplyMsgData(wx.msgEncrept(replymsg))))
}else {
ctx.String(http.StatusOK, string(ReplyMsgData(replymsg)))
}
})
}
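// Caller-side sketch for HandleAppEventPush (hypothetical: the MsgType field and the
// buildTextReply helper are assumptions, not part of this package):
//
//	wx.HandleAppEventPush(ctx, func(msg ReqMsg) (bool, interface{}) {
//		if msg.MsgType == "text" {
//			return false, buildTextReply(msg)
//		}
//		return true, nil // fall back to the default "success" response
//	})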
//3.代公众号发起网页授权
//获取代公众号发起网页授权url
func (wx *WXKFManager) GetAppAuthurl(appid,scope,redirect_uri,state string) string {
str := url.QueryEscape(redirect_uri)
url:="https://open.weixin.qq.com/connect/oauth2/authorize?appid="
url=url+appid+"&redirect_uri="+str
url =url+"&response_type=code&scope="+scope+"&state="+state+"&component_appid="+wx.CompentAppid
url =url+"#wechat_redirect"
return url
}
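// Shape of the returned URL (placeholders only, derived from the format string above):
// https://open.weixin.qq.com/connect/oauth2/authorize?appid=APPID&redirect_uri=ESCAPED_URI
//   &response_type=code&scope=SCOPE&state=STATE&component_appid=COMPONENT_APPID#wechat_redirect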
//网页授权后回调
func (wx *WXKFManager) HanledAppAuth(context * gin.Context,completeHandler func(resp AuthResp,authuser JsonResponse,state string)(redicturl string)) {
code := context.Query("code")
appid := context.Query("appid")
state := context.Query("state")
fmt.Println(code,appid,"接收到的微信信息时")
wx.getAppAuthUserAccesstoken(code,appid, func(authresp AuthResp) {
wx.getAppAuthuserInfo(authresp, func(authuser JsonResponse) {
redicturl:=completeHandler(authresp,authuser,state)
context.Redirect(http.StatusMovedPermanently,redicturl)
})
})
}
//4代公众号调用jssdk
/*...........私有方法.................*/
//加密XML结构体消息体
func (wx *WXKFManager)msgEncrept(msg interface{})EncryptMsg {
return CreatEncryptMsg(ReplyMsgData(msg),DecodeAESKey(wx.CompentAeskey),wx.CompentAppid,wx.Compenttoken)
}
/*授权流程使用*/
//1.获取第三方平台access_token
func (wx *WXKFManager) getComponent_access_token() {
if len(wx.ComponentVerifyTicket)>0 {
wx.Component_access_token = getcomponent_token(wx.CompentAppid,wx.Componentsecret,wx.ComponentVerifyTicket)
}else {
wx.Component_access_token=""
}
}
//2.获取预授权码
func (wx *WXKFManager) getCompent_pre_auth_code() {
if len(wx.Component_access_token)>0 {
wx.Pre_auth_code= getCompent_pre_authcode(wx.Component_access_token,gin.H{"component_appid": wx.CompentAppid})
wx.Compentauthurl= getCompentAuthUrl(wx.CompentAppid,wx.Pre_auth_code,wx.Redircturl)
}else {
wx.Pre_auth_code=""
}
}
//3.使用授权码换取公众号或小程序的接口调用凭据和授权信息
func (wx *WXKFManager) getCompent | l, Orignmsg ReqMsg, | identifier_name |
wxkfmanager.go | 码获取授权信息
func (wx *WXKFManager) HanleCompentAuth(context * gin.Context, responsehandler func(authinfo APPAuthInfoResp)(redicturl string)){
authcode := context.Query("auth_code")
//查询公众号授权第三方平台的权限
wx.getCompentAuthAccesstoken(authcode, func(appAuthInfo APPAuthInfoResp) {
wx.AppAuthinfos=append(wx.AppAuthinfos, appAuthInfo)
redicturl := responsehandler(appAuthInfo)
context.Redirect(http.StatusMovedPermanently,redicturl)
})
}
//获取授权方公众号账号基本信息
func (wx *WXKFManager) GetCompentAuthorizerInfo(authorizer_appid string,response ...func(resp APPUserInfoResp)) {
url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token="
url =url +wx.Component_access_token
POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorizer_appid":authorizer_appid}, func(resp JsonResponse) {
var result APPUserInfoResp
mapstructure.Decode(resp.Dic,&result)
result.JsonResponse=&resp
if len(response)>0 {
response[0](result)
}
})
}
//获取授权方选项设置信息
func (wx *WXKFManager) GetCompentAuthOptionInfo(authorizer_appid,option_name string,responsehandler ...func(APPOptionResp)) {
url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token="
url =url +wx.Component_access_token
POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorizer_appid":authorizer_appid,"option_name":option_name}, func(resp JsonResponse) {
var result APPOptionResp
mapstructure.Decode(resp.Dic,&result)
result.JsonResponse=&resp
if len(responsehandler)>0 {
responsehandler[0](result)
}
})
}
//设置授权方选项信息
func (wx *WXKFManager) SetCompentAuthOption(authorizer_appid,option_name,option_value string,responsehandler ...func(BaseResp)) {
url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token="
url =url +wx.Component_access_token
POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorizer_appid":authorizer_appid,"option_name":option_name,
"option_value":option_value}, func(resp JsonResponse) {
var result BaseResp
mapstructure.Decode(resp.Dic,&result)
result.JsonResponse=&resp
if len(responsehandler)>0 {
responsehandler[0](result)
}
})
}
//授权通知处理
func (wx *WXKFManager) HandleCompentAuthEventPush(context * gin.Context,responsehandler ...func(appmsg APPAuthMsg)){
wx.parsereqToAPPAuthMsg(context, func(CheckSign bool, Orignmsg ReqMsg, Decrptmsg APPAuthMsg, safe bool) {
if CheckSign {
if len(responsehandler)>0 {
responsehandler[0](Decrptmsg)
}
}
})
context.String(http.StatusOK,"success")
}
//代公众号实现网页授权
/****************代公众号实现业务*******************/
//1.代公众号调用接口
//获取用户信息
func (wx *WXKFManager)GetUserInfo( authorizer_access_token,authorizer_appid string ,hanlder ...func(user JsonResponse) ){
if len(hanlder)>0 {
getuserInfo(authorizer_access_token,authorizer_appid,hanlder[0])
}else {
getuserInfo(authorizer_access_token,authorizer_appid)
}
}
//获取用户列表
func (wx *WXKFManager)GetUserList( authorizer_access_token string,hanlder func(user JsonResponse),nestopid ...string ){
if len(nestopid)>0 {
getuserlist(authorizer_access_token,hanlder,nestopid[0] )
}else {
getuserlist(authorizer_access_token,hanlder)
}
}
//2.代公众号处理消息和事件
func (wx *WXKFManager) HandleAppEventPush(ctx * gin.Context, handler func(msg ReqMsg)(usedefult bool,replymsg interface{})){
wx.parsereqToReqMsg(ctx, func(CheckSign bool, Orignmsg ReqMsg, Decrptmsg ReqMsg, safe bool) {
if !CheckSign {
ctx.String(http.StatusForbidden, "验证签名错误")
return
}
if ctx.Request.Method!= http.MethodPost {
echostr := ctx.Query("echostr")
ctx.String(http.StatusOK,echostr)
return
}
def,replymsg := handler(Decrptmsg)
if def {
ctx.String(http.StatusOK,"success")
return
}
if safe {
//
ctx.String(http.StatusOK,string(ReplyMsgData(wx.msgEncrept(replymsg))))
}else {
ctx.String(http.StatusOK, string(ReplyMsgData(replymsg)))
}
})
}
//3.代公众号发起网页授权
//获取代公众号发起网页授权url
func (wx *WXKFManager) GetAppAuthurl(appid,scope,redirect_uri,state string) string {
str := url.QueryEscape(redirect_uri)
url:="https://open.weixin.qq.com/connect/oauth2/authorize?appid="
url=url+appid+"&redirect_uri="+str
url =url+"&response_type=code&scope="+scope+"&state="+state+"&component_appid="+wx.CompentAppid
url =url+"#wechat_redirect"
return url
}
//网页授权后回调
func (wx *WXKFManager) HanledAppAuth(context * gin.Context,completeHandler func(resp AuthResp,authuser JsonResponse,state string)(redicturl string)) {
code := context.Query("code")
appid := context.Query("appid")
state := context.Query("state")
fmt.Println(code,appid,"接收到的微信信息时")
wx.getAppAuthUserAccesstoken(code,appid, func(authresp AuthResp) {
wx.getAppAuthuserInfo(authresp, func(authuser JsonResponse) {
redicturl:=completeHandler(authresp,authuser,state)
context.Redirect(http.StatusMovedPermanently,redicturl)
})
})
}
//4代公众号调用jssdk
/*...........私有方法.................*/
//加密XML结构体消息体
func (wx *WXKFManager)msgEncrept(msg interface{})EncryptMsg {
return CreatEncryptMsg(ReplyMsgData(msg),DecodeAESKey(wx.CompentAeskey),wx.CompentAppid,wx.Compenttoken)
}
/*授权流程使用*/
//1.获取第三方平台access_token
func (wx *WXKFManager) getComponent_access_token() {
if len(wx.ComponentVerifyTicket)>0 {
wx.Component_access_token = getcomponent_token(wx.CompentAppid,wx.Componentsecret,wx.ComponentVerifyTicket)
}else {
wx.Component_access_token=""
}
}
//2.获取预授权码
func (wx *WXKFManager) getCompent_pre_auth_code() {
if len(wx.Component_access_token)>0 {
wx.Pre_auth_code= getCompent_pre_authcode(wx.Component_access_token,gin.H{"component_appid": wx.CompentAppid})
wx.Compentauthurl= getCompentAuthUrl(wx.CompentAppid,wx.Pre_auth_code,wx.Redircturl)
}else {
wx.Pre_auth_code=""
}
}
//3.使用授权码换取公众号或小程序的接口调用凭据和授权信息
func (wx *WXKFManager) getCompentAuthAccesstoken(authorization_code string,handler ...func(appAuthInfo APPAuthInfoResp)) {
url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token="
url =url +wx.Component_access_token
POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorization_code":authorization_code}, func(response JsonResponse) {
var result APPAuthInfoResp
mapstructure.Decode(response.Dic,&result)
result.JsonResponse=&response
if len(handler)>0 {
handler[0](result)
}
})
}
//4.(刷新)授权公众号或小程序的接口调用凭据
func (wx *WXKFManager) RefreshCompentAuthAccessToken(authorizer_appid,authorizer_refresh_token string,response ...func(APPAuthInfo APPAuthInfoResp)) {
url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token="
url =url +wx.Component_access_token
POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorizer_appid":authorizer_appid,"authorizer_refresh_token":authorizer_refresh_token}, func(res JsonResponse) {
var result APPAuthInfoResp
mapstructure.Decode(res.Dic,&result)
result.JsonResponse=&res
if len(response)>0 {
response[0](result)
}
})
}
/*代公众号实现业务使用*/
//代公众号获取useraccess_token
func (wx *WXKFManager) getAppAuthUserAccesstoken(code ,appid string,handler ...func(authresp AuthResp)) {
url :="https://api.weixin.qq.com/sns/oauth2/component/access_token?appid="+appid
url = url +"&code="+code+"&grant_type=authorization_code&component_appid="+wx.CompentAppid+"&component_access_token="
url = url + wx.Component_access_token
resp := Get(url)
var result | AuthResp
mapstructure.Dec | conditional_block |
|
wxkfmanager.go | if len(responsehandler)>0 {
responsehandler[0](Decrptmsg)
}
}
})
context.String(http.StatusOK,"success")
}
//代公众号实现网页授权
/****************代公众号实现业务*******************/
//1.代公众号调用接口
//获取用户信息
func (wx *WXKFManager)GetUserInfo( authorizer_access_token,authorizer_appid string ,hanlder ...func(user JsonResponse) ){
if len(hanlder)>0 {
getuserInfo(authorizer_access_token,authorizer_appid,hanlder[0])
}else {
getuserInfo(authorizer_access_token,authorizer_appid)
}
}
//获取用户列表
func (wx *WXKFManager)GetUserList( authorizer_access_token string,hanlder func(user JsonResponse),nestopid ...string ){
if len(nestopid)>0 {
getuserlist(authorizer_access_token,hanlder,nestopid[0] )
}else {
getuserlist(authorizer_access_token,hanlder)
}
}
//2.代公众号处理消息和事件
func (wx *WXKFManager) HandleAppEventPush(ctx * gin.Context, handler func(msg ReqMsg)(usedefult bool,replymsg interface{})){
wx.parsereqToReqMsg(ctx, func(CheckSign bool, Orignmsg ReqMsg, Decrptmsg ReqMsg, safe bool) {
if !CheckSign {
ctx.String(http.StatusForbidden, "验证签名错误")
return
}
if ctx.Request.Method!= http.MethodPost {
echostr := ctx.Query("echostr")
ctx.String(http.StatusOK,echostr)
return
}
def,replymsg := handler(Decrptmsg)
if def {
ctx.String(http.StatusOK,"success")
return
}
if safe {
//
ctx.String(http.StatusOK,string(ReplyMsgData(wx.msgEncrept(replymsg))))
}else {
ctx.String(http.StatusOK, string(ReplyMsgData(replymsg)))
}
})
}
//3.代公众号发起网页授权
//获取代公众号发起网页授权url
func (wx *WXKFManager) GetAppAuthurl(appid,scope,redirect_uri,state string) string {
str := url.QueryEscape(redirect_uri)
url:="https://open.weixin.qq.com/connect/oauth2/authorize?appid="
url=url+appid+"&redirect_uri="+str
url =url+"&response_type=code&scope="+scope+"&state="+state+"&component_appid="+wx.CompentAppid
url =url+"#wechat_redirect"
return url
}
//网页授权后回调
func (wx *WXKFManager) HanledAppAuth(context * gin.Context,completeHandler func(resp AuthResp,authuser JsonResponse,state string)(redicturl string)) {
code := context.Query("code")
appid := context.Query("appid")
state := context.Query("state")
fmt.Println(code,appid,"接收到的微信信息时")
wx.getAppAuthUserAccesstoken(code,appid, func(authresp AuthResp) {
wx.getAppAuthuserInfo(authresp, func(authuser JsonResponse) {
redicturl:=completeHandler(authresp,authuser,state)
context.Redirect(http.StatusMovedPermanently,redicturl)
})
})
}
//4代公众号调用jssdk
/*...........私有方法.................*/
//加密XML结构体消息体
func (wx *WXKFManager)msgEncrept(msg interface{})EncryptMsg {
return CreatEncryptMsg(ReplyMsgData(msg),DecodeAESKey(wx.CompentAeskey),wx.CompentAppid,wx.Compenttoken)
}
/*授权流程使用*/
//1.获取第三方平台access_token
func (wx *WXKFManager) getComponent_access_token() {
if len(wx.ComponentVerifyTicket)>0 {
wx.Component_access_token = getcomponent_token(wx.CompentAppid,wx.Componentsecret,wx.ComponentVerifyTicket)
}else {
wx.Component_access_token=""
}
}
//2.获取预授权码
func (wx *WXKFManager) getCompent_pre_auth_code() {
if len(wx.Component_access_token)>0 {
wx.Pre_auth_code= getCompent_pre_authcode(wx.Component_access_token,gin.H{"component_appid": wx.CompentAppid})
wx.Compentauthurl= getCompentAuthUrl(wx.CompentAppid,wx.Pre_auth_code,wx.Redircturl)
}else {
wx.Pre_auth_code=""
}
}
//3.使用授权码换取公众号或小程序的接口调用凭据和授权信息
func (wx *WXKFManager) getCompentAuthAccesstoken(authorization_code string,handler ...func(appAuthInfo APPAuthInfoResp)) {
url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token="
url =url +wx.Component_access_token
POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorization_code":authorization_code}, func(response JsonResponse) {
var result APPAuthInfoResp
mapstructure.Decode(response.Dic,&result)
result.JsonResponse=&response
if len(handler)>0 {
handler[0](result)
}
})
}
//4.(刷新)授权公众号或小程序的接口调用凭据
func (wx *WXKFManager) RefreshCompentAuthAccessToken(authorizer_appid,authorizer_refresh_token string,response ...func(APPAuthInfo APPAuthInfoResp)) {
url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token="
url =url +wx.Component_access_token
POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorizer_appid":authorizer_appid,"authorizer_refresh_token":authorizer_refresh_token}, func(res JsonResponse) {
var result APPAuthInfoResp
mapstructure.Decode(res.Dic,&result)
result.JsonResponse=&res
if len(response)>0 {
response[0](result)
}
})
}
/*代公众号实现业务使用*/
//代公众号获取useraccess_token
func (wx *WXKFManager) getAppAuthUserAccesstoken(code ,appid string,handler ...func(authresp AuthResp)) {
url :="https://api.weixin.qq.com/sns/oauth2/component/access_token?appid="+appid
url = url +"&code="+code+"&grant_type=authorization_code&component_appid="+wx.CompentAppid+"&component_access_token="
url = url + wx.Component_access_token
resp := Get(url)
var result AuthResp
mapstructure.Decode(resp,&result)
fmt.Println("代公众号获取usertoken",resp,result,appid,url)
if len(handler)>0 {
handler[0](result)
}
}
//代公众号获取网页登录用户信息
func (wx *WXKFManager) getAppAuthuserInfo(auth AuthResp,handler ...func(authuser JsonResponse)) {
url :="https://api.weixin.qq.com/sns/userinfo?access_token="
url =url +auth.Access_token+"&openid="+auth.Openid+"&lang=zh_CN"
resp := Get(url)
fmt.Println("代公众号获取网页用户信息",resp.Dic)
if len(handler)>0 {
handler[0](resp)
}
}
/*解析回电xml使用*/
//解析推送授权事件的XML
func (wx *WXKFManager) parsereqToAPPAuthMsg(context2 * gin.Context,f func(CheckSign bool,Orignmsg ReqMsg,Decrptmsg APPAuthMsg,safe bool)){
parsereqMsg(context2,wx.Compenttoken,wx.CompentAeskey, func(CheckSign bool, Orignmsg ReqMsg, safe bool) {
var decreptMsg APPAuthMsg
if safe{
decreptMsg,_ = decryptAPPAuthMsg(Orignmsg.Encrypt,wx.CompentAeskey)
wx.ComponentVerifyTicket = decreptMsg.ComponentVerifyTicket
if wx.Component_access_token=="" {
wx.getComponent_access_token()
wx.getCompent_pre_auth_code()
}
}
f(CheckSign,Orignmsg,decreptMsg,safe)
})
}
//解析代公众号实现事件消息XML
func (wx *WXKFManager) parsereqToReqMsg(context2 * gin.Context,f func(CheckSign bool,Orignmsg ReqMsg,Decrptmsg ReqMsg,safe bool)){
parsereqMsg(context2,wx.Compenttoken,wx.CompentAeskey, func(CheckSign bool, Orignmsg ReqMsg, safe bool) {
var decreptMsg ReqMsg
if safe{
decreptMsg,_ = decryptReqmsg(Orignmsg.Encrypt,wx.CompentAeskey)
}
f(CheckSign,Orignmsg,decreptMsg,safe)
})
}
//解析请求
func parsereqMsg(context2 * gin.Context,token string,aeskey string,handler func(CheckSign bool,Orignmsg ReqMsg,safe bool)) {
sign := context2.Query("signature")
timestamp := context2.Query("timestamp")
nonce := context2.Query("nonce")
encrypt_type := context2.Query("encrypt_type")
msgsign := context2.Query("msg_signature")
var event ReqMsg
s,_:=ioutil.ReadAll(context2.Request.Body)
xml.Unmarshal(s,&event)
safe := encrypt_type=="aes"
checksign :=false
if safe{
checksign = SignMsg(token,timestamp,nonce,event.Encrypt)==msgsign
}else {
checksign =SignMsg(token,timestamp,nonce)==sign
}
handler(checksign,event,safe)
}
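// Usage sketch (hypothetical handler): parsereqMsg only checks the signature and
// unmarshals the request XML; when safe is true the Encrypt field still has to be
// decrypted by the caller.
//
//	parsereqMsg(c, token, aesKey, func(ok bool, msg ReqMsg, safe bool) {
//		if !ok {
//			return // signature mismatch
//		}
//		// handle msg here
//	})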
| identifier_body |
||
wxkfmanager.go | 授权码
Redircturl string //公众号授权完成后的回调URL用来接收授权码auth_code
Compentauthurl string //当前第三方平台授权移动端连接
AppAuthinfos []APPAuthInfoResp //第三方公众号令牌数组,自动刷新
}
//初始化开放平台管理器
func InitWXKFManager(token,appid,EncodingAESKey,appseceret,Redircturl string,refreshAppAuthHanlder func(appauth ...APPAuthInfoResp),AppAuthinfos...[]APPAuthInfoResp) *WXKFManager {
var wx WXKFManager
wx.Compenttoken = token
wx.CompentAeskey = EncodingAESKey
wx.CompentAppid = appid
wx.Componentsecret = appseceret
wx.Redircturl=Redircturl
//每小时刷新一次component_accesstoken
cr := cron.New()
spec := "0 0 */1 * * ?"
err:=cr.AddFunc(spec,wx.getComponent_access_token)
err= cr.AddFunc(spec, func() {
if len(AppAuthinfos)>0 {
wx.AppAuthinfos=AppAuthinfos[0]
for _,value := range wx.AppAuthinfos{
wx.RefreshCompentAuthAccessToken(value.AuthorizationInfo.AuthorizerAppid,value.AuthorizationInfo.AuthorizerRefreshToken, func(APPAuthInfo APPAuthInfoResp) {
refreshAppAuthHanlder(APPAuthInfo)
})
}
}else {
refreshAppAuthHanlder()
}
})
fmt.Println("开房平台定时任务初始化",err,spec)
spe :="0 */10 * * * ?"
err=cr.AddFunc(spe,wx.getCompent_pre_auth_code)
fmt.Println("开房平台定时任务初始化",err,spe)
cr.Start()
return &wx
}
//授权流程
//授权回调 获取授权码并根据授权码获取授权信息
func (wx *WXKFManager) HanleCompentAuth(context * gin.Context, responsehandler func(authinfo APPAuthInfoResp)(redicturl string)){
authcode := context.Query("auth_code")
//查询公众号授权第三方平台的权限
wx.getCompentAuthAccesstoken(authcode, func(appAuthInfo APPAuthInfoResp) {
wx.AppAuthinfos=append(wx.AppAuthinfos, appAuthInfo)
redicturl := responsehandler(appAuthInfo)
context.Redirect(http.StatusMovedPermanently,redicturl)
})
}
//获取授权方公众号账号基本信息
func (wx *WXKFManager) GetCompentAuthorizerInfo(authorizer_appid string,response ...func(resp APPUserInfoResp)) {
url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token="
url =url +wx.Component_access_token
POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorizer_appid":authorizer_appid}, func(resp JsonResponse) {
var result APPUserInfoResp
mapstructure.Decode(resp.Dic,&result)
result.JsonResponse=&resp
if len(response)>0 {
response[0](result)
}
})
} |
//获取授权方选项设置信息
func (wx *WXKFManager) GetCompentAuthOptionInfo(authorizer_appid,option_name string,responsehandler ...func(APPOptionResp)) {
url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token="
url =url +wx.Component_access_token
POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorizer_appid":authorizer_appid,"option_name":option_name}, func(resp JsonResponse) {
var result APPOptionResp
mapstructure.Decode(resp.Dic,&result)
result.JsonResponse=&resp
if len(responsehandler)>0 {
responsehandler[0](result)
}
})
}
//设置授权方选项信息
func (wx *WXKFManager) SetCompentAuthOption(authorizer_appid,option_name,option_value string,responsehandler ...func(BaseResp)) {
url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token="
url =url +wx.Component_access_token
POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorizer_appid":authorizer_appid,"option_name":option_name,
"option_value":option_value}, func(resp JsonResponse) {
var result BaseResp
mapstructure.Decode(resp.Dic,&result)
result.JsonResponse=&resp
if len(responsehandler)>0 {
responsehandler[0](result)
}
})
}
//授权通知处理
func (wx *WXKFManager) HandleCompentAuthEventPush(context * gin.Context,responsehandler ...func(appmsg APPAuthMsg)){
wx.parsereqToAPPAuthMsg(context, func(CheckSign bool, Orignmsg ReqMsg, Decrptmsg APPAuthMsg, safe bool) {
if CheckSign {
if len(responsehandler)>0 {
responsehandler[0](Decrptmsg)
}
}
})
context.String(http.StatusOK,"success")
}
//代公众号实现网页授权
/****************代公众号实现业务*******************/
//1.代公众号调用接口
//获取用户信息
func (wx *WXKFManager)GetUserInfo( authorizer_access_token,authorizer_appid string ,hanlder ...func(user JsonResponse) ){
if len(hanlder)>0 {
getuserInfo(authorizer_access_token,authorizer_appid,hanlder[0])
}else {
getuserInfo(authorizer_access_token,authorizer_appid)
}
}
//获取用户列表
func (wx *WXKFManager)GetUserList( authorizer_access_token string,hanlder func(user JsonResponse),nestopid ...string ){
if len(nestopid)>0 {
getuserlist(authorizer_access_token,hanlder,nestopid[0] )
}else {
getuserlist(authorizer_access_token,hanlder)
}
}
//2.代公众号处理消息和事件
func (wx *WXKFManager) HandleAppEventPush(ctx * gin.Context, handler func(msg ReqMsg)(usedefult bool,replymsg interface{})){
wx.parsereqToReqMsg(ctx, func(CheckSign bool, Orignmsg ReqMsg, Decrptmsg ReqMsg, safe bool) {
if !CheckSign {
ctx.String(http.StatusForbidden, "验证签名错误")
return
}
if ctx.Request.Method!= http.MethodPost {
echostr := ctx.Query("echostr")
ctx.String(http.StatusOK,echostr)
return
}
def,replymsg := handler(Decrptmsg)
if def {
ctx.String(http.StatusOK,"success")
return
}
if safe {
//
ctx.String(http.StatusOK,string(ReplyMsgData(wx.msgEncrept(replymsg))))
}else {
ctx.String(http.StatusOK, string(ReplyMsgData(replymsg)))
}
})
}
//3.代公众号发起网页授权
//获取代公众号发起网页授权url
func (wx *WXKFManager) GetAppAuthurl(appid,scope,redirect_uri,state string) string {
str := url.QueryEscape(redirect_uri)
url:="https://open.weixin.qq.com/connect/oauth2/authorize?appid="
url=url+appid+"&redirect_uri="+str
url =url+"&response_type=code&scope="+scope+"&state="+state+"&component_appid="+wx.CompentAppid
url =url+"#wechat_redirect"
return url
}
//网页授权后回调
func (wx *WXKFManager) HanledAppAuth(context * gin.Context,completeHandler func(resp AuthResp,authuser JsonResponse,state string)(redicturl string)) {
code := context.Query("code")
appid := context.Query("appid")
state := context.Query("state")
fmt.Println(code,appid,"接收到的微信信息时")
wx.getAppAuthUserAccesstoken(code,appid, func(authresp AuthResp) {
wx.getAppAuthuserInfo(authresp, func(authuser JsonResponse) {
redicturl:=completeHandler(authresp,authuser,state)
context.Redirect(http.StatusMovedPermanently,redicturl)
})
})
}
//4代公众号调用jssdk
/*...........私有方法.................*/
//加密XML结构体消息体
func (wx *WXKFManager)msgEncrept(msg interface{})EncryptMsg {
return CreatEncryptMsg(ReplyMsgData(msg),DecodeAESKey(wx.CompentAeskey),wx.CompentAppid,wx.Compenttoken)
}
/*授权流程使用*/
//1.获取第三方平台access_token
func (wx *WXKFManager) getComponent_access_token() {
if len(wx.ComponentVerifyTicket)>0 {
wx.Component_access_token = getcomponent_token(wx.CompentAppid,wx.Componentsecret,wx.ComponentVerifyTicket)
}else {
wx.Component_access_token=""
}
}
//2.获取预授权码
func (wx *WXKFManager) getCompent_pre_auth_code() {
if len(wx.Component_access_token)>0 {
wx.Pre_auth_code= getCompent_pre_authcode(wx.Component_access_token,gin.H{"component_appid": wx.CompentAppid})
wx.Compentauthurl= getCompentAuthUrl(wx.CompentAppid,wx.Pre_auth_code,wx.Redircturl)
}else {
wx.Pre_auth_code=""
}
}
//3.使用授权码换取公众号或小程序的接口调用凭据和授权信息
func (wx *WXKFManager) getCompent | random_line_split |
|
technique.rs | (json_file: &Path, rl_file: &Path) -> Result<()> {
let config_data = fs::read_to_string("data/config.toml").expect("Cannot read config.toml file");
let config: toml::Value = toml::from_str(&config_data).expect("Invalid config.toml file");
// we use if let for error conversion
// we don't use match for better linear reading
let json_data = fs::read_to_string(&json_file);
if json_data.is_err() { return Err(Error::User(format!("Cannot read file {}", json_file.to_string_lossy()))) }
let technique = serde_json::from_str::<Technique>(&json_data.unwrap());
if technique.is_err() { return Err(Error::User(format!("Invalid technique in file {}", json_file.to_string_lossy()))) }
let rl_technique = translate(&config, &technique.unwrap())?;
if fs::write(&rl_file, rl_technique).is_err() { return Err(Error::User(format!("Cannot write file {}", rl_file.to_string_lossy()))) }
Ok(())
}
fn translate(config: &toml::Value, technique: &Technique) -> Result<String> {
let parameters_meta = serde_json::to_string(&technique.parameter);
if parameters_meta.is_err() { return Err(Error::User("Unable to parse technique file".to_string())) }
let parameters = technique.bundle_args.join(",");
let calls = map_strings_results(
technique.method_calls.iter(), |c| translate_call(config, c), "\n"
)?;
let out = format!(r#"@format=0
# This file has been generated with rltranslate
@name="{name}"
@description="{description}"
@version="{version}"
@parameters={parameters_meta}
resource {bundle_name}({parameters})
{bundle_name} state technique() {{
{calls}
}}
"#, description=technique.description,
version=technique.version,
name=technique.name,
bundle_name=technique.bundle_name,
parameters_meta=parameters_meta.unwrap(),
parameters=parameters,
calls=calls);
Ok(out)
}
fn translate_call(config: &toml::Value, call: &MethodCall) -> Result<String> {
lazy_static! {
static ref RE:Regex = Regex::new(r"^([a-z]+)_(\w+)$").unwrap();
}
// separate resource and state
let (resource,state) = match RE.captures(&call.method_name) {
Some(caps) => (caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()),
None => return Err(Error::User(format!("Invalid method name '{}'", call.method_name))),
};
// split argument list
let rconf = match config.get("resources") {
None => return Err(Error::User("No resources section in config.toml".into())),
Some(m) => m,
};
let res_arg_v = match rconf.get(resource) {
None => toml::value::Value::Integer(1),
Some(r) => r.clone(),
};
let res_arg_count: usize = match res_arg_v.as_integer() {
None => return Err(Error::User(format!("Resource prefix '{}' must have a number as its parameter count",resource))),
Some(v) => v as usize,
};
let it = &mut call.args.iter();
let res_args = map_strings_results(it.take(res_arg_count), |x| translate_arg(config,x), ",")?;
let st_args = map_strings_results(it, |x| translate_arg(config,x), ",")?;
// call formatting
let call_str = format!("{}({}).{}({})", resource, res_args, state, st_args);
let out_state = if call.class_context == "any" {
format!(" {}", call_str)
} else {
let condition = translate_condition(config, &call.class_context)?;
format!(" if {} => {}", condition, call_str)
};
// outcome detection and formatting
let mconf = match config.get("methods") {
None => return Err(Error::User("No methods section in config.toml".into())),
Some(m) => m,
};
let method = match mconf.get(&call.method_name) {
None => return Err(Error::User(format!("Unknown generic method call: {}",&call.method_name))),
Some(m) => m,
};
let class_prefix = match method.get("class_prefix") {
None => return Err(Error::User(format!("Undefined class_prefix for {}",&call.method_name))),
Some(m) => m.as_str().unwrap(),
};
let class_parameter_id = match method.get("class_parameter_id") {
None => return Err(Error::User(format!("Undefined class_parameter_id for {}",&call.method_name))),
Some(m) => m.as_integer().unwrap(),
};
let class_parameter_value = &call.args[class_parameter_id as usize];
let canonic_parameter = canonify(class_parameter_value);
let outcome = format!(" as {}_{}",class_prefix,canonic_parameter);
// TODO remove outcome if there is no usage
Ok(format!(" @component = \"{}\"\n{}{}", &call.component, out_state, outcome))
}
fn canonify(input: &str) -> String {
let s = input.as_bytes().iter()
.map(|x|
if x.is_ascii_alphanumeric() || *x == b'_' {
*x
} else {
b'_'
}
)
.collect::<Vec<u8>>();
str::from_utf8(&s).expect(&format!("Canonify failed on {}",input)).to_owned()
}
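// Behaviour sketch: every byte that is not ASCII alphanumeric or '_' is replaced by '_',
// so for example canonify("Deploy /etc/motd!") == "Deploy__etc_motd_". Each byte of a
// multi-byte UTF-8 character is replaced individually.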
#[derive(Clone)]
struct CFVariable {
ns: Option<String>,
name: String,
}
fn parse_cfvariable(i: &str) -> IResult<&str,CFVariable> {
map(tuple((
opt(map(terminated(take_while1(|c: char| c.is_alphanumeric() || (c == '_')),tag(".")),|x: &str| x.into())),
map(take_while1(|c: char| c.is_alphanumeric() || (c == '_')),|x: &str| x.into()),
)), |(ns, name)| CFVariable { ns, name })(i)
}
#[derive(Clone)]
enum CFStringElt {
Static(String), // static content
Variable(CFVariable), // variable name
}
impl CFStringElt {
fn to_string(&self) -> Result<String> {
Ok(match self {
CFStringElt::Static(s) => s.to_string(),
CFStringElt::Variable(v) => {
match &v.ns {
None => v.name.clone(), // a parameter
Some(ns) => match ns.as_ref() {
"const" => (match v.name.as_ref() {
"dollar" => "$",
"dirsep" => "/",
"endl" => "\\n",
"n" => "\\n",
"r" => "\\r",
"t" => "\\t",
_ => return Err(Error::User(format!("Unknown constant '{}.{}'", ns, v.name))),
}).into(),
"sys" => return Err(Error::User(format!("Not implemented variable namespace sys '{}.{}'", ns, v.name))),
"this" => return Err(Error::User(format!("Unsupported variable namespace this '{}.{}'", ns, v.name))),
ns => format!("${{{}.{}}}",ns,v.name),
},
}
// TODO
// - array -> ?
// - list -> ?
},
})
}
}
fn parse_cfstring(i: &str) -> IResult<&str,Vec<CFStringElt>> {
// There is a rest inside so this just serve as a guard
all_consuming(
alt((
many1(alt((
// variable ${}
map(
delimited(tag("${"), parse_cfvariable, tag("}")),
CFStringElt::Variable),
// variable $()
map(
delimited(tag("$("), parse_cfvariable, tag(")")),
CFStringElt::Variable),
// constant
map(take_until("$"), |s: &str| CFStringElt::Static(s.into())),
// end of string
map(preceded(
peek(anychar), // do not take the rest if we are already at the end
rest),
|s: &str| CFStringElt::Static(s.into())),
))),
// empty string
value(vec![CFStringElt::Static("".into())], not(anychar)),
))
)(i)
}
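// Parsing sketch for the combinator above: "${sys.host} is up" yields
// [Variable(sys.host), Static(" is up")], while a plain string without any '$'
// yields a single Static element.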
fn translate_arg(config: &toml::Value, arg: &str) -> Result<String> {
let var = match parse_cfstring(arg) {
Err(_) => return Err(Error::User(format!("Invalid variable syntax in '{}'", arg))),
Ok((_,o)) => o
};
map_strings_results(var.iter(), |x| Ok(format!("\"{}\"",x.to_string()?)), ",")
}
fn translate_condition(config: &toml::Value, cond: &str) -> Result<String> {
lazy_static! {
static ref METHOD_RE:Regex = Regex::new(r"^(\w+)_(\w+)$").unwrap();
static ref OS_RE:Regex = Regex::new(r"^([a-zA-Z]+)(_(\d+))*$").unwrap();
}
// detect method outcome class
if let Some(caps) = METHOD_RE.captures(cond) {
let (method, status) = (caps.get(1). | translate_file | identifier_name |
|
technique.rs | for better linear reading
let json_data = fs::read_to_string(&json_file);
if json_data.is_err() { return Err(Error::User(format!("Cannot read file {}", json_file.to_string_lossy()))) }
let technique = serde_json::from_str::<Technique>(&json_data.unwrap());
if technique.is_err() { return Err(Error::User(format!("Invalid technique in file {}", json_file.to_string_lossy()))) }
let rl_technique = translate(&config, &technique.unwrap())?;
if fs::write(&rl_file, rl_technique).is_err() { return Err(Error::User(format!("Cannot write file {}", rl_file.to_string_lossy()))) }
Ok(())
}
fn translate(config: &toml::Value, technique: &Technique) -> Result<String> {
let parameters_meta = serde_json::to_string(&technique.parameter);
if parameters_meta.is_err() { return Err(Error::User("Unable to parse technique file".to_string())) }
let parameters = technique.bundle_args.join(",");
let calls = map_strings_results(
technique.method_calls.iter(), |c| translate_call(config, c), "\n"
)?;
let out = format!(r#"@format=0
# This file has been generated with rltranslate
@name="{name}"
@description="{description}"
@version="{version}"
@parameters={parameters_meta}
resource {bundle_name}({parameters})
{bundle_name} state technique() {{
{calls}
}}
"#, description=technique.description,
version=technique.version,
name=technique.name,
bundle_name=technique.bundle_name,
parameters_meta=parameters_meta.unwrap(),
parameters=parameters,
calls=calls);
Ok(out)
}
fn translate_call(config: &toml::Value, call: &MethodCall) -> Result<String> {
lazy_static! {
static ref RE:Regex = Regex::new(r"^([a-z]+)_(\w+)$").unwrap();
}
// separate resource and state
let (resource,state) = match RE.captures(&call.method_name) {
Some(caps) => (caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()),
None => return Err(Error::User(format!("Invalid method name '{}'", call.method_name))),
};
// split argument list
let rconf = match config.get("resources") {
None => return Err(Error::User("No resources section in config.toml".into())),
Some(m) => m,
};
let res_arg_v = match rconf.get(resource) {
None => toml::value::Value::Integer(1),
Some(r) => r.clone(),
};
let res_arg_count: usize = match res_arg_v.as_integer() {
None => return Err(Error::User(format!("Resource prefix '{}' must have a number as its parameter count",resource))),
Some(v) => v as usize,
};
let it = &mut call.args.iter();
let res_args = map_strings_results(it.take(res_arg_count), |x| translate_arg(config,x), ",")?;
let st_args = map_strings_results(it, |x| translate_arg(config,x), ",")?;
// call formatting
let call_str = format!("{}({}).{}({})", resource, res_args, state, st_args);
let out_state = if call.class_context == "any" {
format!(" {}", call_str)
} else {
let condition = translate_condition(config, &call.class_context)?;
format!(" if {} => {}", condition, call_str)
};
// outcome detection and formatting
let mconf = match config.get("methods") {
None => return Err(Error::User("No methods section in config.toml".into())),
Some(m) => m,
};
let method = match mconf.get(&call.method_name) {
None => return Err(Error::User(format!("Unknown generic method call: {}",&call.method_name))),
Some(m) => m,
};
let class_prefix = match method.get("class_prefix") {
None => return Err(Error::User(format!("Undefined class_prefix for {}",&call.method_name))),
Some(m) => m.as_str().unwrap(),
};
let class_parameter_id = match method.get("class_parameter_id") {
None => return Err(Error::User(format!("Undefined class_parameter_id for {}",&call.method_name))),
Some(m) => m.as_integer().unwrap(),
};
let class_parameter_value = &call.args[class_parameter_id as usize];
let canonic_parameter = canonify(class_parameter_value);
let outcome = format!(" as {}_{}",class_prefix,canonic_parameter);
// TODO remove outcome if there is no usage
Ok(format!(" @component = \"{}\"\n{}{}", &call.component, out_state, outcome))
}
fn canonify(input: &str) -> String {
let s = input.as_bytes().iter()
.map(|x|
if x.is_ascii_alphanumeric() || *x == b'_' {
*x
} else {
b'_'
}
)
.collect::<Vec<u8>>();
str::from_utf8(&s).expect(&format!("Canonify failed on {}",input)).to_owned()
}
#[derive(Clone)]
struct CFVariable {
ns: Option<String>,
name: String,
}
fn parse_cfvariable(i: &str) -> IResult<&str,CFVariable> {
map(tuple((
opt(map(terminated(take_while1(|c: char| c.is_alphanumeric() || (c == '_')),tag(".")),|x: &str| x.into())),
map(take_while1(|c: char| c.is_alphanumeric() || (c == '_')),|x: &str| x.into()),
)), |(ns, name)| CFVariable { ns, name })(i)
}
#[derive(Clone)]
enum CFStringElt {
Static(String), // static content
Variable(CFVariable), // variable name
}
impl CFStringElt {
fn to_string(&self) -> Result<String> {
Ok(match self {
CFStringElt::Static(s) => s.to_string(),
CFStringElt::Variable(v) => {
match &v.ns {
None => v.name.clone(), // a parameter
Some(ns) => match ns.as_ref() {
"const" => (match v.name.as_ref() {
"dollar" => "$",
"dirsep" => "/",
"endl" => "\\n",
"n" => "\\n",
"r" => "\\r",
"t" => "\\t",
_ => return Err(Error::User(format!("Unknown constant '{}.{}'", ns, v.name))),
}).into(),
"sys" => return Err(Error::User(format!("Not implemented variable namespace sys '{}.{}'", ns, v.name))),
"this" => return Err(Error::User(format!("Unsupported variable namespace this '{}.{}'", ns, v.name))),
ns => format!("${{{}.{}}}",ns,v.name),
},
}
// TODO
// - array -> ?
// - list -> ?
},
})
}
}
fn parse_cfstring(i: &str) -> IResult<&str,Vec<CFStringElt>> {
// There is a rest inside so this just serve as a guard
all_consuming(
alt((
many1(alt((
// variable ${}
map(
delimited(tag("${"), parse_cfvariable, tag("}")),
CFStringElt::Variable),
// variable $()
map(
delimited(tag("$("), parse_cfvariable, tag(")")),
CFStringElt::Variable),
// constant
map(take_until("$"), |s: &str| CFStringElt::Static(s.into())),
// end of string
map(preceded(
peek(anychar), // do not take the rest if we are already at the end
rest),
|s: &str| CFStringElt::Static(s.into())),
))), |
fn translate_arg(config: &toml::Value, arg: &str) -> Result<String> {
let var = match parse_cfstring(arg) {
Err(_) => return Err(Error::User(format!("Invalid variable syntax in '{}'", arg))),
Ok((_,o)) => o
};
map_strings_results(var.iter(), |x| Ok(format!("\"{}\"",x.to_string()?)), ",")
}
fn translate_condition(config: &toml::Value, cond: &str) -> Result<String> {
lazy_static! {
static ref METHOD_RE:Regex = Regex::new(r"^(\w+)_(\w+)$").unwrap();
static ref OS_RE:Regex = Regex::new(r"^([a-zA-Z]+)(_(\d+))*$").unwrap();
}
// detect method outcome class
if let Some(caps) = METHOD_RE.captures(cond) {
let (method, status) = (caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str());
if vec![ "kept", "success" ].iter().any(|x| x == &status) {
return Ok(format!("{} =~ success", method));
} else if vec![ "error", "not_ok", "failed", "denied", "timeout" ].iter().any(|x| x == &status) {
return | // empty string
value(vec![CFStringElt::Static("".into())], not(anychar)),
))
)(i)
} | random_line_split |
html5_player.js |
};
//add the large-screen button only on the watch video page
if (enlarge_small == 'true')
{
$('.btmControl').append('<div class="smallscr largescr hbtn" id="largescr" title="Toggle large/small size"></div>');
$('#largescr').insertBefore("#fs");
}
//Large screen function
$(".largescr").click(function() {
$(this).toggleClass('smallscr');
if(!$(this).hasClass('smallscr'))
{
$(".cb_player").animate({height:'+=220px',width:'+=390px'},"fast");
$('.html5_player_enlarge').addClass('col-lg-12').removeClass('col-lg-8');
}
else
{
$(".cb_player").animate({height:'-=220px',width:'-=390px'},"fast");
$('.html5_player_enlarge').removeClass('col-lg-12').addClass('col-lg-8');
}
});
//Right Click Menu
$('#cont').append('<div id="rightcmenu"></div>');
//$('#rightcmenu').append('<span id="op">CB Html5 menu</span>');
$('#rightcmenu').append('<ul id="ritems"></ul>');
$('#ritems').append('<li id="copy" class="rlist copy">Show Video link</li>');
$('#ritems').append('<li class="rlist about">About</li>');
$('#ritems').append('<li class="rlist clip">Powered by Clipbucket</li>');
$('.cont').bind("contextmenu", function (e) {
e.preventDefault(); // To prevent the default context menu.
$("#rightcmenu").css("left", e.pageX); // For updating the menu position.
$("#rightcmenu").css("top", e.pageY); //
$("#rightcmenu").fadeIn(500, startFocusOut()); // For bringing the context menu in picture.
});
function startFocusOut() {
$(document).on("click", function () {
$("#rightcmenu").hide(500); // To hide the context menu
$('.cont').off("click");
});
}
$(".clip").click(function(event) {
window.open(homepath, '_blank');
});
$(".about").click(function(event) {
window.open(homepath, '_blank');
});
$('.copy').click(function() {
alert(document.URL);
});
//Logo
$('.cb-playerLogo').append('<div id="path" class="path hbtn" > </div>');
$('#path').prop("href",product_link);
// $("#path").insertAfter("#hd");
$('#path').css({
'backgroundImage': 'url(data:image/png;base64,' + webpath + ')',
'margin-right':'7px',
'margin-top':'0px',
'background-repeat':'no-repeat',
'background-position' : '100% 50%',
});
$("#path").click(function(event) {
window.open(product_link, '_blank');
});
$('#name_v,#thumb_v').mouseover(function() {
$(this).css({'opacity':'1',
'border': '0px solid #000',
'box-shadow':'1px 0 5px #fff',
'-moz-box-shadow':'1px 0 5px #fff',
'-webkit-box-shadow':'1px 0 5px #fff',});
});
$('#name_v,#thumb_v').mouseout(function() {
$(this).css({'opacity':'.9',
'border': '0px solid #000',
'box-shadow':'0px 0 0px #fff',
'-moz-box-shadow':'0px 0 0px #fff',
'-webkit-box-shadow':'0px 0 0px #fff',});
});
// Setting in-video logo for player
if( iv_logo_enable == 'yes')
{
$('.cont').append('<img id="web" src=data:image/png;base64,'+ web +'> ');
$('#web').css({
'top' : $top,
'left' : $left,
'bottom' : $bottom,
'right' : $right ,
'position': 'absolute',
'width': '100px',
'height': '30px',
'z-index' : '-1'
});
}
//For multiserver plugin videos :)
if(files)
{
var toggle = false;
var time_var = false;
var start_time = 0;
$('#res').on('click',function(event){
if(toggle == false)
{
$('.video_files').show(10);
toggle = true;
}
else
{
$('.video_files').hide(10);
toggle = false;
}
event.stopPropagation();
});
$('html').click(function() {
$('.video_files').hide(100);
toggle =false;
});
// All multiserver Video Files (Json)
var jsonData = JSON.parse(files);
// checking if the 360 resolution is not available
if(!jsonData["360"])
{
video.attr('src',jsonData["240"]);
$('#li_240').addClass('selected_player');
}
else
$('#li_360').addClass('selected_player');
$.each(jsonData, function (key, data) {
$('#li_' + key).on('mouseenter', function(){
$(this).css({'background-color':'#000'});
});
$('#li_' + key).on('mouseleave', function(){
$(this).css({'background-color':'#1D1D1D'});
});
$('#li_' + key).on('click', function(){
//getting current time variable for video to play .. on change resolution and passing to loadmetadata
start_time = video[0].currentTime;
//Changing source attribute for the required resolution ..
console.log("current_video=>"+jsonData[key]);
video.attr('src',jsonData[key]);
load_meta_data(start_time,video);
time_var = true;
});
});
$('#ul_files .list_player').click(function() {
$('#ul_files .list_player.selected_player').removeClass('selected_player');
$(this).closest('li').addClass('selected_player');
});
}
/**
* For multiserver plugin videos <<--END-->> :)
*/
//Time format converter - 00:00
var timeFormat = function(seconds){
var m = Math.floor(seconds/60)<10 ? "0"+Math.floor(seconds/60) : Math.floor(seconds/60);
var s = Math.floor(seconds-(m*60))<10 ? "0"+Math.floor(seconds-(m*60)) : Math.floor(seconds-(m*60));
return m+":"+s;
};
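// Illustrative examples (not from the original source): timeFormat(75) returns "01:15";
// hours are not handled, so timeFormat(3700) returns "61:40".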
/**
* Following function is used to attach fullscreen events to document
*/
function attachEvents (){
$( document ).on( 'fullscreenchange', toggleFullScreen );
$( document ).on( 'webkitfullscreenchange', toggleFullScreen );
$( document ).on( 'mozfullscreenchange', toggleFullScreen );
$( document ).on( 'MSFullscreenchange', toggleFullScreen );
}
/**
* Following events are used to check fullscreen events
*/
document.addEventListener("fullscreenchange", function (e) {
toggleFullScreen(e);
}, false);
document.addEventListener("mozfullscreenchange", function (e) {
toggleFullScreen(e);
}, false);
document.addEventListener("webkitfullscreenchange", function (e) {
toggleFullScreen(e);
}, false);
/**
* Following function is called on entering fullscreen: it hides the caption and enlarge button and shows the control bar only on hover
*/
function showControl(){
$('.caption').hide();
$(".largescr").hide();
$(".control").hover(
function() {
$('.control').stop().animate({'bottom':0}, 100);
},
function() {
$('.control').stop().animate({'bottom':-40}, 1000);
});
}
/**
* Following function is called on exiting fullscreen: it restores the caption, the enlarge button and the default control behaviour
*/
function hideControl()
{
$('.caption').show();
$(".largescr").show();
$('.cb-item-title-container').css({'margin-top':0});
$(".control").hover(
function() {
$(this).unbind('mouseenter').unbind('mouseleave');
});
}
/**
* Following function is used to call events on toggle screen
*/
function toggleFullScreen (e) {
isFullScreen = ( document.fullscreenElement || document.webkitFullscreenElement || document.mozFullScreenElement || document.msFullscreenElement );
if ( isFullScreen ) {
showControl();
} else {
hideControl();
}
}
});
/**
* caption hide
*/
function caption_hide()
{
$('.control').stop().animate({'bottom':-51}, 500);
$('.caption').stop().animate({'top':-200}, 500);
}
/**
* caption show
*/
function caption_show()
{
$('.control').stop().animate({'bottom':0}, 100);
$('.caption').stop().animate({'top':-7}, 100);
}
function load_meta_data(start_time,video)
| {
//before everything get started
video.on('loadedmetadata', function() {
video[0].currentTime = start_time;
video[0].play();
//set video properties
$('.loading').fadeOut(500);
$('.init').hide();
});
} | identifier_body |
|
html5_player.js | end
* ==================
*/
//CONTROLS EVENTS
//video screen and play button clicked
video.on('click', function() { playpause(); } );
$('.btnPlay').on('click', function() { playpause(); } );
$('.caption').on('click', function() { playpause(); } );
$('.init').on('click', function() { playpause(); } );
var playpause = function() {
if(video[0].paused || video[0].ended) {
$('.init').hide();
$('.btnPlay').addClass('paused');
video[0].play();
_pause = false;
}
else |
};
$( "#replay_v" ).click(function() {
video[0].play();
$('#opacity').hide();
$('#related_1').hide();
$('.control').show();
$('.caption').show();
$('#web').show();
//show the pause icon before the video loads
$('.btnPlay').addClass('paused');
});
$( "#cancel_v" ).click(function() {
$('#opacity').hide();
$('#related_1').hide();
$('.control').show();
$('.caption').show();
$('.init').show();
$('#web').show();
});
//speed text clicked
$('.btnx1').on('click', function() { fastfowrd(this, 1); });
$('.btnx3').on('click', function() { fastfowrd(this, 3); });
var fastfowrd = function(obj, spd) {
$('.text').removeClass('selected');
$(obj).addClass('selected');
video[0].playbackRate = spd;
video[0].play();
};
//stop button clicked
$('.btnStop').on('click', function() {
$('.btnPlay').removeClass('paused');
updatebar($('.progress').offset().left);
video[0].pause();
});
$('.btnFS').on('click', function() {
$(this).toggleClass('enterbtnFS');
isButtonFullscreen = true;
if($.isFunction(container[0].webkitRequestFullScreen))
{
if(isFullScreen)
{
isFullScreen = false;
document.webkitCancelFullScreen();
}
else
{
isFullScreen = true;
container[0].webkitRequestFullScreen();
}
}
else if($.isFunction(container[0].mozRequestFullScreen))
{
if(isFullScreen)
{
isFullScreen = false;
document.mozCancelFullScreen();
}
else
{
isFullScreen = true;
container[0].mozRequestFullScreen();
}
}
});
var _HD_flag = false;
//HD on/off button clicked
$(".hdon").on('click', function() {
$(this).toggleClass('hdoff');
$('.myVideo').removeClass('init');
$('source', '#myVideo').eq(1).prependTo('#myVideo');
$('#myVideo')[0].load();
$('#myVideo')[0].play();
$('.init').hide();
$('.btnPlay').addClass('paused');
_HD_flag = true;
});
//sound button clicked
$('.sound').click(function() {
video[0].muted = !video[0].muted;
$(this).toggleClass('muted');
if(video[0].muted) {
$('.volumeBar').css('width',0);
}
else{
$('.volumeBar').css('width', video[0].volume*100+'%');
}
});
//VIDEO EVENTS
//video canplay event
video.on('canplay', function() {
$('.loading').fadeOut(100);
});
//video canplaythrough event
//solve Chrome cache issue
var completeloaded = false;
video.on('canplaythrough', function() {
completeloaded = true;
});
//video ended event
video.on('ended', function() {
$('.btnPlay').removeClass('paused');
video[0].pause();
$('#opacity').show();
$('#related_1').show();
$('.control').hide();
$('.caption').hide();
$('#web').hide();
});
//video seeking event
video.on('seeking', function() {
//if video fully loaded, ignore loading screen
if(!completeloaded) {
$('.loading').fadeIn(200);
}
});
//video seeked event
video.on('seeked', function() {
$('.loading').fadeOut(200);
});
//video waiting for more data event
video.on('waiting', function() {
$('.loading').fadeIn(200);
});
//VIDEO PROGRESS BAR
//when video timebar clicked
var timeDrag = false; /* check for drag event */
$('.progress').on('mousedown', function(e) {
timeDrag = true;
updatebar(e.pageX);
});
$(document).on('mouseup', function(e) {
if(timeDrag) {
timeDrag = false;
updatebar(e.pageX);
}
});
$(document).on('mousemove', function(e) {
if(timeDrag) {
updatebar(e.pageX);
}
});
var updatebar = function(x) {
var progress = $('.progress');
//calculate drag position
//and update video currenttime
//as well as progress bar
var maxduration = video[0].duration;
var position = x - progress.offset().left;
var percentage = 100 * position / progress.width();
if(percentage > 100) {
percentage = 100;
}
if(percentage < 0) {
percentage = 0;
}
$('.timeBar').css('width',percentage+'%');
video[0].currentTime = maxduration * percentage / 100;
};
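// Worked example (illustrative figures): with a 500px-wide progress bar clicked 125px from its
// left edge, percentage = 100 * 125 / 500 = 25, so a 200-second video seeks to 50 seconds.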
//VOLUME BAR
//volume bar event
var volumeDrag = false;
$('.volume').on('mousedown', function(e) {
volumeDrag = true;
video[0].muted = false;
$('.sound').removeClass('muted');
updateVolume(e.pageX);
});
$(document).on('mouseup', function(e) {
if(volumeDrag) {
volumeDrag = false;
updateVolume(e.pageX);
}
});
$(document).on('mousemove', function(e) {
if(volumeDrag) {
updateVolume(e.pageX);
}
});
var updateVolume = function(x, vol) {
var volume = $('.volume');
var percentage;
//if a volume value was passed directly
//then update the volume from it
if(vol) {
percentage = vol * 100;
}
else {
var position = x - volume.offset().left;
percentage = 100 * position / volume.width();
}
if(percentage > 100) {
percentage = 100;
}
if(percentage < 0) {
percentage = 0;
}
//update volume bar and video volume
$('.volumeBar').css('width',percentage+'%');
video[0].volume = percentage / 100;
//change sound icon based on volume
if(video[0].volume == 0){
$('.sound').removeClass('sound2').addClass('muted');
}
else if(video[0].volume > 0.5){
$('.sound').removeClass('muted').addClass('sound2');
}
else{
$('.sound').removeClass('muted').removeClass('sound2');
}
};
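// Worked example (illustrative figures): a call like updateVolume(0, 0.3) sets the bar width to 30%
// and the video volume to 0.3; the icon becomes 'muted' at 0 and 'sound2' above 0.5.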
//getting large screen button only for watch video page
if (enlarge_small == 'true')
{
$('.btmControl').append('<div class="smallscr largescr hbtn" id="largescr" title="Enlarge/Small Size"></div>');
$('#largescr').insertBefore("#fs");
}
//Large screen function
$(".largescr").click(function() {
$(this).toggleClass('smallscr');
if(!$(this).hasClass('smallscr'))
{
$(".cb_player").animate({height:'+=220px',width:'+=390px'},"fast");
$('.html5_player_enlarge').addClass('col-lg-12').removeClass('col-lg-8');
}
else
{
$(".cb_player").animate({height:'-=220px',width:'-=390px'},"fast");
$('.html5_player_enlarge').removeClass('col-lg-12').addClass('col-lg-8');
}
});
//Right Click Menu
$('#cont').append('<div id="rightcmenu"></div>');
//$('#rightcmenu').append('<span id="op">CB Html5 menu</span>');
$('#rightcmenu').append('<ul id="ritems"></ul>');
$('#ritems').append('<li id="copy" | {
$('.init').show();
$('.btnPlay').removeClass('paused');
video[0].pause();
_pause = true;
} | conditional_block |
html5_player.js | }
else {
$('.init').show();
$('.btnPlay').removeClass('paused');
video[0].pause();
_pause = true;
}
};
$( "#replay_v" ).click(function() {
video[0].play();
$('#opacity').hide();
$('#related_1').hide();
$('.control').show();
$('.caption').show();
$('#web').show();
//show the pause icon before the video loads
$('.btnPlay').addClass('paused');
});
$( "#cancel_v" ).click(function() {
$('#opacity').hide();
$('#related_1').hide();
$('.control').show();
$('.caption').show();
$('.init').show();
$('#web').show();
});
//speed text clicked
$('.btnx1').on('click', function() { fastfowrd(this, 1); });
$('.btnx3').on('click', function() { fastfowrd(this, 3); });
var fastfowrd = function(obj, spd) {
$('.text').removeClass('selected');
$(obj).addClass('selected');
video[0].playbackRate = spd;
video[0].play();
};
//stop button clicked
$('.btnStop').on('click', function() {
$('.btnPlay').removeClass('paused');
updatebar($('.progress').offset().left);
video[0].pause();
});
$('.btnFS').on('click', function() {
$(this).toggleClass('enterbtnFS');
isButtonFullscreen = true;
if($.isFunction(container[0].webkitRequestFullScreen))
{
if(isFullScreen)
{
isFullScreen = false;
document.webkitCancelFullScreen();
}
else
{
isFullScreen = true;
container[0].webkitRequestFullScreen();
}
}
else if($.isFunction(container[0].mozRequestFullScreen))
{
if(isFullScreen)
{
isFullScreen = false;
document.mozCancelFullScreen();
}
else
{
isFullScreen = true;
container[0].mozRequestFullScreen();
}
}
});
var _HD_flag = false;
//HD on/off button clicked
$(".hdon").on('click', function() {
$(this).toggleClass('hdoff');
$('.myVideo').removeClass('init');
$('source', '#myVideo').eq(1).prependTo('#myVideo');
$('#myVideo')[0].load();
$('#myVideo')[0].play();
$('.init').hide();
$('.btnPlay').addClass('paused');
_HD_flag = true;
});
//sound button clicked
$('.sound').click(function() {
video[0].muted = !video[0].muted;
$(this).toggleClass('muted');
if(video[0].muted) {
$('.volumeBar').css('width',0);
}
else{
$('.volumeBar').css('width', video[0].volume*100+'%');
}
});
//VIDEO EVENTS
//video canplay event
video.on('canplay', function() {
$('.loading').fadeOut(100);
});
//video canplaythrough event
//solve Chrome cache issue
var completeloaded = false;
video.on('canplaythrough', function() {
completeloaded = true;
});
//video ended event
video.on('ended', function() {
$('.btnPlay').removeClass('paused');
video[0].pause();
$('#opacity').show();
$('#related_1').show();
$('.control').hide();
$('.caption').hide();
$('#web').hide();
});
//video seeking event
video.on('seeking', function() {
//if video fully loaded, ignore loading screen
if(!completeloaded) {
$('.loading').fadeIn(200);
}
});
//video seeked event
video.on('seeked', function() {
$('.loading').fadeOut(200);
});
//video waiting for more data event
video.on('waiting', function() {
$('.loading').fadeIn(200);
});
//VIDEO PROGRESS BAR
//when video timebar clicked
var timeDrag = false; /* check for drag event */
$('.progress').on('mousedown', function(e) {
timeDrag = true;
updatebar(e.pageX);
});
$(document).on('mouseup', function(e) {
if(timeDrag) {
timeDrag = false;
updatebar(e.pageX);
}
});
$(document).on('mousemove', function(e) {
if(timeDrag) {
updatebar(e.pageX);
}
});
var updatebar = function(x) {
var progress = $('.progress');
//calculate drag position
//and update video currenttime
//as well as progress bar
var maxduration = video[0].duration;
var position = x - progress.offset().left;
var percentage = 100 * position / progress.width();
if(percentage > 100) {
percentage = 100;
}
if(percentage < 0) {
percentage = 0;
}
$('.timeBar').css('width',percentage+'%');
video[0].currentTime = maxduration * percentage / 100;
};
//VOLUME BAR
//volume bar event
var volumeDrag = false;
$('.volume').on('mousedown', function(e) {
volumeDrag = true;
video[0].muted = false;
$('.sound').removeClass('muted');
updateVolume(e.pageX);
});
$(document).on('mouseup', function(e) {
if(volumeDrag) {
volumeDrag = false;
updateVolume(e.pageX);
}
});
$(document).on('mousemove', function(e) {
if(volumeDrag) {
updateVolume(e.pageX);
}
});
var updateVolume = function(x, vol) {
var volume = $('.volume');
var percentage;
//if a volume value was passed directly
//then update the volume from it
if(vol) {
percentage = vol * 100;
}
else {
var position = x - volume.offset().left;
percentage = 100 * position / volume.width();
}
if(percentage > 100) {
percentage = 100;
}
if(percentage < 0) {
percentage = 0;
}
//update volume bar and video volume
$('.volumeBar').css('width',percentage+'%');
video[0].volume = percentage / 100;
//change sound icon based on volume
if(video[0].volume == 0){
$('.sound').removeClass('sound2').addClass('muted');
}
else if(video[0].volume > 0.5){
$('.sound').removeClass('muted').addClass('sound2');
}
else{
$('.sound').removeClass('muted').removeClass('sound2');
}
};
//getting large screen button only for watch video page
if (enlarge_small == 'true')
{
$('.btmControl').append('<div class="smallscr largescr hbtn" id="largescr" title="Enlarge/Small Size"></div>');
$('#largescr').insertBefore("#fs");
}
//Large screen function
$(".largescr").click(function() {
$(this).toggleClass('smallscr');
if(!$(this).hasClass('smallscr'))
{
$(".cb_player").animate({height:'+=220px',width:'+=390px'},"fast");
$('.html5_player_enlarge').addClass('col-lg-12').removeClass('col-lg-8');
}
else
{
$(".cb_player").animate({height:'-=220px',width:'-=390px'},"fast");
$('.html5_player_enlarge').removeClass('col-lg-12').addClass('col-lg-8');
}
});
//Right Click Menu
$('#cont').append('<div id="rightcmenu"></div>');
//$('#rightcmenu').append('<span id="op">CB Html5 menu</span>');
$('#rightcmenu').append('<ul id="ritems"></ul>');
$('#ritems').append('<li id="copy" class="rlist copy">Show Video link</li>');
$('#ritems').append('<li class="rlist about">About</li>');
$('#ritems').append('<li class="rlist clip">Powered by Clipbucket</li>');
$('.cont').bind("contextmenu", function (e) {
e.preventDefault(); // To prevent the default context menu.
$("#rightcmenu").css("left", e.pageX); // For updating the menu position.
$("#rightcmenu").css("top", e.pageY); //
$("#rightcmenu").fadeIn(500, startFocusOut()); // For bringing the context menu in picture.
});
function | startFocusOut | identifier_name |
|
html5_player.js | //VIDEO EVENTS
//video canplay event
video.on('canplay', function() {
$('.loading').fadeOut(100);
});
//video canplaythrough event
//solve Chrome cache issue
var completeloaded = false;
video.on('canplaythrough', function() {
completeloaded = true;
});
//video ended event
video.on('ended', function() {
$('.btnPlay').removeClass('paused');
video[0].pause();
$('#opacity').show();
$('#related_1').show();
$('.control').hide();
$('.caption').hide();
$('#web').hide();
});
//video seeking event
video.on('seeking', function() {
//if video fully loaded, ignore loading screen
if(!completeloaded) {
$('.loading').fadeIn(200);
}
});
//video seeked event
video.on('seeked', function() {
$('.loading').fadeOut(200);
});
//video waiting for more data event
video.on('waiting', function() {
$('.loading').fadeIn(200);
});
//VIDEO PROGRESS BAR
//when video timebar clicked
var timeDrag = false; /* check for drag event */
$('.progress').on('mousedown', function(e) {
timeDrag = true;
updatebar(e.pageX);
});
$(document).on('mouseup', function(e) {
if(timeDrag) {
timeDrag = false;
updatebar(e.pageX);
}
});
$(document).on('mousemove', function(e) {
if(timeDrag) {
updatebar(e.pageX);
}
});
var updatebar = function(x) {
var progress = $('.progress');
//calculate drag position
//and update video currenttime
//as well as progress bar
var maxduration = video[0].duration;
var position = x - progress.offset().left;
var percentage = 100 * position / progress.width();
if(percentage > 100) {
percentage = 100;
}
if(percentage < 0) {
percentage = 0;
}
$('.timeBar').css('width',percentage+'%');
video[0].currentTime = maxduration * percentage / 100;
};
//VOLUME BAR
//volume bar event
var volumeDrag = false;
$('.volume').on('mousedown', function(e) {
volumeDrag = true;
video[0].muted = false;
$('.sound').removeClass('muted');
updateVolume(e.pageX);
});
$(document).on('mouseup', function(e) {
if(volumeDrag) {
volumeDrag = false;
updateVolume(e.pageX);
}
});
$(document).on('mousemove', function(e) {
if(volumeDrag) {
updateVolume(e.pageX);
}
});
var updateVolume = function(x, vol) {
var volume = $('.volume');
var percentage;
//if a volume value was passed directly
//then update the volume from it
if(vol) {
percentage = vol * 100;
}
else {
var position = x - volume.offset().left;
percentage = 100 * position / volume.width();
}
if(percentage > 100) {
percentage = 100;
}
if(percentage < 0) {
percentage = 0;
}
//update volume bar and video volume
$('.volumeBar').css('width',percentage+'%');
video[0].volume = percentage / 100;
//change sound icon based on volume
if(video[0].volume == 0){
$('.sound').removeClass('sound2').addClass('muted');
}
else if(video[0].volume > 0.5){
$('.sound').removeClass('muted').addClass('sound2');
}
else{
$('.sound').removeClass('muted').removeClass('sound2');
}
};
//getting large screen button only for watch video page
if (enlarge_small == 'true')
{
$('.btmControl').append('<div class="smallscr largescr hbtn" id="largescr" title="Enlarge/Small Size"></div>');
$('#largescr').insertBefore("#fs");
}
//Large screen function
$(".largescr").click(function() {
$(this).toggleClass('smallscr');
if(!$(this).hasClass('smallscr'))
{
$(".cb_player").animate({height:'+=220px',width:'+=390px'},"fast");
$('.html5_player_enlarge').addClass('col-lg-12').removeClass('col-lg-8');
}
else
{
$(".cb_player").animate({height:'-=220px',width:'-=390px'},"fast");
$('.html5_player_enlarge').removeClass('col-lg-12').addClass('col-lg-8');
}
});
//Right Click Menu
$('#cont').append('<div id="rightcmenu"></div>');
//$('#rightcmenu').append('<span id="op">CB Html5 menu</span>');
$('#rightcmenu').append('<ul id="ritems"></ul>');
$('#ritems').append('<li id="copy" class="rlist copy">Show Video link</li>');
$('#ritems').append('<li class="rlist about">About</li>');
$('#ritems').append('<li class="rlist clip">Powered by Clipbucket</li>');
$('.cont').bind("contextmenu", function (e) {
e.preventDefault(); // To prevent the default context menu.
$("#rightcmenu").css("left", e.pageX); // For updating the menu position.
$("#rightcmenu").css("top", e.pageY); //
$("#rightcmenu").fadeIn(500, startFocusOut()); // For bringing the context menu in picture.
});
function startFocusOut() {
$(document).on("click", function () {
$("#rightcmenu").hide(500); // To hide the context menu
$('.cont').off("click");
});
}
$(".clip").click(function(event) {
window.open(homepath, '_blank');
});
$(".about").click(function(event) {
window.open(homepath, '_blank');
});
$('.copy').click(function() {
alert(document.URL);
});
//Logo
$('.cb-playerLogo').append('<div id="path" class="path hbtn" > </div>');
$('#path').prop("href",product_link);
// $("#path").insertAfter("#hd");
$('#path').css({
'backgroundImage': 'url(data:image/png;base64,' + webpath + ')',
'margin-right':'7px',
'margin-top':'0px',
'background-repeat':'no-repeat',
'background-position' : '100% 50%',
});
$("#path").click(function(event) {
window.open(product_link, '_blank');
});
$('#name_v,#thumb_v').mouseover(function() {
$(this).css({'opacity':'1',
'border': '0px solid #000',
'box-shadow':'1px 0 5px #fff',
'-moz-box-shadow':'1px 0 5px #fff',
'-webkit-box-shadow':'1px 0 5px #fff',});
});
$('#name_v,#thumb_v').mouseout(function() {
$(this).css({'opacity':'.9',
'border': '0px solid #000',
'box-shadow':'0px 0 0px #fff',
'-moz-box-shadow':'0px 0 0px #fff',
'-webkit-box-shadow':'0px 0 0px #fff',});
});
// Setting in-video logo for player
if( iv_logo_enable == 'yes')
{
$('.cont').append('<img id="web" src=data:image/png;base64,'+ web +'> ');
$('#web').css({
'top' : $top,
'left' : $left,
'bottom' : $bottom,
'right' : $right ,
'position': 'absolute',
'width': '100px',
'height': '30px',
'z-index' : '-1'
});
}
//For multiserver plugin videos :)
if(files)
{
var toggle = false;
var time_var = false;
var start_time = 0;
$('#res').on('click',function(event){
if(toggle == false)
{
$('.video_files').show(10);
toggle = true;
}
else
{
$('.video_files').hide(10);
toggle = false;
}
event.stopPropagation();
});
$('html').click(function() {
$('.video_files').hide(100);
toggle =false;
});
// All multiserver Video Files (Json)
var jsonData = JSON.parse(files);
// checking whether the 360p resolution is unavailable | if(!jsonData["360"])
{ | random_line_split |
|
SignalDef.rs | pub const SYS_SECCOMP: i32 = 1;
// TRAP_* codes are only meaningful for SIGTRAP.
// TRAP_BRKPT indicates a breakpoint trap.
pub const TRAP_BRKPT: i32 = 1;
}
pub const UC_FP_XSTATE: u64 = 1;
pub const UC_SIGCONTEXT_SS: u64 = 2;
pub const UC_STRICT_RESTORE_SS: u64 = 4;
// https://elixir.bootlin.com/linux/latest/source/include/uapi/asm-generic/ucontext.h#L5
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct UContext {
pub Flags: u64,
pub Link: u64,
pub Stack: SignalStack,
pub MContext: SigContext,
pub Sigset: u64,
}
impl UContext {
pub fn New(ptRegs: &PtRegs, oldMask: u64, cr2: u64, fpstate: u64, alt: &SignalStack) -> Self {
return Self {
Flags: 2,
Link: 0,
Stack: alt.clone(),
MContext: SigContext::New(ptRegs, oldMask, cr2, fpstate),
Sigset: 0,
}
}
}
// https://elixir.bootlin.com/linux/latest/source/arch/x86/include/uapi/asm/sigcontext.h#L284
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigContext {
pub r8: u64,
pub r9: u64,
pub r10: u64,
pub r11: u64,
pub r12: u64,
pub r13: u64,
pub r14: u64,
pub r15: u64,
pub rdi: u64,
pub rsi: u64,
pub rbp: u64,
pub rbx: u64,
pub rdx: u64,
pub rax: u64,
pub rcx: u64,
pub rsp: u64,
pub rip: u64,
pub eflags: u64,
pub cs: u16,
pub gs: u16,
// always 0 on amd64.
pub fs: u16,
// always 0 on amd64.
pub ss: u16,
// only restored if _UC_STRICT_RESTORE_SS (unsupported).
pub err: u64,
pub trapno: u64,
pub oldmask: u64,
pub cr2: u64,
// Pointer to a struct _fpstate.
pub fpstate: u64,
pub reserved: [u64; 8],
}
impl SigContext {
pub fn New(ptRegs: &PtRegs, oldMask: u64, cr2: u64, fpstate: u64) -> Self {
return Self {
r8: ptRegs.r8,
r9: ptRegs.r9,
r10: ptRegs.r10,
r11: ptRegs.r11,
r12: ptRegs.r12,
r13: ptRegs.r13,
r14: ptRegs.r14,
r15: ptRegs.r15,
rdi: ptRegs.rdi,
rsi: ptRegs.rsi,
rbp: ptRegs.rbp,
rbx: ptRegs.rbx,
rdx: ptRegs.rdx,
rax: ptRegs.rax,
rcx: ptRegs.rcx,
rsp: ptRegs.rsp,
rip: ptRegs.rip,
eflags: ptRegs.eflags,
cs: ptRegs.cs as u16,
gs: 0,
fs: 0,
ss: ptRegs.ss as u16,
err: 0,
trapno: 0,
oldmask: oldMask,
cr2: cr2,
fpstate: fpstate,
..Default::default()
}
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigFlag(pub u64);
impl SigFlag {
pub const SIGNAL_FLAG_NO_CLD_STOP: u64 = 0x00000001;
pub const SIGNAL_FLAG_NO_CLD_WAIT: u64 = 0x00000002;
pub const SIGNAL_FLAG_SIG_INFO: u64 = 0x00000004;
pub const SIGNAL_FLAG_RESTORER: u64 = 0x04000000;
pub const SIGNAL_FLAG_ON_STACK: u64 = 0x08000000;
pub const SIGNAL_FLAG_RESTART: u64 = 0x10000000;
pub const SIGNAL_FLAG_INTERRUPT: u64 = 0x20000000;
pub const SIGNAL_FLAG_NO_DEFER: u64 = 0x40000000;
pub const SIGNAL_FLAG_RESET_HANDLER: u64 = 0x80000000;
pub fn IsNoCldStop(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_NO_CLD_STOP != 0;
}
pub fn IsNoCldWait(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_NO_CLD_WAIT != 0;
}
pub fn IsSigInfo(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_SIG_INFO != 0;
}
pub fn IsNoDefer(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_NO_DEFER != 0;
}
pub fn IsRestart(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_RESTART != 0;
}
pub fn IsResetHandler(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_RESET_HANDLER != 0;
}
pub fn IsOnStack(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_ON_STACK != 0;
}
pub fn HasRestorer(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_RESTORER != 0;
}
pub fn IsNoChildStop(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_NO_CLD_STOP != 0
}
}
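// Illustrative sketch (hypothetical values), showing how a sigaction flag word built from the
// constants above would be queried:
// let flags = SigFlag(SigFlag::SIGNAL_FLAG_SIG_INFO | SigFlag::SIGNAL_FLAG_RESTORER);
// assert!(flags.IsSigInfo() && flags.HasRestorer());
// assert!(!flags.IsOnStack() && !flags.IsNoDefer());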
// https://github.com/lattera/glibc/blob/master/sysdeps/unix/sysv/linux/kernel_sigaction.h
#[derive(Copy, Clone, Default)]
#[repr(C)]
pub struct SigAct {
pub handler: u64,
pub flags: SigFlag,
pub restorer: u64,
pub mask: u64,
}
impl fmt::Debug for SigAct {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "SigAction {{ \n\
handler: {:x}, \n\
flag : {:x}, \n \
flags::HasRestorer: {}, \n \
flags::IsOnStack: {}, \n \
flags::IsRestart: {}, \n \
flags::IsResetHandler: {}, \n \
flags::IsNoDefer: {}, \n \
flags::IsSigInfo: {}, \n \
restorer : {:x}, \n\
mask: {:x}, \n}}",
self.handler,
self.flags.0,
self.flags.HasRestorer(),
self.flags.IsOnStack(),
self.flags.IsRestart(),
self.flags.IsResetHandler(),
self.flags.IsNoDefer(),
self.flags.IsSigInfo(),
self.restorer,
self.mask
)
}
}
impl SigAct {
// SignalActDefault is SIG_DFL and specifies that the default behavior for
// a signal should be taken.
pub const SIGNAL_ACT_DEFAULT: u64 = 0;
// SignalActIgnore is SIG_IGN and specifies that a signal should be
// ignored.
pub const SIGNAL_ACT_IGNORE: u64 = 1;
}
pub const UNMASKABLE_MASK : u64 = 1 << (Signal::SIGKILL - 1) | 1 << (Signal::SIGSTOP - 1);
#[derive(Clone, Copy, Debug)]
pub struct SignalSet(pub u64);
impl Default for SignalSet {
fn default() -> Self {
return Self(0)
}
}
impl SignalSet {
pub fn New(sig: Signal) -> Self {
return SignalSet(1 << sig.Index())
}
pub fn Add(&mut self, sig: Signal) {
self.0 |= 1 << sig.Index()
}
pub fn Remove(&mut self, sig: Signal) {
self.0 &= !(1 << sig.0)
}
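// Note: this scans from bit 63 downward and returns the index of the highest set bit,
// i.e. the most significant pending signal, rather than counting trailing zeros.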
pub fn TailingZero(&self) -> usize {
for i in 0..64 {
let idx = 64 - i - 1;
if self.0 & (1 << idx) != 0 | {
return idx
} | conditional_block |
|
SignalDef.rs | 3;
// CLD_TRAPPED indicates that a task was stopped by ptrace.
pub const CLD_TRAPPED: i32 = 4;
// CLD_STOPPED indicates that a thread group completed a group stop.
pub const CLD_STOPPED: i32 = 5;
// CLD_CONTINUED indicates that a group-stopped thread group was continued.
pub const CLD_CONTINUED: i32 = 6;
// SYS_* codes are only meaningful for SIGSYS.
// SYS_SECCOMP indicates that a signal originates from seccomp.
pub const SYS_SECCOMP: i32 = 1;
// TRAP_* codes are only meaningful for SIGTRAP.
// TRAP_BRKPT indicates a breakpoint trap.
pub const TRAP_BRKPT: i32 = 1;
}
pub const UC_FP_XSTATE: u64 = 1;
pub const UC_SIGCONTEXT_SS: u64 = 2;
pub const UC_STRICT_RESTORE_SS: u64 = 4;
// https://elixir.bootlin.com/linux/latest/source/include/uapi/asm-generic/ucontext.h#L5
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct UContext {
pub Flags: u64,
pub Link: u64,
pub Stack: SignalStack,
pub MContext: SigContext,
pub Sigset: u64,
}
impl UContext {
pub fn New(ptRegs: &PtRegs, oldMask: u64, cr2: u64, fpstate: u64, alt: &SignalStack) -> Self {
return Self {
Flags: 2,
Link: 0,
Stack: alt.clone(),
MContext: SigContext::New(ptRegs, oldMask, cr2, fpstate),
Sigset: 0,
}
}
}
// https://elixir.bootlin.com/linux/latest/source/arch/x86/include/uapi/asm/sigcontext.h#L284
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigContext {
pub r8: u64,
pub r9: u64,
pub r10: u64,
pub r11: u64,
pub r12: u64,
pub r13: u64,
pub r14: u64,
pub r15: u64,
pub rdi: u64,
pub rsi: u64,
pub rbp: u64,
pub rbx: u64,
pub rdx: u64,
pub rax: u64,
pub rcx: u64,
pub rsp: u64,
pub rip: u64,
pub eflags: u64,
pub cs: u16,
pub gs: u16,
// always 0 on amd64.
pub fs: u16,
// always 0 on amd64.
pub ss: u16,
// only restored if _UC_STRICT_RESTORE_SS (unsupported).
pub err: u64,
pub trapno: u64,
pub oldmask: u64,
pub cr2: u64,
// Pointer to a struct _fpstate.
pub fpstate: u64,
pub reserved: [u64; 8],
}
impl SigContext {
pub fn New(ptRegs: &PtRegs, oldMask: u64, cr2: u64, fpstate: u64) -> Self {
return Self {
r8: ptRegs.r8,
r9: ptRegs.r9,
r10: ptRegs.r10,
r11: ptRegs.r11,
r12: ptRegs.r12,
r13: ptRegs.r13,
r14: ptRegs.r14,
r15: ptRegs.r15,
rdi: ptRegs.rdi,
rsi: ptRegs.rsi,
rbp: ptRegs.rbp,
rbx: ptRegs.rbx,
rdx: ptRegs.rdx,
rax: ptRegs.rax,
rcx: ptRegs.rcx,
rsp: ptRegs.rsp,
rip: ptRegs.rip,
eflags: ptRegs.eflags,
cs: ptRegs.cs as u16,
gs: 0,
fs: 0,
ss: ptRegs.ss as u16,
err: 0,
trapno: 0,
oldmask: oldMask,
cr2: cr2,
fpstate: fpstate,
..Default::default()
}
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigFlag(pub u64);
impl SigFlag {
pub const SIGNAL_FLAG_NO_CLD_STOP: u64 = 0x00000001;
pub const SIGNAL_FLAG_NO_CLD_WAIT: u64 = 0x00000002;
pub const SIGNAL_FLAG_SIG_INFO: u64 = 0x00000004;
pub const SIGNAL_FLAG_RESTORER: u64 = 0x04000000;
pub const SIGNAL_FLAG_ON_STACK: u64 = 0x08000000;
pub const SIGNAL_FLAG_RESTART: u64 = 0x10000000;
pub const SIGNAL_FLAG_INTERRUPT: u64 = 0x20000000;
pub const SIGNAL_FLAG_NO_DEFER: u64 = 0x40000000;
pub const SIGNAL_FLAG_RESET_HANDLER: u64 = 0x80000000;
pub fn IsNoCldStop(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_NO_CLD_STOP != 0;
}
pub fn IsNoCldWait(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_NO_CLD_WAIT != 0;
}
pub fn IsSigInfo(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_SIG_INFO != 0;
}
pub fn IsNoDefer(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_NO_DEFER != 0;
}
pub fn IsRestart(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_RESTART != 0;
}
pub fn IsResetHandler(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_RESET_HANDLER != 0;
}
pub fn IsOnStack(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_ON_STACK != 0;
}
pub fn HasRestorer(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_RESTORER != 0;
}
pub fn IsNoChildStop(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_NO_CLD_STOP != 0
}
}
// https://github.com/lattera/glibc/blob/master/sysdeps/unix/sysv/linux/kernel_sigaction.h
#[derive(Copy, Clone, Default)]
#[repr(C)]
pub struct SigAct {
pub handler: u64,
pub flags: SigFlag,
pub restorer: u64,
pub mask: u64,
}
impl fmt::Debug for SigAct {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "SigAction {{ \n\
handler: {:x}, \n\
flag : {:x}, \n \
flags::HasRestorer: {}, \n \
flags::IsOnStack: {}, \n \
flags::IsRestart: {}, \n \
flags::IsResetHandler: {}, \n \
flags::IsNoDefer: {}, \n \
flags::IsSigInfo: {}, \n \
restorer : {:x}, \n\
mask: {:x}, \n}}",
self.handler,
self.flags.0,
self.flags.HasRestorer(),
self.flags.IsOnStack(),
self.flags.IsRestart(),
self.flags.IsResetHandler(),
self.flags.IsNoDefer(),
self.flags.IsSigInfo(),
self.restorer,
self.mask
)
}
}
impl SigAct {
// SignalActDefault is SIG_DFL and specifies that the default behavior for
// a signal should be taken.
pub const SIGNAL_ACT_DEFAULT: u64 = 0;
// SignalActIgnore is SIG_IGN and specifies that a signal should be
// ignored.
pub const SIGNAL_ACT_IGNORE: u64 = 1;
}
pub const UNMASKABLE_MASK : u64 = 1 << (Signal::SIGKILL - 1) | 1 << (Signal::SIGSTOP - 1);
#[derive(Clone, Copy, Debug)]
pub struct SignalSet(pub u64);
impl Default for SignalSet {
fn default() -> Self {
return Self(0)
}
}
impl SignalSet {
pub fn | New | identifier_name |
|
SignalDef.rs | pub rsp: u64,
pub ss: u64,
/* top of stack page */
}
impl PtRegs {
pub fn Set(&mut self, ctx: &SigContext) {
self.r15 = ctx.r15;
self.r14 = ctx.r14;
self.r13 = ctx.r13;
self.r12 = ctx.r12;
self.rbp = ctx.rbp;
self.rbx = ctx.rbx;
self.r11 = ctx.r11;
self.r10 = ctx.r10;
self.r9 = ctx.r9;
self.r8 = ctx.r8;
self.rax = ctx.rax;
self.rcx = ctx.rcx;
self.rdx = ctx.rdx;
self.rsi = ctx.rsi;
self.rdi = ctx.rdi;
self.orig_rax = ctx.rax;
self.rip = ctx.rip;
self.cs = ctx.cs as u64;
self.eflags = ctx.eflags;
self.rsp = ctx.rsp;
self.ss = ctx.ss as u64;
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigRetInfo {
pub sigInfoAddr: u64,
pub sigCtxAddr: u64,
pub ret: u64,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct Kill {
pub pid: i32,
pub uid: i32,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigTimer {
pub tid: i32,
pub overrun: i32,
pub sigval: u64,
pub sysPrivate: i32,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigRt {
pub pid: i32,
pub uid: u32,
pub sigval: u64,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigChld {
pub pid: i32,
//child
pub uid: u32,
//sender's uid
pub status: i32,
//Exit code
pub uTime: i32,
pub sTime: i32,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigFault {
pub addr: u64,
pub lsb: u16,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct SignalInfo {
pub Signo: i32,
// Signal number
pub Errno: i32,
// Errno value
pub Code: i32,
// Signal code
pub _r: u32,
pub fields: [u8; 128 - 16],
}
impl<'a> Default for SignalInfo {
fn default() -> Self {
return Self {
Signo: 0,
Errno: 0,
Code: 0,
_r: 0,
fields: [0; 128 - 16]
}
}
}
impl core::fmt::Debug for SignalInfo {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SignalInfo")
.field("Signo", &self.Signo)
.field("Errno", &self.Errno)
.field("Code", &self.Code)
.finish()
}
}
impl SignalInfo {
pub fn SignalInfoPriv(sig: Signal) -> Self {
return Self {
Signo: sig.0,
Code: Self::SIGNAL_INFO_KERNEL,
..Default::default()
}
}
// FixSignalCodeForUser fixes up si_code.
//
// The si_code we get from Linux may contain the kernel-specific code in the
// top 16 bits if it's positive (e.g., from ptrace). Linux's
// copy_siginfo_to_user does
// err |= __put_user((short)from->si_code, &to->si_code);
// to mask out those bits and we need to do the same.
pub fn FixSignalCodeForUser(&mut self) {
if self.Code > 0 {
self.Code &= 0xffff;
}
}
pub fn Kill(&self) -> &mut Kill {
let addr = &self.fields[0] as *const _ as u64;
return unsafe {
&mut *(addr as *mut Kill)
}
}
pub fn SigTimer(&mut self) -> &mut SigTimer {
let addr = &self.fields[0] as *const _ as u64;
return unsafe {
&mut *(addr as *mut SigTimer)
}
}
pub fn SigRt(&mut self) -> &mut SigRt {
let addr = &self.fields[0] as *const _ as u64;
return unsafe {
&mut *(addr as *mut SigRt)
}
}
pub fn SigChld(&mut self) -> &mut SigChld {
let addr = &self.fields[0] as *const _ as u64;
return unsafe {
&mut *(addr as *mut SigChld)
}
}
pub fn SigFault(&self) -> &mut SigFault {
let addr = &self.fields[0] as *const _ as u64;
return unsafe {
&mut *(addr as *mut SigFault)
}
}
// SignalInfoUser (properly SI_USER) indicates that a signal was sent from
// a kill() or raise() syscall.
pub const SIGNAL_INFO_USER: i32 = 0;
// SignalInfoKernel (properly SI_KERNEL) indicates that the signal was sent
// by the kernel.
pub const SIGNAL_INFO_KERNEL: i32 = 0x80;
// SignalInfoTimer (properly SI_TIMER) indicates that the signal was sent
// by an expired timer.
pub const SIGNAL_INFO_TIMER: i32 = -2;
// SignalInfoTkill (properly SI_TKILL) indicates that the signal was sent
// from a tkill() or tgkill() syscall.
pub const SIGNAL_INFO_TKILL: i32 = -6;
// CLD_* codes are only meaningful for SIGCHLD.
// CLD_EXITED indicates that a task exited.
pub const CLD_EXITED: i32 = 1;
// CLD_KILLED indicates that a task was killed by a signal.
pub const CLD_KILLED: i32 = 2;
// CLD_DUMPED indicates that a task was killed by a signal and then dumped
// core.
pub const CLD_DUMPED: i32 = 3;
// CLD_TRAPPED indicates that a task was stopped by ptrace.
pub const CLD_TRAPPED: i32 = 4;
// CLD_STOPPED indicates that a thread group completed a group stop.
pub const CLD_STOPPED: i32 = 5;
// CLD_CONTINUED indicates that a group-stopped thread group was continued.
pub const CLD_CONTINUED: i32 = 6;
// SYS_* codes are only meaningful for SIGSYS.
// SYS_SECCOMP indicates that a signal originates from seccomp.
pub const SYS_SECCOMP: i32 = 1;
// TRAP_* codes are only meaningful for SIGTRAP.
// TRAP_BRKPT indicates a breakpoint trap.
pub const TRAP_BRKPT: i32 = 1;
}
pub const UC_FP_XSTATE: u64 = 1;
pub const UC_SIGCONTEXT_SS: u64 = 2;
pub const UC_STRICT_RESTORE_SS: u64 = 4;
// https://elixir.bootlin.com/linux/latest/source/include/uapi/asm-generic/ucontext.h#L5
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct UContext {
pub Flags: u64,
pub Link: u64,
pub Stack: SignalStack,
pub MContext: SigContext,
pub Sigset: u64,
}
impl UContext {
pub fn New(ptRegs: &PtRegs, oldMask: u64, cr2: u64, fpstate: u64, alt: &SignalStack) -> Self {
return Self {
Flags: 2,
Link: 0,
Stack: alt.clone(),
MContext: SigContext::New(ptRegs, oldMask, cr2, fpstate),
Sigset: 0,
}
}
}
// https://elixir.bootlin.com/linux/latest/source/arch/x86/include/uapi/asm/sigcontext.h#L284
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigContext {
pub r8: u64,
pub r9: u64,
pub r10: u64,
pub r11: u64,
pub r12: u64,
pub r13: u64 | pub cs: u64,
pub eflags: u64, | random_line_split |
|
SignalDef.rs | pub fn ForEachSignal(&self, mut f: impl FnMut(Signal)) {
for i in 0..64 {
if self.0 & (1 << i) != 0 {
f(Signal(i as i32 + 1))
}
}
}
}
#[derive(Debug, Clone, Default)]
pub struct SignalQueue {
signals: LinkedList<PendingSignal>,
}
impl SignalQueue {
pub const RT_SIG_CAP: usize = 32;
pub fn Len(&mut self) -> u64 {
return self.signals.len() as u64;
}
pub fn Enque(&mut self, info: Box<SignalInfo>, timer: Option<IntervalTimer>) -> bool {
if self.signals.len() == Self::RT_SIG_CAP {
return false
}
self.signals.push_back(PendingSignal {
sigInfo: info,
timer: timer,
});
return true
}
pub fn Deque(&mut self) -> Option<PendingSignal> {
return self.signals.pop_front();
}
pub fn Clear(&mut self) {
self.signals.clear();
}
}
pub const SIGNAL_COUNT: usize = 64;
pub const STD_SIGNAL_COUNT: usize = 31; // 1 ~ 31
pub const RT_SIGNAL_COUNT: usize = 33; // 32 ~ 64
pub const RT_SIGNAL_START: usize = 32; // 32 ~ 64
#[derive(Debug, Clone, Default)]
pub struct PendingSignal {
pub sigInfo: Box<SignalInfo>,
pub timer: Option<IntervalTimer>,
}
pub struct PendingSignals {
pub stdSignals: [Option<PendingSignal>; STD_SIGNAL_COUNT],
pub rtSignals: [SignalQueue; RT_SIGNAL_COUNT],
pub pendingSet: SignalSet,
}
impl fmt::Debug for PendingSignals {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("PendingSignals")
.field("stdSignals", &self.stdSignals)
.field("rtSignals0", &self.rtSignals[0])
.field("rtSignals2", &self.rtSignals[32])
.field("pendingSet", &self.pendingSet)
.finish()
}
}
impl Default for PendingSignals {
fn default() -> Self {
return Self {
stdSignals : Default::default(),
rtSignals : [
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(),
],
pendingSet: Default::default(),
}
}
}
impl PendingSignals {
pub fn Enque(&mut self, info: Box<SignalInfo>, timer: Option<IntervalTimer>) -> Result<bool> {
let sig = Signal(info.Signo);
if sig.IsStandard() {
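// Standard signals are not queued: if an instance of this signal is already pending,
// the new one is dropped and Ok(false) is returned.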
match &self.stdSignals[sig.Index()] {
None => (),
_ => return Ok(false),
}
self.stdSignals[sig.Index()] = Some(PendingSignal {
sigInfo: info,
timer: timer,
});
self.pendingSet.Add(sig);
return Ok(true);
} else if sig.IsRealtime() {
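// Realtime signals are queued per signal number, up to SignalQueue::RT_SIG_CAP entries.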
let q = &mut self.rtSignals[sig.Index() - 31];
self.pendingSet.Add(sig);
return Ok(q.Enque(info, timer));
} else {
return Err(Error::InvalidInput)
}
}
pub fn HasSignal(&self, mask: SignalSet) -> bool {
let set = SignalSet(self.pendingSet.0 & !(mask.0));
if set.0 == 0 {
return false
}
return true;
}
pub fn Deque(&mut self, mask: SignalSet) -> Option<Box<SignalInfo>> {
let set = SignalSet(self.pendingSet.0 & !(mask.0));
if set.0 == 0 {
return None
}
let lastOne = set.TailingZero();
if lastOne < STD_SIGNAL_COUNT {
self.pendingSet.0 &= !(1 << lastOne);
let ps = self.stdSignals[lastOne].take();
if let Some(ps) = ps {
let mut sigInfo = ps.sigInfo;
match ps.timer {
None => (),
Some(timer) => {
timer.lock().updateDequeuedSignalLocked(&mut sigInfo)
}
}
return Some(sigInfo);
} else {
return None;
}
}
if self.rtSignals[lastOne + 1 - RT_SIGNAL_START].Len() == 1 {
self.pendingSet.0 &= !(1 << lastOne);
}
let ps = self.rtSignals[lastOne + 1 - RT_SIGNAL_START].Deque();
if let Some(ps) = ps {
let mut sigInfo = ps.sigInfo;
match ps.timer {
None => (),
Some(timer) => {
timer.lock().updateDequeuedSignalLocked(&mut sigInfo)
}
}
return Some(sigInfo);
} else {
return None;
}
}
pub fn Discard(&mut self, sig: Signal) {
self.pendingSet.0 &= !(1 << sig.Index());
if sig.0 <= STD_SIGNAL_COUNT as i32 {
self.stdSignals[sig.Index()] = None;
return
}
self.rtSignals[sig.0 as usize - RT_SIGNAL_START].Clear()
}
}
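// Illustrative sketch (hypothetical usage; signal number 17 stands in for SIGCHLD):
// let mut pending = PendingSignals::default();
// pending.Enque(Box::new(SignalInfoPriv(17)), None).unwrap();
// // A zero mask blocks nothing, so the queued signal is returned here.
// let info = pending.Deque(SignalSet(0)).unwrap();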
#[derive(Default, Debug)]
pub struct SignalStruct {
pendingSignals: PendingSignals,
signalMask: SignalSet,
realSignalMask: SignalSet,
//sigtimedwait
groupStopPending: bool,
groupStopAck: bool,
trapStopPending: bool,
}
// https://elixir.bootlin.com/linux/latest/source/arch/x86/include/uapi/asm/signal.h#L132
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct SignalStack {
pub addr: u64,
pub flags: u32,
pub size: u64,
}
impl Default for SignalStack {
fn default() -> Self {
return Self {
addr: 0,
flags: Self::FLAG_DISABLE,
size: 0,
}
}
}
impl SignalStack {
pub const FLAG_ON_STACK: u32 = 1;
pub const FLAG_DISABLE: u32 = 2;
pub fn Contains(&self, sp: u64) -> bool {
return self.addr < sp && sp <= self.addr + self.size
}
pub fn SetOnStack(&mut self) {
self.flags |= Self::FLAG_ON_STACK;
}
pub fn IsEnable(&self) -> bool {
return self.flags & Self::FLAG_DISABLE == 0
}
pub fn Top(&self) -> u64 {
return self.addr + self.size
}
}
pub struct SigHow {}
impl SigHow {
pub const SIG_BLOCK: u64 = 0;
pub const SIG_UNBLOCK: u64 = 1;
pub const SIG_SETMASK: u64 = 2;
}
pub fn SignalInfoPriv(sig: i32) -> SignalInfo {
return SignalInfo {
Signo: sig,
Code: SignalInfo::SIGNAL_INFO_KERNEL,
..Default::default()
}
}
// Sigevent represents struct sigevent.
#[repr(C)]
#[derive(Default, Copy, Clone)]
pub struct Sigevent {
pub Value: u64,
pub Signo: i32,
pub Notify: i32,
pub Tid: i32,
// struct sigevent here contains 48-byte union _sigev_un. However, only
// member _tid is significant to the kernel.
pub UnRemainder1: [u8; 32],
pub UnRemainder: [u8; 12],
}
pub const SIGEV_SIGNAL: i32 = 0;
pub const SIGEV_NONE: i32 = 1;
pub const SIGEV_THREAD: i32 = 2;
pub const SIGEV_THREAD_ID: i32 = 4;
// copyInSigSetWithSize copies in a structure as below
//
// struct {
// const sigset_t *ss; /* Pointer to signal set */
// size_t ss_len; /* Size (in bytes) of object pointed to by 'ss' */
// };
//
// and returns sigset_addr and size.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct SigMask {
pub addr: u64,
pub len: usize,
}
pub fn CopyInSigSetWithSize(task: &Task, addr: u64) -> Result<(u64, usize)> | {
let mask : SigMask = task.CopyInObj(addr)?;
return Ok((mask.addr, mask.len))
} | identifier_body |
|
api-put-object-multipart.go | Upload completeMultipartUpload
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize)
if err != nil {
return UploadInfo{}, err
}
// Choose hash algorithms to be calculated by hashCopyN,
// avoid sha256 with non-v4 signature request or
// HTTPS connection.
hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5, !opts.DisableContentSha256)
if len(hashSums) == 0 |
// Initiate a new multipart upload.
uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
if err != nil {
return UploadInfo{}, err
}
delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
defer func() {
if err != nil {
c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
}
}()
// Part number always starts with '1'.
partNumber := 1
// Initialize parts uploaded map.
partsInfo := make(map[int]ObjectPart)
// Create a buffer.
buf := make([]byte, partSize)
// Create checksums
// CRC32C is ~50% faster on AMD64 @ 30GB/s
var crcBytes []byte
customHeader := make(http.Header)
crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
for partNumber <= totalPartsCount {
length, rErr := readFull(reader, buf)
if rErr == io.EOF && partNumber > 1 {
break
}
if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF {
return UploadInfo{}, rErr
}
// Calculates hash sums while copying partSize bytes into cw.
for k, v := range hashAlgos {
v.Write(buf[:length])
hashSums[k] = v.Sum(nil)
v.Close()
}
// Update progress reader appropriately to the latest offset
// as we read from the source.
rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)
// Checksums..
var (
md5Base64 string
sha256Hex string
)
if hashSums["md5"] != nil {
md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"])
}
if hashSums["sha256"] != nil {
sha256Hex = hex.EncodeToString(hashSums["sha256"])
}
if len(hashSums) == 0 {
crc.Reset()
crc.Write(buf[:length])
cSum := crc.Sum(nil)
customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
crcBytes = append(crcBytes, cSum...)
}
p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, sha256Hex: sha256Hex, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader}
// Proceed to upload the part.
objPart, uerr := c.uploadPart(ctx, p)
if uerr != nil {
return UploadInfo{}, uerr
}
// Save successfully uploaded part metadata.
partsInfo[partNumber] = objPart
// Save successfully uploaded size.
totalUploadedSize += int64(length)
// Increment part number.
partNumber++
// For unknown size, Read EOF we break away.
// We do not have to upload till totalPartsCount.
if rErr == io.EOF {
break
}
}
// Loop over total uploaded parts to save them in
// Parts array before completing the multipart request.
for i := 1; i < partNumber; i++ {
part, ok := partsInfo[i]
if !ok {
return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
}
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
ETag: part.ETag,
PartNumber: part.PartNumber,
ChecksumCRC32: part.ChecksumCRC32,
ChecksumCRC32C: part.ChecksumCRC32C,
ChecksumSHA1: part.ChecksumSHA1,
ChecksumSHA256: part.ChecksumSHA256,
})
}
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
opts = PutObjectOptions{
ServerSideEncryption: opts.ServerSideEncryption,
}
if len(crcBytes) > 0 {
// Add hash of hashes.
crc.Reset()
crc.Write(crcBytes)
opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
}
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
if err != nil {
return UploadInfo{}, err
}
uploadInfo.Size = totalUploadedSize
return uploadInfo, nil
}
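// Illustrative internal call sketch (bucket, object and reader names here are hypothetical):
//
//	info, err := c.putObjectMultipartNoStream(ctx, "my-bucket", "backup.tar", f,
//		PutObjectOptions{PartSize: 64 * 1024 * 1024})
//
// Each full PartSize buffer becomes one uploadPart call, and the final
// completeMultipartUpload stitches the parts together server-side.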
// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return initiateMultipartUploadResult{}, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return initiateMultipartUploadResult{}, err
}
// Initialize url queries.
urlValues := make(url.Values)
urlValues.Set("uploads", "")
if opts.Internal.SourceVersionID != "" {
if opts.Internal.SourceVersionID != nullVersionID {
if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil {
return initiateMultipartUploadResult{}, errInvalidArgument(err.Error())
}
}
urlValues.Set("versionId", opts.Internal.SourceVersionID)
}
// Set ContentType header.
customHeader := opts.Header()
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
customHeader: customHeader,
}
// Execute POST on an objectName to initiate multipart upload.
resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata)
defer closeResponse(resp)
if err != nil {
return initiateMultipartUploadResult{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
}
}
// Decode xml for new multipart upload.
initiateMultipartUploadResult := initiateMultipartUploadResult{}
err = xmlDecoder(resp.Body, &initiateMultipartUploadResult)
if err != nil {
return initiateMultipartUploadResult, err
}
return initiateMultipartUploadResult, nil
}
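// The decoded response above is the standard S3 InitiateMultipartUpload XML; its upload ID is
// what uploadPart and completeMultipartUpload later send back as the "uploadId" query parameter.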
type uploadPartParams struct {
bucketName string
objectName string
uploadID string
reader io.Reader
partNumber int
md5Base64 string
sha256Hex string
size int64
sse encrypt.ServerSide
streamSha256 bool
customHeader http.Header
trailer http.Header
}
// uploadPart - Uploads a part in a multipart upload.
func (c *Client) uploadPart(ctx context.Context, p uploadPartParams) (ObjectPart, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(p.bucketName); err != nil {
return ObjectPart{}, err
}
if err := s3utils.CheckValidObjectName(p.objectName); err != nil {
return ObjectPart{}, err
}
if p.size > maxPartSize {
return ObjectPart{}, errEntityTooLarge(p.size, maxPartSize, p.bucketName, p.objectName)
}
if p.size <= -1 {
return ObjectPart{}, errEntityTooSmall(p.size, p.bucketName, p.objectName)
}
if p.partNumber <= 0 {
return ObjectPart{}, errInvalidArgument("Part number cannot be negative or equal to zero.")
}
if p.uploadID == "" {
return ObjectPart{}, errInvalidArgument("UploadID cannot be empty.")
}
// Get resources properly escaped and lined up before using them in http request.
urlValues := make(url.Values)
// Set part number.
urlValues.Set("partNumber", strconv.Itoa(p.partNumber))
// Set upload id.
urlValues.Set("uploadId", p.upload | {
if opts.UserMetadata == nil {
opts.UserMetadata = make(map[string]string, 1)
}
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
} | conditional_block |
api-put-object-multipart.go | (ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return UploadInfo{}, err
}
if err = s3utils.CheckValidObjectName(objectName); err != nil {
return UploadInfo{}, err
}
// Total data read and written to server. should be equal to
// 'size' at the end of the call.
var totalUploadedSize int64
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize)
if err != nil {
return UploadInfo{}, err
}
// Choose hash algorithms to be calculated by hashCopyN,
// avoid sha256 with non-v4 signature request or
// HTTPS connection.
hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5, !opts.DisableContentSha256)
if len(hashSums) == 0 {
if opts.UserMetadata == nil {
opts.UserMetadata = make(map[string]string, 1)
}
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
}
// Initiate a new multipart upload.
uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
if err != nil {
return UploadInfo{}, err
}
delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
defer func() {
if err != nil {
c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
}
}()
// Part number always starts with '1'.
partNumber := 1
// Initialize parts uploaded map.
partsInfo := make(map[int]ObjectPart)
// Create a buffer.
buf := make([]byte, partSize)
// Create checksums
// CRC32C is ~50% faster on AMD64 @ 30GB/s
var crcBytes []byte
customHeader := make(http.Header)
crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
for partNumber <= totalPartsCount {
length, rErr := readFull(reader, buf)
if rErr == io.EOF && partNumber > 1 {
break
}
if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF {
return UploadInfo{}, rErr
}
// Calculates hash sums while copying partSize bytes into cw.
for k, v := range hashAlgos {
v.Write(buf[:length])
hashSums[k] = v.Sum(nil)
v.Close()
}
// Update progress reader appropriately to the latest offset
// as we read from the source.
rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)
// Checksums..
var (
md5Base64 string
sha256Hex string
)
if hashSums["md5"] != nil {
md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"])
}
if hashSums["sha256"] != nil {
sha256Hex = hex.EncodeToString(hashSums["sha256"])
}
if len(hashSums) == 0 {
crc.Reset()
crc.Write(buf[:length])
cSum := crc.Sum(nil)
customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
crcBytes = append(crcBytes, cSum...)
}
p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, sha256Hex: sha256Hex, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader}
// Proceed to upload the part.
objPart, uerr := c.uploadPart(ctx, p)
if uerr != nil {
return UploadInfo{}, uerr
}
// Save successfully uploaded part metadata.
partsInfo[partNumber] = objPart
// Save successfully uploaded size.
totalUploadedSize += int64(length)
// Increment part number.
partNumber++
// For unknown size, Read EOF we break away.
// We do not have to upload till totalPartsCount.
if rErr == io.EOF {
break
}
}
// Loop over total uploaded parts to save them in
// Parts array before completing the multipart request.
for i := 1; i < partNumber; i++ {
part, ok := partsInfo[i]
if !ok {
return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
}
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
ETag: part.ETag,
PartNumber: part.PartNumber,
ChecksumCRC32: part.ChecksumCRC32,
ChecksumCRC32C: part.ChecksumCRC32C,
ChecksumSHA1: part.ChecksumSHA1,
ChecksumSHA256: part.ChecksumSHA256,
})
}
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
opts = PutObjectOptions{
ServerSideEncryption: opts.ServerSideEncryption,
}
if len(crcBytes) > 0 {
// Add hash of hashes.
crc.Reset()
crc.Write(crcBytes)
opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
}
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
if err != nil {
return UploadInfo{}, err
}
uploadInfo.Size = totalUploadedSize
return uploadInfo, nil
}
// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return initiateMultipartUploadResult{}, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return initiateMultipartUploadResult{}, err
}
// Initialize url queries.
urlValues := make(url.Values)
urlValues.Set("uploads", "")
if opts.Internal.SourceVersionID != "" {
if opts.Internal.SourceVersionID != nullVersionID {
if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil {
return initiateMultipartUploadResult{}, errInvalidArgument(err.Error())
}
}
urlValues.Set("versionId", opts.Internal.SourceVersionID)
}
// Set ContentType header.
customHeader := opts.Header()
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
customHeader: customHeader,
}
// Execute POST on an objectName to initiate multipart upload.
resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata)
defer closeResponse(resp)
if err != nil {
return initiateMultipartUploadResult{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
}
}
// Decode xml for new multipart upload.
initiateMultipartUploadResult := initiateMultipartUploadResult{}
err = xmlDecoder(resp.Body, &initiateMultipartUploadResult)
if err != nil {
return initiateMultipartUploadResult, err
}
return initiateMultipartUploadResult, nil
}
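// uploadPartParams bundles every input needed to upload a single part of a multipart upload.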
type uploadPartParams struct {
bucketName string
objectName string
uploadID string
reader io.Reader
partNumber int
md5Base64 string
sha256Hex string
size int64
sse encrypt.ServerSide
streamSha256 bool
customHeader http.Header
trailer http.Header
}
// uploadPart - Uploads a part in a multipart upload.
func (c *Client) uploadPart(ctx context.Context, p uploadPartParams) (ObjectPart, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(p.bucketName); err != nil {
return ObjectPart{}, err
}
if err := s3utils.CheckValidObjectName(p.objectName); err != nil {
return ObjectPart{}, err
}
if p.size > maxPartSize {
return ObjectPart{}, errEntityTooLarge(p.size, maxPartSize, p.bucketName, p.objectName)
}
if p.size <= - | putObjectMultipartNoStream | identifier_name |
|
api-put-object-multipart.go | // Loop over total uploaded parts to save them in
// Parts array before completing the multipart request.
for i := 1; i < partNumber; i++ {
part, ok := partsInfo[i]
if !ok {
return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
}
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
ETag: part.ETag,
PartNumber: part.PartNumber,
ChecksumCRC32: part.ChecksumCRC32,
ChecksumCRC32C: part.ChecksumCRC32C,
ChecksumSHA1: part.ChecksumSHA1,
ChecksumSHA256: part.ChecksumSHA256,
})
}
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
opts = PutObjectOptions{
ServerSideEncryption: opts.ServerSideEncryption,
}
if len(crcBytes) > 0 {
// Add hash of hashes.
crc.Reset()
crc.Write(crcBytes)
opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
}
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
if err != nil {
return UploadInfo{}, err
}
uploadInfo.Size = totalUploadedSize
return uploadInfo, nil
}
// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return initiateMultipartUploadResult{}, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return initiateMultipartUploadResult{}, err
}
// Initialize url queries.
urlValues := make(url.Values)
urlValues.Set("uploads", "")
if opts.Internal.SourceVersionID != "" {
if opts.Internal.SourceVersionID != nullVersionID {
if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil {
return initiateMultipartUploadResult{}, errInvalidArgument(err.Error())
}
}
urlValues.Set("versionId", opts.Internal.SourceVersionID)
}
// Set ContentType header.
customHeader := opts.Header()
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
customHeader: customHeader,
}
// Execute POST on an objectName to initiate multipart upload.
resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata)
defer closeResponse(resp)
if err != nil {
return initiateMultipartUploadResult{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
}
}
// Decode xml for new multipart upload.
initiateMultipartUploadResult := initiateMultipartUploadResult{}
err = xmlDecoder(resp.Body, &initiateMultipartUploadResult)
if err != nil {
return initiateMultipartUploadResult, err
}
return initiateMultipartUploadResult, nil
}
type uploadPartParams struct {
bucketName string
objectName string
uploadID string
reader io.Reader
partNumber int
md5Base64 string
sha256Hex string
size int64
sse encrypt.ServerSide
streamSha256 bool
customHeader http.Header
trailer http.Header
}
// uploadPart - Uploads a part in a multipart upload.
func (c *Client) uploadPart(ctx context.Context, p uploadPartParams) (ObjectPart, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(p.bucketName); err != nil {
return ObjectPart{}, err
}
if err := s3utils.CheckValidObjectName(p.objectName); err != nil {
return ObjectPart{}, err
}
if p.size > maxPartSize {
return ObjectPart{}, errEntityTooLarge(p.size, maxPartSize, p.bucketName, p.objectName)
}
if p.size <= -1 {
return ObjectPart{}, errEntityTooSmall(p.size, p.bucketName, p.objectName)
}
if p.partNumber <= 0 {
return ObjectPart{}, errInvalidArgument("Part number cannot be negative or equal to zero.")
}
if p.uploadID == "" {
return ObjectPart{}, errInvalidArgument("UploadID cannot be empty.")
}
// Get resources properly escaped and lined up before using them in http request.
urlValues := make(url.Values)
// Set part number.
urlValues.Set("partNumber", strconv.Itoa(p.partNumber))
// Set upload id.
urlValues.Set("uploadId", p.uploadID)
// Set encryption headers, if any.
if p.customHeader == nil {
p.customHeader = make(http.Header)
}
// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html
// Server-side encryption is supported by the S3 Multipart Upload actions.
// Unless you are using a customer-provided encryption key, you don't need
// to specify the encryption parameters in each UploadPart request.
if p.sse != nil && p.sse.Type() == encrypt.SSEC {
p.sse.Marshal(p.customHeader)
}
reqMetadata := requestMetadata{
bucketName: p.bucketName,
objectName: p.objectName,
queryValues: urlValues,
customHeader: p.customHeader,
contentBody: p.reader,
contentLength: p.size,
contentMD5Base64: p.md5Base64,
contentSHA256Hex: p.sha256Hex,
streamSha256: p.streamSha256,
trailer: p.trailer,
}
// Execute PUT on each part.
resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
defer closeResponse(resp)
if err != nil {
return ObjectPart{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return ObjectPart{}, httpRespToErrorResponse(resp, p.bucketName, p.objectName)
}
}
// Once successfully uploaded, return completed part.
h := resp.Header
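// Checksum values echoed back by the server are kept on the part so they can be included in CompleteMultipartUpload.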
objPart := ObjectPart{
ChecksumCRC32: h.Get("x-amz-checksum-crc32"),
ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"),
ChecksumSHA1: h.Get("x-amz-checksum-sha1"),
ChecksumSHA256: h.Get("x-amz-checksum-sha256"),
}
objPart.Size = p.size
objPart.PartNumber = p.partNumber
// Trim off the odd double quotes from ETag in the beginning and end.
objPart.ETag = trimEtag(h.Get("ETag"))
return objPart, nil
}
// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string,
complete completeMultipartUpload, opts PutObjectOptions,
) (UploadInfo, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return UploadInfo{}, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return UploadInfo{}, err
}
// Initialize url queries.
urlValues := make(url.Values)
urlValues.Set("uploadId", uploadID)
// Marshal complete multipart body.
completeMultipartUploadBytes, err := xml.Marshal(complete)
if err != nil {
return UploadInfo{}, err
}
headers := opts.Header()
if s3utils.IsAmazonEndpoint(*c.endpointURL) {
headers.Del(encrypt.SseKmsKeyID) // Remove X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id not supported in CompleteMultipartUpload
headers.Del(encrypt.SseGenericHeader) // Remove X-Amz-Server-Side-Encryption not supported in CompleteMultipartUpload
headers.Del(encrypt.SseEncryptionContext) // Remove X-Amz-Server-Side-Encryption-Context not supported in CompleteMultipartUpload
}
// Instantiate all the complete multipart buffer.
completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
contentBody: completeMultipartUploadBuffer,
contentLength: int64(len(completeMultipartUploadBytes)),
contentSHA256Hex: sum256Hex(completeMultipartUploadBytes),
customHeader: headers,
}
// Execute POST to complete multipart upload for an objectName.
resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata) | defer closeResponse(resp)
if err != nil {
return UploadInfo{}, err
} | random_line_split |
|
api-put-object-multipart.go |
func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return UploadInfo{}, err
}
if err = s3utils.CheckValidObjectName(objectName); err != nil {
return UploadInfo{}, err
}
// Total data read and written to server. should be equal to
// 'size' at the end of the call.
var totalUploadedSize int64
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize)
if err != nil {
return UploadInfo{}, err
}
// Choose hash algorithms to be calculated by hashCopyN,
// avoid sha256 with non-v4 signature request or
// HTTPS connection.
hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5, !opts.DisableContentSha256)
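// When no MD5/SHA-256 material is in use, advertise CRC32C as the checksum algorithm for the new upload; the header is removed again right after the upload is initiated.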
if len(hashSums) == 0 {
if opts.UserMetadata == nil {
opts.UserMetadata = make(map[string]string, 1)
}
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
}
// Initiate a new multipart upload.
uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
if err != nil {
return UploadInfo{}, err
}
delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
defer func() {
if err != nil {
c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
}
}()
// Part number always starts with '1'.
partNumber := 1
// Initialize parts uploaded map.
partsInfo := make(map[int]ObjectPart)
// Create a buffer.
buf := make([]byte, partSize)
// Create checksums
// CRC32C is ~50% faster on AMD64 @ 30GB/s
var crcBytes []byte
customHeader := make(http.Header)
crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
for partNumber <= totalPartsCount {
length, rErr := readFull(reader, buf)
if rErr == io.EOF && partNumber > 1 {
break
}
if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF {
return UploadInfo{}, rErr
}
// Calculate the configured hash sums over the bytes read for this part.
for k, v := range hashAlgos {
v.Write(buf[:length])
hashSums[k] = v.Sum(nil)
v.Close()
}
// Update progress reader appropriately to the latest offset
// as we read from the source.
rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)
// Checksums..
var (
md5Base64 string
sha256Hex string
)
if hashSums["md5"] != nil {
md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"])
}
if hashSums["sha256"] != nil {
sha256Hex = hex.EncodeToString(hashSums["sha256"])
}
if len(hashSums) == 0 {
crc.Reset()
crc.Write(buf[:length])
cSum := crc.Sum(nil)
customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
crcBytes = append(crcBytes, cSum...)
}
p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, sha256Hex: sha256Hex, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader}
// Proceed to upload the part.
objPart, uerr := c.uploadPart(ctx, p)
if uerr != nil {
return UploadInfo{}, uerr
}
// Save successfully uploaded part metadata.
partsInfo[partNumber] = objPart
// Save successfully uploaded size.
totalUploadedSize += int64(length)
// Increment part number.
partNumber++
// For unknown size, we break away on Read EOF.
// We do not have to upload all totalPartsCount parts.
if rErr == io.EOF {
break
}
}
// Loop over total uploaded parts to save them in
// Parts array before completing the multipart request.
for i := 1; i < partNumber; i++ {
part, ok := partsInfo[i]
if !ok {
return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
}
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
ETag: part.ETag,
PartNumber: part.PartNumber,
ChecksumCRC32: part.ChecksumCRC32,
ChecksumCRC32C: part.ChecksumCRC32C,
ChecksumSHA1: part.ChecksumSHA1,
ChecksumSHA256: part.ChecksumSHA256,
})
}
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
opts = PutObjectOptions{
ServerSideEncryption: opts.ServerSideEncryption,
}
if len(crcBytes) > 0 {
// Add hash of hashes.
crc.Reset()
crc.Write(crcBytes)
opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
}
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
if err != nil {
return UploadInfo{}, err
}
uploadInfo.Size = totalUploadedSize
return uploadInfo, nil
}
// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return initiateMultipartUploadResult{}, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return initiateMultipartUploadResult{}, err
}
// Initialize url queries.
urlValues := make(url.Values)
urlValues.Set("uploads", "")
if opts.Internal.SourceVersionID != "" {
if opts.Internal.SourceVersionID != nullVersionID {
if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil {
return initiateMultipartUploadResult{}, errInvalidArgument(err.Error())
}
}
urlValues.Set("versionId", opts.Internal.SourceVersionID)
}
// Set ContentType header.
customHeader := opts.Header()
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
customHeader: customHeader,
}
// Execute POST on an objectName to initiate multipart upload.
resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata)
defer closeResponse(resp)
if err != nil {
return initiateMultipartUploadResult{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
}
}
// Decode xml for new multipart upload.
initiateMultipartUploadResult := initiateMultipartUploadResult{}
err = xmlDecoder(resp.Body, &initiateMultipartUploadResult)
if err != nil {
return initiateMultipartUploadResult, err
}
return initiateMultipartUploadResult, nil
}
type uploadPartParams struct {
bucketName string
objectName string
uploadID string
reader io.Reader
partNumber | {
info, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts)
if err != nil {
errResp := ToErrorResponse(err)
// Verify if multipart functionality is not available, if not
// fall back to single PutObject operation.
if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
// Verify if size of reader is greater than '5GiB'.
if size > maxSinglePutObjectSize {
return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
return c.putObject(ctx, bucketName, objectName, reader, size, opts)
}
}
return info, err
} | identifier_body |
|
manager.go | (context.Context, ...UninstallOption) (*rpb.Release, error)
CleanupRelease(context.Context, string) (bool, error)
}
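// manager implements the release manager interface above, holding the Helm action configuration, storage backend and the chart/values to deploy.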
type manager struct {
actionConfig *action.Configuration
storageBackend *storage.Storage
kubeClient kube.Interface
releaseName string
namespace string
values map[string]interface{}
status *types.HelmAppStatus
isInstalled bool
isUpgradeRequired bool
deployedRelease *rpb.Release
chart *cpb.Chart
}
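// InstallOption, UpgradeOption and UninstallOption let callers customize the underlying Helm actions before they are run.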
type InstallOption func(*action.Install) error
type UpgradeOption func(*action.Upgrade) error
type UninstallOption func(*action.Uninstall) error
// ReleaseName returns the name of the release.
func (m manager) ReleaseName() string {
return m.releaseName
}
func (m manager) IsInstalled() bool {
return m.isInstalled
}
func (m manager) IsUpgradeRequired() bool {
return m.isUpgradeRequired
}
// Sync ensures the Helm storage backend is in sync with the status of the
// custom resource.
func (m *manager) Sync(ctx context.Context) error {
// Get release history for this release name
releases, err := m.storageBackend.History(m.releaseName)
if err != nil && !notFoundErr(err) {
return fmt.Errorf("failed to retrieve release history: %w", err)
}
// Cleanup non-deployed release versions. If all release versions are
// non-deployed, this will ensure that failed installations are correctly
// retried.
for _, rel := range releases {
if rel.Info != nil && rel.Info.Status != rpb.StatusDeployed {
_, err := m.storageBackend.Delete(rel.Name, rel.Version)
if err != nil && !notFoundErr(err) {
return fmt.Errorf("failed to delete stale release version: %w", err)
}
}
}
// Load the most recently deployed release from the storage backend.
deployedRelease, err := m.getDeployedRelease()
if errors.Is(err, driver.ErrReleaseNotFound) {
return nil
}
if err != nil {
return fmt.Errorf("failed to get deployed release: %w", err)
}
m.deployedRelease = deployedRelease
m.isInstalled = true
// Get the next candidate release to determine if an upgrade is necessary.
candidateRelease, err := m.getCandidateRelease(m.namespace, m.releaseName, m.chart, m.values)
if err != nil {
return fmt.Errorf("failed to get candidate release: %w", err)
}
if deployedRelease.Manifest != candidateRelease.Manifest {
m.isUpgradeRequired = true
}
return nil
}
func notFoundErr(err error) bool {
return err != nil && strings.Contains(err.Error(), "not found")
}
func (m manager) getDeployedRelease() (*rpb.Release, error) {
deployedRelease, err := m.storageBackend.Deployed(m.releaseName)
if err != nil {
if strings.Contains(err.Error(), "has no deployed releases") {
return nil, driver.ErrReleaseNotFound
}
return nil, err
}
return deployedRelease, nil
}
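// getCandidateRelease dry-runs an upgrade to render the manifest that the current chart and values would produce.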
func (m manager) getCandidateRelease(namespace, name string, chart *cpb.Chart,
values map[string]interface{}) (*rpb.Release, error) {
upgrade := action.NewUpgrade(m.actionConfig)
upgrade.Namespace = namespace
upgrade.DryRun = true
return upgrade.Run(name, chart, values)
}
// InstallRelease performs a Helm release install.
func (m manager) InstallRelease(ctx context.Context, opts ...InstallOption) (*rpb.Release, error) {
install := action.NewInstall(m.actionConfig)
install.ReleaseName = m.releaseName
install.Namespace = m.namespace
for _, o := range opts {
if err := o(install); err != nil {
return nil, fmt.Errorf("failed to apply install option: %w", err)
}
}
installedRelease, err := install.Run(m.chart, m.values)
if err != nil {
// Workaround for helm/helm#3338
if installedRelease != nil {
uninstall := action.NewUninstall(m.actionConfig)
_, uninstallErr := uninstall.Run(m.releaseName)
// In certain cases, InstallRelease will return a partial release in
// the response even when it doesn't record the release in its release
// store (e.g. when there is an error rendering the release manifest).
// In that case the rollback will fail with a not found error because
// there was nothing to rollback.
//
// Only log a message about a rollback failure if the failure was caused
// by something other than the release not being found.
if uninstallErr != nil && !notFoundErr(uninstallErr) {
return nil, fmt.Errorf("failed installation (%s) and failed rollback: %w", err, uninstallErr)
}
}
return nil, fmt.Errorf("failed to install release: %w", err)
}
return installedRelease, nil
}
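// ForceUpgrade returns an UpgradeOption that sets the Force flag on the Helm
// upgrade action, for example: m.UpgradeRelease(ctx, ForceUpgrade(true)).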
func ForceUpgrade(force bool) UpgradeOption {
return func(u *action.Upgrade) error {
u.Force = force
return nil
}
}
// UpgradeRelease performs a Helm release upgrade.
func (m manager) UpgradeRelease(ctx context.Context, opts ...UpgradeOption) (*rpb.Release, *rpb.Release, error) {
upgrade := action.NewUpgrade(m.actionConfig)
upgrade.Namespace = m.namespace
for _, o := range opts {
if err := o(upgrade); err != nil {
return nil, nil, fmt.Errorf("failed to apply upgrade option: %w", err)
}
}
upgradedRelease, err := upgrade.Run(m.releaseName, m.chart, m.values)
if err != nil {
// Workaround for helm/helm#3338
if upgradedRelease != nil {
rollback := action.NewRollback(m.actionConfig)
rollback.Force = true
// As of Helm 2.13, if UpgradeRelease returns a non-nil release, that
// means the release was also recorded in the release store.
// Therefore, we should perform the rollback when we have a non-nil
// release. Any rollback error here would be unexpected, so always
// log both the upgrade and rollback errors.
rollbackErr := rollback.Run(m.releaseName)
if rollbackErr != nil {
return nil, nil, fmt.Errorf("failed upgrade (%s) and failed rollback: %w", err, rollbackErr)
}
}
return nil, nil, fmt.Errorf("failed to upgrade release: %w", err)
}
return m.deployedRelease, upgradedRelease, err
}
// ReconcileRelease creates or patches resources as necessary to match the
// deployed release's manifest.
func (m manager) ReconcileRelease(ctx context.Context) (*rpb.Release, error) {
err := reconcileRelease(ctx, m.kubeClient, m.deployedRelease.Manifest)
return m.deployedRelease, err
}
func reconcileRelease(_ context.Context, kubeClient kube.Interface, expectedManifest string) error {
expectedInfos, err := kubeClient.Build(bytes.NewBufferString(expectedManifest), false)
if err != nil {
return err
}
return expectedInfos.Visit(func(expected *resource.Info, err error) error {
if err != nil {
return fmt.Errorf("visit error: %w", err)
}
helper := resource.NewHelper(expected.Client, expected.Mapping)
existing, err := helper.Get(expected.Namespace, expected.Name)
if apierrors.IsNotFound(err) {
if _, err := helper.Create(expected.Namespace, true, expected.Object); err != nil {
return fmt.Errorf("create error: %w", err)
}
return nil
} else if err != nil {
return fmt.Errorf("could not get object: %w", err)
}
// Replicate helm's patch creation, which will create a Three-Way-Merge patch for
// native kubernetes Objects and fall back to a JSON merge patch for unstructured Objects such as CRDs
// We also extend the JSON merge patch by ignoring "remove" operations for fields added by kubernetes
// Reference in the helm source code:
// https://github.com/helm/helm/blob/1c9b54ad7f62a5ce12f87c3ae55136ca20f09c98/pkg/kube/client.go#L392
patch, patchType, err := createPatch(existing, expected)
if err != nil {
return fmt.Errorf("error creating patch: %w", err)
}
if patch == nil {
// nothing to do
return nil
}
_, err = helper.Patch(expected.Namespace, expected.Name, patchType, patch,
&metav1.PatchOptions{})
if err != nil |
return nil
})
}
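// createPatch marshals the existing and expected objects to JSON and builds the patch applied by reconcileRelease.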
func createPatch(existing runtime.Object, expected *resource.Info) ([]byte, apitypes.PatchType, error) {
existingJSON, err := json.Marshal(existing)
if err != nil {
return nil, apitypes.StrategicMergePatchType, err
}
expectedJSON, err := json.Marshal(expected.Object)
if err != nil {
return nil, apitypes.StrategicMergePatchType, err
}
// Get a | {
return fmt.Errorf("patch error: %w", err)
} | conditional_block |
manager.go | (context.Context, ...UninstallOption) (*rpb.Release, error)
CleanupRelease(context.Context, string) (bool, error)
}
type manager struct {
actionConfig *action.Configuration
storageBackend *storage.Storage
kubeClient kube.Interface
releaseName string
namespace string
values map[string]interface{}
status *types.HelmAppStatus
isInstalled bool
isUpgradeRequired bool
deployedRelease *rpb.Release
chart *cpb.Chart
}
type InstallOption func(*action.Install) error
type UpgradeOption func(*action.Upgrade) error
type UninstallOption func(*action.Uninstall) error
// ReleaseName returns the name of the release.
func (m manager) ReleaseName() string {
return m.releaseName
}
func (m manager) IsInstalled() bool {
return m.isInstalled
}
func (m manager) IsUpgradeRequired() bool {
return m.isUpgradeRequired
}
// Sync ensures the Helm storage backend is in sync with the status of the
// custom resource.
func (m *manager) Sync(ctx context.Context) error {
// Get release history for this release name
releases, err := m.storageBackend.History(m.releaseName)
if err != nil && !notFoundErr(err) {
return fmt.Errorf("failed to retrieve release history: %w", err)
}
// Cleanup non-deployed release versions. If all release versions are
// non-deployed, this will ensure that failed installations are correctly
// retried.
for _, rel := range releases {
if rel.Info != nil && rel.Info.Status != rpb.StatusDeployed {
_, err := m.storageBackend.Delete(rel.Name, rel.Version)
if err != nil && !notFoundErr(err) {
return fmt.Errorf("failed to delete stale release version: %w", err)
}
}
}
// Load the most recently deployed release from the storage backend.
deployedRelease, err := m.getDeployedRelease()
if errors.Is(err, driver.ErrReleaseNotFound) {
return nil
}
if err != nil {
return fmt.Errorf("failed to get deployed release: %w", err)
}
m.deployedRelease = deployedRelease
m.isInstalled = true
// Get the next candidate release to determine if an upgrade is necessary.
candidateRelease, err := m.getCandidateRelease(m.namespace, m.releaseName, m.chart, m.values)
if err != nil {
return fmt.Errorf("failed to get candidate release: %w", err)
}
if deployedRelease.Manifest != candidateRelease.Manifest {
m.isUpgradeRequired = true
}
return nil
}
func notFoundErr(err error) bool |
func (m manager) getDeployedRelease() (*rpb.Release, error) {
deployedRelease, err := m.storageBackend.Deployed(m.releaseName)
if err != nil {
if strings.Contains(err.Error(), "has no deployed releases") {
return nil, driver.ErrReleaseNotFound
}
return nil, err
}
return deployedRelease, nil
}
func (m manager) getCandidateRelease(namespace, name string, chart *cpb.Chart,
values map[string]interface{}) (*rpb.Release, error) {
upgrade := action.NewUpgrade(m.actionConfig)
upgrade.Namespace = namespace
upgrade.DryRun = true
return upgrade.Run(name, chart, values)
}
// InstallRelease performs a Helm release install.
func (m manager) InstallRelease(ctx context.Context, opts ...InstallOption) (*rpb.Release, error) {
install := action.NewInstall(m.actionConfig)
install.ReleaseName = m.releaseName
install.Namespace = m.namespace
for _, o := range opts {
if err := o(install); err != nil {
return nil, fmt.Errorf("failed to apply install option: %w", err)
}
}
installedRelease, err := install.Run(m.chart, m.values)
if err != nil {
// Workaround for helm/helm#3338
if installedRelease != nil {
uninstall := action.NewUninstall(m.actionConfig)
_, uninstallErr := uninstall.Run(m.releaseName)
// In certain cases, InstallRelease will return a partial release in
// the response even when it doesn't record the release in its release
// store (e.g. when there is an error rendering the release manifest).
// In that case the rollback will fail with a not found error because
// there was nothing to rollback.
//
// Only log a message about a rollback failure if the failure was caused
// by something other than the release not being found.
if uninstallErr != nil && !notFoundErr(uninstallErr) {
return nil, fmt.Errorf("failed installation (%s) and failed rollback: %w", err, uninstallErr)
}
}
return nil, fmt.Errorf("failed to install release: %w", err)
}
return installedRelease, nil
}
func ForceUpgrade(force bool) UpgradeOption {
return func(u *action.Upgrade) error {
u.Force = force
return nil
}
}
// UpgradeRelease performs a Helm release upgrade.
func (m manager) UpgradeRelease(ctx context.Context, opts ...UpgradeOption) (*rpb.Release, *rpb.Release, error) {
upgrade := action.NewUpgrade(m.actionConfig)
upgrade.Namespace = m.namespace
for _, o := range opts {
if err := o(upgrade); err != nil {
return nil, nil, fmt.Errorf("failed to apply upgrade option: %w", err)
}
}
upgradedRelease, err := upgrade.Run(m.releaseName, m.chart, m.values)
if err != nil {
// Workaround for helm/helm#3338
if upgradedRelease != nil {
rollback := action.NewRollback(m.actionConfig)
rollback.Force = true
// As of Helm 2.13, if UpgradeRelease returns a non-nil release, that
// means the release was also recorded in the release store.
// Therefore, we should perform the rollback when we have a non-nil
// release. Any rollback error here would be unexpected, so always
// log both the upgrade and rollback errors.
rollbackErr := rollback.Run(m.releaseName)
if rollbackErr != nil {
return nil, nil, fmt.Errorf("failed upgrade (%s) and failed rollback: %w", err, rollbackErr)
}
}
return nil, nil, fmt.Errorf("failed to upgrade release: %w", err)
}
return m.deployedRelease, upgradedRelease, err
}
// ReconcileRelease creates or patches resources as necessary to match the
// deployed release's manifest.
func (m manager) ReconcileRelease(ctx context.Context) (*rpb.Release, error) {
err := reconcileRelease(ctx, m.kubeClient, m.deployedRelease.Manifest)
return m.deployedRelease, err
}
func reconcileRelease(_ context.Context, kubeClient kube.Interface, expectedManifest string) error {
expectedInfos, err := kubeClient.Build(bytes.NewBufferString(expectedManifest), false)
if err != nil {
return err
}
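// Walk every resource in the expected manifest: create it when missing, otherwise patch it to match the release.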
return expectedInfos.Visit(func(expected *resource.Info, err error) error {
if err != nil {
return fmt.Errorf("visit error: %w", err)
}
helper := resource.NewHelper(expected.Client, expected.Mapping)
existing, err := helper.Get(expected.Namespace, expected.Name)
if apierrors.IsNotFound(err) {
if _, err := helper.Create(expected.Namespace, true, expected.Object); err != nil {
return fmt.Errorf("create error: %w", err)
}
return nil
} else if err != nil {
return fmt.Errorf("could not get object: %w", err)
}
// Replicate helm's patch creation, which will create a Three-Way-Merge patch for
// native kubernetes Objects and fall back to a JSON merge patch for unstructured Objects such as CRDs
// We also extend the JSON merge patch by ignoring "remove" operations for fields added by kubernetes
// Reference in the helm source code:
// https://github.com/helm/helm/blob/1c9b54ad7f62a5ce12f87c3ae55136ca20f09c98/pkg/kube/client.go#L392
patch, patchType, err := createPatch(existing, expected)
if err != nil {
return fmt.Errorf("error creating patch: %w", err)
}
if patch == nil {
// nothing to do
return nil
}
_, err = helper.Patch(expected.Namespace, expected.Name, patchType, patch,
&metav1.PatchOptions{})
if err != nil {
return fmt.Errorf("patch error: %w", err)
}
return nil
})
}
func createPatch(existing runtime.Object, expected *resource.Info) ([]byte, apitypes.PatchType, error) {
existingJSON, err := json.Marshal(existing)
if err != nil {
return nil, apitypes.StrategicMergePatchType, err
}
expectedJSON, err := json.Marshal(expected.Object)
if err != nil {
return nil, apitypes.StrategicMergePatchType, err
}
// Get a | {
return err != nil && strings.Contains(err.Error(), "not found")
} | identifier_body |
manager.go | Release(context.Context, ...UninstallOption) (*rpb.Release, error)
CleanupRelease(context.Context, string) (bool, error)
}
type manager struct {
actionConfig *action.Configuration
storageBackend *storage.Storage
kubeClient kube.Interface
releaseName string
namespace string
values map[string]interface{}
status *types.HelmAppStatus
isInstalled bool
isUpgradeRequired bool
deployedRelease *rpb.Release
chart *cpb.Chart
}
type InstallOption func(*action.Install) error
type UpgradeOption func(*action.Upgrade) error
type UninstallOption func(*action.Uninstall) error
// ReleaseName returns the name of the release.
func (m manager) | () string {
return m.releaseName
}
func (m manager) IsInstalled() bool {
return m.isInstalled
}
func (m manager) IsUpgradeRequired() bool {
return m.isUpgradeRequired
}
// Sync ensures the Helm storage backend is in sync with the status of the
// custom resource.
func (m *manager) Sync(ctx context.Context) error {
// Get release history for this release name
releases, err := m.storageBackend.History(m.releaseName)
if err != nil && !notFoundErr(err) {
return fmt.Errorf("failed to retrieve release history: %w", err)
}
// Cleanup non-deployed release versions. If all release versions are
// non-deployed, this will ensure that failed installations are correctly
// retried.
for _, rel := range releases {
if rel.Info != nil && rel.Info.Status != rpb.StatusDeployed {
_, err := m.storageBackend.Delete(rel.Name, rel.Version)
if err != nil && !notFoundErr(err) {
return fmt.Errorf("failed to delete stale release version: %w", err)
}
}
}
// Load the most recently deployed release from the storage backend.
deployedRelease, err := m.getDeployedRelease()
if errors.Is(err, driver.ErrReleaseNotFound) {
return nil
}
if err != nil {
return fmt.Errorf("failed to get deployed release: %w", err)
}
m.deployedRelease = deployedRelease
m.isInstalled = true
// Get the next candidate release to determine if an upgrade is necessary.
candidateRelease, err := m.getCandidateRelease(m.namespace, m.releaseName, m.chart, m.values)
if err != nil {
return fmt.Errorf("failed to get candidate release: %w", err)
}
if deployedRelease.Manifest != candidateRelease.Manifest {
m.isUpgradeRequired = true
}
return nil
}
func notFoundErr(err error) bool {
return err != nil && strings.Contains(err.Error(), "not found")
}
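// getDeployedRelease loads the most recently deployed revision from the storage backend, mapping Helm's "has no deployed releases" error to driver.ErrReleaseNotFound.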
func (m manager) getDeployedRelease() (*rpb.Release, error) {
deployedRelease, err := m.storageBackend.Deployed(m.releaseName)
if err != nil {
if strings.Contains(err.Error(), "has no deployed releases") {
return nil, driver.ErrReleaseNotFound
}
return nil, err
}
return deployedRelease, nil
}
func (m manager) getCandidateRelease(namespace, name string, chart *cpb.Chart,
values map[string]interface{}) (*rpb.Release, error) {
upgrade := action.NewUpgrade(m.actionConfig)
upgrade.Namespace = namespace
upgrade.DryRun = true
return upgrade.Run(name, chart, values)
}
// InstallRelease performs a Helm release install.
func (m manager) InstallRelease(ctx context.Context, opts ...InstallOption) (*rpb.Release, error) {
install := action.NewInstall(m.actionConfig)
install.ReleaseName = m.releaseName
install.Namespace = m.namespace
for _, o := range opts {
if err := o(install); err != nil {
return nil, fmt.Errorf("failed to apply install option: %w", err)
}
}
installedRelease, err := install.Run(m.chart, m.values)
if err != nil {
// Workaround for helm/helm#3338
if installedRelease != nil {
uninstall := action.NewUninstall(m.actionConfig)
_, uninstallErr := uninstall.Run(m.releaseName)
// In certain cases, InstallRelease will return a partial release in
// the response even when it doesn't record the release in its release
// store (e.g. when there is an error rendering the release manifest).
// In that case the rollback will fail with a not found error because
// there was nothing to rollback.
//
// Only log a message about a rollback failure if the failure was caused
// by something other than the release not being found.
if uninstallErr != nil && !notFoundErr(uninstallErr) {
return nil, fmt.Errorf("failed installation (%s) and failed rollback: %w", err, uninstallErr)
}
}
return nil, fmt.Errorf("failed to install release: %w", err)
}
return installedRelease, nil
}
func ForceUpgrade(force bool) UpgradeOption {
return func(u *action.Upgrade) error {
u.Force = force
return nil
}
}
// UpgradeRelease performs a Helm release upgrade.
func (m manager) UpgradeRelease(ctx context.Context, opts ...UpgradeOption) (*rpb.Release, *rpb.Release, error) {
upgrade := action.NewUpgrade(m.actionConfig)
upgrade.Namespace = m.namespace
for _, o := range opts {
if err := o(upgrade); err != nil {
return nil, nil, fmt.Errorf("failed to apply upgrade option: %w", err)
}
}
upgradedRelease, err := upgrade.Run(m.releaseName, m.chart, m.values)
if err != nil {
// Workaround for helm/helm#3338
if upgradedRelease != nil {
rollback := action.NewRollback(m.actionConfig)
rollback.Force = true
// As of Helm 2.13, if UpgradeRelease returns a non-nil release, that
// means the release was also recorded in the release store.
// Therefore, we should perform the rollback when we have a non-nil
// release. Any rollback error here would be unexpected, so always
// log both the upgrade and rollback errors.
rollbackErr := rollback.Run(m.releaseName)
if rollbackErr != nil {
return nil, nil, fmt.Errorf("failed upgrade (%s) and failed rollback: %w", err, rollbackErr)
}
}
return nil, nil, fmt.Errorf("failed to upgrade release: %w", err)
}
return m.deployedRelease, upgradedRelease, err
}
// ReconcileRelease creates or patches resources as necessary to match the
// deployed release's manifest.
func (m manager) ReconcileRelease(ctx context.Context) (*rpb.Release, error) {
err := reconcileRelease(ctx, m.kubeClient, m.deployedRelease.Manifest)
return m.deployedRelease, err
}
func reconcileRelease(_ context.Context, kubeClient kube.Interface, expectedManifest string) error {
expectedInfos, err := kubeClient.Build(bytes.NewBufferString(expectedManifest), false)
if err != nil {
return err
}
return expectedInfos.Visit(func(expected *resource.Info, err error) error {
if err != nil {
return fmt.Errorf("visit error: %w", err)
}
helper := resource.NewHelper(expected.Client, expected.Mapping)
existing, err := helper.Get(expected.Namespace, expected.Name)
if apierrors.IsNotFound(err) {
if _, err := helper.Create(expected.Namespace, true, expected.Object); err != nil {
return fmt.Errorf("create error: %w", err)
}
return nil
} else if err != nil {
return fmt.Errorf("could not get object: %w", err)
}
// Replicate helm's patch creation, which will create a Three-Way-Merge patch for
// native kubernetes Objects and fall back to a JSON merge patch for unstructured Objects such as CRDs
// We also extend the JSON merge patch by ignoring "remove" operations for fields added by kubernetes
// Reference in the helm source code:
// https://github.com/helm/helm/blob/1c9b54ad7f62a5ce12f87c3ae55136ca20f09c98/pkg/kube/client.go#L392
patch, patchType, err := createPatch(existing, expected)
if err != nil {
return fmt.Errorf("error creating patch: %w", err)
}
if patch == nil {
// nothing to do
return nil
}
_, err = helper.Patch(expected.Namespace, expected.Name, patchType, patch,
&metav1.PatchOptions{})
if err != nil {
return fmt.Errorf("patch error: %w", err)
}
return nil
})
}
func createPatch(existing runtime.Object, expected *resource.Info) ([]byte, apitypes.PatchType, error) {
existingJSON, err := json.Marshal(existing)
if err != nil {
return nil, apitypes.StrategicMergePatchType, err
}
expectedJSON, err := json.Marshal(expected.Object)
if err != nil {
return nil, apitypes.StrategicMergePatchType, err
}
// Get a | ReleaseName | identifier_name |
manager.go | Release(context.Context, ...UninstallOption) (*rpb.Release, error)
CleanupRelease(context.Context, string) (bool, error)
}
type manager struct {
actionConfig *action.Configuration
storageBackend *storage.Storage
kubeClient kube.Interface
releaseName string
namespace string
values map[string]interface{}
status *types.HelmAppStatus
isInstalled bool
isUpgradeRequired bool
deployedRelease *rpb.Release
chart *cpb.Chart
}
type InstallOption func(*action.Install) error
type UpgradeOption func(*action.Upgrade) error
type UninstallOption func(*action.Uninstall) error
// ReleaseName returns the name of the release.
func (m manager) ReleaseName() string {
return m.releaseName
}
func (m manager) IsInstalled() bool {
return m.isInstalled
}
func (m manager) IsUpgradeRequired() bool {
return m.isUpgradeRequired
}
// Sync ensures the Helm storage backend is in sync with the status of the
// custom resource.
func (m *manager) Sync(ctx context.Context) error {
// Get release history for this release name
releases, err := m.storageBackend.History(m.releaseName)
if err != nil && !notFoundErr(err) {
return fmt.Errorf("failed to retrieve release history: %w", err)
}
// Cleanup non-deployed release versions. If all release versions are
// non-deployed, this will ensure that failed installations are correctly
// retried.
for _, rel := range releases {
if rel.Info != nil && rel.Info.Status != rpb.StatusDeployed {
_, err := m.storageBackend.Delete(rel.Name, rel.Version)
if err != nil && !notFoundErr(err) {
return fmt.Errorf("failed to delete stale release version: %w", err)
}
}
}
// Load the most recently deployed release from the storage backend.
deployedRelease, err := m.getDeployedRelease()
if errors.Is(err, driver.ErrReleaseNotFound) {
return nil
}
if err != nil {
return fmt.Errorf("failed to get deployed release: %w", err)
}
m.deployedRelease = deployedRelease
m.isInstalled = true
// Get the next candidate release to determine if an upgrade is necessary.
candidateRelease, err := m.getCandidateRelease(m.namespace, m.releaseName, m.chart, m.values)
if err != nil {
return fmt.Errorf("failed to get candidate release: %w", err)
}
if deployedRelease.Manifest != candidateRelease.Manifest {
m.isUpgradeRequired = true
}
return nil
}
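// notFoundErr reports whether err is Helm's string-matched "not found" error.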
func notFoundErr(err error) bool {
return err != nil && strings.Contains(err.Error(), "not found")
}
func (m manager) getDeployedRelease() (*rpb.Release, error) {
deployedRelease, err := m.storageBackend.Deployed(m.releaseName)
if err != nil {
if strings.Contains(err.Error(), "has no deployed releases") {
return nil, driver.ErrReleaseNotFound
}
return nil, err
}
return deployedRelease, nil
}
func (m manager) getCandidateRelease(namespace, name string, chart *cpb.Chart,
values map[string]interface{}) (*rpb.Release, error) {
upgrade := action.NewUpgrade(m.actionConfig)
upgrade.Namespace = namespace
upgrade.DryRun = true
return upgrade.Run(name, chart, values)
}
// InstallRelease performs a Helm release install.
func (m manager) InstallRelease(ctx context.Context, opts ...InstallOption) (*rpb.Release, error) {
install := action.NewInstall(m.actionConfig)
install.ReleaseName = m.releaseName
install.Namespace = m.namespace
for _, o := range opts {
if err := o(install); err != nil {
return nil, fmt.Errorf("failed to apply install option: %w", err)
}
}
installedRelease, err := install.Run(m.chart, m.values)
if err != nil {
// Workaround for helm/helm#3338
if installedRelease != nil { | // the response even when it doesn't record the release in its release
// store (e.g. when there is an error rendering the release manifest).
// In that case the rollback will fail with a not found error because
// there was nothing to rollback.
//
// Only log a message about a rollback failure if the failure was caused
// by something other than the release not being found.
if uninstallErr != nil && !notFoundErr(uninstallErr) {
return nil, fmt.Errorf("failed installation (%s) and failed rollback: %w", err, uninstallErr)
}
}
return nil, fmt.Errorf("failed to install release: %w", err)
}
return installedRelease, nil
}
func ForceUpgrade(force bool) UpgradeOption {
return func(u *action.Upgrade) error {
u.Force = force
return nil
}
}
// UpgradeRelease performs a Helm release upgrade.
func (m manager) UpgradeRelease(ctx context.Context, opts ...UpgradeOption) (*rpb.Release, *rpb.Release, error) {
upgrade := action.NewUpgrade(m.actionConfig)
upgrade.Namespace = m.namespace
for _, o := range opts {
if err := o(upgrade); err != nil {
return nil, nil, fmt.Errorf("failed to apply upgrade option: %w", err)
}
}
upgradedRelease, err := upgrade.Run(m.releaseName, m.chart, m.values)
if err != nil {
// Workaround for helm/helm#3338
if upgradedRelease != nil {
rollback := action.NewRollback(m.actionConfig)
rollback.Force = true
// As of Helm 2.13, if UpgradeRelease returns a non-nil release, that
// means the release was also recorded in the release store.
// Therefore, we should perform the rollback when we have a non-nil
// release. Any rollback error here would be unexpected, so always
// log both the upgrade and rollback errors.
rollbackErr := rollback.Run(m.releaseName)
if rollbackErr != nil {
return nil, nil, fmt.Errorf("failed upgrade (%s) and failed rollback: %w", err, rollbackErr)
}
}
return nil, nil, fmt.Errorf("failed to upgrade release: %w", err)
}
return m.deployedRelease, upgradedRelease, err
}
// ReconcileRelease creates or patches resources as necessary to match the
// deployed release's manifest.
func (m manager) ReconcileRelease(ctx context.Context) (*rpb.Release, error) {
err := reconcileRelease(ctx, m.kubeClient, m.deployedRelease.Manifest)
return m.deployedRelease, err
}
func reconcileRelease(_ context.Context, kubeClient kube.Interface, expectedManifest string) error {
expectedInfos, err := kubeClient.Build(bytes.NewBufferString(expectedManifest), false)
if err != nil {
return err
}
return expectedInfos.Visit(func(expected *resource.Info, err error) error {
if err != nil {
return fmt.Errorf("visit error: %w", err)
}
helper := resource.NewHelper(expected.Client, expected.Mapping)
existing, err := helper.Get(expected.Namespace, expected.Name)
if apierrors.IsNotFound(err) {
if _, err := helper.Create(expected.Namespace, true, expected.Object); err != nil {
return fmt.Errorf("create error: %w", err)
}
return nil
} else if err != nil {
return fmt.Errorf("could not get object: %w", err)
}
// Replicate helm's patch creation, which will create a Three-Way-Merge patch for
// native kubernetes Objects and fall back to a JSON merge patch for unstructured Objects such as CRDs
// We also extend the JSON merge patch by ignoring "remove" operations for fields added by kubernetes
// Reference in the helm source code:
// https://github.com/helm/helm/blob/1c9b54ad7f62a5ce12f87c3ae55136ca20f09c98/pkg/kube/client.go#L392
patch, patchType, err := createPatch(existing, expected)
if err != nil {
return fmt.Errorf("error creating patch: %w", err)
}
if patch == nil {
// nothing to do
return nil
}
_, err = helper.Patch(expected.Namespace, expected.Name, patchType, patch,
&metav1.PatchOptions{})
if err != nil {
return fmt.Errorf("patch error: %w", err)
}
return nil
})
}
func createPatch(existing runtime.Object, expected *resource.Info) ([]byte, apitypes.PatchType, error) {
existingJSON, err := json.Marshal(existing)
if err != nil {
return nil, apitypes.StrategicMergePatchType, err
}
expectedJSON, err := json.Marshal(expected.Object)
if err != nil {
return nil, apitypes.StrategicMergePatchType, err
}
// Get a versioned | uninstall := action.NewUninstall(m.actionConfig)
_, uninstallErr := uninstall.Run(m.releaseName)
// In certain cases, InstallRelease will return a partial release in | random_line_split |
player.go | "go.minekube.com/gate/pkg/edition/java/proto/packet/title"
"go.minekube.com/gate/pkg/edition/java/proto/util"
"go.minekube.com/gate/pkg/edition/java/proto/version"
"go.minekube.com/gate/pkg/edition/java/proxy/message"
"go.minekube.com/gate/pkg/edition/java/proxy/player"
"go.minekube.com/gate/pkg/gate/proto"
"go.minekube.com/gate/pkg/runtime/logr"
"go.minekube.com/gate/pkg/util/permission"
"go.minekube.com/gate/pkg/util/sets"
"go.minekube.com/gate/pkg/util/uuid"
"go.uber.org/atomic"
"net"
"strings"
"sync"
"time"
)
// Player is a connected Minecraft player.
type Player interface {
Inbound
command.Source
message.ChannelMessageSource
message.ChannelMessageSink
ID() uuid.UUID // The Minecraft ID of the player.
Username() string // The username of the player.
// CurrentServer returns the current server connection of the player.
CurrentServer() ServerConnection // May be nil, if there is no backend server connection!
Ping() time.Duration // The player's ping or -1 if currently unknown.
OnlineMode() bool // Whether the player was authenticated with Mojang's session servers.
// CreateConnectionRequest creates a connection request to begin switching the backend server.
CreateConnectionRequest(target RegisteredServer) ConnectionRequest
GameProfile() profile.GameProfile // Returns the player's game profile.
Settings() player.Settings // The player's client settings. Returns player.DefaultSettings if not yet known.
// Disconnect disconnects the player with a reason.
// Once called, further interface calls to this player become undefined.
Disconnect(reason component.Component)
// SpoofChatInput sends chat input onto the player's current server as if
// they typed it into the client chat box.
SpoofChatInput(input string) error
// SendResourcePack sends the specified resource pack from url to the user. If at all possible,
// send the resource pack with a sha1 hash using SendResourcePackWithHash. To monitor the status
// of the sent resource pack, subscribe to PlayerResourcePackStatusEvent.
SendResourcePack(url string) error
// SendResourcePackWithHash sends the specified resource pack from url to the user,
// using the specified 20-byte SHA-1 hash of the resource pack file. To monitor the
// status of the sent resource pack, subscribe to PlayerResourcePackStatusEvent.
SendResourcePackWithHash(url string, sha1Hash []byte) error
// SendActionBar sends an action bar to the player.
SendActionBar(msg component.Component) error
// SendMessageWith sends a chat message with optional modifications.
SendMessageWith(msg component.Component, opts ...MessageOption) error
player.TabList
// TODO add title and more
}
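// connectedPlayer is the proxy-side implementation of Player, wrapping the player's client connection.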
type connectedPlayer struct {
*minecraftConn
log logr.Logger
virtualHost net.Addr
onlineMode bool
profile *profile.GameProfile
ping atomic.Duration
permFunc permission.Func
// This field is true if this connection is being disconnected
// due to another connection logging in with the same GameProfile.
disconnectDueToDuplicateConnection atomic.Bool
pluginChannelsMu sync.RWMutex // Protects following field
pluginChannels sets.String // Known plugin channels
*tabList // Player's tab list
mu sync.RWMutex // Protects following fields
connectedServer_ *serverConnection
connInFlight *serverConnection
settings player.Settings
modInfo *modinfo.ModInfo
connPhase clientConnectionPhase
serversToTry []string // names of servers to try if we got disconnected from previous
tryIndex int
}
var _ Player = (*connectedPlayer)(nil)
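// newConnectedPlayer wraps an authenticated client connection into a connectedPlayer; the ping is initialized to -1 (currently unknown).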
func newConnectedPlayer(
conn *minecraftConn,
profile *profile.GameProfile,
virtualHost net.Addr,
onlineMode bool,
) *connectedPlayer {
ping := atomic.Duration{}
ping.Store(-1)
return &connectedPlayer{
minecraftConn: conn,
log: conn.log.WithName("player").WithValues(
"name", profile.Name, "id", profile.ID),
profile: profile,
virtualHost: virtualHost,
onlineMode: onlineMode,
pluginChannels: sets.NewString(), // Should we limit the size to 1024 channels?
connPhase: conn.Type().initialClientPhase(),
ping: ping,
tabList: newTabList(conn),
permFunc: func(string) permission.TriState { return permission.Undefined },
}
}
func (p *connectedPlayer) connectionInFlight() *serverConnection {
p.mu.RLock()
defer p.mu.RUnlock()
return p.connInFlight
}
func (p *connectedPlayer) phase() clientConnectionPhase {
p.mu.RLock()
defer p.mu.RUnlock()
return p.connPhase
}
func (p *connectedPlayer) HasPermission(permission string) bool {
return p.PermissionValue(permission).Bool()
}
func (p *connectedPlayer) PermissionValue(permission string) permission.TriState {
return p.permFunc(permission)
}
func (p *connectedPlayer) Ping() time.Duration {
return p.ping.Load()
}
func (p *connectedPlayer) OnlineMode() bool {
return p.onlineMode
}
func (p *connectedPlayer) GameProfile() profile.GameProfile {
return *p.profile
}
var (
ErrNoBackendConnection = errors.New("player has no backend server connection yet")
ErrTooLongChatMessage = errors.New("server-bound chat message cannot exceed 256 characters")
)
func (p *connectedPlayer) SpoofChatInput(input string) error {
if len(input) > packet.MaxServerBoundMessageLength {
return ErrTooLongChatMessage
}
serverMc, ok := p.ensureBackendConnection()
if !ok {
return ErrNoBackendConnection
}
return serverMc.WritePacket(&packet.Chat{
Message: input,
Type: packet.ChatMessageType,
})
}
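// ensureBackendConnection returns the player's backend server connection, or false if the player is not (yet) connected to a backend server.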
func (p *connectedPlayer) ensureBackendConnection() (*minecraftConn, bool) {
p.mu.RLock()
defer p.mu.RUnlock()
if p.connectedServer_ == nil {
// Player has no backend connection.
return nil, false
}
serverMc := p.connectedServer_.conn()
if serverMc == nil {
// Player's backend connection is not yet connected to a server.
return nil, false
}
return serverMc, true
}
func (p *connectedPlayer) SendResourcePack(url string) error {
return p.WritePacket(&packet.ResourcePackRequest{
Url: url,
Hash: "",
})
}
func (p *connectedPlayer) SendResourcePackWithHash(url string, sha1Hash []byte) error {
if len(sha1Hash) != 20 {
return errors.New("hash length must be 20")
}
return p.WritePacket(&packet.ResourcePackRequest{
Url: url,
Hash: hex.EncodeToString(sha1Hash),
})
}
func (p *connectedPlayer) VirtualHost() net.Addr {
return p.virtualHost
}
func (p *connectedPlayer) Active() bool {
return !p.minecraftConn.Closed()
}
// MessageOption is an option for Player.SendMessageWith.
type MessageOption func(c *packet.Chat)
// MessageWithSender modifies the sender identity of the chat message.
func MessageWithSender(id uuid.UUID) MessageOption {
return func(c *packet.Chat) { c.Sender = id }
}
// MessageType is a chat message type.
type MessageType uint8
// Chat message types.
const (
// ChatMessageType is a standard chat message.
ChatMessageType MessageType = iota
// SystemMessageType is a system chat message.
// e.g. client is willing to accept messages from commands,
// but does not want general chat from other players.
SystemMessageType
)
// MessageWithType modifies chat message type.
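// For example (illustrative), p.SendMessageWith(msg, MessageWithType(SystemMessageType)) sends msg as a system message.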
func MessageWithType(t MessageType) MessageOption {
return func(c *packet.Chat) {
if t == SystemMessageType {
c.Type = packet.SystemMessageType
} else {
c.Type = packet.ChatMessageType
}
}
}
func (p *connectedPlayer) SendMessage(msg component.Component) error { return p.SendMessageWith(msg) }
func (p *connectedPlayer) SendMessageWith(msg component.Component, opts ...MessageOption) error {
if msg == nil {
return nil // skip nil message
}
m := new(strings.Builder)
if err := util.JsonCodec(p.Protocol()).Marshal(m, msg); err != nil {
return err
}
chat := &packet.Chat{
Message: m.String(),
Type: packet.ChatMessageType,
Sender: uuid.Nil,
}
for _, o := range opts {
o(chat)
}
return p.WritePacket(chat)
}
var legacyJsonCodec = &legacy.Legacy{}
func (p *connectedPlayer) SendActionBar(msg component.Component) error {
if msg == nil {
return nil // skip nil message
}
protocol := p.Protocol()
if protocol.GreaterEqual(version.Minecraft_1_11) {
// Use the title packet instead.
pkt, err := title.New(protocol | "go.minekube.com/gate/pkg/edition/java/forge"
"go.minekube.com/gate/pkg/edition/java/modinfo"
"go.minekube.com/gate/pkg/edition/java/profile"
"go.minekube.com/gate/pkg/edition/java/proto/packet"
"go.minekube.com/gate/pkg/edition/java/proto/packet/plugin" | random_line_split |
|
player.go | // SendResourcePack sends the specified resource pack from url to the user. If at all possible,
// send the resource pack with a sha1 hash using SendResourcePackWithHash. To monitor the status
// of the sent resource pack, subscribe to PlayerResourcePackStatusEvent.
SendResourcePack(url string) error
// SendResourcePackWithHash sends the specified resource pack from url to the user,
// using the specified 20-byte SHA-1 hash of the resource pack file. To monitor the
// status of the sent resource pack, subscribe to PlayerResourcePackStatusEvent.
SendResourcePackWithHash(url string, sha1Hash []byte) error
// SendActionBar sends an action bar to the player.
SendActionBar(msg component.Component) error
// SendMessageWith sends a chat message with optional modifications.
SendMessageWith(msg component.Component, opts ...MessageOption) error
player.TabList
// TODO add title and more
}
type connectedPlayer struct {
*minecraftConn
log logr.Logger
virtualHost net.Addr
onlineMode bool
profile *profile.GameProfile
ping atomic.Duration
permFunc permission.Func
// This field is true if this connection is being disconnected
// due to another connection logging in with the same GameProfile.
disconnectDueToDuplicateConnection atomic.Bool
pluginChannelsMu sync.RWMutex // Protects following field
pluginChannels sets.String // Known plugin channels
*tabList // Player's tab list
mu sync.RWMutex // Protects following fields
connectedServer_ *serverConnection
connInFlight *serverConnection
settings player.Settings
modInfo *modinfo.ModInfo
connPhase clientConnectionPhase
serversToTry []string // names of servers to try if we got disconnected from previous
tryIndex int
}
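// Compile-time assertion that connectedPlayer satisfies the Player interface.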
var _ Player = (*connectedPlayer)(nil)
func newConnectedPlayer(
conn *minecraftConn,
profile *profile.GameProfile,
virtualHost net.Addr,
onlineMode bool,
) *connectedPlayer {
ping := atomic.Duration{}
ping.Store(-1)
return &connectedPlayer{
minecraftConn: conn,
log: conn.log.WithName("player").WithValues(
"name", profile.Name, "id", profile.ID),
profile: profile,
virtualHost: virtualHost,
onlineMode: onlineMode,
pluginChannels: sets.NewString(), // Should we limit the size to 1024 channels?
connPhase: conn.Type().initialClientPhase(),
ping: ping,
tabList: newTabList(conn),
permFunc: func(string) permission.TriState { return permission.Undefined },
}
}
func (p *connectedPlayer) connectionInFlight() *serverConnection {
p.mu.RLock()
defer p.mu.RUnlock()
return p.connInFlight
}
func (p *connectedPlayer) phase() clientConnectionPhase {
p.mu.RLock()
defer p.mu.RUnlock()
return p.connPhase
}
func (p *connectedPlayer) HasPermission(permission string) bool {
return p.PermissionValue(permission).Bool()
}
func (p *connectedPlayer) PermissionValue(permission string) permission.TriState {
return p.permFunc(permission)
}
func (p *connectedPlayer) Ping() time.Duration {
return p.ping.Load()
}
func (p *connectedPlayer) | () bool {
return p.onlineMode
}
func (p *connectedPlayer) GameProfile() profile.GameProfile {
return *p.profile
}
var (
ErrNoBackendConnection = errors.New("player has no backend server connection yet")
ErrTooLongChatMessage = errors.New("server bound chat message can not exceed 256 characters")
)
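// SpoofChatInput makes the backend believe the player typed the given text in chat:
// it fails fast on over-long input or a missing backend connection, then writes a Chat packet upstream.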
func (p *connectedPlayer) SpoofChatInput(input string) error {
if len(input) > packet.MaxServerBoundMessageLength {
return ErrTooLongChatMessage
}
serverMc, ok := p.ensureBackendConnection()
if !ok {
return ErrNoBackendConnection
}
return serverMc.WritePacket(&packet.Chat{
Message: input,
Type: packet.ChatMessageType,
})
}
func (p *connectedPlayer) ensureBackendConnection() (*minecraftConn, bool) {
p.mu.RLock()
defer p.mu.RUnlock()
if p.connectedServer_ == nil {
// Player has no backend connection.
return nil, false
}
serverMc := p.connectedServer_.conn()
if serverMc == nil {
// Player's backend connection is not yet connected to a server.
return nil, false
}
return serverMc, true
}
func (p *connectedPlayer) SendResourcePack(url string) error {
return p.WritePacket(&packet.ResourcePackRequest{
Url: url,
Hash: "",
})
}
func (p *connectedPlayer) SendResourcePackWithHash(url string, sha1Hash []byte) error {
if len(sha1Hash) != 20 {
return errors.New("hash length must be 20")
}
return p.WritePacket(&packet.ResourcePackRequest{
Url: url,
Hash: hex.EncodeToString(sha1Hash),
})
}
func (p *connectedPlayer) VirtualHost() net.Addr {
return p.virtualHost
}
func (p *connectedPlayer) Active() bool {
return !p.minecraftConn.Closed()
}
// MessageOption is an option for Player.SendMessageWith.
type MessageOption func(c *packet.Chat)
// MessageWithSender modifies the sender identity of the chat message.
func MessageWithSender(id uuid.UUID) MessageOption {
return func(c *packet.Chat) { c.Sender = id }
}
// MessageType is a chat message type.
type MessageType uint8
// Chat message types.
const (
// ChatMessageType is a standard chat message.
ChatMessageType MessageType = iota
// SystemMessageType is a system chat message.
// e.g. client is willing to accept messages from commands,
// but does not want general chat from other players.
SystemMessageType
)
// MessageWithType modifies chat message type.
func MessageWithType(t MessageType) MessageOption {
return func(c *packet.Chat) {
if t == SystemMessageType {
c.Type = packet.SystemMessageType
} else {
c.Type = packet.ChatMessageType
}
}
}
func (p *connectedPlayer) SendMessage(msg component.Component) error { return p.SendMessageWith(msg) }
func (p *connectedPlayer) SendMessageWith(msg component.Component, opts ...MessageOption) error {
if msg == nil {
return nil // skip nil message
}
m := new(strings.Builder)
if err := util.JsonCodec(p.Protocol()).Marshal(m, msg); err != nil {
return err
}
chat := &packet.Chat{
Message: m.String(),
Type: packet.ChatMessageType,
Sender: uuid.Nil,
}
for _, o := range opts {
o(chat)
}
return p.WritePacket(chat)
}
var legacyJsonCodec = &legacy.Legacy{}
func (p *connectedPlayer) SendActionBar(msg component.Component) error {
if msg == nil {
return nil // skip nil message
}
protocol := p.Protocol()
if protocol.GreaterEqual(version.Minecraft_1_11) {
// Use the title packet instead.
pkt, err := title.New(protocol, &title.Builder{
Action: title.SetActionBar,
Component: msg,
})
if err != nil {
return err
}
return p.WritePacket(pkt)
}
// Due to issues with action bar packets, we'll need to convert the text message into a
// legacy message and then put the legacy text into a component... (╯°□°)╯︵ ┻━┻!
b := new(strings.Builder)
if err := legacyJsonCodec.Marshal(b, msg); err != nil {
return err
}
m, err := json.Marshal(map[string]string{"text": b.String()})
if err != nil {
return err
}
return p.WritePacket(&packet.Chat{
Message: string(m),
Type: packet.GameInfoMessageType,
Sender: uuid.Nil,
})
}
func (p *connectedPlayer) SendPluginMessage(identifier message.ChannelIdentifier, data []byte) error {
return p.WritePacket(&plugin.Message{
Channel: identifier.ID(),
Data: data,
})
}
// TODO add header/footer, title & boss bar methods
// Finds another server to attempt to log into, if we were unexpectedly disconnected from the server.
// current is the server the player is currently on; we skip it and do not connect to it again.
// current can be nil if there is no current server.
// MAY RETURN NIL if no next server available!
func (p *connectedPlayer) nextServerToTry(current RegisteredServer) RegisteredServer {
p.mu.Lock()
defer p.mu.Unlock()
if len(p.serversToTry) == 0 {
p.serversToTry = p.proxy.Config().ForcedHosts[p.virtualHost.String()]
}
if len(p.serversToTry) == 0 {
p.serversToTry = p.proxy.Config().Try
}
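// Fallback order: the per-virtual-host forced hosts first, then the proxy-wide try list from the config.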
sameName := func(rs RegisteredServer, name string) bool {
return rs.ServerInfo().Name() == name
}
for i := p.tryIndex; i < len(p.serversToTry); i++ {
toTry := p.serversToTry[i]
if (p.connectedServer_ != nil && sameName(p.connectedServer_.Server(), toTry)) ||
(p.connInFlight != nil && sameName(p.connInFlight.Server(), toTry)) ||
(current != nil && sameName(current, toTry | OnlineMode | identifier_name |
player.go | Known plugin channels
*tabList // Player's tab list
mu sync.RWMutex // Protects following fields
connectedServer_ *serverConnection
connInFlight *serverConnection
settings player.Settings
modInfo *modinfo.ModInfo
connPhase clientConnectionPhase
serversToTry []string // names of servers to try if we got disconnected from previous
tryIndex int
}
var _ Player = (*connectedPlayer)(nil)
func newConnectedPlayer(
conn *minecraftConn,
profile *profile.GameProfile,
virtualHost net.Addr,
onlineMode bool,
) *connectedPlayer {
ping := atomic.Duration{}
ping.Store(-1)
return &connectedPlayer{
minecraftConn: conn,
log: conn.log.WithName("player").WithValues(
"name", profile.Name, "id", profile.ID),
profile: profile,
virtualHost: virtualHost,
onlineMode: onlineMode,
pluginChannels: sets.NewString(), // Should we limit the size to 1024 channels?
connPhase: conn.Type().initialClientPhase(),
ping: ping,
tabList: newTabList(conn),
permFunc: func(string) permission.TriState { return permission.Undefined },
}
}
func (p *connectedPlayer) connectionInFlight() *serverConnection {
p.mu.RLock()
defer p.mu.RUnlock()
return p.connInFlight
}
func (p *connectedPlayer) phase() clientConnectionPhase {
p.mu.RLock()
defer p.mu.RUnlock()
return p.connPhase
}
func (p *connectedPlayer) HasPermission(permission string) bool {
return p.PermissionValue(permission).Bool()
}
func (p *connectedPlayer) PermissionValue(permission string) permission.TriState {
return p.permFunc(permission)
}
func (p *connectedPlayer) Ping() time.Duration {
return p.ping.Load()
}
func (p *connectedPlayer) OnlineMode() bool {
return p.onlineMode
}
func (p *connectedPlayer) GameProfile() profile.GameProfile {
return *p.profile
}
var (
ErrNoBackendConnection = errors.New("player has no backend server connection yet")
ErrTooLongChatMessage = errors.New("server bound chat message can not exceed 256 characters")
)
func (p *connectedPlayer) SpoofChatInput(input string) error {
if len(input) > packet.MaxServerBoundMessageLength {
return ErrTooLongChatMessage
}
serverMc, ok := p.ensureBackendConnection()
if !ok {
return ErrNoBackendConnection
}
return serverMc.WritePacket(&packet.Chat{
Message: input,
Type: packet.ChatMessageType,
})
}
func (p *connectedPlayer) ensureBackendConnection() (*minecraftConn, bool) {
p.mu.RLock()
defer p.mu.RUnlock()
if p.connectedServer_ == nil {
// Player has no backend connection.
return nil, false
}
serverMc := p.connectedServer_.conn()
if serverMc == nil {
// Player's backend connection is not yet connected to a server.
return nil, false
}
return serverMc, true
}
func (p *connectedPlayer) SendResourcePack(url string) error {
return p.WritePacket(&packet.ResourcePackRequest{
Url: url,
Hash: "",
})
}
func (p *connectedPlayer) SendResourcePackWithHash(url string, sha1Hash []byte) error {
if len(sha1Hash) != 20 {
return errors.New("hash length must be 20")
}
return p.WritePacket(&packet.ResourcePackRequest{
Url: url,
Hash: hex.EncodeToString(sha1Hash),
})
}
func (p *connectedPlayer) VirtualHost() net.Addr {
return p.virtualHost
}
func (p *connectedPlayer) Active() bool {
return !p.minecraftConn.Closed()
}
// MessageOption is an option for Player.SendMessageWith.
type MessageOption func(c *packet.Chat)
// MessageWithSender modifies the sender identity of the chat message.
func MessageWithSender(id uuid.UUID) MessageOption {
return func(c *packet.Chat) { c.Sender = id }
}
// MessageType is a chat message type.
type MessageType uint8
// Chat message types.
const (
// ChatMessageType is a standard chat message.
ChatMessageType MessageType = iota
// SystemMessageType is a system chat message.
// e.g. client is willing to accept messages from commands,
// but does not want general chat from other players.
SystemMessageType
)
// MessageWithType modifies chat message type.
func MessageWithType(t MessageType) MessageOption {
return func(c *packet.Chat) {
if t == SystemMessageType {
c.Type = packet.SystemMessageType
} else {
c.Type = packet.ChatMessageType
}
}
}
func (p *connectedPlayer) SendMessage(msg component.Component) error { return p.SendMessageWith(msg) }
func (p *connectedPlayer) SendMessageWith(msg component.Component, opts ...MessageOption) error {
if msg == nil {
return nil // skip nil message
}
m := new(strings.Builder)
if err := util.JsonCodec(p.Protocol()).Marshal(m, msg); err != nil {
return err
}
chat := &packet.Chat{
Message: m.String(),
Type: packet.ChatMessageType,
Sender: uuid.Nil,
}
for _, o := range opts {
o(chat)
}
return p.WritePacket(chat)
}
var legacyJsonCodec = &legacy.Legacy{}
func (p *connectedPlayer) SendActionBar(msg component.Component) error {
if msg == nil {
return nil // skip nil message
}
protocol := p.Protocol()
if protocol.GreaterEqual(version.Minecraft_1_11) {
// Use the title packet instead.
pkt, err := title.New(protocol, &title.Builder{
Action: title.SetActionBar,
Component: msg,
})
if err != nil {
return err
}
return p.WritePacket(pkt)
}
// Due to issues with action bar packets, we'll need to convert the text message into a
// legacy message and then put the legacy text into a component... (╯°□°)╯︵ ┻━┻!
b := new(strings.Builder)
if err := legacyJsonCodec.Marshal(b, msg); err != nil {
return err
}
m, err := json.Marshal(map[string]string{"text": b.String()})
if err != nil {
return err
}
return p.WritePacket(&packet.Chat{
Message: string(m),
Type: packet.GameInfoMessageType,
Sender: uuid.Nil,
})
}
func (p *connectedPlayer) SendPluginMessage(identifier message.ChannelIdentifier, data []byte) error {
return p.WritePacket(&plugin.Message{
Channel: identifier.ID(),
Data: data,
})
}
// TODO add header/footer, title & boss bar methods
// Finds another server to attempt to log into, if we were unexpectedly disconnected from the server.
// current is the server the player is currently on; we skip it and do not connect to it again.
// current can be nil if there is no current server.
// MAY RETURN NIL if no next server available!
func (p *connectedPlayer) nextServerToTry(current RegisteredServer) RegisteredServer {
p.mu.Lock()
defer p.mu.Unlock()
if len(p.serversToTry) == 0 {
p.serversToTry = p.proxy.Config().ForcedHosts[p.virtualHost.String()]
}
if len(p.serversToTry) == 0 {
p.serversToTry = p.proxy.Config().Try
}
sameName := func(rs RegisteredServer, name string) bool {
return rs.ServerInfo().Name() == name
}
for i := p.tryIndex; i < len(p.serversToTry); i++ {
toTry := p.serversToTry[i]
if (p.connectedServer_ != nil && sameName(p.connectedServer_.Server(), toTry)) ||
(p.connInFlight != nil && sameName(p.connInFlight.Server(), toTry)) ||
(current != nil && sameName(current, toTry)) {
continue
}
p.tryIndex = i
if s := p.proxy.Server(toTry); s != nil {
return s
}
}
return nil
}
// player's connection is closed at this point,
// now need to disconnect backend server connection, if any.
func (p *connectedPlayer) teardown() {
p.mu.RLock()
connInFlight := p.connInFlight
connectedServer := p.connectedServer_
p.mu.RUnlock()
if connInFlight != nil {
connInFlight.disconnect()
}
if connectedServer != nil {
connectedServer.disconnect()
}
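// Work out which LoginStatus to report: if the proxy still had this connection registered, the login
// either completed normally or was displaced by a duplicate login; otherwise it was cancelled by the proxy or by the user.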
var status LoginStatus
if p.proxy.unregisterConnection(p) {
if p.disconnectDueToDuplicateConnection.Load() {
status = ConflictingLoginStatus
} else {
status = SuccessfulLoginStatus
}
} else {
if p.knownDisconnect.Load() {
status = CanceledByProxyLoginStatus
} else {
status = CanceledByUserLoginStatus
}
}
p.proxy.event.Fire(&DisconnectEvent{
player: p,
loginStatus: status,
})
}
// may be nil!
func (p *connectedPlayer) CurrentServer() ServerConnection {
if cs := p.co | nnectedServer(); cs != nil {
return cs
}
// We must return an explicit nil, not a (*serverConnection)(nil).
return nil
}
func (p *conne | identifier_body |
|
player.go | to the user. If at all possible,
// send the resource pack with a sha1 hash using SendResourcePackWithHash. To monitor the status
// of the sent resource pack, subscribe to PlayerResourcePackStatusEvent.
SendResourcePack(url string) error
// SendResourcePackWithHash sends the specified resource pack from url to the user,
// using the specified 20-byte SHA-1 hash of the resource pack file. To monitor the
// status of the sent resource pack, subscribe to PlayerResourcePackStatusEvent.
SendResourcePackWithHash(url string, sha1Hash []byte) error
// SendActionBar sends an action bar to the player.
SendActionBar(msg component.Component) error
// SendMessageWith sends a chat message with optional modifications.
SendMessageWith(msg component.Component, opts ...MessageOption) error
player.TabList
// TODO add title and more
}
type connectedPlayer struct {
*minecraftConn
log logr.Logger
virtualHost net.Addr
onlineMode bool
profile *profile.GameProfile
ping atomic.Duration
permFunc permission.Func
// This field is true if this connection is being disconnected
// due to another connection logging in with the same GameProfile.
disconnectDueToDuplicateConnection atomic.Bool
pluginChannelsMu sync.RWMutex // Protects following field
pluginChannels sets.String // Known plugin channels
*tabList // Player's tab list
mu sync.RWMutex // Protects following fields
connectedServer_ *serverConnection
connInFlight *serverConnection
settings player.Settings
modInfo *modinfo.ModInfo
connPhase clientConnectionPhase
serversToTry []string // names of servers to try if we got disconnected from previous
tryIndex int
}
var _ Player = (*connectedPlayer)(nil)
func newConnectedPlayer(
conn *minecraftConn,
profile *profile.GameProfile,
virtualHost net.Addr,
onlineMode bool,
) *connectedPlayer {
ping := atomic.Duration{}
ping.Store(-1)
return &connectedPlayer{
minecraftConn: conn,
log: conn.log.WithName("player").WithValues(
"name", profile.Name, "id", profile.ID),
profile: profile,
virtualHost: virtualHost,
onlineMode: onlineMode,
pluginChannels: sets.NewString(), // Should we limit the size to 1024 channels?
connPhase: conn.Type().initialClientPhase(),
ping: ping,
tabList: newTabList(conn),
permFunc: func(string) permission.TriState { return permission.Undefined },
}
}
func (p *connectedPlayer) connectionInFlight() *serverConnection {
p.mu.RLock()
defer p.mu.RUnlock()
return p.connInFlight
}
func (p *connectedPlayer) phase() clientConnectionPhase {
p.mu.RLock()
defer p.mu.RUnlock()
return p.connPhase
}
func (p *connectedPlayer) HasPermission(permission string) bool {
return p.PermissionValue(permission).Bool()
}
func (p *connectedPlayer) PermissionValue(permission string) permission.TriState {
return p.permFunc(permission)
}
func (p *connectedPlayer) Ping() time.Duration {
return p.ping.Load()
}
func (p *connectedPlayer) OnlineMode() bool {
return p.onlineMode
}
func (p *connectedPlayer) GameProfile() profile.GameProfile {
return *p.profile
}
var (
ErrNoBackendConnection = errors.New("player has no backend server connection yet")
ErrTooLongChatMessage = errors.New("server bound chat message can not exceed 256 characters")
)
func (p *connectedPlayer) SpoofChatInput(input string) error {
if len(input) > packet.MaxServerBoundMessageLength {
return ErrTooLongChatMessage
}
serverMc, ok := p.ensureBackendConnection()
if !ok {
return ErrNoBackendConnection
}
return serverMc.WritePacket(&packet.Chat{
Message: input,
Type: packet.ChatMessageType,
})
}
func (p *connectedPlayer) ensureBackendConnection() (*minecraftConn, bool) {
p.mu.RLock()
defer p.mu.RUnlock()
if p.connectedServer_ == nil {
// Player has no backend connection.
return nil, false
}
serverMc := p.connectedServer_.conn()
if serverMc == nil {
// Player's backend connection is not yet connected to a server.
return nil, false
}
return serverMc, true
}
func (p *connectedPlayer) SendResourcePack(url string) error {
return p.WritePacket(&packet.ResourcePackRequest{
Url: url,
Hash: "",
})
}
func (p *connectedPlayer) SendResourcePackWithHash(url string, sha1Hash []byte) error {
if len(sha1Hash) != 20 {
return errors.New("hash length must be 20")
}
return p.WritePacket(&packet.ResourcePackRequest{
Url: url,
Hash: hex.EncodeToString(sha1Hash),
})
}
func (p *connectedPlayer) VirtualHost() net.Addr {
return p.virtualHost
}
func (p *connectedPlayer) Active() bool {
return !p.minecraftConn.Closed()
}
// MessageOption is an option for Player.SendMessageWith.
type MessageOption func(c *packet.Chat)
// MessageWithSender modifies the sender identity of the chat message.
func MessageWithSender(id uuid.UUID) MessageOption {
return func(c *packet.Chat) { c.Sender = id }
}
// MessageType is a chat message type.
type MessageType uint8
// Chat message types.
const (
// ChatMessageType is a standard chat message.
ChatMessageType MessageType = iota
// SystemMessageType is a system chat message.
// e.g. client is willing to accept messages from commands,
// but does not want general chat from other players.
SystemMessageType
)
// MessageWithType modifies chat message type.
func MessageWithType(t MessageType) MessageOption {
return func(c *packet.Chat) {
if t == SystemMessageType {
c.Type = packet.SystemMessageType
} else {
c.Type = packet.ChatMessageType
}
}
}
func (p *connectedPlayer) SendMessage(msg component.Component) error { return p.SendMessageWith(msg) }
func (p *connectedPlayer) SendMessageWith(msg component.Component, opts ...MessageOption) error {
if msg == nil {
return nil // skip nil message
}
m := new(strings.Builder)
if err := util.JsonCodec(p.Protocol()).Marshal(m, msg); err != nil {
return err
}
chat := &packet.Chat{
Message: m.String(),
Type: packet.ChatMessageType,
Sender: uuid.Nil,
}
for _, o := range opts {
o(chat)
}
return p.WritePacket(chat)
}
var legacyJsonCodec = &legacy.Legacy{}
func (p *connectedPlayer) SendActionBar(msg component.Component) error {
if msg == nil {
return nil // skip nil message
}
protocol := p.Protocol()
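// 1.11+ clients understand the dedicated action bar title action; older clients fall through to the legacy chat workaround below.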
if protocol.GreaterEqual(version.Minecraft_1_11) {
// Use the title packet instead.
pkt, err := title.New(protocol, &title.Builder{
Action: title.SetActionBar,
Component: msg,
})
if err != nil {
return err
}
return p.WritePacket(pkt)
}
// Due to issues with action bar packets, we'll need to convert the text message into a
// legacy message and then put the legacy text into a component... (╯°□°)╯︵ ┻━┻!
b := new(strings.Builder)
if err := legacyJsonCodec.Marshal(b, msg); err != nil {
return err
}
m, err := json.Marshal(map[string]string{"text": b.String()})
if err != nil {
return err
}
return p.WritePacket(&packet.Chat{
Message: string(m),
Type: packet.GameInfoMessageType,
Sender: uuid.Nil,
})
}
func (p *connectedPlayer) SendPluginMessage(identifier message.ChannelIdentifier, data []byte) error {
return p.WritePacket(&plugin.Message{
Channel: identifier.ID(),
Data: data,
})
}
// TODO add header/footer, title & boss bar methods
// Finds another server to attempt to log into, if we were unexpectedly disconnected from the server.
// current is the server the player is currently on; we skip it and do not connect to it again.
// current can be nil if there is no current server.
// MAY RETURN NIL if no next server available!
func (p *connectedPlayer) nextServerToTry(current RegisteredServer) RegisteredServer {
p.mu.Lock()
defer p.mu.Unlock()
if len(p.serversToTry) == 0 {
p.serversToTry = p.proxy.Config().ForcedHosts[p.virtualHost.String()]
}
if len(p.serversToTry) == 0 {
p.serversToTry = p.proxy.Config().Try
}
sameName := func(rs RegisteredServer, name string) bool {
return rs.ServerInfo().Name() == name
}
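// Walk the remaining candidates from tryIndex onwards, skipping the server the player is already on,
// the one currently being connected to, and the server we were just disconnected from.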
for i := p.tryIndex; i < len(p.serversToTry); i++ {
toTry := p.serversToTry[i]
if (p.connectedServer_ != nil && sameName(p.connectedServer_.Server(), toTry)) ||
(p.connInFlight != nil && sameName(p.connInFlight.Server(), toTry)) ||
(current != nil && sameName(current, toTry)) {
continue
| }
p.tryIndex = | conditional_block |
|
dream.go | (t time.Time) time.Time {
return t.Truncate(24 * time.Hour)
}
func now() string {
return time.Now().Format(time.Kitchen)
}
// Dream is exported so it can be an api, haha what fun. Games perhaps? Stock trading? Some real time video effect?
func Dream(c *gin.Context) {
start := time.Now()
defer func() {
elapsed := fmt.Sprintf("%s %s", now(), time.Since(start))
elapsed = strings.Split(elapsed, ".")[0] + "s"
Log.Info("job took ", elapsed)
mel.Broadcast([]byte(elapsed))
}()
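// mel is assumed to be the shared melody websocket hub; broadcasting the elapsed time lets the web UI report how long the job took.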
yt := c.PostForm("yt")
fps := c.PostForm("fps")
ov := c.PostForm("ov") //data the user uploaded we want
ovf := c.PostForm("ovf")
of := c.PostForm("of")
oo := c.PostForm("oo")
it := c.PostForm("iterations")
oc := c.PostForm("octaves")
la := c.PostForm("layer")
rl := c.PostForm("rl")
Log.Info("rl: ", rl)
ow := c.PostForm("ow")
li := c.PostForm("li")
iw := c.PostForm("iw")
rle := c.PostForm("rle")
ocs := c.PostForm("ocscale")
// stretch:=c.Postform("stretchvideo")
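// The remaining form values are tuning knobs for the dreamer (iterations, octaves, layer choice, randomization)
// and are handed as CLI flags to the Python script invoked further down.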
isJob = true
defer func() {
isJob = false
}()
Log.WithFields(logrus.Fields{
"event": "new job started",
}).Info("new job started")
jobLog.WithFields(logrus.Fields{
"time": time.Now().UTC().UnixNano(),
"title": name,
"fps": fps,
"it": it,
"oc": oc,
"la": la,
"rl": rl,
"ow": ow,
"li": li,
"iw": iw,
"rle": rle,
})
Log.Info("base path is ", basePath)
newJobLog(name)
//let's save interesting job metadata for the user in a tidy format (err logs, srv logs kept with the binary or maybe put in bind dir? wip)
jobLog.WithFields(logrus.Fields{
"fps": fps,
"iterations": it,
"octaves": oc,
"layer": la,
"linear increase": li,
"iteration waver": iw,
"octave waver": ow,
"randomization type": rl,
"random layer every n frames": rle,
}).Info("job name: ", name)
//
var uploadedFile, framesDirPath string
var name, fullName, ext string
if yt != "" { //if "yt" checkbox checked
youtubeDl := goydl.NewYoutubeDl()
for { //we loop until we got an acceptable ytURL
fmt.Println("waiting...")
youtubeDl.VideoURL = ytURL
fmt.Println("videoURL:", ytURL)
if ytURL == "" { //we didn't get a url, so just cancel the job
Log.Info("the url was blank (therefore no good ytURL yet), so just cancel the job")
return
}
info, err := youtubeDl.GetInfo()
if err != nil {
Log.WithFields(logrus.Fields{
"event": "ytdl",
"error": err,
}).Error("we should never fail here")
continue
}
fmt.Println(youtubeDl.VideoURL, "blah")
ext = info.Ext
name = strings.Split(info.Title, " ")[0]
fullName = name + ".mp4"
if alreadyHave(basePath + "/frames/" + name) {
name = renamer(name)
fullName = name + ".mp4"
Log.Info("\nwe renamed as: ", fullName)
}
uploadedFile := fmt.Sprintf("%s/frames/%s/%s.mp4", basePath, name, name)
fmt.Println("uploaded file: ", uploadedFile)
youtubeDl.Options.Output.Value = uploadedFile
youtubeDl.Options.Format.Value = "mp4"
cmd, err := youtubeDl.Download(youtubeDl.VideoURL)
if err != nil {
Log.WithFields(logrus.Fields{
"event": "error",
"err": err,
"uploadedFile": uploadedFile,
}).Error("dl'ing from yt failed w err")
} else {
Log.WithFields(logrus.Fields{
"event": "download",
"path": uploadedFile,
}).Info("downloaded a yt video")
println("starting download")
cmd.Wait()
println("finished download")
// make new folder for job
framesDirPath = fmt.Sprintf("%s/frames/%s", basePath, name)
if _, err := os.Stat(framesDirPath); os.IsNotExist(err) {
if err = os.Mkdir(framesDirPath, 0777); err != nil {
Log.Error("failed to make a new job dir w/ error: ", err)
}
Log.Info("frames folder for new job was created at ", framesDirPath)
}
break //we got our file, now we move on, we don't need to keep listening for URL
}
}
} else { // if no youtube, then get file from form upload
file, err := c.FormFile("file")
if err != nil {
Log.Error("failed to get file", err) //although this might not be an error as we support ytdl now
return
}
name = strings.Split(file.Filename, ".")[0]
fullName = file.Filename
ext = strings.Split(fullName, ".")[1]
if alreadyHave(basePath + "/frames/" + name) {
name = renamer(name)
fullName = name + "." + strings.Split(file.Filename, ".")[1]
Log.Info("\nwe renamed as: ", fullName)
}
// make new folder for job
framesDirPath = fmt.Sprintf("%s/frames/%s", basePath, name)
if _, err := os.Stat(framesDirPath); os.IsNotExist(err) {
if err = os.Mkdir(framesDirPath, 0777); err != nil {
Log.Error("failed to make a new job dir w/ error: ", err)
}
Log.Info("frames folder for new job was created at ", framesDirPath)
}
uploadedFile = fmt.Sprintf("%s/%s", framesDirPath, fullName)
if err := c.SaveUploadedFile(file, uploadedFile); err != nil {
Log.Error("failed to save file at path ", uploadedFile, " err is: ", err)
} else {
Log.Info("saved file at path ", uploadedFile)
}
}
// make a new output folder
outputPath := fmt.Sprintf("%s/output", framesDirPath)
if _, err := os.Stat(outputPath); os.IsNotExist(err) {
os.Mkdir(outputPath, 0777)
Log.Info("output folder for new job was created at ", outputPath)
}
Log.Info("saved output dir at path ", outputPath)
uploadedFile = fmt.Sprintf("%s/%s", framesDirPath, fullName)
mel.Broadcast([]byte(name))
itsAVideo := false
// decide what to do with the file we've gotten, if it's an image:
if ext == "png" { //it's perfect, leave it alone...
} else if ext == "jpg" || ext == "jpeg" {
cmd, err := exec.Command("ffmpeg", "-i", uploadedFile, framesDirPath+"/"+name+".png").CombinedOutput()
if err != nil {
Log.Error("oops, failed trying to make some image of ext ", ext, " to png")
} else {
Log.Info("that's great, we got an image, those are easy, ffmpeg said:", string(cmd))
}
} else if ext == "gif" {
itsAVideo = true
Log.Info("trying to convert a gif")
// ffmpeg -f gif -i giphy-downsized.gif -pix_fmt yuv420p -c:v libx264 -movflags +faststart -filter:v crop='floor(in_w/2)*2:floor(in_h/2)*2' BAR.mp4
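// The crop filter rounds width and height down to the nearest even number, which libx264 needs for yuv420p output.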
savedMp4 := fmt.Sprintf("%s/frames/%s/%s.mp4", basePath, name, name)
cmd := exec.Command("ffmpeg", "-f", "gif", "-i", uploadedFile, "-pix_fmt", "yuv420p", "-c:v", "libx264", "-movflags", "+faststart", "-filter:v", "crop='floor(in_w/2)*2:floor(in_h/2)*2'", savedMp4)
cmd.Stdin = strings.NewReader("")
var out bytes.Buffer
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
Log.Error("failed to make mp4 from gif ", err)
} else {
uploadedFile = strings.Split(uploadedFile, ".")[0] + ".mp4"
Log.Info(" | Truncate | identifier_name |
|
dream.go | le,
})
Log.Info("base path is ", basePath)
newJobLog(name)
//let's save interesting job metadata for the user in a tidy format (err logs, srv logs kept with the binary or maybe put in bind dir? wip)
jobLog.WithFields(logrus.Fields{
"fps": fps,
"iterations": it,
"octaves": oc,
"layer": la,
"linear increase": li,
"iteration waver": iw,
"octave waver": ow,
"randomization type": rl,
"random layer every n frames": rle,
}).Info("job name: ", name)
//
var uploadedFile, framesDirPath string
var name, fullName, ext string
if yt != "" { //if "yt" checkbox checked
youtubeDl := goydl.NewYoutubeDl()
for { //we loop until we got an acceptable ytURL
fmt.Println("waiting...")
youtubeDl.VideoURL = ytURL
fmt.Println("videoURL:", ytURL)
if ytURL == "" { //we didn't get a url, so just cancel the job
Log.Info("the url was blank (therefore no good ytURL yet), so just cancel the job")
return
}
info, err := youtubeDl.GetInfo()
if err != nil {
Log.WithFields(logrus.Fields{
"event": "ytdl",
"error": err,
}).Error("we should never fail here")
continue
}
fmt.Println(youtubeDl.VideoURL, "blah")
ext = info.Ext
name = strings.Split(info.Title, " ")[0]
fullName = name + ".mp4"
if alreadyHave(basePath + "/frames/" + name) {
name = renamer(name)
fullName = name + ".mp4"
Log.Info("\nwe renamed as: ", fullName)
}
uploadedFile := fmt.Sprintf("%s/frames/%s/%s.mp4", basePath, name, name)
fmt.Println("uploaded file: ", uploadedFile)
youtubeDl.Options.Output.Value = uploadedFile
youtubeDl.Options.Format.Value = "mp4"
cmd, err := youtubeDl.Download(youtubeDl.VideoURL)
if err != nil {
Log.WithFields(logrus.Fields{
"event": "error",
"err": err,
"uploadedFile": uploadedFile,
}).Error("dl'ing from yt failed w err")
} else {
Log.WithFields(logrus.Fields{
"event": "download",
"path": uploadedFile,
}).Info("downloaded a yt video")
println("starting download")
cmd.Wait()
println("finished download")
// make new folder for job
framesDirPath = fmt.Sprintf("%s/frames/%s", basePath, name)
if _, err := os.Stat(framesDirPath); os.IsNotExist(err) {
if err = os.Mkdir(framesDirPath, 0777); err != nil {
Log.Error("failed to make a new job dir w/ error: ", err)
}
Log.Info("frames folder for new job was created at ", framesDirPath)
}
break //we got our file, now we move on, we don't need to keep listening for URL | } else { // if no youtube, then get file from form upload
file, err := c.FormFile("file")
if err != nil {
Log.Error("failed to get file", err) //although this might not be an error as we support ytdl now
return
}
name = strings.Split(file.Filename, ".")[0]
fullName = file.Filename
ext = strings.Split(fullName, ".")[1]
if alreadyHave(basePath + "/frames/" + name) {
name = renamer(name)
fullName = name + "." + strings.Split(file.Filename, ".")[1]
Log.Info("\nwe renamed as: ", fullName)
}
// make new folder for job
framesDirPath = fmt.Sprintf("%s/frames/%s", basePath, name)
if _, err := os.Stat(framesDirPath); os.IsNotExist(err) {
if err = os.Mkdir(framesDirPath, 0777); err != nil {
Log.Error("failed to make a new job dir w/ error: ", err)
}
Log.Info("frames folder for new job was created at ", framesDirPath)
}
uploadedFile = fmt.Sprintf("%s/%s", framesDirPath, fullName)
if err := c.SaveUploadedFile(file, uploadedFile); err != nil {
Log.Error("failed to save file at path ", uploadedFile, " err is: ", err)
} else {
Log.Info("saved file at path ", uploadedFile)
}
}
// make a new output folder
outputPath := fmt.Sprintf("%s/output", framesDirPath)
if _, err := os.Stat(outputPath); os.IsNotExist(err) {
os.Mkdir(outputPath, 0777)
Log.Info("output folder for new job was created at ", outputPath)
}
Log.Info("saved output dir at path ", outputPath)
uploadedFile = fmt.Sprintf("%s/%s", framesDirPath, fullName)
mel.Broadcast([]byte(name))
itsAVideo := false
// decide what to do with the file we've gotten, if it's an image:
if ext == "png" { //it's perfect, leave it alone...
} else if ext == "jpg" || ext == "jpeg" {
cmd, err := exec.Command("ffmpeg", "-i", uploadedFile, framesDirPath+"/"+name+".png").CombinedOutput()
if err != nil {
Log.Error("oops, failed trying to make some image of ext ", ext, " to png")
} else {
Log.Info("that's great, we got an image, those are easy, ffmpeg said:", string(cmd))
}
} else if ext == "gif" {
itsAVideo = true
Log.Info("trying to convert a gif")
// ffmpeg -f gif -i giphy-downsized.gif -pix_fmt yuv420p -c:v libx264 -movflags +faststart -filter:v crop='floor(in_w/2)*2:floor(in_h/2)*2' BAR.mp4
savedMp4 := fmt.Sprintf("%s/frames/%s/%s.mp4", basePath, name, name)
cmd := exec.Command("ffmpeg", "-f", "gif", "-i", uploadedFile, "-pix_fmt", "yuv420p", "-c:v", "libx264", "-movflags", "+faststart", "-filter:v", "crop='floor(in_w/2)*2:floor(in_h/2)*2'", savedMp4)
cmd.Stdin = strings.NewReader("")
var out bytes.Buffer
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
Log.Error("failed to make mp4 from gif ", err)
} else {
uploadedFile = strings.Split(uploadedFile, ".")[0] + ".mp4"
Log.Info("made mp4 from GIF")
}
} else { // if file not gif or img try to make it mp4
itsAVideo = true
Log.Info("ext: ", ext)
Log.Info("file.filename ", fullName)
if ext != "mp4" {
cmd, err := exec.Command("ffmpeg", "-i", uploadedFile, strings.Split(uploadedFile, ".")[0]+".mp4").CombinedOutput()
if err != nil {
Log.Error("failed to make a .any to .mp4 , ", err)
} else {
Log.Info("made a ", ext, " into .mp4 with cmd ", string(cmd))
err := os.Remove(uploadedFile)
if err != nil {
Log.Info("err removing original .mp4 as err: ", err)
} else {
uploadedFile = strings.Split(uploadedFile, ".")[0] + ".mp4"
Log.Info("deleted original at ext: ", ext)
}
}
}
}
// open finder
if of == "of" {
open.Run(framesDirPath)
}
if oo == "oo" {
open.Run(outputPath)
}
if itsAVideo {
// create frames from mp4
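// fps decides how many frames per second get sampled out of the video, i.e. how many images the dreamer has to process.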
framesOut := fmt.Sprintf("%s/frames/%s/%s.png", basePath, name, "%d")
Log.Info("framesOut: ", framesOut)
cmd, err := exec.Command("ffmpeg", "-i", uploadedFile, "-vf", "fps="+fps, "-c:v", "png", framesOut).CombinedOutput()
if err != nil {
Log.Error("failed to make frames", err)
} else {
Log.Info("made frames from MP4 with cmd: ", string(cmd))
}
}
Log.Info("entering dreamer goroutine")
// deep dream the frames
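// folder.py is presumably the bundled deep-dream script; the Go side just points it at the frames directory and passes the tuning flags through.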
cmd, err := exec.Command("python3", "folder.py", "--input", framesDirPath, "-os", ocs, "-it | }
} | random_line_split |
dream.go |
func now() string {
return time.Now().Format(time.Kitchen)
}
// Dream is exported so it can be an api, haha what fun. Games perhaps? Stock trading? Some real time video effect?
func Dream(c *gin.Context) {
start := time.Now()
defer func() {
elapsed := fmt.Sprintf("%s %s", now(), time.Since(start))
elapsed = strings.Split(elapsed, ".")[0] + "s"
Log.Info("job took ", elapsed)
mel.Broadcast([]byte(elapsed))
}()
yt := c.PostForm("yt")
fps := c.PostForm("fps")
ov := c.PostForm("ov") //data the user uploaded we want
ovf := c.PostForm("ovf")
of := c.PostForm("of")
oo := c.PostForm("oo")
it := c.PostForm("iterations")
oc := c.PostForm("octaves")
la := c.PostForm("layer")
rl := c.PostForm("rl")
Log.Info("rl: ", rl)
ow := c.PostForm("ow")
li := c.PostForm("li")
iw := c.PostForm("iw")
rle := c.PostForm("rle")
ocs := c.PostForm("ocscale")
// stretch:=c.Postform("stretchvideo")
isJob = true
defer func() {
isJob = false
}()
Log.WithFields(logrus.Fields{
"event": "new job started",
}).Info("new job started")
jobLog.WithFields(logrus.Fields{
"time": time.Now().UTC().UnixNano(),
"title": name,
"fps": fps,
"it": it,
"oc": oc,
"la": la,
"rl": rl,
"ow": ow,
"li": li,
"iw": iw,
"rle": rle,
})
Log.Info("base path is ", basePath)
newJobLog(name)
//let's save interesting job metadata for the user in a tidy format (err logs, srv logs kept with the binary or maybe put in bind dir? wip)
jobLog.WithFields(logrus.Fields{
"fps": fps,
"iterations": it,
"octaves": oc,
"layer": la,
"linear increase": li,
"iteration waver": iw,
"octave waver": ow,
"randomization type": rl,
"random layer every n frames": rle,
}).Info("job name: ", name)
//
var uploadedFile, framesDirPath string
var name, fullName, ext string
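// Source material arrives one of two ways: a YouTube URL (ytURL, set elsewhere, presumably by the websocket handler) or a direct file upload from the form.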
if yt != "" { //if "yt" checkbox checked
youtubeDl := goydl.NewYoutubeDl()
for { //we loop until we got an acceptable ytURL
fmt.Println("waiting...")
youtubeDl.VideoURL = ytURL
fmt.Println("videoURL:", ytURL)
if ytURL == "" { //we didn't get a url, so just cancel the job
Log.Info("the url was blank (therefore no good ytURL yet), so just cancel the job")
return
}
info, err := youtubeDl.GetInfo()
if err != nil {
Log.WithFields(logrus.Fields{
"event": "ytdl",
"error": err,
}).Error("we should never fail here")
continue
}
fmt.Println(youtubeDl.VideoURL, "blah")
ext = info.Ext
name = strings.Split(info.Title, " ")[0]
fullName = name + ".mp4"
if alreadyHave(basePath + "/frames/" + name) {
name = renamer(name)
fullName = name + ".mp4"
Log.Info("\nwe renamed as: ", fullName)
}
uploadedFile := fmt.Sprintf("%s/frames/%s/%s.mp4", basePath, name, name)
fmt.Println("uploaded file: ", uploadedFile)
youtubeDl.Options.Output.Value = uploadedFile
youtubeDl.Options.Format.Value = "mp4"
cmd, err := youtubeDl.Download(youtubeDl.VideoURL)
if err != nil {
Log.WithFields(logrus.Fields{
"event": "error",
"err": err,
"uploadedFile": uploadedFile,
}).Error("dl'ing from yt failed w err")
} else {
Log.WithFields(logrus.Fields{
"event": "download",
"path": uploadedFile,
}).Info("downloaded a yt video")
println("starting download")
cmd.Wait()
println("finished download")
// make new folder for job
framesDirPath = fmt.Sprintf("%s/frames/%s", basePath, name)
if _, err := os.Stat(framesDirPath); os.IsNotExist(err) {
if err = os.Mkdir(framesDirPath, 0777); err != nil {
Log.Error("failed to make a new job dir w/ error: ", err)
}
Log.Info("frames folder for new job was created at ", framesDirPath)
}
break //we got our file, now we move on, we don't need to keep listening for URL
}
}
} else { // if no youtube, then get file from form upload
file, err := c.FormFile("file")
if err != nil {
Log.Error("failed to get file", err) //although this might not be an error as we support ytdl now
return
}
name = strings.Split(file.Filename, ".")[0]
fullName = file.Filename
ext = strings.Split(fullName, ".")[1]
if alreadyHave(basePath + "/frames/" + name) {
name = renamer(name)
fullName = name + "." + strings.Split(file.Filename, ".")[1]
Log.Info("\nwe renamed as: ", fullName)
}
// make new folder for job
framesDirPath = fmt.Sprintf("%s/frames/%s", basePath, name)
if _, err := os.Stat(framesDirPath); os.IsNotExist(err) {
if err = os.Mkdir(framesDirPath, 0777); err != nil {
Log.Error("failed to make a new job dir w/ error: ", err)
}
Log.Info("frames folder for new job was created at ", framesDirPath)
}
uploadedFile = fmt.Sprintf("%s/%s", framesDirPath, fullName)
if err := c.SaveUploadedFile(file, uploadedFile); err != nil {
Log.Error("failed to save file at path ", uploadedFile, " err is: ", err)
} else {
Log.Info("saved file at path ", uploadedFile)
}
}
// make a new output folder
outputPath := fmt.Sprintf("%s/output", framesDirPath)
if _, err := os.Stat(outputPath); os.IsNotExist(err) {
os.Mkdir(outputPath, 0777)
Log.Info("output folder for new job was created at ", outputPath)
}
Log.Info("saved output dir at path ", outputPath)
uploadedFile = fmt.Sprintf("%s/%s", framesDirPath, fullName)
mel.Broadcast([]byte(name))
itsAVideo := false
// decide what to do with the file we've gotten, if it's an image:
if ext == "png" { //it's perfect, leave it alone...
} else if ext == "jpg" || ext == "jpeg" {
cmd, err := exec.Command("ffmpeg", "-i", uploadedFile, framesDirPath+"/"+name+".png").CombinedOutput()
if err != nil {
Log.Error("oops, failed trying to make some image of ext ", ext, " to png")
} else {
Log.Info("that's great, we got an image, those are easy, ffmpeg said:", string(cmd))
}
} else if ext == "gif" {
itsAVideo = true
Log.Info("trying to convert a gif")
// ffmpeg -f gif -i giphy-downsized.gif -pix_fmt yuv420p -c:v libx264 -movflags +faststart -filter:v crop='floor(in_w/2)*2:floor(in_h/2)*2' BAR.mp4
savedMp4 := fmt.Sprintf("%s/frames/%s/%s.mp4", basePath, name, name)
cmd := exec.Command("ffmpeg", "-f", "gif", "-i", uploadedFile, "-pix_fmt", "yuv420p", "-c:v", "libx264", "-movflags", "+faststart", "-filter:v", "crop='floor(in_w/2)*2:floor(in_h/2)*2'", savedMp4)
cmd.Stdin = strings.NewReader("")
var out bytes.Buffer
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
Log.Error("failed to make mp4 from gif ", err)
} else {
uploadedFile = strings.Split(uploadedFile, ".")[0] + ".mp4"
Log.Info("made mp4 from GIF")
| {
return t.Truncate(24 * time.Hour)
} | identifier_body |
|
dream.go | ,
})
Log.Info("base path is ", basePath)
newJobLog(name)
//let's save interesting job metadata for the user in a tidy format (err logs, srv logs kept with the binary or maybe put in bind dir? wip)
jobLog.WithFields(logrus.Fields{
"fps": fps,
"iterations": it,
"octaves": oc,
"layer": la,
"linear increase": li,
"iteration waver": iw,
"octave waver": ow,
"randomization type": rl,
"random layer every n frames": rle,
}).Info("job name: ", name)
//
var uploadedFile, framesDirPath string
var name, fullName, ext string
if yt != "" { //if "yt" checkbox checked
youtubeDl := goydl.NewYoutubeDl()
for { //we loop until we got an acceptable ytURL
fmt.Println("waiting...")
youtubeDl.VideoURL = ytURL
fmt.Println("videoURL:", ytURL)
if ytURL == "" { //we didn't get a url, so just cancel the job
Log.Info("the url was blank (therefore no good ytURL yet), so just cancel the job")
return
}
info, err := youtubeDl.GetInfo()
if err != nil {
Log.WithFields(logrus.Fields{
"event": "ytdl",
"error": err,
}).Error("we should never fail here")
continue
}
fmt.Println(youtubeDl.VideoURL, "blah")
ext = info.Ext
name = strings.Split(info.Title, " ")[0]
fullName = name + ".mp4"
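// alreadyHave/renamer guard against clobbering a previous job that happened to use the same video title.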
if alreadyHave(basePath + "/frames/" + name) {
name = renamer(name)
fullName = name + ".mp4"
Log.Info("\nwe renamed as: ", fullName)
}
uploadedFile := fmt.Sprintf("%s/frames/%s/%s.mp4", basePath, name, name)
fmt.Println("uploaded file: ", uploadedFile)
youtubeDl.Options.Output.Value = uploadedFile
youtubeDl.Options.Format.Value = "mp4"
cmd, err := youtubeDl.Download(youtubeDl.VideoURL)
if err != nil {
Log.WithFields(logrus.Fields{
"event": "error",
"err": err,
"uploadedFile": uploadedFile,
}).Error("dl'ing from yt failed w err")
} else {
Log.WithFields(logrus.Fields{
"event": "download",
"path": uploadedFile,
}).Info("downloaded a yt video")
println("starting download")
cmd.Wait()
println("finished download")
// make new folder for job
framesDirPath = fmt.Sprintf("%s/frames/%s", basePath, name)
if _, err := os.Stat(framesDirPath); os.IsNotExist(err) |
break //we got our file, now we move on, we don't need to keep listening for URL
}
}
} else { // if no youtube, then get file from form upload
file, err := c.FormFile("file")
if err != nil {
Log.Error("failed to get file", err) //although this might not be an error as we support ytdl now
return
}
name = strings.Split(file.Filename, ".")[0]
fullName = file.Filename
ext = strings.Split(fullName, ".")[1]
if alreadyHave(basePath + "/frames/" + name) {
name = renamer(name)
fullName = name + "." + strings.Split(file.Filename, ".")[1]
Log.Info("\nwe renamed as: ", fullName)
}
// make new folder for job
framesDirPath = fmt.Sprintf("%s/frames/%s", basePath, name)
if _, err := os.Stat(framesDirPath); os.IsNotExist(err) {
if err = os.Mkdir(framesDirPath, 0777); err != nil {
Log.Error("failed to make a new job dir w/ error: ", err)
}
Log.Info("frames folder for new job was created at ", framesDirPath)
}
uploadedFile = fmt.Sprintf("%s/%s", framesDirPath, fullName)
if err := c.SaveUploadedFile(file, uploadedFile); err != nil {
Log.Error("failed to save file at path ", uploadedFile, " err is: ", err)
} else {
Log.Info("saved file at path ", uploadedFile)
}
}
// make a new output folder
outputPath := fmt.Sprintf("%s/output", framesDirPath)
if _, err := os.Stat(outputPath); os.IsNotExist(err) {
os.Mkdir(outputPath, 0777)
Log.Info("output folder for new job was created at ", outputPath)
}
Log.Info("saved output dir at path ", outputPath)
uploadedFile = fmt.Sprintf("%s/%s", framesDirPath, fullName)
mel.Broadcast([]byte(name))
itsAVideo := false
// decide what to do with the file we've gotten, if it's an image:
if ext == "png" { //it's perfect, leave it alone...
} else if ext == "jpg" || ext == "jpeg" {
cmd, err := exec.Command("ffmpeg", "-i", uploadedFile, framesDirPath+"/"+name+".png").CombinedOutput()
if err != nil {
Log.Error("oops, failed trying to make some image of ext ", ext, " to png")
} else {
Log.Info("that's great, we got an image, those are easy, ffmpeg said:", string(cmd))
}
} else if ext == "gif" {
itsAVideo = true
Log.Info("trying to convert a gif")
// ffmpeg -f gif -i giphy-downsized.gif -pix_fmt yuv420p -c:v libx264 -movflags +faststart -filter:v crop='floor(in_w/2)*2:floor(in_h/2)*2' BAR.mp4
savedMp4 := fmt.Sprintf("%s/frames/%s/%s.mp4", basePath, name, name)
cmd := exec.Command("ffmpeg", "-f", "gif", "-i", uploadedFile, "-pix_fmt", "yuv420p", "-c:v", "libx264", "-movflags", "+faststart", "-filter:v", "crop='floor(in_w/2)*2:floor(in_h/2)*2'", savedMp4)
cmd.Stdin = strings.NewReader("")
var out bytes.Buffer
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
Log.Error("failed to make mp4 from gif ", err)
} else {
uploadedFile = strings.Split(uploadedFile, ".")[0] + ".mp4"
Log.Info("made mp4 from GIF")
}
} else { // if file not gif or img try to make it mp4
itsAVideo = true
Log.Info("ext: ", ext)
Log.Info("file.filename ", fullName)
if ext != "mp4" {
cmd, err := exec.Command("ffmpeg", "-i", uploadedFile, strings.Split(uploadedFile, ".")[0]+".mp4").CombinedOutput()
if err != nil {
Log.Error("failed to make a .any to .mp4 , ", err)
} else {
Log.Info("made a ", ext, " into .mp4 with cmd ", string(cmd))
err := os.Remove(uploadedFile)
if err != nil {
Log.Info("err removing original .mp4 as err: ", err)
} else {
uploadedFile = strings.Split(uploadedFile, ".")[0] + ".mp4"
Log.Info("deleted original at ext: ", ext)
}
}
}
}
// open finder
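// "of" and "oo" are form checkboxes: open the frames folder and/or the output folder in the system file browser.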
if of == "of" {
open.Run(framesDirPath)
}
if oo == "oo" {
open.Run(outputPath)
}
if itsAVideo {
// create frames from mp4
framesOut := fmt.Sprintf("%s/frames/%s/%s.png", basePath, name, "%d")
Log.Info("framesOut: ", framesOut)
cmd, err := exec.Command("ffmpeg", "-i", uploadedFile, "-vf", "fps="+fps, "-c:v", "png", framesOut).CombinedOutput()
if err != nil {
Log.Error("failed to make frames", err)
} else {
Log.Info("made frames from MP4 with cmd: ", string(cmd))
}
}
Log.Info("entering dreamer goroutine")
// deep dream the frames
cmd, err := exec.Command("python3", "folder.py", "--input", framesDirPath, "-os", ocs, | {
if err = os.Mkdir(framesDirPath, 0777); err != nil {
Log.Error("failed to make a new job dir w/ error: ", err)
}
Log.Info("frames folder for new job was created at ", framesDirPath)
} | conditional_block |
buffer.go | have to look in our trusy
// shift mapping thing.
if val, ok := shiftAlternative[r]; ok {
r = val
}
}
}
// NOTE: we have to do this AFTER we map the
// shift combo for the value!
// this will not insert a ), }, or ] if there
// is one to the right of us... basically
// this escapes out of a closing bracket
// rather than inserting a new one IF we are inside
// brackets.
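// e.g. typing ')' while the caret sits just before an existing ')' simply steps over it instead of inserting a second one.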
if b.cfg.Editor.Match_Braces {
if r == ')' || r == '}' || r == ']' {
currLine := b.contents[b.curs.y]
if b.curs.x < currLine.Len() {
curr := currLine.Index(b.curs.x + 1)
if curr == r {
b.curs.move(1, 0)
return true
} else {
log.Print("no it's ", curr)
}
}
}
}
b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, string(r))
b.curs.move(1, 0)
// we don't need to match braces
// let's not continue any further
if !b.cfg.Editor.Match_Braces {
return true
}
// TODO: shall we match single quotes and double quotes too?
matchingPair := int(r)
// the offset in the ASCII Table is +2 for { and for [
// but its +1 for parenthesis (
offset := 2
switch r {
case '(':
offset = 1
fallthrough
case '{':
fallthrough
case '[':
matchingPair += offset
b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, string(rune(matchingPair)))
}
return true
}
func remove(slice []*rope.Rope, s int) []*rope.Rope {
return append(slice[:s], slice[s+1:]...)
}
func (b *Buffer) deleteNext() {
b.moveRight()
b.deletePrev()
}
func (b *Buffer) deletePrev() {
if b.curs.x > 0 {
offs := -1
if !b.cfg.Editor.Tabs_Are_Spaces {
if b.contents[b.curs.y].Index(b.curs.x) == '\t' {
offs = int(-b.cfg.Editor.Tab_Size)
}
} else if b.cfg.Editor.Hungry_Backspace && b.curs.x >= int(b.cfg.Editor.Tab_Size) {
// cut out the last {TAB_SIZE} amount of characters
// and check em
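// i.e. if everything immediately left of the cursor is exactly one soft tab's worth of spaces, delete the whole run in a single backspace.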
tabSize := int(b.cfg.Editor.Tab_Size)
lastTabSizeChars := b.contents[b.curs.y].Substr(b.curs.x+1-tabSize, tabSize).String()
if strings.Compare(lastTabSizeChars, b.makeTab()) == 0 {
// delete {TAB_SIZE} amount of characters
// from the cursors x pos
for i := 0; i < int(b.cfg.Editor.Tab_Size); i++ {
b.contents[b.curs.y] = b.contents[b.curs.y].Delete(b.curs.x, 1)
b.curs.move(-1, 0)
}
return
}
}
b.contents[b.curs.y] = b.contents[b.curs.y].Delete(b.curs.x, 1)
b.curs.moveRender(-1, 0, offs, 0)
} else if b.curs.x == 0 && b.curs.y > 0 {
// start of line, wrap to previous
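// merge this line onto the end of the previous one, drop it from the slice, and leave the cursor at the join point.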
prevLineLen := b.contents[b.curs.y-1].Len()
b.contents[b.curs.y-1] = b.contents[b.curs.y-1].Concat(b.contents[b.curs.y])
b.contents = append(b.contents[:b.curs.y], b.contents[b.curs.y+1:]...)
b.curs.move(prevLineLen, -1)
}
}
func (b *Buffer) deleteBeforeCursor() {
// delete so we're at the end
// of the previous line
if b.curs.x == 0 {
b.deletePrev()
return
}
for b.curs.x > 0 {
b.deletePrev()
}
}
func (b *Buffer) moveLeft() {
if b.curs.x == 0 && b.curs.y > 0 {
b.curs.move(b.contents[b.curs.y-1].Len(), -1)
} else if b.curs.x > 0 {
b.curs.move(-1, 0)
}
}
func (b *Buffer) moveRight() {
currLineLength := b.contents[b.curs.y].Len()
if b.curs.x >= currLineLength && b.curs.y < len(b.contents)-1 {
// we're at the end of the line and we have
// some lines after, let's wrap around
b.curs.move(0, 1)
b.curs.move(-currLineLength, 0)
} else if b.curs.x < b.contents[b.curs.y].Len() {
// we have characters to the right, let's move along
b.curs.move(1, 0)
}
}
func (b *Buffer) moveToEndOfLine() {
lineLen := b.contents[b.curs.y].Len()
if b.curs.x > lineLen {
distToMove := b.curs.x - lineLen
for i := 0; i < distToMove; i++ {
b.moveLeft()
}
}
}
func (b *Buffer) moveUp() {
if b.curs.y > 0 {
b.curs.move(0, -1)
}
}
func (b *Buffer) moveDown() {
if b.curs.y < len(b.contents) {
b.curs.move(0, 1)
}
}
func (b *Buffer) swapLineUp() bool {
if b.curs.y | urrLine := b.contents[b.curs.y]
prevLine := b.contents[b.curs.y-1]
b.contents[b.curs.y-1] = currLine
b.contents[b.curs.y] = prevLine
b.moveUp()
}
return true
}
func (b *Buffer) swapLineDown() bool {
if b.curs.y < len(b.contents)-1 {
currLine := b.contents[b.curs.y]
nextLine := b.contents[b.curs.y+1]
b.contents[b.curs.y+1] = currLine
b.contents[b.curs.y] = nextLine
b.moveDown()
}
return true
}
func (b *Buffer) scrollUp() {
if b.cam.y > 0 {
// TODO move the cursor up 45 lines
// IF the buffer exceeds the window size.
lineScrollAmount := 10
b.cam.y -= lineScrollAmount
for i := 0; i < lineScrollAmount; i++ {
b.moveUp()
}
}
}
func (b *Buffer) scrollDown() {
if b.cam.y < len(b.contents) {
// TODO move the cursor down 45 lines
// IF the buffer exceeds the window size.
lineScrollAmount := 10
b.cam.y += lineScrollAmount
for i := 0; i < lineScrollAmount; i++ {
b.moveDown()
}
}
}
// processes a key press. returns if there
// was a key that MODIFIED the buffer.
func (b *Buffer) processActionKey(key int) bool {
switch key {
case sdl.K_CAPSLOCK:
CAPS_LOCK = !CAPS_LOCK
return true
case sdl.K_RETURN:
if SUPER_DOWN {
// in sublime this goes
// into the next block
// nicely indented!
}
initial_x := b.curs.x
prevLineLen := b.contents[b.curs.y].Len()
var newRope *rope.Rope
if initial_x < prevLineLen && initial_x > 0 {
// we're not at the end of the line, but we're not at
// the start, i.e. we're SPLITTING the line
left, right := b.contents[b.curs.y].Split(initial_x)
newRope = right
b.contents[b.curs.y] = left
} else if initial_x == 0 {
// we're at the start of a line, so we want to
// shift the line down and insert an empty line
// above it!
b.contents = append(b.contents, new(rope.Rope)) // grow
copy(b.contents[b.curs.y+1:], b.contents[b.curs.y:]) // shift
b.contents[b.curs.y] = new(rope.Rope) // set
b.curs.move(0, 1)
return true
} else {
// we're at the end of a line
newRope = new(rope.Rope)
}
b.curs.move(0, 1)
for x := 0; x < initial_x; x++ {
// TODO(F | > 0 {
c | identifier_name |
buffer.go | we have to look in our trusy
// shift mapping thing.
if val, ok := shiftAlternative[r]; ok {
r = val
}
}
}
// NOTE: we have to do this AFTER we map the
// shift combo for the value!
// this will not insert a ), }, or ] if there
// is one to the right of us... basically
// this escapes out of a closing bracket
// rather than inserting a new one IF we are inside
// brackets.
if b.cfg.Editor.Match_Braces {
if r == ')' || r == '}' || r == ']' {
currLine := b.contents[b.curs.y]
if b.curs.x < currLine.Len() {
curr := currLine.Index(b.curs.x + 1)
if curr == r {
b.curs.move(1, 0)
return true
} else {
log.Print("no it's ", curr)
}
}
}
}
b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, string(r))
b.curs.move(1, 0)
// we don't need to match braces
// let's not continue any further
if !b.cfg.Editor.Match_Braces {
return true
}
// TODO: shall we match single quotes and double quotes too?
matchingPair := int(r)
// the offset in the ASCII Table is +2 for { and for [
// but its +1 for parenthesis (
offset := 2
switch r {
case '(':
offset = 1
fallthrough
case '{':
fallthrough
case '[':
matchingPair += offset
b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, string(rune(matchingPair)))
}
return true
}
func remove(slice []*rope.Rope, s int) []*rope.Rope {
return append(slice[:s], slice[s+1:]...)
}
func (b *Buffer) deleteNext() {
b.moveRight()
b.deletePrev()
}
func (b *Buffer) deletePrev() {
if b.curs.x > 0 {
offs := -1
if !b.cfg.Editor.Tabs_Are_Spaces {
if b.contents[b.curs.y].Index(b.curs.x) == '\t' {
offs = int(-b.cfg.Editor.Tab_Size)
}
} else if b.cfg.Editor.Hungry_Backspace && b.curs.x >= int(b.cfg.Editor.Tab_Size) {
// cut out the last {TAB_SIZE} amount of characters
// and check em
tabSize := int(b.cfg.Editor.Tab_Size)
lastTabSizeChars := b.contents[b.curs.y].Substr(b.curs.x+1-tabSize, tabSize).String()
if strings.Compare(lastTabSizeChars, b.makeTab()) == 0 {
// delete {TAB_SIZE} amount of characters
// from the cursors x pos
for i := 0; i < int(b.cfg.Editor.Tab_Size); i++ {
b.contents[b.curs.y] = b.contents[b.curs.y].Delete(b.curs.x, 1)
b.curs.move(-1, 0)
}
return
}
}
b.contents[b.curs.y] = b.contents[b.curs.y].Delete(b.curs.x, 1)
b.curs.moveRender(-1, 0, offs, 0)
} else if b.curs.x == 0 && b.curs.y > 0 {
// start of line, wrap to previous
prevLineLen := b.contents[b.curs.y-1].Len()
b.contents[b.curs.y-1] = b.contents[b.curs.y-1].Concat(b.contents[b.curs.y])
b.contents = append(b.contents[:b.curs.y], b.contents[b.curs.y+1:]...)
b.curs.move(prevLineLen, -1)
}
}
func (b *Buffer) deleteBeforeCursor() {
// delete so we're at the end
// of the previous line
if b.curs.x == 0 {
b.deletePrev()
return
}
for b.curs.x > 0 {
b.deletePrev()
}
}
func (b *Buffer) moveLeft() {
if b.curs.x == 0 && b.curs.y > 0 {
b.curs.move(b.contents[b.curs.y-1].Len(), -1)
} else if b.curs.x > 0 {
b.curs.move(-1, 0)
}
}
func (b *Buffer) moveRight() {
currLineLength := b.contents[b.curs.y].Len()
if b.curs.x >= currLineLength && b.curs.y < len(b.contents)-1 {
// we're at the end of the line and we have
// some lines after, let's wrap around
b.curs.move(0, 1)
b.curs.move(-currLineLength, 0)
} else if b.curs.x < b.contents[b.curs.y].Len() {
// we have characters to the right, let's move along
b.curs.move(1, 0)
}
}
func (b *Buffer) moveToEndOfLine() {
lineLen := b.contents[b.curs.y].Len()
if b.curs.x > lineLen {
distToMove := b.curs.x - lineLen
for i := 0; i < distToMove; i++ {
b.moveLeft()
}
}
}
func (b *Buffer) moveUp() {
if b.curs.y > 0 {
b.curs.move(0, -1)
}
}
func (b *Buffer) moveDown() {
if b.curs.y < len(b.contents) {
b.curs.move(0, 1)
}
}
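// swapLineUp exchanges the current line with the one above it and moves the cursor up so it follows the line.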
func (b *Buffer) swapLineUp() bool {
if b.curs.y > 0 {
currLine := b.contents[b.cur | ) swapLineDown() bool {
if b.curs.y < len(b.contents)-1 {
currLine := b.contents[b.curs.y]
nextLine := b.contents[b.curs.y+1]
b.contents[b.curs.y+1] = currLine
b.contents[b.curs.y] = nextLine
b.moveDown()
}
return true
}
func (b *Buffer) scrollUp() {
if b.cam.y > 0 {
// TODO move the cursor up 45 lines
// IF the buffer exceeds the window size.
lineScrollAmount := 10
b.cam.y -= lineScrollAmount
for i := 0; i < lineScrollAmount; i++ {
b.moveUp()
}
}
}
func (b *Buffer) scrollDown() {
if b.cam.y < len(b.contents) {
// TODO move the cursor down 45 lines
// IF the buffer exceeds the window size.
lineScrollAmount := 10
b.cam.y += lineScrollAmount
for i := 0; i < lineScrollAmount; i++ {
b.moveDown()
}
}
}
// processes a key press. returns if there
// was a key that MODIFIED the buffer.
func (b *Buffer) processActionKey(key int) bool {
switch key {
case sdl.K_CAPSLOCK:
CAPS_LOCK = !CAPS_LOCK
return true
case sdl.K_RETURN:
if SUPER_DOWN {
// in sublime this goes
// into the next block
// nicely indented!
}
initial_x := b.curs.x
prevLineLen := b.contents[b.curs.y].Len()
var newRope *rope.Rope
if initial_x < prevLineLen && initial_x > 0 {
// we're not at the end of the line, but we're not at
// the start, i.e. we're SPLITTING the line
left, right := b.contents[b.curs.y].Split(initial_x)
newRope = right
b.contents[b.curs.y] = left
} else if initial_x == 0 {
// we're at the start of a line, so we want to
// shift the line down and insert an empty line
// above it!
b.contents = append(b.contents, new(rope.Rope)) // grow
copy(b.contents[b.curs.y+1:], b.contents[b.curs.y:]) // shift
b.contents[b.curs.y] = new(rope.Rope) // set
b.curs.move(0, 1)
return true
} else {
// we're at the end of a line
newRope = new(rope.Rope)
}
b.curs.move(0, 1)
for x := 0; x < initial_x; x++ {
// TODO(F | s.y]
prevLine := b.contents[b.curs.y-1]
b.contents[b.curs.y-1] = currLine
b.contents[b.curs.y] = prevLine
b.moveUp()
}
return true
}
func (b *Buffer | conditional_block |
buffer.go | // my UK macbook key layout.
var shiftAlternative = map[rune]rune{
'1': '!',
'2': '@',
'3': '£',
'4': '$',
'5': '%',
'6': '^',
'7': '&',
'8': '*',
'9': '(',
'0': ')',
'-': '_',
'=': '+',
'`': '~',
'/': '?',
'.': '>',
',': '<',
'[': '{',
']': '}',
';': ':',
'\'': '"',
'\\': '|',
'§': '±',
}
var altAlternative = map[rune]rune{
'1': '¡',
'2': '€',
'3': '#',
'4': '¢',
'5': '∞',
'6': '§',
'7': '¶',
'8': '•',
'9': 'ª',
'0': 'º',
'-': '–',
'=': '≠',
'`': '`',
'/': '÷',
'.': '≥',
',': '≤',
'[': '“',
']': '‘',
';': '…',
'\'': 'æ',
'\\': '«',
}
func (b *Buffer) processTextInput(r rune) bool {
if ALT_DOWN && r == '\t' {
// nop, we dont want to
// insert tabs when we
// alt tab out of view of this app
return true
}
// only do the alt alternatives on mac osx
// todo change this so it's not checking on every
// input
if runtime.GOOS == "darwin" && ALT_DOWN {
if val, ok := altAlternative[r]; ok {
r = val
}
}
if CAPS_LOCK {
if unicode.IsLetter(r) {
r = unicode.ToUpper(r)
}
}
if CONTROL_DOWN {
actionName, actionExists := cfg.Shortcuts.Controls[string(unicode.ToLower(r))]
if actionExists {
if proc, ok := actions[actionName]; ok {
return proc(b)
}
} else {
log.Println("warning, unimplemented shortcut ctrl+", unicode.ToLower(r), actionName)
}
}
if SUPER_DOWN {
actionName, actionExists := cfg.Shortcuts.Supers[string(unicode.ToLower(r))]
if actionExists {
if proc, ok := actions[actionName]; ok {
return proc(b)
}
} else {
log.Println("warning, unimplemented shortcut ctrl+", unicode.ToLower(r), actionName)
}
}
if SHIFT_DOWN {
// if it's a letter convert to uppercase
if unicode.IsLetter(r) {
r = unicode.ToUpper(r)
} else {
// otherwise we have to look in our trusty
// shift mapping thing.
if val, ok := shiftAlternative[r]; ok {
r = val
}
}
}
// NOTE: we have to do this AFTER we map the
// shift combo for the value!
// this will not insert a ), }, or ] if there
// is one to the right of us... basically
// this escapes out of a closing bracket
// rather than inserting a new one IF we are inside
// brackets.
if b.cfg.Editor.Match_Braces {
if r == ')' || r == '}' || r == ']' {
currLine := b.contents[b.curs.y]
if b.curs.x < currLine.Len() {
curr := currLine.Index(b.curs.x + 1)
if curr == r {
b.curs.move(1, 0)
return true
} else {
log.Print("no it's ", curr)
}
}
}
}
b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, string(r))
b.curs.move(1, 0)
// we don't need to match braces
// let's not continue any further
if !b.cfg.Editor.Match_Braces {
return true
}
// TODO: shall we match single quotes and double quotes too?
matchingPair := int(r)
// the offset in the ASCII Table is +2 for { and for [
// but its +1 for parenthesis (
offset := 2
switch r {
case '(':
offset = 1
fallthrough
case '{':
fallthrough
case '[':
matchingPair += offset
b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, string(rune(matchingPair)))
}
return true
}
func remove(slice []*rope.Rope, s int) []*rope.Rope {
return append(slice[:s], slice[s+1:]...)
}
func (b *Buffer) deleteNext() {
b.moveRight()
b.deletePrev()
}
func (b *Buffer) deletePrev() {
if b.curs.x > 0 {
offs := -1
if !b.cfg.Editor.Tabs_Are_Spaces {
if b.contents[b.curs.y].Index(b.curs.x) == '\t' {
offs = int(-b.cfg.Editor.Tab_Size)
}
} else if b.cfg.Editor.Hungry_Backspace && b.curs.x >= int(b.cfg.Editor.Tab_Size) {
// cut out the last {TAB_SIZE} amount of characters
// and check em
tabSize := int(b.cfg.Editor.Tab_Size)
lastTabSizeChars := b.contents[b.curs.y].Substr(b.curs.x+1-tabSize, tabSize).String()
if strings.Compare(lastTabSizeChars, b.makeTab()) == 0 {
// delete {TAB_SIZE} amount of characters
// from the cursors x pos
for i := 0; i < int(b.cfg.Editor.Tab_Size); i++ {
b.contents[b.curs.y] = b.contents[b.curs.y].Delete(b.curs.x, 1)
b.curs.move(-1, 0)
}
return
}
}
b.contents[b.curs.y] = b.contents[b.curs.y].Delete(b.curs.x, 1)
b.curs.moveRender(-1, 0, offs, 0)
} else if b.curs.x == 0 && b.curs.y > 0 {
// start of line, wrap to previous
prevLineLen := b.contents[b.curs.y-1].Len()
b.contents[b.curs.y-1] = b.contents[b.curs.y-1].Concat(b.contents[b.curs.y])
b.contents = append(b.contents[:b.curs.y], b.contents[b.curs.y+1:]...)
b.curs.move(prevLineLen, -1)
}
}
func (b *Buffer) deleteBeforeCursor() {
// delete so we're at the end
// of the previous line
if b.curs.x == 0 {
b.deletePrev()
return
}
for b.curs.x > 0 {
b.deletePrev()
}
}
func (b *Buffer) moveLeft() {
if b.curs.x == 0 && b.curs.y > 0 {
b.curs.move(b.contents[b.curs.y-1].Len(), -1)
} else if b.curs.x > 0 {
b.curs.move(-1, 0)
}
}
func (b *Buffer) moveRight() {
currLineLength := b.contents[b.curs.y].Len()
if b.curs.x >= currLineLength && b.curs.y < len(b.contents)-1 {
// we're at the end of the line and we have
// some lines after, let's wrap around
b.curs.move(0, 1)
b.curs.move(-currLineLength, 0)
} else if b.curs.x < b.contents[b.curs.y].Len() {
// we have characters to the right, let's move along
b.curs.move(1, 0)
}
}
func (b *Buffer) moveToEndOfLine() {
lineLen := b.contents[b.curs.y].Len()
if b.curs.x > lineLen {
distToMove := b.curs.x - lineLen
for i := 0; i < distToMove; i++ {
b.moveLeft()
}
}
}
func (b *Buffer) moveUp() {
if b.curs.y > 0 {
b.curs.move(0, -1)
}
}
func (b *Buffer) moveDown() {
if b.curs.y < len(b.contents) {
b.curs.move(0, 1)
}
}
func (b *Buffer) swapLineUp() bool {
if b.curs.y > 0 {
currLine := b.contents[b.curs.y]
prevLine := b.contents[b.curs.y-1]
b.contents[b.curs.y | }
// TODO handle EVERYTHING but for now im handling | random_line_split |
|
buffer.go | SHIFT_DOWN bool = false
SUPER_DOWN = false // cmd on mac, ctrl on windows
CONTROL_DOWN = false // what is this on windows?
ALT_DOWN = false // option on mac
CAPS_LOCK = false
)
// TODO(Felix) this is really stupid
func (b *Buffer) makeTab() string {
blah := []rune{}
for i := 0; i < int(b.cfg.Editor.Tab_Size); i++ {
blah = append(blah, ' ')
}
return string(blah)
}
func (b *Buffer) HandleEvent(evt strife.StrifeEvent) {
switch event := evt.(type) {
case *strife.MouseWheelEvent:
if event.Y > 0 {
b.scrollDown()
}
if event.Y < 0 {
b.scrollUp()
}
}
}
func (b *Buffer) OnUpdate() bool {
if !b.HasFocus {
return false
}
prev_x := b.curs.x
prev_y := b.curs.y
SHIFT_DOWN = strife.KeyPressed(sdl.K_LSHIFT) || strife.KeyPressed(sdl.K_RSHIFT)
SUPER_DOWN = strife.KeyPressed(sdl.K_LGUI) || strife.KeyPressed(sdl.K_RGUI)
ALT_DOWN = strife.KeyPressed(sdl.K_LALT) || strife.KeyPressed(sdl.K_RALT)
CONTROL_DOWN = strife.KeyPressed(sdl.K_LCTRL) || strife.KeyPressed(sdl.K_RCTRL)
if strife.PollKeys() {
keyCode := strife.PopKey()
// try process this key input as an
// action first
actionPerformed := b.processActionKey(keyCode)
if actionPerformed {
return true
}
textEntered := b.processTextInput(rune(keyCode))
if textEntered {
return true
}
}
// FIXME handle focus properly
if b.inputHandler == nil {
return false
}
if b.curs.x != prev_x || b.curs.y != prev_y {
should_draw = true
should_flash = false
reset_timer = strife.CurrentTimeMillis()
}
// fixme to not use CurrentTimeMillis
if !should_flash && strife.CurrentTimeMillis()-reset_timer > b.cfg.Cursor.Reset_Delay {
should_flash = true
}
if strife.CurrentTimeMillis()-timer > b.cfg.Cursor.Flash_Rate && (should_flash && b.cfg.Cursor.Flash) {
timer = strife.CurrentTimeMillis()
should_draw = !should_draw
}
return false
}
type syntaxRuneInfo struct {
background int
foreground int
length int
}
// dimensions of the last character we rendered
var last_w, last_h int
// editor x and y offsets
var ex, ey = 0, 0
var compiledRegex = map[string]*regexp.Regexp{}
func (b *Buffer) renderAt(ctx *strife.Renderer, rx int, ry int) {
// BACKGROUND
ctx.SetColor(strife.HexRGB(b.cfg.Theme.Background))
ctx.Rect(b.x, b.y, b.w, b.h, strife.Fill)
if b.cfg.Editor.Highlight_Line && b.HasFocus {
ctx.SetColor(strife.Black) // highlight_line_col?
ctx.Rect(ex+rx, ey+(ry+b.curs.ry*last_h)-(b.cam.y*last_h), b.w, last_h, strife.Fill)
}
// render the ol' cursor
if should_draw && b.cfg.Cursor.Draw && b.HasFocus {
cursorWidth := b.cfg.Cursor.GetCaretWidth()
if cursorWidth == -1 {
cursorWidth = last_w
}
ctx.SetColor(strife.HexRGB(b.cfg.Theme.Cursor)) // caret colour
ctx.Rect(ex+(rx+b.curs.rx*last_w)-(b.cam.x*last_w), (ry+b.curs.ry*last_h)-(b.cam.y*last_h), cursorWidth, last_h, strife.Fill)
}
var visibleLines int = 50
// last_h > 0 means we have done
// a render.
if int(last_h) > 0 && int(b.h) != 0 {
// render an extra three lines just
// so we dont cut anything off if its
// not evenly divisible
visibleLines = (int(b.h) / int(last_h)) + 3
}
start := b.cam.y
upper := b.cam.y + visibleLines
if upper > len(b.contents) {
upper = len(b.contents)
}
numLines := len(b.contents)
var y_col int
for lineNum, rope := range b.contents[start:upper] {
currLine := []rune(rope.String())
// char index => colour
matches := map[int]syntaxRuneInfo{}
stuff := b.cfg.Syntax[b.languageInfo]
subjects := make([]cfg.SyntaxCriteria, len(stuff))
colours := make([]int, len(stuff))
idx := 0
for _, criteria := range stuff {
colours[idx] = criteria.Colour
subjects[idx] = criteria
idx++
}
// HOLY SLOW BATMAN
for charIndex := 0; charIndex < len(currLine); charIndex++ {
for syntaxIndex, syntax := range subjects {
if syntax.Pattern != "" {
// we have a regex pattern
// FIXME this is also very slow!
// we could easily compile all of these
// regular expressions when we load the
// syntax highlighter.
a := string(currLine[charIndex:])
// no need to compile the same regex
// pattern multiple times.
regex, ok := compiledRegex[syntax.Pattern]
if !ok {
var err error
regex, err = regexp.Compile(syntax.Pattern)
if err != nil {
log.Println(err.Error())
}
}
matched := regex.FindString(a)
if matched != "" && len(matched) > 0 {
// for some reason this affects the whole line
if _, ok := matches[charIndex]; !ok {
matches[charIndex] = syntaxRuneInfo{colours[syntaxIndex], -1, len(matched)}
charIndex = charIndex + len(matched)
}
}
} else {
for _, subject := range syntax.Match {
if charIndex+len(subject)+1 > len(currLine) {
continue
}
a := currLine[charIndex : charIndex+len(subject)+1]
// we only want to match words. so we check that it has a space
// before or after the subject word.
if strings.Compare(string(a), subject+" ") == 0 || strings.Compare(string(a), " "+subject) == 0 {
if _, ok := matches[charIndex]; !ok {
matches[charIndex] = syntaxRuneInfo{colours[syntaxIndex], -1, len(string(a))}
break
}
charIndex += len(subject)
}
}
}
}
}
colorStack := []int{}
var x_col int
for idx, char := range currLine {
switch char {
case '\n':
x_col = 0
y_col += 1
continue
case '\t':
x_col += b.cfg.Editor.Tab_Size
continue
}
x_col += 1
ctx.SetColor(strife.HexRGB(b.cfg.Theme.Foreground))
// if we're currently over a character then set
// the font colour to something else
// ONLY SET THE COLOUR IF WE HAVE FOCUS ALSO!
if b.HasFocus && b.curs.x+1 == x_col && b.curs.y == y_col && should_draw {
ctx.SetColor(strife.HexRGB(b.cfg.Theme.Cursor_Invert))
}
if info, ok := matches[idx]; ok {
for i := 0; i < info.length; i++ {
colorStack = append(colorStack, info.background)
}
}
if len(colorStack) > 0 {
var a int32
a, colorStack = int32(colorStack[len(colorStack)-1]), colorStack[:len(colorStack)-1]
ctx.SetColor(strife.HexRGB(a))
}
last_w, last_h = ctx.String(string(char), ex+(rx+((x_col-1)*last_w)), (ry + (y_col * last_h)))
}
if b.cfg.Editor.Show_Line_Numbers {
gutterPadPx := 10
numLinesWidth := len(string(numLines)) + 1
gutterWidth := last_w*numLinesWidth + (gutterPadPx * 2)
// render the line numbers
ctx.SetColor(strife.HexRGB(b.cfg.Theme.Background))
ctx.Rect(rx, (ry + (y_col * last_h)), gutterWidth, b.h, strife.Fill)
ctx.SetColor(strife.HexRGB(b.cfg.Theme.Foreground))
ctx.String(fmt.Sprintf("%*d", numLinesWidth, start+lineNum), rx+gutterPadPx, (ry + (y_col * last_h)))
ex = gutterWidth
}
y_col += 1
}
}
func (b *Buffer) OnRender(ctx *strife.Renderer) {
b.renderAt(ctx, b.x, b.y)
}
| identifier_body |
||
lib.rs | .
assert!(executor.run_until_stalled(&mut future.as_mut()).is_ready());
}
None => warn!("Called stop on already stopped MLME"),
}
}
pub fn delete(mut self) {
if self.internal.is_some() {
warn!("Called delete on MlmeHandle before calling stop.");
self.stop()
}
}
pub fn queue_eth_frame_tx(&mut self, bytes: Vec<u8>) -> Result<(), Error> {
self.driver_event_sink
.unbounded_send(DriverEvent::EthFrameTx { bytes: bytes.into() })
.map_err(|e| e.into())
}
// Fns used to interact with an MLME running in test mode.
// TODO(fxbug.dev/45464): Remove when tests are all in Rust.
pub fn advance_fake_time(&mut self, nanos: i64) {
match &mut self.internal {
Some(MlmeHandleInternal::Real { .. }) => {
panic!("Called advance_fake_time on a real MLME")
}
Some(MlmeHandleInternal::Fake { executor, future }) => {
let time = executor.now();
executor.set_fake_time(time + fasync::Duration::from_nanos(nanos));
executor.wake_expired_timers();
let _ = executor.run_until_stalled(&mut future.as_mut());
}
None => panic!("Called advance_fake_time on stopped MLME"),
}
}
pub fn run_until_stalled(&mut self) {
match &mut self.internal {
Some(MlmeHandleInternal::Real { .. }) => {
panic!("Called run_until_stalled on a real MLME")
}
Some(MlmeHandleInternal::Fake { executor, future }) => {
let _ = executor.run_until_stalled(&mut future.as_mut());
}
None => panic!("Called run_until_stalled on stopped MLME"),
}
}
}
// DriverEventSink is used by other devices to interact with our main loop thread. All
// events from our ethernet device or vendor device are converted to DriverEvents
// and sent through this sink, where they can then be handled serially. Multiple copies of
// DriverEventSink may be safely passed between threads, including one that is used by our
// vendor driver as the context for wlan_softmac_ifc_protocol_ops.
struct DriverEventSink(pub mpsc::UnboundedSender<DriverEvent>);
// TODO(fxbug.dev/29063): Remove copies from MacFrame and EthFrame.
pub enum DriverEvent {
// Indicates that the device is being removed and our main loop should exit.
Stop,
// TODO(fxbug.dev/43456): We need to keep stats for these events and respond to StatsQueryRequest.
// Indicates receipt of a MAC frame from a peer.
MacFrameRx { bytes: Vec<u8>, rx_info: banjo_wlan_softmac::WlanRxInfo },
// Requests transmission of an ethernet frame over the air.
EthFrameTx { bytes: Vec<u8> },
// Reports a scan is complete.
ScanComplete { status: zx::Status, scan_id: u64 },
// Reports the result of an attempted frame transmission.
TxStatusReport { tx_status: banjo_common::WlanTxStatus },
// Reports the current status of the vendor driver.
Status { status: u32 },
}
pub struct Mlme<T: MlmeImpl> {
mlme_impl: T,
minstrel: Option<MinstrelWrapper>,
// A stream of requests coming from the parent SME of this MLME.
mlme_request_stream: fidl_mlme::MlmeRequestStream,
// A stream of events initiated by C++ device drivers and then buffered here
// by our MlmeHandle.
driver_event_stream: mpsc::UnboundedReceiver<DriverEvent>,
time_stream: common::timer::TimeStream<T::TimerEvent>,
minstrel_time_stream: common::timer::TimeStream<()>,
}
fn should_enable_minstrel(mac_sublayer: &banjo_common::MacSublayerSupport) -> bool |
const MINSTREL_UPDATE_INTERVAL: std::time::Duration = std::time::Duration::from_millis(100);
// Remedy for fxbug.dev/8165 (fxbug.dev/33151)
// See |DATA_FRAME_INTERVAL_NANOS|
// in //src/connectivity/wlan/testing/hw-sim/test/rate_selection/src/lib.rs
// Ensure at least one probe frame (generated every 16 data frames)
// in every cycle:
// 16 <= (MINSTREL_UPDATE_INTERVAL_HW_SIM / MINSTREL_DATA_FRAME_INTERVAL_NANOS * 1e6) < 32.
const MINSTREL_UPDATE_INTERVAL_HW_SIM: std::time::Duration = std::time::Duration::from_millis(83);
// Require a static lifetime so we can move this MLME into an event loop task.
impl<T: 'static + MlmeImpl> Mlme<T> {
pub fn start(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
) -> Result<MlmeHandle, Error> {
let (driver_event_sink, driver_event_stream) = mpsc::unbounded();
// This sink is used both by the underlying iface to forward up driver events, as well
// as via the MlmeHandle to send ethernet frames and terminate MLME.
let driver_event_sink_clone = driver_event_sink.clone();
let (startup_sender, startup_receiver) = oneshot::channel();
// Everything else happens in a new thread so that we can switch into an async context
// without requiring all parts of MLME to impl Send.
let join_handle = std::thread::spawn(move || {
info!("Starting WLAN MLME main loop");
let mut executor = fasync::LocalExecutor::new().unwrap();
let future = Self::main_loop_thread(
config,
device,
buf_provider,
driver_event_sink_clone,
driver_event_stream,
startup_sender,
);
executor.run_singlethreaded(future);
});
let mut executor = fasync::LocalExecutor::new().unwrap();
let startup_result = executor.run_singlethreaded(startup_receiver);
match startup_result.map_err(|e| Error::from(e)) {
Ok(Ok(())) => Ok(MlmeHandle {
driver_event_sink,
internal: Some(MlmeHandleInternal::Real { join_handle }),
}),
Err(err) | Ok(Err(err)) => match join_handle.join() {
Ok(()) => bail!("Failed to start the MLME event loop: {:?}", err),
Err(panic_err) => {
bail!("MLME event loop failed and then panicked: {}, {:?}", err, panic_err)
}
},
}
}
// Create an MLME in a test configuration. This MLME will never do anything unless it's progressed
// using MlmeHandle::advance_fake_time and MlmeHandle::run_until_stalled.
pub fn start_test(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
) -> MlmeHandle {
let executor = fasync::TestExecutor::new_with_fake_time().unwrap();
Self::start_test_with_executor(config, device, buf_provider, executor)
}
pub fn start_test_with_executor(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
mut executor: fasync::TestExecutor,
) -> MlmeHandle {
let (driver_event_sink, driver_event_stream) = mpsc::unbounded();
let driver_event_sink_clone = driver_event_sink.clone();
let (startup_sender, mut startup_receiver) = oneshot::channel();
let mut future = Box::pin(Self::main_loop_thread(
config,
device,
buf_provider,
driver_event_sink_clone,
driver_event_stream,
startup_sender,
));
let _ = executor.run_until_stalled(&mut future.as_mut());
startup_receiver
.try_recv()
.unwrap()
.expect("Test MLME setup stalled.")
.expect("Test MLME setup failed.");
MlmeHandle {
driver_event_sink,
internal: Some(MlmeHandleInternal::Fake { executor, future }),
}
}
async fn main_loop_thread(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
driver_event_sink: mpsc::UnboundedSender<DriverEvent>,
driver_event_stream: mpsc::UnboundedReceiver<DriverEvent>,
startup_sender: oneshot::Sender<Result<(), Error>>,
) {
let mut driver_event_sink = Box::new(DriverEventSink(driver_event_sink));
let ifc = device::WlanSoftmacIfcProtocol::new(driver_event_sink.as_mut());
// Indicate to the vendor driver that we can start sending and receiving info. Any messages received from the
// driver before we start our MLME will be safely buffered in our driver_event_sink.
// Note that device.start will copy relevant fields out of ifc, so dropping it after this is fine.
// The returned value is the MLME server end of the channel wlanmevicemonitor created to connect MLME and SME.
let mlme_protocol | {
mac_sublayer.device.tx_status_report_supported && !mac_sublayer.rate_selection_offload.supported
} | identifier_body |
lib.rs | wlanmevicemonitor created to connect MLME and SME.
let mlme_protocol_handle_via_iface_creation = match device.start(&ifc) {
Ok(handle) => handle,
Err(e) => {
// Failure to unwrap indicates a critical failure in the driver init thread.
startup_sender.send(Err(anyhow!("device.start failed: {}", e))).unwrap();
return;
}
};
let channel = zx::Channel::from(mlme_protocol_handle_via_iface_creation);
let server = fidl::endpoints::ServerEnd::<fidl_mlme::MlmeMarker>::new(channel);
let (mlme_request_stream, control_handle) = match server.into_stream_and_control_handle() {
Ok(res) => res,
Err(e) => {
// Failure to unwrap indicates a critical failure in the driver init thread.
startup_sender
.send(Err(anyhow!("Failed to get MLME request stream: {}", e)))
.unwrap();
return;
}
};
let device_mac_sublayer_support = device.mac_sublayer_support();
let (minstrel_timer, minstrel_time_stream) = common::timer::create_timer();
let update_interval = if device_mac_sublayer_support.device.is_synthetic {
MINSTREL_UPDATE_INTERVAL_HW_SIM
} else {
MINSTREL_UPDATE_INTERVAL
};
let minstrel = if should_enable_minstrel(&device_mac_sublayer_support) {
let timer_manager = MinstrelTimer { timer: minstrel_timer, current_timer: None };
let probe_sequence = probe_sequence::ProbeSequence::random_new();
Some(Arc::new(Mutex::new(minstrel::MinstrelRateSelector::new(
timer_manager,
update_interval,
probe_sequence,
))))
} else {
None
};
let new_device = Device::new(device, minstrel.clone(), control_handle);
let (timer, time_stream) = common::timer::create_timer();
let mlme_impl = T::new(config, new_device, buf_provider, timer);
let mlme = Self {
mlme_impl,
minstrel,
mlme_request_stream,
driver_event_stream,
time_stream,
minstrel_time_stream,
};
// Startup is complete. Signal the main thread to proceed.
// Failure to unwrap indicates a critical failure in the driver init thread.
startup_sender.send(Ok(())).unwrap();
let result = Self::run_main_loop(mlme).await;
match result {
Ok(()) => info!("MLME event loop exited gracefully."),
Err(e) => error!("MLME event loop exited with error: {:?}", e),
}
}
/// Begin processing MLME events.
/// Does not return until iface destruction is requested via DriverEvent::Stop, unless
/// a critical error occurs. Note that MlmeHandle::stop will work in either case.
pub async fn run_main_loop(mut self) -> Result<(), Error> {
let mut timer_stream =
common::timer::make_async_timed_event_stream(self.time_stream).fuse();
let mut minstrel_timer_stream =
common::timer::make_async_timed_event_stream(self.minstrel_time_stream).fuse();
loop {
select! {
// Process requests from SME.
mlme_request = self.mlme_request_stream.next() => match mlme_request {
Some(req) => {
match req {
Ok(req) => {
let method_name = req.method_name();
if let Err(e) = self.mlme_impl.handle_mlme_message(req) {
info!("Failed to handle mlme {} request: {}", method_name, e);
}
}
Err(e) => {
info!("Failure while receiving mlme request: {}", e);
}
}
}
None => bail!("MLME request stream terminated unexpectedly."),
},
// Process requests from our C++ drivers.
driver_event = self.driver_event_stream.next() => match driver_event {
Some(event) => match event {
// DriverEvent::Stop indicates a safe shutdown.
DriverEvent::Stop => return Ok(()),
DriverEvent::MacFrameRx { bytes, rx_info } => {
self.mlme_impl.handle_mac_frame_rx(&bytes[..], rx_info);
}
DriverEvent::EthFrameTx { bytes } => {
if let Err(e) = self.mlme_impl.handle_eth_frame_tx(&bytes[..]) {
// TODO(fxbug.dev/45464): Keep a counter of these failures.
info!("Failed to handle eth frame: {}", e);
}
}
DriverEvent::ScanComplete { status, scan_id } => {
self.mlme_impl.handle_scan_complete(status, scan_id)
},
DriverEvent::TxStatusReport { tx_status } => {
if let Some(minstrel) = self.minstrel.as_ref() {
minstrel.lock().handle_tx_status_report(&tx_status)
}
}
DriverEvent::Status { status } => {
self.mlme_impl.access_device().set_eth_status(status)
}
},
None => bail!("Driver event stream terminated unexpectedly."),
},
timed_event = timer_stream.select_next_some() => {
self.mlme_impl.handle_timeout(timed_event.id, timed_event.event);
}
_minstrel_timeout = minstrel_timer_stream.select_next_some() => {
if let Some(minstrel) = self.minstrel.as_ref() {
minstrel.lock().handle_timeout()
}
}
}
}
}
}
#[cfg(test)]
mod test_utils {
use {
super::*, banjo_fuchsia_hardware_wlan_associnfo as banjo_wlan_associnfo,
banjo_fuchsia_wlan_common as banjo_common, fidl::endpoints::RequestStream,
std::default::Default,
};
#[derive(Copy, Clone, Debug)]
pub struct MockWlanRxInfo {
pub rx_flags: banjo_wlan_softmac::WlanRxInfoFlags,
pub valid_fields: u32,
pub phy: banjo_common::WlanPhyType,
pub data_rate: u32,
pub channel: banjo_common::WlanChannel,
pub mcs: u8,
pub rssi_dbm: i8,
pub snr_dbh: i16,
}
impl Default for MockWlanRxInfo {
fn default() -> Self {
Self {
valid_fields: banjo_wlan_associnfo::WlanRxInfoValid::CHAN_WIDTH.0
| banjo_wlan_associnfo::WlanRxInfoValid::RSSI.0
| banjo_wlan_associnfo::WlanRxInfoValid::SNR.0,
channel: banjo_common::WlanChannel {
primary: 1,
cbw: banjo_common::ChannelBandwidth::CBW20,
secondary80: 0,
},
rssi_dbm: -40,
snr_dbh: 35,
// Default to 0 for these fields since there are no
// other reasonable values to mock.
rx_flags: banjo_wlan_softmac::WlanRxInfoFlags(0),
phy: banjo_common::WlanPhyType::DSSS,
data_rate: 0,
mcs: 0,
}
}
}
impl From<MockWlanRxInfo> for banjo_wlan_softmac::WlanRxInfo {
fn from(mock_rx_info: MockWlanRxInfo) -> banjo_wlan_softmac::WlanRxInfo {
banjo_wlan_softmac::WlanRxInfo {
rx_flags: mock_rx_info.rx_flags,
valid_fields: mock_rx_info.valid_fields,
phy: mock_rx_info.phy,
data_rate: mock_rx_info.data_rate,
channel: mock_rx_info.channel,
mcs: mock_rx_info.mcs,
rssi_dbm: mock_rx_info.rssi_dbm,
snr_dbh: mock_rx_info.snr_dbh,
}
}
}
pub(crate) fn fake_control_handle(
// We use this unused parameter to ensure that an executor exists.
_exec: &fuchsia_async::TestExecutor,
) -> (fidl_mlme::MlmeControlHandle, fuchsia_zircon::Channel) {
let (c1, c2) = fuchsia_zircon::Channel::create().unwrap();
let async_c1 = fidl::AsyncChannel::from_channel(c1).unwrap();
let request_stream = fidl_mlme::MlmeRequestStream::from_channel(async_c1);
let control_handle = request_stream.control_handle();
(control_handle, c2)
}
pub struct FakeMlme {
device: Device,
}
impl MlmeImpl for FakeMlme {
type Config = ();
type TimerEvent = ();
fn new(
_config: Self::Config,
device: Device,
_buf_provider: buffer::BufferProvider,
_scheduler: common::timer::Timer<Self::TimerEvent>,
) -> Self {
Self { device }
}
fn handle_mlme_message(&mut self, _msg: fidl_mlme::MlmeRequest) -> Result<(), Error> {
unimplemented!()
}
fn handle_mac_frame_rx(&mut self, _bytes: &[u8], _rx_info: banjo_wlan_softmac::WlanRxInfo) {
unimplemented!()
}
fn | handle_eth_frame_tx | identifier_name |
|
lib.rs | before calling stop.");
self.stop()
}
}
pub fn queue_eth_frame_tx(&mut self, bytes: Vec<u8>) -> Result<(), Error> {
self.driver_event_sink
.unbounded_send(DriverEvent::EthFrameTx { bytes: bytes.into() })
.map_err(|e| e.into())
}
// Fns used to interact with an MLME running in test mode.
// TODO(fxbug.dev/45464): Remove when tests are all in Rust.
pub fn advance_fake_time(&mut self, nanos: i64) {
match &mut self.internal {
Some(MlmeHandleInternal::Real { .. }) => {
panic!("Called advance_fake_time on a real MLME")
}
Some(MlmeHandleInternal::Fake { executor, future }) => {
let time = executor.now();
executor.set_fake_time(time + fasync::Duration::from_nanos(nanos));
executor.wake_expired_timers();
let _ = executor.run_until_stalled(&mut future.as_mut());
}
None => panic!("Called advance_fake_time on stopped MLME"),
}
}
pub fn run_until_stalled(&mut self) {
match &mut self.internal {
Some(MlmeHandleInternal::Real { .. }) => {
panic!("Called run_until_stalled on a real MLME")
}
Some(MlmeHandleInternal::Fake { executor, future }) => {
let _ = executor.run_until_stalled(&mut future.as_mut());
}
None => panic!("Called run_until_stalled on stopped MLME"),
}
}
}
// DriverEventSink is used by other devices to interact with our main loop thread. All
// events from our ethernet device or vendor device are converted to DriverEvents
// and sent through this sink, where they can then be handled serially. Multiple copies of
// DriverEventSink may be safely passed between threads, including one that is used by our
// vendor driver as the context for wlan_softmac_ifc_protocol_ops.
struct DriverEventSink(pub mpsc::UnboundedSender<DriverEvent>);
// TODO(fxbug.dev/29063): Remove copies from MacFrame and EthFrame.
pub enum DriverEvent {
// Indicates that the device is being removed and our main loop should exit.
Stop,
// TODO(fxbug.dev/43456): We need to keep stats for these events and respond to StatsQueryRequest.
// Indicates receipt of a MAC frame from a peer.
MacFrameRx { bytes: Vec<u8>, rx_info: banjo_wlan_softmac::WlanRxInfo },
// Requests transmission of an ethernet frame over the air.
EthFrameTx { bytes: Vec<u8> },
// Reports a scan is complete.
ScanComplete { status: zx::Status, scan_id: u64 },
// Reports the result of an attempted frame transmission.
TxStatusReport { tx_status: banjo_common::WlanTxStatus },
// Reports the current status of the vendor driver.
Status { status: u32 },
}
pub struct Mlme<T: MlmeImpl> {
mlme_impl: T,
minstrel: Option<MinstrelWrapper>,
// A stream of requests coming from the parent SME of this MLME.
mlme_request_stream: fidl_mlme::MlmeRequestStream,
// A stream of events initiated by C++ device drivers and then buffered here
// by our MlmeHandle.
driver_event_stream: mpsc::UnboundedReceiver<DriverEvent>,
time_stream: common::timer::TimeStream<T::TimerEvent>,
minstrel_time_stream: common::timer::TimeStream<()>,
}
fn should_enable_minstrel(mac_sublayer: &banjo_common::MacSublayerSupport) -> bool {
mac_sublayer.device.tx_status_report_supported && !mac_sublayer.rate_selection_offload.supported
}
const MINSTREL_UPDATE_INTERVAL: std::time::Duration = std::time::Duration::from_millis(100);
// Remedy for fxbug.dev/8165 (fxbug.dev/33151)
// See |DATA_FRAME_INTERVAL_NANOS|
// in //src/connectivity/wlan/testing/hw-sim/test/rate_selection/src/lib.rs
// Ensure at least one probe frame (generated every 16 data frames)
// in every cycle:
// 16 <= (MINSTREL_UPDATE_INTERVAL_HW_SIM / MINSTREL_DATA_FRAME_INTERVAL_NANOS * 1e6) < 32.
const MINSTREL_UPDATE_INTERVAL_HW_SIM: std::time::Duration = std::time::Duration::from_millis(83);
// Require a static lifetime so we can move this MLME into an event loop task.
impl<T: 'static + MlmeImpl> Mlme<T> {
pub fn start(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
) -> Result<MlmeHandle, Error> {
let (driver_event_sink, driver_event_stream) = mpsc::unbounded();
// This sink is used both by the underlying iface to forward up driver events, as well
// as via the MlmeHandle to send ethernet frames and terminate MLME.
let driver_event_sink_clone = driver_event_sink.clone();
let (startup_sender, startup_receiver) = oneshot::channel();
// Everything else happens in a new thread so that we can switch into an async context
// without requiring all parts of MLME to impl Send.
let join_handle = std::thread::spawn(move || {
info!("Starting WLAN MLME main loop");
let mut executor = fasync::LocalExecutor::new().unwrap();
let future = Self::main_loop_thread(
config,
device,
buf_provider,
driver_event_sink_clone,
driver_event_stream,
startup_sender,
);
executor.run_singlethreaded(future);
});
let mut executor = fasync::LocalExecutor::new().unwrap();
let startup_result = executor.run_singlethreaded(startup_receiver);
match startup_result.map_err(|e| Error::from(e)) {
Ok(Ok(())) => Ok(MlmeHandle {
driver_event_sink,
internal: Some(MlmeHandleInternal::Real { join_handle }),
}),
Err(err) | Ok(Err(err)) => match join_handle.join() {
Ok(()) => bail!("Failed to start the MLME event loop: {:?}", err),
Err(panic_err) => {
bail!("MLME event loop failed and then panicked: {}, {:?}", err, panic_err)
}
},
}
}
// Create an MLME in a test configuration. This MLME will never do anything unless it's progressed
// using MlmeHandle::advance_fake_time and MlmeHandle::run_until_stalled.
pub fn start_test(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
) -> MlmeHandle {
let executor = fasync::TestExecutor::new_with_fake_time().unwrap();
Self::start_test_with_executor(config, device, buf_provider, executor)
}
pub fn start_test_with_executor(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
mut executor: fasync::TestExecutor,
) -> MlmeHandle {
let (driver_event_sink, driver_event_stream) = mpsc::unbounded();
let driver_event_sink_clone = driver_event_sink.clone();
let (startup_sender, mut startup_receiver) = oneshot::channel();
let mut future = Box::pin(Self::main_loop_thread(
config,
device,
buf_provider,
driver_event_sink_clone,
driver_event_stream,
startup_sender,
));
let _ = executor.run_until_stalled(&mut future.as_mut());
startup_receiver
.try_recv()
.unwrap()
.expect("Test MLME setup stalled.")
.expect("Test MLME setup failed.");
MlmeHandle {
driver_event_sink,
internal: Some(MlmeHandleInternal::Fake { executor, future }),
}
}
async fn main_loop_thread(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
driver_event_sink: mpsc::UnboundedSender<DriverEvent>,
driver_event_stream: mpsc::UnboundedReceiver<DriverEvent>,
startup_sender: oneshot::Sender<Result<(), Error>>,
) {
let mut driver_event_sink = Box::new(DriverEventSink(driver_event_sink));
let ifc = device::WlanSoftmacIfcProtocol::new(driver_event_sink.as_mut());
// Indicate to the vendor driver that we can start sending and receiving info. Any messages received from the
// driver before we start our MLME will be safely buffered in our driver_event_sink.
// Note that device.start will copy relevant fields out of ifc, so dropping it after this is fine.
// The returned value is the MLME server end of the channel wlanmevicemonitor created to connect MLME and SME.
let mlme_protocol_handle_via_iface_creation = match device.start(&ifc) {
Ok(handle) => handle,
Err(e) => | {
// Failure to unwrap indicates a critical failure in the driver init thread.
startup_sender.send(Err(anyhow!("device.start failed: {}", e))).unwrap();
return;
} | conditional_block |
|
lib.rs | now.
assert!(executor.run_until_stalled(&mut future.as_mut()).is_ready());
}
None => warn!("Called stop on already stopped MLME"),
}
}
pub fn delete(mut self) {
if self.internal.is_some() {
warn!("Called delete on MlmeHandle before calling stop.");
self.stop()
}
}
pub fn queue_eth_frame_tx(&mut self, bytes: Vec<u8>) -> Result<(), Error> {
self.driver_event_sink
.unbounded_send(DriverEvent::EthFrameTx { bytes: bytes.into() })
.map_err(|e| e.into())
}
// Fns used to interact with an MLME running in test mode.
// TODO(fxbug.dev/45464): Remove when tests are all in Rust.
pub fn advance_fake_time(&mut self, nanos: i64) {
match &mut self.internal {
Some(MlmeHandleInternal::Real { .. }) => {
panic!("Called advance_fake_time on a real MLME")
}
Some(MlmeHandleInternal::Fake { executor, future }) => {
let time = executor.now();
executor.set_fake_time(time + fasync::Duration::from_nanos(nanos));
executor.wake_expired_timers();
let _ = executor.run_until_stalled(&mut future.as_mut());
}
None => panic!("Called advance_fake_time on stopped MLME"),
}
}
pub fn run_until_stalled(&mut self) {
match &mut self.internal {
Some(MlmeHandleInternal::Real { .. }) => {
panic!("Called run_until_stalled on a real MLME")
}
Some(MlmeHandleInternal::Fake { executor, future }) => {
let _ = executor.run_until_stalled(&mut future.as_mut());
}
None => panic!("Called run_until_stalled on stopped MLME"),
}
}
}
// DriverEventSink is used by other devices to interact with our main loop thread. All
// events from our ethernet device or vendor device are converted to DriverEvents
// and sent through this sink, where they can then be handled serially. Multiple copies of
// DriverEventSink may be safely passed between threads, including one that is used by our
// vendor driver as the context for wlan_softmac_ifc_protocol_ops.
struct DriverEventSink(pub mpsc::UnboundedSender<DriverEvent>);
// TODO(fxbug.dev/29063): Remove copies from MacFrame and EthFrame.
pub enum DriverEvent {
// Indicates that the device is being removed and our main loop should exit.
Stop,
// TODO(fxbug.dev/43456): We need to keep stats for these events and respond to StatsQueryRequest.
// Indicates receipt of a MAC frame from a peer.
MacFrameRx { bytes: Vec<u8>, rx_info: banjo_wlan_softmac::WlanRxInfo },
// Requests transmission of an ethernet frame over the air.
EthFrameTx { bytes: Vec<u8> },
// Reports a scan is complete.
ScanComplete { status: zx::Status, scan_id: u64 },
// Reports the result of an attempted frame transmission.
TxStatusReport { tx_status: banjo_common::WlanTxStatus },
// Reports the current status of the vendor driver.
Status { status: u32 },
}
pub struct Mlme<T: MlmeImpl> {
mlme_impl: T,
minstrel: Option<MinstrelWrapper>,
// A stream of requests coming from the parent SME of this MLME.
mlme_request_stream: fidl_mlme::MlmeRequestStream,
// A stream of events initiated by C++ device drivers and then buffered here
// by our MlmeHandle.
driver_event_stream: mpsc::UnboundedReceiver<DriverEvent>,
time_stream: common::timer::TimeStream<T::TimerEvent>,
minstrel_time_stream: common::timer::TimeStream<()>,
}
fn should_enable_minstrel(mac_sublayer: &banjo_common::MacSublayerSupport) -> bool {
mac_sublayer.device.tx_status_report_supported && !mac_sublayer.rate_selection_offload.supported
}
const MINSTREL_UPDATE_INTERVAL: std::time::Duration = std::time::Duration::from_millis(100);
// Remedy for fxbug.dev/8165 (fxbug.dev/33151)
// See |DATA_FRAME_INTERVAL_NANOS|
// in //src/connectivity/wlan/testing/hw-sim/test/rate_selection/src/lib.rs
// Ensure at least one probe frame (generated every 16 data frames)
// in every cycle:
// 16 <= (MINSTREL_UPDATE_INTERVAL_HW_SIM / MINSTREL_DATA_FRAME_INTERVAL_NANOS * 1e6) < 32.
const MINSTREL_UPDATE_INTERVAL_HW_SIM: std::time::Duration = std::time::Duration::from_millis(83);
// Require a static lifetime so we can move this MLME into an event loop task.
impl<T: 'static + MlmeImpl> Mlme<T> {
pub fn start(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
) -> Result<MlmeHandle, Error> {
let (driver_event_sink, driver_event_stream) = mpsc::unbounded();
// This sink is used both by the underlying iface to forward up driver events, as well
// as via the MlmeHandle to send ethernet frames and terminate MLME.
let driver_event_sink_clone = driver_event_sink.clone();
let (startup_sender, startup_receiver) = oneshot::channel();
// Everything else happens in a new thread so that we can switch into an async context
// without requiring all parts of MLME to impl Send.
let join_handle = std::thread::spawn(move || {
info!("Starting WLAN MLME main loop");
let mut executor = fasync::LocalExecutor::new().unwrap();
let future = Self::main_loop_thread(
config,
device,
buf_provider,
driver_event_sink_clone,
driver_event_stream,
startup_sender,
);
executor.run_singlethreaded(future);
});
let mut executor = fasync::LocalExecutor::new().unwrap();
let startup_result = executor.run_singlethreaded(startup_receiver);
match startup_result.map_err(|e| Error::from(e)) {
Ok(Ok(())) => Ok(MlmeHandle {
driver_event_sink,
internal: Some(MlmeHandleInternal::Real { join_handle }),
}),
Err(err) | Ok(Err(err)) => match join_handle.join() {
Ok(()) => bail!("Failed to start the MLME event loop: {:?}", err),
Err(panic_err) => {
bail!("MLME event loop failed and then panicked: {}, {:?}", err, panic_err)
}
},
}
}
// Create an MLME in a test configuration. This MLME will never do anything unless it's progressed
// using MlmeHandle::advance_fake_time and MlmeHandle::run_until_stalled.
pub fn start_test(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
) -> MlmeHandle {
let executor = fasync::TestExecutor::new_with_fake_time().unwrap();
Self::start_test_with_executor(config, device, buf_provider, executor)
}
pub fn start_test_with_executor(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
mut executor: fasync::TestExecutor,
) -> MlmeHandle {
let (driver_event_sink, driver_event_stream) = mpsc::unbounded();
let driver_event_sink_clone = driver_event_sink.clone();
let (startup_sender, mut startup_receiver) = oneshot::channel();
let mut future = Box::pin(Self::main_loop_thread(
config,
device,
buf_provider,
driver_event_sink_clone,
driver_event_stream,
startup_sender, | startup_receiver
.try_recv()
.unwrap()
.expect("Test MLME setup stalled.")
.expect("Test MLME setup failed.");
MlmeHandle {
driver_event_sink,
internal: Some(MlmeHandleInternal::Fake { executor, future }),
}
}
async fn main_loop_thread(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
driver_event_sink: mpsc::UnboundedSender<DriverEvent>,
driver_event_stream: mpsc::UnboundedReceiver<DriverEvent>,
startup_sender: oneshot::Sender<Result<(), Error>>,
) {
let mut driver_event_sink = Box::new(DriverEventSink(driver_event_sink));
let ifc = device::WlanSoftmacIfcProtocol::new(driver_event_sink.as_mut());
// Indicate to the vendor driver that we can start sending and receiving info. Any messages received from the
// driver before we start our MLME will be safely buffered in our driver_event_sink.
// Note that device.start will copy relevant fields out of ifc, so dropping it after this is fine.
// The returned value is the MLME server end of the channel wlanmevicemonitor created to connect MLME and SME.
let mlme_protocol_handle | ));
let _ = executor.run_until_stalled(&mut future.as_mut()); | random_line_split |
io_export_arm.py | pdata = np.array(pdata, dtype='<i2')
ndata = np.array(ndata, dtype='<i2')
if has_tex:
t0data *= invscale_tex
t0data = np.array(t0data, dtype='<i2')
if has_tex1:
t1data *= invscale_tex
t1data = np.array(t1data, dtype='<i2')
if has_col:
cdata *= 32767
cdata = np.array(cdata, dtype='<i2')
if has_tang:
tangdata *= 32767
tangdata = np.array(tangdata, dtype='<i2')
# Output
o['vertex_arrays'] = []
o['vertex_arrays'].append({ 'attrib': 'pos', 'values': pdata })
o['vertex_arrays'].append({ 'attrib': 'nor', 'values': ndata })
if has_tex:
o['vertex_arrays'].append({ 'attrib': 'tex', 'values': t0data })
if has_tex1:
o['vertex_arrays'].append({ 'attrib': 'tex1', 'values': t1data })
if has_col:
o['vertex_arrays'].append({ 'attrib': 'col', 'values': cdata })
if has_tang:
o['vertex_arrays'].append({ 'attrib': 'tang', 'values': tangdata })
def export_mesh(self, bobject, scene):
# This function exports a single mesh object
print('Exporting mesh ' + bobject.data.name)
o = {}
o['name'] = bobject.name
mesh = bobject.data
armature = bobject.find_armature()
apply_modifiers = not armature
bobject_eval = bobject.evaluated_get(self.depsgraph) if apply_modifiers else bobject
exportMesh = bobject_eval.to_mesh()
self.calc_aabb(bobject)
self.export_mesh_data(exportMesh, bobject, o, has_armature=armature != None)
# if armature:
# self.export_skin(bobject, armature, exportMesh, o)
self.write_mesh(bobject, o)
bobject_eval.to_mesh_clear()
def export_objects(self, scene):
meshes = []
self.output['mesh_datas'] = [];
for o in scene.objects:
if o.type == 'MESH' and o.data != None and o.data not in meshes:
meshes.append(o.data)
self.export_mesh(o, scene)
def write_arm(self, filepath, output):
with open(filepath, 'wb') as f:
f.write(packb(output))
def execute(self, context):
profile_time = time.time()
self.depsgraph = context.evaluated_depsgraph_get()
self.output = {}
self.export_objects(context.scene)
self.write_arm(self.filepath, self.output)
print('Scene exported in ' + str(time.time() - profile_time))
return {'FINISHED'}
def menu_func(self, context):
self.layout.operator(ArmoryExporter.bl_idname, text="Armory (.arm)")
def register():
bpy.utils.register_class(ArmoryExporter)
bpy.types.TOPBAR_MT_file_export.append(menu_func)
def unregister():
bpy.types.TOPBAR_MT_file_export.remove(menu_func)
bpy.utils.unregister_class(ArmoryExporter)
if __name__ == "__main__":
register()
# Msgpack parser with typed arrays
# Based on u-msgpack-python v2.4.1 - v at sergeev.io
# https://github.com/vsergeev/u-msgpack-python
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import struct
import io
import numpy as np
def _pack_integer(obj, fp):
if obj < 0:
if obj >= -32:
fp.write(struct.pack("b", obj))
elif obj >= -2**(8 - 1):
fp.write(b"\xd0" + struct.pack("b", obj))
elif obj >= -2**(16 - 1):
fp.write(b"\xd1" + struct.pack("<h", obj))
elif obj >= -2**(32 - 1):
fp.write(b"\xd2" + struct.pack("<i", obj))
elif obj >= -2**(64 - 1):
fp.write(b"\xd3" + struct.pack("<q", obj))
else:
raise Exception("huge signed int")
else:
if obj <= 127:
fp.write(struct.pack("B", obj))
elif obj <= 2**8 - 1:
fp.write(b"\xcc" + struct.pack("B", obj))
elif obj <= 2**16 - 1:
fp.write(b"\xcd" + struct.pack("<H", obj))
elif obj <= 2**32 - 1:
fp.write(b"\xce" + struct.pack("<I", obj))
elif obj <= 2**64 - 1:
fp.write(b"\xcf" + struct.pack("<Q", obj))
else:
raise Exception("huge unsigned int")
def _pack_nil(obj, fp):
fp.write(b"\xc0")
def _pack_boolean(obj, fp):
fp.write(b"\xc3" if obj else b"\xc2")
def _pack_float(obj, fp):
# NOTE: forced 32-bit floats for Armory
# fp.write(b"\xcb" + struct.pack("<d", obj)) # Double
fp.write(b"\xca" + struct.pack("<f", obj))
def _pack_string(obj, fp):
obj = obj.encode('utf-8')
if len(obj) <= 31:
fp.write(struct.pack("B", 0xa0 | len(obj)) + obj)
elif len(obj) <= 2**8 - 1:
fp.write(b"\xd9" + struct.pack("B", len(obj)) + obj)
elif len(obj) <= 2**16 - 1:
fp.write(b"\xda" + struct.pack("<H", len(obj)) + obj)
elif len(obj) <= 2**32 - 1:
fp.write(b"\xdb" + struct.pack("<I", len(obj)) + obj)
else:
raise Exception("huge string")
def _pack_binary(obj, fp):
if len(obj) <= 2**8 - 1:
fp.write(b"\xc4" + struct.pack("B", len(obj)) + obj)
elif len(obj) <= 2**16 - 1:
fp.write(b"\xc5" + struct.pack("<H", len(obj)) + obj)
elif len(obj) <= 2**32 - 1:
fp.write(b"\xc6" + struct.pack("<I", len(obj)) + obj)
else:
raise Exception("huge binary string")
def _pack_array(obj, fp):
if len(obj) <= 15:
fp.write(struct.pack("B", 0x90 | len(obj)))
elif len(obj) <= 2**16 - 1:
fp.write(b"\xdc" + struct.pack("<H", len(obj)))
elif len(obj) <= 2**32 - 1:
fp.write(b"\xdd" + struct.pack("<I", len(obj)))
else:
raise Exception("huge array")
if len(obj) > 0 and isinstance(obj[0], float):
fp.write(b"\xca")
for e in obj:
fp.write(struct.pack("<f", e))
elif len(obj) > 0 and isinstance(obj[0], bool):
for e in obj:
pack(e, fp)
elif len(obj) > 0 and isinstance(obj[0], int):
fp.write(b"\xd2")
for e in obj:
fp.write(struct.pack("<i", e))
# Float32
elif len(obj) > 0 and isinstance(obj[0], np.float32):
fp.write(b"\xca")
fp.write(obj.tobytes())
# Int32
elif len(obj) > 0 and isinstance(obj[0], np.int32):
fp.write(b"\xd2")
fp.write(obj.tobytes())
# Int16
elif len(obj) > 0 and isinstance(obj[0], np.int16):
fp.write(b"\xd1")
fp.write(obj.tobytes())
# Regular
else:
for e in obj:
pack(e, fp)
def | _pack_map | identifier_name |
|
io_export_arm.py | o['vertex_arrays'].append({ 'attrib': 'col', 'values': cdata })
if has_tang:
o['vertex_arrays'].append({ 'attrib': 'tang', 'values': tangdata })
def export_mesh(self, bobject, scene):
# This function exports a single mesh object
print('Exporting mesh ' + bobject.data.name)
o = {}
o['name'] = bobject.name
mesh = bobject.data
armature = bobject.find_armature()
apply_modifiers = not armature
bobject_eval = bobject.evaluated_get(self.depsgraph) if apply_modifiers else bobject
exportMesh = bobject_eval.to_mesh()
self.calc_aabb(bobject)
self.export_mesh_data(exportMesh, bobject, o, has_armature=armature != None)
# if armature:
# self.export_skin(bobject, armature, exportMesh, o)
self.write_mesh(bobject, o)
bobject_eval.to_mesh_clear()
def export_objects(self, scene):
meshes = []
self.output['mesh_datas'] = [];
for o in scene.objects:
if o.type == 'MESH' and o.data != None and o.data not in meshes:
meshes.append(o.data)
self.export_mesh(o, scene)
def write_arm(self, filepath, output):
with open(filepath, 'wb') as f:
f.write(packb(output))
def execute(self, context):
profile_time = time.time()
self.depsgraph = context.evaluated_depsgraph_get()
self.output = {}
self.export_objects(context.scene)
self.write_arm(self.filepath, self.output)
print('Scene exported in ' + str(time.time() - profile_time))
return {'FINISHED'}
def menu_func(self, context):
self.layout.operator(ArmoryExporter.bl_idname, text="Armory (.arm)")
def register():
bpy.utils.register_class(ArmoryExporter)
bpy.types.TOPBAR_MT_file_export.append(menu_func)
def unregister():
bpy.types.TOPBAR_MT_file_export.remove(menu_func)
bpy.utils.unregister_class(ArmoryExporter)
if __name__ == "__main__":
register()
# Msgpack parser with typed arrays
# Based on u-msgpack-python v2.4.1 - v at sergeev.io
# https://github.com/vsergeev/u-msgpack-python
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import struct
import io
import numpy as np
def _pack_integer(obj, fp):
if obj < 0:
if obj >= -32:
fp.write(struct.pack("b", obj))
elif obj >= -2**(8 - 1):
fp.write(b"\xd0" + struct.pack("b", obj))
elif obj >= -2**(16 - 1):
fp.write(b"\xd1" + struct.pack("<h", obj))
elif obj >= -2**(32 - 1):
fp.write(b"\xd2" + struct.pack("<i", obj))
elif obj >= -2**(64 - 1):
fp.write(b"\xd3" + struct.pack("<q", obj))
else:
raise Exception("huge signed int")
else:
if obj <= 127:
fp.write(struct.pack("B", obj))
elif obj <= 2**8 - 1:
fp.write(b"\xcc" + struct.pack("B", obj))
elif obj <= 2**16 - 1:
fp.write(b"\xcd" + struct.pack("<H", obj))
elif obj <= 2**32 - 1:
fp.write(b"\xce" + struct.pack("<I", obj))
elif obj <= 2**64 - 1:
fp.write(b"\xcf" + struct.pack("<Q", obj))
else:
raise Exception("huge unsigned int")
def _pack_nil(obj, fp):
fp.write(b"\xc0")
def _pack_boolean(obj, fp):
fp.write(b"\xc3" if obj else b"\xc2")
def _pack_float(obj, fp):
# NOTE: forced 32-bit floats for Armory
# fp.write(b"\xcb" + struct.pack("<d", obj)) # Double
fp.write(b"\xca" + struct.pack("<f", obj))
def _pack_string(obj, fp):
obj = obj.encode('utf-8')
if len(obj) <= 31:
fp.write(struct.pack("B", 0xa0 | len(obj)) + obj)
elif len(obj) <= 2**8 - 1:
fp.write(b"\xd9" + struct.pack("B", len(obj)) + obj)
elif len(obj) <= 2**16 - 1:
fp.write(b"\xda" + struct.pack("<H", len(obj)) + obj)
elif len(obj) <= 2**32 - 1:
fp.write(b"\xdb" + struct.pack("<I", len(obj)) + obj)
else:
raise Exception("huge string")
def _pack_binary(obj, fp):
if len(obj) <= 2**8 - 1:
fp.write(b"\xc4" + struct.pack("B", len(obj)) + obj)
elif len(obj) <= 2**16 - 1:
fp.write(b"\xc5" + struct.pack("<H", len(obj)) + obj)
elif len(obj) <= 2**32 - 1:
fp.write(b"\xc6" + struct.pack("<I", len(obj)) + obj)
else:
raise Exception("huge binary string")
def _pack_array(obj, fp):
if len(obj) <= 15:
fp.write(struct.pack("B", 0x90 | len(obj)))
elif len(obj) <= 2**16 - 1:
fp.write(b"\xdc" + struct.pack("<H", len(obj)))
elif len(obj) <= 2**32 - 1:
fp.write(b"\xdd" + struct.pack("<I", len(obj)))
else:
raise Exception("huge array")
if len(obj) > 0 and isinstance(obj[0], float):
fp.write(b"\xca")
for e in obj:
fp.write(struct.pack("<f", e))
elif len(obj) > 0 and isinstance(obj[0], bool):
for e in obj:
pack(e, fp)
elif len(obj) > 0 and isinstance(obj[0], int):
fp.write(b"\xd2")
for e in obj:
fp.write(struct.pack("<i", e))
# Float32
elif len(obj) > 0 and isinstance(obj[0], np.float32):
fp.write(b"\xca")
fp.write(obj.tobytes())
# Int32
elif len(obj) > 0 and isinstance(obj[0], np.int32):
fp.write(b"\xd2")
fp.write(obj.tobytes())
# Int16
elif len(obj) > 0 and isinstance(obj[0], np.int16):
fp.write(b"\xd1")
fp.write(obj.tobytes())
# Regular
else:
for e in obj:
pack(e, fp)
def _pack_map(obj, fp):
if len(obj) <= 15:
fp.write(struct.pack("B", 0x80 | len(obj)))
elif len(obj) <= 2**16 - 1:
fp.write(b"\xde" + struct.pack("<H", len(obj)))
elif len(obj) <= 2**32 - 1:
fp.write(b"\xdf" + struct.pack("<I", len(obj)))
else:
raise Exception("huge array")
for k, v in obj.items():
pack(k, fp)
pack(v, fp)
def pack(obj, fp):
if obj is None:
_pack_nil(obj, fp)
elif isinstance(obj, bool):
_pack_boolean(obj, fp)
elif isinstance(obj, int):
_pack_integer(obj, fp)
elif isinstance(obj, float):
_pack_float(obj, fp)
elif isinstance(obj, str):
_pack_string(obj, fp)
elif isinstance(obj, bytes):
_pack_binary(obj, fp)
elif isinstance(obj, list) or isinstance(obj, tuple) or isinstance(obj, np.ndarray):
_pack_array(obj, fp)
elif isinstance(obj, dict):
| _pack_map(obj, fp) | conditional_block |
|
io_export_arm.py | ] = normal[0]
ndata[i2 + 1] = normal[1]
if has_tex:
uv = lay0.data[loop.index].uv
t0data[i2 ] = uv[0]
t0data[i2 + 1] = 1.0 - uv[1] # Reverse Y
if has_tex1:
uv = lay1.data[loop.index].uv
t1data[i2 ] = uv[0]
t1data[i2 + 1] = 1.0 - uv[1]
if has_tang:
i3 = i * 3
tangdata[i3 ] = tang[0]
tangdata[i3 + 1] = tang[1]
tangdata[i3 + 2] = tang[2]
if has_col:
i3 = i * 3
cdata[i3 ] = pow(v.col[0], 2.2)
cdata[i3 + 1] = pow(v.col[1], 2.2)
cdata[i3 + 2] = pow(v.col[2], 2.2)
mats = exportMesh.materials
poly_map = []
for i in range(max(len(mats), 1)):
poly_map.append([])
for poly in exportMesh.polygons:
poly_map[poly.material_index].append(poly)
o['index_arrays'] = []
for index, polys in enumerate(poly_map):
tris = 0
for poly in polys:
tris += poly.loop_total - 2
if tris == 0: # No face assigned
continue
prim = np.empty(tris * 3, dtype='<i4')
i = 0
for poly in polys:
first = poly.loop_start
total = poly.loop_total
if total == 3:
prim[i ] = loops[first ].index
prim[i + 1] = loops[first + 1].index
prim[i + 2] = loops[first + 2].index
i += 3
else:
for j in range(total - 2):
prim[i ] = loops[first + total - 1].index
prim[i + 1] = loops[first + j ].index
prim[i + 2] = loops[first + j + 1 ].index
i += 3
ia = {}
ia['values'] = prim
ia['material'] = 0
if len(mats) > 1:
for i in range(len(mats)): # Multi-mat mesh
if (mats[i] == mats[index]): # Default material for empty slots
ia['material'] = i
break
o['index_arrays'].append(ia)
# Pack
pdata *= invscale_pos
ndata *= 32767
pdata = np.array(pdata, dtype='<i2')
ndata = np.array(ndata, dtype='<i2')
if has_tex:
t0data *= invscale_tex
t0data = np.array(t0data, dtype='<i2')
if has_tex1:
t1data *= invscale_tex
t1data = np.array(t1data, dtype='<i2')
if has_col:
cdata *= 32767
cdata = np.array(cdata, dtype='<i2')
if has_tang:
tangdata *= 32767
tangdata = np.array(tangdata, dtype='<i2')
# Output
o['vertex_arrays'] = []
o['vertex_arrays'].append({ 'attrib': 'pos', 'values': pdata })
o['vertex_arrays'].append({ 'attrib': 'nor', 'values': ndata })
if has_tex:
o['vertex_arrays'].append({ 'attrib': 'tex', 'values': t0data })
if has_tex1:
o['vertex_arrays'].append({ 'attrib': 'tex1', 'values': t1data })
if has_col:
o['vertex_arrays'].append({ 'attrib': 'col', 'values': cdata })
if has_tang:
o['vertex_arrays'].append({ 'attrib': 'tang', 'values': tangdata })
def export_mesh(self, bobject, scene):
# This function exports a single mesh object
print('Exporting mesh ' + bobject.data.name)
o = {}
o['name'] = bobject.name
mesh = bobject.data
armature = bobject.find_armature()
apply_modifiers = not armature
bobject_eval = bobject.evaluated_get(self.depsgraph) if apply_modifiers else bobject
exportMesh = bobject_eval.to_mesh()
self.calc_aabb(bobject)
self.export_mesh_data(exportMesh, bobject, o, has_armature=armature != None)
# if armature:
# self.export_skin(bobject, armature, exportMesh, o)
self.write_mesh(bobject, o)
bobject_eval.to_mesh_clear()
def export_objects(self, scene):
meshes = []
self.output['mesh_datas'] = [];
for o in scene.objects:
if o.type == 'MESH' and o.data != None and o.data not in meshes:
meshes.append(o.data)
self.export_mesh(o, scene)
def write_arm(self, filepath, output):
with open(filepath, 'wb') as f:
f.write(packb(output))
def execute(self, context):
profile_time = time.time()
self.depsgraph = context.evaluated_depsgraph_get()
self.output = {}
self.export_objects(context.scene)
self.write_arm(self.filepath, self.output)
print('Scene exported in ' + str(time.time() - profile_time))
return {'FINISHED'}
def menu_func(self, context):
self.layout.operator(ArmoryExporter.bl_idname, text="Armory (.arm)")
def register():
bpy.utils.register_class(ArmoryExporter)
bpy.types.TOPBAR_MT_file_export.append(menu_func)
def unregister():
bpy.types.TOPBAR_MT_file_export.remove(menu_func)
bpy.utils.unregister_class(ArmoryExporter)
if __name__ == "__main__":
register()
# Msgpack parser with typed arrays
# Based on u-msgpack-python v2.4.1 - v at sergeev.io
# https://github.com/vsergeev/u-msgpack-python
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import struct
import io
import numpy as np
def _pack_integer(obj, fp):
if obj < 0:
if obj >= -32:
fp.write(struct.pack("b", obj))
elif obj >= -2**(8 - 1):
fp.write(b"\xd0" + struct.pack("b", obj))
elif obj >= -2**(16 - 1):
fp.write(b"\xd1" + struct.pack("<h", obj))
elif obj >= -2**(32 - 1):
fp.write(b"\xd2" + struct.pack("<i", obj))
elif obj >= -2**(64 - 1):
fp.write(b"\xd3" + struct.pack("<q", obj))
else:
raise Exception("huge signed int")
else:
if obj <= 127:
fp.write(struct.pack("B", obj))
elif obj <= 2**8 - 1:
fp.write(b"\xcc" + struct.pack("B", obj))
elif obj <= 2**16 - 1:
fp.write(b"\xcd" + struct.pack("<H", obj))
elif obj <= 2**32 - 1:
fp.write(b"\xce" + struct.pack("<I", obj))
elif obj <= 2**64 - 1:
fp.write(b"\xcf" + struct.pack("<Q", obj))
else:
raise Exception("huge unsigned int")
def _pack_nil(obj, fp):
fp.write(b"\xc0")
def _pack_boolean(obj, fp):
fp.write(b"\xc3" if obj else b"\xc2")
def _pack_float(obj, fp):
# NOTE: forced 32-bit floats for Armory
# fp.write(b"\xcb" + struct.pack("<d", obj)) # Double
| fp.write(b"\xca" + struct.pack("<f", obj)) | identifier_body |